code | repo_name | path | language | license | size
---|---|---|---|---|---|
/*
* Copyright (C) 2013 salesforce.com, inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* =======================================
* CONFIGURATION DOCS
* =======================================
*/
/**
* Config object that contains all of the configuration options for
* a scroller instance.
*
* This object is supplied by the implementer when instantiating a scroller. Some
* properties have default values if they are not supplied by the implementer.
* All the properties with the exception of `enabled` and `scroll` are saved
* inside the instance and are accessible through the `this.opts` property.
*
* You can add your own options to be used by your own plugins.
*
* @class config
* @static
*
**/
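/*
 * Example (a minimal sketch; the `Scroller` constructor name is an assumption,
 * adapt it to however the scroller is exposed in your build):
 *
 *     var scroller = new Scroller(document.getElementById('wrapper'), {
 *         scroll           : 'vertical',
 *         useCSSTransition : false,
 *         minThreshold     : 5,
 *         pullToRefresh    : true, // requires the PullToRefresh plugin
 *         myPluginOption   : 42    // custom options are kept in `this.opts`
 *     });
 */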
/**
* Toggle the state of the scroller. If `enabled:false`, the scroller will not
* respond to any gesture events.
*
* @property {Boolean} enabled
* @default true
*
**/
/**
* Duration in ms of the transition when the scroller snaps back after
* scrolling out of bounds.
*
* @property {integer} bounceTime
* @default 600
*
**/
/**
* Use CSS transitions to perform the scrolling. By default this is set to false and
* a transition based on `requestAnimationFrame` is used instead.
*
* Given a position and duration to scroll, it applies a `matrix3d()` transform,
* a `transition-timing-function` (by default a cubic-bezier curve),
* and a `transition-duration` to make the element scroll.
*
* Most libraries use this CSS technique to create a synthetic scroller.
* While it is the simplest and leanest (that is, closest to the browser)
* implementation possible, performance starts to degrade when dealing with
* large amounts of DOM or very large scroller sizes, due to the massive
* amounts of GPU, CPU, and memory needed to manipulate such a large and
* complex region.
*
* Moreover, this technique gives you neither control over the scroll
* nor any position information while scrolling, given that the only event
* fired by the browser is `transitionEnd`, which is triggered once the
* transition is over.
*
* **It's recommended to use this configuration when:**
*
* - The scrolling size is reasonably small
* - The content of the scroller is not changing often (little DOM manipulation)
* - You don't need position information updates while scrolling
*
*
* @property {Boolean} useCSSTransition
* @default false
*
**/
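/*
 * For reference, a sketch of the underlying CSS technique described above
 * (illustrative values; the actual curve and duration are computed per scroll):
 *
 *     el.style.transform = 'matrix3d(1,0,0,0, 0,1,0,0, 0,0,1,0, 0,-1200,0,1)';
 *     el.style.transitionTimingFunction = 'cubic-bezier(0.33, 0.66, 0.66, 1)';
 *     el.style.transitionDuration = '300ms';
 */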
/**
*
* Enable dual listeners (mouse and pointer events at the same time). This is useful
* for devices that can handle both types of interaction interchangeably.
* This is set to false by default, allowing only one type of input interaction.
*
* @property {Boolean} dualListeners
* @default false
*
**/
/**
*
* The minimum number of pixels necessary to start moving the scroller.
* This is useful when you want to make sure that the user gesture
* has a well-defined direction (either horizontal or vertical).
*
* @property {integer} minThreshold
* @default 5
*
**/
/**
*
* The minimum number of pixels necessary to calculate
* the direction of the gesture.
*
* Ideally this value should be less than `minThreshold` to be able to
* control the action of the scroller based on the direction of the gesture.
* For example, you may want to lock the scroller movement if the gesture is horizontal.
*
* @property {integer} minDirectionThreshold
* @default 2
*
**/
/**
*
* Locks the scroller if the direction of the gesture matches the one provided.
* This property is meant to be used in conjunction with `minThreshold` and `minDirectionThreshold`.
*
* Valid values:
* - horizontal
* - vertical
*
* @property {string} lockOnDirection
*
**/
/**
*
* Gives the scroller the height of the items it contains.
*
* This property is used only when
* `scroll:vertical` and `gpuOptimization: true`.
* It helps the scroller calculate the positions of the surfaces
* attached to the DOM, which slightly improves the performance of the scroller
* (that is, the painting of that surface can occur asynchronously and outside of the JS execution).
*
* @plugin SurfaceManager
* @property {integer} itemHeight
*
**/
/**
*
* Gives the scroller the width of the items it contains.
*
* This property is used only when
* `scroll:horizontal` and `gpuOptimization: true`.
* It helps the scroller calculate the positions of the surfaces
* attached to the DOM, which slightly improves the performance of the scroller
* (that is, the painting of that surface can occur asynchronously and outside of the JS execution).
*
* @plugin SurfaceManager
* @property {integer} itemWidth
*
**/
/**
*
* Bind the event handlers to the scroller wrapper.
* This is useful when using nested scrollers or when adding some custom logic
* in a parent node as the event bubbles up.
*
* If set to `true`, the scroller stops scrolling once the pointer leaves the wrapper container.
*
* @property {boolean} bindToWrapper
* @default false
*
**/
/**
*
* Set the direction of the scroll.
* By default, vertical scrolling is enabled.
*
* Valid values:
* - horizontal
* - vertical
*
* @property {string} scroll
* @default vertical
*
**/
/**
*
* Activates pullToRefresh functionality.
* Note that you need to include the `PullToRefresh` plugin as part of your scroller bundle,
* otherwise this option is ignored.
*
* @plugin PullToRefresh
* @property {boolean} pullToRefresh
* @default false
**/
/**
*
* Activates pullToLoadMore functionality.
* Note that you need to include the `PullToLoadMore` plugin as part of your scroller bundle,
* otherwise this option is ignored.
*
* @plugin PullToLoadMore
* @property {boolean} pullToLoadMore
* @default false
*
**/
/**
*
* Creates scrollbars along the scrolling direction.
* @plugin Indicators
* @property {boolean} scrollbars
* @default false
*
**/
/**
*
* Scrollbar configuration.
*
* @plugin Indicators
* @property {Object} scrollbarsConfig
* @default false
*
**/
/**
*
* Activates infiniteLoading.
*
* @plugin InfiniteLoading
* @property {boolean} infiniteLoading
* @default false
*
**/
/**
*
* Sets the configuration for infiniteLoading.
* The `infiniteLoading` option must be set to true.
*
* @property {Object} infiniteLoadingConfig
*
**/
/**
*
* TODO: Debounce
*
* @property {boolean} debounce
*
**/
/**
*
* TODO: GPUOptimization
* @plugin SurfaceManager
* @property {boolean} gpuOptimization
*
**/ | forcedotcom/scrollerjs | src/config.js | JavaScript | apache-2.0 | 6,867 |
/**
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* Copyright 2012-2015 the original author or authors.
*/
package org.assertj.core.api.iterable;
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.List;
import org.junit.Test;
/**
* @author Johannes Schneider (<a href="mailto:js@cedarsoft.com">js@cedarsoft.com</a>)
*/
public class Iterable_generics_with_varargs_Test {
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testWithoutGenerics() throws Exception {
List strings = asList("a", "b", "c");
assertThat(strings).contains("a", "b");
}
@Test
public void testConcreteType() throws Exception {
List<String> strings = asList("a", "b", "c");
assertThat(strings).contains("a", "b");
}
@Test
public void testListAssertWithGenerics() throws Exception {
List<? extends String> strings = asList("a", "b", "c");
assertThat(strings).contains("a", "b");
}
}
| mdecourci/assertj-core | src/test/java/org/assertj/core/api/iterable/Iterable_generics_with_varargs_Test.java | Java | apache-2.0 | 1,467 |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.classroom.model;
/**
* Representation of a Google Drive file.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Google Classroom API. For a detailed explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class DriveFile extends com.google.api.client.json.GenericJson {
/**
* URL that can be used to access the Drive item.
*
* Read-only.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String alternateLink;
/**
* Drive API resource ID.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String id;
/**
* URL of a thumbnail image of the Drive item.
*
* Read-only.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String thumbnailUrl;
/**
* Title of the Drive item.
*
* Read-only.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String title;
/**
* URL that can be used to access the Drive item.
*
* Read-only.
* @return value or {@code null} for none
*/
public java.lang.String getAlternateLink() {
return alternateLink;
}
/**
* URL that can be used to access the Drive item.
*
* Read-only.
* @param alternateLink alternateLink or {@code null} for none
*/
public DriveFile setAlternateLink(java.lang.String alternateLink) {
this.alternateLink = alternateLink;
return this;
}
/**
* Drive API resource ID.
* @return value or {@code null} for none
*/
public java.lang.String getId() {
return id;
}
/**
* Drive API resource ID.
* @param id id or {@code null} for none
*/
public DriveFile setId(java.lang.String id) {
this.id = id;
return this;
}
/**
* URL of a thumbnail image of the Drive item.
*
* Read-only.
* @return value or {@code null} for none
*/
public java.lang.String getThumbnailUrl() {
return thumbnailUrl;
}
/**
* URL of a thumbnail image of the Drive item.
*
* Read-only.
* @param thumbnailUrl thumbnailUrl or {@code null} for none
*/
public DriveFile setThumbnailUrl(java.lang.String thumbnailUrl) {
this.thumbnailUrl = thumbnailUrl;
return this;
}
/**
* Title of the Drive item.
*
* Read-only.
* @return value or {@code null} for none
*/
public java.lang.String getTitle() {
return title;
}
/**
* Title of the Drive item.
*
* Read-only.
* @param title title or {@code null} for none
*/
public DriveFile setTitle(java.lang.String title) {
this.title = title;
return this;
}
@Override
public DriveFile set(String fieldName, Object value) {
return (DriveFile) super.set(fieldName, value);
}
@Override
public DriveFile clone() {
return (DriveFile) super.clone();
}
}
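// Usage sketch (illustrative; assumes the standard google-http-java-client
// JSON factory that this generated model is designed to work with):
//
//   DriveFile file = new DriveFile().setId("1a2b3c").setTitle("Syllabus");
//   String json = com.google.api.client.json.jackson2.JacksonFactory
//       .getDefaultInstance().toString(file); // serialize to JSON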
| googleapis/google-api-java-client-services | clients/google-api-services-classroom/v1/1.29.2/com/google/api/services/classroom/model/DriveFile.java | Java | apache-2.0 | 3,882 |
/**
* Copyright 2015 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import '../amp-brightcove';
import * as consent from '../../../../src/consent';
import {BaseElement} from '../../../../src/base-element';
import {CONSENT_POLICY_STATE} from '../../../../src/consent-state';
import {CommonSignals} from '../../../../src/common-signals';
import {VideoEvents} from '../../../../src/video-interface';
import {
createElementWithAttributes,
whenUpgradedToCustomElement,
} from '../../../../src/dom';
import {listenOncePromise} from '../../../../src/event-helper';
import {macroTask} from '../../../../testing/yield';
import {parseUrlDeprecated} from '../../../../src/url';
import {user} from '../../../../src/log';
describes.realWin(
'amp-brightcove',
{
amp: {
extensions: ['amp-brightcove'],
runtimeOn: true,
},
},
(env) => {
let win, doc;
beforeEach(() => {
win = env.win;
doc = win.document;
// Make mutateElement synchronous so mutations apply immediately in tests.
env.sandbox
.stub(BaseElement.prototype, 'mutateElement')
.callsFake((mutator) => {
mutator();
});
});
async function getBrightcoveBuild(attributes) {
const element = createElementWithAttributes(doc, 'amp-brightcove', {
width: '111',
height: '222',
...attributes,
});
doc.body.appendChild(element);
await whenUpgradedToCustomElement(element);
await element.whenBuilt();
return element;
}
async function getBrightcove(attributes) {
const element = await getBrightcoveBuild(attributes);
const impl = await element.getImpl(false);
await element.signals().whenSignal(CommonSignals.LOAD_START);
// Wait for the promise in layoutCallback() to resolve
await macroTask();
try {
fakePostMessage(impl, {event: 'ready'});
} catch (_) {
// This fails when the iframe is not available (after layoutCallback
// fails), in which case awaiting the LOAD_END signal below will throw.
}
await element.signals().whenSignal(CommonSignals.LOAD_END);
return element;
}
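// Simulates a postMessage arriving from the Brightcove player iframe by
// invoking the implementation's message handler directly with a matching
// origin and source window.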
function fakePostMessage(impl, info) {
impl.handlePlayerMessage_({
origin: 'https://players.brightcove.net',
source: impl.element.querySelector('iframe').contentWindow,
data: JSON.stringify(info),
});
}
// https://go.amp.dev/issue/32706
it('should remove `dock`', async () => {
const warn = env.sandbox.spy(user(), 'warn');
const element = await getBrightcoveBuild({
'data-account': '1290862519001',
'data-video-id': 'ref:amp-test-video',
'dock': '',
});
expect(element.hasAttribute('dock')).to.be.false;
expect(
warn.withArgs(
env.sandbox.match.any,
env.sandbox.match(/`dock` has been disabled/)
)
).to.have.been.calledOnce;
});
// https://go.amp.dev/issue/32706
it('should not warn without `dock`', async () => {
const warn = env.sandbox.spy(user(), 'warn');
const element = await getBrightcoveBuild({
'data-account': '1290862519001',
'data-video-id': 'ref:amp-test-video',
});
expect(element.hasAttribute('dock')).to.be.false;
expect(warn).to.not.have.been.called;
});
it('renders', () => {
return getBrightcove({
'data-account': '1290862519001',
'data-video-id': 'ref:amp-test-video',
}).then((bc) => {
const iframe = bc.querySelector('iframe');
expect(iframe).to.not.be.null;
expect(iframe.tagName).to.equal('IFRAME');
expect(iframe.src).to.equal(
'https://players.brightcove.net/1290862519001/default_default' +
'/index.html?videoId=ref:amp-test-video&playsinline=true'
);
});
});
it('removes iframe after unlayoutCallback', async () => {
const bc = await getBrightcove({
'data-account': '1290862519001',
'data-video-id': 'ref:amp-test-video',
});
const obj = await bc.getImpl();
const iframe = bc.querySelector('iframe');
expect(iframe).to.not.be.null;
obj.unlayoutCallback();
expect(bc.querySelector('iframe')).to.be.null;
expect(obj.iframe_).to.be.null;
});
it('should pass data-param-* attributes to the iframe src', () => {
return getBrightcove({
'data-account': '1290862519001',
'data-video-id': 'ref:amp-test-video',
'data-param-my-param': 'hello world',
}).then((bc) => {
const iframe = bc.querySelector('iframe');
const params = parseUrlDeprecated(iframe.src).search.split('&');
expect(params).to.contain('myParam=hello%20world');
});
});
it('should propagate mutated attributes', () => {
return getBrightcove({
'data-account': '1290862519001',
'data-video-id': 'ref:amp-test-video',
}).then((bc) => {
const iframe = bc.querySelector('iframe');
expect(iframe.src).to.equal(
'https://players.brightcove.net/1290862519001/default_default' +
'/index.html?videoId=ref:amp-test-video&playsinline=true'
);
bc.setAttribute('data-account', '12345');
bc.setAttribute('data-video-id', 'abcdef');
bc.mutatedAttributesCallback({
'data-account': '12345',
'data-video-id': 'abcdef',
});
expect(iframe.src).to.equal(
'https://players.brightcove.net/' +
'12345/default_default/index.html?videoId=abcdef&playsinline=true'
);
});
});
it('should give precedence to playlist id', () => {
return getBrightcove({
'data-account': '1290862519001',
'data-video-id': 'ref:amp-test-video',
'data-playlist-id': 'ref:test-playlist',
}).then((bc) => {
const iframe = bc.querySelector('iframe');
expect(iframe.src).to.contain('playlistId=ref:test-playlist');
expect(iframe.src).not.to.contain('videoId');
});
});
it('should allow both playlist and video id to be unset', () => {
return getBrightcove({
'data-account': '1290862519001',
}).then((bc) => {
const iframe = bc.querySelector('iframe');
expect(iframe.src).not.to.contain('&playlistId');
expect(iframe.src).not.to.contain('&videoId');
});
});
it('should pass referrer', () => {
return getBrightcove({
'data-account': '1290862519001',
'data-referrer': 'COUNTER',
}).then((bc) => {
const iframe = bc.querySelector('iframe');
expect(iframe.src).to.contain('referrer=1');
});
});
it('should force playsinline', () => {
return getBrightcove({
'data-account': '1290862519001',
'data-video-id': 'ref:amp-test-video',
'data-param-playsinline': 'false',
}).then((bc) => {
const iframe = bc.querySelector('iframe');
expect(iframe.src).to.contain('playsinline=true');
});
});
it('should forward events', async () => {
const bc = await getBrightcove({
'data-account': '1290862519001',
'data-video-id': 'ref:amp-test-video',
});
const impl = await bc.getImpl();
return Promise.resolve()
.then(() => {
const p = listenOncePromise(bc, VideoEvents.LOAD);
fakePostMessage(impl, {event: 'ready', muted: false, playing: false});
return p;
})
.then(() => {
const p = listenOncePromise(bc, VideoEvents.LOADEDMETADATA);
fakePostMessage(impl, {
event: 'loadedmetadata',
muted: false,
playing: false,
});
return p;
})
.then(() => {
const p = listenOncePromise(bc, VideoEvents.AD_START);
fakePostMessage(impl, {
event: 'ads-ad-started',
muted: false,
playing: false,
});
return p;
})
.then(() => {
const p = listenOncePromise(bc, VideoEvents.AD_END);
fakePostMessage(impl, {
event: 'ads-ad-ended',
muted: false,
playing: false,
});
return p;
})
.then(() => {
const p = listenOncePromise(bc, VideoEvents.PLAYING);
fakePostMessage(impl, {
event: 'playing',
muted: false,
playing: true,
});
return p;
})
.then(() => {
const p = listenOncePromise(bc, VideoEvents.MUTED);
fakePostMessage(impl, {
event: 'volumechange',
muted: true,
playing: true,
});
return p;
})
.then(() => {
const p = listenOncePromise(bc, VideoEvents.UNMUTED);
fakePostMessage(impl, {
event: 'volumechange',
muted: false,
playing: true,
});
return p;
})
.then(() => {
const p = listenOncePromise(bc, VideoEvents.PAUSE);
fakePostMessage(impl, {event: 'pause', muted: false, playing: false});
return p;
})
.then(() => {
const p = listenOncePromise(bc, VideoEvents.ENDED);
fakePostMessage(impl, {event: 'ended', muted: false, playing: false});
return p;
});
});
it('should propagate consent state to iframe', () => {
env.sandbox
.stub(consent, 'getConsentPolicyState')
.resolves(CONSENT_POLICY_STATE.SUFFICIENT);
env.sandbox
.stub(consent, 'getConsentPolicySharedData')
.resolves({a: 1, b: 2});
env.sandbox.stub(consent, 'getConsentPolicyInfo').resolves('abc');
return getBrightcove({
'data-account': '1290862519001',
'data-video-id': 'ref:amp-test-video',
'data-block-on-consent': '_till_accepted',
}).then((bc) => {
const iframe = bc.querySelector('iframe');
expect(iframe.src).to.contain(
`ampInitialConsentState=${CONSENT_POLICY_STATE.SUFFICIENT}`
);
expect(iframe.src).to.contain(
`ampConsentSharedData=${encodeURIComponent(
JSON.stringify({a: 1, b: 2})
)}`
);
expect(iframe.src).to.contain('ampInitialConsentValue=abc');
});
});
}
);
| lannka/amphtml | extensions/amp-brightcove/0.1/test/test-amp-brightcove.js | JavaScript | apache-2.0 | 10,948 |
# -*- coding: utf-8 -*-
import logging
import itertools
import math
import urllib
import httplib as http
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from flask import request
from framework import utils
from framework import sentry
from framework.auth.core import User
from framework.flask import redirect # VOL-aware redirect
from framework.routing import proxy_url
from framework.exceptions import HTTPError
from framework.auth.forms import SignInForm
from framework.forms import utils as form_utils
from framework.auth.forms import RegistrationForm
from framework.auth.forms import ResetPasswordForm
from framework.auth.forms import ForgotPasswordForm
from framework.auth.decorators import must_be_logged_in
from website.models import Guid
from website.models import Node, Institution
from website.institutions.views import view_institution
from website.util import sanitize
from website.project import model
from website.util import permissions
from website.project import new_bookmark_collection
logger = logging.getLogger(__name__)
def _render_node(node, auth=None):
"""
:param node:
:return:
"""
perm = None
# NOTE: auth.user may be None if viewing public project while not
# logged in
if auth and auth.user and node.get_permissions(auth.user):
perm_list = node.get_permissions(auth.user)
perm = permissions.reduce_permissions(perm_list)
return {
'title': node.title,
'id': node._primary_key,
'url': node.url,
'api_url': node.api_url,
'primary': node.primary,
'date_modified': utils.iso8601format(node.date_modified),
'category': node.category,
'permissions': perm, # A string, e.g. 'admin', or None,
'archiving': node.archiving,
}
def _render_nodes(nodes, auth=None, show_path=False):
"""
:param nodes:
:return:
"""
ret = {
'nodes': [
_render_node(node, auth)
for node in nodes
],
'show_path': show_path
}
return ret
def index():
try:
# TODO: make this way more robust
inst = Institution.find_one(Q('domains', 'eq', request.host.lower()))
inst_dict = view_institution(inst._id)
inst_dict.update({
'home': False,
'institution': True,
'redirect_url': '/institutions/{}/'.format(inst._id)
})
return inst_dict
except NoResultsFound:
pass
return {'home': True}
def find_bookmark_collection(user):
bookmark_collection = Node.find(Q('is_bookmark_collection', 'eq', True) & Q('contributors', 'eq', user._id))
if bookmark_collection.count() == 0:
new_bookmark_collection(user)
return bookmark_collection[0]
@must_be_logged_in
def dashboard(auth):
user = auth.user
dashboard_folder = find_bookmark_collection(user)
dashboard_id = dashboard_folder._id
return {'addons_enabled': user.get_addon_names(),
'dashboard_id': dashboard_id,
}
def validate_page_num(page, pages):
if page < 0 or (pages and page >= pages):
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "page".'
))
def paginate(items, total, page, size):
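"""Slice an iterable down to a single page.

:param items: iterable of items to paginate
:param total: total number of items
:param page: zero-based page index
:param size: number of items per page
:return: tuple of (iterator over the requested page, total page count)
"""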
pages = math.ceil(total / float(size))
validate_page_num(page, pages)
start = page * size
paginated_items = itertools.islice(items, start, start + size)
return paginated_items, pages
@must_be_logged_in
def watched_logs_get(**kwargs):
user = kwargs['auth'].user
try:
page = int(request.args.get('page', 0))
except ValueError:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "page".'
))
try:
size = int(request.args.get('size', 10))
except ValueError:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "size".'
))
total = sum(1 for x in user.get_recent_log_ids())
paginated_logs, pages = paginate(user.get_recent_log_ids(), total, page, size)
logs = (model.NodeLog.load(id) for id in paginated_logs)
return {
"logs": [serialize_log(log) for log in logs],
"total": total,
"pages": pages,
"page": page
}
def serialize_log(node_log, auth=None, anonymous=False):
'''Return a dictionary representation of the log.'''
return {
'id': str(node_log._primary_key),
'user': node_log.user.serialize()
if isinstance(node_log.user, User)
else {'fullname': node_log.foreign_user},
'contributors': [node_log._render_log_contributor(c) for c in node_log.params.get("contributors", [])],
'action': node_log.action,
'params': sanitize.unescape_entities(node_log.params),
'date': utils.iso8601format(node_log.date),
'node': node_log.original_node.serialize(auth) if node_log.original_node else None,
'anonymous': anonymous
}
def reproducibility():
return redirect('/ezcuj/wiki')
def registration_form():
return form_utils.jsonify(RegistrationForm(prefix='register'))
def signin_form():
return form_utils.jsonify(SignInForm())
def forgot_password_form():
return form_utils.jsonify(ForgotPasswordForm(prefix='forgot_password'))
def reset_password_form():
return form_utils.jsonify(ResetPasswordForm())
# GUID ###
def _build_guid_url(base, suffix=None):
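"""Join a GUID (or other base) and an optional suffix into a normalized,
slash-wrapped unicode URL.

For example, _build_guid_url('abc12', 'files/osfstorage')
returns u'/abc12/files/osfstorage/'.
"""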
url = '/'.join([
each.strip('/') for each in [base, suffix]
if each
])
if not isinstance(url, unicode):
url = url.decode('utf-8')
return u'/{0}/'.format(url)
def resolve_guid(guid, suffix=None):
"""Load GUID by primary key, look up the corresponding view function in the
routing table, and return the return value of the view function without
changing the URL.
:param str guid: GUID primary key
:param str suffix: Remainder of URL after the GUID
:return: Return value of proxied view function
"""
# Look up GUID
guid_object = Guid.load(guid)
if guid_object:
# verify that the object implements a GuidStoredObject-like interface. If a model
# was once GuidStoredObject-like but that relationship has changed, it's
# possible to have referents that are instances of classes that don't
# have a deep_url attribute or otherwise don't behave as
# expected.
if not hasattr(guid_object.referent, 'deep_url'):
sentry.log_message(
'Guid `{}` resolved to an object with no deep_url'.format(guid)
)
raise HTTPError(http.NOT_FOUND)
referent = guid_object.referent
if referent is None:
logger.error('Referent of GUID {0} not found'.format(guid))
raise HTTPError(http.NOT_FOUND)
if not referent.deep_url:
raise HTTPError(http.NOT_FOUND)
url = _build_guid_url(urllib.unquote(referent.deep_url), suffix)
return proxy_url(url)
# GUID not found; try lower-cased and redirect if exists
guid_object_lower = Guid.load(guid.lower())
if guid_object_lower:
return redirect(
_build_guid_url(guid.lower(), suffix)
)
# GUID not found
raise HTTPError(http.NOT_FOUND)
##### Redirects #####
# Redirect /about/ to OSF wiki page
# https://github.com/CenterForOpenScience/osf.io/issues/3862
# https://github.com/CenterForOpenScience/community/issues/294
def redirect_about(**kwargs):
return redirect('https://osf.io/4znzp/wiki/home/')
def redirect_howosfworks(**kwargs):
return redirect('/getting-started/')
def redirect_getting_started(**kwargs):
return redirect('http://help.osf.io/')
def redirect_to_home():
# Redirect to support page
return redirect('/')
| zachjanicki/osf.io | website/views.py | Python | apache-2.0 | 7,843 |
// Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authn
// AuthConfig contains authorization information for connecting to a Registry.
// Inlined what we use from github.com/cli/cli/config/types.
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth,omitempty"`
// IdentityToken is used to authenticate the user and get
// an access token for the registry.
IdentityToken string `json:"identitytoken,omitempty"`
// RegistryToken is a bearer token to be sent to a registry
RegistryToken string `json:"registrytoken,omitempty"`
}
// Authenticator is used to authenticate Docker transports.
type Authenticator interface {
// Authorization returns the value to use in an http transport's Authorization header.
Authorization() (*AuthConfig, error)
}
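// For illustration, a trivial Authenticator could wrap a static AuthConfig
// (the names below are hypothetical, not part of this package):
//
//	type staticAuth struct{ cfg AuthConfig }
//
//	func (s staticAuth) Authorization() (*AuthConfig, error) { return &s.cfg, nil }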
| knative/test-infra | vendor/github.com/google/go-containerregistry/pkg/authn/authn.go | GO | apache-2.0 | 1,409 |
/**
* Copyright (C) 2014 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.decision.unit.engine.validator;
import com.stratio.decision.commons.messages.StratioStreamingMessage;
import com.stratio.decision.exception.RequestValidationException;
import com.stratio.decision.functions.validator.MongoStreamNameValidator;
public class MongoNameRegularExpressionValidatorTest extends BaseRegularExpressionValidatorTest {
private MongoStreamNameValidator mongoStreamNameValidator;
@Override
public void setUp() {
mongoStreamNameValidator = new MongoStreamNameValidator();
}
@Override
public void test(StratioStreamingMessage message) throws RequestValidationException {
mongoStreamNameValidator.validate(message);
}
@Override
public String[] getGoodStrings() {
return new String[] { "test_test$etstsdd", "&&&&", "$$$$", "\n\n\n" };
}
@Override
public String[] getBadStrings() {
return new String[] { "*test", "test*", "test test", ">><<<<>", "_____|" };
}
}
| Stratio/streaming-cep-engine | engine/src/test/java/com/stratio/decision/unit/engine/validator/MongoNameRegularExpressionValidatorTest.java | Java | apache-2.0 | 1,609 |
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"math/rand"
"sync"
"time"
"github.com/google/gapid/core/app"
"github.com/google/gapid/core/app/crash"
"github.com/google/gapid/core/event/task"
"github.com/google/gapid/core/log"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/service"
"github.com/google/gapid/gapis/service/path"
)
type stresstestVerb struct{ StressTestFlags }
func init() {
app.AddVerb(&app.Verb{
Name: "stress-test",
ShortHelp: "Performs evil things on GAPIS to try to break it",
Action: &stresstestVerb{},
})
}
func (verb *stresstestVerb) Run(ctx context.Context, flags flag.FlagSet) error {
if flags.NArg() != 1 {
app.Usage(ctx, "Exactly one gfx trace file expected, got %d", flags.NArg())
return nil
}
client, c, err := getGapisAndLoadCapture(ctx, verb.Gapis, verb.Gapir, flags.Arg(0), verb.CaptureFileFlags)
if err != nil {
return err
}
defer client.Close()
boxedCapture, err := client.Get(ctx, c.Path(), nil)
if err != nil {
return log.Err(ctx, err, "Failed to load the capture")
}
count := int(boxedCapture.(*service.Capture).NumCommands)
wg := sync.WaitGroup{}
for l := 0; l < 10; l++ {
for i := 0; i < 10000; i++ {
at := uint64(rand.Intn(count - 1))
duration := time.Second + time.Duration(rand.Intn(int(time.Second*10)))
wg.Add(1)
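// Pick one of the query methods below at random; getCount doubles as the
// number of available methods, so rand.Intn(getCount) covers exactly the
// valid choices.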
const (
getStateAfter = iota
getMesh
getCount
)
method := rand.Intn(getCount)
crash.Go(func() {
defer wg.Done()
ctx, _ := task.WithTimeout(ctx, duration)
switch method {
case getStateAfter:
boxedTree, err := client.Get(ctx, c.Command(at).StateAfter().Tree().Path(), nil)
if err == nil {
tree := boxedTree.(*service.StateTree)
client.Get(ctx, tree.Root.Path(), nil)
}
case getMesh:
boxedMesh, err := client.Get(ctx, c.Command(at).Mesh(path.NewMeshOptions(true)).Path(), nil)
if err == nil {
mesh := boxedMesh.(*api.Mesh)
_ = mesh
}
}
})
}
wg.Wait()
}
return nil
}
| Qining/gapid | cmd/gapit/stresstest.go | GO | apache-2.0 | 2,609 |
/*
* Copyright 2005 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.conflict;
import org.drools.spi.Activation;
import org.drools.spi.ConflictResolver;
/**
* Convenience base class for <code>ConflictResolver</code>s.
*
*
* @version $Id: AbstractConflictResolver.java,v 1.1 2004/10/06 13:38:05
* mproctor Exp $
*/
public abstract class AbstractConflictResolver
implements
ConflictResolver {
/**
* @see ConflictResolver
*/
public final int compare(final Object existing,
final Object adding) {
return compare( (Activation) existing,
(Activation) adding );
}
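// Illustrative subclass (hypothetical, not part of this file): a resolver
// ordering activations by an integer measure such as rule salience,
// assuming the Activation exposes its rule and an integer salience:
//
//   public class SalienceConflictResolver extends AbstractConflictResolver {
//       public int compare(Activation lhs, Activation rhs) {
//           return rhs.getRule().getSalience() - lhs.getRule().getSalience();
//       }
//   }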
}
| pperboires/PocDrools | drools-core/src/main/java/org/drools/conflict/AbstractConflictResolver.java | Java | apache-2.0 | 1,213 |
/* Copyright 2007-2016 QReal Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. */
#include "graphicType.h"
#include <QtCore/QDebug>
#include <qrutils/outFile.h>
#include "property.h"
#include "label.h"
#include "diagram.h"
#include "nameNormalizer.h"
#include "nodeType.h"
#include "edgeType.h"
using namespace utils;
GraphicType::ContainerProperties::ContainerProperties()
: isSortingContainer(false), sizeOfForestalling(4, 0)
, sizeOfChildrenForestalling(0), hasMovableChildren(true)
, minimizesToChildren(false), maximizesChildren(false)
{
}
GraphicType::GeneralizationProperties::GeneralizationProperties(const QString &name, const QString &overrides)
: name(name)
{
overridePorts = overrides.contains("ports", Qt::CaseInsensitive);
overrideLabels = overrides.contains("labels", Qt::CaseInsensitive);
overridePictures = overrides.contains("pictures", Qt::CaseInsensitive);
if (overrides.contains("all", Qt::CaseInsensitive)) {
overridePorts = true;
overrideLabels = true;
overridePictures = true;
}
}
GraphicType::ResolvingHelper::ResolvingHelper(bool &resolvingFlag)
: mResolvingFlag(resolvingFlag)
{
mResolvingFlag = true;
}
GraphicType::ResolvingHelper::~ResolvingHelper()
{
mResolvingFlag = false;
}
GraphicType::GraphicType(Diagram *diagram)
: Type(false, diagram)
, mVisible(false)
, mWidth(-1)
, mHeight(-1)
, mCreateChildrenFromMenu(false)
, mResolving(false)
{
}
GraphicType::~GraphicType()
{
foreach (Label *label, mLabels) {
delete label;
}
}
void GraphicType::copyFields(GraphicType *type) const
{
Type::copyFields(type);
type->mElement = mElement;
type->mGraphics = mGraphics;
type->mHeight = mHeight;
for (Label *label : mLabels) {
type->mLabels.append(label->clone());
}
type->mLogic = mLogic;
type->mParents = mParents;
type->mVisible = mVisible;
type->mWidth = mWidth;
type->mContainerProperties = mContainerProperties;
type->mContains = mContains;
type->mExplosions = mExplosions;
}
void GraphicType::copyLabels(GraphicType *parent)
{
for (Label *label : parent->mLabels) {
mLabels.append(label->clone());
}
}
QStringList GraphicType::immediateParents() const
{
QStringList result;
for (const GeneralizationProperties &generalization : mParents) {
result << generalization.name;
}
return result;
}
void GraphicType::generateCommonData(OutFile &out) const
{
generateName(out);
generateFriendlyName(out);
generateDiagram(out);
generateDescription(out);
generateLabels(out);
}
void GraphicType::generateLabels(OutFile &out) const
{
for (const Label *label : mLabels) {
label->generateCodeForConstructor(out);
}
}
bool GraphicType::init(const QDomElement &element, const QString &context)
{
mElement = element;
if (Type::init(element, context)) {
mDescription = element.attribute("description", "");
mAbstract = element.attribute("abstract", "");
mLogic = element.firstChildElement("logic");
if (mLogic.isNull()) {
qDebug() << "ERROR: can't find logic tag of graphic type";
return false;
}
mGraphics = element.firstChildElement("graphics");
return initRoles() && initParents() && initProperties() && initDividability() && initContainers()
&& initGraphics() && initLabels() && initPossibleEdges() && initPortTypes()
&& initCreateChildrenFromMenu() && initContainerProperties()
&& initExplosions();
}
return false;
}
bool GraphicType::initParents()
{
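// Expected markup shape (illustrative values), as read by the loop below:
//
//   <generalizations>
//       <parent parentName="AbstractNode" overrides="ports, labels"/>
//   </generalizations>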
QDomElement parentsElement = mLogic.firstChildElement("generalizations");
if (parentsElement.isNull()) {
return true;
}
for (QDomElement parentElement = parentsElement.firstChildElement("parent")
; !parentElement.isNull()
; parentElement = parentElement.nextSiblingElement("parent"))
{
const QString parentName = parentElement.attribute("parentName");
if (parentName.isEmpty()) {
qWarning() << "ERROR: anonymous parent of node" << qualifiedName();
return false;
}
const QString overrides = parentElement.attribute("overrides");
for (const auto &parent : mParents) {
if (parent.name == parentName) {
qWarning() << "ERROR: parent of node" << qualifiedName() << "duplicated";
return false;
}
}
mParents.append({parentName, overrides});
}
return true;
}
bool GraphicType::initProperties()
{
initRoleProperties();
const QDomElement propertiesElement = mLogic.firstChildElement("properties");
if (propertiesElement.isNull()) {
return true;
}
for (QDomElement propertyElement = propertiesElement.firstChildElement("property")
; !propertyElement.isNull()
; propertyElement = propertyElement.nextSiblingElement("property"))
{
Property *property = new Property();
if (!property->init(propertyElement)) {
delete property;
continue;
}
if (!addProperty(property, "")) {
return false;
}
}
return true;
}
bool GraphicType::initFieldList(const QString &listName, const QString &listElementName
, QStringList &resultingList, const QString &fieldName, const bool isNeedToNormalizeAtt) const
{
QDomElement containerElement = mLogic.firstChildElement(listName);
if (containerElement.isNull()) {
return true;
}
for (QDomElement childElement = containerElement.firstChildElement(listElementName)
; !childElement.isNull()
; childElement = childElement.nextSiblingElement(listElementName))
{
QString typeName;
if (isNeedToNormalizeAtt) {
typeName = NameNormalizer::normalize(childElement.attribute(fieldName));
} else {
typeName = childElement.attribute(fieldName);
}
if (typeName.isEmpty()) {
qDebug() << "Error: anonymous " << fieldName << " in the " << listName << " list, in " << qualifiedName();
return false;
}
if (!resultingList.contains(typeName)) {
resultingList.append(typeName);
} else {
qDebug() << "ERROR: " << fieldName << " in the " << listName << " list in "
<< qualifiedName() << "duplicated";
return false;
}
}
return true;
}
bool GraphicType::initTypeList(const QString &listName, const QString &listElementName
, QStringList &resultingList) const
{
return initFieldList(listName, listElementName, resultingList, "type", true);
}
bool GraphicType::initContainers()
{
return initTypeList("container", "contains", mContains);
}
bool GraphicType::initContainerProperties()
{
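// Expected markup shape (illustrative values); each recognized child tag
// maps to one field of ContainerProperties:
//
//   <container>
//       <properties>
//           <sortContainer/>
//           <forestalling size="4, 4, 4, 4"/>
//           <childrenForestalling size="2"/>
//           <banChildrenMove/>
//       </properties>
//   </container>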
QDomElement containerElement = mLogic.firstChildElement("container");
if (containerElement.isNull()) {
return true;
}
QDomElement containerPropertiesElement = containerElement.firstChildElement("properties");
if (containerPropertiesElement.isNull()) {
return true;
}
for (QDomElement childElement = containerPropertiesElement.firstChildElement()
; !childElement.isNull()
; childElement = childElement.nextSiblingElement())
{
if (childElement.tagName() == "sortContainer") {
mContainerProperties.isSortingContainer = true;
} else if (childElement.tagName() == "forestalling") {
QString sizeAttribute = childElement.attribute("size");
bool isSizeOk = false;
mContainerProperties.sizeOfForestalling = toIntVector(sizeAttribute, &isSizeOk);
if (!isSizeOk) {
return false;
}
} else if (childElement.tagName() == "childrenForestalling") {
QString sizeAttribute = childElement.attribute("size");
bool isSizeOk = false;
mContainerProperties.sizeOfChildrenForestalling = sizeAttribute.toInt(&isSizeOk);
if (!isSizeOk) {
return false;
}
} else if (childElement.tagName() == "minimizeToChildren") {
mContainerProperties.minimizesToChildren = true;
} else if (childElement.tagName() == "banChildrenMove") {
mContainerProperties.hasMovableChildren = false;
} else if (childElement.tagName() == "maximizeChildren") {
mContainerProperties.maximizesChildren = true;
}
}
return true;
}
bool GraphicType::initCreateChildrenFromMenu()
{
if (!mLogic.elementsByTagName("createChildrenFromMenu").isEmpty()) {
mCreateChildrenFromMenu = true;
}
return true;
}
bool GraphicType::initPossibleEdges()
{
const QString listName = "possibleEdges";
const QString listElementName = "possibleEdge";
QDomElement containerElement = mLogic.firstChildElement(listName);
if (containerElement.isNull()) {
return true;
}
for (QDomElement childElement = containerElement.firstChildElement(listElementName);
!childElement.isNull();
childElement = childElement.nextSiblingElement(listElementName))
{
QString beginName = NameNormalizer::normalize(childElement.attribute("beginName"));
QString endName = NameNormalizer::normalize(childElement.attribute("endName"));
QString temp = childElement.attribute("directed");
if (beginName.isEmpty() || endName.isEmpty() || ((temp != "true") && (temp != "false"))) {
qWarning() << "Error: one of attributes is incorrect " <<
"(perhaps, \"beginName\" or \"endName\" is empty or " <<
"\"directed\" isn't \"true\" or \"false\".')" << qualifiedName();
return false;
}
const bool directed = temp == "true";
const QString edgeName = NameNormalizer::normalize(qualifiedName());
QPair<QPair<QString, QString>, QPair<bool, QString> > possibleEdge(qMakePair(beginName, endName)
, qMakePair(directed, edgeName));
if (!mPossibleEdges.contains(possibleEdge)) {
mPossibleEdges.append(possibleEdge);
} else {
qDebug() << "ERROR: this edge is already in list " << qualifiedName();
return false;
}
}
return true;
}
bool GraphicType::initExplosions()
{
const QDomElement explodesTo = mLogic.firstChildElement("explodesTo");
if (explodesTo.isNull()) {
return true;
}
for (QDomElement targetElement = explodesTo.firstChildElement()
; !targetElement.isNull()
; targetElement = targetElement.nextSiblingElement())
{
const QString targetName = targetElement.attribute("type");
if (targetName.isEmpty()) {
return false;
}
const bool isReusable = targetElement.attribute("makeReusable", "false").toLower().trimmed() == "true";
const bool immediateLinkage
= targetElement.attribute("requireImmediateLinkage", "false").toLower().trimmed() == "true";
mExplosions[targetName] = qMakePair(isReusable, immediateLinkage);
}
return true;
}
bool GraphicType::initLabels()
{
int count = 1;
for (QDomElement element = mGraphics.firstChildElement("labels").firstChildElement("label");
!element.isNull();
element = element.nextSiblingElement("label"))
{
Label *label = new Label();
if (!initLabel(label, element, count)) {
delete label;
} else {
mLabels.append(label);
++count;
}
}
return true;
}
bool GraphicType::addProperty(Property *property, const QString &roleName)
{
QString propertyName = this->propertyName(property, roleName);
if (propertyName.isEmpty()) {
propertyName = property->name();
}
if (mProperties.contains(propertyName)) {
// This will automatically dispose of the property in this branch.
QScopedPointer<Property> propertyDisposer(property);
Q_UNUSED(propertyDisposer)
/// @todo Good for overriding parent properties, but bad in multiple inheritance case
/// --- we can allow invalid rhomb inheritance.
if (mProperties[propertyName] != property && *mProperties[propertyName] != *property) {
qWarning() << "Property" << propertyName << "duplicated with different attributes";
return false;
}
} else {
mProperties[propertyName] = property;
}
return true;
}
bool GraphicType::isResolving() const
{
return mResolving;
}
bool GraphicType::resolve()
{
if (mResolvingFinished) {
return true;
}
ResolvingHelper helper(mResolving);
Q_UNUSED(helper)
/// @todo Ensure that parents are not duplicated.
for (const GeneralizationProperties &generalization : mParents) {
// Parents are searched in "native" context of a type, so if it was imported links must not be broken.
const QString qualifiedParentName = generalization.name.contains("::")
? generalization.name
: nativeContext() + "::" + generalization.name;
Type *parent = mDiagram->findType(qualifiedParentName);
if (parent == nullptr) {
// Parent was not found in local context, trying to search in global context
parent = mDiagram->findType(generalization.name);
if (parent == nullptr) {
qDebug() << "ERROR: can't find parent" << generalization.name << "for" << qualifiedName();
return false;
}
}
if (parent->isResolving()) {
qDebug() << "ERROR: circular inheritance between" << generalization.name << "and" << qualifiedName();
return false;
}
if (!parent->isResolved()) {
if (!parent->resolve()) {
return false;
}
}
for (Property *property : parent->properties().values()) {
if (!addProperty(property->clone(), "")) {
return false;
}
}
GraphicType * const graphicParent = dynamic_cast<GraphicType*>(parent);
if (graphicParent != nullptr) {
if (!generalization.overrideLabels) {
copyLabels(graphicParent);
}
if (!generalization.overridePictures) {
copyPictures(graphicParent);
}
NodeType* const nodeParent = dynamic_cast<NodeType*>(parent);
if (nodeParent != nullptr) {
if (!generalization.overridePorts) {
copyPorts(nodeParent);
}
}
for (PossibleEdge pEdge : graphicParent->mPossibleEdges) {
mPossibleEdges.append(qMakePair(pEdge.first,qMakePair(pEdge.second.first,name())));
}
}
}
for (int i = 0; i < mLabels.size(); ++i) {
mLabels.value(i)->changeIndex(i + 1);
}
mResolvingFinished = true;
return true;
}
void GraphicType::generateName(OutFile &out) const
{
const QString normalizedName = NameNormalizer::normalize(qualifiedName());
out() << QString("\t\t\tsetName(\"%1\");\n").arg(normalizedName);
/// @todo: I don't know why we check it here, but it can live here for now. This should be moved
/// into an appropriate place later.
for (const QPair<QString, QStringList> &part : mDiagram->paletteGroups()) {
for (auto part2 : part.second) {
if (part2 == normalizedName && mAbstract == "true" ) {
qDebug() << "ERROR! Element" << qualifiedName() << "is abstract.";
return;
}
}
}
}
void GraphicType::generateFriendlyName(OutFile &out) const
{
const QString actualDisplayedName = displayedName().isEmpty() ? name() : displayedName();
out() << QString("\t\t\tsetFriendlyName(QObject::tr(\"%1\"));\n").arg(actualDisplayedName);
}
void GraphicType::generateDiagram(OutFile &out) const
{
const QString diagramName = NameNormalizer::normalize(mDiagram->name());
out() << QString("\t\t\tsetDiagram(\"%1\");\n").arg(diagramName);
}
void GraphicType::generateDescription(OutFile &out) const
{
out() << "\t\t\tsetDescription(QObject::tr(\"" << mDescription << "\"));\n";
}
QStringList GraphicType::sortProperties(const QList<QString> &properties) const
{
QList<QString> result;
QStringList propertiesWithRoles;
QStringList pureProperties;
for (const QString &property : properties) {
if (property.contains("!")) {
propertiesWithRoles.append(property);
} else {
pureProperties.append(property);
}
}
propertiesWithRoles.sort();
pureProperties.sort();
result = propertiesWithRoles + pureProperties;
return result;
}
void GraphicType::generatePropertyData(OutFile &out) const
{
out() << "\t\tvoid initProperties()\n\t\t{\n";
const QStringList keys = sortProperties(mProperties.keys());
for (const QString &key : keys) {
Property *property = mProperties[key];
// Validating property names.
if (property->name() == "fromPort" || property->name() == "toPort"
|| property->name() == "from" || property->name() == "to"
|| property->name() == "name")
{
qWarning() << "ERROR: predefined property" << property->name() << "should not appear in metamodel, ignored";
continue;
}
const QString name = key == property->name() ? property->name() : key;
const QString stringConstructor = property->type() == "string" ? "QObject::tr" : "QString::fromUtf8";
out() << QString("\t\t\taddProperty(\"%1\", \"%2\", %3(\"%4\"), QObject::tr(\"%5\"), "\
"QObject::tr(\"%6\"), %7);\n").arg(name, property->type(), stringConstructor
, property->defaultValue(), property->displayedName(), property->description()
, property->isReferenceProperty() ? "true" : "false");
}
out() << "\t\t}\n";
}
QString GraphicType::resourceName(const QString &resourceType) const
{
QString name = NameNormalizer::normalize(qualifiedName());
return name + resourceType + ".sdf";
}
QStringList GraphicType::containedTypes() const
{
return mContains;
}
const QMap<QString, QPair<bool, bool> > &GraphicType::explosions() const
{
return mExplosions;
}
QString GraphicType::boolToString(bool value) const
{
return value ? "true" : "false";
}
QVector<int> GraphicType::toIntVector(const QString &s, bool *isOk) const
{
const QStringList strings = s.split(',');
QVector<int> result(4, 0);
if (strings.size() != 4) {
*isOk = false;
return result;
}
for (int i = 0; i < 4; i++) {
result[i] = strings[i].toInt(isOk);
if (!*isOk)
return result;
}
return result;
}
| danilaml/qreal | qrxc/graphicType.cpp | C++ | apache-2.0 | 17,325 |
package org.zstack.sdk.iam2.entity;
public enum StateEvent {
enable,
disable,
}
| AlanJager/zstack | sdk/src/main/java/org/zstack/sdk/iam2/entity/StateEvent.java | Java | apache-2.0 | 83 |
/*
* JBoss, Home of Professional Open Source.
* Copyright 2014 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.marshalling.river;
import org.jboss.marshalling.MarshallerFactory;
import org.jboss.marshalling.Marshaller;
import org.jboss.marshalling.Marshalling;
import org.jboss.marshalling.Unmarshaller;
import org.jboss.marshalling.MarshallingConfiguration;
import java.io.ByteArrayOutputStream;
import java.io.ByteArrayInputStream;
/**
 * Base class for round-trip marshalling tests: writes data to a byte array with a
 * {@link Marshaller}, then reads it back with an {@link Unmarshaller}.
 */
public abstract class ReadWriteTest {
public void run() throws Throwable {
final MarshallerFactory factory = new RiverMarshallerFactory();
final MarshallingConfiguration configuration = new MarshallingConfiguration();
configure(configuration);
final Marshaller marshaller = factory.createMarshaller(configuration);
final ByteArrayOutputStream baos = new ByteArrayOutputStream(10240);
marshaller.start(Marshalling.createByteOutput(baos));
runWrite(marshaller);
marshaller.finish();
final byte[] bytes = baos.toByteArray();
final Unmarshaller unmarshaller = factory.createUnmarshaller(configuration);
unmarshaller.start(Marshalling.createByteInput(new ByteArrayInputStream(bytes)));
runRead(unmarshaller);
unmarshaller.finish();
}
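// Usage sketch (illustrative): override the hooks inline and call run().
//
//   new ReadWriteTest() {
//       public void runWrite(Marshaller m) throws Throwable { m.writeObject("hi"); }
//       public void runRead(Unmarshaller u) throws Throwable {
//           if (!"hi".equals(u.readObject())) throw new AssertionError();
//       }
//   }.run();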
public void configure(MarshallingConfiguration configuration) throws Throwable {}
public void runWrite(Marshaller marshaller) throws Throwable {}
public void runRead(Unmarshaller unmarshaller) throws Throwable {}
}
| kohsuke/jboss-marshalling | river/src/test/java/org/jboss/marshalling/river/ReadWriteTest.java | Java | apache-2.0 | 2,135 |
package manager
import (
ds "github.com/Comcast/traffic_control/traffic_monitor/experimental/traffic_monitor/deliveryservice"
"sync"
)
type LastStatsThreadsafe struct {
stats *ds.LastStats
m *sync.RWMutex
}
func NewLastStatsThreadsafe() LastStatsThreadsafe {
s := ds.NewLastStats()
return LastStatsThreadsafe{m: &sync.RWMutex{}, stats: &s}
}
// Get returns the last KBPS stats object. Callers MUST NOT modify the object. It is not threadsafe for writing. If the object must be modified, callers must call LastStats.Copy() and modify the copy.
func (o *LastStatsThreadsafe) Get() ds.LastStats {
o.m.RLock()
defer o.m.RUnlock()
return *o.stats
}
func (o *LastStatsThreadsafe) Set(s ds.LastStats) {
o.m.Lock()
*o.stats = s
o.m.Unlock()
}
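// Usage sketch (illustrative), following the copy-on-write contract documented
// on Get (LastStats.Copy is assumed from that comment, not shown here):
//
//	s := NewLastStatsThreadsafe()
//	stats := s.Get().Copy()
//	// ...modify the copy...
//	s.Set(stats)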
| dneuman64/traffic_control | traffic_monitor/experimental/traffic_monitor/manager/lastkbpsstats.go | GO | apache-2.0 | 756 |
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.stunner.bpmn.backend.legacy.profile.impl;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import javax.enterprise.context.ApplicationScoped;
import javax.servlet.ServletContext;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
import bpsim.impl.BpsimFactoryImpl;
import com.fasterxml.jackson.core.JsonParseException;
import org.eclipse.bpmn2.Bpmn2Package;
import org.eclipse.bpmn2.Definitions;
import org.eclipse.bpmn2.DocumentRoot;
import org.eclipse.bpmn2.util.Bpmn2ResourceFactoryImpl;
import org.eclipse.emf.common.util.URI;
import org.eclipse.emf.ecore.resource.Resource;
import org.eclipse.emf.ecore.resource.ResourceSet;
import org.eclipse.emf.ecore.resource.impl.ResourceSetImpl;
import org.eclipse.emf.ecore.xmi.XMLResource;
import org.jboss.drools.impl.DroolsFactoryImpl;
import org.kie.workbench.common.stunner.bpmn.backend.legacy.Bpmn2JsonMarshaller;
import org.kie.workbench.common.stunner.bpmn.backend.legacy.Bpmn2JsonUnmarshaller;
import org.kie.workbench.common.stunner.bpmn.backend.legacy.plugin.IDiagramPlugin;
import org.kie.workbench.common.stunner.bpmn.backend.legacy.plugin.impl.PluginServiceImpl;
import org.kie.workbench.common.stunner.bpmn.backend.legacy.profile.IDiagramProfile;
import org.kie.workbench.common.stunner.bpmn.backend.legacy.repository.Repository;
import org.kie.workbench.common.stunner.bpmn.backend.legacy.resource.JBPMBpmn2ResourceImpl;
import org.kie.workbench.common.stunner.bpmn.backend.legacy.util.ConfigurationProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The implementation of the default profile for Process Designer.
* @author Antoine Toulme
*/
@ApplicationScoped
public class DefaultProfileImpl implements IDiagramProfile {
private static Logger _logger = LoggerFactory.getLogger(DefaultProfileImpl.class);
private Map<String, IDiagramPlugin> _plugins = new LinkedHashMap<String, IDiagramPlugin>();
private String _stencilSet;
private String _localHistoryEnabled;
private String _localHistoryTimeout;
private String _repositoryId;
private String _repositoryRoot;
private String _repositoryName;
private String _repositoryHost;
private String _repositoryProtocol;
private String _repositorySubdomain;
private String _repositoryUsr;
private String _repositoryPwd;
private String _repositoryGlobalDir;
public DefaultProfileImpl() {
}
public DefaultProfileImpl(ServletContext servletContext) {
this(servletContext,
true);
}
public DefaultProfileImpl(ServletContext servletContext,
boolean initializeLocalPlugins) {
if (initializeLocalPlugins) {
initializeLocalPlugins(servletContext);
}
}
public String getTitle() {
return "Process Designer";
}
public String getStencilSet() {
return _stencilSet;
}
public Collection<String> getStencilSetExtensions() {
return Collections.emptyList();
}
public Collection<String> getPlugins() {
return Collections.unmodifiableCollection(_plugins.keySet());
}
private void initializeLocalPlugins(ServletContext context) {
Map<String, IDiagramPlugin> registry = PluginServiceImpl.getLocalPluginsRegistry(context);
//we read the default.xml file and make sense of it.
FileInputStream fileStream = null;
try {
try {
fileStream = new FileInputStream(new StringBuilder(context.getRealPath("/")).append("/").
append(ConfigurationProvider.getInstance().getDesignerContext()).append("profiles").append("/").append("default.xml").toString());
} catch (FileNotFoundException e) {
throw new RuntimeException(e);
}
XMLInputFactory factory = XMLInputFactory.newInstance();
XMLStreamReader reader = factory.createXMLStreamReader(fileStream,
"UTF-8");
while (reader.hasNext()) {
if (reader.next() == XMLStreamReader.START_ELEMENT) {
if ("profile".equals(reader.getLocalName())) {
for (int i = 0; i < reader.getAttributeCount(); i++) {
if ("stencilset".equals(reader.getAttributeLocalName(i))) {
_stencilSet = reader.getAttributeValue(i);
}
}
} else if ("plugin".equals(reader.getLocalName())) {
String name = null;
for (int i = 0; i < reader.getAttributeCount(); i++) {
if ("name".equals(reader.getAttributeLocalName(i))) {
name = reader.getAttributeValue(i);
}
}
_plugins.put(name,
registry.get(name));
}
}
}
} catch (XMLStreamException e) {
_logger.error(e.getMessage(),
e);
throw new RuntimeException(e); // stop initialization
        } finally {
            if (fileStream != null) {
                try {
                    fileStream.close();
                } catch (IOException e) {
                    // Ignore failures while closing the profile stream.
                }
            }
        }
}
public String getName() {
return "default";
}
public String getSerializedModelExtension() {
return "bpmn";
}
public String getRepositoryId() {
return _repositoryId;
}
public String getRepositoryRoot() {
return _repositoryRoot;
}
public String getRepositoryName() {
return _repositoryName;
}
public String getRepositoryHost() {
return _repositoryHost;
}
public String getRepositoryProtocol() {
return _repositoryProtocol;
}
public String getRepositorySubdomain() {
return _repositorySubdomain;
}
public String getRepositoryUsr() {
return _repositoryUsr;
}
public String getRepositoryPwd() {
return _repositoryPwd;
}
public String getRepositoryGlobalDir() {
return _repositoryGlobalDir;
}
public String getRepositoryGlobalDir(String uuid) {
return _repositoryGlobalDir;
}
public String getLocalHistoryEnabled() {
return _localHistoryEnabled;
}
public String getLocalHistoryTimeout() {
return _localHistoryTimeout;
}
@Override
public String getStoreSVGonSaveOption() {
return "false";
}
public Repository getRepository() {
return null;
}
@Override
public void init(ServletContext context) {
}
public IDiagramMarshaller createMarshaller() {
return new IDiagramMarshaller() {
public String parseModel(String jsonModel,
String preProcessingData) {
Bpmn2JsonUnmarshaller unmarshaller = new Bpmn2JsonUnmarshaller();
//Definitions def;
Resource res;
try {
res = unmarshaller.unmarshall(jsonModel,
preProcessingData);
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
Map saveMap = new HashMap();
saveMap.put(XMLResource.OPTION_ENCODING,
"UTF-8");
saveMap.put(XMLResource.OPTION_DEFER_IDREF_RESOLUTION,
true);
saveMap.put(XMLResource.OPTION_DISABLE_NOTIFY,
true);
saveMap.put(XMLResource.OPTION_PROCESS_DANGLING_HREF,
XMLResource.OPTION_PROCESS_DANGLING_HREF_RECORD);
res.save(outputStream,
saveMap);
return outputStream.toString();
} catch (JsonParseException e) {
_logger.error(e.getMessage(),
e);
} catch (IOException e) {
_logger.error(e.getMessage(),
e);
}
return "";
}
public Definitions getDefinitions(String jsonModel,
String preProcessingData) {
try {
Bpmn2JsonUnmarshaller unmarshaller = new Bpmn2JsonUnmarshaller();
JBPMBpmn2ResourceImpl res = (JBPMBpmn2ResourceImpl) unmarshaller.unmarshall(jsonModel,
preProcessingData);
return (Definitions) res.getContents().get(0);
} catch (JsonParseException e) {
_logger.error(e.getMessage(),
e);
} catch (IOException e) {
_logger.error(e.getMessage(),
e);
}
return null;
}
public Resource getResource(String jsonModel,
String preProcessingData) {
try {
Bpmn2JsonUnmarshaller unmarshaller = new Bpmn2JsonUnmarshaller();
return (JBPMBpmn2ResourceImpl) unmarshaller.unmarshall(jsonModel,
preProcessingData);
} catch (JsonParseException e) {
_logger.error(e.getMessage(),
e);
} catch (IOException e) {
_logger.error(e.getMessage(),
e);
}
return null;
}
};
}
public IDiagramUnmarshaller createUnmarshaller() {
return new IDiagramUnmarshaller() {
public String parseModel(String xmlModel,
IDiagramProfile profile,
String preProcessingData) {
Bpmn2JsonMarshaller marshaller = new Bpmn2JsonMarshaller();
marshaller.setProfile(profile);
try {
return marshaller.marshall(getDefinitions(xmlModel),
preProcessingData);
} catch (Exception e) {
_logger.error(e.getMessage(),
e);
}
return "";
}
};
}
private Definitions getDefinitions(String xml) {
try {
DroolsFactoryImpl.init();
BpsimFactoryImpl.init();
ResourceSet resourceSet = new ResourceSetImpl();
resourceSet.getResourceFactoryRegistry().getExtensionToFactoryMap()
.put(Resource.Factory.Registry.DEFAULT_EXTENSION,
new Bpmn2ResourceFactoryImpl());
resourceSet.getPackageRegistry().put("http://www.omg.org/spec/BPMN/20100524/MODEL",
Bpmn2Package.eINSTANCE);
Resource resource = resourceSet.createResource(URI.createURI("inputStream://dummyUriWithValidSuffix.xml"));
InputStream is = new ByteArrayInputStream(xml.getBytes("UTF-8"));
resource.load(is,
Collections.EMPTY_MAP);
return ((DocumentRoot) resource.getContents().get(0)).getDefinitions();
} catch (Throwable t) {
t.printStackTrace();
return null;
}
}
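    // Illustrative round trip through the two factories above (variable
    // names are assumptions):
    //
    //   IDiagramProfile profile = new DefaultProfileImpl();
    //   String bpmn2Xml = profile.createMarshaller().parseModel(jsonModel, null);
    //   String json = profile.createUnmarshaller().parseModel(bpmn2Xml, profile, null);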
public String getStencilSetURL() {
return "/org.jbpm.designer.jBPMDesigner/stencilsets/bpmn2.0/bpmn2.0.json";
}
public String getStencilSetNamespaceURL() {
return "http://b3mn.org/stencilset/bpmn2.0#";
}
public String getStencilSetExtensionURL() {
return "http://oryx-editor.org/stencilsets/extensions/bpmncosts-2.0#";
}
}
| jhrcek/kie-wb-common | kie-wb-common-stunner/kie-wb-common-stunner-sets/kie-wb-common-stunner-bpmn/kie-wb-common-stunner-bpmn-backend/src/main/java/org/kie/workbench/common/stunner/bpmn/backend/legacy/profile/impl/DefaultProfileImpl.java | Java | apache-2.0 | 13,298 |
/*
* Copyright 2010 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.ide.common.client.factconstraints.dataprovider;
import java.util.Map;
public interface FieldDataProvider {
public void setFactType(String factType);
public void setFieldName(String fieldName);
public String[] getArgumentKeys();
public Object getArgumentValue(String key);
public void setArgumentValue(String key, Object value);
public Map<Object,String> getData();
public Object getDefault();
}
| Rikkola/guvnor | droolsjbpm-ide-common/src/main/java/org/drools/ide/common/client/factconstraints/dataprovider/FieldDataProvider.java | Java | apache-2.0 | 1,045 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.olingo.client.core.http;
import java.net.URI;
import org.apache.http.annotation.NotThreadSafe;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
/**
* Class identifying MERGE HTTP method.
*/
@NotThreadSafe
public class HttpMerge extends HttpEntityEnclosingRequestBase {
  public static final String METHOD_NAME = "MERGE";
/**
* Constructor.
*/
public HttpMerge() {
super();
}
/**
* Constructor.
*
* @param uri request URI.
*/
public HttpMerge(final URI uri) {
super();
setURI(uri);
}
/**
* Constructor.
*
* @param uri request URI.
* @throws IllegalArgumentException if the uri is invalid.
*/
public HttpMerge(final String uri) {
super();
setURI(URI.create(uri));
}
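  // Illustrative usage (client setup, URL and payload are assumptions):
  //
  //   HttpClient client = HttpClientBuilder.create().build();
  //   HttpMerge merge = new HttpMerge("http://services.odata.org/OData.svc/Products(1)");
  //   merge.setEntity(new StringEntity("{\"Name\":\"Bread\"}", ContentType.APPLICATION_JSON));
  //   HttpResponse response = client.execute(merge);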
/**
* Gets HTTP method name.
*
* @return HTTP method name.
*/
@Override
public String getMethod() {
return METHOD_NAME;
}
}
| mtaal/olingo-odata4-jpa | lib/client-core/src/main/java/org/apache/olingo/client/core/http/HttpMerge.java | Java | apache-2.0 | 1,741 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import clownfish._clownfish
from clownfish._clownfish import *
| rectang/lucy-clownfish | runtime/python/src/clownfish/__init__.py | Python | apache-2.0 | 846 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import os
import random
import sys
from oslo.config import cfg
from nova import conductor
from nova import context
from nova import exception
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common import service
from nova import servicegroup
from nova import utils
from nova import version
from nova import wsgi
LOG = logging.getLogger(__name__)
service_opts = [
cfg.IntOpt('report_interval',
default=10,
help='seconds between nodes reporting state to datastore'),
cfg.BoolOpt('periodic_enable',
default=True,
help='enable periodic tasks'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='range of seconds to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
cfg.ListOpt('enabled_apis',
default=['ec2', 'osapi_compute', 'metadata'],
help='a list of APIs to enable by default'),
cfg.ListOpt('enabled_ssl_apis',
default=[],
help='a list of APIs with enabled SSL'),
cfg.StrOpt('ec2_listen',
default="0.0.0.0",
help='IP address for EC2 API to listen'),
cfg.IntOpt('ec2_listen_port',
default=8773,
help='port for ec2 api to listen'),
cfg.IntOpt('ec2_workers',
default=None,
help='Number of workers for EC2 API service'),
cfg.StrOpt('osapi_compute_listen',
default="0.0.0.0",
help='IP address for OpenStack API to listen'),
cfg.IntOpt('osapi_compute_listen_port',
default=8774,
               help='listen port for osapi compute'),
cfg.IntOpt('osapi_compute_workers',
default=None,
help='Number of workers for OpenStack API service'),
cfg.StrOpt('metadata_manager',
default='nova.api.manager.MetadataManager',
help='OpenStack metadata service manager'),
cfg.StrOpt('metadata_listen',
default="0.0.0.0",
help='IP address for metadata api to listen'),
cfg.IntOpt('metadata_listen_port',
default=8775,
help='port for metadata api to listen'),
cfg.IntOpt('metadata_workers',
default=None,
help='Number of workers for metadata service'),
cfg.StrOpt('compute_manager',
default='nova.compute.manager.ComputeManager',
help='full class name for the Manager for compute'),
cfg.StrOpt('console_manager',
default='nova.console.manager.ConsoleProxyManager',
help='full class name for the Manager for console proxy'),
cfg.StrOpt('cert_manager',
default='nova.cert.manager.CertManager',
help='full class name for the Manager for cert'),
cfg.StrOpt('network_manager',
default='nova.network.manager.VlanManager',
help='full class name for the Manager for network'),
cfg.StrOpt('scheduler_manager',
default='nova.scheduler.manager.SchedulerManager',
help='full class name for the Manager for scheduler'),
cfg.IntOpt('service_down_time',
default=60,
help='maximum time since last check-in for up service'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
CONF.import_opt('host', 'nova.netconf')
class Service(service.Service):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
    its state to the database services table.
"""
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_enable=None, periodic_fuzzy_delay=None,
periodic_interval_max=None, db_allowed=True,
*args, **kwargs):
super(Service, self).__init__()
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
# NOTE(russellb) We want to make sure to create the servicegroup API
# instance early, before creating other things such as the manager,
# that will also create a servicegroup API instance. Internally, the
# servicegroup only allocates a single instance of the driver API and
# we want to make sure that our value of db_allowed is there when it
# gets created. For that to happen, this has to be the first instance
# of the servicegroup API.
self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
manager_class = importutils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host, *args, **kwargs)
self.report_interval = report_interval
self.periodic_enable = periodic_enable
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.periodic_interval_max = periodic_interval_max
self.saved_args, self.saved_kwargs = args, kwargs
self.backdoor_port = None
self.conductor_api = conductor.API(use_local=db_allowed)
self.conductor_api.wait_until_ready(context.get_admin_context())
def start(self):
verstr = version.version_string_with_package()
LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
{'topic': self.topic, 'version': verstr})
self.basic_config_check()
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
try:
self.service_ref = self.conductor_api.service_get_by_args(ctxt,
self.host, self.binary)
self.service_id = self.service_ref['id']
except exception.NotFound:
self.service_ref = self._create_service_ref(ctxt)
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") %
self.topic)
self.manager.pre_start_hook(rpc_connection=self.conn)
rpc_dispatcher = self.manager.create_rpc_dispatcher(self.backdoor_port)
# Share this same connection for these Consumers
self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)
self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
self.manager.post_start_hook()
LOG.debug(_("Join ServiceGroup membership for this service %s")
% self.topic)
# Add service to the ServiceGroup membership group.
self.servicegroup_api.join(self.host, self.topic, self)
if self.periodic_enable:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
self.tg.add_dynamic_timer(self.periodic_tasks,
initial_delay=initial_delay,
periodic_interval_max=
self.periodic_interval_max)
def _create_service_ref(self, context):
svc_values = {
'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0
}
service = self.conductor_api.service_create(context, svc_values)
self.service_id = service['id']
return service
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_enable=None,
periodic_fuzzy_delay=None, periodic_interval_max=None,
db_allowed=True):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'nova-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
:param periodic_enable: defaults to CONF.periodic_enable
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
:param periodic_interval_max: if set, the max time to wait between runs
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(sys.argv[0])
if not topic:
topic = binary.rpartition('nova-')[2]
if not manager:
manager_cls = ('%s_manager' %
binary.rpartition('nova-')[2])
manager = CONF.get(manager_cls, None)
if report_interval is None:
report_interval = CONF.report_interval
if periodic_enable is None:
periodic_enable = CONF.periodic_enable
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_enable=periodic_enable,
periodic_fuzzy_delay=periodic_fuzzy_delay,
periodic_interval_max=periodic_interval_max,
db_allowed=db_allowed)
return service_obj
def kill(self):
"""Destroy the service object in the datastore."""
self.stop()
try:
self.conductor_api.service_destroy(context.get_admin_context(),
self.service_id)
except exception.NotFound:
LOG.warn(_('Service killed that has no database entry'))
def stop(self):
try:
self.conn.close()
except Exception:
pass
super(Service, self).stop()
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
def basic_config_check(self):
"""Perform basic config checks before starting processing."""
# Make sure the tempdir exists and is writable
try:
with utils.tempdir():
pass
except Exception as e:
LOG.error(_('Temporary directory is invalid: %s'), e)
sys.exit(1)
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.workers = getattr(CONF, '%s_workers' % name, None)
self.use_ssl = use_ssl
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port,
use_ssl=self.use_ssl,
max_url_len=max_url_len)
# Pull back actual port used
self.port = self.server.port
self.backdoor_port = None
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
if self.manager:
self.manager.init_host()
self.manager.pre_start_hook()
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.server.start()
if self.manager:
self.manager.post_start_hook()
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
def process_launcher():
return service.ProcessLauncher()
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
global _launcher
if _launcher:
raise RuntimeError(_('serve() can only be called once'))
_launcher = service.launch(server, workers=workers)
def wait():
_launcher.wait()
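# Illustrative wiring (the binary and topic values are assumptions):
#
#     server = Service.create(binary='nova-compute', topic='compute')
#     serve(server)
#     wait()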
| Brocade-OpenSource/OpenStack-DNRM-Nova | nova/service.py | Python | apache-2.0 | 15,048 |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.xpack.core;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.IndexSettingProvider;
import org.elasticsearch.xpack.cluster.routing.allocation.DataTierAllocationDecider;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
/**
* The {@code DataTier} class encapsulates the formalization of the "content",
* "hot", "warm", and "cold" tiers as node roles. In contains the
* roles themselves as well as helpers for validation and determining if a node
* has a tier configured.
*
* Related:
* {@link org.elasticsearch.xpack.cluster.routing.allocation.DataTierAllocationDecider}
*/
public class DataTier {
public static final String DATA_CONTENT = "data_content";
public static final String DATA_HOT = "data_hot";
public static final String DATA_WARM = "data_warm";
public static final String DATA_COLD = "data_cold";
public static final String DATA_FROZEN = "data_frozen";
public static final Set<String> ALL_DATA_TIERS =
new HashSet<>(Arrays.asList(DATA_CONTENT, DATA_HOT, DATA_WARM, DATA_COLD, DATA_FROZEN));
/**
* Returns true if the given tier name is a valid tier
*/
public static boolean validTierName(String tierName) {
return DATA_CONTENT.equals(tierName) ||
DATA_HOT.equals(tierName) ||
DATA_WARM.equals(tierName) ||
DATA_COLD.equals(tierName) ||
DATA_FROZEN.equals(tierName);
}
/**
* Returns true iff the given settings have a data tier setting configured
*/
public static boolean isExplicitDataTier(Settings settings) {
/*
* This method can be called before the o.e.n.NodeRoleSettings.NODE_ROLES_SETTING is
* initialized. We do not want to trigger initialization prematurely because that will bake
* the default roles before plugins have had a chance to register them. Therefore,
* to avoid initializing this setting prematurely, we avoid using the actual node roles
* setting instance here in favor of the string.
*/
if (settings.hasValue("node.roles")) {
return settings.getAsList("node.roles").stream().anyMatch(DataTier::validTierName);
}
return false;
}
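    // Illustrative check (the settings value is an assumption):
    //
    //   Settings s = Settings.builder().putList("node.roles", "data_hot").build();
    //   boolean explicit = isExplicitDataTier(s); // true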
public static boolean isContentNode(DiscoveryNode discoveryNode) {
return discoveryNode.getRoles().contains(DiscoveryNodeRole.DATA_CONTENT_NODE_ROLE)
|| discoveryNode.getRoles().contains(DiscoveryNodeRole.DATA_ROLE);
}
public static boolean isHotNode(DiscoveryNode discoveryNode) {
return discoveryNode.getRoles().contains(DiscoveryNodeRole.DATA_HOT_NODE_ROLE)
|| discoveryNode.getRoles().contains(DiscoveryNodeRole.DATA_ROLE);
}
public static boolean isWarmNode(DiscoveryNode discoveryNode) {
return discoveryNode.getRoles().contains(DiscoveryNodeRole.DATA_WARM_NODE_ROLE)
|| discoveryNode.getRoles().contains(DiscoveryNodeRole.DATA_ROLE);
}
public static boolean isColdNode(DiscoveryNode discoveryNode) {
return discoveryNode.getRoles().contains(DiscoveryNodeRole.DATA_COLD_NODE_ROLE)
|| discoveryNode.getRoles().contains(DiscoveryNodeRole.DATA_ROLE);
}
public static boolean isFrozenNode(DiscoveryNode discoveryNode) {
return isFrozenNode(discoveryNode.getRoles());
}
public static boolean isFrozenNode(final Set<DiscoveryNodeRole> roles) {
return roles.contains(DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE) || roles.contains(DiscoveryNodeRole.DATA_ROLE);
}
/**
* This setting provider injects the setting allocating all newly created indices with
* {@code index.routing.allocation.include._tier: "data_hot"} unless the user overrides the
* setting while the index is being created (in a create index request for instance)
*/
public static class DefaultHotAllocationSettingProvider implements IndexSettingProvider {
private static final Logger logger = LogManager.getLogger(DefaultHotAllocationSettingProvider.class);
@Override
public Settings getAdditionalIndexSettings(String indexName, boolean isDataStreamIndex, Settings indexSettings) {
Set<String> settings = indexSettings.keySet();
if (settings.contains(DataTierAllocationDecider.INDEX_ROUTING_PREFER)) {
// It's okay to put it, it will be removed or overridden by the template/request settings
return Settings.builder().put(DataTierAllocationDecider.INDEX_ROUTING_PREFER, DATA_HOT).build();
} else if (settings.stream().anyMatch(s -> s.startsWith(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".")) ||
settings.stream().anyMatch(s -> s.startsWith(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + ".")) ||
settings.stream().anyMatch(s -> s.startsWith(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX + "."))) {
// A different index level require, include, or exclude has been specified, so don't put the setting
logger.debug("index [{}] specifies custom index level routing filtering, skipping tier allocation", indexName);
return Settings.EMPTY;
} else {
// Otherwise, put the setting in place by default, the "hot"
// tier if the index is part of a data stream, the "content"
// tier if it is not.
if (isDataStreamIndex) {
return Settings.builder().put(DataTierAllocationDecider.INDEX_ROUTING_PREFER, DATA_HOT).build();
} else {
return Settings.builder().put(DataTierAllocationDecider.INDEX_ROUTING_PREFER, DATA_CONTENT).build();
}
}
}
}
}
| robin13/elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTier.java | Java | apache-2.0 | 6,329 |
from flask import Blueprint, render_template, request, url_for
from CTFd.models import Users
from CTFd.utils import config
from CTFd.utils.decorators import authed_only
from CTFd.utils.decorators.visibility import (
check_account_visibility,
check_score_visibility,
)
from CTFd.utils.helpers import get_errors, get_infos
from CTFd.utils.user import get_current_user
users = Blueprint("users", __name__)
@users.route("/users")
@check_account_visibility
def listing():
q = request.args.get("q")
field = request.args.get("field", "name")
if field not in ("name", "affiliation", "website"):
field = "name"
filters = []
if q:
filters.append(getattr(Users, field).like("%{}%".format(q)))
users = (
Users.query.filter_by(banned=False, hidden=False)
.filter(*filters)
.order_by(Users.id.asc())
.paginate(per_page=50)
)
args = dict(request.args)
args.pop("page", 1)
return render_template(
"users/users.html",
users=users,
prev_page=url_for(request.endpoint, page=users.prev_num, **args),
next_page=url_for(request.endpoint, page=users.next_num, **args),
q=q,
field=field,
)
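# Illustrative request (values are assumptions): GET /users?field=affiliation&q=acme
# renders the listing filtered by Users.affiliation LIKE '%acme%', 50 users per page.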
@users.route("/profile")
@users.route("/user")
@authed_only
def private():
infos = get_infos()
errors = get_errors()
user = get_current_user()
if config.is_scoreboard_frozen():
infos.append("Scoreboard has been frozen")
return render_template(
"users/private.html",
user=user,
account=user.account,
infos=infos,
errors=errors,
)
@users.route("/users/<int:user_id>")
@check_account_visibility
@check_score_visibility
def public(user_id):
infos = get_infos()
errors = get_errors()
user = Users.query.filter_by(id=user_id, banned=False, hidden=False).first_or_404()
if config.is_scoreboard_frozen():
infos.append("Scoreboard has been frozen")
return render_template(
"users/public.html", user=user, account=user.account, infos=infos, errors=errors
)
| LosFuzzys/CTFd | CTFd/users.py | Python | apache-2.0 | 2,090 |
<?php
/*
Private variables and functions are prefixed with an underscore ('_').
*/
require_once('TranslationContainer.php');
class TMXParser {
private $_parser = null;
private $_xmlFile = null;
private $_currentTuid = null;
private $_boolSeg = false;
private $_currentLanguage = null;
private $_currentPage = null;
private $_boolPage = false;
private $_currentData = '';
private $_tc = null;
private $_masterLanguage = 'en';
public function __construct($xmlFile='', $masterLanguage = '') {
if($masterLanguage != '') {
$this->_masterLanguage = $masterLanguage;
}
$this->_xmlFile = $xmlFile;
}
public function doParse() {
$this->_tc = new TranslationContainer($this->_masterLanguage);
$this->_parser = xml_parser_create();
xml_set_element_handler($this->_parser, "_startElement", "_endElement");
xml_set_object($this->_parser, $this);
xml_parser_set_option($this->_parser, XML_OPTION_CASE_FOLDING, 0);
xml_parser_set_option($this->_parser, XML_OPTION_TARGET_ENCODING, 'utf-8');
xml_set_character_data_handler($this->_parser, "_contentElement");
if ($this->_xmlFile === null) {
throw new Exception('Translation source xml is not set. Use setXML() or constructor to set source xml.');
}
if (!is_readable($this->_xmlFile)) {
throw new Exception('Translation source xml is not readable.');
}
if (!xml_parse($this->_parser, file_get_contents($this->_xmlFile))) {
$ex = sprintf('XML error: %s at line %d',
xml_error_string(xml_get_error_code($this->_parser)),
xml_get_current_line_number($this->_parser));
xml_parser_free($this->_parser);
throw new Exception($ex);
}
return $this->_tc;
}
private function _startElement($parser, $name, $atrrs) {
//echo '<br>name='.$name.'<br>';
//print_r($atrrs);
if ($this->_boolSeg != false) {
//echo '<p>##############</p>';
$this->_currentData .= "<".$name;
foreach($atrrs as $key => $value) {
$this->_currentData .= " $key=\"$value\"";
}
$this->_currentData .= ">";
}
else {
switch (strtolower($name)) {
case 'tu':
if(isset($atrrs['tuid']) === true){
$this->_currentTuid = $atrrs['tuid'];
}
break;
case 'tuv':
if(isset($atrrs['xml:lang']) === true){
$this->_currentLanguage = $atrrs['xml:lang'];
}
break;
case 'prop':
if( isset($atrrs['type']) === true && $atrrs['type'] == 'page' ){
$this->_boolPage = true;
$this->_currentPage = '';
}
break;
case 'seg':
$this->_boolSeg = true;
$this->_currentData = '';
break;
}
}
}
private function _endElement($parser, $name) {
//echo '<br>endname='.$name.'<br>';
if (($this->_boolSeg != false) and ($name !== 'seg')) {
//echo '<p>##############</p>';
$this->_currentData .= "</".$name.">";
}
else {
switch (strtolower($name)) {
case 'tu':
$this->_currentTuid = null;
break;
case 'tuv':
$this->_currentLanguage = null;
break;
case 'prop':
if( $this->_boolPage ){
$this->_boolPage = false;
//$this->_currentPage = '';
}
break;
case 'seg':
$this->_boolSeg = false;
if( ($this->_tc !== null) && !$this->_tc->hasId($this->_currentTuid) ) {
//$this->_currentData = html_entity_decode($this->_currentData, ENT_QUOTES, 'utf-8');
$this->_currentData = base64_decode($this->_currentData);
$this->_tc->addWordTuid($this->_currentTuid, $this->_currentData, $this->_currentLanguage, $this->_currentPage);
//echo ' <br> '.$this->_currentData;
}
break;
}
}
}
private function _contentElement($parser, $data) {
//echo ' <br>content='.$this->_currentData;
if($this->_boolSeg && $this->_currentTuid !== null && $this->_currentLanguage !== null ) {
$this->_currentData .= $data;
}
if( $this->_boolPage ) {
$this->_currentPage = $data;
}
}
public function getXML() {
return $this->_xmlFile;
}
public function setXML($xmlFile) {
$this->_xmlFile = $xmlFile;
}
public function getMasterLanguage() {
return $this->_masterLanguage;
}
public function setMasterLanguage($lang) {
$this->_masterLanguage = $lang;
}
}
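/*
Illustrative usage (the file name is an assumption):

  $parser = new TMXParser('translations.tmx', 'en');
  $container = $parser->doParse(); // TranslationContainer built from the <tu> entries
*/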
| mrinsss/Full-Repo | tripezi/system/application/libraries/multilanguage/TMXParser.php | PHP | apache-2.0 | 4,332 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.processor.interceptor;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.builder.AdviceWithRouteBuilder;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.seda.SedaEndpoint;
import org.apache.camel.reifier.RouteReifier;
import org.junit.Test;
public class AdviceWithMockMultipleEndpointsWithSkipTest extends ContextTestSupport {
// START SNIPPET: e1
// tag::e1[]
@Test
public void testAdvisedMockEndpointsWithSkip() throws Exception {
        // advise the first route using the inlined AdviceWith route builder,
        // which has more capabilities than the regular route builder
RouteReifier.adviceWith(context.getRouteDefinitions().get(0), context, new AdviceWithRouteBuilder() {
@Override
public void configure() throws Exception {
                // mock sending to direct:foo and direct:bar and skip sending to them
mockEndpointsAndSkip("direct:foo", "direct:bar");
}
});
getMockEndpoint("mock:result").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:direct:foo").expectedMessageCount(1);
getMockEndpoint("mock:direct:bar").expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
        // the message was not sent to the direct:foo route and thus not sent to the seda endpoint
SedaEndpoint seda = context.getEndpoint("seda:foo", SedaEndpoint.class);
assertEquals(0, seda.getCurrentQueueSize());
}
// end::e1[]
// END SNIPPET: e1
// START SNIPPET: route
// tag::route[]
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
from("direct:start").to("direct:foo").to("direct:bar").to("mock:result");
from("direct:foo").transform(constant("Bye World")).to("seda:foo");
from("direct:bar").transform(constant("Hi World")).to("seda:foo");
}
};
}
// end::route[]
// END SNIPPET: route
}
| Fabryprog/camel | docs/components/modules/ROOT/examples/core/camel-core/src/test/java/org/apache/camel/processor/interceptor/AdviceWithMockMultipleEndpointsWithSkipTest.java | Java | apache-2.0 | 3,016 |
#
# Cookbook:: hadoop
# Recipe:: pig
#
# Copyright © 2013-2016 Cask Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe 'hadoop::repo'
package hadoop_package('pig') do
action :install
end
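# Illustrative usage (assumption): pull this recipe in via a node's run_list,
# e.g. "recipe[hadoop::pig]", after which the distribution's pig package is installed.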
| caskdata/hadoop_cookbook | recipes/pig.rb | Ruby | apache-2.0 | 714 |
<?php
// locale: great britain english (en-gb)
// author: Chris Gedrim https://github.com/chrisgedrim
return array(
"months" => explode('_', 'January_February_March_April_May_June_July_August_September_October_November_December'),
"monthsNominative" => explode('_', 'January_February_March_April_May_June_July_August_September_October_November_December'),
"monthsShort" => explode('_', 'Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec'),
"weekdays" => explode('_', 'Monday_Tuesday_Wednesday_Thursday_Friday_Saturday_Sunday'),
"weekdaysShort" => explode('_', 'Mon_Tue_Wed_Thu_Fri_Sat_Sun'),
"calendar" => array(
"sameDay" => '[Today]',
"nextDay" => '[Tomorrow]',
"lastDay" => '[Yesterday]',
"lastWeek" => '[Last] l',
"sameElse" => 'l',
"withTime" => '[at] H:i',
"default" => 'd/m/Y',
),
"relativeTime" => array(
"future" => 'in %s',
"past" => '%s ago',
"s" => 'a few seconds',
"ss" => '%d seconds',
"m" => 'a minute',
"mm" => '%d minutes',
"h" => 'an hour',
"hh" => '%d hours',
"d" => 'a day',
"dd" => '%d days',
"M" => 'a month',
"MM" => '%d months',
"y" => 'a year',
"yy" => '%d years',
),
"ordinal" => function ($number)
{
$n = $number % 100;
$ends = array('th', 'st', 'nd', 'rd', 'th', 'th', 'th', 'th', 'th', 'th');
if ($n >= 11 && $n <= 13)
{
return $number . '[th]';
}
return $number . '[' . $ends[$number % 10] . ']';
},
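    // Illustrative: the closure above yields "1[st]", "2[nd]", "3[rd]",
    // "11[th]", "12[th]", "13[th]" and "21[st]".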
"week" => array(
"dow" => 1, // Monday is the first day of the week.
"doy" => 4 // The week that contains Jan 4th is the first week of the year.
),
"customFormats" => array(
"LT" => "G:i", // 22:00
"LTS" => "G:i:s", // 22:00:00
"L" => "d/m/Y", // 12/06/2010
"l" => "j/n/Y", // 12/6/2010
"LL" => "j F Y", // 12 June 2010
"ll" => "j M Y", // 12 Jun 2010
"LLL" => "j F Y G:i", // 12 June 2010 22:00
"lll" => "j M Y G:i", // 12 Jun 2010 22:00
"LLLL" => "l, j F F Y G:i", // Saturday, 12 June June 2010 22:00
"llll" => "D, j M Y G:i", // Sat, 12 Jun 2010 22:00
),
); | novaramedia/novaramedia-com | vendor/fightbulc/moment/src/Locales/en_GB.php | PHP | apache-2.0 | 2,478 |
/*
* Copyright © 2014-2015 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package co.cask.cdap.api.dataset.lib;
import co.cask.cdap.api.annotation.Beta;
import co.cask.cdap.api.dataset.DataSetException;
import java.util.Map;
import java.util.Set;
import javax.annotation.Nullable;
/**
* Represents a dataset that is split into partitions that can be uniquely addressed
* by time. Each partition is a path in a file set, with a timestamp attached as meta data.
* The timestamp is mapped to a partition key of a {@link co.cask.cdap.api.dataset.lib.PartitionedFileSet}
* with five integer partitioning fields: the year, month, day, hour and minute. Partitions can
* be retrieved using time range or using a {@link co.cask.cdap.api.dataset.lib.PartitionFilter}.
* <p>
 * The granularity of time is in minutes; that is, any seconds or milliseconds after the
 * full minute are ignored for the partition keys. That means there can not be two partitions
 * in the same minute. Also, when retrieving partitions via time or time range using
* {@link #getPartitionByTime}, {@link #getPartitionsByTime}, or when writing a partition using
* {@link #getPartitionOutput}, the seconds and milliseconds on the
* time or time range are ignored.
* <p>
* This dataset can be made available for querying with SQL (explore). This is enabled through dataset
* properties when the dataset is created. See {@link co.cask.cdap.api.dataset.lib.FileSetProperties}
* for details. If it is enabled for explore, a Hive external table will be created when the dataset is
* created. The Hive table is partitioned by year, month, day, hour and minute.
*/
@Beta
public interface TimePartitionedFileSet extends PartitionedFileSet {
/**
* Add a partition for a given time, stored at a given path (relative to the file set's base path).
*/
void addPartition(long time, String path);
/**
* Add a partition for a given time, stored at a given path (relative to the file set's base path),
* with given metadata.
*/
void addPartition(long time, String path, Map<String, String> metadata);
/**
* Adds a new metadata entry for a particular partition.
* Note that existing entries can not be updated.
* @throws DataSetException in case an attempt is made to update existing entries.
*/
void addMetadata(long time, String metadataKey, String metadataValue);
/**
   * Adds a set of new metadata entries for a particular partition.
   * Note that existing entries can not be updated.
   * @throws DataSetException in case an attempt is made to update existing entries.
*/
void addMetadata(long time, Map<String, String> metadata);
/**
* Remove a partition for a given time.
*/
void dropPartition(long time);
/**
* Return the partition associated with the given time, rounded to the minute;
* or null if no such partition exists.
*/
@Nullable
TimePartitionDetail getPartitionByTime(long time);
/**
* Return all partitions within the time range given by startTime (inclusive) and endTime (exclusive),
* both rounded to the full minute.
*/
Set<TimePartitionDetail> getPartitionsByTime(long startTime, long endTime);
/**
* Return a partition output for a specific time, rounded to the minute, in preparation for creating a new partition.
* Obtain the location to write from the PartitionOutput, then call the {@link PartitionOutput#addPartition}
* to add the partition to this dataset.
*/
TimePartitionOutput getPartitionOutput(long time);
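  // Illustrative write flow (variable names are assumptions):
  //
  //   TimePartitionOutput output = tpfs.getPartitionOutput(System.currentTimeMillis());
  //   // ... write files under the output's location, then register it:
  //   output.addPartition();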
}
| chtyim/cdap | cdap-api/src/main/java/co/cask/cdap/api/dataset/lib/TimePartitionedFileSet.java | Java | apache-2.0 | 4,051 |
/*******************************************************************************
* Copyright 2015-2019 Toaker NewBeyondViewPager
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package net.soulwolf.newbeyondviewpager;
import android.annotation.TargetApi;
import android.content.Context;
import android.os.Build;
import android.util.AttributeSet;
import android.view.ViewGroup;
import com.toaker.common.tlog.TLog;
/**
* Decorator for NewBeyondViewPager
*
* @author Toaker [Toaker](ToakerQin@gmail.com)
* [Toaker](http://www.toaker.com)
* @Time Create by 2015/5/14 9:38
*/
public class NewBeyondViewPager extends ViewGroup {
private static final boolean DEBUG = true;
private static final String LOG_TAG = "NewBeyondViewPager:";
public NewBeyondViewPager(Context context) {
super(context);
}
public NewBeyondViewPager(Context context, AttributeSet attrs) {
super(context, attrs);
}
public NewBeyondViewPager(Context context, AttributeSet attrs, int defStyleAttr) {
super(context, attrs, defStyleAttr);
}
@TargetApi(Build.VERSION_CODES.LOLLIPOP)
public NewBeyondViewPager(Context context, AttributeSet attrs, int defStyleAttr, int defStyleRes) {
super(context, attrs, defStyleAttr, defStyleRes);
}
@Override
protected void onLayout(boolean changed, int l, int t, int r, int b) {
if(DEBUG){
TLog.d(LOG_TAG,"onLayout: %s %s %s %s",l,t,r,b);
}
}
}
| RyanTech/NewBeyondViewPager | library/src/main/java/net/soulwolf/newbeyondviewpager/NewBeyondViewPager.java | Java | apache-2.0 | 2,071 |
<?php
error_reporting(E_ALL ^ E_NOTICE);
include_once("Include_GetString.php") ;
$Theme="Office2007";
?>
<html xmlns="http://www.w3.org/1999/xhtml">
<head ID="Head1">
<title><?php echo GetString("MoreColors") ; ?></title>
<meta http-equiv="Page-Enter" content="blendTrans(Duration=0.1)" />
<meta http-equiv="Page-Exit" content="blendTrans(Duration=0.1)" />
<script type="text/javascript" src="../Scripts/Dialog/DialogHead.js"></script>
<script type="text/javascript" src="../Scripts/Dialog/Dialog_ColorPicker.js"></script>
<link href="../Themes/<?php echo $Theme; ?>/dialog.css" type="text/css" rel="stylesheet" />
<style type="text/css">
.colorcell
{
width:22px;
height:11px;
cursor:hand;
}
.colordiv
{
border:solid 1px #808080;
width:22px;
height:11px;
font-size:1px;
}
</style>
<script>
function DoubleHex(v)
{
if(v<16)return "0"+v.toString(16);
return v.toString(16);
}
function ToHexString(r,g,b)
{
return ("#"+DoubleHex(r*51)+DoubleHex(g*51)+DoubleHex(b*51)).toUpperCase();
}
function MakeHex(z,x,y)
{
//hor->ver
var l=z%2
var t=(z-l)/2
z=l*3+t
//left column, l/r mirror
if(z<3)x=5-x;
//middle row, t/b mirror
if(z==1||z==4)y=5-y;
return ToHexString(5-y,5-x,5-z);
}
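// Illustrative values: MakeHex enumerates the 216 web-safe colours,
// e.g. MakeHex(0,0,0) === "#FF00FF" and MakeHex(5,5,0) === "#FF0000".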
var colors=new Array(216);
for(var z=0;z<6;z++)
{
for(var x=0;x<6;x++)
{
for(var y=0;y<6;y++)
{
var hex=MakeHex(z,x,y)
var xx=(z%2)*6+x;
var yy=Math.floor(z/2)*6+y;
colors[yy*12+xx]=hex;
}
}
}
var arr=[];
for(var i=0;i<colors.length;i++)
{
if(i%12==0)arr.push("<tr>");
arr.push("<td class='colorcell'><div class='colordiv' style='background-color:")
arr.push(colors[i]);
arr.push("' cvalue='");
arr.push(colors[i]);
arr.push("' title='")
arr.push(colors[i]);
arr.push("'> </div></td>");
if(i%12==11)arr.push("</tr>");
}
</script>
</head>
<body>
<div id="ajaxdiv">
<div class="tab-pane-control tab-pane" id="tabPane1">
<div class="tab-row">
<h2 class="tab selected">
<a tabindex="-1" href='colorpicker.php?Theme=<?php echo $Theme; ?>&<?php echo $_SERVER["QUERY_STRING"]; ?>'>
<span style="white-space:nowrap;">
<?php echo GetString("WebPalette") ; ?>
</span>
</a>
</h2>
<h2 class="tab">
<a tabindex="-1" href='colorpicker_basic.php?Theme=<?php echo $Theme; ?>&<?php echo $_SERVER["QUERY_STRING"]; ?>'>
<span style="white-space:nowrap;">
<?php echo GetString("NamedColors") ; ?>
</span>
</a>
</h2>
<h2 class="tab">
<a tabindex="-1" href='colorpicker_more.php?Theme=<?php echo $Theme; ?>&<?php echo $_SERVER["QUERY_STRING"]; ?>'>
<span style="white-space:nowrap;">
<?php echo GetString("CustomColor") ; ?>
</span>
</a>
</h2>
</div>
<div class="tab-page">
<table cellSpacing='2' cellPadding="1" align="center">
<script>
document.write(arr.join(""));
</script>
<tr>
<td colspan="12" height="12"><p align="left"></p>
</td>
</tr>
<tr>
<td colspan="12" valign="middle" height="24">
<span style="height:24px;width:50px;vertical-align:middle;"><?php echo GetString("Color") ; ?>: </span>
<input type="text" id="divpreview" size="7" maxlength="7" style="width:180px;height:24px;border:#a0a0a0 1px solid; Padding:4;"/>
</td>
</tr>
</table>
</div>
</div>
<div id="container-bottom">
<input type="button" id="buttonok" value="<?php echo GetString("OK") ; ?>" class="formbutton" style="width:70px" onclick="do_insert();" />
<input type="button" id="buttoncancel" value="<?php echo GetString("Cancel") ; ?>" class="formbutton" style="width:70px" onclick="do_Close();" />
</div>
</div>
</body>
</html> | srinivasans/educloud | others/editor/Dialogs/colorpicker.php | PHP | apache-2.0 | 3,928 |
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _getPrototypeOf = require('babel-runtime/core-js/object/get-prototype-of');
var _getPrototypeOf2 = _interopRequireDefault(_getPrototypeOf);
var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck');
var _classCallCheck3 = _interopRequireDefault(_classCallCheck2);
var _createClass2 = require('babel-runtime/helpers/createClass');
var _createClass3 = _interopRequireDefault(_createClass2);
var _possibleConstructorReturn2 = require('babel-runtime/helpers/possibleConstructorReturn');
var _possibleConstructorReturn3 = _interopRequireDefault(_possibleConstructorReturn2);
var _inherits2 = require('babel-runtime/helpers/inherits');
var _inherits3 = _interopRequireDefault(_inherits2);
var _simpleAssign = require('simple-assign');
var _simpleAssign2 = _interopRequireDefault(_simpleAssign);
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _propTypes = require('prop-types');
var _propTypes2 = _interopRequireDefault(_propTypes);
var _keyboardArrowUp = require('../svg-icons/hardware/keyboard-arrow-up');
var _keyboardArrowUp2 = _interopRequireDefault(_keyboardArrowUp);
var _keyboardArrowDown = require('../svg-icons/hardware/keyboard-arrow-down');
var _keyboardArrowDown2 = _interopRequireDefault(_keyboardArrowDown);
var _IconButton = require('../IconButton');
var _IconButton2 = _interopRequireDefault(_IconButton);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function getStyles() {
return {
root: {
top: 0,
bottom: 0,
right: 4,
margin: 'auto',
position: 'absolute'
}
};
}
var CardExpandable = function (_Component) {
(0, _inherits3.default)(CardExpandable, _Component);
function CardExpandable() {
(0, _classCallCheck3.default)(this, CardExpandable);
return (0, _possibleConstructorReturn3.default)(this, (CardExpandable.__proto__ || (0, _getPrototypeOf2.default)(CardExpandable)).apply(this, arguments));
}
(0, _createClass3.default)(CardExpandable, [{
key: 'render',
value: function render() {
var styles = getStyles(this.props, this.context);
return _react2.default.createElement(
_IconButton2.default,
{
style: (0, _simpleAssign2.default)(styles.root, this.props.style),
onClick: this.props.onExpanding,
iconStyle: this.props.iconStyle
},
this.props.expanded ? this.props.openIcon : this.props.closeIcon
);
}
}]);
return CardExpandable;
}(_react.Component);
CardExpandable.contextTypes = {
muiTheme: _propTypes2.default.object.isRequired
};
CardExpandable.defaultProps = {
closeIcon: _react2.default.createElement(_keyboardArrowDown2.default, null),
openIcon: _react2.default.createElement(_keyboardArrowUp2.default, null)
};
CardExpandable.propTypes = process.env.NODE_ENV !== "production" ? {
closeIcon: _propTypes2.default.node,
expanded: _propTypes2.default.bool,
iconStyle: _propTypes2.default.object,
onExpanding: _propTypes2.default.func.isRequired,
openIcon: _propTypes2.default.node,
style: _propTypes2.default.object
} : {};
exports.default = CardExpandable; | yaolei/Samoyed | node_modules/material-ui/Card/CardExpandable.js | JavaScript | apache-2.0 | 3,257 |
/**********************************************************************************
* $URL$
* $Id$
***********************************************************************************
*
* Copyright (c) 2004, 2005, 2006, 2007, 2008 The Sakai Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ECL-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************************/
package org.sakaiproject.tool.assessment.jsf.renderer;
import java.io.IOException;
import java.util.Map;
import javax.faces.component.UIComponent;
import javax.faces.component.UIInput;
import javax.faces.component.UIViewRoot;
import javax.faces.component.ValueHolder;
import javax.faces.context.FacesContext;
import javax.faces.context.ResponseWriter;
import javax.faces.render.Renderer;
import org.sakaiproject.util.ResourceLoader;
import org.sakaiproject.tool.assessment.jsf.renderer.util.RendererUtil;
import org.sakaiproject.tool.assessment.ui.listener.util.ContextUtil;
/**
* <p>Description: </p>
 * <p>Render the custom date picker control.</p>
* <p>Copyright: Copyright (c) 2004</p>
* <p>Organization: Sakai Project</p>
* @author Ed Smiley
* @version $id: $
*/
public class DatePickerRenderer extends Renderer
{
// icon height and width
private static final String HEIGHT = "16";
private static final String WIDTH = "16";
private static final String CURSORSTYLE = "cursor:pointer;";
//moved to properties
//private static final String CLICKALT = "Click Here to Pick Date";
public boolean supportsComponentType(UIComponent component)
{
return (component instanceof UIInput);
}
/**
* decode the value
* @param context
* @param component
*/
public void decode(FacesContext context, UIComponent component)
{
// we haven't added these attributes--yet--defensive programming...
if(RendererUtil.isDisabledOrReadonly(component))
{
return;
}
String clientId = component.getClientId(context);
Map requestParameterMap = context.getExternalContext()
.getRequestParameterMap();
String newValue = (String) requestParameterMap.get(clientId );
UIInput comp = (UIInput) component;
comp.setSubmittedValue(newValue);
}
public void encodeBegin(FacesContext context,
UIComponent component) throws IOException
{
    // No-op: all markup for this component is rendered in encodeEnd().
}
public void encodeChildren(FacesContext context,
UIComponent component) throws IOException
{
    // No-op: this component renders no children.
}
/**
* <p>Faces render output method .</p>
* <p>Method Generator: org.sakaiproject.tool.assessment.devtoolsRenderMaker</p>
*
* @param context <code>FacesContext</code> for the current request
* @param component <code>UIComponent</code> being rendered
*
* @throws IOException if an input/output error occurs
*/
public void encodeEnd(FacesContext context,
UIComponent component) throws IOException
{
ResourceLoader rb= new ResourceLoader("org.sakaiproject.tool.assessment.bundle.AssessmentSettingsMessages");
ResponseWriter writer = context.getResponseWriter();
String contextPath = context.getExternalContext()
.getRequestContextPath();
String jsfId = (String) component.getAttributes().get("id");
String id = jsfId;
if (component.getId() != null &&
!component.getId().startsWith(UIViewRoot.UNIQUE_ID_PREFIX))
{
id = component.getClientId(context);
}
Object value = null;
if (component instanceof UIInput)
{
value = ( (UIInput) component).getSubmittedValue();
}
if (value == null && component instanceof ValueHolder)
{
value = ( (ValueHolder) component).getValue();
}
String valString = "";
if (value != null)
{
valString = value.toString();
}
String type = "text";
String size = (String) component.getAttributes().get("size");
if (size == null)
{
size = "20";
}
String display_dateFormat= ContextUtil.getLocalizedString("org.sakaiproject.tool.assessment.bundle.GeneralMessages","output_data_picker_w_sec");
String genDate = null;
String prsDate = null;
if (display_dateFormat.toLowerCase().startsWith("dd")) {
genDate = "cal_gen_date2_dm";
prsDate = "cal_prs_date2_dm";
}
else {
genDate = "cal_gen_date2_md";
prsDate = "cal_prs_date2_md";
}
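// build a uniquely named calendar2 instance bound to this input; the random suffix avoids collisions when several pickers render on one page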
String calRand = "cal" + ("" + Math.random()).substring(2);
String calScript =
"var " + calRand + " = new calendar2(" +
"document.getElementById('" + id + "'), " + genDate + ", " + prsDate + ");" +
"" + calRand + ".year_scroll = true;" +
"" + calRand + ".time_comp = true;";
writer.write("<input type=\"" + type + "\" name=\"" + id +
"\" id=\"" + id + "\" size=\"" + size + "\" value=");
writer.write("\"" + valString + "\"> <img \n onclick=");
writer.write("\"javascript:" + calScript +
calRand + ".popup('','" + contextPath +
"/html/');\"\n");
// "/jsf/widget/datepicker/');\"\n");
writer.write(" width=\"" + WIDTH + "\"\n");
writer.write(" height=\"" + HEIGHT + "\"\n");
writer.write(" style=\"" + CURSORSTYLE + "\" ");
writer.write(" src=\"" + contextPath + "/images/calendar/cal.gif\"\n");
writer.write(" border=\"0\"\n");
writer.write(" id=\"_datePickerPop_" + id + "\"");
//writer.write(" alt=\"" + CLICKALT + "\"/>  \n");
writer.write(" alt=\"" + rb.getString("dp_CLICKALT") + "\"/>  \n");
}
}
| payten/nyu-sakai-10.4 | samigo/samigo-app/src/java/org/sakaiproject/tool/assessment/jsf/renderer/DatePickerRenderer.java | Java | apache-2.0 | 6,076 |
import {BrowserDomAdapter} from 'angular2/src/dom/browser_adapter';
import {PromiseWrapper} from 'angular2/src/facade/async';
import {List, ListWrapper, Map, MapWrapper} from 'angular2/src/facade/collection';
import {DateWrapper, Type, print} from 'angular2/src/facade/lang';
import {
Parser,
Lexer,
DynamicChangeDetection
} from 'angular2/src/change_detection/change_detection';
import {Compiler, CompilerCache} from 'angular2/src/core/compiler/compiler';
import {DirectiveResolver} from 'angular2/src/core/compiler/directive_resolver';
import {PipeResolver} from 'angular2/src/core/compiler/pipe_resolver';
import * as viewModule from 'angular2/src/core/annotations_impl/view';
import {Component, Directive, View} from 'angular2/angular2';
import {ViewResolver} from 'angular2/src/core/compiler/view_resolver';
import {UrlResolver} from 'angular2/src/services/url_resolver';
import {AppRootUrl} from 'angular2/src/services/app_root_url';
import {ComponentUrlMapper} from 'angular2/src/core/compiler/component_url_mapper';
import {reflector} from 'angular2/src/reflection/reflection';
import {ReflectionCapabilities} from 'angular2/src/reflection/reflection_capabilities';
import {getIntParameter, bindAction} from 'angular2/src/test_lib/benchmark_util';
import {ProtoViewFactory} from 'angular2/src/core/compiler/proto_view_factory';
import {
ViewLoader,
DefaultDomCompiler,
SharedStylesHost,
TemplateCloner
} from 'angular2/src/render/render';
import {DomElementSchemaRegistry} from 'angular2/src/render/dom/schema/dom_element_schema_registry';
export function main() {
BrowserDomAdapter.makeCurrent();
var count = getIntParameter('elements');
reflector.reflectionCapabilities = new ReflectionCapabilities();
var reader = new DirectiveResolver();
var pipeResolver = new PipeResolver();
var cache = new CompilerCache();
var viewResolver = new MultipleViewResolver(
count, [BenchmarkComponentNoBindings, BenchmarkComponentWithBindings]);
var urlResolver = new UrlResolver();
var appRootUrl = new AppRootUrl("");
var renderCompiler = new DefaultDomCompiler(
new DomElementSchemaRegistry(), new TemplateCloner(-1), new Parser(new Lexer()),
new ViewLoader(null, null, null), new SharedStylesHost(), 'a');
var compiler = new Compiler(reader, pipeResolver, [], cache, viewResolver,
new ComponentUrlMapper(), urlResolver, renderCompiler,
new ProtoViewFactory(new DynamicChangeDetection()), appRootUrl);
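// Wraps a compile action so each run prints its wall-clock duration once the returned promise resolves.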
function measureWrapper(func, desc) {
return function() {
var begin = DateWrapper.now();
print(`[${desc}] Begin...`);
var onSuccess = function(_) {
var elapsedMs = DateWrapper.toMillis(DateWrapper.now()) - DateWrapper.toMillis(begin);
print(`[${desc}] ...done, took ${elapsedMs} ms`);
};
PromiseWrapper.then(func(), onSuccess, null);
};
}
function compileNoBindings() {
cache.clear();
return compiler.compileInHost(BenchmarkComponentNoBindings);
}
function compileWithBindings() {
cache.clear();
return compiler.compileInHost(BenchmarkComponentWithBindings);
}
bindAction('#compileNoBindings', measureWrapper(compileNoBindings, 'No Bindings'));
bindAction('#compileWithBindings', measureWrapper(compileWithBindings, 'With Bindings'));
}
@Directive({selector: '[dir0]', properties: ['prop: attr0']})
class Dir0 {
}
@Directive({selector: '[dir1]', properties: ['prop: attr1']})
class Dir1 {
constructor(dir0: Dir0) {}
}
@Directive({selector: '[dir2]', properties: ['prop: attr2']})
class Dir2 {
constructor(dir1: Dir1) {}
}
@Directive({selector: '[dir3]', properties: ['prop: attr3']})
class Dir3 {
constructor(dir2: Dir2) {}
}
@Directive({selector: '[dir4]', properties: ['prop: attr4']})
class Dir4 {
constructor(dir3: Dir3) {}
}
class MultipleViewResolver extends ViewResolver {
_multiple: number;
_cache: Map<any, any>;
constructor(multiple: number, components: List<Type>) {
super();
this._multiple = multiple;
this._cache = new Map();
ListWrapper.forEach(components, (c) => this._warmUp(c));
}
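// Caches each component's template repeated 'multiple' times so resolve() can serve the enlarged view synchronously.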
_warmUp(component: Type) {
var view = super.resolve(component);
var multiplier = ListWrapper.createFixedSize(this._multiple);
for (var i = 0; i < this._multiple; ++i) {
multiplier[i] = view.template;
}
this._cache.set(component, ListWrapper.join(multiplier, ''));
}
resolve(component: Type): viewModule.View {
var view = super.resolve(component);
var myView = new viewModule.View(
{template:<string>this._cache.get(component), directives: view.directives});
return myView;
}
}
@Component({selector: 'cmp-nobind'})
@View({
directives: [Dir0, Dir1, Dir2, Dir3, Dir4],
template: `
<div class="class0 class1 class2 class3 class4 " nodir0="" attr0="value0" nodir1="" attr1="value1" nodir2="" attr2="value2" nodir3="" attr3="value3" nodir4="" attr4="value4">
<div class="class0 class1 class2 class3 class4 " nodir0="" attr0="value0" nodir1="" attr1="value1" nodir2="" attr2="value2" nodir3="" attr3="value3" nodir4="" attr4="value4">
<div class="class0 class1 class2 class3 class4 " nodir0="" attr0="value0" nodir1="" attr1="value1" nodir2="" attr2="value2" nodir3="" attr3="value3" nodir4="" attr4="value4">
<div class="class0 class1 class2 class3 class4 " nodir0="" attr0="value0" nodir1="" attr1="value1" nodir2="" attr2="value2" nodir3="" attr3="value3" nodir4="" attr4="value4">
<div class="class0 class1 class2 class3 class4 " nodir0="" attr0="value0" nodir1="" attr1="value1" nodir2="" attr2="value2" nodir3="" attr3="value3" nodir4="" attr4="value4">
</div>
</div>
</div>
</div>
</div>`
})
class BenchmarkComponentNoBindings {
}
@Component({selector: 'cmp-withbind'})
@View({
directives: [Dir0, Dir1, Dir2, Dir3, Dir4],
template: `
<div class="class0 class1 class2 class3 class4 " dir0="" [attr0]="value0" dir1="" [attr1]="value1" dir2="" [attr2]="value2" dir3="" [attr3]="value3" dir4="" [attr4]="value4">
{{inter0}}{{inter1}}{{inter2}}{{inter3}}{{inter4}}
<div class="class0 class1 class2 class3 class4 " dir0="" [attr0]="value0" dir1="" [attr1]="value1" dir2="" [attr2]="value2" dir3="" [attr3]="value3" dir4="" [attr4]="value4">
{{inter0}}{{inter1}}{{inter2}}{{inter3}}{{inter4}}
<div class="class0 class1 class2 class3 class4 " dir0="" [attr0]="value0" dir1="" [attr1]="value1" dir2="" [attr2]="value2" dir3="" [attr3]="value3" dir4="" [attr4]="value4">
{{inter0}}{{inter1}}{{inter2}}{{inter3}}{{inter4}}
<div class="class0 class1 class2 class3 class4 " dir0="" [attr0]="value0" dir1="" [attr1]="value1" dir2="" [attr2]="value2" dir3="" [attr3]="value3" dir4="" [attr4]="value4">
{{inter0}}{{inter1}}{{inter2}}{{inter3}}{{inter4}}
<div class="class0 class1 class2 class3 class4 " dir0="" [attr0]="value0" dir1="" [attr1]="value1" dir2="" [attr2]="value2" dir3="" [attr3]="value3" dir4="" [attr4]="value4">
{{inter0}}{{inter1}}{{inter2}}{{inter3}}{{inter4}}
</div>
</div>
</div>
</div>
</div>`
})
class BenchmarkComponentWithBindings {
}
| tkarling/angular | modules/benchmarks/src/compiler/compiler_benchmark.ts | TypeScript | apache-2.0 | 7,166 |
/**
* Copyright 2014 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Class: IBitmapDrawable
module Shumway.AVMX.AS.flash.display {
import notImplemented = Shumway.Debug.notImplemented;
import axCoerceString = Shumway.AVMX.axCoerceString;
export interface IBitmapDrawable {
// JS -> AS Bindings
// AS -> JS Bindings
}
}
| yurydelendik/shumway | src/flash/display/IBitmapDrawable.ts | TypeScript | apache-2.0 | 891 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.lens.cube.metadata;
import java.lang.reflect.Constructor;
import java.util.*;
import java.util.Map.Entry;
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.metadata.*;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.mapred.TextInputFormat;
import com.google.common.collect.Maps;
/**
* Storage is a named entity that represents the underlying storage of the data.
*/
public abstract class Storage extends AbstractCubeTable implements PartitionMetahook {
private static final List<FieldSchema> COLUMNS = new ArrayList<FieldSchema>();
static {
COLUMNS.add(new FieldSchema("dummy", "string", "dummy column"));
}
protected Storage(String name, Map<String, String> properties) {
super(name, COLUMNS, properties, 0L);
addProperties();
}
public Storage(Table hiveTable) {
super(hiveTable);
}
/**
* Get the name prefix of the storage
*
* @return Name followed by storage separator
*/
public String getPrefix() {
return getPrefix(getName());
}
@Override
public CubeTableType getTableType() {
return CubeTableType.STORAGE;
}
@Override
public Set<String> getStorages() {
throw new NotImplementedException();
}
@Override
protected void addProperties() {
super.addProperties();
getProperties().put(MetastoreUtil.getStorageClassKey(getName()), getClass().getCanonicalName());
}
/**
* Get the name prefix of the storage
*
* @param name Name of the storage
* @return Name followed by storage separator
*/
public static String getPrefix(String name) {
return name + StorageConstants.STORGAE_SEPARATOR;
}
public static final class LatestInfo {
Map<String, LatestPartColumnInfo> latestParts = new HashMap<String, LatestPartColumnInfo>();
Partition part = null;
void addLatestPartInfo(String partCol, LatestPartColumnInfo partInfo) {
latestParts.put(partCol, partInfo);
}
void setPart(Partition part) {
this.part = part;
}
}
public static final class LatestPartColumnInfo extends HashMap<String, String> {
public LatestPartColumnInfo(Map<String, String> partParams) {
putAll(partParams);
}
public Map<String, String> getPartParams(Map<String, String> parentParams) {
putAll(parentParams);
return this;
}
}
/**
* Get the storage table descriptor for the given parent table.
*
* @param client The metastore client
* @param parent Is either Fact or Dimension table
* @param crtTbl Create table info
* @return Table describing the storage table
* @throws HiveException
*/
public Table getStorageTable(Hive client, Table parent, StorageTableDesc crtTbl) throws HiveException {
String storageTableName = MetastoreUtil.getStorageTableName(parent.getTableName(), this.getPrefix());
Table tbl = client.getTable(storageTableName, false);
if (tbl == null) {
tbl = client.newTable(storageTableName);
}
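// start from a copy of the parent table's storage descriptor so the storage table inherits its schema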
tbl.getTTable().setSd(new StorageDescriptor(parent.getTTable().getSd()));
if (crtTbl.getTblProps() != null) {
tbl.getTTable().getParameters().putAll(crtTbl.getTblProps());
}
if (crtTbl.getPartCols() != null) {
tbl.setPartCols(crtTbl.getPartCols());
}
if (crtTbl.getNumBuckets() != -1) {
tbl.setNumBuckets(crtTbl.getNumBuckets());
}
if (!StringUtils.isBlank(crtTbl.getStorageHandler())) {
tbl.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE,
crtTbl.getStorageHandler());
}
HiveStorageHandler storageHandler = tbl.getStorageHandler();
if (crtTbl.getSerName() == null) {
if (storageHandler == null || storageHandler.getSerDeClass() == null) {
tbl.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
} else {
String serDeClassName = storageHandler.getSerDeClass().getName();
tbl.setSerializationLib(serDeClassName);
}
} else {
// let's validate that the serde exists
tbl.setSerializationLib(crtTbl.getSerName());
}
if (crtTbl.getFieldDelim() != null) {
tbl.setSerdeParam(serdeConstants.FIELD_DELIM, crtTbl.getFieldDelim());
tbl.setSerdeParam(serdeConstants.SERIALIZATION_FORMAT, crtTbl.getFieldDelim());
}
if (crtTbl.getFieldEscape() != null) {
tbl.setSerdeParam(serdeConstants.ESCAPE_CHAR, crtTbl.getFieldEscape());
}
if (crtTbl.getCollItemDelim() != null) {
tbl.setSerdeParam(serdeConstants.COLLECTION_DELIM, crtTbl.getCollItemDelim());
}
if (crtTbl.getMapKeyDelim() != null) {
tbl.setSerdeParam(serdeConstants.MAPKEY_DELIM, crtTbl.getMapKeyDelim());
}
if (crtTbl.getLineDelim() != null) {
tbl.setSerdeParam(serdeConstants.LINE_DELIM, crtTbl.getLineDelim());
}
if (crtTbl.getSerdeProps() != null) {
for (Entry<String, String> m : crtTbl.getSerdeProps().entrySet()) {
tbl.setSerdeParam(m.getKey(), m.getValue());
}
}
if (crtTbl.getBucketCols() != null) {
tbl.setBucketCols(crtTbl.getBucketCols());
}
if (crtTbl.getSortCols() != null) {
tbl.setSortCols(crtTbl.getSortCols());
}
if (crtTbl.getComment() != null) {
tbl.setProperty("comment", crtTbl.getComment());
}
if (crtTbl.getLocation() != null) {
tbl.setDataLocation(new Path(crtTbl.getLocation()));
}
if (crtTbl.getSkewedColNames() != null) {
tbl.setSkewedColNames(crtTbl.getSkewedColNames());
}
if (crtTbl.getSkewedColValues() != null) {
tbl.setSkewedColValues(crtTbl.getSkewedColValues());
}
tbl.setStoredAsSubDirectories(crtTbl.isStoredAsSubDirectories());
if (crtTbl.getInputFormat() != null) {
tbl.setInputFormatClass(crtTbl.getInputFormat());
} else {
tbl.setInputFormatClass(TextInputFormat.class.getName());
}
if (crtTbl.getOutputFormat() != null) {
tbl.setOutputFormatClass(crtTbl.getOutputFormat());
} else {
tbl.setOutputFormatClass(IgnoreKeyTextOutputFormat.class.getName());
}
tbl.getTTable().getSd().setInputFormat(tbl.getInputFormatClass().getName());
tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName());
if (crtTbl.isExternal()) {
tbl.setProperty("EXTERNAL", "TRUE");
tbl.setTableType(TableType.EXTERNAL_TABLE);
}
return tbl;
}
/**
 * Add a single partition to storage. Just calls {@link #addPartitions}.
 * @param client hive client instance
 * @param addPartitionDesc partition to be added
 * @param latestInfo new latest info for the partition's part columns
 * @throws HiveException
 */
public List<Partition> addPartition(Hive client, StoragePartitionDesc addPartitionDesc, LatestInfo latestInfo)
throws HiveException {
Map<Map<String, String>, LatestInfo> latestInfos = Maps.newHashMap();
latestInfos.put(addPartitionDesc.getNonTimePartSpec(), latestInfo);
return addPartitions(client, addPartitionDesc.getCubeTableName(), addPartitionDesc.getUpdatePeriod(),
Collections.singletonList(addPartitionDesc), latestInfos);
}
/**
* Add given partitions in the underlying hive table and update latest partition links
*
* @param client hive client instance
* @param factOrDimTable fact or dim name
* @param updatePeriod update period of partitions.
* @param storagePartitionDescs all partitions to be added
* @param latestInfos new latest info; at least one partition for the latest value exists for each part
* column
* @throws HiveException
*/
public List<Partition> addPartitions(Hive client, String factOrDimTable, UpdatePeriod updatePeriod,
List<StoragePartitionDesc> storagePartitionDescs,
Map<Map<String, String>, LatestInfo> latestInfos) throws HiveException {
preAddPartitions(storagePartitionDescs);
Map<Map<String, String>, Map<String, Integer>> latestPartIndexForPartCols = Maps.newHashMap();
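// for each non-time partition spec, remembers the index (within addParts) of the partition holding the latest timestamp per time part column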
boolean success = false;
try {
String tableName = MetastoreUtil.getStorageTableName(factOrDimTable, this.getPrefix());
String dbName = SessionState.get().getCurrentDatabase();
AddPartitionDesc addParts = new AddPartitionDesc(dbName, tableName, true);
Table storageTbl = client.getTable(dbName, tableName);
for (StoragePartitionDesc addPartitionDesc : storagePartitionDescs) {
String location = null;
if (addPartitionDesc.getLocation() != null) {
Path partLocation = new Path(addPartitionDesc.getLocation());
if (partLocation.isAbsolute()) {
location = addPartitionDesc.getLocation();
} else {
location = new Path(storageTbl.getPath(), partLocation).toString();
}
}
Map<String, String> partParams = addPartitionDesc.getPartParams();
if (partParams == null) {
partParams = new HashMap<String, String>();
}
partParams.put(MetastoreConstants.PARTITION_UPDATE_PERIOD, addPartitionDesc.getUpdatePeriod().name());
addParts.addPartition(addPartitionDesc.getStoragePartSpec(), location);
int curIndex = addParts.getPartitionCount() - 1;
addParts.getPartition(curIndex).setPartParams(partParams);
addParts.getPartition(curIndex).setInputFormat(addPartitionDesc.getInputFormat());
addParts.getPartition(curIndex).setOutputFormat(addPartitionDesc.getOutputFormat());
addParts.getPartition(curIndex).setNumBuckets(addPartitionDesc.getNumBuckets());
addParts.getPartition(curIndex).setCols(addPartitionDesc.getCols());
addParts.getPartition(curIndex).setSerializationLib(addPartitionDesc.getSerializationLib());
addParts.getPartition(curIndex).setSerdeParams(addPartitionDesc.getSerdeParams());
addParts.getPartition(curIndex).setBucketCols(addPartitionDesc.getBucketCols());
addParts.getPartition(curIndex).setSortCols(addPartitionDesc.getSortCols());
if (latestInfos != null && latestInfos.get(addPartitionDesc.getNonTimePartSpec()) != null) {
for (Map.Entry<String, LatestPartColumnInfo> entry : latestInfos
.get(addPartitionDesc.getNonTimePartSpec()).latestParts.entrySet()) {
if (addPartitionDesc.getTimePartSpec().containsKey(entry.getKey())
&& entry.getValue().get(MetastoreUtil.getLatestPartTimestampKey(entry.getKey())).equals(
updatePeriod.format().format(addPartitionDesc.getTimePartSpec().get(entry.getKey())))) {
if (latestPartIndexForPartCols.get(addPartitionDesc.getNonTimePartSpec()) == null) {
latestPartIndexForPartCols.put(addPartitionDesc.getNonTimePartSpec(),
Maps.<String, Integer>newHashMap());
}
latestPartIndexForPartCols.get(addPartitionDesc.getNonTimePartSpec()).put(entry.getKey(), curIndex);
}
}
}
}
if (latestInfos != null) {
for (Map.Entry<Map<String, String>, LatestInfo> entry1 : latestInfos.entrySet()) {
Map<String, String> nonTimeParts = entry1.getKey();
LatestInfo latestInfo = entry1.getValue();
for (Map.Entry<String, LatestPartColumnInfo> entry : latestInfo.latestParts.entrySet()) {
// symlink this partition to latest
List<Partition> latest;
String latestPartCol = entry.getKey();
try {
latest = client
.getPartitionsByFilter(storageTbl, StorageConstants.getLatestPartFilter(latestPartCol, nonTimeParts));
} catch (Exception e) {
throw new HiveException("Could not get latest partition", e);
}
if (!latest.isEmpty()) {
client.dropPartition(storageTbl.getTableName(), latest.get(0).getValues(), false);
}
if (latestPartIndexForPartCols.get(nonTimeParts).containsKey(latestPartCol)) {
AddPartitionDesc.OnePartitionDesc latestPartWithFullTimestamp = addParts.getPartition(
latestPartIndexForPartCols.get(nonTimeParts).get(latestPartCol));
addParts.addPartition(
StorageConstants.getLatestPartSpec(latestPartWithFullTimestamp.getPartSpec(), latestPartCol),
latestPartWithFullTimestamp.getLocation());
int curIndex = addParts.getPartitionCount() - 1;
addParts.getPartition(curIndex).setPartParams(entry.getValue().getPartParams(
latestPartWithFullTimestamp.getPartParams()));
addParts.getPartition(curIndex).setInputFormat(latestPartWithFullTimestamp.getInputFormat());
addParts.getPartition(curIndex).setOutputFormat(latestPartWithFullTimestamp.getOutputFormat());
addParts.getPartition(curIndex).setNumBuckets(latestPartWithFullTimestamp.getNumBuckets());
addParts.getPartition(curIndex).setCols(latestPartWithFullTimestamp.getCols());
addParts.getPartition(curIndex).setSerializationLib(latestPartWithFullTimestamp.getSerializationLib());
addParts.getPartition(curIndex).setSerdeParams(latestPartWithFullTimestamp.getSerdeParams());
addParts.getPartition(curIndex).setBucketCols(latestPartWithFullTimestamp.getBucketCols());
addParts.getPartition(curIndex).setSortCols(latestPartWithFullTimestamp.getSortCols());
}
}
}
}
List<Partition> partitionsAdded = client.createPartitions(addParts);
success = true;
return partitionsAdded;
} finally {
if (success) {
commitAddPartitions(storagePartitionDescs);
} else {
rollbackAddPartitions(storagePartitionDescs);
}
}
}
/**
* Update existing partition
* @param client hive client instance
* @param fact fact name
* @param partition partition to be updated
* @throws InvalidOperationException
* @throws HiveException
*/
public void updatePartition(Hive client, String fact, Partition partition)
throws InvalidOperationException, HiveException {
client.alterPartition(MetastoreUtil.getFactOrDimtableStorageTableName(fact, getName()), partition);
}
/**
* Update existing partitions
* @param client hive client instance
* @param fact fact name
* @param partitions partitions to be updated
* @throws InvalidOperationException
* @throws HiveException
*/
public void updatePartitions(Hive client, String fact, List<Partition> partitions)
throws InvalidOperationException, HiveException {
boolean success = false;
try {
client.alterPartitions(MetastoreUtil.getFactOrDimtableStorageTableName(fact, getName()), partitions);
success = true;
} finally {
if (success) {
commitUpdatePartition(partitions);
} else {
rollbackUpdatePartition(partitions);
}
}
}
/**
* Drop the partition in the underlying hive table and update latest partition link
*
* @param client The metastore client
* @param storageTableName TableName
* @param partVals Partition specification
* @param updateLatestInfo The latest partition info if it needs update, null if latest should not be updated
* @param nonTimePartSpec non-time partition spec identifying the partition being dropped
* @throws HiveException
*/
public void dropPartition(Hive client, String storageTableName, List<String> partVals,
Map<String, LatestInfo> updateLatestInfo, Map<String, String> nonTimePartSpec) throws HiveException {
preDropPartition(storageTableName, partVals);
boolean success = false;
try {
client.dropPartition(storageTableName, partVals, false);
String dbName = SessionState.get().getCurrentDatabase();
Table storageTbl = client.getTable(storageTableName);
// update latest info
if (updateLatestInfo != null) {
for (Entry<String, LatestInfo> entry : updateLatestInfo.entrySet()) {
String latestPartCol = entry.getKey();
// symlink this partition to latest
List<Partition> latestParts;
try {
latestParts = client.getPartitionsByFilter(storageTbl,
StorageConstants.getLatestPartFilter(latestPartCol, nonTimePartSpec));
MetastoreUtil.filterPartitionsByNonTimeParts(latestParts, nonTimePartSpec, latestPartCol);
} catch (Exception e) {
throw new HiveException("Could not get latest partition", e);
}
if (!latestParts.isEmpty()) {
assert latestParts.size() == 1;
client.dropPartition(storageTbl.getTableName(), latestParts.get(0).getValues(), false);
}
LatestInfo latest = entry.getValue();
if (latest != null && latest.part != null) {
AddPartitionDesc latestPart = new AddPartitionDesc(dbName, storageTableName, true);
latestPart.addPartition(StorageConstants.getLatestPartSpec(latest.part.getSpec(), latestPartCol),
latest.part.getLocation());
latestPart.getPartition(0).setPartParams(
latest.latestParts.get(latestPartCol).getPartParams(latest.part.getParameters()));
latestPart.getPartition(0).setInputFormat(latest.part.getInputFormatClass().getCanonicalName());
latestPart.getPartition(0).setOutputFormat(latest.part.getOutputFormatClass().getCanonicalName());
latestPart.getPartition(0).setNumBuckets(latest.part.getBucketCount());
latestPart.getPartition(0).setCols(latest.part.getCols());
latestPart.getPartition(0).setSerializationLib(
latest.part.getTPartition().getSd().getSerdeInfo().getSerializationLib());
latestPart.getPartition(0).setSerdeParams(
latest.part.getTPartition().getSd().getSerdeInfo().getParameters());
latestPart.getPartition(0).setBucketCols(latest.part.getBucketCols());
latestPart.getPartition(0).setSortCols(latest.part.getSortCols());
client.createPartitions(latestPart);
}
}
}
success = true;
} finally {
if (success) {
commitDropPartition(storageTableName, partVals);
} else {
rollbackDropPartition(storageTableName, partVals);
}
}
}
static Storage createInstance(Table tbl) throws HiveException {
String storageName = tbl.getTableName();
String storageClassName = tbl.getParameters().get(MetastoreUtil.getStorageClassKey(storageName));
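// instantiate the concrete Storage subclass recorded in the table's storage class property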
try {
Class<?> clazz = Class.forName(storageClassName);
Constructor<?> constructor = clazz.getConstructor(Table.class);
return (Storage) constructor.newInstance(tbl);
} catch (Exception e) {
throw new HiveException("Could not create storage class " + storageClassName, e);
}
}
}
| adeelmahmood/lens | lens-cube/src/main/java/org/apache/lens/cube/metadata/Storage.java | Java | apache-2.0 | 20,153 |
module MiqPolicyController::PolicyProfiles
extend ActiveSupport::Concern
def profile_edit
case params[:button]
when "cancel"
@edit = nil
@profile = MiqPolicySet.find_by_id(session[:edit][:profile_id]) if session[:edit] && session[:edit][:profile_id]
if !@profile || @profile.id.blank?
add_flash(_("Add of new %{models} was cancelled by the user") %
{:models => ui_lookup(:model => "MiqPolicySet")})
else
add_flash(_("Edit of %{model} \"%{name}\" was cancelled by the user") % {:model => ui_lookup(:model => "MiqPolicySet"), :name => @profile.description})
end
get_node_info(x_node)
replace_right_cell(@nodetype)
return
when "reset", nil # Reset or first time in
profile_build_edit_screen
@sb[:action] = "profile_edit"
if params[:button] == "reset"
add_flash(_("All changes have been reset"), :warning)
end
replace_right_cell("pp")
return
end
# Load @edit/vars for other buttons
id = params[:id] ? params[:id] : "new"
return unless load_edit("profile_edit__#{id}", "replace_cell__explorer")
@profile = @edit[:profile_id] ? MiqPolicySet.find_by_id(@edit[:profile_id]) : MiqPolicySet.new
case params[:button]
when "save", "add"
assert_privileges("profile_#{@profile.id ? "edit" : "new"}")
add_flash(_("%{model} must contain at least one %{field}") % {:model => ui_lookup(:model => "MiqPolicySet"), :field => ui_lookup(:model => "MiqPolicy")}, :error) if @edit[:new][:policies].length == 0 # At least one member is required
profile = @profile.id.blank? ? MiqPolicySet.new : MiqPolicySet.find(@profile.id) # Get new or existing record
profile.description = @edit[:new][:description]
profile.notes = @edit[:new][:notes]
if profile.valid? && !@flash_array && profile.save
policies = profile.members # Get the sets members
current = []
policies.each { |p| current.push(p.id) } # Build an array of the current policy ids
mems = @edit[:new][:policies].invert # Get the ids from the member list box
begin
policies.each { |c| profile.remove_member(MiqPolicy.find(c)) unless mems.include?(c.id) } # Remove any policies no longer in the members list box
mems.each_key { |m| profile.add_member(MiqPolicy.find(m)) unless current.include?(m) } # Add any policies not in the set
rescue StandardError => bang
add_flash(_("Error during 'Policy Profile %{params}': %{messages}") %
{:params => params[:button], :messages => bang.message}, :error)
end
AuditEvent.success(build_saved_audit(profile, params[:button] == "add"))
flash_key = params[:button] == "save" ? _("%{model} \"%{name}\" was saved") :
_("%{model} \"%{name}\" was added")
add_flash(flash_key % {:model => ui_lookup(:model => "MiqPolicySet"), :name => @edit[:new][:description]})
profile_get_info(MiqPolicySet.find(profile.id))
@edit = nil
@nodetype = "pp"
@new_profile_node = "pp-#{to_cid(profile.id)}"
replace_right_cell("pp", [:policy_profile])
else
profile.errors.each do |field, msg|
add_flash("#{field.to_s.capitalize} #{msg}", :error)
end
replace_right_cell("pp")
end
when "move_right", "move_left", "move_allleft"
handle_selection_buttons(:policies)
session[:changed] = (@edit[:new] != @edit[:current])
replace_right_cell("pp")
end
end
def profile_delete
assert_privileges("profile_delete")
profiles = []
# showing 1 policy set, delete it
if params[:id].nil? || MiqPolicySet.find_by_id(params[:id]).nil?
add_flash(_("%{models} no longer exists") % {:models => ui_lookup(:model => "MiqPolicySet")},
:error)
else
profiles.push(params[:id])
end
process_profiles(profiles, "destroy") unless profiles.empty?
add_flash(_("The selected %{models} was deleted") %
{:models => ui_lookup(:models => "MiqPolicySet")}) if @flash_array.nil?
self.x_node = @new_profile_node = 'root'
get_node_info('root')
replace_right_cell('root', [:policy_profile])
end
def profile_field_changed
return unless load_edit("profile_edit__#{params[:id]}", "replace_cell__explorer")
@profile = @edit[:profile_id] ? MiqPolicySet.find_by_id(@edit[:profile_id]) : MiqPolicySet.new
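# copy submitted form fields into @edit, treating blank values as nil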
@edit[:new][:description] = params[:description].blank? ? nil : params[:description] if params[:description]
@edit[:new][:notes] = params[:notes].blank? ? nil : params[:notes] if params[:notes]
send_button_changes
end
private
def process_profiles(profiles, task)
process_elements(profiles, MiqPolicySet, task)
end
def profile_build_edit_screen
@edit = {}
@edit[:new] = {}
@edit[:current] = {}
@profile = params[:id] ? MiqPolicySet.find(params[:id]) : MiqPolicySet.new # Get existing or new record
@edit[:key] = "profile_edit__#{@profile.id || "new"}"
@edit[:rec_id] = @profile.id || nil
@edit[:profile_id] = @profile.id
@edit[:new][:description] = @profile.description
@edit[:new][:notes] = @profile.notes
@edit[:new][:policies] = {}
policies = @profile.members # Get the member sets
policies.each { |p| @edit[:new][:policies][ui_lookup(:model => p.towhat) + " #{p.mode.capitalize}: " + p.description] = p.id } # Build a hash for the members list box
@edit[:choices] = {}
MiqPolicy.all.each do |p|
@edit[:choices][ui_lookup(:model => p.towhat) + " #{p.mode.capitalize}: " + p.description] = p.id # Build a hash for the policies to choose from
end
@edit[:new][:policies].each_key do |key|
@edit[:choices].delete(key) # Remove any policies that are in the members list box
end
@edit[:current] = copy_hash(@edit[:new])
@embedded = true
@in_a_form = true
@edit[:current][:add] = true if @edit[:profile_id].blank? # Force changed to be true if adding a record
session[:changed] = (@edit[:new] != @edit[:current])
end
def profile_get_all
@profiles = MiqPolicySet.all.sort_by { |ps| ps.description.downcase }
set_search_text
@profiles = apply_search_filter(@search_text, @profiles) unless @search_text.blank?
@right_cell_text = _("All %{models}") % {:models => ui_lookup(:models => "MiqPolicySet")}
@right_cell_div = "profile_list"
end
# Get information for a profile
def profile_get_info(profile)
@record = @profile = profile
@profile_policies = @profile.miq_policies.sort_by { |p| [p.towhat, p.mode, p.description.downcase] }
@right_cell_text = _("%{model} \"%{name}\"") % {:model => ui_lookup(:model => "MiqPolicySet"), :name => @profile.description}
@right_cell_div = "profile_details"
end
end
| maas-ufcg/manageiq | app/controllers/miq_policy_controller/policy_profiles.rb | Ruby | apache-2.0 | 7,000 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jmeter.protocol.http.visualizers;
import java.awt.BorderLayout;
import java.awt.Component;
import java.io.UnsupportedEncodingException;
import java.net.URL;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.swing.JPanel;
import javax.swing.JSplitPane;
import javax.swing.JTable;
import javax.swing.table.TableCellRenderer;
import javax.swing.table.TableColumn;
import org.apache.commons.lang3.StringUtils;
import org.apache.jmeter.config.Argument;
import org.apache.jmeter.gui.util.HeaderAsPropertyRenderer;
import org.apache.jmeter.gui.util.TextBoxDialoger.TextBoxDoubleClick;
import org.apache.jmeter.protocol.http.config.MultipartUrlConfig;
import org.apache.jmeter.protocol.http.sampler.HTTPSampleResult;
import org.apache.jmeter.protocol.http.util.HTTPConstants;
import org.apache.jmeter.testelement.property.JMeterProperty;
import org.apache.jmeter.util.JMeterUtils;
import org.apache.jmeter.visualizers.RequestView;
import org.apache.jmeter.visualizers.SamplerResultTab.RowResult;
import org.apache.jmeter.visualizers.SearchTextExtension;
import org.apache.jmeter.visualizers.SearchTextExtension.ISearchTextExtensionProvider;
import org.apache.jorphan.gui.GuiUtils;
import org.apache.jorphan.gui.ObjectTableModel;
import org.apache.jorphan.gui.RendererUtils;
import org.apache.jorphan.reflect.Functor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Specialized panel to view an HTTP request in parsed form
 */
public class RequestViewHTTP implements RequestView {
private static final Logger log = LoggerFactory.getLogger(RequestViewHTTP.class);
private static final String KEY_LABEL = "view_results_table_request_tab_http"; //$NON-NLS-1$
private static final String CHARSET_DECODE = StandardCharsets.ISO_8859_1.name();
private static final String PARAM_CONCATENATE = "&"; //$NON-NLS-1$
private JPanel paneParsed;
private ObjectTableModel requestModel = null;
private ObjectTableModel paramsModel = null;
private ObjectTableModel headersModel = null;
private static final String[] COLUMNS_REQUEST = new String[] {
" ", // one space for blank header // $NON-NLS-1$
" " }; // one space for blank header // $NON-NLS-1$
private static final String[] COLUMNS_PARAMS = new String[] {
"view_results_table_request_params_key", // $NON-NLS-1$
"view_results_table_request_params_value" }; // $NON-NLS-1$
private static final String[] COLUMNS_HEADERS = new String[] {
"view_results_table_request_headers_key", // $NON-NLS-1$
"view_results_table_request_headers_value" }; // $NON-NLS-1$
private JTable tableRequest = null;
private JTable tableParams = null;
private JTable tableHeaders = null;
// Request table column renderers
private static final TableCellRenderer[] RENDERERS_REQUEST = new TableCellRenderer[] {
null, // Key
null, // Value
};
// Request params column renderers
private static final TableCellRenderer[] RENDERERS_PARAMS = new TableCellRenderer[] {
null, // Key
null, // Value
};
// Request headers column renderers
private static final TableCellRenderer[] RENDERERS_HEADERS = new TableCellRenderer[] {
null, // Key
null, // Value
};
private SearchTextExtension searchTextExtension;
/**
* Pane to view HTTP request sample in view results tree
*/
public RequestViewHTTP() {
requestModel = new ObjectTableModel(COLUMNS_REQUEST, RowResult.class, // The object used for each row
new Functor[] {
new Functor("getKey"), // $NON-NLS-1$
new Functor("getValue") }, // $NON-NLS-1$
new Functor[] {
null, null }, new Class[] {
String.class, String.class }, false);
paramsModel = new ObjectTableModel(COLUMNS_PARAMS, RowResult.class, // The object used for each row
new Functor[] {
new Functor("getKey"), // $NON-NLS-1$
new Functor("getValue") }, // $NON-NLS-1$
new Functor[] {
null, null }, new Class[] {
String.class, String.class }, false);
headersModel = new ObjectTableModel(COLUMNS_HEADERS, RowResult.class, // The object used for each row
new Functor[] {
new Functor("getKey"), // $NON-NLS-1$
new Functor("getValue") }, // $NON-NLS-1$
new Functor[] {
null, null }, new Class[] {
String.class, String.class }, false);
}
/* (non-Javadoc)
* @see org.apache.jmeter.visualizers.request.RequestView#init()
*/
@Override
public void init() {
paneParsed = new JPanel(new BorderLayout(0, 5));
paneParsed.add(createRequestPane(), BorderLayout.CENTER);
this.searchTextExtension = new SearchTextExtension(new RequestViewHttpSearchProvider());
paneParsed.add(searchTextExtension.getSearchToolBar(), BorderLayout.NORTH);
}
/* (non-Javadoc)
* @see org.apache.jmeter.visualizers.request.RequestView#clearData()
*/
@Override
public void clearData() {
requestModel.clearData();
paramsModel.clearData();
headersModel.clearData(); // clear results table before filling
}
/* (non-Javadoc)
* @see org.apache.jmeter.visualizers.request.RequestView#setSamplerResult(java.lang.Object)
*/
@Override
public void setSamplerResult(Object objectResult) {
this.searchTextExtension.resetTextToFind();
if (objectResult instanceof HTTPSampleResult) {
HTTPSampleResult sampleResult = (HTTPSampleResult) objectResult;
// Display with same order HTTP protocol
requestModel.addRow(new RowResult(
JMeterUtils.getResString("view_results_table_request_http_method"), //$NON-NLS-1$
sampleResult.getHTTPMethod()));
// Parsed request headers
LinkedHashMap<String, String> lhm = JMeterUtils.parseHeaders(sampleResult.getRequestHeaders());
for (Map.Entry<String, String> entry : lhm.entrySet()) {
headersModel.addRow(new RowResult(entry.getKey(), entry.getValue()));
}
URL hUrl = sampleResult.getURL();
if (hUrl != null){ // can be null - e.g. if URL was invalid
requestModel.addRow(new RowResult(JMeterUtils
.getResString("view_results_table_request_http_protocol"), //$NON-NLS-1$
hUrl.getProtocol()));
requestModel.addRow(new RowResult(
JMeterUtils.getResString("view_results_table_request_http_host"), //$NON-NLS-1$
hUrl.getHost()));
int port = hUrl.getPort() == -1 ? hUrl.getDefaultPort() : hUrl.getPort();
requestModel.addRow(new RowResult(
JMeterUtils.getResString("view_results_table_request_http_port"), //$NON-NLS-1$
port));
requestModel.addRow(new RowResult(
JMeterUtils.getResString("view_results_table_request_http_path"), //$NON-NLS-1$
hUrl.getPath()));
String queryGet = hUrl.getQuery() == null ? "" : hUrl.getQuery(); //$NON-NLS-1$
boolean isMultipart = isMultipart(lhm);
// Concatenate query post if exists
String queryPost = sampleResult.getQueryString();
if (!isMultipart && StringUtils.isNotBlank(queryPost)) {
if (queryGet.length() > 0) {
queryGet += PARAM_CONCATENATE;
}
queryGet += queryPost;
}
if (StringUtils.isNotBlank(queryGet)) {
Set<Map.Entry<String, String[]>> keys = RequestViewHTTP.getQueryMap(queryGet).entrySet();
for (Map.Entry<String, String[]> entry : keys) {
for (String value : entry.getValue()) {
paramsModel.addRow(new RowResult(entry.getKey(), value));
}
}
}
if(isMultipart && StringUtils.isNotBlank(queryPost)) {
String contentType = lhm.get(HTTPConstants.HEADER_CONTENT_TYPE);
String boundaryString = extractBoundary(contentType);
MultipartUrlConfig urlconfig = new MultipartUrlConfig(boundaryString);
urlconfig.parseArguments(queryPost);
for(JMeterProperty prop : urlconfig.getArguments()) {
Argument arg = (Argument) prop.getObjectValue();
paramsModel.addRow(new RowResult(arg.getName(), arg.getValue()));
}
}
}
// Display cookie in headers table (same location on http protocol)
String cookie = sampleResult.getCookies();
if (cookie != null && cookie.length() > 0) {
headersModel.addRow(new RowResult(
JMeterUtils.getParsedLabel("view_results_table_request_http_cookie"), //$NON-NLS-1$
sampleResult.getCookies()));
}
}
else {
// add a message when no http sample
requestModel.addRow(new RowResult("", //$NON-NLS-1$
JMeterUtils.getResString("view_results_table_request_http_nohttp"))); //$NON-NLS-1$
}
}
/**
* Extract the multipart boundary
* @param contentType the content type header
* @return the boundary string
*/
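// e.g. "multipart/form-data; boundary=----abc; charset=UTF-8" yields "----abc"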
private String extractBoundary(String contentType) {
// Get the boundary string for the multiparts from the content type
String boundaryString = contentType.substring(contentType.toLowerCase(java.util.Locale.ENGLISH).indexOf("boundary=") + "boundary=".length());
//TODO check in the RFC if other char can be used as separator
String[] split = boundaryString.split(";");
if(split.length > 1) {
boundaryString = split[0];
}
return boundaryString;
}
/**
* check if the request is multipart
* @param headers the http request headers
* @return true if the request is multipart
*/
private boolean isMultipart(LinkedHashMap<String, String> headers) {
String contentType = headers.get(HTTPConstants.HEADER_CONTENT_TYPE);
return contentType != null && contentType.startsWith(HTTPConstants.MULTIPART_FORM_DATA);
}
/**
* @param query query to parse for param and value pairs
* @return Map params and values
*/
//TODO: move to utils class (JMeterUtils?)
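// e.g. getQueryMap("a=1&a=2&b=") yields { "a" -> ["1", "2"], "b" -> [""] }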
public static Map<String, String[]> getQueryMap(String query) {
Map<String, String[]> map = new HashMap<>();
String[] params = query.split(PARAM_CONCATENATE);
for (String param : params) {
String[] paramSplit = param.split("=");
String name = decodeQuery(paramSplit[0]);
// hack for SOAP request (generally)
if (name.trim().startsWith("<?")) { // $NON-NLS-1$
map.put(" ", new String[] {query}); //blank name // $NON-NLS-1$
return map;
}
// the post payload is not key=value
if((param.startsWith("=") && paramSplit.length == 1) || paramSplit.length > 2) {
map.put(" ", new String[] {query}); //blank name // $NON-NLS-1$
return map;
}
String value = "";
if(paramSplit.length>1) {
value = decodeQuery(paramSplit[1]);
}
String[] known = map.get(name);
if(known == null) {
known = new String[] {value};
}
else {
String[] tmp = new String[known.length+1];
tmp[tmp.length-1] = value;
System.arraycopy(known, 0, tmp, 0, known.length);
known = tmp;
}
map.put(name, known);
}
return map;
}
/**
* Decode a query string
*
* @param query
* to decode
* @return the decoded query string, if it can be url-decoded. Otherwise the original
* query will be returned.
*/
public static String decodeQuery(String query) {
if (query != null && query.length() > 0) {
try {
return URLDecoder.decode(query, CHARSET_DECODE); // better ISO-8859-1 than UTF-8
} catch (IllegalArgumentException | UnsupportedEncodingException e) {
log.warn(
"Error decoding query, maybe your request parameters should be encoded:"
+ query, e);
return query;
}
}
return "";
}
@Override
public JPanel getPanel() {
return paneParsed;
}
/**
* Create a pane with three tables (request, params, headers)
*
* @return Pane to display request data
*/
private Component createRequestPane() {
// Set up the 1st table Result with empty headers
tableRequest = new JTable(requestModel);
JMeterUtils.applyHiDPI(tableRequest);
tableRequest.setToolTipText(JMeterUtils.getResString("textbox_tooltip_cell")); // $NON-NLS-1$
tableRequest.addMouseListener(new TextBoxDoubleClick(tableRequest));
setFirstColumnPreferredAndMaxWidth(tableRequest);
RendererUtils.applyRenderers(tableRequest, RENDERERS_REQUEST);
// Set up the 2nd table
tableParams = new JTable(paramsModel);
JMeterUtils.applyHiDPI(tableParams);
tableParams.setToolTipText(JMeterUtils.getResString("textbox_tooltip_cell")); // $NON-NLS-1$
tableParams.addMouseListener(new TextBoxDoubleClick(tableParams));
TableColumn column = tableParams.getColumnModel().getColumn(0);
column.setPreferredWidth(160);
tableParams.getTableHeader().setDefaultRenderer(new HeaderAsPropertyRenderer());
RendererUtils.applyRenderers(tableParams, RENDERERS_PARAMS);
// Set up the 3rd table
tableHeaders = new JTable(headersModel);
JMeterUtils.applyHiDPI(tableHeaders);
tableHeaders.setToolTipText(JMeterUtils.getResString("textbox_tooltip_cell")); // $NON-NLS-1$
tableHeaders.addMouseListener(new TextBoxDoubleClick(tableHeaders));
setFirstColumnPreferredAndMaxWidth(tableHeaders);
tableHeaders.getTableHeader().setDefaultRenderer(
new HeaderAsPropertyRenderer());
RendererUtils.applyRenderers(tableHeaders, RENDERERS_HEADERS);
// Create the split pane
JSplitPane topSplit = new JSplitPane(JSplitPane.VERTICAL_SPLIT,
GuiUtils.makeScrollPane(tableParams),
GuiUtils.makeScrollPane(tableHeaders));
topSplit.setOneTouchExpandable(true);
topSplit.setResizeWeight(0.50); // set split ratio
topSplit.setBorder(null); // see bug jdk 4131528
JSplitPane paneParsed = new JSplitPane(JSplitPane.VERTICAL_SPLIT,
GuiUtils.makeScrollPane(tableRequest), topSplit);
paneParsed.setOneTouchExpandable(true);
paneParsed.setResizeWeight(0.25); // set split ratio (only 5 lines to display)
paneParsed.setBorder(null); // see bug jdk 4131528
// Hint to background color on bottom tabs (grey, not blue)
JPanel panel = new JPanel(new BorderLayout());
panel.add(paneParsed);
return panel;
}
private void setFirstColumnPreferredAndMaxWidth(JTable table) {
TableColumn column = table.getColumnModel().getColumn(0);
column.setMaxWidth(300);
column.setPreferredWidth(160);
}
/* (non-Javadoc)
* @see org.apache.jmeter.visualizers.request.RequestView#getLabel()
*/
@Override
public String getLabel() {
return JMeterUtils.getResString(KEY_LABEL);
}
/**
* Search implementation for the http parameter table
*/
private class RequestViewHttpSearchProvider implements ISearchTextExtensionProvider {
private int lastPosition = -1;
@Override
public void resetTextToFind() {
lastPosition = -1;
if(tableParams != null) {
tableParams.clearSelection();
}
}
@Override
public boolean executeAndShowTextFind(Pattern pattern) {
boolean found = false;
if(tableParams != null) {
tableParams.clearSelection();
outerloop:
for (int i = lastPosition+1; i < tableParams.getRowCount(); i++) {
for (int j = 0; j < COLUMNS_PARAMS.length; j++) {
Object o = tableParams.getModel().getValueAt(i, j);
if(o instanceof String) {
Matcher matcher = pattern.matcher((String) o);
if (matcher.find()) {
found = true;
tableParams.setRowSelectionInterval(i, i);
tableParams.scrollRectToVisible(tableParams.getCellRect(i, 0, true));
lastPosition = i;
break outerloop;
}
}
}
}
if(!found) {
resetTextToFind();
}
}
return found;
}
}
}
| apache/jmeter | src/protocol/http/src/main/java/org/apache/jmeter/protocol/http/visualizers/RequestViewHTTP.java | Java | apache-2.0 | 18,933 |
/***** BEGIN LICENSE BLOCK *****
* Copyright (c) 2006-2007, 2010 Nick Sieger <nick@nicksieger.com>
* Copyright (c) 2006-2007 Ola Bini <ola.bini@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
***** END LICENSE BLOCK *****/
package arjdbc.derby;
import java.sql.SQLException;
import arjdbc.jdbc.RubyJdbcConnection;
import org.jruby.Ruby;
import org.jruby.RubyBigDecimal;
import org.jruby.RubyBignum;
import org.jruby.RubyBoolean;
import org.jruby.RubyFixnum;
import org.jruby.RubyFloat;
import org.jruby.RubyModule;
import org.jruby.RubyNumeric;
import org.jruby.RubyObjectAdapter;
import org.jruby.RubyRange;
import org.jruby.RubyString;
import org.jruby.anno.JRubyMethod;
import org.jruby.runtime.ThreadContext;
import org.jruby.runtime.builtin.IRubyObject;
import org.jruby.util.ByteList;
public class DerbyModule {
private static RubyObjectAdapter rubyApi;
public static void load(RubyModule arJdbc, RubyObjectAdapter adapter) {
RubyModule derby = arJdbc.defineModuleUnder("Derby");
derby.defineAnnotatedMethods(DerbyModule.class);
RubyModule column = derby.defineModuleUnder("Column");
column.defineAnnotatedMethods(Column.class);
rubyApi = adapter;
}
public static class Column {
@JRubyMethod(name = "type_cast", required = 1)
public static IRubyObject type_cast(IRubyObject recv, IRubyObject value) {
Ruby runtime = recv.getRuntime();
if (value.isNil() || ((value instanceof RubyString) && value.toString().trim().equalsIgnoreCase("null"))) {
return runtime.getNil();
}
String type = rubyApi.getInstanceVariable(recv, "@type").toString();
switch (type.charAt(0)) {
case 's': //string
return value;
case 't': //text, timestamp, time
if (type.equals("text")) {
return value;
} else if (type.equals("timestamp")) {
return rubyApi.callMethod(recv.getMetaClass(), "string_to_time", value);
} else { //time
return rubyApi.callMethod(recv.getMetaClass(), "string_to_dummy_time", value);
}
case 'i': //integer
case 'p': //primary key
if (value.respondsTo("to_i")) {
return rubyApi.callMethod(value, "to_i");
} else {
return runtime.newFixnum(value.isTrue() ? 1 : 0);
}
case 'd': //decimal, datetime, date
if (type.equals("datetime")) {
return rubyApi.callMethod(recv.getMetaClass(), "string_to_time", value);
} else if (type.equals("date")) {
return rubyApi.callMethod(recv.getMetaClass(), "string_to_date", value);
} else {
return rubyApi.callMethod(recv.getMetaClass(), "value_to_decimal", value);
}
case 'f': //float
return rubyApi.callMethod(value, "to_f");
case 'b': //binary, boolean
if (type.equals("binary")) {
return rubyApi.callMethod(recv.getMetaClass(), "binary_to_string", value);
} else {
return rubyApi.callMethod(recv.getMetaClass(), "value_to_boolean", value);
}
}
return value;
}
}
@JRubyMethod(name = "quote", required = 1, optional = 1)
public static IRubyObject quote(ThreadContext context, IRubyObject recv, IRubyObject[] args) {
Ruby runtime = recv.getRuntime();
IRubyObject value = args[0];
if (args.length > 1) {
IRubyObject col = args[1];
String type = rubyApi.callMethod(col, "type").toString();
// intercept and change value, maybe, if the column type is :text or :string
if (type.equals("text") || type.equals("string")) {
value = make_ruby_string_for_text_column(context, recv, runtime, value);
}
if (value instanceof RubyString) {
if (type.equals("string")) {
return quote_string_with_surround(runtime, "'", (RubyString)value, "'");
} else if (type.equals("text")) {
return quote_string_with_surround(runtime, "CAST('", (RubyString)value, "' AS CLOB)");
} else if (type.equals("binary")) {
return hexquote_string_with_surround(runtime, "CAST(X'", (RubyString)value, "' AS BLOB)");
} else {
// column type :integer or other numeric or date version
if (only_digits((RubyString)value)) {
return value;
} else {
return super_quote(context, recv, runtime, value, col);
}
}
} else if ((value instanceof RubyFloat) || (value instanceof RubyFixnum) || (value instanceof RubyBignum)) {
if (type.equals("string")) {
return quote_string_with_surround(runtime, "'", RubyString.objAsString(context, value), "'");
}
}
}
return super_quote(context, recv, runtime, value, runtime.getNil());
}
/*
 * Derby is not permissive like MySQL: send an Integer to a CLOB or VARCHAR column and Derby will reject it.
 * This method turns non-string things into strings.
 */
private static IRubyObject make_ruby_string_for_text_column(ThreadContext context, IRubyObject recv, Ruby runtime, IRubyObject value) {
RubyModule multibyteChars = (RubyModule)
((RubyModule) ((RubyModule) runtime.getModule("ActiveSupport")).getConstant("Multibyte")).getConstantAt("Chars");
if (value instanceof RubyString || rubyApi.isKindOf(value, multibyteChars) || value.isNil()) {
return value;
}
if (value instanceof RubyBoolean) {
return value.isTrue() ? runtime.newString("1") : runtime.newString("0");
} else if (value instanceof RubyFloat || value instanceof RubyFixnum || value instanceof RubyBignum) {
return RubyString.objAsString(context, value);
} else if ( value instanceof RubyBigDecimal) {
return rubyApi.callMethod(value, "to_s", runtime.newString("F"));
} else {
if (rubyApi.callMethod(value, "acts_like?", runtime.newString("date")).isTrue() || rubyApi.callMethod(value, "acts_like?", runtime.newString("time")).isTrue()) {
return (RubyString)rubyApi.callMethod(recv, "quoted_date", value);
} else {
return (RubyString)rubyApi.callMethod(value, "to_yaml");
}
}
}
private final static ByteList NULL = new ByteList("NULL".getBytes());
private static IRubyObject super_quote(ThreadContext context, IRubyObject recv, Ruby runtime, IRubyObject value, IRubyObject col) {
if (value.respondsTo("quoted_id")) {
return rubyApi.callMethod(value, "quoted_id");
}
IRubyObject type = (col.isNil()) ? col : rubyApi.callMethod(col, "type");
RubyModule multibyteChars = (RubyModule)
((RubyModule) ((RubyModule) runtime.getModule("ActiveSupport")).getConstant("Multibyte")).getConstantAt("Chars");
if (value instanceof RubyString || rubyApi.isKindOf(value, multibyteChars)) {
RubyString svalue = RubyString.objAsString(context, value);
if (type == runtime.newSymbol("binary") && col.getType().respondsTo("string_to_binary")) {
return quote_string_with_surround(runtime, "'", (RubyString)(rubyApi.callMethod(col.getType(), "string_to_binary", svalue)), "'");
} else if (type == runtime.newSymbol("integer") || type == runtime.newSymbol("float")) {
return RubyString.objAsString(context, ((type == runtime.newSymbol("integer")) ?
rubyApi.callMethod(svalue, "to_i") :
rubyApi.callMethod(svalue, "to_f")));
} else {
return quote_string_with_surround(runtime, "'", svalue, "'");
}
} else if (value.isNil()) {
return runtime.newString(NULL);
} else if (value instanceof RubyBoolean) {
return (value.isTrue() ?
(type == runtime.newSymbol(":integer")) ? runtime.newString("1") : rubyApi.callMethod(recv, "quoted_true") :
(type == runtime.newSymbol(":integer")) ? runtime.newString("0") : rubyApi.callMethod(recv, "quoted_false"));
} else if((value instanceof RubyFloat) || (value instanceof RubyFixnum) || (value instanceof RubyBignum)) {
return RubyString.objAsString(context, value);
} else if(value instanceof RubyBigDecimal) {
return rubyApi.callMethod(value, "to_s", runtime.newString("F"));
} else if (rubyApi.callMethod(value, "acts_like?", runtime.newString("date")).isTrue() || rubyApi.callMethod(value, "acts_like?", runtime.newString("time")).isTrue()) {
return quote_string_with_surround(runtime, "'", (RubyString)(rubyApi.callMethod(recv, "quoted_date", value)), "'");
} else {
return quote_string_with_surround(runtime, "'", (RubyString)(rubyApi.callMethod(value, "to_yaml")), "'");
}
}
private final static ByteList TWO_SINGLE = new ByteList(new byte[]{'\'','\''});
private static IRubyObject quote_string_with_surround(Ruby runtime, String before, RubyString string, String after) {
ByteList input = string.getByteList();
ByteList output = new ByteList(before.getBytes());
for(int i = input.begin; i< input.begin + input.realSize; i++) {
switch(input.bytes[i]) {
case '\'':
output.append(input.bytes[i]);
//FALLTHROUGH
default:
output.append(input.bytes[i]);
}
}
output.append(after.getBytes());
return runtime.newString(output);
}
private final static byte[] HEX = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'};
private static IRubyObject hexquote_string_with_surround(Ruby runtime, String before, RubyString string, String after) {
ByteList input = string.getByteList();
ByteList output = new ByteList(before.getBytes());
int written = 0;
for(int i = input.begin; i< input.begin + input.realSize; i++) {
byte b1 = input.bytes[i];
byte higher = HEX[(((char)b1)>>4)%16];
byte lower = HEX[((char)b1)%16];
output.append(higher);
output.append(lower);
written += 2;
if(written >= 16334) { // max hex length = 16334
output.append("'||X'".getBytes());
written = 0;
}
}
output.append(after.getBytes());
return runtime.newStringShared(output);
}
private static boolean only_digits(RubyString inp) {
ByteList input = inp.getByteList();
for(int i = input.begin; i< input.begin + input.realSize; i++) {
if(input.bytes[i] < '0' || input.bytes[i] > '9') {
return false;
}
}
return true;
}
@JRubyMethod(name = "quote_string", required = 1)
public static IRubyObject quote_string(IRubyObject recv, IRubyObject string) {
boolean replacementFound = false;
ByteList bl = ((RubyString) string).getByteList();
for(int i = bl.begin; i < bl.begin + bl.realSize; i++) {
switch (bl.bytes[i]) {
case '\'': break;
default: continue;
}
// On first replacement allocate a different bytelist so we don't manip original
if(!replacementFound) {
i-= bl.begin;
bl = new ByteList(bl);
replacementFound = true;
}
bl.replace(i, 1, TWO_SINGLE);
i+=1;
}
if(replacementFound) {
return recv.getRuntime().newStringShared(bl);
} else {
return string;
}
}
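    // Illustrative only (not in the original source):
    //   quote_string(recv, "it's") -> "it''s"  (a copy is made; the original string is untouched)
    //   quote_string(recv, "abc")  -> the same RubyString instance, no allocation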
@JRubyMethod(name = "select_all", rest = true)
public static IRubyObject select_all(IRubyObject recv, IRubyObject[] args) {
return rubyApi.callMethod(recv, "execute", args);
}
@JRubyMethod(name = "select_one", rest = true)
public static IRubyObject select_one(IRubyObject recv, IRubyObject[] args) {
IRubyObject limit = rubyApi.getInstanceVariable(recv, "@limit");
if (limit == null || limit.isNil()) {
rubyApi.setInstanceVariable(recv, "@limit", recv.getRuntime().newFixnum(1));
}
try {
IRubyObject result = rubyApi.callMethod(recv, "execute", args);
return rubyApi.callMethod(result, "first");
} finally {
rubyApi.setInstanceVariable(recv, "@limit", recv.getRuntime().getNil());
}
}
@JRubyMethod(name = "_execute", required = 1, optional = 1)
public static IRubyObject _execute(ThreadContext context, IRubyObject recv, IRubyObject[] args) throws SQLException, java.io.IOException {
Ruby runtime = recv.getRuntime();
RubyJdbcConnection conn = (RubyJdbcConnection) rubyApi.getInstanceVariable(recv, "@connection");
String sql = args[0].toString().trim().toLowerCase();
if (sql.charAt(0) == '(') {
sql = sql.substring(1).trim();
}
if (sql.startsWith("insert")) {
return conn.execute_insert(context, args[0]);
} else if (sql.startsWith("select") || sql.startsWith("show") || sql.startsWith("values")) {
return conn.execute_query(context, args[0]);
} else {
return conn.execute_update(context, args[0]);
}
}
}
| ThoughtWorksStudios/mingle_git_plugin | tools/gems/gems/activerecord-jdbc-adapter-1.1.1/src/java/arjdbc/derby/DerbyModule.java | Java | apache-2.0 | 15,045 |
# Copyright (c) 2020 Vestas Wind Systems A/S
#
# SPDX-License-Identifier: Apache-2.0
'''Runner for performing program download over CANopen (DSP 302-3).'''
import argparse
import os
import time
from runners.core import ZephyrBinaryRunner, RunnerCaps
try:
import canopen
from progress.bar import Bar
MISSING_REQUIREMENTS = False
except ImportError:
MISSING_REQUIREMENTS = True
# Default Python-CAN context to use, see python-can documentation for details
DEFAULT_CAN_CONTEXT = 'default'
# Default program number
DEFAULT_PROGRAM_NUMBER = 1
# Default timeouts and retries
DEFAULT_TIMEOUT = 10.0 # seconds
DEFAULT_SDO_TIMEOUT = 0.3 # seconds
DEFAULT_SDO_RETRIES = 1
# Object dictionary indexes
H1F50_PROGRAM_DATA = 0x1F50
H1F51_PROGRAM_CTRL = 0x1F51
H1F56_PROGRAM_SWID = 0x1F56
H1F57_FLASH_STATUS = 0x1F57
# Program control commands
PROGRAM_CTRL_STOP = 0x00
PROGRAM_CTRL_START = 0x01
PROGRAM_CTRL_RESET = 0x02
PROGRAM_CTRL_CLEAR = 0x03
PROGRAM_CTRL_ZEPHYR_CONFIRM = 0x80
class ToggleAction(argparse.Action):
'''Toggle argument parser'''
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, not option_string.startswith('--no-'))
class CANopenBinaryRunner(ZephyrBinaryRunner):
'''Runner front-end for CANopen.'''
def __init__(self, cfg, dev_id, can_context=DEFAULT_CAN_CONTEXT,
program_number=DEFAULT_PROGRAM_NUMBER, confirm=True,
confirm_only=True, timeout=DEFAULT_TIMEOUT,
sdo_retries=DEFAULT_SDO_RETRIES, sdo_timeout=DEFAULT_SDO_TIMEOUT):
if MISSING_REQUIREMENTS:
raise RuntimeError('one or more Python dependencies were missing; '
"see the getting started guide for details on "
"how to fix")
super().__init__(cfg)
self.dev_id = dev_id # Only use for error checking in do_run()
self.bin_file = cfg.bin_file
self.confirm = confirm
self.confirm_only = confirm_only
self.timeout = timeout
self.downloader = CANopenProgramDownloader(logger=self.logger,
node_id=dev_id,
can_context=can_context,
program_number=program_number,
sdo_retries=sdo_retries,
sdo_timeout=sdo_timeout)
@classmethod
def name(cls):
return 'canopen'
@classmethod
def capabilities(cls):
return RunnerCaps(commands={'flash'}, dev_id=True, flash_addr=False)
@classmethod
def dev_id_help(cls) -> str:
return 'CANopen Node ID.'
@classmethod
def do_add_parser(cls, parser):
# Optional:
parser.add_argument('--node-id', dest='dev_id',
help=cls.dev_id_help())
parser.add_argument('--can-context', default=DEFAULT_CAN_CONTEXT,
help=f'Python-CAN context to use (default: {DEFAULT_CAN_CONTEXT})')
parser.add_argument('--program-number', type=int, default=DEFAULT_PROGRAM_NUMBER,
help=f'program number (default: {DEFAULT_PROGRAM_NUMBER})')
parser.add_argument('--confirm', '--no-confirm',
dest='confirm', nargs=0,
action=ToggleAction,
help='confirm after starting? (default: yes)')
parser.add_argument('--confirm-only', default=False, action='store_true',
help='confirm only, no program download (default: no)')
parser.add_argument('--timeout', type=float, default=DEFAULT_TIMEOUT,
help=f'Timeout in seconds (default: {DEFAULT_TIMEOUT})')
parser.add_argument('--sdo-retries', type=int, default=DEFAULT_SDO_RETRIES,
help=f'CANopen SDO request retries (default: {DEFAULT_SDO_RETRIES})')
parser.add_argument('--sdo-timeout', type=float, default=DEFAULT_SDO_TIMEOUT,
help=f'''CANopen SDO response timeout in seconds
(default: {DEFAULT_SDO_TIMEOUT})''')
parser.set_defaults(confirm=True)
@classmethod
def do_create(cls, cfg, args):
return CANopenBinaryRunner(cfg, int(args.dev_id),
can_context=args.can_context,
program_number=args.program_number,
confirm=args.confirm,
confirm_only=args.confirm_only,
timeout=args.timeout,
sdo_retries=args.sdo_retries,
sdo_timeout=args.sdo_timeout)
def do_run(self, command, **kwargs):
if not self.dev_id:
raise RuntimeError('Please specify a CANopen node ID with the '
'-i/--dev-id or --node-id command-line switch.')
if command == 'flash':
self.flash(**kwargs)
def flash(self, **kwargs):
'''Download program to flash over CANopen'''
self.ensure_output('bin')
self.logger.info('Using Node ID %d, program number %d',
self.downloader.node_id,
self.downloader.program_number)
self.downloader.connect()
status = self.downloader.wait_for_flash_status_ok(self.timeout)
if status == 0:
self.downloader.swid()
else:
self.logger.warning('Flash status 0x{:02x}, '
'skipping software identification'.format(status))
self.downloader.enter_pre_operational()
if self.confirm_only:
self.downloader.zephyr_confirm_program()
self.downloader.disconnect()
return
if self.bin_file is None:
raise ValueError('Cannot download program; bin_file is missing')
self.downloader.stop_program()
self.downloader.clear_program()
self.downloader.wait_for_flash_status_ok(self.timeout)
self.downloader.download(self.bin_file)
status = self.downloader.wait_for_flash_status_ok(self.timeout)
if status != 0:
raise ValueError('Program download failed: '
'flash status 0x{:02x}'.format(status))
self.downloader.swid()
self.downloader.start_program()
self.downloader.wait_for_bootup(self.timeout)
self.downloader.swid()
if self.confirm:
self.downloader.enter_pre_operational()
self.downloader.zephyr_confirm_program()
self.downloader.disconnect()
class CANopenProgramDownloader(object):
'''CANopen program downloader'''
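    # Illustrative only (not in the original source): the call sequence
    # CANopenBinaryRunner.flash() above uses, with a standard logging.Logger
    # and a made-up node id:
    #
    #   dl = CANopenProgramDownloader(logging.getLogger(__name__), node_id=42)
    #   dl.connect()
    #   dl.stop_program()
    #   dl.clear_program()
    #   dl.download('zephyr.bin')
    #   dl.start_program()
    #   dl.wait_for_bootup()
    #   dl.disconnect()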
def __init__(self, logger, node_id, can_context=DEFAULT_CAN_CONTEXT,
program_number=DEFAULT_PROGRAM_NUMBER,
sdo_retries=DEFAULT_SDO_RETRIES, sdo_timeout=DEFAULT_SDO_TIMEOUT):
super(CANopenProgramDownloader, self).__init__()
self.logger = logger
self.node_id = node_id
self.can_context = can_context
self.program_number = program_number
self.network = canopen.Network()
self.node = self.network.add_node(self.node_id,
self.create_object_dictionary())
self.data_sdo = self.node.sdo[H1F50_PROGRAM_DATA][self.program_number]
self.ctrl_sdo = self.node.sdo[H1F51_PROGRAM_CTRL][self.program_number]
self.swid_sdo = self.node.sdo[H1F56_PROGRAM_SWID][self.program_number]
self.flash_sdo = self.node.sdo[H1F57_FLASH_STATUS][self.program_number]
self.node.sdo.MAX_RETRIES = sdo_retries
self.node.sdo.RESPONSE_TIMEOUT = sdo_timeout
def connect(self):
'''Connect to CAN network'''
        try:
            self.network.connect(context=self.can_context)
        except Exception as err:
            raise ValueError('Unable to connect to CAN network') from err
def disconnect(self):
'''Disconnect from CAN network'''
self.network.disconnect()
def enter_pre_operational(self):
'''Enter pre-operational NMT state'''
self.logger.info("Entering pre-operational mode")
        try:
            self.node.nmt.state = 'PRE-OPERATIONAL'
        except Exception as err:
            raise ValueError('Failed to enter pre-operational mode') from err
def _ctrl_program(self, cmd):
'''Write program control command to CANopen object dictionary (0x1f51)'''
        try:
            self.ctrl_sdo.raw = cmd
        except Exception as err:
            raise ValueError('Unable to write control command 0x{:02x}'.format(cmd)) from err
def stop_program(self):
'''Write stop control command to CANopen object dictionary (0x1f51)'''
self.logger.info('Stopping program')
self._ctrl_program(PROGRAM_CTRL_STOP)
def start_program(self):
'''Write start control command to CANopen object dictionary (0x1f51)'''
self.logger.info('Starting program')
self._ctrl_program(PROGRAM_CTRL_START)
def clear_program(self):
'''Write clear control command to CANopen object dictionary (0x1f51)'''
self.logger.info('Clearing program')
self._ctrl_program(PROGRAM_CTRL_CLEAR)
def zephyr_confirm_program(self):
'''Write confirm control command to CANopen object dictionary (0x1f51)'''
self.logger.info('Confirming program')
self._ctrl_program(PROGRAM_CTRL_ZEPHYR_CONFIRM)
def swid(self):
'''Read software identification from CANopen object dictionary (0x1f56)'''
        try:
            swid = self.swid_sdo.raw
        except Exception as err:
            raise ValueError('Failed to read software identification') from err
self.logger.info('Program software identification: 0x{:08x}'.format(swid))
return swid
def flash_status(self):
'''Read flash status identification'''
        try:
            status = self.flash_sdo.raw
        except Exception as err:
            raise ValueError('Failed to read flash status identification') from err
return status
    def download(self, bin_file):
        '''Download program to CANopen object dictionary (0x1f50)'''
        self.logger.info('Downloading program: %s', bin_file)
        infile = None
        outfile = None
        progress = None
        try:
            size = os.path.getsize(bin_file)
            infile = open(bin_file, 'rb')
            outfile = self.data_sdo.open('wb', size=size)
            progress = Bar('%(percent)d%%', max=size, suffix='%(index)d/%(max)dB')
            while True:
                chunk = infile.read(1024)
                if not chunk:
                    break
                outfile.write(chunk)
                progress.next(n=len(chunk))
        except Exception as err:
            raise ValueError('Failed to download program') from err
        finally:
            # Guard each handle: an early failure (e.g. in getsize()) would
            # otherwise raise NameError here and mask the original error.
            if progress is not None:
                progress.finish()
            if infile is not None:
                infile.close()
            if outfile is not None:
                outfile.close()
def wait_for_bootup(self, timeout=DEFAULT_TIMEOUT):
'''Wait for boot-up message reception'''
self.logger.info('Waiting for boot-up message...')
        try:
            self.node.nmt.wait_for_bootup(timeout=timeout)
        except Exception as err:
            raise ValueError('Timeout waiting for boot-up message') from err
def wait_for_flash_status_ok(self, timeout=DEFAULT_TIMEOUT):
'''Wait for flash status ok'''
self.logger.info('Waiting for flash status ok')
end_time = time.time() + timeout
while True:
now = time.time()
status = self.flash_status()
if status == 0:
break
if now > end_time:
return status
return status
@staticmethod
def create_object_dictionary():
'''Create a synthetic CANopen object dictionary for program download'''
objdict = canopen.objectdictionary.ObjectDictionary()
array = canopen.objectdictionary.Array('Program data', 0x1f50)
member = canopen.objectdictionary.Variable('', 0x1f50, subindex=1)
member.data_type = canopen.objectdictionary.DOMAIN
array.add_member(member)
objdict.add_object(array)
array = canopen.objectdictionary.Array('Program control', 0x1f51)
member = canopen.objectdictionary.Variable('', 0x1f51, subindex=1)
member.data_type = canopen.objectdictionary.UNSIGNED8
array.add_member(member)
objdict.add_object(array)
        array = canopen.objectdictionary.Array('Program software ID', 0x1f56)
member = canopen.objectdictionary.Variable('', 0x1f56, subindex=1)
member.data_type = canopen.objectdictionary.UNSIGNED32
array.add_member(member)
objdict.add_object(array)
array = canopen.objectdictionary.Array('Flash error ID', 0x1f57)
member = canopen.objectdictionary.Variable('', 0x1f57, subindex=1)
member.data_type = canopen.objectdictionary.UNSIGNED32
array.add_member(member)
objdict.add_object(array)
return objdict
| zephyrproject-rtos/zephyr | scripts/west_commands/runners/canopen_program.py | Python | apache-2.0 | 13,074 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
var TypeService = function($http, ENV, locationUtils, messageModel) {
this.getTypes = function(queryParams) {
return $http.get(ENV.api['root'] + 'types', {params: queryParams}).then(
function (result) {
return result.data.response;
},
function (err) {
throw err;
}
)
};
this.getType = function(id) {
return $http.get(ENV.api['root'] + 'types', {params: {id: id}}).then(
function (result) {
return result.data.response[0];
},
function (err) {
throw err;
}
)
};
this.createType = function(type) {
return $http.post(ENV.api['root'] + 'types', type).then(
function(result) {
messageModel.setMessages([ { level: 'success', text: 'Type created' } ], true);
locationUtils.navigateToPath('/types');
return result;
},
function(err) {
messageModel.setMessages(err.data.alerts, false);
throw err;
}
);
};
// todo: change to use query param when it is supported
this.updateType = function(type) {
return $http.put(ENV.api['root'] + 'types/' + type.id, type).then(
function(result) {
messageModel.setMessages([ { level: 'success', text: 'Type updated' } ], false);
return result;
},
function(err) {
messageModel.setMessages(err.data.alerts, false);
throw err;
}
);
};
// todo: change to use query param when it is supported
this.deleteType = function(id) {
return $http.delete(ENV.api['root'] + "types/" + id).then(
function(result) {
messageModel.setMessages([ { level: 'success', text: 'Type deleted' } ], true);
return result;
},
function(err) {
messageModel.setMessages(err.data.alerts, true);
throw err;
}
);
};
};
TypeService.$inject = ['$http', 'ENV', 'locationUtils', 'messageModel'];
module.exports = TypeService;
| hbeatty/incubator-trafficcontrol | traffic_portal/app/src/common/api/TypeService.js | JavaScript | apache-2.0 | 3,066 |
package util
import (
"fmt"
)
/*
BytesRefHash is a special purpose hash map like data structure
optimized for BytesRef instances. BytesRefHash maintains mappings of
byte arrays to ids (map[[]byte]int) sorting the hashed bytes
efficiently in continuous storage. The mapping to the id is
encapsulated inside BytesRefHash and is guaranteed to be increased
for each added BytesRef.
Note: The maximum capacity BytesRef instance passed to add() must not
be longer than BYTE_BLOCK_SIZE-2. The internal storage is limited to
2GB total byte storage.
*/
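//
// Illustrative only (not in the original source): a minimal usage sketch,
// assuming a ByteBlockPool `pool` and a BytesStartArray `starts` from this
// package have already been constructed (capacity must be a power of two):
//
//	h := NewBytesRefHash(pool, 16, starts)
//	id, err := h.Add([]byte("hello")) // new id, or -(existingId+1) if present
//	sorted := h.Sort(func(a, b []byte) bool { return string(a) < string(b) })
//	_ = h.ByteStart(id) // offset of the stored bytes inside the pool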
type BytesRefHash struct {
pool *ByteBlockPool
bytesStart []int
scratch1 *BytesRef
hashSize int
hashHalfSize int
hashMask int
count int
lastCount int
ids []int
bytesStartArray BytesStartArray
bytesUsed Counter
}
func NewBytesRefHash(pool *ByteBlockPool, capacity int,
bytesStartArray BytesStartArray) *BytesRefHash {
ids := make([]int, capacity)
	for i := range ids {
ids[i] = -1
}
counter := bytesStartArray.BytesUsed()
if counter == nil {
counter = NewCounter()
}
counter.AddAndGet(int64(capacity) * NUM_BYTES_INT)
return &BytesRefHash{
scratch1: NewEmptyBytesRef(),
hashSize: capacity,
hashHalfSize: capacity >> 1,
hashMask: capacity - 1,
lastCount: -1,
pool: pool,
ids: ids,
bytesStartArray: bytesStartArray,
bytesStart: bytesStartArray.Init(),
bytesUsed: counter,
}
}
/* Returns the number of values in this hash. */
func (h *BytesRefHash) Size() int {
return h.count
}
/*
Returns the ids array in arbitrary order. Valid ids start at offset
of 0 and end at a limit of size() - 1
Note: This is a destructive operation. clear() must be called in
order to reuse this BytesRefHash instance.
*/
func (h *BytesRefHash) compact() []int {
assert2(h.bytesStart != nil, "bytesStart is nil - not initialized")
upto := 0
for i := 0; i < h.hashSize; i++ {
if h.ids[i] != -1 {
if upto < i {
h.ids[upto] = h.ids[i]
h.ids[i] = -1
}
upto++
}
}
assert(upto == h.count)
h.lastCount = h.count
return h.ids
}
type bytesRefIntroSorter struct {
*IntroSorter
owner *BytesRefHash
compact []int
comp func([]byte, []byte) bool
pivot *BytesRef
scratch1 *BytesRef
scratch2 *BytesRef
}
func newBytesRefIntroSorter(owner *BytesRefHash, v []int,
comp func([]byte, []byte) bool) *bytesRefIntroSorter {
ans := &bytesRefIntroSorter{
owner: owner,
compact: v,
comp: comp,
pivot: NewEmptyBytesRef(),
scratch1: NewEmptyBytesRef(),
scratch2: NewEmptyBytesRef(),
}
ans.IntroSorter = NewIntroSorter(ans, ans)
return ans
}
func (a *bytesRefIntroSorter) Len() int { return len(a.compact) }
func (a *bytesRefIntroSorter) Swap(i, j int) { a.compact[i], a.compact[j] = a.compact[j], a.compact[i] }
func (a *bytesRefIntroSorter) Less(i, j int) bool {
id1, id2 := a.compact[i], a.compact[j]
assert(len(a.owner.bytesStart) > id1 && len(a.owner.bytesStart) > id2)
a.owner.pool.SetBytesRef(a.scratch1, a.owner.bytesStart[id1])
a.owner.pool.SetBytesRef(a.scratch2, a.owner.bytesStart[id2])
return a.comp(a.scratch1.ToBytes(), a.scratch2.ToBytes())
}
func (a *bytesRefIntroSorter) SetPivot(i int) {
id := a.compact[i]
assert(len(a.owner.bytesStart) > id)
a.owner.pool.SetBytesRef(a.pivot, a.owner.bytesStart[id])
}
func (a *bytesRefIntroSorter) PivotLess(j int) bool {
id := a.compact[j]
assert(len(a.owner.bytesStart) > id)
a.owner.pool.SetBytesRef(a.scratch2, a.owner.bytesStart[id])
return a.comp(a.pivot.ToBytes(), a.scratch2.ToBytes())
}
/*
Returns the values array sorted by the referenced byte values.
Note: this is a destructive operation. clear() must be called in
order to reuse this BytesRefHash instance.
*/
func (h *BytesRefHash) Sort(comp func(a, b []byte) bool) []int {
compact := h.compact()
s := newBytesRefIntroSorter(h, compact, comp)
s.Sort(0, h.count)
// TODO remove this
// for i, _ := range compact {
// if compact[i+1] == -1 {
// break
// }
// assert(!s.Less(i+1, i))
// if ok := !s.Less(i+1, i); !ok {
// fmt.Println("DEBUG1", compact)
// assert(ok)
// }
// }
return compact
}
func (h *BytesRefHash) equals(id int, b []byte) bool {
h.pool.SetBytesRef(h.scratch1, h.bytesStart[id])
return h.scratch1.bytesEquals(b)
}
func (h *BytesRefHash) shrink(targetSize int) bool {
// Cannot use util.Shrink because we require power of 2:
newSize := h.hashSize
for newSize >= 8 && newSize/4 > targetSize {
newSize /= 2
}
if newSize != h.hashSize {
h.bytesUsed.AddAndGet(NUM_BYTES_INT * -int64(h.hashSize-newSize))
h.hashSize = newSize
h.ids = make([]int, h.hashSize)
		for i := range h.ids {
h.ids[i] = -1
}
h.hashHalfSize = newSize / 2
h.hashMask = newSize - 1
return true
}
return false
}
/* Clears the BytesRefHash. If resetPool is true, the internal ByteBlockPool is reset as well. */
func (h *BytesRefHash) Clear(resetPool bool) {
h.lastCount = h.count
h.count = 0
if resetPool {
		h.pool.Reset(false, false) // we don't need to 0-fill the buffers
}
h.bytesStart = h.bytesStartArray.Clear()
if h.lastCount != -1 && h.shrink(h.lastCount) {
		// shrink clears the hash entries
return
}
	for i := range h.ids {
h.ids[i] = -1
}
}
type MaxBytesLengthExceededError string
func (e MaxBytesLengthExceededError) Error() string {
return string(e)
}
/* Adds a new BytesRef. */
func (h *BytesRefHash) Add(bytes []byte) (int, error) {
assert2(h.bytesStart != nil, "Bytesstart is null - not initialized")
length := len(bytes)
// final position
hashPos := h.findHash(bytes)
e := h.ids[hashPos]
if e == -1 {
// new entry
if len2 := 2 + len(bytes); len2+h.pool.ByteUpto > BYTE_BLOCK_SIZE {
if len2 > BYTE_BLOCK_SIZE {
return 0, MaxBytesLengthExceededError(fmt.Sprintf(
"bytes can be at most %v in length; got %v",
BYTE_BLOCK_SIZE-2, len(bytes)))
}
h.pool.NextBuffer()
}
buffer := h.pool.Buffer
bufferUpto := h.pool.ByteUpto
if h.count >= len(h.bytesStart) {
h.bytesStart = h.bytesStartArray.Grow()
assert2(h.count < len(h.bytesStart)+1, "count: %v len: %v", h.count, len(h.bytesStart))
}
e = h.count
h.count++
h.bytesStart[e] = bufferUpto + h.pool.ByteOffset
// We first encode the length, followed by the bytes. Length is
// encoded as vint, but will consume 1 or 2 bytes at most (we
// reject too-long terms, above).
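		// Illustrative encodings (not in the original source):
		//   length 5   -> [0x05, b0..b4]
		//   length 300 -> [0xac, 0x02, b0..b299]  (0xac = 0x80|(300&0x7f), 0x02 = 300>>7)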
if length < 128 {
// 1 byte to store length
buffer[bufferUpto] = byte(length)
h.pool.ByteUpto += length + 1
assert2(length >= 0, "Length must be positive: %v", length)
copy(buffer[bufferUpto+1:], bytes)
} else {
// 2 bytes to store length
buffer[bufferUpto] = byte(0x80 | (length & 0x7f))
buffer[bufferUpto+1] = byte((length >> 7) & 0xff)
h.pool.ByteUpto += length + 2
copy(buffer[bufferUpto+2:], bytes)
}
assert(h.ids[hashPos] == -1)
h.ids[hashPos] = e
if h.count == h.hashHalfSize {
h.rehash(2*h.hashSize, true)
}
return e, nil
}
return -(e + 1), nil
}
func (h *BytesRefHash) findHash(bytes []byte) int {
assert2(h.bytesStart != nil, "bytesStart is null - not initialized")
code := h.doHash(bytes)
// final position
hashPos := code & h.hashMask
if e := h.ids[hashPos]; e != -1 && !h.equals(e, bytes) {
// conflict; use linear probe to find an open slot
// (see LUCENE-5604):
for {
code++
hashPos = code & h.hashMask
e = h.ids[hashPos]
if e == -1 || h.equals(e, bytes) {
break
}
}
}
return hashPos
}
/* Called when the hash is too small (> 50% occupied) or too large (< 20% occupied). */
func (h *BytesRefHash) rehash(newSize int, hashOnData bool) {
newMask := newSize - 1
h.bytesUsed.AddAndGet(NUM_BYTES_INT * int64(newSize))
newHash := make([]int, newSize)
	for i := range newHash {
newHash[i] = -1
}
for i := 0; i < h.hashSize; i++ {
if e0 := h.ids[i]; e0 != -1 {
var code int
if hashOnData {
off := h.bytesStart[e0]
start := off & BYTE_BLOCK_MASK
bytes := h.pool.Buffers[off>>BYTE_BLOCK_SHIFT]
var length int
var pos int
if bytes[start]&0x80 == 0 {
// length is 1 byte
length = int(bytes[start])
pos = start + 1
} else {
length = int(bytes[start]&0x7f) + (int(bytes[start+1]&0xff) << 7)
pos = start + 2
}
code = h.doHash(bytes[pos : pos+length])
} else {
code = h.bytesStart[e0]
}
hashPos := code & newMask
assert(hashPos >= 0)
if newHash[hashPos] != -1 {
// conflict; use linear probe to find an open slot
// (see LUCENE-5604)
for {
code++
hashPos = code & newMask
if newHash[hashPos] == -1 {
break
}
}
}
assert(newHash[hashPos] == -1)
newHash[hashPos] = e0
}
}
h.hashMask = newMask
h.bytesUsed.AddAndGet(NUM_BYTES_INT * int64(-len(h.ids)))
h.ids = newHash
h.hashSize = newSize
h.hashHalfSize = newSize / 2
}
func (h *BytesRefHash) doHash(p []byte) int {
return int(MurmurHash3_x86_32(p, GOOD_FAST_HASH_SEED))
}
/*
reinitializes the BytesRefHash after a previous clear() call. If
clear() has not been called previously this method has no effect.
*/
func (h *BytesRefHash) Reinit() {
if h.bytesStart == nil {
h.bytesStart = h.bytesStartArray.Init()
}
if h.ids == nil {
h.ids = make([]int, h.hashSize)
h.bytesUsed.AddAndGet(NUM_BYTES_INT * int64(h.hashSize))
}
}
/*
Returns the bytesStart offset into the internally used ByteBlockPool
for the given bytesID.
*/
func (h *BytesRefHash) ByteStart(bytesId int) int {
assert2(h.bytesStart != nil, "bytesStart is null - not initialized")
assert2(bytesId >= 0 && bytesId <= h.count, "%v", bytesId)
return h.bytesStart[bytesId]
}
/* Manages allocation of per-term addresses. */
type BytesStartArray interface {
// Initializes the BytesStartArray. This call will allocate memory
Init() []int
// A Counter reference holding the number of bytes used by this
// BytesStartArray. The BytesRefHash uses this reference to track
// its memory usage
BytesUsed() Counter
// Grows the BytesStartArray
Grow() []int
// clears the BytesStartArray and returns the cleared instance.
Clear() []int
}
| balzaczyy/golucene | core/util/bytesRefHash.go | GO | apache-2.0 | 10,103 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zookeeper.server;
import java.io.PrintWriter;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class manages watches. It allows watches to be associated with a string
* and removes watchers and their watches in addition to managing triggers.
*/
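// Illustrative only (not in the original source): watches are one-shot, shown
// here with a hypothetical Watcher instance `w`:
//
//   WatchManager wm = new WatchManager();
//   wm.addWatch("/a", w);
//   wm.triggerWatch("/a", EventType.NodeDataChanged); // delivers event, removes watch
//   wm.containsWatcher("/a", w);                      // false -- must re-register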
class WatchManager {
private static final Logger LOG = LoggerFactory.getLogger(WatchManager.class);
private final Map<String, Set<Watcher>> watchTable =
new HashMap<String, Set<Watcher>>();
private final Map<Watcher, Set<String>> watch2Paths =
new HashMap<Watcher, Set<String>>();
synchronized int size(){
int result = 0;
for(Set<Watcher> watches : watchTable.values()) {
result += watches.size();
}
return result;
}
synchronized void addWatch(String path, Watcher watcher) {
Set<Watcher> list = watchTable.get(path);
if (list == null) {
// don't waste memory if there are few watches on a node
// rehash when the 4th entry is added, doubling size thereafter
// seems like a good compromise
list = new HashSet<Watcher>(4);
watchTable.put(path, list);
}
list.add(watcher);
Set<String> paths = watch2Paths.get(watcher);
if (paths == null) {
// cnxns typically have many watches, so use default cap here
paths = new HashSet<String>();
watch2Paths.put(watcher, paths);
}
paths.add(path);
}
synchronized void removeWatcher(Watcher watcher) {
Set<String> paths = watch2Paths.remove(watcher);
if (paths == null) {
return;
}
for (String p : paths) {
Set<Watcher> list = watchTable.get(p);
if (list != null) {
list.remove(watcher);
if (list.size() == 0) {
watchTable.remove(p);
}
}
}
}
Set<Watcher> triggerWatch(String path, EventType type) {
return triggerWatch(path, type, null);
}
    Set<Watcher> triggerWatch(String path, EventType type, Set<Watcher> suppress) {
WatchedEvent e = new WatchedEvent(type,
KeeperState.SyncConnected, path);
Set<Watcher> watchers;
synchronized (this) {
watchers = watchTable.remove(path);
if (watchers == null || watchers.isEmpty()) {
if (LOG.isTraceEnabled()) {
ZooTrace.logTraceMessage(LOG,
ZooTrace.EVENT_DELIVERY_TRACE_MASK,
"No watchers for " + path);
}
return null;
}
for (Watcher w : watchers) {
Set<String> paths = watch2Paths.get(w);
if (paths != null) {
paths.remove(path);
}
}
}
for (Watcher w : watchers) {
            if (suppress != null && suppress.contains(w)) {
continue;
}
w.process(e);
}
return watchers;
}
/**
     * Returns a short summary of this WatchManager: connection count, path count, and total watches.
*/
@Override
public synchronized String toString() {
StringBuilder sb = new StringBuilder();
sb.append(watch2Paths.size()).append(" connections watching ")
.append(watchTable.size()).append(" paths\n");
int total = 0;
for (Set<String> paths : watch2Paths.values()) {
total += paths.size();
}
sb.append("Total watches:").append(total);
return sb.toString();
}
    /**
     * String representation of watches. Warning, may be large!
     * @param pwriter the writer to dump the watches to
     * @param byPath iff true output watches by path, otherwise output
     * watches by connection
     */
synchronized void dumpWatches(PrintWriter pwriter, boolean byPath) {
if (byPath) {
for (Entry<String, Set<Watcher>> e : watchTable.entrySet()) {
pwriter.println(e.getKey());
for (Watcher w : e.getValue()) {
pwriter.print("\t0x");
pwriter.print(Long.toHexString(((ServerCnxn)w).getSessionId()));
pwriter.print("\n");
}
}
} else {
for (Entry<Watcher, Set<String>> e : watch2Paths.entrySet()) {
pwriter.print("0x");
pwriter.println(Long.toHexString(((ServerCnxn)e.getKey()).getSessionId()));
for (String path : e.getValue()) {
pwriter.print("\t");
pwriter.println(path);
}
}
}
}
/**
* Checks the specified watcher exists for the given path
*
* @param path
* znode path
* @param watcher
* watcher object reference
* @return true if the watcher exists, false otherwise
*/
synchronized boolean containsWatcher(String path, Watcher watcher) {
Set<String> paths = watch2Paths.get(watcher);
if (paths == null || !paths.contains(path)) {
return false;
}
return true;
}
/**
* Removes the specified watcher for the given path
*
* @param path
* znode path
* @param watcher
* watcher object reference
* @return true if the watcher successfully removed, false otherwise
*/
synchronized boolean removeWatcher(String path, Watcher watcher) {
Set<String> paths = watch2Paths.get(watcher);
if (paths == null || !paths.remove(path)) {
return false;
}
Set<Watcher> list = watchTable.get(path);
if (list == null || !list.remove(watcher)) {
return false;
}
if (list.size() == 0) {
watchTable.remove(path);
}
return true;
}
/**
* Returns a watch report.
*
* @return watch report
* @see WatchesReport
*/
synchronized WatchesReport getWatches() {
Map<Long, Set<String>> id2paths = new HashMap<Long, Set<String>>();
for (Entry<Watcher, Set<String>> e: watch2Paths.entrySet()) {
Long id = ((ServerCnxn) e.getKey()).getSessionId();
Set<String> paths = new HashSet<String>(e.getValue());
id2paths.put(id, paths);
}
return new WatchesReport(id2paths);
}
/**
* Returns a watch report by path.
*
* @return watch report
* @see WatchesPathReport
*/
synchronized WatchesPathReport getWatchesByPath() {
Map<String, Set<Long>> path2ids = new HashMap<String, Set<Long>>();
for (Entry<String, Set<Watcher>> e : watchTable.entrySet()) {
Set<Long> ids = new HashSet<Long>(e.getValue().size());
path2ids.put(e.getKey(), ids);
for (Watcher watcher : e.getValue()) {
ids.add(((ServerCnxn) watcher).getSessionId());
}
}
return new WatchesPathReport(path2ids);
}
/**
* Returns a watch summary.
*
* @return watch summary
* @see WatchesSummary
*/
synchronized WatchesSummary getWatchesSummary() {
int totalWatches = 0;
for (Set<String> paths : watch2Paths.values()) {
totalWatches += paths.size();
}
return new WatchesSummary (watch2Paths.size(), watchTable.size(),
totalWatches);
}
}
| kfirlevari/zookeeper | src/java/main/org/apache/zookeeper/server/WatchManager.java | Java | apache-2.0 | 8,715 |
package org.apache.lucene.queryparser.flexible.standard.processors;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.LinkedList;
import java.util.List;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.core.nodes.GroupQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.MatchNoDocsQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl;
/**
 * This processor removes every {@link QueryNode} that is not a leaf and has no
 * children. If, after processing the entire tree, the root node is not a leaf
 * and has no children, a {@link MatchNoDocsQueryNode} object is returned.
* <br>
* This processor is used at the end of a pipeline to avoid invalid query node
* tree structures like a {@link GroupQueryNode} or {@link ModifierQueryNode}
* with no children.
*
* @see QueryNode
* @see MatchNoDocsQueryNode
*/
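// Illustrative only (not in the original source): if earlier processors strip
// every child of a group -- stop-word removal is the classic case, e.g. a
// query consisting only of stop words -- the empty GroupQueryNode is pruned
// here and the whole tree collapses to a MatchNoDocsQueryNode.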
public class RemoveEmptyNonLeafQueryNodeProcessor extends
QueryNodeProcessorImpl {
private LinkedList<QueryNode> childrenBuffer = new LinkedList<>();
public RemoveEmptyNonLeafQueryNodeProcessor() {
// empty constructor
}
@Override
public QueryNode process(QueryNode queryTree) throws QueryNodeException {
queryTree = super.process(queryTree);
if (!queryTree.isLeaf()) {
List<QueryNode> children = queryTree.getChildren();
if (children == null || children.size() == 0) {
return new MatchNoDocsQueryNode();
}
}
return queryTree;
}
@Override
protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException {
return node;
}
@Override
protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException {
return node;
}
@Override
protected List<QueryNode> setChildrenOrder(List<QueryNode> children)
throws QueryNodeException {
try {
for (QueryNode child : children) {
if (!child.isLeaf()) {
List<QueryNode> grandChildren = child.getChildren();
if (grandChildren != null && grandChildren.size() > 0) {
this.childrenBuffer.add(child);
}
} else {
this.childrenBuffer.add(child);
}
}
children.clear();
children.addAll(this.childrenBuffer);
} finally {
this.childrenBuffer.clear();
}
return children;
}
}
| yida-lxw/solr-5.3.1 | lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RemoveEmptyNonLeafQueryNodeProcessor.java | Java | apache-2.0 | 3,319 |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.xdebugger.breakpoints.ui;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
/**
* @author nik
*/
public abstract class XBreakpointGroup implements Comparable<XBreakpointGroup> {
@Nullable
public Icon getIcon(boolean isOpen) {
return null;
}
@NotNull
public abstract String getName();
@Override
public String toString() {
return getName();
}
@Override
public boolean equals(Object obj) {
if (obj == this) return true;
if (obj == null) return false;
return (getClass() == obj.getClass()) && compareTo((XBreakpointGroup)obj) == 0;
}
@Override
public int compareTo(final XBreakpointGroup o) {
return getName().compareTo(o.getName());
}
@Override
public int hashCode() {
return getName().hashCode();
}
}
| goodwinnk/intellij-community | platform/xdebugger-api/src/com/intellij/xdebugger/breakpoints/ui/XBreakpointGroup.java | Java | apache-2.0 | 1,450 |
/*
Copyright 2016 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package com.gs.fw.common.mithra.portal;
import com.gs.fw.common.mithra.MithraDataObject;
import com.gs.fw.common.mithra.MithraTransactionalObject;
public interface UpdateDataChooser
{
public MithraDataObject chooseDataForMultiUpdate(MithraTransactionalObject mithraObject);
}
| goldmansachs/reladomo | reladomo/src/main/java/com/gs/fw/common/mithra/portal/UpdateDataChooser.java | Java | apache-2.0 | 885 |
// ----------------------------------------------------------------------------------
//
// Copyright Microsoft Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------------
namespace Microsoft.Azure.Commands.LogicApp.Cmdlets
{
using System;
using System.Management.Automation;
using Microsoft.Azure.Commands.LogicApp.Utilities;
using Microsoft.Azure.Management.Logic.Models;
using Microsoft.WindowsAzure.Commands.Utilities.Common;
using System.Globalization;
using ResourceManager.Common.ArgumentCompleters;
/// <summary>
/// Updates the integration account map.
/// </summary>
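    // Illustrative only (not in the original source); the parameter values
    // below are made up:
    //   Set-AzureRmIntegrationAccountMap -ResourceGroupName "rg1" -Name "account1" `
    //       -MapName "map1" -MapFilePath ".\map.xslt"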
[Cmdlet(VerbsCommon.Set, "AzureRmIntegrationAccountMap", SupportsShouldProcess = true)]
[OutputType(typeof(IntegrationAccountMap))]
public class UpdateAzureIntegrationAccountMapCommand : LogicAppBaseCmdlet
{
#region Defaults
/// <summary>
/// Default content type for map.
/// </summary>
private string contentType = "application/xml";
/// <summary>
/// Default map type.
/// </summary>
private string mapType = "Xslt";
#endregion Defaults
#region Input Paramters
[Parameter(Mandatory = true, HelpMessage = "The integration account resource group name.",
ValueFromPipelineByPropertyName = true)]
[ResourceGroupCompleter]
[ValidateNotNullOrEmpty]
public string ResourceGroupName { get; set; }
[Parameter(Mandatory = true, HelpMessage = "The integration account name.",
ValueFromPipelineByPropertyName = true)]
[ValidateNotNullOrEmpty]
[Alias("IntegrationAccountName", "ResourceName")]
public string Name { get; set; }
[Parameter(Mandatory = true, HelpMessage = "The integration account map name.",
ValueFromPipelineByPropertyName = true)]
[ValidateNotNullOrEmpty]
public string MapName { get; set; }
[Parameter(Mandatory = false, HelpMessage = "The integration account map file path.")]
[ValidateNotNullOrEmpty]
public string MapFilePath { get; set; }
[Parameter(Mandatory = false, HelpMessage = "The integration account map definition.")]
[ValidateNotNullOrEmpty]
public string MapDefinition { get; set; }
[Parameter(Mandatory = false, HelpMessage = "The integration account map type.")]
[ValidateSet("Xslt", IgnoreCase = false)]
[ValidateNotNullOrEmpty]
        public string MapType
        {
            get { return this.mapType; }
            set { this.mapType = value; }
        }
[Parameter(Mandatory = false, HelpMessage = "The integration account map content type.")]
[ValidateNotNullOrEmpty]
        public string ContentType
        {
            get { return this.contentType; }
            set { this.contentType = value; }
        }
[Parameter(Mandatory = false, HelpMessage = "The integration account map metadata.",
ValueFromPipelineByPropertyName = false)]
[ValidateNotNullOrEmpty]
public object Metadata { get; set; }
[Parameter(Mandatory = false, HelpMessage = "Do not ask for confirmation.")]
public SwitchParameter Force { get; set; }
#endregion Input Parameters
/// <summary>
/// Executes the integration account map update command.
/// </summary>
public override void ExecuteCmdlet()
{
base.ExecuteCmdlet();
var integrationAccount = IntegrationAccountClient.GetIntegrationAccount(this.ResourceGroupName, this.Name);
var integrationAccountMap = IntegrationAccountClient.GetIntegrationAccountMap(this.ResourceGroupName,
this.Name,
this.MapName);
var integrationAccountMapCopy = new IntegrationAccountMap(mapType: integrationAccountMap.MapType,
id: integrationAccountMap.Id,
name: integrationAccountMap.Name,
type: integrationAccountMap.Type,
location: integrationAccountMap.Location,
tags: integrationAccountMap.Tags,
parametersSchema: integrationAccountMap.ParametersSchema,
createdTime: integrationAccountMap.CreatedTime,
changedTime: integrationAccountMap.ChangedTime,
content: integrationAccountMap.Content,
contentLink: null,
metadata: integrationAccountMap.Metadata);
if (!string.IsNullOrEmpty(this.MapFilePath))
{
integrationAccountMapCopy.Content = CmdletHelper.GetContentFromFile(this.TryResolvePath(this.MapFilePath));
}
            if (!string.IsNullOrEmpty(this.MapDefinition))
            {
                integrationAccountMapCopy.Content = this.MapDefinition;
            }
if (!string.IsNullOrEmpty(this.ContentType))
{
integrationAccountMapCopy.ContentType = this.contentType;
}
if (!string.IsNullOrEmpty(this.MapType))
{
integrationAccountMapCopy.MapType = (MapType)Enum.Parse(typeof(MapType), this.MapType);
}
if (this.Metadata != null)
{
integrationAccountMapCopy.Metadata = CmdletHelper.ConvertToMetadataJObject(this.Metadata);
}
ConfirmAction(Force.IsPresent,
string.Format(CultureInfo.InvariantCulture, Properties.Resource.UpdateResourceWarning,
"Microsoft.Logic/integrationAccounts/maps", this.Name),
string.Format(CultureInfo.InvariantCulture, Properties.Resource.UpdateResourceMessage,
"Microsoft.Logic/integrationAccounts/maps", this.Name),
Name,
() =>
{
this.WriteObject(
IntegrationAccountClient.UpdateIntegrationAccountMap(this.ResourceGroupName, this.Name,
this.MapName,
integrationAccountMapCopy), true);
},
null);
}
}
} | devigned/azure-powershell | src/ResourceManager/LogicApp/Commands.LogicApp/Cmdlets/IntegrationAccount/UpdateAzureIntegrationAccountMapCommand.cs | C# | apache-2.0 | 6,849 |
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.mapreduce.replication;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.HConnectable;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableSplit;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This map-only job compares the data from a local table with a remote one.
* Every cell is compared and must have exactly the same keys (even timestamp)
* as well as same value. It is possible to restrict the job by time range and
* families. The peer id that's provided must match the one given when the
* replication stream was setup.
* <p>
 * Two counters are provided, Verifier.Counters.GOODROWS and BADROWS. The
 * reason why a row is different is shown in the map's log.
*/
public class VerifyReplication extends Configured implements Tool {
private static final Log LOG =
LogFactory.getLog(VerifyReplication.class);
public final static String NAME = "verifyrep";
static long startTime = 0;
static long endTime = Long.MAX_VALUE;
static int versions = -1;
static String tableName = null;
static String families = null;
static String peerId = null;
/**
* Map-only comparator for 2 tables
*/
public static class Verifier
extends TableMapper<ImmutableBytesWritable, Put> {
public static enum Counters {
GOODROWS, BADROWS, ONLY_IN_SOURCE_TABLE_ROWS, ONLY_IN_PEER_TABLE_ROWS, CONTENT_DIFFERENT_ROWS}
private ResultScanner replicatedScanner;
private Result currentCompareRowInPeerTable;
/**
* Map method that compares every scanned row with the equivalent from
* a distant cluster.
* @param row The current table row key.
* @param value The columns.
* @param context The current context.
* @throws IOException When something is broken with the data.
*/
@Override
public void map(ImmutableBytesWritable row, final Result value,
Context context)
throws IOException {
if (replicatedScanner == null) {
Configuration conf = context.getConfiguration();
final Scan scan = new Scan();
scan.setCaching(conf.getInt(TableInputFormat.SCAN_CACHEDROWS, 1));
long startTime = conf.getLong(NAME + ".startTime", 0);
long endTime = conf.getLong(NAME + ".endTime", Long.MAX_VALUE);
String families = conf.get(NAME + ".families", null);
if(families != null) {
String[] fams = families.split(",");
for(String fam : fams) {
scan.addFamily(Bytes.toBytes(fam));
}
}
scan.setTimeRange(startTime, endTime);
if (versions >= 0) {
scan.setMaxVersions(versions);
}
final TableSplit tableSplit = (TableSplit)(context.getInputSplit());
HConnectionManager.execute(new HConnectable<Void>(conf) {
@Override
public Void connect(HConnection conn) throws IOException {
String zkClusterKey = conf.get(NAME + ".peerQuorumAddress");
Configuration peerConf = HBaseConfiguration.create(conf);
ZKUtil.applyClusterKeyToConf(peerConf, zkClusterKey);
TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName"));
            // TODO: This HTable doesn't get closed. Fix!
Table replicatedTable = new HTable(peerConf, tableName);
scan.setStartRow(value.getRow());
scan.setStopRow(tableSplit.getEndRow());
replicatedScanner = replicatedTable.getScanner(scan);
return null;
}
});
currentCompareRowInPeerTable = replicatedScanner.next();
}
while (true) {
if (currentCompareRowInPeerTable == null) {
// reach the region end of peer table, row only in source table
logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value);
break;
}
int rowCmpRet = Bytes.compareTo(value.getRow(), currentCompareRowInPeerTable.getRow());
if (rowCmpRet == 0) {
// rowkey is same, need to compare the content of the row
try {
Result.compareResults(value, currentCompareRowInPeerTable);
context.getCounter(Counters.GOODROWS).increment(1);
} catch (Exception e) {
logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value);
}
currentCompareRowInPeerTable = replicatedScanner.next();
break;
} else if (rowCmpRet < 0) {
// row only exists in source table
logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value);
break;
} else {
// row only exists in peer table
logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS,
currentCompareRowInPeerTable);
currentCompareRowInPeerTable = replicatedScanner.next();
}
}
}
private void logFailRowAndIncreaseCounter(Context context, Counters counter, Result row) {
context.getCounter(counter).increment(1);
context.getCounter(Counters.BADROWS).increment(1);
LOG.error(counter.toString() + ", rowkey=" + Bytes.toString(row.getRow()));
}
@Override
protected void cleanup(Context context) {
if (replicatedScanner != null) {
try {
while (currentCompareRowInPeerTable != null) {
logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS,
currentCompareRowInPeerTable);
currentCompareRowInPeerTable = replicatedScanner.next();
}
} catch (Exception e) {
LOG.error("fail to scan peer table in cleanup", e);
} finally {
replicatedScanner.close();
replicatedScanner = null;
}
}
}
}
private static String getPeerQuorumAddress(final Configuration conf) throws IOException {
ZooKeeperWatcher localZKW = null;
ReplicationPeerZKImpl peer = null;
try {
localZKW = new ZooKeeperWatcher(conf, "VerifyReplication",
new Abortable() {
@Override public void abort(String why, Throwable e) {}
@Override public boolean isAborted() {return false;}
});
ReplicationPeers rp = ReplicationFactory.getReplicationPeers(localZKW, conf, localZKW);
rp.init();
      Pair<ReplicationPeerConfig, Configuration> pair = rp.getPeerConf(peerId);
      if (pair == null) {
        throw new IOException("Couldn't get peer conf!");
      }
      Configuration peerConf = pair.getSecond();
return ZKUtil.getZooKeeperClusterKey(peerConf);
} catch (ReplicationException e) {
      throw new IOException(
          "An error occurred while trying to connect to the remote peer cluster", e);
} finally {
if (peer != null) {
peer.close();
}
if (localZKW != null) {
localZKW.close();
}
}
}
/**
* Sets up the actual job.
*
* @param conf The current configuration.
* @param args The command line parameters.
* @return The newly created job.
* @throws java.io.IOException When setting up the job fails.
*/
public static Job createSubmittableJob(Configuration conf, String[] args)
throws IOException {
if (!doCommandLine(args)) {
return null;
}
if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY,
HConstants.REPLICATION_ENABLE_DEFAULT)) {
throw new IOException("Replication needs to be enabled to verify it.");
}
conf.set(NAME+".peerId", peerId);
conf.set(NAME+".tableName", tableName);
conf.setLong(NAME+".startTime", startTime);
conf.setLong(NAME+".endTime", endTime);
if (families != null) {
conf.set(NAME+".families", families);
}
String peerQuorumAddress = getPeerQuorumAddress(conf);
conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress);
LOG.info("Peer Quorum Address: " + peerQuorumAddress);
Job job = new Job(conf, NAME + "_" + tableName);
job.setJarByClass(VerifyReplication.class);
Scan scan = new Scan();
scan.setTimeRange(startTime, endTime);
if (versions >= 0) {
scan.setMaxVersions(versions);
}
if(families != null) {
String[] fams = families.split(",");
for(String fam : fams) {
scan.addFamily(Bytes.toBytes(fam));
}
}
TableMapReduceUtil.initTableMapperJob(tableName, scan,
Verifier.class, null, null, job);
// Obtain the auth token from peer cluster
TableMapReduceUtil.initCredentialsForCluster(job, peerQuorumAddress);
job.setOutputFormatClass(NullOutputFormat.class);
job.setNumReduceTasks(0);
return job;
}
private static boolean doCommandLine(final String[] args) {
if (args.length < 2) {
printUsage(null);
return false;
}
try {
for (int i = 0; i < args.length; i++) {
String cmd = args[i];
if (cmd.equals("-h") || cmd.startsWith("--h")) {
printUsage(null);
return false;
}
final String startTimeArgKey = "--starttime=";
if (cmd.startsWith(startTimeArgKey)) {
startTime = Long.parseLong(cmd.substring(startTimeArgKey.length()));
continue;
}
final String endTimeArgKey = "--endtime=";
if (cmd.startsWith(endTimeArgKey)) {
endTime = Long.parseLong(cmd.substring(endTimeArgKey.length()));
continue;
}
final String versionsArgKey = "--versions=";
if (cmd.startsWith(versionsArgKey)) {
versions = Integer.parseInt(cmd.substring(versionsArgKey.length()));
continue;
}
final String familiesArgKey = "--families=";
if (cmd.startsWith(familiesArgKey)) {
families = cmd.substring(familiesArgKey.length());
continue;
}
if (i == args.length-2) {
peerId = cmd;
}
if (i == args.length-1) {
tableName = cmd;
}
}
} catch (Exception e) {
e.printStackTrace();
printUsage("Can't start because " + e.getMessage());
return false;
}
return true;
}
/*
* @param errorMsg Error message. Can be null.
*/
private static void printUsage(final String errorMsg) {
if (errorMsg != null && errorMsg.length() > 0) {
System.err.println("ERROR: " + errorMsg);
}
System.err.println("Usage: verifyrep [--starttime=X]" +
" [--stoptime=Y] [--families=A] <peerid> <tablename>");
System.err.println();
System.err.println("Options:");
System.err.println(" starttime beginning of the time range");
System.err.println(" without endtime means from starttime to forever");
System.err.println(" endtime end of the time range");
System.err.println(" versions number of cell versions to verify");
System.err.println(" families comma-separated list of families to copy");
System.err.println();
System.err.println("Args:");
System.err.println(" peerid Id of the peer used for verification, must match the one given for replication");
System.err.println(" tablename Name of the table to verify");
System.err.println();
System.err.println("Examples:");
System.err.println(" To verify the data replicated from TestTable for a 1 hour window with peer #5 ");
System.err.println(" $ bin/hbase " +
"org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" +
" --starttime=1265875194289 --endtime=1265878794289 5 TestTable ");
}
@Override
public int run(String[] args) throws Exception {
Configuration conf = this.getConf();
Job job = createSubmittableJob(conf, args);
if (job != null) {
return job.waitForCompletion(true) ? 0 : 1;
}
return 1;
}
/**
* Main entry point.
*
* @param args The command line parameters.
* @throws Exception When running the job fails.
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(HBaseConfiguration.create(), new VerifyReplication(), args);
System.exit(res);
}
}
| baishuo/hbase-1.0.0-cdh5.4.7_baishuo | hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java | Java | apache-2.0 | 14,404 |
<?php
session_start();
include_once("../functions.inc.php");
$CONF['title_header']=lang('NEW_title')." - ".$CONF['name_of_firm'];
if (validate_user($_SESSION['helpdesk_user_id'], $_SESSION['code'])) {
if ($_SESSION['helpdesk_user_id']) {
include("head.inc.php");
include("navbar.inc.php");
//check_unlinked_file();
?>
<div class="container" id="form_add">
<input type="hidden" id="main_last_new_ticket" value="<?=get_last_ticket_new($_SESSION['helpdesk_user_id']);?>">
<div class="row" style="padding-bottom:20px;">
<div class="col-md-8"> <center><h3><i class="fa fa-tag"></i> <?=lang('NEW_title');?></h3></center></div>
</div>
<div class="row" style="padding-bottom:20px;">
<div class="col-md-8" id="div_new">
<?php
if (isset($_GET['ok'])) {
$h = isset($_GET['h']) ? htmlspecialchars($_GET['h']) : '';
?>
<div class="alert alert-success alert-dismissable">
<button type="button" class="close" data-dismiss="alert" aria-hidden="true">×</button>
<strong><i class="fa fa-check"></i> <?=lang('NEW_ok');?></strong> <?=lang('NEW_ok_1');?> <a class="alert-link" href="<?=$CONF['hostname']?>ticket?<?=$h;?>"><?=lang('NEW_ok_2');?></a> <?=lang('NEW_ok_3');?>
<a class="alert-link" href="<?=$CONF['hostname']?>print_ticket?<?=$h;?>"target="_blank"> <?=lang('NEW_ok_4');?></a>.
</div>
<?php
}
?>
<div class="panel panel-success" style="padding:20px;">
<div class="panel-body">
<div class="form-horizontal" id="main_form" novalidate="" action="" method="post">
<div class="control-group">
<div class="controls">
<div class="form-group" id="for_fio">
<label for="fio" class="col-sm-2 control-label" data-toggle="tooltip" data-placement="top" title="<?=lang('NEW_from_desc');?>"><small><?=lang('NEW_from');?>: </small></label>
<div class="col-sm-10">
<input type="text" name="fio" class="form-control input-sm" id="fio" placeholder="<?=lang('NEW_fio');?>" autofocus data-toggle="popover" data-trigger="manual" data-html="true" data-placement="right" data-content="<small><?=lang('NEW_fio_desc');?></small>">
</div>
</div></div>
<hr>
<div class="form-group" id="for_to" data-toggle="popover" data-html="true" data-trigger="manual" data-placement="right">
<label for="to" class="col-md-2 control-label" data-toggle="tooltip" data-placement="top" title="<?=lang('NEW_to_desc');?>"><small><?=lang('NEW_to');?>: </small></label>
<div class="col-md-6">
<select data-placeholder="<?=lang('NEW_to_unit');?>" class="chosen-select form-control" id="to" name="unit_id">
<option value="0"></option>
<?php
/*$qstring = "SELECT name as label, id as value FROM deps where id !='0' ;";
$result = mysql_query($qstring);//query the database for entries containing the
while ($row = mysql_fetch_array($result,MYSQL_ASSOC)) {
*/
$stmt = $dbConnection->prepare('SELECT name as label, id as value FROM deps where id !=:n AND status=:s');
$stmt->execute(array(':n'=>'0',':s'=>'1'));
$res1 = $stmt->fetchAll();
foreach($res1 as $row) {
$row['value']=(int)$row['value'];
?>
<option value="<?=$row['value']?>"><?=$row['label']?></option>
<?php
}
?>
</select>
</div>
<div class="col-md-4" style="" id="dsd" data-toggle="popover" data-html="true" data-trigger="manual" data-placement="right" data-content="<small><?=lang('NEW_to_unit_desc');?></small>">
<select data-placeholder="<?=lang('NEW_to_user');?>" id="users_do" name="unit_id">
<option></option>
<?php
/* $qstring = "SELECT fio as label, id as value FROM users where status='1' and login !='system' order by fio ASC;";
$result = mysql_query($qstring);//query the database for entries containing the term
while ($row = mysql_fetch_array($result,MYSQL_ASSOC)){
*/
$stmt = $dbConnection->prepare('SELECT fio as label, id as value FROM users where status=:n and login !=:system order by fio ASC');
$stmt->execute(array(':n'=>'1',':system'=>'system'));
$res1 = $stmt->fetchAll();
foreach($res1 as $row) {
$row['value']=(int)$row['value'];
if (get_user_status_text($row['value']) == "online") {$s="status-online-icon";}
else if (get_user_status_text($row['value']) == "offline") {$s="status-offline-icon";}
?>
<option data-foo="<?=$s;?>" value="<?=$row['value']?>"><?=nameshort($row['label'])?> </option>
<?php
}
?>
</select>
</div>
</div>
</div>
<div class="control-group" id="for_prio">
<div class="controls">
<div class="form-group">
<label for="" class="col-sm-2 control-label"><small><?=lang('NEW_prio');?>: </small></label>
<div class="col-sm-10" style=" padding-top: 5px; ">
<div class="btn-group btn-group-justified">
<div class="btn-group">
<button type="button" class="btn btn-primary btn-xs" id="prio_low"><i id="lprio_low" class=""></i><?=lang('NEW_prio_low');?></button>
</div>
<div class="btn-group">
<button type="button" class="btn btn-info btn-xs active" id="prio_normal"><i id="lprio_norm" class="fa fa-check"></i> <?=lang('NEW_prio_norm');?></button>
</div>
<div class="btn-group">
<button type="button" class="btn btn-danger btn-xs" data-toggle="tooltip" data-placement="top" title="<?=lang('NEW_prio_high_desc');?>" id="prio_high"><i id="lprio_high" class=""></i><?=lang('NEW_prio_high');?></button>
</div>
</div>
</div></div></div></div>
<?php
if ($CONF['fix_subj'] == "false") {
?>
<div class="control-group" id="for_subj">
<div class="controls">
<div class="form-group">
<label for="subj" class="col-sm-2 control-label"><small><?=lang('NEW_subj');?>: </small></label>
<div class="col-sm-10">
<input type="text" class="form-control input-sm" name="subj" id="subj" placeholder="<?=lang('NEW_subj');?>" data-toggle="popover" data-html="true" data-trigger="manual" data-placement="right" data-content="<small><?=lang('NEW_subj_msg');?></small>">
</div>
</div></div></div>
<?php }
else if ($CONF['fix_subj'] == "true") {
?>
<div class="control-group" id="for_subj" data-toggle="popover" data-html="true" data-trigger="manual" data-placement="right" data-content="<small><?=lang('NEW_subj_msg');?></small>">
<div class="controls">
<div class="form-group">
<label for="subj" class="col-sm-2 control-label"><small><?=lang('NEW_subj');?>: </small></label>
<div class="col-sm-10" style="">
<select data-placeholder="<?=lang('NEW_subj_det');?>" class="chosen-select form-control input-sm" id="subj" name="subj">
<option value="0"></option>
<?php
/*$qstring = "SELECT name FROM subj order by name COLLATE utf8_unicode_ci ASC";
$result = mysql_query($qstring);//query the database for entries containing the term
while ($row = mysql_fetch_array($result,MYSQL_ASSOC)) {
*/
$stmt = $dbConnection->prepare('SELECT name FROM subj order by name COLLATE utf8_unicode_ci ASC');
$stmt->execute();
$res1 = $stmt->fetchAll();
foreach($res1 as $row) {
?>
<option value="<?=$row['name']?>"><?=$row['name']?></option>
<?php
}
?>
</select>
</div>
</div>
</div>
</div>
<?php } ?>
<div class="control-group">
<div class="controls">
<div class="form-group" id="for_msg">
<label for="msg" class="col-sm-2 control-label"><small><?=lang('NEW_MSG');?>:</small></label>
<div class="col-sm-10">
<textarea data-toggle="popover" data-html="true" data-trigger="manual" data-placement="right" data-content="<small><?=lang('NEW_MSG_msg');?></small>" placeholder="<?=lang('NEW_MSG_ph');?>" class="form-control input-sm animated" name="msg" id="msg" rows="3" required="" data-validation-required-message="Укажите сообщение" aria-invalid="false"></textarea>
</div>
</div>
<div class="help-block"></div></div></div>
<?php if ($CONF['file_uploads'] == "true") { ?>
<div class="control-group">
<div class="controls">
<div class="form-group">
<label for="" class="col-sm-2 control-label"><small><?=lang('TICKET_file_add');?>:</small></label>
<div class="col-sm-10">
<form id="fileupload" action="" method="POST" enctype="multipart/form-data">
<div class="fileupload-buttonbar">
<div class="">
<!-- The fileinput-button span is used to style the file input field as button -->
<span class="btn btn-success fileinput-button btn-xs">
<i class="glyphicon glyphicon-plus"></i>
<span><?=lang('TICKET_file_upload')?></span>
<input id="filer" type="file" name="files[]" multiple>
</span>
<!--button data-toggle="popover" data-html="true" data-trigger="manual" data-placement="top" data-content="<small><?=lang('upload_not_u')?></small>" type="submit" class="btn btn-primary start btn-xs" id="start_upload">
<i class="glyphicon glyphicon-upload"></i>
<span><?=lang('TICKET_file_startupload');?></span>
</button>
<button type="reset" class="btn btn-warning cancel btn-xs">
<i class="glyphicon glyphicon-ban-circle"></i>
<span><?=lang('TICKET_file_notupload')?></span>
</button--><br>
<small class="text-muted"><?=lang('TICKET_file_upload_msg');?></small>
<!-- The global file processing state -->
<span class="fileupload-process"></span>
</div>
</div>
<!-- The table listing the files available for upload/download -->
<table role="presentation" class="table table-striped"><tbody class="files"></tbody></table>
</form>
</div>
</div>
</div>
</div>
<?php } ?>
<div class="col-md-2"></div>
<div class="col-md-10" id="processing">
<div class="btn-group btn-group-justified">
<div class="btn-group">
<button id="enter_ticket" class="btn btn-success" type="button"><i class="fa fa-check-circle-o"></i> <?=lang('NEW_button_create');?></button>
</div>
<div class="btn-group">
<button id="reset_ticket" class="btn btn-default" type="submit"><i class="fa fa-eraser"></i> <?=lang('NEW_button_reset');?></button>
</div>
</div>
<input type="hidden" id="file_array" value="">
<input type="hidden" id="client_id_param" value="">
<input type="hidden" id="hashname" value="<?=md5(time());?>">
<input type="hidden" id="status_action" value="">
<input type="hidden" id="prio" value="1">
<input type="hidden" value="<?php echo $_SESSION['helpdesk_user_id']; ?>" id="user_init_id">
<input type="hidden" id="file_types" value="<?=$CONF['file_types']?>">
<input type="hidden" id="file_size" value="<?=$CONF['file_size']?>">
</div>
</div>
</div>
</div>
<br>
</div>
<div class="col-md-4">
<div class="panel panel-success" id="user_info" style="display: block;">
</div>
<div id="alert_add">
</div>
</div>
</div>
</div>
</div>
<?php
include("footer.inc.php");
?>
<script id="template-upload" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-upload fade" id="up_entry">
<td>
<span class="preview"></span>
</td>
<td>
<p class="name">
{% if (file.name.length>20) { %}
{%=file.name.substr(0,10) %}...{%=file.name.substr(-5) %}
{% } %}
{% if (file.name.length<20) { %}
{%=file.name%}
{% } %}
</p>
<strong class="error text-danger"></strong>
</td>
<td>
<p class="size">Processing...</p>
<div class="progress progress-striped active" role="progressbar" aria-valuemin="0" aria-valuemax="100" aria-valuenow="0"><div class="progress-bar progress-bar-success" style="width:0%;"></div></div>
</td>
<td>
{% if (!i && !o.options.autoUpload) { %}
<button id="s_start" class="btn btn-primary start btn-xs" disabled><i class="glyphicon glyphicon-upload"></i> <?=lang('TICKET_file_startupload');?>
</button>
{% } %}
{% if (!i) { %}
<button class="btn btn-warning cancel btn-xs">
<i class="glyphicon glyphicon-ban-circle"></i>
<span><?=lang('TICKET_file_notupload_one');?></span>
</button>
{% } %}
</td>
</tr>
{% } %}
</script>
<!-- The template to display files available for download -->
<script id="template-download" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-download fade">
<td>
<span class="preview">
{% if (file.thumbnailUrl) { %}
<a href="{%=file.url%}" title="{%=file.name%}" download="{%=file.name%}" data-gallery><img src="{%=file.thumbnailUrl%}"></a>
{% } %}
</span>
</td>
<td>
<p class="name">
{% if (file.name2.length>30) { %}
<?=lang('file_info');?>: {%=file.name2.substr(0,30) %}...{%=file.name2.substr(-5) %} - <?=lang('file_info2');?>
{% } %}
{% if (file.name2.length<30) { %}
<?=lang('file_info');?>: {%=file.name2%} - <?=lang('file_info2');?>
{% } %}
</p>
{% if (file.error) { %}
<div><span class="label label-danger">Error</span> {%=file.error%}</div>
{% } %}
</td>
<td>
<span class="size">{%=o.formatFileSize(file.size)%}</span>
</td>
<td>
<p class="name">
<span class="label label-success"><i class="fa fa-check"></i> ok</span>
</p>
</td>
</tr>
{% } %}
</script>
<?php
}
}
else {
include 'auth.php';
}
?>
| vik0803/hd.rustem | inc/new.php | PHP | apache-2.0 | 15,273 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysml.runtime.controlprogram.parfor.opt;
import org.apache.sysml.runtime.controlprogram.parfor.opt.PerfTestTool.DataFormat;
/**
*
* TODO extend to right as well (see PerfTestTool, currently only trained with regard to left)
* TODO integrate mem and exec time for reuse?
*
*/
public class OptNodeStatistics
{
public static final long DEFAULT_DIMENSION = 100;
public static final double DEFAULT_SPARSITY = 1.0;
public static final DataFormat DEFAULT_DATAFORMAT = DataFormat.DENSE;
//operation characteristics
private long _dim1 = -1; //rows left
private long _dim2 = -1; //cols left
private long _dim3 = -1; //rows right
private long _dim4 = -1; //cols right
private double _sparsity = -1; //sparsity left
private DataFormat _df = null; //data format left
/**
* Default constructor, sets all internal statistics to their respective default values.
*/
public OptNodeStatistics( )
{
_dim1 = DEFAULT_DIMENSION;
_dim2 = DEFAULT_DIMENSION;
_dim3 = DEFAULT_DIMENSION;
_dim4 = DEFAULT_DIMENSION;
_sparsity = DEFAULT_SPARSITY;
_df = DEFAULT_DATAFORMAT;
}
public OptNodeStatistics( long dim1, long dim2, long dim3, long dim4, double sparsity, DataFormat df )
{
_dim1 = dim1;
_dim2 = dim2;
_dim3 = dim3;
_dim4 = dim4;
_sparsity = sparsity;
_df = df;
}
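// Example (hypothetical sizes): statistics for a dense 10000x10 left-hand
// matrix and a 10x1 right-hand side could be captured as
//   new OptNodeStatistics(10000, 10, 10, 1, 1.0, DataFormat.DENSE);
// while the no-arg constructor falls back to the DEFAULT_* values above.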
public long getDim1()
{
return _dim1;
}
public void setDim1(long dim1)
{
_dim1 = dim1;
}
public long getDim2()
{
return _dim2;
}
public void setDim2(long dim2)
{
_dim2 = dim2;
}
public long getDim3()
{
return _dim3;
}
public void setDim3(long dim3)
{
_dim3 = dim3;
}
public long getDim4()
{
return _dim4;
}
public void setDim4(long dim4)
{
_dim4 = dim4;
}
public double getSparsity()
{
return _sparsity;
}
public void setSparsity(double sparsity)
{
_sparsity = sparsity;
}
public DataFormat getDataFormat()
{
return _df;
}
public void setDataFormat(DataFormat df)
{
_df = df;
}
}
| fschueler/incubator-systemml | src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptNodeStatistics.java | Java | apache-2.0 | 2,875 |
import Check from "./Check.js";
import defaultValue from "./defaultValue.js";
import defined from "./defined.js";
import DeveloperError from "./DeveloperError.js";
import CesiumMath from "./Math.js";
/**
* Creates a curve parameterized and evaluated by time. This type describes an interface
* and is not intended to be instantiated directly.
*
* @alias Spline
* @constructor
*
* @see CatmullRomSpline
* @see HermiteSpline
* @see LinearSpline
* @see QuaternionSpline
*/
function Spline() {
/**
* An array of times for the control points.
* @type {Number[]}
* @default undefined
*/
this.times = undefined;
/**
* An array of control points.
* @type {Cartesian3[]|Quaternion[]}
* @default undefined
*/
this.points = undefined;
DeveloperError.throwInstantiationError();
}
/**
* Evaluates the curve at a given time.
* @function
*
* @param {Number} time The time at which to evaluate the curve.
* @param {Cartesian3|Quaternion|Number[]} [result] The object onto which to store the result.
* @returns {Cartesian3|Quaternion|Number[]} The modified result parameter or a new instance of the point on the curve at the given time.
*
* @exception {DeveloperError} time must be in the range <code>[t<sub>0</sub>, t<sub>n</sub>]</code>, where <code>t<sub>0</sub></code>
* is the first element in the array <code>times</code> and <code>t<sub>n</sub></code> is the last element
* in the array <code>times</code>.
*/
Spline.prototype.evaluate = DeveloperError.throwInstantiationError;
/**
* Finds an index <code>i</code> in <code>times</code> such that the parameter
* <code>time</code> is in the interval <code>[times[i], times[i + 1]]</code>.
*
* @param {Number} time The time.
* @param {Number} startIndex The index from which to start the search.
* @returns {Number} The index for the element at the start of the interval.
*
* @exception {DeveloperError} time must be in the range <code>[t<sub>0</sub>, t<sub>n</sub>]</code>, where <code>t<sub>0</sub></code>
* is the first element in the array <code>times</code> and <code>t<sub>n</sub></code> is the last element
* in the array <code>times</code>.
*/
Spline.prototype.findTimeInterval = function (time, startIndex) {
var times = this.times;
var length = times.length;
//>>includeStart('debug', pragmas.debug);
if (!defined(time)) {
throw new DeveloperError("time is required.");
}
if (time < times[0] || time > times[length - 1]) {
throw new DeveloperError("time is out of range.");
}
//>>includeEnd('debug');
// Take advantage of temporal coherence by checking current, next and previous intervals
// for containment of time.
startIndex = defaultValue(startIndex, 0);
if (time >= times[startIndex]) {
if (startIndex + 1 < length && time < times[startIndex + 1]) {
return startIndex;
} else if (startIndex + 2 < length && time < times[startIndex + 2]) {
return startIndex + 1;
}
} else if (startIndex - 1 >= 0 && time >= times[startIndex - 1]) {
return startIndex - 1;
}
// The above failed, so do a linear search. For the use cases so far, the
// length of the list is less than 10. In the future, if there is a bottleneck,
// it might be here.
var i;
if (time > times[startIndex]) {
for (i = startIndex; i < length - 1; ++i) {
if (time >= times[i] && time < times[i + 1]) {
break;
}
}
} else {
for (i = startIndex - 1; i >= 0; --i) {
if (time >= times[i] && time < times[i + 1]) {
break;
}
}
}
if (i === length - 1) {
i = length - 2;
}
return i;
};
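// A small worked example (assumed values): with this.times = [0, 1, 2, 3],
// findTimeInterval(1.5, 0) hits the "next interval" fast path and returns 1,
// since 1.5 lies in [times[1], times[2]]; findTimeInterval(3, 2) falls
// through to the linear search and returns length - 2 = 2, because times[3]
// is the final knot.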
/**
* Wraps the given time to the period covered by the spline.
* @function
*
* @param {Number} time The time.
* @return {Number} The time, wrapped around the animation period.
*/
Spline.prototype.wrapTime = function (time) {
//>>includeStart('debug', pragmas.debug);
Check.typeOf.number("time", time);
//>>includeEnd('debug');
var times = this.times;
var timeEnd = times[times.length - 1];
var timeStart = times[0];
var timeStretch = timeEnd - timeStart;
var divs;
if (time < timeStart) {
divs = Math.floor((timeStart - time) / timeStretch) + 1;
time += divs * timeStretch;
}
if (time > timeEnd) {
divs = Math.floor((time - timeEnd) / timeStretch) + 1;
time -= divs * timeStretch;
}
return time;
};
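// A small worked example (assumed values): with this.times = [0, 10] the
// period is 10, so wrapTime(12) returns 2 (one period subtracted),
// wrapTime(-3) returns 7 (one period added), and wrapTime(5) is returned
// unchanged.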
/**
* Clamps the given time to the period covered by the spline.
* @function
*
* @param {Number} time The time.
* @return {Number} The time, clamped to the animation period.
*/
Spline.prototype.clampTime = function (time) {
//>>includeStart('debug', pragmas.debug);
Check.typeOf.number("time", time);
//>>includeEnd('debug');
var times = this.times;
return CesiumMath.clamp(time, times[0], times[times.length - 1]);
};
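// Likewise (assumed values): with this.times = [0, 10], clampTime(12)
// returns 10, clampTime(-3) returns 0, and clampTime(5) is returned
// unchanged.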
export default Spline;
| progsung/cesium | Source/Core/Spline.js | JavaScript | apache-2.0 | 4,935 |
/*******************************************************************************
* Copyright (c) 2010 Haifeng Li
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package smile.feature;
import smile.data.Attribute;
import smile.data.NominalAttribute;
import smile.data.NumericAttribute;
/**
* Nominal variable to binary dummy variable feature generator. Although some
* methods such as decision trees can handle nominal variables directly, other
* methods generally require nominal variables to be converted to multiple
* binary dummy variables that indicate the presence or absence of a characteristic.
*
* @author Haifeng Li
*/
public class Nominal2Binary implements Feature<double[]> {
/**
* The variable attributes.
*/
private Attribute[] attributes;
/**
* The attributes of generated binary dummy variables.
*/
private Attribute[] features;
/**
* A map from feature id to original attribute index.
*/
private int[] map;
/**
* A map from feature id to nominal attribute value.
*/
private int[] value;
/**
* Constructor.
* @param attributes the variable attributes, of which the nominal variables
* will be converted to binary dummy variables.
*/
public Nominal2Binary(Attribute[] attributes) {
this.attributes = attributes;
int p = 0;
for (Attribute attribute : attributes) {
if (attribute instanceof NominalAttribute) {
NominalAttribute nominal = (NominalAttribute) attribute;
p += nominal.size();
}
}
features = new Attribute[p];
map = new int[p];
value = new int[p];
for (int i = 0, j = 0; j < attributes.length; j++) {
Attribute attribute = attributes[j];
if (attribute instanceof NominalAttribute) {
NominalAttribute nominal = (NominalAttribute) attribute;
double weight = nominal.weight;
String name = nominal.name;
String description = nominal.description;
for (int k = 0; k < nominal.size(); k++, i++) {
features[i] = new NumericAttribute(name + "_" + k, description, weight);
map[i] = j;
value[i] = k;
}
}
}
}
@Override
public Attribute[] attributes() {
return features;
}
@Override
public double f(double[] object, int id) {
if (object.length != attributes.length) {
throw new IllegalArgumentException(String.format("Invalide object size %d, expected %d", object.length, attributes.length));
}
if (id < 0 || id >= features.length) {
throw new IllegalArgumentException("Invalide feature id: " + id);
}
if (object[map[id]] == value[id]) {
return 1;
} else {
return 0;
}
}
}
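/*
 * A hedged usage sketch (attribute names and sizes are hypothetical): given a
 * nominal attribute "color" with the three values {0, 1, 2}, the generator
 * emits the numeric dummy features color_0, color_1 and color_2. For an
 * instance double[] x = {1.0}, f(x, 0) == 0, f(x, 1) == 1 and f(x, 2) == 0,
 * because f(x, id) returns 1 exactly when x[map[id]] equals the nominal value
 * encoded by feature id.
 */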
| dublinio/smile | Smile/src/main/java/smile/feature/Nominal2Binary.java | Java | apache-2.0 | 3,646 |
define(function(require, exports, module) {
var Notify = require('common/bootstrap-notify');
exports.run = function() {
var $table = $('#teacher-table');
$table.on('click', '.promote-user', function(){
$.post($(this).data('url'),function(response) {
window.location.reload();
});
});
$table.on('click', '.cancel-promote-user', function(){
$.post($(this).data('url'),function(response) {
window.location.reload();
});
});
};
}); | 18826252059/im | web/bundles/topxiaadmin/js/controller/user/teacher-list.js | JavaScript | apache-2.0 | 539 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/mlir/lite/utils/lstm_utils.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h" // from @llvm-project
#include "mlir/IR/Attributes.h" // from @llvm-project
#include "mlir/IR/Builders.h" // from @llvm-project
#include "mlir/IR/Function.h" // from @llvm-project
#include "mlir/IR/Identifier.h" // from @llvm-project
#include "mlir/IR/Location.h" // from @llvm-project
#include "mlir/IR/MLIRContext.h" // from @llvm-project
#include "mlir/IR/OpDefinition.h" // from @llvm-project
#include "mlir/IR/Operation.h" // from @llvm-project
#include "mlir/IR/StandardTypes.h" // from @llvm-project
#include "mlir/IR/Types.h" // from @llvm-project
#include "mlir/IR/Value.h" // from @llvm-project
#include "mlir/Support/LLVM.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace TFL {
namespace {
Value CreateI32SplatConst(OpBuilder* builder, ArrayRef<int64_t> shape,
int32_t val, mlir::Location location) {
auto type = RankedTensorType::get(shape, builder->getIntegerType(32));
auto attr = DenseElementsAttr::get(type, val);
return builder->create<ConstantOp>(location, type, attr);
}
Value CreateF32SplatConst(OpBuilder* builder, ArrayRef<int64_t> shape,
float val, mlir::Location location) {
auto type = RankedTensorType::get(shape, builder->getF32Type());
auto attr = DenseElementsAttr::get(type, val);
return builder->create<ConstantOp>(location, type, attr);
}
Value CreateTfF32ConstOp(OpBuilder* builder, ArrayRef<int64_t> shape, float val,
mlir::Location location) {
auto type = RankedTensorType::get(shape, builder->getF32Type());
auto ele_type = RankedTensorType::get({1}, builder->getF32Type());
auto attr = DenseElementsAttr::get(ele_type, val);
return builder->create<TF::ConstOp>(location, type, attr);
}
Value CreateI64DenseConst(OpBuilder* builder, ArrayRef<int64_t> shape,
ArrayRef<int64_t> values, mlir::Location location) {
auto type = RankedTensorType::get(static_cast<int>(shape.size()),
builder->getIntegerType(64));
auto attr = DenseElementsAttr::get(type, values);
return builder->create<ConstantOp>(location, type, attr);
}
Value CreateI32DenseConst(OpBuilder* builder, ArrayRef<int32_t> values,
mlir::Location location) {
auto type = RankedTensorType::get(static_cast<int>(values.size()),
builder->getIntegerType(32));
auto attr = DenseElementsAttr::get(type, values);
return builder->create<ConstantOp>(location, type, attr);
}
Value CreateNoneValue(OpBuilder* builder, mlir::Location location) {
return builder->create<mlir::ConstantOp>(location, builder->getNoneType(),
builder->getUnitAttr());
}
Value Transpose(OpBuilder* builder, Value value_to_transpose,
SmallVector<int32_t, 4> perm, RankedTensorType original_type,
mlir::Location location) {
// Create a constant op for transpose permutation.
auto perm_op = CreateI32DenseConst(builder, perm, location);
// Create tensor type for the transpose result.
auto transpose_type = original_type;
auto transpose_shape =
llvm::to_vector<8>(llvm::map_range(perm, [transpose_type](int32_t dim) {
return transpose_type.getDimSize(dim);
}));
auto elem_type = transpose_type.getElementType();
auto result_type = RankedTensorType::get(transpose_shape, elem_type);
return builder->create<TF::TransposeOp>(location, result_type,
value_to_transpose, perm_op);
}
Value Transpose2D(OpBuilder* builder, Value value_to_transpose,
RankedTensorType type, mlir::Location location) {
// Create a constant op for transpose permutation.
SmallVector<int32_t, 4> perm = {1, 0};
return Transpose(builder, value_to_transpose, perm, type, location);
}
Value Reverse(OpBuilder* builder, Value value_to_reverse, int axis,
RankedTensorType type, mlir::Location location) {
auto axis_op = CreateI32SplatConst(builder, {1}, axis, location);
// The result type will be the same as the input.
return builder->create<TF::ReverseV2Op>(location, type, value_to_reverse,
axis_op);
}
ArrayRef<int64_t> GetRankedTensorShape(Value value) {
return value.getType().cast<RankedTensorType>().getShape();
}
Value SliceRankedTensor(OpBuilder* builder, Value input,
ArrayRef<int64_t> begin_shape,
ArrayRef<int64_t> begin_values,
ArrayRef<int64_t> size_shape,
ArrayRef<int64_t> size_values,
mlir::Location location) {
// If the size of the tensor to be sliced from the input overflows
// the input tensor's dimensions, return a 0-valued tensor of the requested
// shape.
ArrayRef<int64_t> input_shape = GetRankedTensorShape(input);
for (int i = 0, end = input_shape.size(); i < end; i++) {
if (begin_values[i] < 0 ||
(begin_values[i] + size_values[i] > input_shape[i])) {
return CreateF32SplatConst(builder, size_shape, 0, location);
}
}
// Create a dense constant op for slice's begin
auto slice_i2c_begin =
CreateI64DenseConst(builder, begin_shape, begin_values, location);
// Create a dense constant op for slice's size
auto slice_i2c_size =
CreateI64DenseConst(builder, size_shape, size_values, location);
return builder->create<TF::SliceOp>(
location,
RankedTensorType::get(
size_values,
input.getType().cast<RankedTensorType>().getElementType()),
input, slice_i2c_begin, slice_i2c_size);
}
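// A small worked example of SliceRankedTensor (shapes are hypothetical):
// slicing a [8, 10] input with begin_values {4, 0} and size_values {4, 10}
// stays in bounds and lowers to a TF::SliceOp, whereas begin_values {6, 0}
// with the same size overflows dimension 0 (6 + 4 > 8) and instead yields an
// all-zero constant of the requested size_shape.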
Value CreateStridedSliceOp(mlir::Location loc, ArrayRef<int64_t> output_shape,
Value input, ArrayRef<int32_t> begin,
ArrayRef<int32_t> end, ArrayRef<int32_t> strides,
int64_t begin_mask, int64_t end_mask,
int64_t ellipsis_mask, int64_t new_axis_mask,
int64_t shrink_axis_mask, OpBuilder* builder) {
auto output_type = RankedTensorType::get(
output_shape, input.getType().cast<RankedTensorType>().getElementType());
auto begin_tensor = CreateI32DenseConst(builder, begin, loc);
auto end_tensor = CreateI32DenseConst(builder, end, loc);
auto strides_tensor = CreateI32DenseConst(builder, strides, loc);
return builder->create<TF::StridedSliceOp>(
loc, output_type, input, begin_tensor, end_tensor, strides_tensor,
builder->getI64IntegerAttr(begin_mask),
builder->getI64IntegerAttr(end_mask),
builder->getI64IntegerAttr(ellipsis_mask),
builder->getI64IntegerAttr(new_axis_mask),
builder->getI64IntegerAttr(shrink_axis_mask));
}
} // namespace
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToCellGate() {
SmallVector<int64_t, 2> begin_i2c_values = {0, 0};
input2cell_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_i2c_values,
weight_slice_shape_, weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToInputGate() {
SmallVector<int64_t, 2> begin_i2i_values = {n_cell_, 0};
input2input_ = couple_input_forget_gates_
? none_
: SliceRankedTensor(&builder_, weight_transposed_,
weight_slice_shape_, begin_i2i_values,
weight_slice_shape_,
weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToForgetGate() {
int input_forget_start = couple_input_forget_gates_ ? n_cell_ : 2 * n_cell_;
SmallVector<int64_t, 2> begin_i2f_values = {input_forget_start, 0};
input2forget_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_i2f_values,
weight_slice_shape_, weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToOutputGate() {
int input_output_start =
couple_input_forget_gates_ ? 2 * n_cell_ : 3 * n_cell_;
SmallVector<int64_t, 2> begin_i2o_values = {input_output_start, 0};
input2output_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_i2o_values,
weight_slice_shape_, weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToCellGate() {
SmallVector<int64_t, 2> begin_rec2c_values = {0, n_input_};
rec2cell_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_rec2c_values,
weight_slice_shape_, weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToInputGate() {
SmallVector<int64_t, 2> begin_rec2i_values = {n_cell_, n_input_};
rec2input_ = couple_input_forget_gates_
? none_
: SliceRankedTensor(&builder_, weight_transposed_,
weight_slice_shape_, begin_rec2i_values,
weight_slice_shape_,
weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToForgetGate() {
int rec_forget_start = couple_input_forget_gates_ ? n_cell_ : 2 * n_cell_;
SmallVector<int64_t, 2> begin_rec2f_values = {rec_forget_start, n_input_};
rec2forget_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_rec2f_values,
weight_slice_shape_, weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToOutputGate() {
int rec_output_start = couple_input_forget_gates_ ? 2 * n_cell_ : 3 * n_cell_;
SmallVector<int64_t, 2> begin_rec2o_values = {rec_output_start, n_input_};
rec2output_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_rec2o_values,
weight_slice_shape_, weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToCellGate() {
SmallVector<int64_t, 1> begin_bias2c_values = {0};
bias2cell_ = SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2c_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToInputGate() {
SmallVector<int64_t, 1> begin_bias2i_values = {n_cell_};
bias2input_ =
couple_input_forget_gates_
? none_
: SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2i_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToForgetGate() {
int bias_forget_start = couple_input_forget_gates_ ? n_cell_ : 2 * n_cell_;
SmallVector<int64_t, 1> begin_bias2f_values = {bias_forget_start};
bias2forget_ = SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2f_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToOutputGate() {
int bias_output_start =
couple_input_forget_gates_ ? 2 * n_cell_ : 3 * n_cell_;
SmallVector<int64_t, 1> begin_bias2o_values = {bias_output_start};
bias2output_ = SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2o_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetProjection() {
SmallVector<int64_t, 2> projection_slice_shape = {
1, num_cols_projection_transposed_};
SmallVector<int64_t, 2> projection_slice_size_values = {n_output_, n_cell_};
SmallVector<int64_t, 2> projection_slice_begin_values = {0, 0};
proj_weight_ =
!projection_
? none_
: SliceRankedTensor(
&builder_, projection_transposed_, projection_slice_shape,
projection_slice_begin_values, projection_slice_shape,
projection_slice_size_values, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetProjectionBias() {
proj_bias_ = !projection_type_
? none_
: CreateF32SplatConst(&builder_, {n_output_}, 0,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetInputActivationState() {
input_activation_state_ = CreateF32SplatConst(&builder_, {1, n_output_}, 0,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetInputCellState() {
input_cell_state_ =
CreateF32SplatConst(&builder_, {1, n_cell_}, 0, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetCellLayerNormCoefficients() {
cell_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::SetInputLayerNormCoefficients() {
input_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::SetForgetLayerNormCoefficients() {
forget_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::SetOutputLayerNormCoefficients() {
output_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::GenerateFusedOpOperands() {
// Transpose both weight and projection.
weight_transposed_ =
Transpose2D(&builder_, weight_, weight_type_, fused_func_op_.getLoc());
projection_transposed_ = Transpose2D(&builder_, projection_, projection_type_,
fused_func_op_.getLoc());
none_ = CreateNoneValue(&builder_, fused_func_op_.getLoc());
// Extract input to cifg gates via slicing the weight tensor
SetWeightForInputToCellGate();
SetWeightForInputToInputGate();
SetWeightForInputToForgetGate();
SetWeightForInputToOutputGate();
// Extract recurrent to cifg gates via slicing the weight tensor
SetWeightForRecurrentToCellGate();
SetWeightForRecurrentToInputGate();
SetWeightForRecurrentToForgetGate();
SetWeightForRecurrentToOutputGate();
// Extract bias to cifg gates via slicing the bias tensor
SetBiasToCellGate();
SetBiasToInputGate();
SetBiasToForgetGate();
SetBiasToOutputGate();
// Extract projection and set an empty projection bias
SetProjection();
SetProjectionBias();
// Set the variable tensors
SetInputActivationState();
SetInputCellState();
// Extract the layer norm coefficients
SetCellLayerNormCoefficients();
SetInputLayerNormCoefficients();
SetForgetLayerNormCoefficients();
SetOutputLayerNormCoefficients();
}
void ConvertLSTMCellSimpleToFusedLSTM::UpdateFuncSignature() {
// https://github.com/tensorflow/community/pull/113
SmallVector<int64_t, 2> output_shape{1, -1};
auto input_types = fused_func_op_.getType().getInputs();
auto output_type = mlir::RankedTensorType::get(
output_shape, input_.getType().cast<RankedTensorType>().getElementType());
fused_func_op_.setType(mlir::FunctionType::get(input_types, output_type,
fused_func_op_.getContext()));
}
LogicalResult ConvertLSTMCellSimpleToFusedLSTM::RewriteFunc() {
LogicalResult result = Initialize();
if (failed(result)) {
return result;
}
// Update the func signature, based on output shape.
// The func will ultimately return the output of the fused
// LSTM op.
UpdateFuncSignature();
// Transform the weights, projection, bias and layer norm coefficients
// to generate operands for the TFL fused LSTM op.
GenerateFusedOpOperands();
// Create the fused LSTM op.
SmallVector<int64_t, 2> output_shape = {1, n_output_};
auto result_type = mlir::RankedTensorType::get(
output_shape, input_.getType().cast<RankedTensorType>().getElementType());
lstm_ = builder_.create<mlir::TFL::LSTMOp>(
fused_func_op_.getLoc(), result_type, input_, input2input_, input2forget_,
input2cell_, input2output_, rec2input_, rec2forget_, rec2cell_,
rec2output_, /*cell_to_input_weights*/ none_,
/*cell_to_forget_weights*/ none_,
/*cell_to_output_weights*/ none_, bias2input_, bias2forget_, bias2cell_,
bias2output_, proj_weight_, proj_bias_, input_activation_state_,
input_cell_state_, input_layer_norm_coefficients_,
forget_layer_norm_coefficients_, cell_layer_norm_coefficients_,
output_layer_norm_coefficients_, builder_.getStringAttr("TANH"),
builder_.getF32FloatAttr(10.0), builder_.getF32FloatAttr(0.0),
builder_.getStringAttr("FULL"),
/*input_to_input_intermediate=*/mlir::TypeAttr(),
/*input_to_forget_intermediate=*/mlir::TypeAttr(),
/*input_to_cell_intermediate=*/mlir::TypeAttr(),
/*input_to_output_intermediate=*/mlir::TypeAttr(),
/*effective_hidden_scale_intermediate=*/mlir::TypeAttr());
// Cast the static shaped lstm result to FuncOp's signature -
// Ranked but unknown 2nd dimension to support stacking these.
SmallVector<int64_t, 2> func_output_shape = {1, -1};
auto func_result_type = mlir::RankedTensorType::get(
func_output_shape,
input_.getType().cast<RankedTensorType>().getElementType());
auto tensor_cast = builder_.create<mlir::TensorCastOp>(
fused_func_op_.getLoc(), lstm_.getResult(), func_result_type);
builder_.create<mlir::ReturnOp>(fused_func_op_.getLoc(),
tensor_cast.getResult());
return success();
}
LogicalResult ConvertLSTMCellSimpleToFusedLSTM::InitializeFromFuncAttributes() {
auto attr = fused_func_op_.getAttrOfType<StringAttr>(kTFImplements);
if (!attr) {
return fused_func_op_.emitError()
<< "Invalid function attribute, expected " << kTFImplements
<< " attribute "
"not found";
}
// TODO(ashwinm, b/144775479): Make these NamedAttribute on TF import
// once tf.function can support this.
llvm::SmallVector<llvm::StringRef, 4> attr_tokens;
attr.getValue().split(attr_tokens, ",");
if (attr_tokens.empty()) {
return fused_func_op_.emitError()
<< kTFImplements << " attribute should be set";
}
// Check if the interface matches.
if (GetCompositeOpName().str() != attr_tokens[0]) {
return fused_func_op_.emitError()
<< "Unexpected interface for the composite op. Expected: "
<< GetCompositeOpName() << " Actual: " << attr_tokens[0];
}
// Extract other interface attributes, for now cifg.
couple_input_forget_gates_ =
std::find(attr_tokens.begin() + 1, attr_tokens.end(),
kCoupleInputForgetGates) != attr_tokens.end();
return success();
}
LogicalResult ConvertLSTMCellSimpleToFusedLSTM::Initialize() {
if (failed(InitializeFromFuncAttributes())) {
return fused_func_op_.emitError()
<< "Expected function attributes were not set on the function "
"encapsulating the composite op";
}
num_gates_ = couple_input_forget_gates_ ? 3 : 4;
input_ = fused_func_op_.getArgument(0);
bias_ = fused_func_op_.getArgument(2);
weight_ = fused_func_op_.getArgument(1);
weight_type_ = weight_.getType().cast<RankedTensorType>();
if (weight_type_.getRank() != 2) {
return fused_func_op_.emitError() << "The weight tensor was not of rank 2";
}
if (weight_type_.getDimSize(1) % num_gates_ != 0) {
return fused_func_op_.emitError()
<< "Invalid dimension 1 of weight tensor, "
"should be divisible by the number of gates";
}
n_cell_ = weight_type_.getDimSize(1) / num_gates_;
projection_ = fused_func_op_.getArgument(3);
projection_type_ = projection_.getType().cast<RankedTensorType>();
if (projection_type_.getRank() != 2) {
n_output_ = n_cell_;
} else {
n_output_ = projection_type_.getDimSize(1);
}
n_input_ = weight_type_.getDimSize(0) - n_output_;
num_cols_weight_transposed_ = weight_type_.getDimSize(0);
num_cols_projection_transposed_ = projection_type_.getDimSize(0);
bias_slice_shape_ = {n_cell_};
bias_size_values_ = {n_cell_};
weight_slice_shape_ = {1, num_cols_weight_transposed_};
weight_slice_size_input_values_ = {n_cell_, n_input_};
weight_slice_size_recurrent_values_ = {n_cell_, n_output_};
return success();
}
LogicalResult ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::Initialize() {
if (failed(ConvertLSTMCellSimpleToFusedLSTM::Initialize())) {
return fused_func_op_.emitError()
<< "Specified LayerNormalizedLSTMCellSimple was not of the expected "
"interface and cannot not be converted to the fused LSTM op";
}
layer_norm_scale_ = fused_func_op_.getArgument(4);
layer_norm_scale_type_ = layer_norm_scale_.getType().cast<RankedTensorType>();
if (layer_norm_scale_type_.getRank() != 1) {
return fused_func_op_.emitError()
<< "The layer_norm_scale tensor was not of rank 1";
}
layer_norm_slice_shape_ = {n_cell_};
layer_norm_size_values_ = {n_cell_};
return success();
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetCellLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_cell_layer_norm_values = {0};
cell_layer_norm_coefficients_ =
SliceRankedTensor(&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_cell_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetInputLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_input_layer_norm_values = {n_cell_};
input_layer_norm_coefficients_ =
couple_input_forget_gates_
? none_
: SliceRankedTensor(
&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_input_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetForgetLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_forget_layer_norm_values = {2 * n_cell_};
forget_layer_norm_coefficients_ =
SliceRankedTensor(&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_forget_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetOutputLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_output_layer_norm_values = {3 * n_cell_};
output_layer_norm_coefficients_ =
SliceRankedTensor(&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_output_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
TF::ConstOp Create1DConstantOp(const std::vector<int>& value, Location loc,
OpBuilder* builder) {
auto type =
mlir::RankedTensorType::get(value.size(), builder->getIntegerType(32));
auto dense_values = mlir::DenseIntElementsAttr::get(type, value);
return builder->create<TF::ConstOp>(loc, dense_values);
}
TF::ConstOp CreateScalarConstantOp(int value, Location loc,
OpBuilder* builder) {
return builder->create<TF::ConstOp>(loc, builder->getI32IntegerAttr(value));
}
LogicalResult CreateEqualSizeSplitVOp(Value input, int axis, int splits,
Location loc, OpBuilder* builder,
Operation** result) {
auto input_type = input.getType().cast<RankedTensorType>();
SmallVector<int64_t, 4> output_shape;
int size_of_splits;
if (axis < 0 || axis >= input_type.getRank()) return failure();
for (int i = 0; i < input_type.getRank(); ++i) {
int dim = input_type.getDimSize(i);
if (i == axis) {
if (dim % splits != 0) {
return failure();
}
size_of_splits = dim / splits;
output_shape.push_back(size_of_splits);
} else {
output_shape.push_back(dim);
}
}
SmallVector<mlir::Type, 4> output_types;
for (int i = 0; i < splits; ++i) {
output_types.push_back(
mlir::RankedTensorType::get(output_shape, input_type.getElementType()));
}
// Repeat size_of_splits `splits` times so this stays correct for any count.
auto size_of_splits_op = Create1DConstantOp(
std::vector<int>(splits, size_of_splits), loc, builder);
auto axis_op = CreateScalarConstantOp(axis, loc, builder);
*result = builder->create<TF::SplitVOp>(loc, output_types, input,
size_of_splits_op.getResult(),
axis_op.getResult());
return success();
}
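// A small worked example of CreateEqualSizeSplitVOp (shapes are
// hypothetical): splitting a [12, 8] tensor along axis 0 with splits = 4
// produces four [3, 8] results from a single TF::SplitVOp, while a [10, 8]
// tensor fails because 10 is not divisible by 4. The LSTM conversions below
// rely on this to carve fused kernels into the i, f, c, o gate weights.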
// TODO(b/147436982): Consider refactor this to be more general.
LogicalResult ConvertKerasLSTMLayer(mlir::FuncOp func_op, OpBuilder* builder) {
// For argument order, please check out standard_lstm under
// tensorflow/python/keras/layers/recurrent_v2.py
Value input = func_op.getArgument(0);
Value output_init_state = func_op.getArgument(1);
Value hidden_init_state = func_op.getArgument(2);
Value weight_kernel = func_op.getArgument(3);
Value recurrent_kernel = func_op.getArgument(4);
Value bias = func_op.getArgument(5);
// The func op should have 5 outputs.
if (func_op.getNumResults() != 5) return failure();
// TFL lstm only supports time-majored inputs, so if it's not time-majored,
// we will transpose the inputs and outputs.
auto time_major_attr = func_op.getAttrOfType<BoolAttr>("tf.time_major");
if (time_major_attr == nullptr) return failure();
bool time_majored = time_major_attr.getValue();
auto input_type = input.getType().dyn_cast_or_null<RankedTensorType>();
if (!input_type) {
func_op.emitError() << "Input type is not a ranked tensor type";
return failure();
}
auto final_inputs = input;
auto final_input_type = input_type;
// Handle go_backwards:
// In Keras semantics, the LSTM reverses the input sequence if go_backwards is set.
auto go_backwards_attr = func_op.getAttrOfType<BoolAttr>("tf.go_backwards");
if (go_backwards_attr != nullptr && go_backwards_attr.getValue()) {
int time_dim = time_majored ? 0 : 1;
final_inputs = Reverse(builder, final_inputs, time_dim, final_input_type,
func_op.getLoc());
}
int batch = time_majored ? final_input_type.getDimSize(1)
: final_input_type.getDimSize(0);
int time = time_majored ? final_input_type.getDimSize(0)
: final_input_type.getDimSize(1);
// Setup correct weights.
RankedTensorType weight_type =
weight_kernel.getType().cast<RankedTensorType>();
if (weight_type.getRank() != 2)
return func_op.emitError() << "The weight should be rank of 2";
Value transposed_weight_kernel =
Transpose2D(builder, weight_kernel, weight_type, func_op.getLoc());
RankedTensorType recurrent_kernel_type =
recurrent_kernel.getType().cast<RankedTensorType>();
const int n_output = recurrent_kernel_type.getDimSize(0);
Value transpose_recurrent_kernel = Transpose2D(
builder, recurrent_kernel, recurrent_kernel_type, func_op.getLoc());
// Splits the weights into 4: i, f, c, o.
const int splits = 4;
Operation* weights_array;
if (failed(CreateEqualSizeSplitVOp(transposed_weight_kernel, 0, splits,
func_op.getLoc(), builder,
&weights_array)))
return failure();
// Splits the recurrent_weights into 4:
Operation* recurrent_weights_array;
if (failed(CreateEqualSizeSplitVOp(transpose_recurrent_kernel, 0, splits,
func_op.getLoc(), builder,
&recurrent_weights_array)))
return failure();
// Splits the bias into 4:
Operation* bias_array;
if (failed(CreateEqualSizeSplitVOp(bias, 0, splits, func_op.getLoc(), builder,
&bias_array)))
return failure();
// Build the lstm op.
SmallVector<int64_t, 3> output_shape;
if (time_majored) {
output_shape = {time, batch, n_output};
} else {
output_shape = {batch, time, n_output};
}
auto result_type = mlir::RankedTensorType::get(
output_shape,
final_inputs.getType().cast<RankedTensorType>().getElementType());
Value none = builder->create<mlir::ConstantOp>(
func_op.getLoc(), builder->getNoneType(), builder->getUnitAttr());
auto lstm = builder->create<mlir::TFL::UnidirectionalSequenceLSTMOp>(
func_op.getLoc(), result_type, /*input=*/final_inputs,
/*input_to_input_weights=*/weights_array->getResult(0),
/*input_to_forget_weights=*/weights_array->getResult(1),
/*input_to_cell_weights=*/weights_array->getResult(2),
/*input_to_output_weights=*/weights_array->getResult(3),
/*recurrent_to_input_weights=*/recurrent_weights_array->getResult(0),
/*recurrent_to_forget_weights=*/recurrent_weights_array->getResult(1),
/*recurrent_to_cell_weights=*/recurrent_weights_array->getResult(2),
/*recurrent_to_output_weights=*/recurrent_weights_array->getResult(3),
/*cell_to_input_weights=*/none,
/*cell_to_forget_weights=*/none,
/*cell_to_output_weights=*/none,
/*input_gate_bias=*/bias_array->getResult(0),
/*forget_gate_bias=*/bias_array->getResult(1),
/*cell_bias=*/bias_array->getResult(2),
/*output_gate_bias=*/bias_array->getResult(3),
/*projection_weights=*/none,
/*projection_bias=*/none,
/*input_activation_state=*/output_init_state,
/*input_cell_state=*/hidden_init_state,
/*input_layer_norm_coefficients=*/none,
/*forget_layer_norm_coefficients=*/none,
/*cell_layer_norm_coefficients=*/none,
/*output_layer_norm_coefficients=*/none, builder->getStringAttr("TANH"),
builder->getF32FloatAttr(10.0), builder->getF32FloatAttr(0.0),
builder->getBoolAttr(time_majored));
auto final_output_full_sequences = lstm.getResult();
// Populate the last output: last output is sliced from the full sequences.
// If time_major: last_output = outputs[-1, :, :]
// else: last_output = outputs[:, -1, :]
//
// As we are creating the strided_slice op, we need to populate the following
// fields:
// end: should always be (0, 0, 0)
// strides: should always be (1, 1, 1)
// begin: should be (0, -1, 0) or (-1, 0, 0) if it's time-majored.
// new_axis_mask: should always be 0.
// ellipsis_mask: should always be 0.
// begin_mask & end_mask: should be 0b101 = 5, or 0b110 = 6 if it's
// time-majored.
// shrink_axis_mask: should be 0b010 = 2, or 0b001 = 1 if it's time-majored.
SmallVector<int64_t, 2> last_output_shape({batch, n_output});
SmallVector<int32_t, 3> end({0, 0, 0});
SmallVector<int32_t, 3> strides({1, 1, 1});
SmallVector<int32_t, 3> begin;
int64_t new_axis_mask = 0;
int64_t ellipsis_mask = 0;
int64_t begin_mask;
int64_t end_mask;
int64_t shrink_axis_mask;
if (time_majored) {
begin_mask = 6;
end_mask = 6;
shrink_axis_mask = 1;
begin = {-1, 0, 0};
} else {
begin_mask = 5;
end_mask = 5;
shrink_axis_mask = 2;
begin = {0, -1, 0};
}
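// A worked example for the time-majored case (shapes are hypothetical): for
// outputs of shape [time, batch, n_output], begin = (-1, 0, 0) together with
// begin_mask = end_mask = 0b110 spans dimensions 1 and 2 in full, and
// shrink_axis_mask = 0b001 drops dimension 0, leaving the [batch, n_output]
// slice outputs[-1, :, :].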
auto last_output = CreateStridedSliceOp(
func_op.getLoc(), last_output_shape, final_output_full_sequences, begin,
end, strides, begin_mask, end_mask, ellipsis_mask, new_axis_mask,
shrink_axis_mask, builder);
SmallVector<Value, 5> outputs;
SmallVector<Type, 5> output_types;
// Due to the existence of the while loop, the number of time steps may be
// unknown in the signature; since we know the inputs here, we can infer it.
// Last output.
outputs.push_back(last_output);
output_types.push_back(last_output.getType());
// Full sequences.
outputs.push_back(final_output_full_sequences);
output_types.push_back(final_output_full_sequences.getType());
// All the rest: states, device.
for (int i = 2; i < 5; ++i) {
auto result_type =
func_op.getCallableResults()[i].dyn_cast<RankedTensorType>();
outputs.push_back(CreateTfF32ConstOp(builder, result_type.getShape(), 0.0f,
func_op.getLoc()));
output_types.push_back(result_type);
}
// Update function signatures.
func_op.setType(mlir::FunctionType::get(func_op.getType().getInputs(),
output_types, func_op.getContext()));
builder->create<mlir::ReturnOp>(func_op.getLoc(), outputs);
return success();
}
} // namespace TFL
} // namespace mlir
| karllessard/tensorflow | tensorflow/compiler/mlir/lite/utils/lstm_utils.cc | C++ | apache-2.0 | 34,132 |
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
#include "src/bootstrapper.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/parser.h"
#include "src/prettyprinter.h"
namespace v8 {
namespace internal {
RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
"ExportFromRuntime");
Bootstrapper::ExportFromRuntime(isolate, container);
JSObject::MigrateSlowToFast(container, 0, "ExportFromRuntime");
return *container;
}
RUNTIME_FUNCTION(Runtime_ExportExperimentalFromRuntime) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
"ExportExperimentalFromRuntime");
Bootstrapper::ExportExperimentalFromRuntime(isolate, container);
JSObject::MigrateSlowToFast(container, 0, "ExportExperimentalFromRuntime");
return *container;
}
RUNTIME_FUNCTION(Runtime_InstallToContext) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
RUNTIME_ASSERT(array->HasFastElements());
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
Handle<Context> native_context = isolate->native_context();
Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
int length = Smi::cast(array->length())->value();
for (int i = 0; i < length; i += 2) {
RUNTIME_ASSERT(fixed_array->get(i)->IsString());
Handle<String> name(String::cast(fixed_array->get(i)));
RUNTIME_ASSERT(fixed_array->get(i + 1)->IsJSObject());
Handle<JSObject> object(JSObject::cast(fixed_array->get(i + 1)));
int index = Context::ImportedFieldIndexForName(name);
if (index == Context::kNotFound) {
index = Context::IntrinsicIndexForName(name);
}
RUNTIME_ASSERT(index != Context::kNotFound);
native_context->set(index, *object);
}
return isolate->heap()->undefined_value();
}
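// A hedged sketch of the expected input (names are illustrative): the array
// is a flat list of (name, object) pairs walked with stride 2 above, e.g.
// ["promise_then", thenFn, "promise_catch", catchFn], where every name must
// resolve to an imported-field or intrinsic index of the native context.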
RUNTIME_FUNCTION(Runtime_Throw) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
return isolate->Throw(args[0]);
}
RUNTIME_FUNCTION(Runtime_ReThrow) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
return isolate->ReThrow(args[0]);
}
RUNTIME_FUNCTION(Runtime_ThrowStackOverflow) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
return isolate->StackOverflow();
}
RUNTIME_FUNCTION(Runtime_UnwindAndFindExceptionHandler) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
return isolate->UnwindAndFindHandler();
}
RUNTIME_FUNCTION(Runtime_PromoteScheduledException) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
return isolate->PromoteScheduledException();
}
RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
}
RUNTIME_FUNCTION(Runtime_NewTypeError) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_INT32_ARG_CHECKED(template_index, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
auto message_template =
static_cast<MessageTemplate::Template>(template_index);
return *isolate->factory()->NewTypeError(message_template, arg0);
}
RUNTIME_FUNCTION(Runtime_NewReferenceError) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_INT32_ARG_CHECKED(template_index, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
auto message_template =
static_cast<MessageTemplate::Template>(template_index);
return *isolate->factory()->NewReferenceError(message_template, arg0);
}
RUNTIME_FUNCTION(Runtime_NewSyntaxError) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_INT32_ARG_CHECKED(template_index, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
auto message_template =
static_cast<MessageTemplate::Template>(template_index);
return *isolate->factory()->NewSyntaxError(message_template, arg0);
}
RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(MessageTemplate::kIteratorResultNotAnObject, value));
}
RUNTIME_FUNCTION(Runtime_ThrowStrongModeImplicitConversion) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kStrongImplicitConversion));
}
RUNTIME_FUNCTION(Runtime_PromiseRejectEvent) {
DCHECK(args.length() == 3);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
CONVERT_BOOLEAN_ARG_CHECKED(debug_event, 2);
if (debug_event) isolate->debug()->OnPromiseReject(promise, value);
Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
// Do not report if we actually have a handler.
if (JSReceiver::GetDataProperty(promise, key)->IsUndefined()) {
isolate->ReportPromiseReject(promise, value,
v8::kPromiseRejectWithNoHandler);
}
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
DCHECK(args.length() == 1);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
  // At this point, no revocation has been issued yet.
RUNTIME_ASSERT(JSReceiver::GetDataProperty(promise, key)->IsUndefined());
isolate->ReportPromiseReject(promise, Handle<Object>(),
v8::kPromiseHandlerAddedAfterReject);
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_StackGuard) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
// First check if this is a real stack overflow.
StackLimitCheck check(isolate);
if (check.JsHasOverflowed()) {
return isolate->StackOverflow();
}
return isolate->stack_guard()->HandleInterrupts();
}
RUNTIME_FUNCTION(Runtime_Interrupt) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
return isolate->stack_guard()->HandleInterrupts();
}
RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_SMI_ARG_CHECKED(size, 0);
RUNTIME_ASSERT(IsAligned(size, kPointerSize));
RUNTIME_ASSERT(size > 0);
RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize);
return *isolate->factory()->NewFillerObject(size, false, NEW_SPACE);
}
RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(size, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
RUNTIME_ASSERT(IsAligned(size, kPointerSize));
RUNTIME_ASSERT(size > 0);
RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize);
bool double_align = AllocateDoubleAlignFlag::decode(flags);
AllocationSpace space = AllocateTargetSpace::decode(flags);
return *isolate->factory()->NewFillerObject(size, double_align, space);
}
// Collect the raw data for a stack trace. Returns an array of 4-element
// segments, each containing a receiver, function, code and native code
// offset.
RUNTIME_FUNCTION(Runtime_CollectStackTrace) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, caller, 1);
if (!isolate->bootstrapper()->IsActive()) {
// Optionally capture a more detailed stack trace for the message.
RETURN_FAILURE_ON_EXCEPTION(
isolate, isolate->CaptureAndSetDetailedStackTrace(error_object));
// Capture a simple stack trace for the stack property.
RETURN_FAILURE_ON_EXCEPTION(
isolate, isolate->CaptureAndSetSimpleStackTrace(error_object, caller));
}
return isolate->heap()->undefined_value();
}
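// Added commentary (not part of the V8 sources): given the segment layout
// described above Runtime_CollectStackTrace, frame i of the raw trace data
// occupies four consecutive slots of the backing array, e.g. for a
// hypothetical FixedArray |trace|:
//
//   trace->get(4 * i + 0);  // receiver
//   trace->get(4 * i + 1);  // function
//   trace->get(4 * i + 2);  // code object
//   trace->get(4 * i + 3);  // native code offset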
RUNTIME_FUNCTION(Runtime_MessageGetStartPosition) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
return Smi::FromInt(message->start_position());
}
RUNTIME_FUNCTION(Runtime_MessageGetScript) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
return message->script();
}
RUNTIME_FUNCTION(Runtime_ErrorToStringRT) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, error, 0);
Handle<String> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
isolate->error_tostring_helper()->Stringify(isolate, error));
return *result;
}
RUNTIME_FUNCTION(Runtime_FormatMessageString) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
CONVERT_INT32_ARG_CHECKED(template_index, 0);
CONVERT_ARG_HANDLE_CHECKED(String, arg0, 1);
CONVERT_ARG_HANDLE_CHECKED(String, arg1, 2);
CONVERT_ARG_HANDLE_CHECKED(String, arg2, 3);
Handle<String> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
MessageTemplate::FormatMessage(template_index, arg0, arg1, arg2));
isolate->native_context()->IncrementErrorsThrown();
return *result;
}
#define CALLSITE_GET(NAME, RETURN) \
RUNTIME_FUNCTION(Runtime_CallSite##NAME##RT) { \
HandleScope scope(isolate); \
DCHECK(args.length() == 1); \
CONVERT_ARG_HANDLE_CHECKED(JSObject, call_site_obj, 0); \
Handle<String> result; \
CallSite call_site(isolate, call_site_obj); \
RUNTIME_ASSERT(call_site.IsValid()) \
return RETURN(call_site.NAME(), isolate); \
}
static inline Object* ReturnDereferencedHandle(Handle<Object> obj,
Isolate* isolate) {
return *obj;
}
static inline Object* ReturnPositiveSmiOrNull(int value, Isolate* isolate) {
if (value >= 0) return Smi::FromInt(value);
return isolate->heap()->null_value();
}
static inline Object* ReturnBoolean(bool value, Isolate* isolate) {
return isolate->heap()->ToBoolean(value);
}
CALLSITE_GET(GetFileName, ReturnDereferencedHandle)
CALLSITE_GET(GetFunctionName, ReturnDereferencedHandle)
CALLSITE_GET(GetScriptNameOrSourceUrl, ReturnDereferencedHandle)
CALLSITE_GET(GetMethodName, ReturnDereferencedHandle)
CALLSITE_GET(GetLineNumber, ReturnPositiveSmiOrNull)
CALLSITE_GET(GetColumnNumber, ReturnPositiveSmiOrNull)
CALLSITE_GET(IsNative, ReturnBoolean)
CALLSITE_GET(IsToplevel, ReturnBoolean)
CALLSITE_GET(IsEval, ReturnBoolean)
CALLSITE_GET(IsConstructor, ReturnBoolean)
#undef CALLSITE_GET
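// Added commentary: as a concrete instance of the CALLSITE_GET macro above,
// CALLSITE_GET(GetLineNumber, ReturnPositiveSmiOrNull) expands to roughly:
//
//   RUNTIME_FUNCTION(Runtime_CallSiteGetLineNumberRT) {
//     HandleScope scope(isolate);
//     DCHECK(args.length() == 1);
//     CONVERT_ARG_HANDLE_CHECKED(JSObject, call_site_obj, 0);
//     Handle<String> result;
//     CallSite call_site(isolate, call_site_obj);
//     RUNTIME_ASSERT(call_site.IsValid())
//     return ReturnPositiveSmiOrNull(call_site.GetLineNumber(), isolate);
//   }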
RUNTIME_FUNCTION(Runtime_IS_VAR) {
UNREACHABLE(); // implemented as macro in the parser
return NULL;
}
RUNTIME_FUNCTION(Runtime_IncrementStatsCounter) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(String, name, 0);
if (FLAG_native_code_counters) {
StatsCounter(isolate, name->ToCString().get()).Increment();
}
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_HarmonyToString) {
// TODO(caitp): Delete this runtime method when removing --harmony-tostring
return isolate->heap()->ToBoolean(FLAG_harmony_tostring);
}
RUNTIME_FUNCTION(Runtime_GetTypeFeedbackVector) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
return function->shared()->feedback_vector();
}
RUNTIME_FUNCTION(Runtime_GetCallerJSFunction) {
SealHandleScope shs(isolate);
StackFrameIterator it(isolate);
RUNTIME_ASSERT(it.frame()->type() == StackFrame::STUB);
it.Advance();
RUNTIME_ASSERT(it.frame()->type() == StackFrame::JAVA_SCRIPT);
return JavaScriptFrame::cast(it.frame())->function();
}
RUNTIME_FUNCTION(Runtime_GetCodeStubExportsObject) {
HandleScope shs(isolate);
return isolate->heap()->code_stub_exports_object();
}
namespace {
Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
MessageLocation location;
if (isolate->ComputeLocation(&location)) {
Zone zone;
base::SmartPointer<ParseInfo> info(
location.function()->shared()->is_function()
? new ParseInfo(&zone, location.function())
: new ParseInfo(&zone, location.script()));
if (Parser::ParseStatic(info.get())) {
CallPrinter printer(isolate);
const char* string = printer.Print(info->literal(), location.start_pos());
return isolate->factory()->NewStringFromAsciiChecked(string);
} else {
isolate->clear_pending_exception();
}
}
return Object::TypeOf(isolate, object);
}
} // namespace
RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
Handle<String> callsite = RenderCallSite(isolate, object);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledNonCallable, callsite));
}
} // namespace internal
} // namespace v8
| weolar/miniblink49 | v8_4_8/src/runtime/runtime-internal.cc | C++ | apache-2.0 | 13,940 |
/*
* Copyright (c) 2008-2018, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.client.impl.protocol.task.cache;
import com.hazelcast.cache.impl.CacheClearResponse;
import com.hazelcast.cache.impl.CacheOperationProvider;
import com.hazelcast.cache.impl.CacheService;
import com.hazelcast.cache.impl.operation.CacheClearOperationFactory;
import com.hazelcast.client.impl.protocol.ClientMessage;
import com.hazelcast.client.impl.protocol.codec.CacheClearCodec;
import com.hazelcast.instance.Node;
import com.hazelcast.nio.Connection;
import com.hazelcast.security.permission.ActionConstants;
import com.hazelcast.security.permission.CachePermission;
import com.hazelcast.spi.OperationFactory;
import javax.cache.CacheException;
import java.security.Permission;
import java.util.Map;
/**
* This client request specifically calls {@link CacheClearOperationFactory} on the server side.
*
* @see CacheClearOperationFactory
*/
public class CacheClearMessageTask
extends AbstractCacheAllPartitionsTask<CacheClearCodec.RequestParameters> {
public CacheClearMessageTask(ClientMessage clientMessage, Node node, Connection connection) {
super(clientMessage, node, connection);
}
@Override
protected CacheClearCodec.RequestParameters decodeClientMessage(ClientMessage clientMessage) {
return CacheClearCodec.decodeRequest(clientMessage);
}
@Override
protected ClientMessage encodeResponse(Object response) {
return CacheClearCodec.encodeResponse();
}
@Override
protected OperationFactory createOperationFactory() {
CacheOperationProvider operationProvider = getOperationProvider(parameters.name);
return operationProvider.createClearOperationFactory();
}
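    // Added note: reduce() below folds the per-partition results into the
    // single client response; if any partition returned a CacheClearResponse
    // wrapping a CacheException, that exception is rethrown to the caller.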
@Override
protected Object reduce(Map<Integer, Object> map) {
for (Map.Entry<Integer, Object> entry : map.entrySet()) {
if (entry.getValue() == null) {
continue;
}
final CacheClearResponse cacheClearResponse = (CacheClearResponse) nodeEngine.toObject(entry.getValue());
final Object response = cacheClearResponse.getResponse();
if (response instanceof CacheException) {
throw (CacheException) response;
}
}
return null;
}
@Override
public Permission getRequiredPermission() {
return new CachePermission(parameters.name, ActionConstants.ACTION_REMOVE);
}
@Override
public String getServiceName() {
return CacheService.SERVICE_NAME;
}
@Override
public String getDistributedObjectName() {
return parameters.name;
}
@Override
public Object[] getParameters() {
return null;
}
@Override
public String getMethodName() {
return "clear";
}
}
| tufangorel/hazelcast | hazelcast/src/main/java/com/hazelcast/client/impl/protocol/task/cache/CacheClearMessageTask.java | Java | apache-2.0 | 3,397 |
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.collide.client.workspace;
import com.google.collide.client.bootstrap.BootstrapSession;
import com.google.collide.client.ui.tree.TreeNodeElement;
import com.google.collide.client.util.PathUtil;
import com.google.collide.client.util.logging.Log;
import com.google.collide.client.workspace.FileTreeModelNetworkController.OutgoingController;
import com.google.collide.dto.DirInfo;
import com.google.collide.dto.Mutation;
import com.google.collide.dto.ServerError.FailureReason;
import com.google.collide.dto.WorkspaceTreeUpdate;
import com.google.collide.dto.client.DtoClientImpls.DirInfoImpl;
import com.google.collide.dto.client.DtoClientImpls.WorkspaceTreeUpdateImpl;
import com.google.collide.json.client.JsoArray;
import com.google.collide.json.shared.JsonArray;
import com.google.collide.shared.util.JsonCollections;
import com.google.collide.shared.util.StringUtils;
import com.google.common.base.Preconditions;
import javax.annotation.Nullable;
/**
* Public API for interacting with the client side workspace file tree model.
* Also exposes callbacks for mutations that have been applied to the model.
*
* If you want to mutate the workspace file tree, which is a tree of
 * {@link FileTreeNode}s, you need to go through here.
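 *
 * <p>
 * A minimal, illustrative sketch (hypothetical wiring; {@code model} is
 * assumed to be an already constructed instance):
 *
 * <pre>
 * model.addModelChangeListener(new FileTreeModel.AbstractTreeModelChangeListener() {
 *   &#064;Override
 *   public void onNodeAdded(PathUtil parentDirPath, FileTreeNode newNode) {
 *     // react to the newly added node
 *   }
 * });
 * model.requestWorkspaceRoot(new FileTreeModel.RootNodeRequestCallback() {
 *   &#064;Override
 *   public void onRootNodeAvailable(FileTreeNode root) {
 *     // the tree is safe to read from here
 *   }
 * });
 * </pre>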
*/
public class FileTreeModel {
/**
* Callback interface for requesting the root node, potentially
* asynchronously.
*/
public interface RootNodeRequestCallback {
void onRootNodeAvailable(FileTreeNode root);
}
/**
* Callback interface for requesting a node, potentially asynchronously.
*/
public interface NodeRequestCallback {
void onNodeAvailable(FileTreeNode node);
/**
* Called if the node does not exist.
*/
void onNodeUnavailable();
/**
* Called if an error occurs while loading the node.
*/
void onError(FailureReason reason);
}
/**
* Callback interface for getting notified about changes to the workspace tree
* model that have been applied by the FileTreeController.
*/
public interface TreeModelChangeListener {
/**
* Notification that a node was added.
*/
void onNodeAdded(PathUtil parentDirPath, FileTreeNode newNode);
/**
* Notification that a node was moved/renamed.
*
* @param oldPath the old node path
* @param node the node that was moved, or null if the old path is not loaded. If both the old
* path and the new path are loaded, node == newNode and node's parent will be the target
* directory of the new path. If the new path is not loaded, node is the node that was in
* the old path.
* @param newPath the new node path
* @param newNode the new node, or null if the target directory is not loaded
*/
void onNodeMoved(PathUtil oldPath, FileTreeNode node, PathUtil newPath, FileTreeNode newNode);
/**
* Notification that a set of nodes was removed.
*
     * @param oldNodes a list of nodes that were removed. Every node will still have its parent filled in.
*/
void onNodesRemoved(JsonArray<FileTreeNode> oldNodes);
/**
* Notification that a node was replaced (can be either a file or directory).
*
* @param oldNode the existing node that used to be in the file tree, or null if the workspace
* root is being set for the first time
* @param newNode the node that replaces the {@code oldNode}. This will be the same
* {@link FileTreeNode#getNodeType()} as the node it is replacing.
*/
void onNodeReplaced(@Nullable FileTreeNode oldNode, FileTreeNode newNode);
}
/**
* A {@link TreeModelChangeListener} which does not perform any operations in
   * response to an event. Its only purpose is to allow clients to override only
   * the events that matter to them.
*/
public abstract static class AbstractTreeModelChangeListener implements TreeModelChangeListener {
@Override
public void onNodeAdded(PathUtil parentDirPath, FileTreeNode newNode) {
// intentional no-op, clients should override if needed
}
@Override
public void onNodeMoved(
PathUtil oldPath, FileTreeNode node, PathUtil newPath, FileTreeNode newNode) {
// intentional no-op, clients should override if needed
}
@Override
public void onNodesRemoved(JsonArray<FileTreeNode> oldNodes) {
// intentional no-op, clients should override if needed
}
@Override
public void onNodeReplaced(FileTreeNode oldDir, FileTreeNode newDir) {
// intentional no-op, clients should override if needed
}
}
/**
* A {@link TreeModelChangeListener} that performs the exact same action in
* response to any and all tree mutations.
*/
public abstract static class BasicTreeModelChangeListener implements TreeModelChangeListener {
public abstract void onTreeModelChange();
@Override
public void onNodeAdded(PathUtil parentDirPath, FileTreeNode newNode) {
onTreeModelChange();
}
@Override
public void onNodeMoved(
PathUtil oldPath, FileTreeNode node, PathUtil newPath, FileTreeNode newNode) {
onTreeModelChange();
}
@Override
public void onNodesRemoved(JsonArray<FileTreeNode> oldNodes) {
onTreeModelChange();
}
@Override
public void onNodeReplaced(FileTreeNode oldDir, FileTreeNode newDir) {
onTreeModelChange();
}
}
private interface ChangeDispatcher {
void dispatch(TreeModelChangeListener changeListener);
}
private final JsoArray<TreeModelChangeListener> modelChangeListeners = JsoArray.create();
private final OutgoingController outgoingNetworkController;
private FileTreeNode workspaceRoot;
private boolean disableChangeNotifications;
/**
* Tree revision that corresponds to the revision of the last
* successfully applied tree mutation that this client is aware of.
*/
private String lastAppliedTreeMutationRevision = "0";
public FileTreeModel(
FileTreeModelNetworkController.OutgoingController outgoingNetworkController) {
this.outgoingNetworkController = outgoingNetworkController;
}
/**
* Adds a node to our model by path.
*/
public void addNode(PathUtil path, final FileTreeNode newNode, String workspaceRootId) {
if (workspaceRoot == null) {
// TODO: queue up this add?
Log.warn(getClass(), "Attempting to add a node before the root is set", path);
return;
}
// Find the parent directory of the node.
final PathUtil parentDirPath = PathUtil.createExcludingLastN(path, 1);
FileTreeNode parentDir = getWorkspaceRoot().findChildNode(parentDirPath);
if (parentDir != null && parentDir.isComplete()) {
// The parent directory is complete, so add the node.
addNode(parentDir, newNode, workspaceRootId);
} else {
// The parent directory isn't complete, so do not add the node to the model, but update the
// workspace root id.
maybeSetLastAppliedTreeMutationRevision(workspaceRootId);
}
}
/**
* Adds a node to the model under the specified parent node.
*/
public void addNode(FileTreeNode parentDir, FileTreeNode childNode, String workspaceRootId) {
addNodeNoDispatch(parentDir, childNode);
dispatchAddNode(parentDir, childNode, workspaceRootId);
}
private void addNodeNoDispatch(final FileTreeNode parentDir, final FileTreeNode childNode) {
if (parentDir == null) {
Log.error(getClass(), "Trying to add a child to a null parent!", childNode);
return;
}
Log.debug(getClass(), "Adding ", childNode, " - to - ", parentDir);
parentDir.addChild(childNode);
}
/**
* Manually dispatch that a node was added.
*/
void dispatchAddNode(
final FileTreeNode parentDir, final FileTreeNode childNode, final String workspaceRootId) {
dispatchModelChange(new ChangeDispatcher() {
@Override
public void dispatch(TreeModelChangeListener changeListener) {
changeListener.onNodeAdded(parentDir.getNodePath(), childNode);
}
}, workspaceRootId);
}
/**
* Moves/renames a node in the model.
*/
public void moveNode(
final PathUtil oldPath, final PathUtil newPath, final String workspaceRootId) {
if (workspaceRoot == null) {
// TODO: queue up this move?
Log.warn(getClass(), "Attempting to move a node before the root is set", oldPath);
return;
}
// Remove the node from its old path if the old directory is complete.
final FileTreeNode oldNode = workspaceRoot.findChildNode(oldPath);
if (oldNode == null) {
/*
* No node found at the old path - either it isn't loaded, or we optimistically updated
* already. Verify that one of those is the case.
*/
Preconditions.checkState(workspaceRoot.findClosestChildNode(oldPath) != null ||
workspaceRoot.findChildNode(newPath) != null);
} else {
oldNode.setName(newPath.getBaseName());
oldNode.getParent().removeChild(oldNode);
}
// Apply the new root id.
maybeSetLastAppliedTreeMutationRevision(workspaceRootId);
// Prepare a callback that will dispatch the onNodeMove event to listeners.
NodeRequestCallback callback = new NodeRequestCallback() {
@Override
public void onNodeAvailable(FileTreeNode newNode) {
/*
* If we had to request the target directory, replace the target node with the oldNode to
* ensure that all properties (such as the rendered node and the fileEditSessionKey) are
* copied over correctly.
*/
if (oldNode != null && newNode != null && newNode != oldNode) {
newNode.replaceWith(oldNode);
newNode = oldNode;
}
// Dispatch a change event.
final FileTreeNode finalNewNode = newNode;
dispatchModelChange(new ChangeDispatcher() {
@Override
public void dispatch(TreeModelChangeListener changeListener) {
changeListener.onNodeMoved(oldPath, oldNode, newPath, finalNewNode);
}
}, workspaceRootId);
}
@Override
public void onNodeUnavailable() {
// The node should be available because we are requesting the node using the root ID
// immediately after the move.
Log.error(getClass(),
"Could not find moved node using the workspace root ID immediately after the move");
}
@Override
public void onError(FailureReason reason) {
// Error already logged.
}
};
// Request the target directory.
final PathUtil parentDirPath = PathUtil.createExcludingLastN(newPath, 1);
FileTreeNode parentDir = workspaceRoot.findChildNode(parentDirPath);
if (parentDir == null || !parentDir.isComplete()) {
if (oldNode == null) {
// Early exit if neither the old node nor the target directory is loaded.
return;
} else {
// If the parent directory was not loaded, don't bother loading it.
callback.onNodeAvailable(null);
}
} else {
if (oldNode == null) {
// The old node doesn't exist, so we need to force a refresh of the target directory's
// children by marking the target directory incomplete.
DirInfoImpl parentDirView = parentDir.cast();
parentDirView.setIsComplete(false);
} else {
// The old node exists and the target directory is loaded, so add the node to the target.
parentDir.addChild(oldNode);
}
// Request the new node.
requestWorkspaceNode(newPath, callback);
}
}
/**
* Removes a node from the model.
*
* @param toDelete the {@link FileTreeNode} we want to remove.
* @param workspaceRootId the new file tree revision
* @return the node that was deleted from the model. This will return
* {@code null} if the input node is null or if the input node does
* not have a parent. Meaning if the input node is the root, this
* method will return {@code null}.
*/
public FileTreeNode removeNode(final FileTreeNode toDelete, String workspaceRootId) {
// If we found a node at the specified path, then remove it.
if (deleteNodeNoDispatch(toDelete)) {
final JsonArray<FileTreeNode> deletedNode = JsonCollections.createArray(toDelete);
dispatchModelChange(new ChangeDispatcher() {
@Override
public void dispatch(TreeModelChangeListener changeListener) {
changeListener.onNodesRemoved(deletedNode);
}
}, workspaceRootId);
return toDelete;
}
return null;
}
/**
* Removes a set of nodes from the model.
*
* @param toDelete the {@link PathUtil}s for the nodes we want to remove.
* @param workspaceRootId the new file tree revision
   * @return the nodes that were deleted from the model. This will return an
   *         empty list if none of the specified paths exist, or {@code null}
   *         if we try to remove nodes before the root node is set.
*/
public JsonArray<FileTreeNode> removeNodes(
final JsonArray<PathUtil> toDelete, String workspaceRootId) {
if (workspaceRoot == null) {
// TODO: queue up this remove?
Log.warn(getClass(), "Attempting to remove nodes before the root is set");
return null;
}
final JsonArray<FileTreeNode> deletedNodes = JsonCollections.createArray();
for (int i = 0; i < toDelete.size(); i++) {
FileTreeNode node = workspaceRoot.findChildNode(toDelete.get(i));
if (deleteNodeNoDispatch(node)) {
deletedNodes.add(node);
}
}
if (deletedNodes.size() == 0) {
// if none of the nodes created a need to update the UI, just return an
// empty list.
return deletedNodes;
}
dispatchModelChange(new ChangeDispatcher() {
@Override
public void dispatch(TreeModelChangeListener changeListener) {
changeListener.onNodesRemoved(deletedNodes);
}
}, workspaceRootId);
return deletedNodes;
}
/**
* Deletes a single node (does not update the UI).
*/
private boolean deleteNodeNoDispatch(FileTreeNode node) {
if (node == null || node.getParent() == null) {
return false;
}
FileTreeNode parent = node.getParent();
// Guard against someone installing a node of the same name in the parent
    // (meaning we are already gone).
if (!node.equals(parent.getChildNode(node.getName()))) {
// This means that the node we are removing from the tree is already
// effectively removed from where it thinks it is.
return false;
}
node.getParent().removeChild(node);
return true;
}
/**
* Replaces either the root node for this tree model, or replaces an existing directory node, or
* replaces an existing file node.
*/
public void replaceNode(PathUtil path, final FileTreeNode newNode, String workspaceRootId) {
if (newNode == null) {
return;
}
if (PathUtil.WORKSPACE_ROOT.equals(path)) {
// Install the workspace root.
final FileTreeNode oldDir = workspaceRoot;
workspaceRoot = newNode;
dispatchModelChange(new ChangeDispatcher() {
@Override
public void dispatch(TreeModelChangeListener changeListener) {
changeListener.onNodeReplaced(oldDir, newNode);
}
}, workspaceRootId);
} else {
// Patch the model if there is one.
if (workspaceRoot != null) {
final FileTreeNode nodeToReplace = workspaceRoot.findChildNode(path);
// Note. We do not support patching subtrees that don't already
// exist. This subtree must have already existed, or have been
// preceded by an ADD or COPY mutation.
if (nodeToReplace == null) {
return;
}
nodeToReplace.replaceWith(newNode);
dispatchModelChange(new ChangeDispatcher() {
@Override
public void dispatch(TreeModelChangeListener changeListener) {
changeListener.onNodeReplaced(nodeToReplace, newNode);
}
}, workspaceRootId);
}
}
}
/**
* @return the current value of the workspaceRoot. Potentially {@code null} if
* the model has not yet been populated.
*/
public FileTreeNode getWorkspaceRoot() {
return workspaceRoot;
}
/**
* Asks for the root node, potentially asynchronously if the model is not yet
* populated. If the root node is already available then the callback will be
* invoked synchronously.
*/
public void requestWorkspaceRoot(final RootNodeRequestCallback callback) {
FileTreeNode rootNode = getWorkspaceRoot();
if (rootNode == null) {
// Wait for the model to be populated.
addModelChangeListener(new AbstractTreeModelChangeListener() {
@Override
public void onNodeReplaced(FileTreeNode oldNode, FileTreeNode newNode) {
Preconditions.checkArgument(newNode.getNodePath().equals(PathUtil.WORKSPACE_ROOT),
"Unexpected non-workspace root subtree replaced before workspace root was replaced: "
+ newNode.toString());
// Should be resilient to concurrent modification!
removeModelChangeListener(this);
callback.onRootNodeAvailable(getWorkspaceRoot());
}
});
return;
}
callback.onRootNodeAvailable(rootNode);
}
/**
* Adds a {@link TreeModelChangeListener} to be notified of mutations applied
* by the FileTreeController to the underlying workspace file tree model.
*
* @param modelChangeListener the listener we are adding
*/
public void addModelChangeListener(TreeModelChangeListener modelChangeListener) {
modelChangeListeners.add(modelChangeListener);
}
/**
* Removes a {@link TreeModelChangeListener} from the set of listeners
* subscribed to model changes.
*/
public void removeModelChangeListener(TreeModelChangeListener modelChangeListener) {
modelChangeListeners.remove(modelChangeListener);
}
public void setDisableChangeNotifications(boolean disable) {
this.disableChangeNotifications = disable;
}
private void dispatchModelChange(ChangeDispatcher dispatcher, String workspaceRootId) {
// Update the tracked tip ID.
maybeSetLastAppliedTreeMutationRevision(workspaceRootId);
if (disableChangeNotifications) {
return;
}
JsoArray<TreeModelChangeListener> copy = modelChangeListeners.slice(
0, modelChangeListeners.size());
for (int i = 0, n = copy.size(); i < n; i++) {
dispatcher.dispatch(copy.get(i));
}
}
/**
* @return the file tree revision associated with the last seen Tree mutation.
*/
public String getLastAppliedTreeMutationRevision() {
return lastAppliedTreeMutationRevision;
}
/**
* Bumps the tracked Root ID for the last applied tree mutation, if the
* version happens to be larger than the version we are tracking.
*/
public void maybeSetLastAppliedTreeMutationRevision(String lastAppliedTreeMutationRevision) {
// TODO: Ensure numeric comparison survives ID obfuscation.
try {
long newRevision = StringUtils.toLong(lastAppliedTreeMutationRevision);
long lastRevision = StringUtils.toLong(this.lastAppliedTreeMutationRevision);
this.lastAppliedTreeMutationRevision = (newRevision > lastRevision)
? lastAppliedTreeMutationRevision : this.lastAppliedTreeMutationRevision;
// TODO: this should be monotonically increasing; if it's not, we missed an update.
} catch (NumberFormatException e) {
Log.error(getClass(), "Root ID is not a numeric long!", lastAppliedTreeMutationRevision);
}
}
/**
* Folks that want to mutate the file tree should obtain a skeletal {@link WorkspaceTreeUpdate}
* using this factory method.
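   *
   * <p>
   * Hedged sketch of the intended call pattern (the mutation-populating step
   * is an assumption, not verified against the DTO API):
   *
   * <pre>
   * WorkspaceTreeUpdateImpl update = model.makeEmptyTreeUpdate();
   * // append Mutation DTOs to the update's mutation list, then send it
   * </pre>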
*/
public WorkspaceTreeUpdateImpl makeEmptyTreeUpdate() {
if (this.lastAppliedTreeMutationRevision == null) {
throw new IllegalStateException(
"Attempted to mutate the tree before the workspace file tree was loaded at least once!");
}
return WorkspaceTreeUpdateImpl.make()
.setAuthorClientId(BootstrapSession.getBootstrapSession().getActiveClientId())
.setMutations(JsoArray.<Mutation>create());
}
/**
* Calculates the list of expanded paths. The list only contains the paths of the deepest expanded
* directories. Parent directories are assumed to be open as well.
*
* @return the list of expanded paths, or null if the workspace root is not loaded
*/
public JsoArray<String> calculateExpandedPaths() {
// Early exit if the root isn't loaded yet.
if (workspaceRoot == null) {
return null;
}
// Walk the tree looking for expanded paths.
JsoArray<String> expandedPaths = JsoArray.create();
calculateExpandedPathsRecursive(workspaceRoot, expandedPaths);
return expandedPaths;
}
/**
   * Calculates the list of expanded paths beneath the specified node and adds them to
   * expandedPaths. If none of the node's descendants are expanded, the node's own path is added.
*
* @param node the directory containing the expanded paths
* @param expandedPaths the running list of expanded paths
*/
private void calculateExpandedPathsRecursive(FileTreeNode node, JsoArray<String> expandedPaths) {
assert node.isDirectory() : "node must be a directory";
// Check if the directory is expanded. The root is always expanded.
if (node != workspaceRoot) {
TreeNodeElement<FileTreeNode> dirElem = node.getRenderedTreeNode();
if (!dirElem.isOpen()) {
return;
}
}
// Recursively search for expanded subdirectories.
int expandedPathsCount = expandedPaths.size();
DirInfoImpl dir = node.cast();
JsonArray<DirInfo> subDirs = dir.getSubDirectories();
if (subDirs != null) {
for (int i = 0; i < subDirs.size(); i++) {
DirInfo subDir = subDirs.get(i);
calculateExpandedPathsRecursive((FileTreeNode) subDir, expandedPaths);
}
}
// Add this directory if none of its descendants were added.
if (expandedPathsCount == expandedPaths.size()) {
expandedPaths.add(node.getNodePath().getPathString());
}
}
/**
* Asks for the node at the specified path, potentially asynchronously if the model does not yet
* contain the node. If the node is already available then the callback will be invoked
* synchronously.
*
* @param path the path to the node, which must be a file (not a directory)
* @param callback the callback to invoke when the node is ready
*/
public void requestWorkspaceNode(final PathUtil path, final NodeRequestCallback callback) {
outgoingNetworkController.requestWorkspaceNode(this, path, callback);
}
/**
* Asks for the children of the specified node.
*
* @param node a directory node
* @param callback an optional callback that will be notified once the children are fetched. If
* null, this method will alert the user if there was an error
*/
public void requestDirectoryChildren(FileTreeNode node,
@Nullable final NodeRequestCallback callback) {
outgoingNetworkController.requestDirectoryChildren(this, node, callback);
}
}
| ericmckean/collide | java/com/google/collide/client/workspace/FileTreeModel.java | Java | apache-2.0 | 23,794 |
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package trigger
import (
"fmt"
"github.com/Sirupsen/logrus"
"k8s.io/test-infra/prow/config"
"k8s.io/test-infra/prow/github"
"k8s.io/test-infra/prow/kube"
"k8s.io/test-infra/prow/plugins"
)
const (
pluginName = "trigger"
lgtmLabel = "lgtm"
)
func init() {
plugins.RegisterIssueCommentHandler(pluginName, handleIssueComment)
plugins.RegisterPullRequestHandler(pluginName, handlePullRequest)
plugins.RegisterPushEventHandler(pluginName, handlePush)
}
type githubClient interface {
AddLabel(org, repo string, number int, label string) error
BotName() string
IsMember(org, user string) (bool, error)
GetPullRequest(org, repo string, number int) (*github.PullRequest, error)
GetRef(org, repo, ref string) (string, error)
CreateComment(owner, repo string, number int, comment string) error
ListIssueComments(owner, repo string, issue int) ([]github.IssueComment, error)
CreateStatus(owner, repo, ref string, status github.Status) error
GetCombinedStatus(org, repo, ref string) (*github.CombinedStatus, error)
GetPullRequestChanges(org, repo string, number int) ([]github.PullRequestChange, error)
RemoveLabel(org, repo string, number int, label string) error
}
type kubeClient interface {
CreateProwJob(kube.ProwJob) (kube.ProwJob, error)
}
type client struct {
GitHubClient githubClient
KubeClient kubeClient
Config *config.Config
Logger *logrus.Entry
}
func triggerConfig(c *config.Config, org, repo string) *config.Trigger {
for _, tr := range c.Triggers {
for _, r := range tr.Repos {
if r == org || r == fmt.Sprintf("%s/%s", org, repo) {
return &tr
}
}
}
return nil
}
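// Illustrative note (added commentary): with the matching rule above, a
// Trigger whose Repos list contains "kubernetes" applies to every repo in
// that org, while an entry like "kubernetes/test-infra" applies only to that
// single repo.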
func getClient(pc plugins.PluginClient) client {
return client{
GitHubClient: pc.GitHubClient,
Config: pc.Config,
KubeClient: pc.KubeClient,
Logger: pc.Logger,
}
}
func handlePullRequest(pc plugins.PluginClient, pr github.PullRequestEvent) error {
return handlePR(getClient(pc), pr)
}
func handleIssueComment(pc plugins.PluginClient, ic github.IssueCommentEvent) error {
return handleIC(getClient(pc), ic)
}
func handlePush(pc plugins.PluginClient, pe github.PushEvent) error {
return handlePE(getClient(pc), pe)
}
| kewu1992/test-infra | prow/plugins/trigger/trigger.go | GO | apache-2.0 | 2,751 |
/**
* <copyright>
* </copyright>
*
* $Id$
*/
package org.wso2.developerstudio.eclipse.gmf.esb;
/**
* <!-- begin-user-doc -->
* A representation of the model object '<em><b>Local Entry</b></em>'.
* <!-- end-user-doc -->
*
* <p>
* The following features are supported:
* <ul>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.LocalEntry#getEntryName <em>Entry Name</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.LocalEntry#getValueType <em>Value Type</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.LocalEntry#getValueLiteral <em>Value Literal</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.LocalEntry#getValueXML <em>Value XML</em>}</li>
* <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.LocalEntry#getValueURL <em>Value URL</em>}</li>
* </ul>
* </p>
*
* @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getLocalEntry()
* @model
* @generated
*/
public interface LocalEntry extends EsbElement {
/**
* Returns the value of the '<em><b>Entry Name</b></em>' attribute.
* The default value is <code>"entry_name"</code>.
* <!-- begin-user-doc -->
* <p>
* If the meaning of the '<em>Entry Name</em>' attribute isn't clear,
* there really should be more of a description here...
* </p>
* <!-- end-user-doc -->
* @return the value of the '<em>Entry Name</em>' attribute.
* @see #setEntryName(String)
* @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getLocalEntry_EntryName()
* @model default="entry_name"
* @generated
*/
String getEntryName();
/**
* Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.LocalEntry#getEntryName <em>Entry Name</em>}' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @param value the new value of the '<em>Entry Name</em>' attribute.
* @see #getEntryName()
* @generated
*/
void setEntryName(String value);
/**
* Returns the value of the '<em><b>Value Type</b></em>' attribute.
* The default value is <code>"LITERAL"</code>.
* The literals are from the enumeration {@link org.wso2.developerstudio.eclipse.gmf.esb.LocalEntryValueType}.
* <!-- begin-user-doc -->
* <p>
* If the meaning of the '<em>Value Type</em>' attribute isn't clear,
* there really should be more of a description here...
* </p>
* <!-- end-user-doc -->
* @return the value of the '<em>Value Type</em>' attribute.
* @see org.wso2.developerstudio.eclipse.gmf.esb.LocalEntryValueType
* @see #setValueType(LocalEntryValueType)
* @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getLocalEntry_ValueType()
* @model default="LITERAL"
* @generated
*/
LocalEntryValueType getValueType();
/**
* Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.LocalEntry#getValueType <em>Value Type</em>}' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @param value the new value of the '<em>Value Type</em>' attribute.
* @see org.wso2.developerstudio.eclipse.gmf.esb.LocalEntryValueType
* @see #getValueType()
* @generated
*/
void setValueType(LocalEntryValueType value);
/**
* Returns the value of the '<em><b>Value Literal</b></em>' attribute.
* The default value is <code>"entry_value"</code>.
* <!-- begin-user-doc -->
* <p>
* If the meaning of the '<em>Value Literal</em>' attribute isn't clear,
* there really should be more of a description here...
* </p>
* <!-- end-user-doc -->
* @return the value of the '<em>Value Literal</em>' attribute.
* @see #setValueLiteral(String)
* @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getLocalEntry_ValueLiteral()
* @model default="entry_value"
* @generated
*/
String getValueLiteral();
/**
* Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.LocalEntry#getValueLiteral <em>Value Literal</em>}' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @param value the new value of the '<em>Value Literal</em>' attribute.
* @see #getValueLiteral()
* @generated
*/
void setValueLiteral(String value);
/**
* Returns the value of the '<em><b>Value XML</b></em>' attribute.
* The default value is <code>"<value/>"</code>.
* <!-- begin-user-doc -->
* <p>
* If the meaning of the '<em>Value XML</em>' attribute isn't clear,
* there really should be more of a description here...
* </p>
* <!-- end-user-doc -->
* @return the value of the '<em>Value XML</em>' attribute.
* @see #setValueXML(String)
* @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getLocalEntry_ValueXML()
* @model default="<value/>"
* @generated
*/
String getValueXML();
/**
* Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.LocalEntry#getValueXML <em>Value XML</em>}' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @param value the new value of the '<em>Value XML</em>' attribute.
* @see #getValueXML()
* @generated
*/
void setValueXML(String value);
/**
* Returns the value of the '<em><b>Value URL</b></em>' attribute.
* The default value is <code>"file:/path/to/resource.ext"</code>.
* <!-- begin-user-doc -->
* <p>
* If the meaning of the '<em>Value URL</em>' attribute isn't clear,
* there really should be more of a description here...
* </p>
* <!-- end-user-doc -->
* @return the value of the '<em>Value URL</em>' attribute.
* @see #setValueURL(String)
* @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getLocalEntry_ValueURL()
* @model default="file:/path/to/resource.ext"
* @generated
*/
String getValueURL();
/**
* Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.LocalEntry#getValueURL <em>Value URL</em>}' attribute.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @param value the new value of the '<em>Value URL</em>' attribute.
* @see #getValueURL()
* @generated
*/
void setValueURL(String value);
} // LocalEntry
| chanakaudaya/developer-studio | esb/org.wso2.developerstudio.eclipse.gmf.esb/src/org/wso2/developerstudio/eclipse/gmf/esb/LocalEntry.java | Java | apache-2.0 | 5,960 |
package md5b60ffeb829f638581ab2bb9b1a7f4f3f;
public class ButtonRenderer
extends md5b60ffeb829f638581ab2bb9b1a7f4f3f.ViewRenderer_2
implements
mono.android.IGCUserPeer,
android.view.View.OnAttachStateChangeListener
{
static final String __md_methods;
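	// Added note: each __md_methods entry below registers one overridden Java
	// method with the Mono runtime; the colon-separated fields are, in order,
	// the native stub name with its JNI signature, the managed connector
	// method, and the managed assembly that provides the handler.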
static {
__md_methods =
"n_onViewAttachedToWindow:(Landroid/view/View;)V:GetOnViewAttachedToWindow_Landroid_view_View_Handler:Android.Views.View/IOnAttachStateChangeListenerInvoker, Mono.Android, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null\n" +
"n_onViewDetachedFromWindow:(Landroid/view/View;)V:GetOnViewDetachedFromWindow_Landroid_view_View_Handler:Android.Views.View/IOnAttachStateChangeListenerInvoker, Mono.Android, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null\n" +
"";
mono.android.Runtime.register ("Xamarin.Forms.Platform.Android.ButtonRenderer, Xamarin.Forms.Platform.Android, Version=2.0.0.0, Culture=neutral, PublicKeyToken=null", ButtonRenderer.class, __md_methods);
}
public ButtonRenderer (android.content.Context p0) throws java.lang.Throwable
{
super (p0);
if (getClass () == ButtonRenderer.class)
mono.android.TypeManager.Activate ("Xamarin.Forms.Platform.Android.ButtonRenderer, Xamarin.Forms.Platform.Android, Version=2.0.0.0, Culture=neutral, PublicKeyToken=null", "Android.Content.Context, Mono.Android, Version=0.0.0.0, Culture=neutral, PublicKeyToken=84e04ff9cfb79065", this, new java.lang.Object[] { p0 });
}
public ButtonRenderer (android.content.Context p0, android.util.AttributeSet p1) throws java.lang.Throwable
{
super (p0, p1);
if (getClass () == ButtonRenderer.class)
mono.android.TypeManager.Activate ("Xamarin.Forms.Platform.Android.ButtonRenderer, Xamarin.Forms.Platform.Android, Version=2.0.0.0, Culture=neutral, PublicKeyToken=null", "Android.Content.Context, Mono.Android, Version=0.0.0.0, Culture=neutral, PublicKeyToken=84e04ff9cfb79065:Android.Util.IAttributeSet, Mono.Android, Version=0.0.0.0, Culture=neutral, PublicKeyToken=84e04ff9cfb79065", this, new java.lang.Object[] { p0, p1 });
}
public ButtonRenderer (android.content.Context p0, android.util.AttributeSet p1, int p2) throws java.lang.Throwable
{
super (p0, p1, p2);
if (getClass () == ButtonRenderer.class)
mono.android.TypeManager.Activate ("Xamarin.Forms.Platform.Android.ButtonRenderer, Xamarin.Forms.Platform.Android, Version=2.0.0.0, Culture=neutral, PublicKeyToken=null", "Android.Content.Context, Mono.Android, Version=0.0.0.0, Culture=neutral, PublicKeyToken=84e04ff9cfb79065:Android.Util.IAttributeSet, Mono.Android, Version=0.0.0.0, Culture=neutral, PublicKeyToken=84e04ff9cfb79065:System.Int32, mscorlib, Version=2.0.5.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e", this, new java.lang.Object[] { p0, p1, p2 });
}
public void onViewAttachedToWindow (android.view.View p0)
{
n_onViewAttachedToWindow (p0);
}
private native void n_onViewAttachedToWindow (android.view.View p0);
public void onViewDetachedFromWindow (android.view.View p0)
{
n_onViewDetachedFromWindow (p0);
}
private native void n_onViewDetachedFromWindow (android.view.View p0);
java.util.ArrayList refList;
public void monodroidAddReference (java.lang.Object obj)
{
if (refList == null)
refList = new java.util.ArrayList ();
refList.add (obj);
}
public void monodroidClearReferences ()
{
if (refList != null)
refList.clear ();
}
}
| MobileRez/XFXamlClass | Lab Materials/Part 1/Completed/MeasurementConverter/MeasurementConverter.Droid/obj/Debug/android/src/md5b60ffeb829f638581ab2bb9b1a7f4f3f/ButtonRenderer.java | Java | apache-2.0 | 3,388 |
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights
* Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.cloudfront.model;
import java.io.Serializable;
/**
* A complex type that contains zero or more CacheBehavior elements.
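 * <p>
 * Minimal usage sketch (illustrative values only):
 *
 * <pre>
 * CacheBehaviors behaviors = new CacheBehaviors()
 *         .withQuantity(1)
 *         .withItems(new CacheBehavior());
 * </pre>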
*/
public class CacheBehaviors implements Serializable, Cloneable {
/** The number of cache behaviors for this distribution. */
private Integer quantity;
/**
* Optional: A complex type that contains cache behaviors for this
* distribution. If Quantity is 0, you can omit Items.
*/
private com.amazonaws.internal.SdkInternalList<CacheBehavior> items;
/**
* The number of cache behaviors for this distribution.
*
* @param quantity
* The number of cache behaviors for this distribution.
*/
public void setQuantity(Integer quantity) {
this.quantity = quantity;
}
/**
* The number of cache behaviors for this distribution.
*
* @return The number of cache behaviors for this distribution.
*/
public Integer getQuantity() {
return this.quantity;
}
/**
* The number of cache behaviors for this distribution.
*
* @param quantity
* The number of cache behaviors for this distribution.
* @return Returns a reference to this object so that method calls can be
* chained together.
*/
public CacheBehaviors withQuantity(Integer quantity) {
setQuantity(quantity);
return this;
}
/**
* Optional: A complex type that contains cache behaviors for this
* distribution. If Quantity is 0, you can omit Items.
*
* @return Optional: A complex type that contains cache behaviors for this
* distribution. If Quantity is 0, you can omit Items.
*/
public java.util.List<CacheBehavior> getItems() {
if (items == null) {
items = new com.amazonaws.internal.SdkInternalList<CacheBehavior>();
}
return items;
}
/**
* Optional: A complex type that contains cache behaviors for this
* distribution. If Quantity is 0, you can omit Items.
*
* @param items
* Optional: A complex type that contains cache behaviors for this
* distribution. If Quantity is 0, you can omit Items.
*/
public void setItems(java.util.Collection<CacheBehavior> items) {
if (items == null) {
this.items = null;
return;
}
this.items = new com.amazonaws.internal.SdkInternalList<CacheBehavior>(
items);
}
/**
* Optional: A complex type that contains cache behaviors for this
* distribution. If Quantity is 0, you can omit Items.
* <p>
* <b>NOTE:</b> This method appends the values to the existing list (if
* any). Use {@link #setItems(java.util.Collection)} or
* {@link #withItems(java.util.Collection)} if you want to override the
* existing values.
* </p>
*
* @param items
* Optional: A complex type that contains cache behaviors for this
* distribution. If Quantity is 0, you can omit Items.
* @return Returns a reference to this object so that method calls can be
* chained together.
*/
public CacheBehaviors withItems(CacheBehavior... items) {
if (this.items == null) {
setItems(new com.amazonaws.internal.SdkInternalList<CacheBehavior>(
items.length));
}
for (CacheBehavior ele : items) {
this.items.add(ele);
}
return this;
}
/**
* Optional: A complex type that contains cache behaviors for this
* distribution. If Quantity is 0, you can omit Items.
*
* @param items
* Optional: A complex type that contains cache behaviors for this
* distribution. If Quantity is 0, you can omit Items.
* @return Returns a reference to this object so that method calls can be
* chained together.
*/
public CacheBehaviors withItems(java.util.Collection<CacheBehavior> items) {
setItems(items);
return this;
}
/**
* Returns a string representation of this object; useful for testing and
* debugging.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getQuantity() != null)
sb.append("Quantity: " + getQuantity() + ",");
if (getItems() != null)
sb.append("Items: " + getItems());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof CacheBehaviors == false)
return false;
CacheBehaviors other = (CacheBehaviors) obj;
if (other.getQuantity() == null ^ this.getQuantity() == null)
return false;
if (other.getQuantity() != null
&& other.getQuantity().equals(this.getQuantity()) == false)
return false;
if (other.getItems() == null ^ this.getItems() == null)
return false;
if (other.getItems() != null
&& other.getItems().equals(this.getItems()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode
+ ((getQuantity() == null) ? 0 : getQuantity().hashCode());
hashCode = prime * hashCode
+ ((getItems() == null) ? 0 : getItems().hashCode());
return hashCode;
}
@Override
public CacheBehaviors clone() {
try {
return (CacheBehaviors) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException(
"Got a CloneNotSupportedException from Object.clone() "
+ "even though we're Cloneable!", e);
}
}
}
| mhurne/aws-sdk-java | aws-java-sdk-cloudfront/src/main/java/com/amazonaws/services/cloudfront/model/CacheBehaviors.java | Java | apache-2.0 | 6,768 |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.client.security;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.security.user.privileges.ApplicationResourcePrivileges;
import org.elasticsearch.client.security.user.privileges.GlobalPrivileges;
import org.elasticsearch.client.security.user.privileges.UserIndicesPrivileges;
import org.elasticsearch.common.xcontent.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Objects;
import java.util.Set;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
/**
* The response for the {@link org.elasticsearch.client.SecurityClient#getUserPrivileges(RequestOptions)} API.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-privileges.html">the API docs</a>
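 * <p>
 * Minimal sketch of the expected call pattern (assumes an already configured
 * {@code RestHighLevelClient} named {@code client}):
 *
 * <pre>
 * GetUserPrivilegesResponse response =
 *     client.security().getUserPrivileges(RequestOptions.DEFAULT);
 * Set&lt;String&gt; clusterPrivileges = response.getClusterPrivileges();
 * </pre>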
*/
public class GetUserPrivilegesResponse {
private static final ConstructingObjectParser<GetUserPrivilegesResponse, Void> PARSER = new ConstructingObjectParser<>(
"get_user_privileges_response", true, GetUserPrivilegesResponse::buildResponseFromParserArgs);
@SuppressWarnings("unchecked")
private static GetUserPrivilegesResponse buildResponseFromParserArgs(Object[] args) {
return new GetUserPrivilegesResponse(
(Collection<String>) args[0],
(Collection<GlobalPrivileges>) args[1],
(Collection<UserIndicesPrivileges>) args[2],
(Collection<ApplicationResourcePrivileges>) args[3],
(Collection<String>) args[4]
);
}
static {
PARSER.declareStringArray(constructorArg(), new ParseField("cluster"));
PARSER.declareObjectArray(constructorArg(), (parser, ignore) -> GlobalPrivileges.fromXContent(parser),
new ParseField("global"));
PARSER.declareObjectArray(constructorArg(), (parser, ignore) -> UserIndicesPrivileges.fromXContent(parser),
new ParseField("indices"));
PARSER.declareObjectArray(constructorArg(), (parser, ignore) -> ApplicationResourcePrivileges.fromXContent(parser),
new ParseField("applications"));
PARSER.declareStringArray(constructorArg(), new ParseField("run_as"));
}
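    // Added note: the declare* calls above bind, in order, the "cluster",
    // "global", "indices", "applications" and "run_as" fields of the JSON
    // response to the positional constructor arguments consumed by
    // buildResponseFromParserArgs.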
public static GetUserPrivilegesResponse fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
private Set<String> clusterPrivileges;
private Set<GlobalPrivileges> globalPrivileges;
private Set<UserIndicesPrivileges> indicesPrivileges;
private Set<ApplicationResourcePrivileges> applicationPrivileges;
private Set<String> runAsPrivilege;
public GetUserPrivilegesResponse(Collection<String> clusterPrivileges, Collection<GlobalPrivileges> globalPrivileges,
Collection<UserIndicesPrivileges> indicesPrivileges,
Collection<ApplicationResourcePrivileges> applicationPrivileges, Collection<String> runAsPrivilege) {
this.clusterPrivileges = Collections.unmodifiableSet(new LinkedHashSet<>(clusterPrivileges));
this.globalPrivileges = Collections.unmodifiableSet(new LinkedHashSet<>(globalPrivileges));
this.indicesPrivileges = Collections.unmodifiableSet(new LinkedHashSet<>(indicesPrivileges));
this.applicationPrivileges = Collections.unmodifiableSet(new LinkedHashSet<>(applicationPrivileges));
this.runAsPrivilege = Collections.unmodifiableSet(new LinkedHashSet<>(runAsPrivilege));
}
public Set<String> getClusterPrivileges() {
return clusterPrivileges;
}
public Set<GlobalPrivileges> getGlobalPrivileges() {
return globalPrivileges;
}
public Set<UserIndicesPrivileges> getIndicesPrivileges() {
return indicesPrivileges;
}
public Set<ApplicationResourcePrivileges> getApplicationPrivileges() {
return applicationPrivileges;
}
public Set<String> getRunAsPrivilege() {
return runAsPrivilege;
}
@Override
public String toString() {
return "GetUserPrivilegesResponse{" +
"clusterPrivileges=" + clusterPrivileges +
", globalPrivileges=" + globalPrivileges +
", indicesPrivileges=" + indicesPrivileges +
", applicationPrivileges=" + applicationPrivileges +
", runAsPrivilege=" + runAsPrivilege +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final GetUserPrivilegesResponse that = (GetUserPrivilegesResponse) o;
return Objects.equals(this.clusterPrivileges, that.clusterPrivileges) &&
Objects.equals(this.globalPrivileges, that.globalPrivileges) &&
Objects.equals(this.indicesPrivileges, that.indicesPrivileges) &&
Objects.equals(this.applicationPrivileges, that.applicationPrivileges) &&
Objects.equals(this.runAsPrivilege, that.runAsPrivilege);
}
@Override
public int hashCode() {
return Objects.hash(clusterPrivileges, globalPrivileges, indicesPrivileges, applicationPrivileges, runAsPrivilege);
}
}
| ern/elasticsearch | client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetUserPrivilegesResponse.java | Java | apache-2.0 | 5,778 |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.v2 import availability_zones
from cinderclient.v2 import consistencygroups
from cinderclient.v2 import pools
from cinderclient.v2 import qos_specs
from cinderclient.v2 import quotas
from cinderclient.v2 import services
from cinderclient.v2 import volume_backups as vol_backups
from cinderclient.v2 import volume_encryption_types as vol_enc_types
from cinderclient.v2 import volume_snapshots as vol_snaps
from cinderclient.v2 import volume_transfers
from cinderclient.v2 import volume_types
from cinderclient.v2 import volumes
from openstack_dashboard import api
from openstack_dashboard.usage import quotas as usage_quotas
from openstack_dashboard.test.test_data import utils
def data(TEST):
TEST.cinder_services = utils.TestDataContainer()
TEST.cinder_volumes = utils.TestDataContainer()
TEST.cinder_volume_backups = utils.TestDataContainer()
TEST.cinder_volume_encryption_types = utils.TestDataContainer()
TEST.cinder_volume_types = utils.TestDataContainer()
TEST.cinder_volume_encryption = utils.TestDataContainer()
TEST.cinder_bootable_volumes = utils.TestDataContainer()
TEST.cinder_qos_specs = utils.TestDataContainer()
TEST.cinder_qos_spec_associations = utils.TestDataContainer()
TEST.cinder_volume_snapshots = utils.TestDataContainer()
TEST.cinder_quotas = utils.TestDataContainer()
TEST.cinder_quota_usages = utils.TestDataContainer()
TEST.cinder_availability_zones = utils.TestDataContainer()
TEST.cinder_volume_transfers = utils.TestDataContainer()
TEST.cinder_pools = utils.TestDataContainer()
TEST.cinder_consistencygroups = utils.TestDataContainer()
TEST.cinder_cgroup_volumes = utils.TestDataContainer()
# Services
service_1 = services.Service(services.ServiceManager(None), {
"service": "cinder-scheduler",
"status": "enabled",
"binary": "cinder-scheduler",
"zone": "internal",
"state": "up",
"updated_at": "2013-07-08T05:21:00.000000",
"host": "devstack001",
"disabled_reason": None
})
service_2 = services.Service(services.ServiceManager(None), {
"service": "cinder-volume",
"status": "enabled",
"binary": "cinder-volume",
"zone": "nova",
"state": "up",
"updated_at": "2013-07-08T05:20:51.000000",
"host": "devstack001",
"disabled_reason": None
})
TEST.cinder_services.add(service_1)
TEST.cinder_services.add(service_2)
# Volumes - Cinder v1
volume = volumes.Volume(
volumes.VolumeManager(None),
{'id': "11023e92-8008-4c8b-8059-7f2293ff3887",
'status': 'available',
'size': 40,
'display_name': 'Volume name',
'display_description': 'Volume description',
'created_at': '2014-01-27 10:30:00',
'volume_type': None,
'attachments': []})
nameless_volume = volumes.Volume(
volumes.VolumeManager(None),
{"id": "4b069dd0-6eaa-4272-8abc-5448a68f1cce",
"status": 'available',
"size": 10,
"display_name": '',
"display_description": '',
"device": "/dev/hda",
"created_at": '2010-11-21 18:34:25',
"volume_type": 'vol_type_1',
"attachments": []})
other_volume = volumes.Volume(
volumes.VolumeManager(None),
{'id': "21023e92-8008-1234-8059-7f2293ff3889",
'status': 'in-use',
'size': 10,
'display_name': u'my_volume',
'display_description': '',
'created_at': '2013-04-01 10:30:00',
'volume_type': None,
'attachments': [{"id": "1", "server_id": '1',
"device": "/dev/hda"}]})
volume_with_type = volumes.Volume(
volumes.VolumeManager(None),
{'id': "7dcb47fd-07d9-42c2-9647-be5eab799ebe",
'name': 'my_volume2',
'status': 'in-use',
'size': 10,
'display_name': u'my_volume2',
'display_description': '',
'created_at': '2013-04-01 10:30:00',
'volume_type': 'vol_type_2',
'attachments': [{"id": "2", "server_id": '2',
"device": "/dev/hdb"}]})
non_bootable_volume = volumes.Volume(
volumes.VolumeManager(None),
{'id': "21023e92-8008-1234-8059-7f2293ff3890",
'status': 'in-use',
'size': 10,
'display_name': u'my_volume',
'display_description': '',
'created_at': '2013-04-01 10:30:00',
'volume_type': None,
'bootable': False,
'attachments': [{"id": "1", "server_id": '1',
"device": "/dev/hda"}]})
volume.bootable = 'true'
nameless_volume.bootable = 'true'
other_volume.bootable = 'true'
TEST.cinder_volumes.add(api.cinder.Volume(volume))
TEST.cinder_volumes.add(api.cinder.Volume(nameless_volume))
TEST.cinder_volumes.add(api.cinder.Volume(other_volume))
TEST.cinder_volumes.add(api.cinder.Volume(volume_with_type))
TEST.cinder_bootable_volumes.add(api.cinder.Volume(non_bootable_volume))
vol_type1 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
{'id': u'1',
'name': u'vol_type_1',
'description': 'type 1 description',
'extra_specs': {'foo': 'bar'}})
vol_type2 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
{'id': u'2',
'name': u'vol_type_2',
'description': 'type 2 description'})
TEST.cinder_volume_types.add(vol_type1, vol_type2)
# Volumes - Cinder v2
volume_v2 = volumes.Volume(
volumes.VolumeManager(None),
{'id': "31023e92-8008-4c8b-8059-7f2293ff1234",
'name': 'v2_volume',
'description': "v2 Volume Description",
'status': 'available',
'size': 20,
'created_at': '2014-01-27 10:30:00',
'volume_type': None,
'os-vol-host-attr:host': 'host@backend-name#pool',
'bootable': 'true',
'attachments': []})
volume_v2.bootable = 'true'
TEST.cinder_volumes.add(api.cinder.Volume(volume_v2))
snapshot = vol_snaps.Snapshot(
vol_snaps.SnapshotManager(None),
{'id': '5f3d1c33-7d00-4511-99df-a2def31f3b5d',
'display_name': 'test snapshot',
'display_description': 'volume snapshot',
'size': 40,
'status': 'available',
'volume_id': '11023e92-8008-4c8b-8059-7f2293ff3887'})
snapshot2 = vol_snaps.Snapshot(
vol_snaps.SnapshotManager(None),
{'id': 'c9d0881a-4c0b-4158-a212-ad27e11c2b0f',
'name': '',
'description': 'v2 volume snapshot description',
'size': 80,
'status': 'available',
'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'})
snapshot3 = vol_snaps.Snapshot(
vol_snaps.SnapshotManager(None),
{'id': 'c9d0881a-4c0b-4158-a212-ad27e11c2b0e',
'name': '',
'description': 'v2 volume snapshot description 2',
'size': 80,
'status': 'available',
'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'})
snapshot.bootable = 'true'
snapshot2.bootable = 'true'
TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot))
TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot2))
TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot3))
TEST.cinder_volume_snapshots.first()._volume = volume
# Volume Type Encryption
vol_enc_type1 = vol_enc_types.VolumeEncryptionType(
vol_enc_types.VolumeEncryptionTypeManager(None),
{'volume_type_id': u'1',
'control_location': "front-end",
'key_size': 512,
'provider': "a-provider",
'cipher': "a-cipher"})
vol_enc_type2 = vol_enc_types.VolumeEncryptionType(
vol_enc_types.VolumeEncryptionTypeManager(None),
{'volume_type_id': u'2',
'control_location': "front-end",
'key_size': 256,
'provider': "a-provider",
'cipher': "a-cipher"})
vol_unenc_type1 = vol_enc_types.VolumeEncryptionType(
vol_enc_types.VolumeEncryptionTypeManager(None), {})
TEST.cinder_volume_encryption_types.add(vol_enc_type1, vol_enc_type2,
vol_unenc_type1)
volume_backup1 = vol_backups.VolumeBackup(
vol_backups.VolumeBackupManager(None),
{'id': 'a374cbb8-3f99-4c3f-a2ef-3edbec842e31',
'name': 'backup1',
'description': 'volume backup 1',
'size': 10,
'status': 'available',
'container_name': 'volumebackups',
'volume_id': '11023e92-8008-4c8b-8059-7f2293ff3887'})
volume_backup2 = vol_backups.VolumeBackup(
vol_backups.VolumeBackupManager(None),
{'id': 'c321cbb8-3f99-4c3f-a2ef-3edbec842e52',
'name': 'backup2',
'description': 'volume backup 2',
'size': 20,
'status': 'available',
'container_name': 'volumebackups',
'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'})
volume_backup3 = vol_backups.VolumeBackup(
vol_backups.VolumeBackupManager(None),
{'id': 'c321cbb8-3f99-4c3f-a2ef-3edbec842e53',
'name': 'backup3',
'description': 'volume backup 3',
'size': 20,
'status': 'available',
'container_name': 'volumebackups',
'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'})
TEST.cinder_volume_backups.add(volume_backup1)
TEST.cinder_volume_backups.add(volume_backup2)
TEST.cinder_volume_backups.add(volume_backup3)
# Volume Encryption
vol_enc_metadata1 = volumes.Volume(
volumes.VolumeManager(None),
{'cipher': 'test-cipher',
'key_size': 512,
'provider': 'test-provider',
'control_location': 'front-end'})
vol_unenc_metadata1 = volumes.Volume(
volumes.VolumeManager(None),
{})
TEST.cinder_volume_encryption.add(vol_enc_metadata1)
TEST.cinder_volume_encryption.add(vol_unenc_metadata1)
# Quota Sets
quota_data = dict(volumes='1',
snapshots='1',
gigabytes='1000')
quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
TEST.cinder_quotas.add(api.base.QuotaSet(quota))
# Quota Usages
quota_usage_data = {'gigabytes': {'used': 0,
'quota': 1000},
'instances': {'used': 0,
'quota': 10},
'snapshots': {'used': 0,
'quota': 10}}
quota_usage = usage_quotas.QuotaUsage()
for k, v in quota_usage_data.items():
quota_usage.add_quota(api.base.Quota(k, v['quota']))
quota_usage.tally(k, v['used'])
TEST.cinder_quota_usages.add(quota_usage)
# Availability Zones
# Cinder returns the following structure from os-availability-zone
# {"availabilityZoneInfo":
# [{"zoneState": {"available": true}, "zoneName": "nova"}]}
# Note that the default zone is still "nova" even though this is cinder
TEST.cinder_availability_zones.add(
availability_zones.AvailabilityZone(
availability_zones.AvailabilityZoneManager(None),
{
'zoneName': 'nova',
'zoneState': {'available': True}
}
)
)
# Cinder Limits
limits = {"absolute": {"totalVolumesUsed": 1,
"totalGigabytesUsed": 5,
"maxTotalVolumeGigabytes": 1000,
"maxTotalVolumes": 10}}
TEST.cinder_limits = limits
# QOS Specs
qos_spec1 = qos_specs.QoSSpecs(
qos_specs.QoSSpecsManager(None),
{"id": "418db45d-6992-4674-b226-80aacad2073c",
"name": "high_iops",
"consumer": "back-end",
"specs": {"minIOPS": "1000", "maxIOPS": '100000'}})
qos_spec2 = qos_specs.QoSSpecs(
qos_specs.QoSSpecsManager(None),
{"id": "6ed7035f-992e-4075-8ed6-6eff19b3192d",
"name": "high_bws",
"consumer": "back-end",
"specs": {"maxBWS": '5000'}})
TEST.cinder_qos_specs.add(qos_spec1, qos_spec2)
vol_type1.associated_qos_spec = qos_spec1.name
TEST.cinder_qos_spec_associations.add(vol_type1)
# volume_transfers
transfer_1 = volume_transfers.VolumeTransfer(
volume_transfers.VolumeTransferManager(None), {
'id': '99999999-8888-7777-6666-555555555555',
'name': 'test transfer',
'volume_id': volume.id,
'auth_key': 'blah',
'created_at': ''})
TEST.cinder_volume_transfers.add(transfer_1)
# Pools
pool1 = pools.Pool(
pools.PoolManager(None), {
"QoS_support": False,
"allocated_capacity_gb": 0,
"driver_version": "3.0.0",
"free_capacity_gb": 10,
"extra_specs": {
"description": "LVM Extra specs",
"display_name": "LVMDriver",
"namespace": "OS::Cinder::LVMDriver",
"type": "object",
},
"name": "devstack@lvmdriver-1#lvmdriver-1",
"pool_name": "lvmdriver-1",
"reserved_percentage": 0,
"storage_protocol": "iSCSI",
"total_capacity_gb": 10,
"vendor_name": "Open Source",
"volume_backend_name": "lvmdriver-1"})
pool2 = pools.Pool(
pools.PoolManager(None), {
"QoS_support": False,
"allocated_capacity_gb": 2,
"driver_version": "3.0.0",
"free_capacity_gb": 15,
"extra_specs": {
"description": "LVM Extra specs",
"display_name": "LVMDriver",
"namespace": "OS::Cinder::LVMDriver",
"type": "object",
},
"name": "devstack@lvmdriver-2#lvmdriver-2",
"pool_name": "lvmdriver-2",
"reserved_percentage": 0,
"storage_protocol": "iSCSI",
"total_capacity_gb": 10,
"vendor_name": "Open Source",
"volume_backend_name": "lvmdriver-2"})
TEST.cinder_pools.add(pool1)
TEST.cinder_pools.add(pool2)
# volume consistency groups
cgroup_1 = consistencygroups.Consistencygroup(
consistencygroups.ConsistencygroupManager(None),
{'id': u'1',
'name': u'cg_1',
'description': 'cg 1 description',
'volume_types': u'1',
'volume_type_names': []})
cgroup_2 = consistencygroups.Consistencygroup(
consistencygroups.ConsistencygroupManager(None),
{'id': u'2',
'name': u'cg_2',
'description': 'cg 2 description',
'volume_types': u'1',
'volume_type_names': []})
TEST.cinder_consistencygroups.add(cgroup_1)
TEST.cinder_consistencygroups.add(cgroup_2)
volume_for_consistency_group = volumes.Volume(
volumes.VolumeManager(None),
{'id': "11023e92-8008-4c8b-8059-7f2293ff3881",
'status': 'available',
'size': 40,
'display_name': 'Volume name',
'display_description': 'Volume description',
'created_at': '2014-01-27 10:30:00',
'volume_type': None,
'attachments': [],
'consistencygroup_id': u'1'})
TEST.cinder_cgroup_volumes.add(api.cinder.Volume(
volume_for_consistency_group))
| yangleo/cloud-github | openstack_dashboard/test/test_data/cinder_data.py | Python | apache-2.0 | 16,279 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.smartdata.action.annotation.ActionSignature;
/**
 * An action to move all blocks of a file to disk storage (the "HOT" policy).
*/
@ActionSignature(
actionId = "alldisk",
displayName = "alldisk",
usage = HdfsAction.FILE_PATH + " $file "
)
public class AllDiskFileAction extends MoveFileAction {
@Override
public String getStoragePolicy() {
return "HOT";
}
}
| PHILO-HE/SSM | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllDiskFileAction.java | Java | apache-2.0 | 1,192 |
/*
* Copyright (c) 2008-2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mongodb.connection;
import com.mongodb.MongoInternalException;
import com.mongodb.ServerAddress;
import javax.net.ssl.SSLSocket;
import java.io.IOException;
import java.net.Socket;
import static com.mongodb.internal.connection.SslHelper.enableHostNameVerification;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
final class SocketStreamHelper {
static void initialize(final Socket socket, final ServerAddress address, final SocketSettings settings, final SslSettings sslSettings)
throws IOException {
socket.setTcpNoDelay(true);
socket.setSoTimeout(settings.getReadTimeout(MILLISECONDS));
socket.setKeepAlive(settings.isKeepAlive());
if (settings.getReceiveBufferSize() > 0) {
socket.setReceiveBufferSize(settings.getReceiveBufferSize());
}
if (settings.getSendBufferSize() > 0) {
socket.setSendBufferSize(settings.getSendBufferSize());
}
if (sslSettings.isEnabled()) {
if (!(socket instanceof SSLSocket)) {
throw new MongoInternalException("SSL is enabled but the socket is not an instance of javax.net.ssl.SSLSocket");
}
if (!sslSettings.isInvalidHostNameAllowed()) {
SSLSocket sslSocket = (SSLSocket) socket;
sslSocket.setSSLParameters(enableHostNameVerification(sslSocket.getSSLParameters()));
}
}
socket.connect(address.getSocketAddress(), settings.getConnectTimeout(MILLISECONDS));
}
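    // Usage sketch (illustrative only; `socketFactory` is a placeholder, not
    // part of the original file): a caller creates the socket first, then lets
    // this helper apply the settings before any reads or writes.
    //   Socket socket = socketFactory.createSocket();
    //   SocketStreamHelper.initialize(socket, address, socketSettings, sslSettings);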
private SocketStreamHelper() {
}
}
| kay-kim/mongo-java-driver | driver-core/src/main/com/mongodb/connection/SocketStreamHelper.java | Java | apache-2.0 | 2,178 |
using System.ComponentModel;
using GraphX.Controls.Models;
#if WPF
using System.Windows;
using DefaultEventArgs = System.EventArgs;
using System.Windows.Controls;
#elif METRO
using Windows.Foundation;
using Windows.UI.Xaml;
using Windows.UI.Xaml.Data;
using DefaultEventArgs = System.Object;
#endif
using GraphX.PCL.Common.Exceptions;
namespace GraphX.Controls
{
#if METRO
[Bindable]
#endif
public class AttachableVertexLabelControl : VertexLabelControl, IAttachableControl<VertexControl>, INotifyPropertyChanged
{
/// <summary>
/// Gets label attach node
/// </summary>
public VertexControl AttachNode { get { return (VertexControl)GetValue(AttachNodeProperty); } private set { SetValue(AttachNodeProperty, value); OnPropertyChanged("AttachNode"); } }
public static readonly DependencyProperty AttachNodeProperty = DependencyProperty.Register(nameof(AttachNode), typeof(VertexControl), typeof(AttachableVertexLabelControl),
new PropertyMetadata(null));
#if WPF
static AttachableVertexLabelControl()
{
DefaultStyleKeyProperty.OverrideMetadata(typeof(AttachableVertexLabelControl), new FrameworkPropertyMetadata(typeof(AttachableVertexLabelControl)));
}
#endif
public AttachableVertexLabelControl()
{
DataContext = this;
#if METRO
DefaultStyleKey = typeof(AttachableVertexLabelControl);
#endif
}
/// <summary>
/// Attach label to VertexControl
/// </summary>
/// <param name="node">VertexControl node</param>
public virtual void Attach(VertexControl node)
{
#if WPF
if (AttachNode != null)
AttachNode.IsVisibleChanged -= AttachNode_IsVisibleChanged;
AttachNode = node;
AttachNode.IsVisibleChanged += AttachNode_IsVisibleChanged;
#elif METRO
AttachNode = node;
#endif
node.AttachLabel(this);
}
/// <summary>
/// Detach label from control
/// </summary>
public virtual void Detach()
{
#if WPF
if (AttachNode != null)
AttachNode.IsVisibleChanged -= AttachNode_IsVisibleChanged;
#endif
AttachNode = null;
}
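        // Usage sketch (illustrative only; `vertexControl` is an existing
        // VertexControl, not part of the original file):
        //   var label = new AttachableVertexLabelControl();
        //   label.Attach(vertexControl);   // label now follows the vertex
        //   label.Detach();                // stop tracking the vertex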
#if WPF
void AttachNode_IsVisibleChanged(object sender, DependencyPropertyChangedEventArgs e)
{
if (AttachNode.IsVisible && AttachNode.ShowLabel)
Show();
else if (!AttachNode.IsVisible)
{
Hide();
}
}
#endif
protected override VertexControl GetVertexControl(DependencyObject parent)
{
//if(AttachNode == null)
// throw new GX_InvalidDataException("AttachableVertexLabelControl node is not attached!");
return AttachNode;
}
public override void UpdatePosition()
{
if (double.IsNaN(DesiredSize.Width) || DesiredSize.Width == 0) return;
var vc = GetVertexControl(GetParent());
if (vc == null) return;
if (LabelPositionMode == VertexLabelPositionMode.Sides)
{
var vcPos = vc.GetPosition();
Point pt;
switch (LabelPositionSide)
{
case VertexLabelPositionSide.TopRight:
pt = new Point(vcPos.X + vc.DesiredSize.Width, vcPos.Y + -DesiredSize.Height);
break;
case VertexLabelPositionSide.BottomRight:
pt = new Point(vcPos.X + vc.DesiredSize.Width, vcPos.Y + vc.DesiredSize.Height);
break;
case VertexLabelPositionSide.TopLeft:
pt = new Point(vcPos.X + -DesiredSize.Width, vcPos.Y + -DesiredSize.Height);
break;
case VertexLabelPositionSide.BottomLeft:
pt = new Point(vcPos.X + -DesiredSize.Width, vcPos.Y + vc.DesiredSize.Height);
break;
case VertexLabelPositionSide.Top:
pt = new Point(vcPos.X + vc.DesiredSize.Width * .5 - DesiredSize.Width * .5, vcPos.Y + -DesiredSize.Height);
break;
case VertexLabelPositionSide.Bottom:
pt = new Point(vcPos.X + vc.DesiredSize.Width * .5 - DesiredSize.Width * .5, vcPos.Y + vc.DesiredSize.Height);
break;
case VertexLabelPositionSide.Left:
pt = new Point(vcPos.X + -DesiredSize.Width, vcPos.Y + vc.DesiredSize.Height * .5f - DesiredSize.Height * .5);
break;
case VertexLabelPositionSide.Right:
pt = new Point(vcPos.X + vc.DesiredSize.Width, vcPos.Y + vc.DesiredSize.Height * .5f - DesiredSize.Height * .5);
break;
default:
throw new GX_InvalidDataException("UpdatePosition() -> Unknown vertex label side!");
}
LastKnownRectSize = new Rect(pt, DesiredSize);
}
else LastKnownRectSize = new Rect(LabelPosition, DesiredSize);
Arrange(LastKnownRectSize);
}
public event PropertyChangedEventHandler PropertyChanged;
protected virtual void OnPropertyChanged(string propertyName)
{
var handler = PropertyChanged;
handler?.Invoke(this, new PropertyChangedEventArgs(propertyName));
}
}
}
| edgardozoppi/GraphX | GraphX.Controls/Controls/VertexLabels/AttachableVertexLabelControl.cs | C# | apache-2.0 | 5,630 |
// Copyright 2015 The Project Buendia Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at: http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distrib-
// uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
// OR CONDITIONS OF ANY KIND, either express or implied. See the License for
// specific language governing permissions and limitations under the License.
package org.projectbuendia.client.json;
/** A list of concept results returned by the server. */
public class JsonConceptResponse {
public JsonConcept[] results;
}
| llvasconcellos/client | app/src/main/java/org/projectbuendia/client/json/JsonConceptResponse.java | Java | apache-2.0 | 762 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "remoting/protocol/authentication_method.h"
#include "base/base64.h"
#include "base/logging.h"
#include "crypto/hmac.h"
#include "remoting/protocol/auth_util.h"
namespace remoting {
namespace protocol {
// static
AuthenticationMethod AuthenticationMethod::Invalid() {
return AuthenticationMethod();
}
// static
AuthenticationMethod AuthenticationMethod::Spake2(HashFunction hash_function) {
return AuthenticationMethod(SPAKE2, hash_function);
}
// static
AuthenticationMethod AuthenticationMethod::ThirdParty() {
return AuthenticationMethod(THIRD_PARTY, NONE);
}
// static
AuthenticationMethod AuthenticationMethod::FromString(
const std::string& value) {
if (value == "spake2_plain") {
return Spake2(NONE);
} else if (value == "spake2_hmac") {
return Spake2(HMAC_SHA256);
} else if (value == "third_party") {
return ThirdParty();
} else {
return AuthenticationMethod::Invalid();
}
}
// static
std::string AuthenticationMethod::ApplyHashFunction(
HashFunction hash_function,
const std::string& tag,
const std::string& shared_secret) {
switch (hash_function) {
    case NONE:
      return shared_secret;
case HMAC_SHA256: {
crypto::HMAC response(crypto::HMAC::SHA256);
if (!response.Init(tag)) {
LOG(FATAL) << "HMAC::Init failed";
}
unsigned char out_bytes[kSharedSecretHashLength];
if (!response.Sign(shared_secret, out_bytes, sizeof(out_bytes))) {
LOG(FATAL) << "HMAC::Sign failed";
}
return std::string(out_bytes, out_bytes + sizeof(out_bytes));
}
}
NOTREACHED();
return shared_secret;
}
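// Usage sketch (illustrative only; |tag| and |shared_secret| are placeholder
// values, not part of the original file):
//   AuthenticationMethod method = AuthenticationMethod::FromString("spake2_hmac");
//   std::string key = AuthenticationMethod::ApplyHashFunction(
//       method.hash_function(), tag, shared_secret);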
AuthenticationMethod::AuthenticationMethod()
: type_(INVALID),
hash_function_(NONE) {
}
AuthenticationMethod::AuthenticationMethod(MethodType type,
HashFunction hash_function)
: type_(type),
hash_function_(hash_function) {
DCHECK_NE(type_, INVALID);
}
AuthenticationMethod::HashFunction AuthenticationMethod::hash_function() const {
DCHECK(is_valid());
return hash_function_;
}
const std::string AuthenticationMethod::ToString() const {
DCHECK(is_valid());
if (type_ == THIRD_PARTY)
return "third_party";
DCHECK_EQ(type_, SPAKE2);
switch (hash_function_) {
case NONE:
return "spake2_plain";
case HMAC_SHA256:
return "spake2_hmac";
}
return "invalid";
}
bool AuthenticationMethod::operator ==(
const AuthenticationMethod& other) const {
return type_ == other.type_ &&
hash_function_ == other.hash_function_;
}
bool SharedSecretHash::Parse(const std::string& as_string) {
size_t separator = as_string.find(':');
if (separator == std::string::npos)
return false;
std::string function_name = as_string.substr(0, separator);
if (function_name == "plain") {
hash_function = AuthenticationMethod::NONE;
} else if (function_name == "hmac") {
hash_function = AuthenticationMethod::HMAC_SHA256;
} else {
return false;
}
if (!base::Base64Decode(as_string.substr(separator + 1), &value)) {
return false;
}
return true;
}
} // namespace protocol
} // namespace remoting
| plxaye/chromium | src/remoting/protocol/authentication_method.cc | C++ | apache-2.0 | 3,345 |
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import acos_client.errors as acos_errors
import acos_client.v21.base as base
class BasePersistence(base.BaseV21):
def __init__(self, client):
super(BasePersistence, self).__init__(client)
self.prefix = "slb.template.%s_persistence" % self.pers_type
def get(self, name, **kwargs):
return self._post(("%s.search" % self.prefix), {'name': name},
**kwargs)
def exists(self, name, **kwargs):
try:
self.get(name, **kwargs)
return True
except acos_errors.NotFound:
return False
def create(self, name, **kwargs):
self._post(("%s.create" % self.prefix), self.get_params(name),
**kwargs)
def delete(self, name, **kwargs):
self._post(("%s.delete" % self.prefix), {'name': name}, **kwargs)
class CookiePersistence(BasePersistence):
def __init__(self, client):
self.pers_type = 'cookie'
super(CookiePersistence, self).__init__(client)
def get_params(self, name):
return {
"cookie_persistence_template": {
"name": name
}
}
class SourceIpPersistence(BasePersistence):
def __init__(self, client):
self.pers_type = 'src_ip'
super(SourceIpPersistence, self).__init__(client)
def get_params(self, name):
return {
"src_ip_persistence_template": {
"name": name
}
}
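# Usage sketch (illustrative only; assumes an initialized v21 axapi client
# object, which is not part of this file):
#     pers = SourceIpPersistence(client)
#     if not pers.exists('my-template'):
#         pers.create('my-template')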
| dougwig/acos-client | acos_client/v21/slb/template/persistence.py | Python | apache-2.0 | 2,099 |
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testclient
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/watch"
)
// FakeResourceQuotas implements ResourceQuotaInterface. Meant to be embedded into a struct to get a default
// implementation. This makes faking out just the methods you want to test easier.
type FakeResourceQuotas struct {
Fake *Fake
Namespace string
}
func (c *FakeResourceQuotas) Get(name string) (*api.ResourceQuota, error) {
obj, err := c.Fake.Invokes(NewGetAction("resourcequotas", c.Namespace, name), &api.ResourceQuota{})
if obj == nil {
return nil, err
}
return obj.(*api.ResourceQuota), err
}
func (c *FakeResourceQuotas) List(label labels.Selector, field fields.Selector) (*api.ResourceQuotaList, error) {
obj, err := c.Fake.Invokes(NewListAction("resourcequotas", c.Namespace, label, field), &api.ResourceQuotaList{})
if obj == nil {
return nil, err
}
return obj.(*api.ResourceQuotaList), err
}
func (c *FakeResourceQuotas) Create(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) {
obj, err := c.Fake.Invokes(NewCreateAction("resourcequotas", c.Namespace, resourceQuota), resourceQuota)
if obj == nil {
return nil, err
}
return obj.(*api.ResourceQuota), err
}
func (c *FakeResourceQuotas) Update(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) {
obj, err := c.Fake.Invokes(NewUpdateAction("resourcequotas", c.Namespace, resourceQuota), resourceQuota)
if obj == nil {
return nil, err
}
return obj.(*api.ResourceQuota), err
}
func (c *FakeResourceQuotas) Delete(name string) error {
_, err := c.Fake.Invokes(NewDeleteAction("resourcequotas", c.Namespace, name), &api.ResourceQuota{})
return err
}
func (c *FakeResourceQuotas) Watch(label labels.Selector, field fields.Selector, opts unversioned.ListOptions) (watch.Interface, error) {
return c.Fake.InvokesWatch(NewWatchAction("resourcequotas", c.Namespace, label, field, opts))
}
func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) {
obj, err := c.Fake.Invokes(NewUpdateSubresourceAction("resourcequotas", "status", c.Namespace, resourceQuota), resourceQuota)
if obj == nil {
return nil, err
}
return obj.(*api.ResourceQuota), err
}
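// Usage sketch (illustrative only, not part of the original file): embed the
// fake in a test so each call records an action on the shared Fake.
//   fake := &Fake{}
//   quotas := &FakeResourceQuotas{Fake: fake, Namespace: "default"}
//   quotas.Get("compute-resources") // records a get action on the embedded Fake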
| GertiPoppel/kubernetes | pkg/client/unversioned/testclient/fake_resource_quotas.go | GO | apache-2.0 | 2,914 |
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication.regionserver;
import java.io.IOException;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.regionserver.ReplicationSourceService;
import org.apache.hadoop.hbase.regionserver.ReplicationSinkService;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;
import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.zookeeper.KeeperException;
import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
import static org.apache.hadoop.hbase.HConstants.REPLICATION_ENABLE_KEY;
import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL;
/**
* Gateway to Replication. Used by {@link org.apache.hadoop.hbase.regionserver.HRegionServer}.
*/
public class Replication implements WALActionsListener,
ReplicationSourceService, ReplicationSinkService {
private boolean replication;
private ReplicationSourceManager replicationManager;
private final AtomicBoolean replicating = new AtomicBoolean(true);
private ReplicationZookeeper zkHelper;
private Configuration conf;
private ReplicationSink replicationSink;
// Hosting server
private Server server;
/**
* Instantiate the replication management (if rep is enabled).
* @param server Hosting server
* @param fs handle to the filesystem
* @param logDir
* @param oldLogDir directory where logs are archived
* @throws IOException
*/
public Replication(final Server server, final FileSystem fs,
final Path logDir, final Path oldLogDir) throws IOException{
initialize(server, fs, logDir, oldLogDir);
}
/**
* Empty constructor
*/
public Replication() {
}
public void initialize(final Server server, final FileSystem fs,
final Path logDir, final Path oldLogDir) throws IOException {
this.server = server;
this.conf = this.server.getConfiguration();
this.replication = isReplication(this.conf);
if (replication) {
try {
this.zkHelper = new ReplicationZookeeper(server, this.replicating);
} catch (KeeperException ke) {
throw new IOException("Failed replication handler create " +
"(replicating=" + this.replicating, ke);
}
this.replicationManager = new ReplicationSourceManager(zkHelper, conf,
this.server, fs, this.replicating, logDir, oldLogDir) ;
} else {
this.replicationManager = null;
this.zkHelper = null;
}
}
/**
* @param c Configuration to look at
* @return True if replication is enabled.
*/
public static boolean isReplication(final Configuration c) {
return c.getBoolean(REPLICATION_ENABLE_KEY, false);
}
/*
* Returns an object to listen to new hlog changes
**/
public WALActionsListener getWALActionsListener() {
return this;
}
/**
* Stops replication service.
*/
public void stopReplicationService() {
join();
}
/**
* Join with the replication threads
*/
public void join() {
if (this.replication) {
this.replicationManager.join();
}
}
/**
   * Carries the given list of log entries down to the sink
* @param entries list of entries to replicate
* @throws IOException
*/
public void replicateLogEntries(HLog.Entry[] entries) throws IOException {
if (this.replication) {
this.replicationSink.replicateEntries(entries);
}
}
/**
* If replication is enabled and this cluster is a master,
* it starts
* @throws IOException
*/
public void startReplicationService() throws IOException {
if (this.replication) {
this.replicationManager.init();
this.replicationSink = new ReplicationSink(this.conf, this.server);
}
}
/**
* Get the replication sources manager
   * @return the manager if replication is enabled, else null
*/
public ReplicationSourceManager getReplicationManager() {
return this.replicationManager;
}
@Override
public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
WALEdit logEdit) {
// Not interested
}
@Override
public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
WALEdit logEdit) {
NavigableMap<byte[], Integer> scopes =
new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
byte[] family;
for (KeyValue kv : logEdit.getKeyValues()) {
family = kv.getFamily();
int scope = htd.getFamily(family).getScope();
if (scope != REPLICATION_SCOPE_LOCAL &&
!scopes.containsKey(family)) {
scopes.put(family, scope);
}
}
if (!scopes.isEmpty()) {
logEdit.setScopes(scopes);
}
}
@Override
public void preLogRoll(Path oldPath, Path newPath) throws IOException {
// Not interested
}
@Override
public void postLogRoll(Path oldPath, Path newPath) throws IOException {
getReplicationManager().logRolled(newPath);
}
@Override
public void preLogArchive(Path oldPath, Path newPath) throws IOException {
// Not interested
}
@Override
public void postLogArchive(Path oldPath, Path newPath) throws IOException {
// Not interested
}
/**
* This method modifies the master's configuration in order to inject
* replication-related features
* @param conf
*/
public static void decorateMasterConfiguration(Configuration conf) {
if (!isReplication(conf)) {
return;
}
    String plugins = conf.get(HBASE_MASTER_LOGCLEANER_PLUGINS);
    String cleanerClass = ReplicationLogCleaner.class.getCanonicalName();
    if (plugins == null) {
      conf.set(HBASE_MASTER_LOGCLEANER_PLUGINS, cleanerClass);
    } else if (!plugins.contains(cleanerClass)) {
      conf.set(HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass);
    }
}
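  // Usage sketch (illustrative only, not part of the original file): a master
  // startup path would simply pass its Configuration through this hook.
  //   Configuration conf = HBaseConfiguration.create();
  //   Replication.decorateMasterConfiguration(conf); // no-op when replication is off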
@Override
public void logRollRequested() {
// Not interested
}
@Override
public void logCloseRequested() {
// not interested
}
}
| indi60/hbase-pmc | target/hbase-0.94.1/hbase-0.94.1/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java | Java | apache-2.0 | 7,362 |
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package codec
import (
"encoding/binary"
"runtime"
"unsafe"
"github.com/pingcap/errors"
)
const (
encGroupSize = 8
encMarker = byte(0xFF)
encPad = byte(0x0)
)
var (
pads = make([]byte, encGroupSize)
encPads = []byte{encPad}
)
// EncodeBytes guarantees the encoded value is in ascending order for comparison,
// encoding with the following rule:
// [group1][marker1]...[groupN][markerN]
// group is an 8-byte slice padded with 0s.
// marker is `0xFF - number of padding 0s`
// For example:
// [] -> [0, 0, 0, 0, 0, 0, 0, 0, 247]
// [1, 2, 3] -> [1, 2, 3, 0, 0, 0, 0, 0, 250]
// [1, 2, 3, 0] -> [1, 2, 3, 0, 0, 0, 0, 0, 251]
// [1, 2, 3, 4, 5, 6, 7, 8] -> [1, 2, 3, 4, 5, 6, 7, 8, 255, 0, 0, 0, 0, 0, 0, 0, 0, 247]
// Refer: https://github.com/facebook/mysql-5.6/wiki/MyRocks-record-format#memcomparable-format
func EncodeBytes(b []byte, data []byte) []byte {
// Allocate more space to avoid unnecessary slice growing.
// Assume that the byte slice size is about `(len(data) / encGroupSize + 1) * (encGroupSize + 1)` bytes,
// that is `(len(data) / 8 + 1) * 9` in our implementation.
dLen := len(data)
reallocSize := (dLen/encGroupSize + 1) * (encGroupSize + 1)
result := reallocBytes(b, reallocSize)
for idx := 0; idx <= dLen; idx += encGroupSize {
remain := dLen - idx
padCount := 0
if remain >= encGroupSize {
result = append(result, data[idx:idx+encGroupSize]...)
} else {
padCount = encGroupSize - remain
result = append(result, data[idx:]...)
result = append(result, pads[:padCount]...)
}
marker := encMarker - byte(padCount)
result = append(result, marker)
}
return result
}
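// Example (illustrative, not part of the original file), following the rule
// documented above: a 3-byte input becomes one padded group plus a marker.
//   enc := EncodeBytes(nil, []byte{1, 2, 3})
//   // enc == [1, 2, 3, 0, 0, 0, 0, 0, 250]; marker 250 = 0xFF - 5 pad bytes.
//   _, dec, _ := DecodeBytes(enc, nil)
//   // dec == [1, 2, 3]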
func decodeBytes(b []byte, buf []byte, reverse bool) ([]byte, []byte, error) {
if buf == nil {
buf = make([]byte, 0, len(b))
}
buf = buf[:0]
for {
if len(b) < encGroupSize+1 {
return nil, nil, errors.New("insufficient bytes to decode value")
}
groupBytes := b[:encGroupSize+1]
group := groupBytes[:encGroupSize]
marker := groupBytes[encGroupSize]
var padCount byte
if reverse {
padCount = marker
} else {
padCount = encMarker - marker
}
if padCount > encGroupSize {
return nil, nil, errors.Errorf("invalid marker byte, group bytes %q", groupBytes)
}
realGroupSize := encGroupSize - padCount
buf = append(buf, group[:realGroupSize]...)
b = b[encGroupSize+1:]
if padCount != 0 {
var padByte = encPad
if reverse {
padByte = encMarker
}
// Check validity of padding bytes.
for _, v := range group[realGroupSize:] {
if v != padByte {
return nil, nil, errors.Errorf("invalid padding byte, group bytes %q", groupBytes)
}
}
break
}
}
if reverse {
reverseBytes(buf)
}
return b, buf, nil
}
// DecodeBytes decodes bytes which is encoded by EncodeBytes before,
// returns the leftover bytes and decoded value if no error.
// `buf` is used to buffer data to avoid the cost of makeslice in decodeBytes when DecodeBytes is called by Decoder.DecodeOne.
func DecodeBytes(b []byte, buf []byte) ([]byte, []byte, error) {
return decodeBytes(b, buf, false)
}
// EncodeBytesDesc first encodes bytes using EncodeBytes, then bitwise reverses
// encoded value to guarantee the encoded value is in descending order for comparison.
func EncodeBytesDesc(b []byte, data []byte) []byte {
n := len(b)
b = EncodeBytes(b, data)
reverseBytes(b[n:])
return b
}
// DecodeBytesDesc decodes bytes which is encoded by EncodeBytesDesc before,
// returns the leftover bytes and decoded value if no error.
func DecodeBytesDesc(b []byte, buf []byte) ([]byte, []byte, error) {
return decodeBytes(b, buf, true)
}
// EncodeCompactBytes joins bytes with its length into a byte slice. It is more
// efficient in both space and time compare to EncodeBytes. Note that the encoded
// result is not memcomparable.
func EncodeCompactBytes(b []byte, data []byte) []byte {
b = reallocBytes(b, binary.MaxVarintLen64+len(data))
b = EncodeVarint(b, int64(len(data)))
return append(b, data...)
}
// DecodeCompactBytes decodes bytes which is encoded by EncodeCompactBytes before.
func DecodeCompactBytes(b []byte) ([]byte, []byte, error) {
b, n, err := DecodeVarint(b)
if err != nil {
return nil, nil, errors.Trace(err)
}
if int64(len(b)) < n {
return nil, nil, errors.Errorf("insufficient bytes to decode value, expected length: %v", n)
}
return b[n:], b[:n], nil
}
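// Example (illustrative, not part of the original file): the compact form is a
// varint length followed by the raw bytes, so it is shorter but not ordered.
//   enc := EncodeCompactBytes(nil, []byte("abc"))
//   // enc == [6, 'a', 'b', 'c']: the zigzag varint for 3 is the single byte 6.
//   _, data, _ := DecodeCompactBytes(enc)
//   // data == []byte("abc")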
// See https://golang.org/src/crypto/cipher/xor.go
const wordSize = int(unsafe.Sizeof(uintptr(0)))
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64"
func fastReverseBytes(b []byte) {
n := len(b)
w := n / wordSize
if w > 0 {
bw := *(*[]uintptr)(unsafe.Pointer(&b))
for i := 0; i < w; i++ {
bw[i] = ^bw[i]
}
}
for i := w * wordSize; i < n; i++ {
b[i] = ^b[i]
}
}
func safeReverseBytes(b []byte) {
for i := range b {
b[i] = ^b[i]
}
}
func reverseBytes(b []byte) {
if supportsUnaligned {
fastReverseBytes(b)
return
}
safeReverseBytes(b)
}
// reallocBytes is like realloc.
func reallocBytes(b []byte, n int) []byte {
newSize := len(b) + n
if cap(b) < newSize {
bs := make([]byte, len(b), newSize)
copy(bs, b)
return bs
}
	// slice b already has enough capacity to store n more bytes
return b
}
| tiancaiamao/tidb | util/codec/bytes.go | GO | apache-2.0 | 5,743 |
// <copyright file="TangoPointCloud.cs" company="Google">
//
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// </copyright>
//-----------------------------------------------------------------------
using System;
using System.Collections;
using System.Collections.Generic;
using Tango;
using UnityEngine;
/// <summary>
/// Utility functions for working with and visualizing point cloud data from the
/// Tango depth API. Used by the Tango Point Cloud prefab to enable depth point
/// functionality.
/// </summary>
public class TangoPointCloud : MonoBehaviour, ITangoPointCloud
{
/// <summary>
/// If set, the point cloud will be transformed to be in the Area
/// Description frame.
/// </summary>
public bool m_useAreaDescriptionPose;
/// <summary>
/// If set, update the point cloud's mesh (very slow, useful for debugging).
/// </summary>
public bool m_updatePointsMesh;
/// <summary>
/// The points of the point cloud, in world space.
///
/// Note that not every member of this array will be filled out. See
/// m_pointsCount.
/// </summary>
[HideInInspector]
public Vector3[] m_points;
/// <summary>
/// The number of points in m_points.
/// </summary>
[HideInInspector]
public int m_pointsCount = 0;
/// <summary>
/// The average depth (relative to the depth camera).
/// </summary>
[HideInInspector]
public float m_overallZ = 0.0f;
/// <summary>
/// Time between the last two depth events.
/// </summary>
[HideInInspector]
public float m_depthDeltaTime = 0.0f;
/// <summary>
/// The position of the floor at y height when FindFloor has been called.
///
/// The default value is 0, even if no floor has been found. When FindFloor has completed successfully,
/// the result is assigned here.
/// </summary>
[HideInInspector]
public float m_floorPlaneY = 0.0f;
/// <summary>
/// Check if a floor has been found.
///
/// The value is <c>true</c> if the method FindFloor has successfully found a floor, which is assigned
/// to m_floorPlaneY. The value is always <c>false</c> if FindFloor has not been called.
/// </summary>
[HideInInspector]
public bool m_floorFound = false;
/// <summary>
/// The maximum points displayed. Just some constant value.
/// </summary>
private const int MAX_POINT_COUNT = 61440;
/// <summary>
/// The minimum number of points near a world position y to determine that it is a reasonable floor.
/// </summary>
private const int RECOGNITION_THRESHOLD = 1000;
/// <summary>
/// The minimum number of points near a world position y to determine that it is not simply noise points.
/// </summary>
private const int NOISE_THRESHOLD = 500;
/// <summary>
/// The interval in meters between buckets of points. For example, a high sensitivity of 0.01 will group
/// points into buckets every 1cm.
/// </summary>
private const float SENSITIVITY = 0.02f;
private TangoApplication m_tangoApplication;
// Matrices for transforming pointcloud to world coordinates.
    // This equation takes the camera sensors' extrinsics into account.
// Full equation is:
// Matrix4x4 unityWorldTDepthCamera =
// m_unityWorldTStartService * startServiceTDevice * Matrix4x4.Inverse(m_imuTDevice) * m_imuTDepthCamera;
private Matrix4x4 m_unityWorldTStartService;
private Matrix4x4 m_imuTDevice;
private Matrix4x4 m_imuTDepthCamera;
// Matrix for transforming the Unity camera space to the color camera space.
private Matrix4x4 m_colorCameraTUnityCamera;
/// <summary>
/// Color camera intrinsics.
/// </summary>
private TangoCameraIntrinsics m_colorCameraIntrinsics;
/// <summary>
/// If the camera data has already been set up.
/// </summary>
private bool m_cameraDataSetUp;
/// <summary>
/// The Tango timestamp from the last update of m_points.
/// </summary>
private double m_depthTimestamp;
/// <summary>
/// Mesh this script will modify.
/// </summary>
private Mesh m_mesh;
private Renderer m_renderer;
// Pose controller from which the offset is queried.
private TangoDeltaPoseController m_tangoDeltaPoseController;
/// <summary>
/// Set to <c>true</c> when currently attempting to find a floor using depth points, <c>false</c> when not
/// floor finding.
/// </summary>
private bool m_findFloorWithDepth = false;
/// <summary>
/// Used for floor finding, container for the number of points that fall into a y bucket within a sensitivity range.
/// </summary>
private Dictionary<float, int> m_numPointsAtY;
/// <summary>
/// Used for floor finding, the list of y value buckets that have sufficient points near that y position height
/// to determine that it not simply noise.
/// </summary>
private List<float> m_nonNoiseBuckets;
/// @cond
/// <summary>
/// Use this for initialization.
/// </summary>
public void Start()
{
m_tangoApplication = FindObjectOfType<TangoApplication>();
m_tangoApplication.Register(this);
m_tangoDeltaPoseController = FindObjectOfType<TangoDeltaPoseController>();
m_unityWorldTStartService.SetColumn(0, new Vector4(1.0f, 0.0f, 0.0f, 0.0f));
m_unityWorldTStartService.SetColumn(1, new Vector4(0.0f, 0.0f, 1.0f, 0.0f));
m_unityWorldTStartService.SetColumn(2, new Vector4(0.0f, 1.0f, 0.0f, 0.0f));
m_unityWorldTStartService.SetColumn(3, new Vector4(0.0f, 0.0f, 0.0f, 1.0f));
        // Constant matrix converting the Unity camera frame to the color camera frame.
m_colorCameraTUnityCamera.SetColumn(0, new Vector4(1.0f, 0.0f, 0.0f, 0.0f));
m_colorCameraTUnityCamera.SetColumn(1, new Vector4(0.0f, -1.0f, 0.0f, 0.0f));
m_colorCameraTUnityCamera.SetColumn(2, new Vector4(0.0f, 0.0f, 1.0f, 0.0f));
m_colorCameraTUnityCamera.SetColumn(3, new Vector4(0.0f, 0.0f, 0.0f, 1.0f));
// Assign triangles, note: this is just for visualizing point in the mesh data.
m_points = new Vector3[MAX_POINT_COUNT];
m_mesh = GetComponent<MeshFilter>().mesh;
m_mesh.Clear();
// Points used for finding floor plane.
m_numPointsAtY = new Dictionary<float, int>();
m_nonNoiseBuckets = new List<float>();
m_renderer = GetComponent<Renderer>();
}
/// <summary>
/// Unity callback when the component gets destroyed.
/// </summary>
public void OnDestroy()
{
m_tangoApplication.Unregister(this);
}
/// <summary>
/// Callback that gets called when depth is available from the Tango Service.
/// </summary>
/// <param name="pointCloud">Depth information from Tango.</param>
public void OnTangoPointCloudAvailable(TangoPointCloudData pointCloud)
{
// Calculate the time since the last successful depth data
// collection.
if (m_depthTimestamp != 0.0)
{
m_depthDeltaTime = (float)((pointCloud.m_timestamp - m_depthTimestamp) * 1000.0);
}
// Fill in the data to draw the point cloud.
m_pointsCount = pointCloud.m_numPoints;
if (m_pointsCount > 0)
{
_SetUpCameraData();
TangoCoordinateFramePair pair;
TangoPoseData poseData = new TangoPoseData();
// Query pose to transform point cloud to world coordinates, here we are using the timestamp
// that we get from depth.
if (m_useAreaDescriptionPose)
{
pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
}
else
{
pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
}
PoseProvider.GetPoseAtTime(poseData, pointCloud.m_timestamp, pair);
if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
{
return;
}
Matrix4x4 startServiceTDevice = poseData.ToMatrix4x4();
// The transformation matrix that represents the pointcloud's pose.
// Explanation:
            // The point cloud, which is in the depth camera's frame, is
            // transformed into the Unity world's coordinate system. We then
            // extract the position and rotation from the uwTuc matrix and
            // apply them to the point cloud's transform.
Matrix4x4 unityWorldTDepthCamera = m_unityWorldTStartService * startServiceTDevice * Matrix4x4.Inverse(m_imuTDevice) * m_imuTDepthCamera;
transform.position = Vector3.zero;
transform.rotation = Quaternion.identity;
// Add offset to the pointcloud depending on the offset from TangoDeltaPoseController
Matrix4x4 unityWorldOffsetTDepthCamera;
if (m_tangoDeltaPoseController != null)
{
unityWorldOffsetTDepthCamera = m_tangoDeltaPoseController.UnityWorldOffset * unityWorldTDepthCamera;
}
else
{
unityWorldOffsetTDepthCamera = unityWorldTDepthCamera;
}
// Converting points array to world space.
m_overallZ = 0;
for (int i = 0; i < m_pointsCount; ++i)
{
Vector3 point = pointCloud[i];
m_points[i] = unityWorldOffsetTDepthCamera.MultiplyPoint3x4(point);
m_overallZ += point.z;
}
m_overallZ = m_overallZ / m_pointsCount;
m_depthTimestamp = pointCloud.m_timestamp;
if (m_updatePointsMesh)
{
                // Need to update indices too!
int[] indices = new int[m_pointsCount];
for (int i = 0; i < m_pointsCount; ++i)
{
indices[i] = i;
}
m_mesh.Clear();
m_mesh.vertices = m_points;
m_mesh.SetIndices(indices, MeshTopology.Points, 0);
}
// The color should be pose relative, we need to store enough info to go back to pose values.
m_renderer.material.SetMatrix("depthCameraTUnityWorld", unityWorldOffsetTDepthCamera.inverse);
// Try to find the floor using this set of depth points if requested.
if (m_findFloorWithDepth)
{
_FindFloorWithDepth();
}
}
else
{
m_overallZ = 0;
}
}
/// @endcond
/// <summary>
/// Finds the closest point from a point cloud to a position on screen.
///
/// This function is slow, as it looks at every single point in the point
/// cloud. Avoid calling this more than once a frame.
/// </summary>
/// <returns>The index of the closest point, or -1 if not found.</returns>
/// <param name="cam">The current camera.</param>
/// <param name="pos">Position on screen (in pixels).</param>
/// <param name="maxDist">The maximum pixel distance to allow.</param>
public int FindClosestPoint(Camera cam, Vector2 pos, int maxDist)
{
int bestIndex = -1;
float bestDistSqr = 0;
for (int it = 0; it < m_pointsCount; ++it)
{
Vector3 screenPos3 = cam.WorldToScreenPoint(m_points[it]);
Vector2 screenPos = new Vector2(screenPos3.x, screenPos3.y);
float distSqr = Vector2.SqrMagnitude(screenPos - pos);
if (distSqr > maxDist * maxDist)
{
continue;
}
if (bestIndex == -1 || distSqr < bestDistSqr)
{
bestIndex = it;
bestDistSqr = distSqr;
}
}
return bestIndex;
}
/// <summary>
/// Given a screen coordinate, finds a plane that most closely fits the
/// depth values in that area.
///
/// This function is slow, as it looks at every single point in the point
/// cloud. Avoid calling this more than once a frame. This also assumes the
/// Unity camera intrinsics match the device's color camera.
/// </summary>
/// <returns><c>true</c>, if a plane was found; <c>false</c> otherwise.</returns>
/// <param name="cam">The Unity camera.</param>
/// <param name="pos">The point in screen space to perform detection on.</param>
/// <param name="planeCenter">Filled in with the center of the plane in Unity world space.</param>
/// <param name="plane">Filled in with a model of the plane in Unity world space.</param>
public bool FindPlane(Camera cam, Vector2 pos, out Vector3 planeCenter, out Plane plane)
{
if (m_pointsCount == 0)
{
// No points to check, maybe not connected to the service yet
planeCenter = Vector3.zero;
plane = new Plane();
return false;
}
Matrix4x4 colorCameraTUnityWorld = m_colorCameraTUnityCamera * cam.transform.worldToLocalMatrix;
Vector2 normalizedPos = cam.ScreenToViewportPoint(pos);
// If the camera has a TangoARScreen attached, it is not displaying the entire color camera image. Correct
// the normalized coordinates by taking the clipping into account.
TangoARScreen arScreen = cam.gameObject.GetComponent<TangoARScreen>();
if (arScreen != null)
{
normalizedPos = arScreen.ViewportPointToCameraImagePoint(normalizedPos);
}
TangoCameraIntrinsics alignedIntrinsics = new TangoCameraIntrinsics();
VideoOverlayProvider.GetDeviceOientationAlignedIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR,
alignedIntrinsics);
int returnValue = TangoSupport.FitPlaneModelNearClick(
m_points, m_pointsCount, m_depthTimestamp, alignedIntrinsics, ref colorCameraTUnityWorld,
normalizedPos, out planeCenter, out plane);
if (returnValue == Common.ErrorType.TANGO_SUCCESS)
{
return true;
}
else
{
return false;
}
}
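    // Usage sketch (illustrative only; `pointCloud`, `cam`, and `touchPosition`
    // are placeholder names, not part of the original file):
    //   Vector3 planeCenter;
    //   Plane plane;
    //   if (pointCloud.FindPlane(cam, touchPosition, out planeCenter, out plane))
    //   {
    //       // e.g. place an object at planeCenter, oriented along plane.normal.
    //   }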
/// <summary>
/// Start processing the point cloud depth points to find the position of the floor.
/// </summary>
public void FindFloor()
{
m_floorFound = false;
m_findFloorWithDepth = true;
m_floorPlaneY = 0.0f;
}
/// <summary>
/// Sets up extrinsic matrixes and camera intrinsics for this hardware.
/// </summary>
private void _SetUpCameraData()
{
if (m_cameraDataSetUp)
{
return;
}
double timestamp = 0.0;
TangoCoordinateFramePair pair;
TangoPoseData poseData = new TangoPoseData();
// Query the extrinsics between IMU and device frame.
pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
m_imuTDevice = poseData.ToMatrix4x4();
// Query the extrinsics between IMU and depth camera frame.
pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
PoseProvider.GetPoseAtTime(poseData, timestamp, pair);
m_imuTDepthCamera = poseData.ToMatrix4x4();
// Also get the camera intrinsics
m_colorCameraIntrinsics = new TangoCameraIntrinsics();
VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, m_colorCameraIntrinsics);
m_cameraDataSetUp = true;
}
/// <summary>
/// Use the last received set of depth points to find a reasonable floor.
/// </summary>
private void _FindFloorWithDepth()
{
m_numPointsAtY.Clear();
m_nonNoiseBuckets.Clear();
// Count each depth point into a bucket based on its world position y value.
for (int i = 0; i < m_pointsCount; i++)
{
Vector3 point = m_points[i];
if (!point.Equals(Vector3.zero))
{
// Group similar points into buckets based on sensitivity.
float roundedY = Mathf.Round(point.y / SENSITIVITY) * SENSITIVITY;
if (!m_numPointsAtY.ContainsKey(roundedY))
{
m_numPointsAtY.Add(roundedY, 0);
}
m_numPointsAtY[roundedY]++;
// Check if the y plane is a non-noise plane.
if (m_numPointsAtY[roundedY] > NOISE_THRESHOLD && !m_nonNoiseBuckets.Contains(roundedY))
{
m_nonNoiseBuckets.Add(roundedY);
}
}
}
// Find a plane at the y value. The y value must be below the camera y position.
m_nonNoiseBuckets.Sort();
for (int i = 0; i < m_nonNoiseBuckets.Count; i++)
{
float yBucket = m_nonNoiseBuckets[i];
int numPoints = m_numPointsAtY[yBucket];
if (numPoints > RECOGNITION_THRESHOLD && yBucket < Camera.main.transform.position.y)
{
// Reject the plane if it is not the lowest.
if (yBucket > m_nonNoiseBuckets[0])
{
return;
}
m_floorFound = true;
m_findFloorWithDepth = false;
m_floorPlaneY = yBucket;
m_numPointsAtY.Clear();
m_nonNoiseBuckets.Clear();
}
}
}
}
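// Illustrative sketch (not part of the original file): a minimal consumer that
// starts floor detection and polls for the result. This assumes m_floorFound and
// m_floorPlaneY are public fields, as FindFloor() above suggests, and that the
// file's usings include UnityEngine.
public class TangoFloorFinderExample : MonoBehaviour
{
// Hypothetical reference, wired up in the Unity inspector.
public TangoPointCloud m_pointCloud;
public void Update()
{
// Kick off a new floor search on demand.
if (Input.GetKeyDown(KeyCode.F))
{
m_pointCloud.FindFloor();
}
// FindFloor() resets m_floorFound; it flips back to true once
// _FindFloorWithDepth() locates a sufficiently dense, low bucket.
if (m_pointCloud.m_floorFound)
{
Debug.Log("Floor plane found at y = " + m_pointCloud.m_floorPlaneY);
}
}
}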
| kyr7/tango-examples-unity | UnityExamples/Assets/TangoPrefabs/Scripts/TangoPointCloud.cs | C# | apache-2.0 | 18,770 |
#!/usr/bin/env ruby
# Encoding: utf-8
#
# Author:: api.dklimkin@gmail.com (Danial Klimkin)
#
# Copyright:: Copyright 2011, Google Inc. All Rights Reserved.
#
# License:: Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example illustrates how to retrieve all languages and carriers available
# for targeting.
#
# Tags: ConstantDataService.getLanguageCriterion
# Tags: ConstantDataService.getCarrierCriterion
require 'adwords_api'
def get_targetable_languages_and_carriers()
# AdwordsApi::Api will read a config file from ENV['HOME']/adwords_api.yml
# when called without parameters.
adwords = AdwordsApi::Api.new
# To enable logging of SOAP requests, set the log_level value to 'DEBUG' in
# the configuration file or provide your own logger:
# adwords.logger = Logger.new('adwords_xml.log')
constant_data_srv = adwords.service(:ConstantDataService, API_VERSION)
# Get all languages from ConstantDataService.
languages = constant_data_srv.get_language_criterion()
if languages
languages.each do |language|
puts "Language name is '%s', ID is %d and code is '%s'." %
[language[:name], language[:id], language[:code]]
end
else
puts 'No languages were found.'
end
# Get all carriers from ConstantDataService.
carriers = constant_data_srv.get_carrier_criterion()
if carriers
carriers.each do |carrier|
puts "Carrier name is '%s', ID is %d and country code is '%s'." %
[carrier[:name], carrier[:id], carrier[:country_code]]
end
else
puts 'No carriers were retrieved.'
end
end
if __FILE__ == $0
API_VERSION = :v201506
begin
get_targetable_languages_and_carriers()
# Authorization error.
rescue AdsCommon::Errors::OAuth2VerificationRequired => e
puts "Authorization credentials are not valid. Edit adwords_api.yml for " +
"OAuth2 client ID and secret and run misc/setup_oauth2.rb example " +
"to retrieve and store OAuth2 tokens."
puts "See this wiki page for more details:\n\n " +
'http://code.google.com/p/google-api-ads-ruby/wiki/OAuth2'
# HTTP errors.
rescue AdsCommon::Errors::HttpError => e
puts "HTTP Error: %s" % e
# API errors.
rescue AdwordsApi::Errors::ApiException => e
puts "Message: %s" % e.message
puts 'Errors:'
e.errors.each_with_index do |error, index|
puts "\tError [%d]:" % (index + 1)
error.each do |field, value|
puts "\t\t%s: %s" % [field, value]
end
end
end
end
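# Illustrative sketch (assumption, not from this file): the adwords_api gem
# reads its settings from ENV['HOME']/adwords_api.yml. A minimal configuration
# is commonly shaped like the following; every value is a placeholder.
#
#   :authentication:
#     :method: OAuth2
#     :oauth2_client_id: INSERT_CLIENT_ID_HERE
#     :oauth2_client_secret: INSERT_CLIENT_SECRET_HERE
#     :developer_token: INSERT_DEVELOPER_TOKEN_HERE
#     :client_customer_id: INSERT_CLIENT_CUSTOMER_ID_HERE
#   :service:
#     :environment: PRODUCTION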
| claimsmall/google-api-ads-ruby | adwords_api/examples/v201506/targeting/get_targetable_languages_and_carriers.rb | Ruby | apache-2.0 | 3,071 |
/*
* Copyright 2010-2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.elasticmapreduce.model;
/**
* Action On Failure
*/
public enum ActionOnFailure {
TERMINATE_JOB_FLOW("TERMINATE_JOB_FLOW"),
CANCEL_AND_WAIT("CANCEL_AND_WAIT"),
CONTINUE("CONTINUE");
private String value;
private ActionOnFailure(String value) {
this.value = value;
}
@Override
public String toString() {
return this.value;
}
/**
* Use this in place of valueOf.
*
* @param value
* real value
* @return ActionOnFailure corresponding to the value
*/
public static ActionOnFailure fromValue(String value) {
if (value == null || "".equals(value)) {
throw new IllegalArgumentException("Value cannot be null or empty!");
} else if ("TERMINATE_JOB_FLOW".equals(value)) {
return ActionOnFailure.TERMINATE_JOB_FLOW;
} else if ("CANCEL_AND_WAIT".equals(value)) {
return ActionOnFailure.CANCEL_AND_WAIT;
} else if ("CONTINUE".equals(value)) {
return ActionOnFailure.CONTINUE;
} else {
throw new IllegalArgumentException("Cannot create enum from " + value + " value!");
}
}
}
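// Illustrative sketch (not part of the original file): round-tripping a raw
// API string through fromValue and toString.
class ActionOnFailureExample {
    public static void main(String[] args) {
        ActionOnFailure action = ActionOnFailure.fromValue("CANCEL_AND_WAIT");
        System.out.println(action); // prints CANCEL_AND_WAIT
        // Unknown values fail fast instead of returning null:
        // ActionOnFailure.fromValue("BOGUS") throws IllegalArgumentException.
    }
}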
| XidongHuang/aws-sdk-for-java | src/main/java/com/amazonaws/services/elasticmapreduce/model/ActionOnFailure.java | Java | apache-2.0 | 1,818 |
'use strict';
/* // [START classdefinition] */
export default class exampleClass {
/* // [END classdefinition] */
constructor () {
console.log('Example Constructor');
}
exampleFunction () {
console.log('Example Function');
}
}
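// Illustrative usage (not part of the sample): importing and exercising the
// class from another module. The relative path is a placeholder.
//
//   import exampleClass from './example.js';
//   const instance = new exampleClass();
//   instance.exampleFunction();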
| beaufortfrancois/WebFundamentals | src/content/en/resources/jekyll/_code/example.js | JavaScript | apache-2.0 | 261 |
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the kubelet volume manager to
keep track of attached volumes and the pods that mounted them.
*/
package cache
import (
"fmt"
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
// ActualStateOfWorld defines a set of thread-safe operations for the kubelet
// volume manager's actual state of the world cache.
// This cache contains volumes->pods i.e. a set of all volumes attached to this
// node and the pods that the manager believes have successfully mounted the
// volume.
// Note: This is distinct from the ActualStateOfWorld implemented by the
// attach/detach controller. They both keep track of different objects. This
// contains kubelet volume manager specific state.
type ActualStateOfWorld interface {
// ActualStateOfWorld must implement the methods required to allow
// operationexecutor to interact with it.
operationexecutor.ActualStateOfWorldMounterUpdater
// ActualStateOfWorld must implement the methods required to allow
// operationexecutor to interact with it.
operationexecutor.ActualStateOfWorldAttacherUpdater
// AddPodToVolume adds the given pod to the given volume in the cache
// indicating the specified volume has been successfully mounted to the
// specified pod.
// If a pod with the same unique name already exists under the specified
// volume, reset the pod's remountRequired value.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, an error is returned.
AddPodToVolume(operationexecutor.MarkVolumeOpts) error
// MarkRemountRequired marks each volume that is successfully attached and
// mounted for the specified pod as requiring remount (if the plugin for the
// volume indicates it requires remounting on pod updates). Atomically
// updating volumes depend on this to update the contents of the volume on
// pod update.
MarkRemountRequired(podName volumetypes.UniquePodName)
// SetDeviceMountState sets the device mount state for the given volume. When
// deviceMountState is DeviceGloballyMounted, the device is mounted at a global
// mount point. When it is DeviceMountUncertain, the volume MAY be mounted at a
// global mount point. In both cases the volume must be unmounted from the
// global mount point prior to detach.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, an error is returned.
SetDeviceMountState(volumeName v1.UniqueVolumeName, deviceMountState operationexecutor.DeviceMountState, devicePath, deviceMountPath string) error
// DeletePodFromVolume removes the given pod from the given volume in the
// cache indicating the volume has been successfully unmounted from the pod.
// If a pod with the same unique name does not exist under the specified
// volume, this is a no-op.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, an error is returned.
DeletePodFromVolume(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error
// DeleteVolume removes the given volume from the list of attached volumes
// in the cache indicating the volume has been successfully detached from
// this node.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, this is a no-op.
// If a volume with the name volumeName exists and its list of mountedPods
// is not empty, an error is returned.
DeleteVolume(volumeName v1.UniqueVolumeName) error
// PodExistsInVolume returns true if the given pod exists in the list of
// mountedPods for the given volume in the cache, indicating that the volume
// is attached to this node and the pod has successfully mounted it.
// If a pod with the same unique name does not exist under the specified
// volume, false is returned.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, a volumeNotAttachedError is returned indicating the
// given volume is not yet attached.
// If the given volumeName/podName combo exists but the value of
// remountRequired is true, a remountRequiredError is returned indicating
// the given volume has been successfully mounted to this pod but should be
// remounted to reflect changes in the referencing pod. Atomically updating
// volumes depend on this to update the contents of the volume.
// All volume mounting calls should be idempotent so a second mount call for
// volumes that do not need to update contents should not fail.
PodExistsInVolume(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) (bool, string, error)
// PodRemovedFromVolume returns true if the given pod does not exist in the list of
// mountedPods for the given volume in the cache, indicating that the pod has
// fully unmounted it or never mounted the volume in the first place.
// If the volume is fully mounted or is in uncertain mount state for the pod, it is
// considered that the pod still exists in volume manager's actual state of the world
// and false is returned.
PodRemovedFromVolume(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) bool
// VolumeExistsWithSpecName returns true if the given volume specified with the
// volume spec name (a.k.a., InnerVolumeSpecName) exists in the list of
// volumes that should be attached to this node.
// If a pod with the same name does not exist under the specified
// volume, false is returned.
VolumeExistsWithSpecName(podName volumetypes.UniquePodName, volumeSpecName string) bool
// VolumeExists returns true if the given volume exists in the list of
// attached volumes in the cache, indicating the volume is attached to this
// node.
VolumeExists(volumeName v1.UniqueVolumeName) bool
// GetMountedVolumes generates and returns a list of volumes and the pods
// they are successfully attached and mounted for based on the current
// actual state of the world.
GetMountedVolumes() []MountedVolume
// GetAllMountedVolumes returns list of all possibly mounted volumes including
// those that are in VolumeMounted state and VolumeMountUncertain state.
GetAllMountedVolumes() []MountedVolume
// GetMountedVolumesForPod generates and returns a list of volumes that are
// successfully attached and mounted for the specified pod based on the
// current actual state of the world.
GetMountedVolumesForPod(podName volumetypes.UniquePodName) []MountedVolume
// GetPossiblyMountedVolumesForPod generates and returns a list of volumes for
// the specified pod that either are attached and mounted or are "uncertain",
// i.e. a volume plugin may be mounting the volume right now.
GetPossiblyMountedVolumesForPod(podName volumetypes.UniquePodName) []MountedVolume
// GetGloballyMountedVolumes generates and returns a list of all attached
// volumes that are globally mounted. This list can be used to determine
// which volumes should be reported as "in use" in the node's VolumesInUse
// status field. Globally mounted here refers to the shared plugin mount
// point for the attachable volume from which the pod specific mount points
// are created (via bind mount).
GetGloballyMountedVolumes() []AttachedVolume
// GetUnmountedVolumes generates and returns a list of attached volumes that
// have no mountedPods. This list can be used to determine which volumes are
// no longer referenced and may be globally unmounted and detached.
GetUnmountedVolumes() []AttachedVolume
// MarkFSResizeRequired marks each volume that is successfully attached and
// mounted for the specified pod as requiring file system resize (if the plugin for the
// volume indicates it requires file system resize).
MarkFSResizeRequired(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName)
// GetAttachedVolumes returns a list of volumes that is known to be attached
// to the node. This list can be used to determine volumes that are either in-use
// or have a mount/unmount operation pending.
GetAttachedVolumes() []AttachedVolume
}
// MountedVolume represents a volume that has successfully been mounted to a pod.
type MountedVolume struct {
operationexecutor.MountedVolume
}
// AttachedVolume represents a volume that is attached to a node.
type AttachedVolume struct {
operationexecutor.AttachedVolume
// DeviceMountState indicates if device has been globally mounted or is not.
DeviceMountState operationexecutor.DeviceMountState
}
// DeviceMayBeMounted returns true if device is mounted in global path or is in
// uncertain state.
func (av AttachedVolume) DeviceMayBeMounted() bool {
return av.DeviceMountState == operationexecutor.DeviceGloballyMounted ||
av.DeviceMountState == operationexecutor.DeviceMountUncertain
}
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
func NewActualStateOfWorld(
nodeName types.NodeName,
volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
return &actualStateOfWorld{
nodeName: nodeName,
attachedVolumes: make(map[v1.UniqueVolumeName]attachedVolume),
volumePluginMgr: volumePluginMgr,
}
}
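// Illustrative sketch (hypothetical wiring): constructing the cache and
// recording an attach. volumePluginMgr, volumeSpec and the literal names are
// placeholders supplied by the kubelet; passing an empty volume name makes
// addVolume generate one from the spec.
//
//	asw := NewActualStateOfWorld("node-1", volumePluginMgr)
//	err := asw.MarkVolumeAsAttached("", volumeSpec, "node-1", "/dev/sdb")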
// IsVolumeNotAttachedError returns true if the specified error is a
// volumeNotAttachedError.
func IsVolumeNotAttachedError(err error) bool {
_, ok := err.(volumeNotAttachedError)
return ok
}
// IsRemountRequiredError returns true if the specified error is a
// remountRequiredError.
func IsRemountRequiredError(err error) bool {
_, ok := err.(remountRequiredError)
return ok
}
type actualStateOfWorld struct {
// nodeName is the name of this node. This value is passed to Attach/Detach
nodeName types.NodeName
// attachedVolumes is a map containing the set of volumes the kubelet volume
// manager believes to be successfully attached to this node. Volume types
// that do not implement an attacher interface are assumed to be in this
// state by default.
// The key in this map is the name of the volume and the value is an object
// containing more information about the attached volume.
attachedVolumes map[v1.UniqueVolumeName]attachedVolume
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
sync.RWMutex
}
// attachedVolume represents a volume the kubelet volume manager believes to be
// successfully attached to a node it is managing. Volume types that do not
// implement an attacher are assumed to be in this state.
type attachedVolume struct {
// volumeName contains the unique identifier for this volume.
volumeName v1.UniqueVolumeName
// mountedPods is a map containing the set of pods that this volume has been
// successfully mounted to. The key in this map is the name of the pod and
// the value is a mountedPod object containing more information about the
// pod.
mountedPods map[volumetypes.UniquePodName]mountedPod
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to plugin methods.
// In particular, the Unmount method uses spec.Name() as the volumeSpecName
// in the mount path:
// /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{volumeSpecName}/
spec *volume.Spec
// pluginName is the Unescaped Qualified name of the volume plugin used to
// attach and mount this volume. It is stored separately in case the full
// volume spec (everything except the name) cannot be reconstructed for a
// volume that should be unmounted (which would be the case for a mount path
// read from disk without a full volume spec).
pluginName string
// pluginIsAttachable indicates the volume plugin used to attach and mount
// this volume implements the volume.Attacher interface
pluginIsAttachable bool
// deviceMountState stores information that tells us if device is mounted
// globally or not
deviceMountState operationexecutor.DeviceMountState
// devicePath contains the path on the node where the volume is attached for
// attachable volumes
devicePath string
// deviceMountPath contains the path on the node where the device should
// be mounted after it is attached.
deviceMountPath string
// volumeInUseErrorForExpansion indicates volume driver has previously returned volume-in-use error
// for this volume and volume expansion on this node should not be retried
volumeInUseErrorForExpansion bool
}
// The mountedPod object represents a pod for which the kubelet volume manager
// believes the underlying volume has successfully been mounted.
type mountedPod struct {
// the name of the pod
podName volumetypes.UniquePodName
// the UID of the pod
podUID types.UID
// mounter used to mount
mounter volume.Mounter
// mapper used to block volumes support
blockVolumeMapper volume.BlockVolumeMapper
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to plugin methods.
// In particular, the Unmount method uses spec.Name() as the volumeSpecName
// in the mount path:
// /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{volumeSpecName}/
volumeSpec *volume.Spec
// outerVolumeSpecName is the volume.Spec.Name() of the volume as referenced
// directly in the pod. If the volume was referenced through a persistent
// volume claim, this contains the volume.Spec.Name() of the persistent
// volume claim
outerVolumeSpecName string
// remountRequired indicates the underlying volume has been successfully
// mounted to this pod but it should be remounted to reflect changes in the
// referencing pod.
// Atomically updating volumes depend on this to update the contents of the
// volume. All volume mounting calls should be idempotent so a second mount
// call for volumes that do not need to update contents should not fail.
remountRequired bool
// volumeGidValue contains the value of the GID annotation, if present.
volumeGidValue string
// fsResizeRequired indicates the underlying volume has been successfully
// mounted to this pod but its size has been expanded after that.
fsResizeRequired bool
// volumeMountStateForPod stores state of volume mount for the pod. if it is:
// - VolumeMounted: means volume for pod has been successfully mounted
// - VolumeMountUncertain: means volume for pod may not be mounted, but it must be unmounted
volumeMountStateForPod operationexecutor.VolumeMountState
}
func (asw *actualStateOfWorld) MarkVolumeAsAttached(
volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName, devicePath string) error {
return asw.addVolume(volumeName, volumeSpec, devicePath)
}
func (asw *actualStateOfWorld) MarkVolumeAsUncertain(
volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName) error {
return nil
}
func (asw *actualStateOfWorld) MarkVolumeAsDetached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.DeleteVolume(volumeName)
}
func (asw *actualStateOfWorld) MarkVolumeAsMounted(markVolumeOpts operationexecutor.MarkVolumeOpts) error {
return asw.AddPodToVolume(markVolumeOpts)
}
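// Illustrative sketch (hypothetical values): callers package the mount details
// into operationexecutor.MarkVolumeOpts before marking the volume as mounted.
// The field names below are exactly the ones AddPodToVolume reads further down.
//
//	opts := operationexecutor.MarkVolumeOpts{
//	    PodName:             podName,
//	    PodUID:              podUID,
//	    VolumeName:          volumeName,
//	    Mounter:             mounter,
//	    OuterVolumeSpecName: outerVolumeSpecName,
//	    VolumeSpec:          volumeSpec,
//	    VolumeMountState:    operationexecutor.VolumeMounted,
//	}
//	err := asw.MarkVolumeAsMounted(opts)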
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
// no operation for kubelet side
}
func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) error {
// no operation for kubelet side
return nil
}
func (asw *actualStateOfWorld) MarkVolumeAsUnmounted(
podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error {
return asw.DeletePodFromVolume(podName, volumeName)
}
func (asw *actualStateOfWorld) MarkDeviceAsMounted(
volumeName v1.UniqueVolumeName, devicePath, deviceMountPath string) error {
return asw.SetDeviceMountState(volumeName, operationexecutor.DeviceGloballyMounted, devicePath, deviceMountPath)
}
func (asw *actualStateOfWorld) MarkDeviceAsUncertain(
volumeName v1.UniqueVolumeName, devicePath, deviceMountPath string) error {
return asw.SetDeviceMountState(volumeName, operationexecutor.DeviceMountUncertain, devicePath, deviceMountPath)
}
func (asw *actualStateOfWorld) MarkVolumeMountAsUncertain(markVolumeOpts operationexecutor.MarkVolumeOpts) error {
markVolumeOpts.VolumeMountState = operationexecutor.VolumeMountUncertain
return asw.AddPodToVolume(markVolumeOpts)
}
func (asw *actualStateOfWorld) MarkDeviceAsUnmounted(
volumeName v1.UniqueVolumeName) error {
return asw.SetDeviceMountState(volumeName, operationexecutor.DeviceNotMounted, "", "")
}
func (asw *actualStateOfWorld) GetDeviceMountState(volumeName v1.UniqueVolumeName) operationexecutor.DeviceMountState {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return operationexecutor.DeviceNotMounted
}
return volumeObj.deviceMountState
}
func (asw *actualStateOfWorld) MarkForInUseExpansionError(volumeName v1.UniqueVolumeName) {
asw.Lock()
defer asw.Unlock()
volumeObj, ok := asw.attachedVolumes[volumeName]
if ok {
volumeObj.volumeInUseErrorForExpansion = true
asw.attachedVolumes[volumeName] = volumeObj
}
}
func (asw *actualStateOfWorld) GetVolumeMountState(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) operationexecutor.VolumeMountState {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return operationexecutor.VolumeNotMounted
}
podObj, podExists := volumeObj.mountedPods[podName]
if !podExists {
return operationexecutor.VolumeNotMounted
}
return podObj.volumeMountStateForPod
}
func (asw *actualStateOfWorld) IsVolumeMountedElsewhere(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) bool {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return false
}
for _, podObj := range volumeObj.mountedPods {
if podName != podObj.podName {
// Treat uncertain mount state as mounted until certain.
if podObj.volumeMountStateForPod != operationexecutor.VolumeNotMounted {
return true
}
}
}
return false
}
// addVolume adds the given volume to the cache indicating the specified
// volume is attached to this node. If no volume name is supplied, a unique
// volume name is generated from the volumeSpec and used as the key. If a
// volume with the same generated name already exists, this is a noop. If no
// volume plugin can support the given volumeSpec or more than one plugin can
// support it, an error is returned.
func (asw *actualStateOfWorld) addVolume(
volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, devicePath string) error {
asw.Lock()
defer asw.Unlock()
volumePlugin, err := asw.volumePluginMgr.FindPluginBySpec(volumeSpec)
if err != nil || volumePlugin == nil {
return fmt.Errorf(
"failed to get Plugin from volumeSpec for volume %q err=%v",
volumeSpec.Name(),
err)
}
if len(volumeName) == 0 {
volumeName, err = util.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
if err != nil {
return fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v",
volumeSpec.Name(),
volumePlugin.GetPluginName(),
err)
}
}
pluginIsAttachable := false
if attachablePlugin, err := asw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec); err == nil && attachablePlugin != nil {
pluginIsAttachable = true
}
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
volumeObj = attachedVolume{
volumeName: volumeName,
spec: volumeSpec,
mountedPods: make(map[volumetypes.UniquePodName]mountedPod),
pluginName: volumePlugin.GetPluginName(),
pluginIsAttachable: pluginIsAttachable,
deviceMountState: operationexecutor.DeviceNotMounted,
devicePath: devicePath,
}
} else {
// If volume object already exists, update the fields such as device path
volumeObj.devicePath = devicePath
klog.V(2).InfoS("Volume is already added to attachedVolume list, update device path", "volumeName", volumeName, "path", devicePath)
}
asw.attachedVolumes[volumeName] = volumeObj
return nil
}
func (asw *actualStateOfWorld) AddPodToVolume(markVolumeOpts operationexecutor.MarkVolumeOpts) error {
podName := markVolumeOpts.PodName
podUID := markVolumeOpts.PodUID
volumeName := markVolumeOpts.VolumeName
mounter := markVolumeOpts.Mounter
blockVolumeMapper := markVolumeOpts.BlockVolumeMapper
outerVolumeSpecName := markVolumeOpts.OuterVolumeSpecName
volumeGidValue := markVolumeOpts.VolumeGidVolume
volumeSpec := markVolumeOpts.VolumeSpec
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return fmt.Errorf(
"no volume with the name %q exists in the list of attached volumes",
volumeName)
}
podObj, podExists := volumeObj.mountedPods[podName]
if !podExists {
podObj = mountedPod{
podName: podName,
podUID: podUID,
mounter: mounter,
blockVolumeMapper: blockVolumeMapper,
outerVolumeSpecName: outerVolumeSpecName,
volumeGidValue: volumeGidValue,
volumeSpec: volumeSpec,
volumeMountStateForPod: markVolumeOpts.VolumeMountState,
}
}
// If pod exists, reset remountRequired value
podObj.remountRequired = false
podObj.volumeMountStateForPod = markVolumeOpts.VolumeMountState
if mounter != nil {
// The mounter stored in the object may have old information,
// use the newest one.
podObj.mounter = mounter
}
asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
return nil
}
func (asw *actualStateOfWorld) MarkVolumeAsResized(
podName volumetypes.UniquePodName,
volumeName v1.UniqueVolumeName) error {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return fmt.Errorf(
"no volume with the name %q exists in the list of attached volumes",
volumeName)
}
podObj, podExists := volumeObj.mountedPods[podName]
if !podExists {
return fmt.Errorf(
"no pod with the name %q exists in the mounted pods list of volume %s",
podName,
volumeName)
}
klog.V(5).InfoS("Pod volume has been resized", "uniquePodName", podName, "volumeName", volumeName, "outerVolumeSpecName", podObj.outerVolumeSpecName)
podObj.fsResizeRequired = false
asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
return nil
}
func (asw *actualStateOfWorld) MarkRemountRequired(
podName volumetypes.UniquePodName) {
asw.Lock()
defer asw.Unlock()
for volumeName, volumeObj := range asw.attachedVolumes {
if podObj, podExists := volumeObj.mountedPods[podName]; podExists {
volumePlugin, err :=
asw.volumePluginMgr.FindPluginBySpec(podObj.volumeSpec)
if err != nil || volumePlugin == nil {
// Log and continue processing
klog.ErrorS(nil, "MarkRemountRequired failed to FindPluginBySpec for volume", "uniquePodName", podObj.podName, "podUID", podObj.podUID, "volumeName", volumeName, "volumeSpecName", podObj.volumeSpec.Name())
continue
}
if volumePlugin.RequiresRemount(podObj.volumeSpec) {
podObj.remountRequired = true
asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
}
}
}
}
func (asw *actualStateOfWorld) MarkFSResizeRequired(
volumeName v1.UniqueVolumeName,
podName volumetypes.UniquePodName) {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
klog.InfoS("MarkFSResizeRequired for volume failed as volume does not exist", "volumeName", volumeName)
return
}
podObj, podExists := volumeObj.mountedPods[podName]
if !podExists {
klog.InfoS("MarkFSResizeRequired for volume failed because the pod does not exist", "uniquePodName", podName, "volumeName", volumeName)
return
}
volumePlugin, err :=
asw.volumePluginMgr.FindNodeExpandablePluginBySpec(podObj.volumeSpec)
if err != nil || volumePlugin == nil {
// Log and continue processing
klog.ErrorS(nil, "MarkFSResizeRequired failed to find expandable plugin for volume", "uniquePodName", podObj.podName, "volumeName", volumeObj.volumeName, "volumeSpecName", podObj.volumeSpec.Name())
return
}
if volumePlugin.RequiresFSResize() {
if !podObj.fsResizeRequired {
klog.V(3).InfoS("PVC volume of the pod requires file system resize", "uniquePodName", podName, "volumeName", volumeName, "outerVolumeSpecName", podObj.outerVolumeSpecName)
podObj.fsResizeRequired = true
}
asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
}
}
func (asw *actualStateOfWorld) SetDeviceMountState(
volumeName v1.UniqueVolumeName, deviceMountState operationexecutor.DeviceMountState, devicePath, deviceMountPath string) error {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return fmt.Errorf(
"no volume with the name %q exists in the list of attached volumes",
volumeName)
}
volumeObj.deviceMountState = deviceMountState
volumeObj.deviceMountPath = deviceMountPath
if devicePath != "" {
volumeObj.devicePath = devicePath
}
asw.attachedVolumes[volumeName] = volumeObj
return nil
}
func (asw *actualStateOfWorld) DeletePodFromVolume(
podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return fmt.Errorf(
"no volume with the name %q exists in the list of attached volumes",
volumeName)
}
_, podExists := volumeObj.mountedPods[podName]
if podExists {
delete(asw.attachedVolumes[volumeName].mountedPods, podName)
}
return nil
}
func (asw *actualStateOfWorld) DeleteVolume(volumeName v1.UniqueVolumeName) error {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return nil
}
if len(volumeObj.mountedPods) != 0 {
return fmt.Errorf(
"failed to DeleteVolume %q, it still has %v mountedPods",
volumeName,
len(volumeObj.mountedPods))
}
delete(asw.attachedVolumes, volumeName)
return nil
}
func (asw *actualStateOfWorld) PodExistsInVolume(
podName volumetypes.UniquePodName,
volumeName v1.UniqueVolumeName) (bool, string, error) {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return false, "", newVolumeNotAttachedError(volumeName)
}
podObj, podExists := volumeObj.mountedPods[podName]
if podExists {
// if volume mount was uncertain we should keep trying to mount the volume
if podObj.volumeMountStateForPod == operationexecutor.VolumeMountUncertain {
return false, volumeObj.devicePath, nil
}
if podObj.remountRequired {
return true, volumeObj.devicePath, newRemountRequiredError(volumeObj.volumeName, podObj.podName)
}
if podObj.fsResizeRequired &&
!volumeObj.volumeInUseErrorForExpansion &&
utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) {
return true, volumeObj.devicePath, newFsResizeRequiredError(volumeObj.volumeName, podObj.podName)
}
}
return podExists, volumeObj.devicePath, nil
}
func (asw *actualStateOfWorld) PodRemovedFromVolume(
podName volumetypes.UniquePodName,
volumeName v1.UniqueVolumeName) bool {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return true
}
podObj, podExists := volumeObj.mountedPods[podName]
if podExists {
// if volume mount was uncertain we should keep trying to unmount the volume
if podObj.volumeMountStateForPod == operationexecutor.VolumeMountUncertain {
return false
}
if podObj.volumeMountStateForPod == operationexecutor.VolumeMounted {
return false
}
}
return true
}
func (asw *actualStateOfWorld) VolumeExistsWithSpecName(podName volumetypes.UniquePodName, volumeSpecName string) bool {
asw.RLock()
defer asw.RUnlock()
for _, volumeObj := range asw.attachedVolumes {
if podObj, podExists := volumeObj.mountedPods[podName]; podExists {
if podObj.volumeSpec.Name() == volumeSpecName {
return true
}
}
}
return false
}
func (asw *actualStateOfWorld) VolumeExists(
volumeName v1.UniqueVolumeName) bool {
asw.RLock()
defer asw.RUnlock()
_, volumeExists := asw.attachedVolumes[volumeName]
return volumeExists
}
func (asw *actualStateOfWorld) GetMountedVolumes() []MountedVolume {
asw.RLock()
defer asw.RUnlock()
mountedVolume := make([]MountedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
for _, podObj := range volumeObj.mountedPods {
if podObj.volumeMountStateForPod == operationexecutor.VolumeMounted {
mountedVolume = append(
mountedVolume,
getMountedVolume(&podObj, &volumeObj))
}
}
}
return mountedVolume
}
// GetAllMountedVolumes returns all volumes which could be locally mounted on
// this node, including those in uncertain mount state.
func (asw *actualStateOfWorld) GetAllMountedVolumes() []MountedVolume {
asw.RLock()
defer asw.RUnlock()
mountedVolume := make([]MountedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
for _, podObj := range volumeObj.mountedPods {
if podObj.volumeMountStateForPod == operationexecutor.VolumeMounted ||
podObj.volumeMountStateForPod == operationexecutor.VolumeMountUncertain {
mountedVolume = append(
mountedVolume,
getMountedVolume(&podObj, &volumeObj))
}
}
}
return mountedVolume
}
func (asw *actualStateOfWorld) GetMountedVolumesForPod(
podName volumetypes.UniquePodName) []MountedVolume {
asw.RLock()
defer asw.RUnlock()
mountedVolume := make([]MountedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
for mountedPodName, podObj := range volumeObj.mountedPods {
if mountedPodName == podName && podObj.volumeMountStateForPod == operationexecutor.VolumeMounted {
mountedVolume = append(
mountedVolume,
getMountedVolume(&podObj, &volumeObj))
}
}
}
return mountedVolume
}
func (asw *actualStateOfWorld) GetPossiblyMountedVolumesForPod(
podName volumetypes.UniquePodName) []MountedVolume {
asw.RLock()
defer asw.RUnlock()
mountedVolume := make([]MountedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
for mountedPodName, podObj := range volumeObj.mountedPods {
if mountedPodName == podName &&
(podObj.volumeMountStateForPod == operationexecutor.VolumeMounted ||
podObj.volumeMountStateForPod == operationexecutor.VolumeMountUncertain) {
mountedVolume = append(
mountedVolume,
getMountedVolume(&podObj, &volumeObj))
}
}
}
return mountedVolume
}
func (asw *actualStateOfWorld) GetGloballyMountedVolumes() []AttachedVolume {
asw.RLock()
defer asw.RUnlock()
globallyMountedVolumes := make(
[]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
if volumeObj.deviceMountState == operationexecutor.DeviceGloballyMounted {
globallyMountedVolumes = append(
globallyMountedVolumes,
asw.newAttachedVolume(&volumeObj))
}
}
return globallyMountedVolumes
}
func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
asw.RLock()
defer asw.RUnlock()
allAttachedVolumes := make(
[]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
allAttachedVolumes = append(
allAttachedVolumes,
asw.newAttachedVolume(&volumeObj))
}
return allAttachedVolumes
}
func (asw *actualStateOfWorld) GetUnmountedVolumes() []AttachedVolume {
asw.RLock()
defer asw.RUnlock()
unmountedVolumes := make([]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
if len(volumeObj.mountedPods) == 0 {
unmountedVolumes = append(
unmountedVolumes,
asw.newAttachedVolume(&volumeObj))
}
}
return unmountedVolumes
}
func (asw *actualStateOfWorld) newAttachedVolume(
attachedVolume *attachedVolume) AttachedVolume {
return AttachedVolume{
AttachedVolume: operationexecutor.AttachedVolume{
VolumeName: attachedVolume.volumeName,
VolumeSpec: attachedVolume.spec,
NodeName: asw.nodeName,
PluginIsAttachable: attachedVolume.pluginIsAttachable,
DevicePath: attachedVolume.devicePath,
DeviceMountPath: attachedVolume.deviceMountPath,
PluginName: attachedVolume.pluginName},
DeviceMountState: attachedVolume.deviceMountState,
}
}
// Compile-time check to ensure volumeNotAttachedError implements the error interface
var _ error = volumeNotAttachedError{}
// volumeNotAttachedError is an error returned when PodExistsInVolume() fails to
// find specified volume in the list of attached volumes.
type volumeNotAttachedError struct {
volumeName v1.UniqueVolumeName
}
func (err volumeNotAttachedError) Error() string {
return fmt.Sprintf(
"volumeName %q does not exist in the list of attached volumes",
err.volumeName)
}
func newVolumeNotAttachedError(volumeName v1.UniqueVolumeName) error {
return volumeNotAttachedError{
volumeName: volumeName,
}
}
// Compile-time check to ensure remountRequiredError implements the error interface
var _ error = remountRequiredError{}
// remountRequiredError is an error returned when PodExistsInVolume() found
// volume/pod attached/mounted but remountRequired was true, indicating the
// given volume should be remounted to the pod to reflect changes in the
// referencing pod.
type remountRequiredError struct {
volumeName v1.UniqueVolumeName
podName volumetypes.UniquePodName
}
func (err remountRequiredError) Error() string {
return fmt.Sprintf(
"volumeName %q is mounted to %q but should be remounted",
err.volumeName, err.podName)
}
func newRemountRequiredError(
volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) error {
return remountRequiredError{
volumeName: volumeName,
podName: podName,
}
}
// fsResizeRequiredError is an error returned when PodExistsInVolume() found
// volume/pod attached/mounted but fsResizeRequired was true, indicating the
// given volume received a resize request after it was attached/mounted.
type fsResizeRequiredError struct {
volumeName v1.UniqueVolumeName
podName volumetypes.UniquePodName
}
func (err fsResizeRequiredError) Error() string {
return fmt.Sprintf(
"volumeName %q mounted to %q needs to resize file system",
err.volumeName, err.podName)
}
func newFsResizeRequiredError(
volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) error {
return fsResizeRequiredError{
volumeName: volumeName,
podName: podName,
}
}
// IsFSResizeRequiredError returns true if the specified error is a
// fsResizeRequiredError.
func IsFSResizeRequiredError(err error) bool {
_, ok := err.(fsResizeRequiredError)
return ok
}
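// Illustrative caller-side sketch (hypothetical): code that consumes
// PodExistsInVolume typically branches on the sentinel-error helpers above.
//
//	mounted, devicePath, err := asw.PodExistsInVolume(podName, volumeName)
//	switch {
//	case IsVolumeNotAttachedError(err):
//	    // attach the volume before mounting
//	case IsRemountRequiredError(err):
//	    // mounted, but must be remounted to pick up pod changes
//	case IsFSResizeRequiredError(err):
//	    // mounted, but the file system must be expanded
//	case err == nil && mounted:
//	    _ = devicePath // mounted and up to date
//	}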
// getMountedVolume constructs and returns a MountedVolume object from the given
// mountedPod and attachedVolume objects.
func getMountedVolume(
mountedPod *mountedPod, attachedVolume *attachedVolume) MountedVolume {
return MountedVolume{
MountedVolume: operationexecutor.MountedVolume{
PodName: mountedPod.podName,
VolumeName: attachedVolume.volumeName,
InnerVolumeSpecName: mountedPod.volumeSpec.Name(),
OuterVolumeSpecName: mountedPod.outerVolumeSpecName,
PluginName: attachedVolume.pluginName,
PodUID: mountedPod.podUID,
Mounter: mountedPod.mounter,
BlockVolumeMapper: mountedPod.blockVolumeMapper,
VolumeGidValue: mountedPod.volumeGidValue,
VolumeSpec: mountedPod.volumeSpec,
DeviceMountPath: attachedVolume.deviceMountPath}}
}
| ravilr/kubernetes | pkg/kubelet/volumemanager/cache/actual_state_of_world.go | GO | apache-2.0 | 36,672 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.internal.cache.control;
import java.util.Set;
import org.apache.geode.internal.cache.control.ResourceAdvisor.ResourceManagerProfile;
/**
* Implemented by classes that the ResourceManager creates in order to monitor a specific type of
* resource (heap memory, off-heap memory, disk, etc.).
*
* @since Geode 1.0
*/
interface ResourceMonitor {
/**
* Ask the monitor to notify the given listeners of the given event.
*
* @param listeners Set of listeners of notify.
* @param event Event to send to the listeners.
*/
void notifyListeners(final Set<ResourceListener<?>> listeners,
final ResourceEvent event);
/**
* Ask the monitor to stop monitoring.
*/
void stopMonitoring();
/**
* Populate the fields in the profile that are appropriate for this monitor.
*
* @param profile The profile to populate.
*/
void fillInProfile(final ResourceManagerProfile profile);
}
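// Illustrative sketch (not part of Geode): the minimal shape of a monitor
// implementation. The listener dispatch is elided because the ResourceListener
// callback signature is not shown in this file.
class NoOpResourceMonitor implements ResourceMonitor {
  @Override
  public void notifyListeners(final Set<ResourceListener<?>> listeners,
      final ResourceEvent event) {
    // A real monitor would deliver 'event' to each listener here.
  }
  @Override
  public void stopMonitoring() {
    // A real monitor would cancel its sampling task here.
  }
  @Override
  public void fillInProfile(final ResourceManagerProfile profile) {
    // A real monitor would record its current state into 'profile'.
  }
}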
| smgoller/geode | geode-core/src/main/java/org/apache/geode/internal/cache/control/ResourceMonitor.java | Java | apache-2.0 | 1,735 |
// Generated by xsd compiler for android/java
// DO NOT CHANGE!
package ebay.apis.eblbasecomponents;
import java.io.Serializable;
import com.leansoft.nano.annotation.*;
import java.util.List;
import java.util.Date;
/**
*
* Returns the estimated fees for the listing that is being verified for a re-list.
*
*/
@RootElement(name = "VerifyRelistItemResponse", namespace = "urn:ebay:apis:eBLBaseComponents")
public class VerifyRelistItemResponseType extends AbstractResponseType implements Serializable {
private static final long serialVersionUID = -1L;
@Element(name = "ItemID")
private String itemID;
@Element(name = "Fees")
private FeesType fees;
@Element(name = "StartTime")
private Date startTime;
@Element(name = "EndTime")
private Date endTime;
@Element(name = "DiscountReason")
private List<DiscountReasonCodeType> discountReason;
@Element(name = "ProductSuggestions")
private ProductSuggestionsType productSuggestions;
/**
* public getter
*
*
* Unique item ID for the new listing. As VerifyRelistItem does not
* actually re-list an item, returns 0 instead of a normal item ID.
*
*
* @returns java.lang.String
*/
public String getItemID() {
return this.itemID;
}
/**
* public setter
*
*
* Unique item ID for the new listing. As VerifyRelistItem does not
* actually re-list an item, returns 0 instead of a normal item ID.
*
*
* @param java.lang.String
*/
public void setItemID(String itemID) {
this.itemID = itemID;
}
/**
* public getter
*
*
* Child elements contain the estimated listing fees for the new item
* listing. The fees do not include the Final Value Fee (FVF), which cannot
* be determined until an item is sold.
*
*
* @returns ebay.apis.eblbasecomponents.FeesType
*/
public FeesType getFees() {
return this.fees;
}
/**
* public setter
*
*
* Child elements contain the estimated listing fees for the new item
* listing. The fees do not include the Final Value Fee (FVF), which cannot
* be determined until an item is sold.
*
*
* @param ebay.apis.eblbasecomponents.FeesType
*/
public void setFees(FeesType fees) {
this.fees = fees;
}
/**
* public getter
*
*
* Date and time the new listing became active on the eBay site.
*
*
* @returns java.util.Date
*/
public Date getStartTime() {
return this.startTime;
}
/**
* public setter
*
*
* Date and time the new listing became active on the eBay site.
*
*
* @param java.util.Date
*/
public void setStartTime(Date startTime) {
this.startTime = startTime;
}
/**
* public getter
*
*
* Date and time when the new listing ends. This is the starting time plus
* the listing duration.
*
*
* @returns java.util.Date
*/
public Date getEndTime() {
return this.endTime;
}
/**
* public setter
*
*
* Date and time when the new listing ends. This is the starting time plus
* the listing duration.
*
*
* @param java.util.Date
*/
public void setEndTime(Date endTime) {
this.endTime = endTime;
}
/**
* public getter
*
*
* The nature of the discount, if a discount would have applied
* had this actually been listed at this time.
*
*
* @returns java.util.List<ebay.apis.eblbasecomponents.DiscountReasonCodeType>
*/
public List<DiscountReasonCodeType> getDiscountReason() {
return this.discountReason;
}
/**
* public setter
*
*
* The nature of the discount, if a discount would have applied
* had this actually been listed at this time.
*
*
* @param java.util.List<ebay.apis.eblbasecomponents.DiscountReasonCodeType>
*/
public void setDiscountReason(List<DiscountReasonCodeType> discountReason) {
this.discountReason = discountReason;
}
/**
* public getter
*
*
* Provides a list of products recommended by eBay which match the item information
* provided by the seller.
* Not applicable to Half.com.
*
*
* @returns ebay.apis.eblbasecomponents.ProductSuggestionsType
*/
public ProductSuggestionsType getProductSuggestions() {
return this.productSuggestions;
}
/**
* public setter
*
*
* Provides a list of products recommended by eBay which match the item information
* provided by the seller.
* Not applicable to Half.com.
*
*
* @param ebay.apis.eblbasecomponents.ProductSuggestionsType
*/
public void setProductSuggestions(ProductSuggestionsType productSuggestions) {
this.productSuggestions = productSuggestions;
}
} | bulldog2011/nano-rest | sample/HelloEBayTrading/src/ebay/apis/eblbasecomponents/VerifyRelistItemResponseType.java | Java | apache-2.0 | 4,812 |
// Copyright (C) 2013 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.server.access;
import com.google.gerrit.extensions.registration.DynamicMap;
import com.google.gerrit.extensions.restapi.IdString;
import com.google.gerrit.extensions.restapi.ResourceNotFoundException;
import com.google.gerrit.extensions.restapi.RestCollection;
import com.google.gerrit.extensions.restapi.RestView;
import com.google.gerrit.extensions.restapi.TopLevelResource;
import com.google.inject.Inject;
import com.google.inject.Provider;
public class AccessCollection implements
RestCollection<TopLevelResource, AccessResource> {
private final Provider<ListAccess> list;
private final DynamicMap<RestView<AccessResource>> views;
@Inject
AccessCollection(Provider<ListAccess> list,
DynamicMap<RestView<AccessResource>> views) {
this.list = list;
this.views = views;
}
@Override
public RestView<TopLevelResource> list() {
return list.get();
}
@Override
public AccessResource parse(TopLevelResource parent, IdString id)
throws ResourceNotFoundException {
throw new ResourceNotFoundException(id);
}
@Override
public DynamicMap<RestView<AccessResource>> views() {
return views;
}
}
| bootstraponline-archive/gerrit-mirror | gerrit-server/src/main/java/com/google/gerrit/server/access/AccessCollection.java | Java | apache-2.0 | 1,789 |
package com.hubspot.singularity.s3.base.config;
import static com.hubspot.mesos.JavaUtils.obfuscateValue;
import java.util.Objects;
import org.jets3t.service.security.AWSCredentials;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.hubspot.singularity.runner.base.jackson.Obfuscate;
public class SingularityS3Credentials {
private final String accessKey;
private final String secretKey;
@JsonCreator
public SingularityS3Credentials(@JsonProperty("accessKey") String accessKey,
@JsonProperty("secretKey") String secretKey) {
this.accessKey = accessKey;
this.secretKey = secretKey;
}
@Obfuscate
public String getAccessKey() {
return accessKey;
}
@Obfuscate
public String getSecretKey() {
return secretKey;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
SingularityS3Credentials that = (SingularityS3Credentials) o;
return Objects.equals(accessKey, that.accessKey) &&
Objects.equals(secretKey, that.secretKey);
}
@Override
public int hashCode() {
return Objects.hash(accessKey, secretKey);
}
@Override
public String toString() {
return "SingularityS3Credentials[" +
"accessKey='" + obfuscateValue(accessKey) + '\'' +
", secretKey='" + obfuscateValue(secretKey) + '\'' +
']';
}
@JsonIgnore
public AWSCredentials toAWSCredentials() {
return new AWSCredentials(accessKey, secretKey);
}
}
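// Illustrative sketch (not part of the original file): constructing credentials
// with placeholder values and converting them for jets3t. Note that toString()
// obfuscates both keys via obfuscateValue.
class SingularityS3CredentialsExample {
  public static void main(String[] args) {
    SingularityS3Credentials credentials =
        new SingularityS3Credentials("ACCESS-KEY-PLACEHOLDER", "SECRET-KEY-PLACEHOLDER");
    AWSCredentials awsCredentials = credentials.toAWSCredentials();
    System.out.println(credentials); // keys printed obfuscated
  }
}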
| calebTomlinson/Singularity | SingularityS3Base/src/main/java/com/hubspot/singularity/s3/base/config/SingularityS3Credentials.java | Java | apache-2.0 | 1,701 |
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//-----------------------------------------------------------------------------
//
// Description:
// The package properties are a subset of the standard OLE property sets
// SummaryInformation and DocumentSummaryInformation, and include such properties
// as Title and Subject.
//
//-----------------------------------------------------------------------------
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.Text;
using System.Xml;
namespace System.IO.Packaging
{
using Properties;
/// <summary>
/// The package properties are a subset of the standard OLE property sets
/// SummaryInformation and DocumentSummaryInformation, and include such properties
/// as Title and Subject.
/// </summary>
/// <remarks>
/// <para>Setting a property to null deletes this property. 'null' is never strictly speaking
/// a property value, but an absence indicator.</para>
/// </remarks>
internal class PartBasedPackageProperties : PackageProperties
{
//------------------------------------------------------
//
// Constructors
//
//------------------------------------------------------
#region Constructors
internal PartBasedPackageProperties(Package package)
{
_package = package;
// Initialize literals as Xml Atomic strings.
_nameTable = PackageXmlStringTable.NameTable;
ReadPropertyValuesFromPackage();
// No matter what happens during initialization, the dirty flag should not be set.
_dirty = false;
}
#endregion Constructors
//------------------------------------------------------
//
// Public Methods
//
//------------------------------------------------------
//------------------------------------------------------
//
// Public Properties
//
//------------------------------------------------------
#region Public Properties
/// <value>
/// The primary creator. The identification is environment-specific and
/// can consist of a name, email address, employee ID, etc. It is
/// recommended that this value be only as verbose as necessary to
/// identify the individual.
/// </value>
public override string Creator
{
get
{
return (string)GetPropertyValue(PackageXmlEnum.Creator);
}
set
{
RecordNewBinding(PackageXmlEnum.Creator, value);
}
}
/// <value>
/// The title.
/// </value>
public override string Title
{
get
{
return (string)GetPropertyValue(PackageXmlEnum.Title);
}
set
{
RecordNewBinding(PackageXmlEnum.Title, value);
}
}
/// <value>
/// The topic of the contents.
/// </value>
public override string Subject
{
get
{
return (string)GetPropertyValue(PackageXmlEnum.Subject);
}
set
{
RecordNewBinding(PackageXmlEnum.Subject, value);
}
}
/// <value>
/// The category. This value is typically used by UI applications to create navigation
/// controls.
/// </value>
public override string Category
{
get
{
return (string)GetPropertyValue(PackageXmlEnum.Category);
}
set
{
RecordNewBinding(PackageXmlEnum.Category, value);
}
}
/// <value>
/// A delimited set of keywords to support searching and indexing. This
/// is typically a list of terms that are not available elsewhere in the
/// properties.
/// </value>
public override string Keywords
{
get
{
return (string)GetPropertyValue(PackageXmlEnum.Keywords);
}
set
{
RecordNewBinding(PackageXmlEnum.Keywords, value);
}
}
/// <value>
/// The description or abstract of the contents.
/// </value>
public override string Description
{
get
{
return (string)GetPropertyValue(PackageXmlEnum.Description);
}
set
{
RecordNewBinding(PackageXmlEnum.Description, value);
}
}
/// <value>
/// The type of content represented, generally defined by a specific
/// use and intended audience. Example values include "Whitepaper",
/// "Security Bulletin", and "Exam". (This property is distinct from
/// MIME content types as defined in RFC 2616.)
/// </value>
public override string ContentType
{
get
{
string contentType = GetPropertyValue(PackageXmlEnum.ContentType) as string;
return contentType;
}
set
{
RecordNewBinding(PackageXmlEnum.ContentType, value);
}
}
/// <value>
/// The status of the content. Example values include "Draft",
/// "Reviewed", and "Final".
/// </value>
public override string ContentStatus
{
get
{
return (string)GetPropertyValue(PackageXmlEnum.ContentStatus);
}
set
{
RecordNewBinding(PackageXmlEnum.ContentStatus, value);
}
}
/// <value>
/// The version number. This value is set by the user or by the application.
/// </value>
public override string Version
{
get
{
return (string)GetPropertyValue(PackageXmlEnum.Version);
}
set
{
RecordNewBinding(PackageXmlEnum.Version, value);
}
}
/// <value>
/// The revision number. This value indicates the number of saves or
/// revisions. The application is responsible for updating this value
/// after each revision.
/// </value>
public override string Revision
{
get
{
return (string)GetPropertyValue(PackageXmlEnum.Revision);
}
set
{
RecordNewBinding(PackageXmlEnum.Revision, value);
}
}
/// <value>
/// The creation date and time.
/// </value>
public override Nullable<DateTime> Created
{
get
{
return GetDateTimePropertyValue(PackageXmlEnum.Created);
}
set
{
RecordNewBinding(PackageXmlEnum.Created, value);
}
}
/// <value>
/// The date and time of the last modification.
/// </value>
public override Nullable<DateTime> Modified
{
get
{
return GetDateTimePropertyValue(PackageXmlEnum.Modified);
}
set
{
RecordNewBinding(PackageXmlEnum.Modified, value);
}
}
/// <value>
/// The user who performed the last modification. The identification is
/// environment-specific and can consist of a name, email address,
/// employee ID, etc. It is recommended that this value be only as
/// verbose as necessary to identify the individual.
/// </value>
public override string LastModifiedBy
{
get
{
return (string)GetPropertyValue(PackageXmlEnum.LastModifiedBy);
}
set
{
RecordNewBinding(PackageXmlEnum.LastModifiedBy, value);
}
}
/// <value>
/// The date and time of the last printing.
/// </value>
public override Nullable<DateTime> LastPrinted
{
get
{
return GetDateTimePropertyValue(PackageXmlEnum.LastPrinted);
}
set
{
RecordNewBinding(PackageXmlEnum.LastPrinted, value);
}
}
/// <value>
/// A language of the intellectual content of the resource
/// </value>
public override string Language
{
get
{
return (string)GetPropertyValue(PackageXmlEnum.Language);
}
set
{
RecordNewBinding(PackageXmlEnum.Language, value);
}
}
/// <value>
/// A unique identifier.
/// </value>
public override string Identifier
{
get
{
return (string)GetPropertyValue(PackageXmlEnum.Identifier);
}
set
{
RecordNewBinding(PackageXmlEnum.Identifier, value);
}
}
#endregion Public Properties
//------------------------------------------------------
//
// Internal Methods
//
//------------------------------------------------------
#region Internal Methods
// Invoked from Package.Flush.
// The expectation is that whatever is currently dirty will get flushed.
internal void Flush()
{
if (!_dirty)
return;
// Make sure there is a part to write to and that it contains
// the expected start markup.
EnsureXmlWriter();
// Write the property elements and clear _dirty.
SerializeDirtyProperties();
// add closing markup and close the writer.
CloseXmlWriter();
}
// Invoked from Package.Close.
internal void Close()
{
Flush();
}
#endregion Internal Methods
//------------------------------------------------------
//
// Internal Properties
//
//------------------------------------------------------
//------------------------------------------------------
//
// Private Methods
//
//------------------------------------------------------
#region Private Methods
// The property store is implemented as a dictionary of objects keyed by
// PackageXmlEnum values, so lookups compare enum keys rather than
// performing string comparisons.
private object GetPropertyValue(PackageXmlEnum propertyName)
{
_package.ThrowIfWriteOnly();
if (!_propertyDictionary.ContainsKey(propertyName))
return null;
return _propertyDictionary[propertyName];
}
// Shim function to cast the result of GetPropertyValue to a nullable DateTime.
private Nullable<DateTime> GetDateTimePropertyValue(PackageXmlEnum propertyName)
{
object valueObject = GetPropertyValue(propertyName);
if (valueObject == null)
return null;
// If an object is there, it will be a DateTime (not a Nullable<DateTime>).
return (Nullable<DateTime>)valueObject;
}
// Set new property value.
// Override that sets the initializing flag to false to reflect the default
// situation: recording a binding to implement a value assignment.
private void RecordNewBinding(PackageXmlEnum propertyenum, object value)
{
RecordNewBinding(propertyenum, value, false /* not invoked at construction */, null);
}
// Set new property value.
// Null value is passed for deleting a property.
// While initializing, we are not assigning new values, and so the dirty flag should
// stay untouched.
private void RecordNewBinding(PackageXmlEnum propertyenum, object value, bool initializing, XmlReader reader)
{
// If we are reading values from the package, reader cannot be null
Debug.Assert(!initializing || reader != null);
if (!initializing)
_package.ThrowIfReadOnly();
// Case of an existing property.
if (_propertyDictionary.ContainsKey(propertyenum))
{
// Parsing should detect redundant entries.
if (initializing)
{
throw new XmlException(Formatter.Format(Resources.DuplicateCorePropertyName, reader.Name),
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
// Nullable<DateTime> values can be checked against null
if (value == null) // a deletion
{
_propertyDictionary.Remove(propertyenum);
}
else // an update
{
_propertyDictionary[propertyenum] = value;
}
// If the binding is an assignment rather than an initialization, set the dirty flag.
_dirty = !initializing;
}
// Case of an initial value being set for a property.
else
{
_propertyDictionary.Add(propertyenum, value);
// If the binding is an assignment rather than an initialization, set the dirty flag.
_dirty = !initializing;
}
}
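// Illustrative summary (not part of the original source): the public property
// setters above all route through this overload pair with initializing set to
// false, so every assignment, including a null value used as a deletion,
// sets _dirty and causes the next Flush() to rewrite the core-properties part.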
// Initialize object from property values found in package.
// All values will remain null if the package is not enabled for reading.
private void ReadPropertyValuesFromPackage()
{
Debug.Assert(_propertyPart == null); // This gets called exclusively from constructor.
// Don't try to read properties from the package if it does not have read access.
if (_package.FileOpenAccess == FileAccess.Write)
return;
_propertyPart = GetPropertyPart();
if (_propertyPart == null)
return;
ParseCorePropertyPart(_propertyPart);
}
// Locate core properties part using the package relationship that points to it.
private PackagePart GetPropertyPart()
{
// Find a package-wide relationship of type CoreDocumentPropertiesRelationshipType.
PackageRelationship corePropertiesRelationship = GetCorePropertiesRelationship();
if (corePropertiesRelationship == null)
return null;
// Retrieve the part referenced by its target URI.
if (corePropertiesRelationship.TargetMode != TargetMode.Internal)
throw new FileFormatException(Resources.NoExternalTargetForMetadataRelationship);
PackagePart propertiesPart = null;
Uri propertiesPartUri = PackUriHelper.ResolvePartUri(
PackUriHelper.PackageRootUri,
corePropertiesRelationship.TargetUri);
if (!_package.PartExists(propertiesPartUri))
throw new FileFormatException(Resources.DanglingMetadataRelationship);
propertiesPart = _package.GetPart(propertiesPartUri);
if (!propertiesPart.ValidatedContentType.AreTypeAndSubTypeEqual(s_coreDocumentPropertiesContentType))
{
throw new FileFormatException(Resources.WrongContentTypeForPropertyPart);
}
return propertiesPart;
}
// Find a package-wide relationship of type CoreDocumentPropertiesRelationshipType.
private PackageRelationship GetCorePropertiesRelationship()
{
PackageRelationship propertiesPartRelationship = null;
foreach (PackageRelationship rel
in _package.GetRelationshipsByType(CoreDocumentPropertiesRelationshipType))
{
if (propertiesPartRelationship != null)
{
throw new FileFormatException(Resources.MoreThanOneMetadataRelationships);
}
propertiesPartRelationship = rel;
}
return propertiesPartRelationship;
}
// Deserialize properties part.
private void ParseCorePropertyPart(PackagePart part)
{
XmlReaderSettings xrs = new XmlReaderSettings();
xrs.NameTable = _nameTable;
using (Stream stream = part.GetStream(FileMode.Open, FileAccess.Read))
// Create a reader that uses _nameTable so as to use the set of tag literals
// in effect as a set of atomic identifiers.
using (XmlReader reader = XmlReader.Create(stream, xrs))
{
// This method expects the reader to be in ReadState.Initial.
// It will make the first read call.
PackagingUtilities.PerformInitailReadAndVerifyEncoding(reader);
// Note: after the previous method call the reader should be at the first tag in the markup.
// MoveToContent skips over ProcessingInstruction, DocumentType, Comment, Whitespace,
// and SignificantWhitespace nodes.
// If the reader is currently at a content node then this call is a no-op.
if (reader.MoveToContent() != XmlNodeType.Element
|| (object)reader.NamespaceURI != PackageXmlStringTable.GetXmlStringAsObject(PackageXmlEnum.PackageCorePropertiesNamespace)
|| (object)reader.LocalName != PackageXmlStringTable.GetXmlStringAsObject(PackageXmlEnum.CoreProperties))
{
throw new XmlException(Resources.CorePropertiesElementExpected,
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
// The schema is closed and defines no attributes on the root element.
if (PackagingUtilities.GetNonXmlnsAttributeCount(reader) != 0)
{
throw new XmlException(Formatter.Format(Resources.PropertyWrongNumbOfAttribsDefinedOn, reader.Name),
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
// Iterate through property elements until EOF. Note the proper closing of all
// open tags is checked by the reader itself.
// This loop deals only with depth-1 start tags. Handling of element content
// is delegated to dedicated functions.
int attributesCount;
while (reader.Read() && reader.MoveToContent() != XmlNodeType.None)
{
// Ignore end-tags. We check element errors on opening tags.
if (reader.NodeType == XmlNodeType.EndElement)
continue;
// Any content markup that is not an element here is unexpected.
if (reader.NodeType != XmlNodeType.Element)
{
throw new XmlException(Resources.PropertyStartTagExpected,
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
// Any element below the root should open at level 1 exclusively.
if (reader.Depth != 1)
{
throw new XmlException(Resources.NoStructuredContentInsideProperties,
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
attributesCount = PackagingUtilities.GetNonXmlnsAttributeCount(reader);
// Property elements can occur in any order (xsd:all).
object localName = reader.LocalName;
PackageXmlEnum xmlStringIndex = PackageXmlStringTable.GetEnumOf(localName);
String valueType = PackageXmlStringTable.GetValueType(xmlStringIndex);
if (Array.IndexOf(s_validProperties, xmlStringIndex) == -1) // An unexpected element is an error.
{
throw new XmlException(
Formatter.Format(Resources.InvalidPropertyNameInCorePropertiesPart, reader.LocalName),
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
// Any element not in the valid core properties namespace is unexpected.
// The following is an object comparison, not a string comparison.
if ((object)reader.NamespaceURI != PackageXmlStringTable.GetXmlStringAsObject(PackageXmlStringTable.GetXmlNamespace(xmlStringIndex)))
{
throw new XmlException(Resources.UnknownNamespaceInCorePropertiesPart,
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
if (String.CompareOrdinal(valueType, "String") == 0)
{
// The schema is closed and defines no attributes on this type of element.
if (attributesCount != 0)
{
throw new XmlException(Formatter.Format(Resources.PropertyWrongNumbOfAttribsDefinedOn, reader.Name),
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
RecordNewBinding(xmlStringIndex, GetStringData(reader), true /*initializing*/, reader);
}
else if (String.CompareOrdinal(valueType, "DateTime") == 0)
{
int allowedAttributeCount = (object)reader.NamespaceURI ==
PackageXmlStringTable.GetXmlStringAsObject(PackageXmlEnum.DublinCoreTermsNamespace)
? 1 : 0;
// The schema is closed; dcterms date elements may carry only the single
// xsi:type attribute, and no attributes are allowed otherwise.
if (attributesCount != allowedAttributeCount)
{
throw new XmlException(Formatter.Format(Resources.PropertyWrongNumbOfAttribsDefinedOn, reader.Name),
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
if (allowedAttributeCount != 0)
{
ValidateXsiType(reader,
PackageXmlStringTable.GetXmlStringAsObject(PackageXmlEnum.DublinCoreTermsNamespace),
W3cdtf);
}
RecordNewBinding(xmlStringIndex, GetDateData(reader), true /*initializing*/, reader);
}
else // An unexpected element is an error.
{
Debug.Assert(false, "Unknown value type for properties");
}
}
}
}
// This method validates xsi:type="dcterms:W3CDTF".
// The value of xsi:type is a qualified name: its prefix must resolve to the
// given xml namespace (ns) within the current scope, and its local part must
// match the given name. Both comparisons are case-sensitive.
internal static void ValidateXsiType(XmlReader reader, Object ns, string name)
{
// Get the value of xsi:type
String typeValue = reader.GetAttribute(PackageXmlStringTable.GetXmlString(PackageXmlEnum.Type),
PackageXmlStringTable.GetXmlString(PackageXmlEnum.XmlSchemaInstanceNamespace));
// Missing xsi:type
if (typeValue == null)
{
throw new XmlException(Formatter.Format(Resources.UnknownDCDateTimeXsiType, reader.Name),
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
int index = typeValue.IndexOf(':');
// The value of xsi:type is not a qualified name
if (index == -1)
{
throw new XmlException(Formatter.Format(Resources.UnknownDCDateTimeXsiType, reader.Name),
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
// Check the following conditions
// The namespace of the prefix (string before ":") matches "ns"
// The name (string after ":") matches "name"
if (!ReferenceEquals(ns, reader.LookupNamespace(typeValue.Substring(0, index)))
|| String.CompareOrdinal(name, typeValue.Substring(index + 1, typeValue.Length - index - 1)) != 0)
{
throw new XmlException(Formatter.Format(Resources.UnknownDCDateTimeXsiType, reader.Name),
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
}
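// Illustrative example (not part of the original source): with the usual
// mapping xmlns:dcterms="http://purl.org/dc/terms/" in scope, an element like
//   <dcterms:created xsi:type="dcterms:W3CDTF">2013-01-01T00:00:00Z</dcterms:created>
// passes this check, whereas an unprefixed xsi:type="W3CDTF" or a prefix bound
// to a different namespace throws the UnknownDCDateTimeXsiType XmlException.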
// Expect to find text data and return its value.
private string GetStringData(XmlReader reader)
{
if (reader.IsEmptyElement)
return string.Empty;
reader.Read();
if (reader.MoveToContent() == XmlNodeType.EndElement)
return string.Empty;
// If there is any content in the element, it should be text content and nothing else.
if (reader.NodeType != XmlNodeType.Text)
{
throw new XmlException(Resources.NoStructuredContentInsideProperties,
null, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
return reader.Value;
}
// Expect to find text data and return its value as DateTime.
private Nullable<DateTime> GetDateData(XmlReader reader)
{
string data = GetStringData(reader);
DateTime dateTime;
try
{
// Note: No more than 7 second decimals are accepted by the
// list of formats given. There currently is no method that
// would perform XSD-compliant parsing.
dateTime = DateTime.ParseExact(data, s_dateTimeFormats, CultureInfo.InvariantCulture, DateTimeStyles.None);
}
catch (FormatException exc)
{
throw new XmlException(Resources.XsdDateTimeExpected,
exc, ((IXmlLineInfo)reader).LineNumber, ((IXmlLineInfo)reader).LinePosition);
}
return dateTime;
}
// Make sure there is a part to write to and that it contains
// the expected start markup.
private void EnsureXmlWriter()
{
if (_xmlWriter != null)
return;
EnsurePropertyPart(); // Should succeed or throw an exception.
Stream writerStream = new IgnoreFlushAndCloseStream(_propertyPart.GetStream(FileMode.Create, FileAccess.Write));
_xmlWriter = XmlWriter.Create(writerStream, new XmlWriterSettings { Encoding = Encoding.UTF8 });
WriteXmlStartTagsForPackageProperties();
}
// Create a property part if none exists yet.
private void EnsurePropertyPart()
{
if (_propertyPart != null)
return;
// If _propertyPart is null, no property part existed when this object was created,
// and this function is being called for the first time.
// However, when read access is available, we can afford the luxury of checking whether
// a property part and its referring relationship got correctly created in the meantime
// outside of this class.
// In write-only mode, it is impossible to perform this check, and the external creation
// scenario will result in an exception being thrown.
if (_package.FileOpenAccess == FileAccess.Read || _package.FileOpenAccess == FileAccess.ReadWrite)
{
_propertyPart = GetPropertyPart();
if (_propertyPart != null)
return;
}
CreatePropertyPart();
}
// Create a new property relationship pointing to a new property part.
// If only this class is used for manipulating property relationships, there cannot be a
// pre-existing dangling property relationship.
// No check for misuse by other classes is performed here, because this
// function also has to work in write-only mode, where such a check is impossible.
private void CreatePropertyPart()
{
_propertyPart = _package.CreatePart(GeneratePropertyPartUri(), s_coreDocumentPropertiesContentType.ToString());
_package.CreateRelationship(_propertyPart.Uri, TargetMode.Internal,
CoreDocumentPropertiesRelationshipType);
}
private Uri GeneratePropertyPartUri()
{
string propertyPartName = DefaultPropertyPartNamePrefix
+ Guid.NewGuid().ToString(GuidStorageFormatString)
+ DefaultPropertyPartNameExtension;
return PackUriHelper.CreatePartUri(new Uri(propertyPartName, UriKind.Relative));
}
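// Illustrative only (hypothetical GUID): with the constants defined below,
// this yields part names of the form
//   /package/services/metadata/core-properties/1f0c7f48f2e94d1b8f3cf9b6f3a1c2d4.psmdcp
// because the "N" GUID format is 32 hex digits with no hyphens or braces.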
private void WriteXmlStartTagsForPackageProperties()
{
_xmlWriter.WriteStartDocument();
// <coreProperties
_xmlWriter.WriteStartElement(PackageXmlStringTable.GetXmlString(PackageXmlEnum.CoreProperties), // local name
PackageXmlStringTable.GetXmlString(PackageXmlEnum.PackageCorePropertiesNamespace)); // namespace
// xmlns:dc
_xmlWriter.WriteAttributeString(PackageXmlStringTable.GetXmlString(PackageXmlEnum.XmlNamespacePrefix),
PackageXmlStringTable.GetXmlString(PackageXmlEnum.DublinCorePropertiesNamespacePrefix),
null,
PackageXmlStringTable.GetXmlString(PackageXmlEnum.DublinCorePropertiesNamespace));
// xmlns:dcterms
_xmlWriter.WriteAttributeString(PackageXmlStringTable.GetXmlString(PackageXmlEnum.XmlNamespacePrefix),
PackageXmlStringTable.GetXmlString(PackageXmlEnum.DublincCoreTermsNamespacePrefix),
null,
PackageXmlStringTable.GetXmlString(PackageXmlEnum.DublinCoreTermsNamespace));
// xmlns:xsi
_xmlWriter.WriteAttributeString(PackageXmlStringTable.GetXmlString(PackageXmlEnum.XmlNamespacePrefix),
PackageXmlStringTable.GetXmlString(PackageXmlEnum.XmlSchemaInstanceNamespacePrefix),
null,
PackageXmlStringTable.GetXmlString(PackageXmlEnum.XmlSchemaInstanceNamespace));
}
// Write the property elements and clear _dirty.
private void SerializeDirtyProperties()
{
// Create a property element for each non-null entry.
foreach (KeyValuePair<PackageXmlEnum, Object> entry in _propertyDictionary)
{
Debug.Assert(entry.Value != null);
PackageXmlEnum propertyNamespace = PackageXmlStringTable.GetXmlNamespace(entry.Key);
_xmlWriter.WriteStartElement(PackageXmlStringTable.GetXmlString(entry.Key),
PackageXmlStringTable.GetXmlString(propertyNamespace));
if (entry.Value is Nullable<DateTime>)
{
if (propertyNamespace == PackageXmlEnum.DublinCoreTermsNamespace)
{
// xsi:type=
_xmlWriter.WriteStartAttribute(PackageXmlStringTable.GetXmlString(PackageXmlEnum.Type),
PackageXmlStringTable.GetXmlString(PackageXmlEnum.XmlSchemaInstanceNamespace));
// "dcterms:W3CDTF"
_xmlWriter.WriteQualifiedName(W3cdtf,
PackageXmlStringTable.GetXmlString(PackageXmlEnum.DublinCoreTermsNamespace));
_xmlWriter.WriteEndAttribute();
}
// Use sortable ISO 8601 date/time pattern. Include second fractions down to the 100-nanosecond interval,
// which is the definition of a "tick" for the DateTime type.
_xmlWriter.WriteString(XmlConvert.ToString(((Nullable<DateTime>)entry.Value).Value.ToUniversalTime(), "yyyy-MM-ddTHH:mm:ss.fffffffZ"));
}
else
{
// The following uses the fact that ToString is virtual.
_xmlWriter.WriteString(entry.Value.ToString());
}
_xmlWriter.WriteEndElement();
}
// Mark properties as saved.
_dirty = false;
}
// Add end markup and close the writer.
private void CloseXmlWriter()
{
// Close the root element.
_xmlWriter.WriteEndElement();
// Close the writer itself.
_xmlWriter.Dispose();
// Make sure we know it's closed.
_xmlWriter = null;
}
#endregion Private Methods
//------------------------------------------------------
//
// Private fields
//
//------------------------------------------------------
#region Private Fields
private Package _package;
private PackagePart _propertyPart;
private XmlWriter _xmlWriter;
// Table of objects from the closed set of literals defined below.
// (Uses object comparison rather than string comparison.)
private const int NumCoreProperties = 16;
private Dictionary<PackageXmlEnum, Object> _propertyDictionary = new Dictionary<PackageXmlEnum, Object>(NumCoreProperties);
private bool _dirty = false;
// This System.Xml.NameTable makes sure that we use the same references to strings
// throughout (including when parsing Xml) and so can perform reference comparisons
// rather than value comparisons.
private NameTable _nameTable;
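// Illustrative note (not part of the original source): because the reader in
// ParseCorePropertyPart is created with this _nameTable, element names can be
// matched by reference, e.g.
//   (object)reader.LocalName == PackageXmlStringTable.GetXmlStringAsObject(...)
// instead of a character-by-character string comparison.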
// Literals.
private static readonly ContentType s_coreDocumentPropertiesContentType
= new ContentType("application/vnd.openxmlformats-package.core-properties+xml");
private const string CoreDocumentPropertiesRelationshipType
= "http://schemas.openxmlformats.org/package/2006/relationships/metadata/core-properties";
private const string DefaultPropertyPartNamePrefix =
"/package/services/metadata/core-properties/";
private const string W3cdtf = "W3CDTF";
private const string DefaultPropertyPartNameExtension = ".psmdcp";
private const string GuidStorageFormatString = @"N"; // N - simple format without adornments
private static PackageXmlEnum[] s_validProperties = new PackageXmlEnum[] {
PackageXmlEnum.Creator,
PackageXmlEnum.Identifier,
PackageXmlEnum.Title,
PackageXmlEnum.Subject,
PackageXmlEnum.Description,
PackageXmlEnum.Language,
PackageXmlEnum.Created,
PackageXmlEnum.Modified,
PackageXmlEnum.ContentType,
PackageXmlEnum.Keywords,
PackageXmlEnum.Category,
PackageXmlEnum.Version,
PackageXmlEnum.LastModifiedBy,
PackageXmlEnum.ContentStatus,
PackageXmlEnum.Revision,
PackageXmlEnum.LastPrinted
};
// Array of formats to supply to XmlConvert.ToDateTime or DateTime.ParseExact.
// xsd:DateTime requires full date time in sortable (ISO 8601) format.
// It can be expressed in local time, universal time (Z), or relative to universal time (zzz).
// Negative years are accepted.
// IMPORTANT: Second fractions are recognized only down to 1 tenth of a microsecond because this is the resolution
// of the DateTime type. The Xml standard, however, allows any number of decimals; but XmlConvert only offers
// this very awkward API with an explicit pattern enumeration.
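// Illustrative examples of accepted values (not part of the original source):
//   "2013-01-01T12:30:45"           (local time, no fraction)
//   "2013-01-01T12:30:45.1234567Z"  (universal time, 7 decimals, i.e. 1 tick)
//   "2013-01-01T12:30:45+05:30"     (relative to universal time, "zzz")
//   "-0001-01-01T00:00:00Z"         (negative year)
// A value with 8 or more decimals matches none of the patterns below and
// makes GetDateData throw an XsdDateTimeExpected XmlException.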
private static readonly string[] s_dateTimeFormats = new string[] {
"yyyy-MM-ddTHH:mm:ss",
"yyyy-MM-ddTHH:mm:ssZ",
"yyyy-MM-ddTHH:mm:sszzz",
@"\-yyyy-MM-ddTHH:mm:ss",
@"\-yyyy-MM-ddTHH:mm:ssZ",
@"\-yyyy-MM-ddTHH:mm:sszzz",
"yyyy-MM-ddTHH:mm:ss.ff",
"yyyy-MM-ddTHH:mm:ss.fZ",
"yyyy-MM-ddTHH:mm:ss.fzzz",
@"\-yyyy-MM-ddTHH:mm:ss.f",
@"\-yyyy-MM-ddTHH:mm:ss.fZ",
@"\-yyyy-MM-ddTHH:mm:ss.fzzz",
"yyyy-MM-ddTHH:mm:ss.ff",
"yyyy-MM-ddTHH:mm:ss.ffZ",
"yyyy-MM-ddTHH:mm:ss.ffzzz",
@"\-yyyy-MM-ddTHH:mm:ss.ff",
@"\-yyyy-MM-ddTHH:mm:ss.ffZ",
@"\-yyyy-MM-ddTHH:mm:ss.ffzzz",
"yyyy-MM-ddTHH:mm:ss.fff",
"yyyy-MM-ddTHH:mm:ss.fffZ",
"yyyy-MM-ddTHH:mm:ss.fffzzz",
@"\-yyyy-MM-ddTHH:mm:ss.fff",
@"\-yyyy-MM-ddTHH:mm:ss.fffZ",
@"\-yyyy-MM-ddTHH:mm:ss.fffzzz",
"yyyy-MM-ddTHH:mm:ss.ffff",
"yyyy-MM-ddTHH:mm:ss.ffffZ",
"yyyy-MM-ddTHH:mm:ss.ffffzzz",
@"\-yyyy-MM-ddTHH:mm:ss.ffff",
@"\-yyyy-MM-ddTHH:mm:ss.ffffZ",
@"\-yyyy-MM-ddTHH:mm:ss.ffffzzz",
"yyyy-MM-ddTHH:mm:ss.fffff",
"yyyy-MM-ddTHH:mm:ss.fffffZ",
"yyyy-MM-ddTHH:mm:ss.fffffzzz",
@"\-yyyy-MM-ddTHH:mm:ss.fffff",
@"\-yyyy-MM-ddTHH:mm:ss.fffffZ",
@"\-yyyy-MM-ddTHH:mm:ss.fffffzzz",
"yyyy-MM-ddTHH:mm:ss.ffffff",
"yyyy-MM-ddTHH:mm:ss.ffffffZ",
"yyyy-MM-ddTHH:mm:ss.ffffffzzz",
@"\-yyyy-MM-ddTHH:mm:ss.ffffff",
@"\-yyyy-MM-ddTHH:mm:ss.ffffffZ",
@"\-yyyy-MM-ddTHH:mm:ss.ffffffzzz",
"yyyy-MM-ddTHH:mm:ss.fffffff",
"yyyy-MM-ddTHH:mm:ss.fffffffZ",
"yyyy-MM-ddTHH:mm:ss.fffffffzzz",
@"\-yyyy-MM-ddTHH:mm:ss.fffffff",
@"\-yyyy-MM-ddTHH:mm:ss.fffffffZ",
@"\-yyyy-MM-ddTHH:mm:ss.fffffffzzz",
};
#endregion Private Fields
}
}
| badmishkallc/PoshOffice | src/System.IO.Packaging/PartBasedPackageProperties.cs | C# | artistic-2.0 | 40,857 |
require "language/go"
class Cosi < Formula
desc "Implementation of scalable collective signing"
homepage "https://github.com/dedis/cosi"
url "https://github.com/dedis/cosi/archive/0.8.6.tar.gz"
sha256 "007e4c4def13fcecf7301d86f177f098c583151c8a3d940ccb4c65a84413a9eb"
license "AGPL-3.0"
bottle do
cellar :any_skip_relocation
sha256 "30bbb457c0fb67ee264331e434068a4a747ece4cbc536cb75d289a06e93988e2" => :catalina
sha256 "2ddd695441977b1cd435fbae28d9aa864d48b7a90ec24971348d91b5d0e551df" => :mojave
sha256 "00663999a04ee29f52e334022cc828d7ebe89a442f1e713afb2167112f4ebf75" => :high_sierra
end
depends_on "go" => :build
go_resource "github.com/BurntSushi/toml" do
url "https://github.com/BurntSushi/toml.git",
revision: "f0aeabca5a127c4078abb8c8d64298b147264b55"
end
go_resource "github.com/daviddengcn/go-colortext" do
url "https://github.com/daviddengcn/go-colortext.git",
revision: "511bcaf42ccd42c38aba7427b6673277bf19e2a1"
end
go_resource "github.com/dedis/crypto" do
url "https://github.com/dedis/crypto.git",
revision: "d9272cb478c0942e1d60049e6df219cba2067fcd"
end
go_resource "github.com/dedis/protobuf" do
url "https://github.com/dedis/protobuf.git",
revision: "6948fbd96a0f1e4e96582003261cf647dc66c831"
end
go_resource "github.com/montanaflynn/stats" do
url "https://github.com/montanaflynn/stats.git",
revision: "60dcacf48f43d6dd654d0ed94120ff5806c5ca5c"
end
go_resource "github.com/satori/go.uuid" do
url "https://github.com/satori/go.uuid.git",
revision: "f9ab0dce87d815821e221626b772e3475a0d2749"
end
go_resource "golang.org/x/net" do
url "https://go.googlesource.com/net.git",
revision: "0c607074acd38c5f23d1344dfe74c977464d1257"
end
go_resource "gopkg.in/codegangsta/cli.v1" do
url "https://gopkg.in/codegangsta/cli.v1.git",
revision: "01857ac33766ce0c93856370626f9799281c14f4"
end
go_resource "gopkg.in/dedis/cothority.v0" do
url "https://gopkg.in/dedis/cothority.v0.git",
revision: "e5eb384290e5fd98b8cb150a1348661aa2d49e2a"
end
def install
mkdir_p buildpath/"src/github.com/dedis"
ln_s buildpath, buildpath/"src/github.com/dedis/cosi"
ENV["GOPATH"] = "#{buildpath}/Godeps/_workspace:#{buildpath}"
Language::Go.stage_deps resources, buildpath/"src"
system "go", "build", "-o", "cosi"
prefix.install "dedis_group.toml"
bin.install "cosi"
end
test do
port = free_port
(testpath/"config.toml").write <<~EOS
Public = "7b6d6361686d0c76d9f4b40961736eb5d0849f7db3f8bfd8f869b8015d831d45"
Private = "01a80f4fef21db2aea18e5288fe9aa71324a8ad202609139e5cfffc4ffdc4484"
Addresses = ["0.0.0.0:#{port}"]
EOS
(testpath/"group.toml").write <<~EOS
[[servers]]
Addresses = ["127.0.0.1:#{port}"]
Public = "e21jYWhtDHbZ9LQJYXNutdCEn32z+L/Y+Gm4AV2DHUU="
EOS
begin
file = prefix/"README.md"
sig = "README.sig"
pid = fork { exec bin/"cosi", "server", "-config", "config.toml" }
sleep 2
assert_match "Success", shell_output("#{bin}/cosi check -g group.toml")
system bin/"cosi", "sign", "-g", "group.toml", "-o", sig, file
out = shell_output("#{bin}/cosi verify -g group.toml -s #{sig} #{file}")
assert_match "OK", out
ensure
Process.kill("TERM", pid)
end
end
end
| jabenninghoff/homebrew-core | Formula/cosi.rb | Ruby | bsd-2-clause | 3,409 |
cask 'boom' do
version '1.6.9,1575451705'
sha256 '444b5513c92eb0975494509908786a31f087a0af0e58fa5f312a156318be22f8'
# devmate.com/com.globaldelight.Boom2/ was verified as official when first introduced to the cask
url "https://dl.devmate.com/com.globaldelight.Boom2/#{version.before_comma}/#{version.after_comma}/Boom2-#{version.before_comma}.dmg"
appcast 'https://updates.devmate.com/com.globaldelight.Boom2.xml'
name 'Boom'
homepage 'https://www.globaldelight.com/boom'
depends_on macos: '>= :yosemite'
app 'Boom 2.app'
uninstall kext: 'com.globaldelight.driver.Boom2Device',
launchctl: [
'com.globaldelight.Boom2.*',
'com.globaldelight.Boom2Daemon',
],
signal: ['TERM', 'com.globaldelight.Boom2']
zap trash: [
'~/Library/Application Support/com.globaldelight.Boom2',
'~/Library/Application Support/com.apple.sharedfilelist/com.apple.LSSharedFileList.ApplicationRecentDocuments/com.globaldelight.boom2.sfl*',
'~/Library/Application Support/com.apple.sharedfilelist/com.apple.LSSharedFileList.ApplicationRecentDocuments/com.globaldelight.boom2daemon.sfl*',
'~/Library/Preferences/com.globaldelight.Boom2.plist',
'~/Library/Preferences/com.globaldelight.Boom2Daemon.plist',
]
end
| sscotth/homebrew-cask | Casks/boom.rb | Ruby | bsd-2-clause | 1,400 |
// Copyright (C) 2015 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
es6id: 12.10.3
description: Invocation of `Symbol.toPrimitive` method during coercion
info: |
[...]
7. Return the result of performing Abstract Equality Comparison rval ==
lval.
ES6 Section 7.2.12 Abstract Equality Comparison
[...]
10. If Type(x) is either String, Number, or Symbol and Type(y) is Object,
then return the result of the comparison x == ToPrimitive(y).
ES6 Section 7.1.1 ToPrimitive ( input [, PreferredType] )
1. If PreferredType was not passed, let hint be "default".
[...]
4. Let exoticToPrim be GetMethod(input, @@toPrimitive).
5. ReturnIfAbrupt(exoticToPrim).
6. If exoticToPrim is not undefined, then
a. Let result be Call(exoticToPrim, input, «hint»).
[...]
features: [Symbol.toPrimitive]
---*/
var y = {};
var callCount = 0;
var thisVal, args;
y[Symbol.toPrimitive] = function() {
callCount += 1;
thisVal = this;
args = arguments;
};
0 == y;
assert.sameValue(callCount, 1, 'method invoked exactly once');
assert.sameValue(thisVal, y, '`this` value is the object being compared');
assert.sameValue(args.length, 1, 'method invoked with exactly one argument');
assert.sameValue(
args[0],
'default',
'method invoked with the string "default" as the first argument'
);
| sebastienros/jint | Jint.Tests.Test262/test/language/expressions/equals/coerce-symbol-to-prim-invocation.js | JavaScript | bsd-2-clause | 1,426 |
cask 'jetbrains-toolbox' do
version '1.0.1569'
sha256 '5e47e404f7b9aa6e5d500eceb59801a9c1dc4da104e29fe1e392956188369b71'
url "https://download.jetbrains.com/toolbox/jetbrains-toolbox-#{version}.dmg"
name 'JetBrains Toolbox'
homepage 'https://www.jetbrains.com/'
license :gratis
app 'JetBrains Toolbox.app'
end
| pacav69/homebrew-cask | Casks/jetbrains-toolbox.rb | Ruby | bsd-2-clause | 326 |
/*
* Copyright 2010 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.wrapper;
import java.io.*;
import java.net.URI;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
public class Install {
public static final String DEFAULT_DISTRIBUTION_PATH = "wrapper/dists";
private final IDownload download;
private final PathAssembler pathAssembler;
private final ExclusiveFileAccessManager exclusiveFileAccessManager = new ExclusiveFileAccessManager(120000, 200);
public Install(IDownload download, PathAssembler pathAssembler) {
this.download = download;
this.pathAssembler = pathAssembler;
}
public File createDist(WrapperConfiguration configuration) throws Exception {
final URI distributionUrl = configuration.getDistribution();
final PathAssembler.LocalDistribution localDistribution = pathAssembler.getDistribution(configuration);
final File distDir = localDistribution.getDistributionDir();
final File localZipFile = localDistribution.getZipFile();
return exclusiveFileAccessManager.access(localZipFile, new Callable<File>() {
public File call() throws Exception {
final File markerFile = new File(localZipFile.getParentFile(), localZipFile.getName() + ".ok");
if (distDir.isDirectory() && markerFile.isFile()) {
return getDistributionRoot(distDir, distDir.getAbsolutePath());
}
boolean needsDownload = !localZipFile.isFile();
if (needsDownload) {
File tmpZipFile = new File(localZipFile.getParentFile(), localZipFile.getName() + ".part");
tmpZipFile.delete();
System.out.println("Downloading " + distributionUrl);
download.download(distributionUrl, tmpZipFile);
tmpZipFile.renameTo(localZipFile);
}
List<File> topLevelDirs = listDirs(distDir);
for (File dir : topLevelDirs) {
System.out.println("Deleting directory " + dir.getAbsolutePath());
deleteDir(dir);
}
System.out.println("Unzipping " + localZipFile.getAbsolutePath() + " to " + distDir.getAbsolutePath());
unzip(localZipFile, distDir);
File root = getDistributionRoot(distDir, distributionUrl.toString());
setExecutablePermissions(root);
markerFile.createNewFile();
return root;
}
});
}
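// Illustrative sketch of the resulting layout (not part of the original
// source; the hash directory name is computed by PathAssembler from the
// distribution URL):
//   <gradleUserHome>/wrapper/dists/gradle-2.2.1-all/<hash>/
//       gradle-2.2.1-all.zip        downloaded archive
//       gradle-2.2.1-all.zip.part   in-flight download, renamed on success
//       gradle-2.2.1-all.zip.ok     marker file: install completed successfully
//       gradle-2.2.1/               the single unpacked distribution root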
private File getDistributionRoot(File distDir, String distributionDescription) {
List<File> dirs = listDirs(distDir);
if (dirs.isEmpty()) {
throw new RuntimeException(String.format("Gradle distribution '%s' does not contain any directories. Expected to find exactly 1 directory.", distributionDescription));
}
if (dirs.size() != 1) {
throw new RuntimeException(String.format("Gradle distribution '%s' contains too many directories. Expected to find exactly 1 directory.", distributionDescription));
}
return dirs.get(0);
}
private List<File> listDirs(File distDir) {
List<File> dirs = new ArrayList<File>();
if (distDir.exists()) {
for (File file : distDir.listFiles()) {
if (file.isDirectory()) {
dirs.add(file);
}
}
}
return dirs;
}
private void setExecutablePermissions(File gradleHome) {
if (isWindows()) {
return;
}
File gradleCommand = new File(gradleHome, "bin/gradle");
String errorMessage = null;
try {
ProcessBuilder pb = new ProcessBuilder("chmod", "755", gradleCommand.getCanonicalPath());
Process p = pb.start();
if (p.waitFor() == 0) {
System.out.println("Set executable permissions for: " + gradleCommand.getAbsolutePath());
} else {
BufferedReader is = new BufferedReader(new InputStreamReader(p.getInputStream()));
Formatter stdout = new Formatter();
String line;
while ((line = is.readLine()) != null) {
stdout.format("%s%n", line);
}
errorMessage = stdout.toString();
}
} catch (IOException e) {
errorMessage = e.getMessage();
} catch (InterruptedException e) {
errorMessage = e.getMessage();
}
if (errorMessage != null) {
System.out.println("Could not set executable permissions for: " + gradleCommand.getAbsolutePath());
System.out.println("Please do this manually if you want to use the Gradle UI.");
}
}
private boolean isWindows() {
String osName = System.getProperty("os.name").toLowerCase(Locale.US);
if (osName.indexOf("windows") > -1) {
return true;
}
return false;
}
private boolean deleteDir(File dir) {
if (dir.isDirectory()) {
String[] children = dir.list();
for (int i = 0; i < children.length; i++) {
boolean success = deleteDir(new File(dir, children[i]));
if (!success) {
return false;
}
}
}
// The directory is now empty so delete it
return dir.delete();
}
private void unzip(File zip, File dest) throws IOException {
Enumeration<? extends ZipEntry> entries;
ZipFile zipFile = new ZipFile(zip);
try {
entries = zipFile.entries();
while (entries.hasMoreElements()) {
ZipEntry entry = entries.nextElement();
if (entry.isDirectory()) {
(new File(dest, entry.getName())).mkdirs();
continue;
}
OutputStream outputStream = new BufferedOutputStream(new FileOutputStream(new File(dest, entry.getName())));
try {
copyInputStream(zipFile.getInputStream(entry), outputStream);
} finally {
outputStream.close();
}
}
} finally {
zipFile.close();
}
}
private void copyInputStream(InputStream in, OutputStream out) throws IOException {
byte[] buffer = new byte[1024];
int len;
while ((len = in.read(buffer)) >= 0) {
out.write(buffer, 0, len);
}
in.close();
out.close();
}
}
| Pushjet/Pushjet-Android | gradle/wrapper/dists/gradle-2.2.1-all/c64ydeuardnfqctvr1gm30w53/gradle-2.2.1/src/wrapper/org/gradle/wrapper/Install.java | Java | bsd-2-clause | 7,302 |
cask 'rubymine' do
version '2018.1.4,181.5281.41'
sha256 'e89880cfed154e01545063f830e444f0a9ae3509b177f254a92032544cffe24a'
url "https://download.jetbrains.com/ruby/RubyMine-#{version.before_comma}.dmg"
appcast 'https://data.services.jetbrains.com/products/releases?code=RM&latest=true&type=release'
name 'RubyMine'
homepage 'https://www.jetbrains.com/ruby/'
auto_updates true
app 'RubyMine.app'
uninstall_postflight do
ENV['PATH'].split(File::PATH_SEPARATOR).map { |path| File.join(path, 'mine') }.each { |path| File.delete(path) if File.exist?(path) && File.readlines(path).grep(%r{# see com.intellij.idea.SocketLock for the server side of this interface}).any? }
end
zap trash: [
"~/Library/Application Support/RubyMine#{version.major_minor}",
"~/Library/Caches/RubyMine#{version.major_minor}",
"~/Library/Logs/RubyMine#{version.major_minor}",
"~/Library/Preferences/RubyMine#{version.major_minor}",
]
end
| goxberry/homebrew-cask | Casks/rubymine.rb | Ruby | bsd-2-clause | 1,013 |
var loopback = require('loopback'),
boot = require('loopback-boot');
var app = loopback();
boot(app, __dirname);
module.exports = app;
| maschinenbau/freecodecamp | client/loopbackClient.js | JavaScript | bsd-3-clause | 142 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/allocator_extension.h"
#include "base/logging.h"
namespace base {
namespace allocator {
bool GetProperty(const char* name, size_t* value) {
thunks::GetPropertyFunction get_property_function =
base::allocator::thunks::GetGetPropertyFunction();
return get_property_function != NULL && get_property_function(name, value);
}
void GetStats(char* buffer, int buffer_length) {
DCHECK_GT(buffer_length, 0);
thunks::GetStatsFunction get_stats_function =
base::allocator::thunks::GetGetStatsFunction();
if (get_stats_function)
get_stats_function(buffer, buffer_length);
else
buffer[0] = '\0';
}
void ReleaseFreeMemory() {
thunks::ReleaseFreeMemoryFunction release_free_memory_function =
base::allocator::thunks::GetReleaseFreeMemoryFunction();
if (release_free_memory_function)
release_free_memory_function();
}
void SetGetPropertyFunction(
thunks::GetPropertyFunction get_property_function) {
DCHECK_EQ(base::allocator::thunks::GetGetPropertyFunction(),
reinterpret_cast<thunks::GetPropertyFunction>(NULL));
base::allocator::thunks::SetGetPropertyFunction(get_property_function);
}
void SetGetStatsFunction(thunks::GetStatsFunction get_stats_function) {
DCHECK_EQ(base::allocator::thunks::GetGetStatsFunction(),
reinterpret_cast<thunks::GetStatsFunction>(NULL));
base::allocator::thunks::SetGetStatsFunction(get_stats_function);
}
void SetReleaseFreeMemoryFunction(
thunks::ReleaseFreeMemoryFunction release_free_memory_function) {
DCHECK_EQ(base::allocator::thunks::GetReleaseFreeMemoryFunction(),
reinterpret_cast<thunks::ReleaseFreeMemoryFunction>(NULL));
base::allocator::thunks::SetReleaseFreeMemoryFunction(
release_free_memory_function);
}
} // namespace allocator
} // namespace base
| leighpauls/k2cro4 | base/allocator/allocator_extension.cc | C++ | bsd-3-clause | 1,997 |
"""
Generating and counting primes.
"""
from __future__ import print_function, division
import random
from bisect import bisect
# Using arrays for sieving instead of lists greatly reduces
# memory consumption
from array import array as _array
from sympy import Function, S
from sympy.core.compatibility import as_int, range
from .primetest import isprime
def _azeros(n):
return _array('l', [0]*n)
def _aset(*v):
return _array('l', v)
def _arange(a, b):
return _array('l', range(a, b))
class Sieve:
"""An infinite list of prime numbers, implemented as a dynamically
growing sieve of Eratosthenes. When a lookup is requested involving
an odd number that has not been sieved, the sieve is automatically
extended up to that number.
Examples
========
>>> from sympy import sieve
>>> sieve._reset() # this line for doctest only
>>> 25 in sieve
False
>>> sieve._list
array('l', [2, 3, 5, 7, 11, 13, 17, 19, 23])
"""
# data shared (and updated) by all Sieve instances
def __init__(self):
self._n = 6
self._list = _aset(2, 3, 5, 7, 11, 13) # primes
self._tlist = _aset(0, 1, 1, 2, 2, 4) # totient
self._mlist = _aset(0, 1, -1, -1, 0, -1) # mobius
assert all(len(i) == self._n for i in (self._list, self._tlist, self._mlist))
def __repr__(self):
return ("<%s sieve (%i): %i, %i, %i, ... %i, %i\n"
"%s sieve (%i): %i, %i, %i, ... %i, %i\n"
"%s sieve (%i): %i, %i, %i, ... %i, %i>") % (
'prime', len(self._list),
self._list[0], self._list[1], self._list[2],
self._list[-2], self._list[-1],
'totient', len(self._tlist),
self._tlist[0], self._tlist[1],
self._tlist[2], self._tlist[-2], self._tlist[-1],
'mobius', len(self._mlist),
self._mlist[0], self._mlist[1],
self._mlist[2], self._mlist[-2], self._mlist[-1])
def _reset(self, prime=None, totient=None, mobius=None):
"""Reset all caches (default). To reset one or more set the
desired keyword to True."""
if all(i is None for i in (prime, totient, mobius)):
prime = totient = mobius = True
if prime:
self._list = self._list[:self._n]
if totient:
self._tlist = self._tlist[:self._n]
if mobius:
self._mlist = self._mlist[:self._n]
def extend(self, n):
"""Grow the sieve to cover all primes <= n (a real number).
Examples
========
>>> from sympy import sieve
>>> sieve._reset() # this line for doctest only
>>> sieve.extend(30)
>>> sieve[10] == 29
True
"""
n = int(n)
if n <= self._list[-1]:
return
# We need to sieve against all bases up to sqrt(n).
# This is a recursive call that will do nothing if there are enough
# known bases already.
maxbase = int(n**0.5) + 1
self.extend(maxbase)
# Create a new sieve starting from sqrt(n)
begin = self._list[-1] + 1
newsieve = _arange(begin, n + 1)
# Now eliminate all multiples of primes in [2, sqrt(n)]
for p in self.primerange(2, maxbase):
# Start counting at a multiple of p, offsetting
# the index to account for the new sieve's base index
startindex = (-begin) % p
for i in range(startindex, len(newsieve), p):
newsieve[i] = 0
# Merge the sieves
self._list += _array('l', [x for x in newsieve if x])
def extend_to_no(self, i):
"""Extend to include the ith prime number.
Parameters
==========
i : integer
Examples
========
>>> from sympy import sieve
>>> sieve._reset() # this line for doctest only
>>> sieve.extend_to_no(9)
>>> sieve._list
array('l', [2, 3, 5, 7, 11, 13, 17, 19, 23])
Notes
=====
The list is extended by 50% if it is too short, so it is
likely that it will be longer than requested.
"""
i = as_int(i)
while len(self._list) < i:
self.extend(int(self._list[-1] * 1.5))
def primerange(self, a, b):
"""Generate all prime numbers in the range [a, b).
Examples
========
>>> from sympy import sieve
>>> print([i for i in sieve.primerange(7, 18)])
[7, 11, 13, 17]
"""
from sympy.functions.elementary.integers import ceiling
# wrapping ceiling in as_int will raise an error if there was a problem
# determining whether the expression was exactly an integer or not
a = max(2, as_int(ceiling(a)))
b = as_int(ceiling(b))
if a >= b:
return
self.extend(b)
i = self.search(a)[1]
maxi = len(self._list) + 1
while i < maxi:
p = self._list[i - 1]
if p < b:
yield p
i += 1
else:
return
def totientrange(self, a, b):
"""Generate all totient numbers for the range [a, b).
Examples
========
>>> from sympy import sieve
>>> print([i for i in sieve.totientrange(7, 18)])
[6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16]
"""
from sympy.functions.elementary.integers import ceiling
# wrapping ceiling in as_int will raise an error if there was a problem
# determining whether the expression was exactly an integer or not
a = max(1, as_int(ceiling(a)))
b = as_int(ceiling(b))
n = len(self._tlist)
if a >= b:
return
elif b <= n:
for i in range(a, b):
yield self._tlist[i]
else:
self._tlist += _arange(n, b)
for i in range(1, n):
ti = self._tlist[i]
startindex = (n + i - 1) // i * i
for j in range(startindex, b, i):
self._tlist[j] -= ti
if i >= a:
yield ti
for i in range(n, b):
ti = self._tlist[i]
for j in range(2 * i, b, i):
self._tlist[j] -= ti
if i >= a:
yield ti
def mobiusrange(self, a, b):
"""Generate all mobius numbers for the range [a, b).
Parameters
==========
a : integer
First number in range
b : integer
First number outside of range
Examples
========
>>> from sympy import sieve
>>> print([i for i in sieve.mobiusrange(7, 18)])
[-1, 0, 0, 1, -1, 0, -1, 1, 1, 0, -1]
"""
from sympy.functions.elementary.integers import ceiling
# wrapping ceiling in as_int will raise an error if there was a problem
# determining whether the expression was exactly an integer or not
a = max(1, as_int(ceiling(a)))
b = as_int(ceiling(b))
n = len(self._mlist)
if a >= b:
return
elif b <= n:
for i in range(a, b):
yield self._mlist[i]
else:
self._mlist += _azeros(b - n)
for i in range(1, n):
mi = self._mlist[i]
startindex = (n + i - 1) // i * i
for j in range(startindex, b, i):
self._mlist[j] -= mi
if i >= a:
yield mi
for i in range(n, b):
mi = self._mlist[i]
for j in range(2 * i, b, i):
self._mlist[j] -= mi
if i >= a:
yield mi
def search(self, n):
"""Return the indices i, j of the primes that bound n.
If n is prime then i == j.
Although n can be an expression, if ceiling cannot convert
it to an integer then an error will be raised.
Examples
========
>>> from sympy import sieve
>>> sieve.search(25)
(9, 10)
>>> sieve.search(23)
(9, 9)
"""
from sympy.functions.elementary.integers import ceiling
# wrapping ceiling in as_int will raise an error if there was a problem
# determining whether the expression was exactly an integer or not
test = as_int(ceiling(n))
n = as_int(n)
if n < 2:
raise ValueError("n should be >= 2 but got: %s" % n)
if n > self._list[-1]:
self.extend(n)
b = bisect(self._list, n)
if self._list[b - 1] == test:
return b, b
else:
return b, b + 1
def __contains__(self, n):
try:
n = as_int(n)
assert n >= 2
except (ValueError, AssertionError):
return False
if n % 2 == 0:
return n == 2
a, b = self.search(n)
return a == b
def __getitem__(self, n):
"""Return the nth prime number"""
if isinstance(n, slice):
self.extend_to_no(n.stop)
return self._list[n.start - 1:n.stop - 1:n.step]
else:
n = as_int(n)
self.extend_to_no(n)
return self._list[n - 1]
# Generate a global object for repeated use in trial division etc
sieve = Sieve()
def prime(nth):
""" Return the nth prime, with the primes indexed as prime(1) = 2,
prime(2) = 3, etc.... The nth prime is approximately n*log(n).
Logarithmic integral of x is a pretty nice approximation for number of
primes <= x, i.e.
li(x) ~ pi(x)
In fact, for the numbers we are concerned about (x < 1e11),
li(x) - pi(x) < 50000
Also,
li(x) > pi(x) can be safely assumed for the numbers which
can be evaluated by this function.
Here, we find the least integer m such that li(m) > n using binary search.
Now pi(m-1) < li(m-1) <= n,
We find pi(m - 1) using primepi function.
Starting from m, we have to find n - pi(m-1) more primes.
For the inputs this implementation can handle, we will have to test
primality for at max about 10**5 numbers, to get our answer.
Examples
========
>>> from sympy import prime
>>> prime(10)
29
>>> prime(1)
2
>>> prime(100000)
1299709
See Also
========
sympy.ntheory.primetest.isprime : Test if n is prime
primerange : Generate all primes in a given range
primepi : Return the number of primes less than or equal to n
References
==========
.. [1] https://en.wikipedia.org/wiki/Prime_number_theorem#Table_of_.CF.80.28x.29.2C_x_.2F_log_x.2C_and_li.28x.29
.. [2] https://en.wikipedia.org/wiki/Prime_number_theorem#Approximations_for_the_nth_prime_number
.. [3] https://en.wikipedia.org/wiki/Skewes%27_number
"""
n = as_int(nth)
if n < 1:
raise ValueError("nth must be a positive integer; prime(1) == 2")
if n <= len(sieve._list):
return sieve[n]
from sympy.functions.special.error_functions import li
from sympy.functions.elementary.exponential import log
a = 2 # Lower bound for binary search
b = int(n*(log(n) + log(log(n)))) # Upper bound for the search.
while a < b:
mid = (a + b) >> 1
if li(mid) > n:
b = mid
else:
a = mid + 1
n_primes = primepi(a - 1)
while n_primes < n:
if isprime(a):
n_primes += 1
a += 1
return a - 1
class primepi(Function):
""" Represents the prime counting function pi(n) = the number
of prime numbers less than or equal to n.
Algorithm Description:
In the sieve method, we remove all multiples of a prime p
except p itself.
Let phi(i,j) be the number of integers 2 <= k <= i
which remain after sieving from primes less than
or equal to j.
Clearly, pi(n) = phi(n, sqrt(n))
If j is not a prime,
phi(i,j) = phi(i, j - 1)
if j is a prime,
We remove all numbers(except j) whose
smallest prime factor is j.
Let x = j*a be such a number, where 2 <= a <= i / j
Now, after sieving from primes <= j - 1,
a must remain
(because x, and hence a has no prime factor <= j - 1)
Clearly, there are phi(i / j, j - 1) such a
which remain on sieving from primes <= j - 1
Now, if a is a prime less than or equal to j - 1,
x = j*a has smallest prime factor = a, and
has already been removed (by sieving from a).
So, we don't need to remove it again.
(Note: there will be pi(j - 1) such x)
Thus, number of x, that will be removed are:
phi(i / j, j - 1) - phi(j - 1, j - 1)
(Note that pi(j - 1) = phi(j - 1, j - 1))
=> phi(i,j) = phi(i, j - 1) - phi(i / j, j - 1) + phi(j - 1, j - 1)
So, the following recursion is used and implemented as dp:
phi(a, b) = phi(a, b - 1), if b is not a prime
phi(a, b) = phi(a, b - 1) - phi(a / b, b - 1) + phi(b - 1, b - 1), if b is prime
Clearly a is always of the form floor(n / k),
which can take at most 2*sqrt(n) values.
Two arrays arr1,arr2 are maintained
arr1[i] = phi(i, j),
arr2[i] = phi(n // i, j)
Finally the answer is arr2[1]
Examples
========
>>> from sympy import primepi
>>> primepi(25)
9
See Also
========
sympy.ntheory.primetest.isprime : Test if n is prime
primerange : Generate all primes in a given range
prime : Return the nth prime
"""
@classmethod
def eval(cls, n):
if n is S.Infinity:
return S.Infinity
if n is S.NegativeInfinity:
return S.Zero
try:
n = int(n)
except TypeError:
if n.is_real == False or n is S.NaN:
raise ValueError("n must be real")
return
if n < 2:
return S.Zero
if n <= sieve._list[-1]:
return S(sieve.search(n)[0])
lim = int(n ** 0.5)
lim -= 1
lim = max(lim, 0)
while lim * lim <= n:
lim += 1
lim -= 1
arr1 = [0] * (lim + 1)
arr2 = [0] * (lim + 1)
for i in range(1, lim + 1):
arr1[i] = i - 1
arr2[i] = n // i - 1
for i in range(2, lim + 1):
# Presently, arr1[k]=phi(k,i - 1),
# arr2[k] = phi(n // k,i - 1)
if arr1[i] == arr1[i - 1]:
continue
p = arr1[i - 1]
for j in range(1, min(n // (i * i), lim) + 1):
st = i * j
if st <= lim:
arr2[j] -= arr2[st] - p
else:
arr2[j] -= arr1[n // st] - p
lim2 = min(lim, i * i - 1)
for j in range(lim, lim2, -1):
arr1[j] -= arr1[j // i] - p
return S(arr2[1])
def nextprime(n, ith=1):
""" Return the ith prime greater than n.
i must be an integer.
Notes
=====
Potential primes are located at 6*j +/- 1. This
property is used during searching.
>>> from sympy import nextprime
>>> [(i, nextprime(i)) for i in range(10, 15)]
[(10, 11), (11, 13), (12, 13), (13, 17), (14, 17)]
>>> nextprime(2, ith=2) # the 2nd prime after 2
5
See Also
========
prevprime : Return the largest prime smaller than n
primerange : Generate all primes in a given range
"""
n = int(n)
i = as_int(ith)
if i > 1:
pr = n
j = 1
while 1:
pr = nextprime(pr)
j += 1
if j > i:
break
return pr
if n < 2:
return 2
if n < 7:
return {2: 3, 3: 5, 4: 5, 5: 7, 6: 7}[n]
if n <= sieve._list[-2]:
l, u = sieve.search(n)
if l == u:
return sieve[u + 1]
else:
return sieve[u]
nn = 6*(n//6)
if nn == n:
n += 1
if isprime(n):
return n
n += 4
elif n - nn == 5:
n += 2
if isprime(n):
return n
n += 4
else:
n = nn + 5
while 1:
if isprime(n):
return n
n += 2
if isprime(n):
return n
n += 4
def prevprime(n):
""" Return the largest prime smaller than n.
Notes
=====
Potential primes are located at 6*j +/- 1. This
property is used during searching.
>>> from sympy import prevprime
>>> [(i, prevprime(i)) for i in range(10, 15)]
[(10, 7), (11, 7), (12, 11), (13, 11), (14, 13)]
See Also
========
nextprime : Return the ith prime greater than n
primerange : Generates all primes in a given range
"""
from sympy.functions.elementary.integers import ceiling
# wrapping ceiling in as_int will raise an error if there was a problem
# determining whether the expression was exactly an integer or not
n = as_int(ceiling(n))
if n < 3:
raise ValueError("no preceding primes")
if n < 8:
return {3: 2, 4: 3, 5: 3, 6: 5, 7: 5}[n]
if n <= sieve._list[-1]:
l, u = sieve.search(n)
if l == u:
return sieve[l-1]
else:
return sieve[l]
nn = 6*(n//6)
if n - nn <= 1:
n = nn - 1
if isprime(n):
return n
n -= 4
else:
n = nn + 1
while 1:
if isprime(n):
return n
n -= 2
if isprime(n):
return n
n -= 4
def primerange(a, b):
""" Generate a list of all prime numbers in the range [a, b).
If the range exists in the default sieve, the values will
be returned from there; otherwise values will be returned
but will not modify the sieve.
Examples
========
>>> from sympy import primerange, sieve
>>> print([i for i in primerange(1, 30)])
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
The Sieve method, primerange, is generally faster but it will
occupy more memory as the sieve stores values. The default
instance of Sieve, named sieve, can be used:
>>> list(sieve.primerange(1, 30))
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
Notes
=====
Some famous conjectures about the occurrence of primes in a given
range are [1]:
- Twin primes: though often not, the following will give 2 primes
an infinite number of times:
primerange(6*n - 1, 6*n + 2)
- Legendre's: the following always yields at least one prime
primerange(n**2, (n+1)**2+1)
- Bertrand's (proven): there is always a prime in the range
primerange(n, 2*n)
- Brocard's: there are at least four primes in the range
primerange(prime(n)**2, prime(n+1)**2)
The average gap between primes is log(n) [2]; the gap between
primes can be arbitrarily large since sequences of composite
numbers are arbitrarily large, e.g. the numbers in the sequence
n! + 2, n! + 3 ... n! + n are all composite.
See Also
========
nextprime : Return the ith prime greater than n
prevprime : Return the largest prime smaller than n
randprime : Returns a random prime in a given range
primorial : Returns the product of primes based on condition
Sieve.primerange : return range from already computed primes
or extend the sieve to contain the requested
range.
References
==========
.. [1] https://en.wikipedia.org/wiki/Prime_number
.. [2] http://primes.utm.edu/notes/gaps.html
"""
from sympy.functions.elementary.integers import ceiling
if a >= b:
return
# if we already have the range, return it
if b <= sieve._list[-1]:
for i in sieve.primerange(a, b):
yield i
return
# otherwise compute, without storing, the desired range.
# wrapping ceiling in as_int will raise an error if there was a problem
# determining whether the expression was exactly an integer or not
a = as_int(ceiling(a)) - 1
b = as_int(ceiling(b))
while 1:
a = nextprime(a)
if a < b:
yield a
else:
return
def randprime(a, b):
""" Return a random prime number in the range [a, b).
Bertrand's postulate assures that
randprime(a, 2*a) will always succeed for a > 1.
Examples
========
>>> from sympy import randprime, isprime
>>> randprime(1, 30) #doctest: +SKIP
13
>>> isprime(randprime(1, 30))
True
See Also
========
primerange : Generate all primes in a given range
References
==========
.. [1] https://en.wikipedia.org/wiki/Bertrand's_postulate
"""
if a >= b:
return
a, b = map(int, (a, b))
n = random.randint(a - 1, b)
p = nextprime(n)
if p >= b:
p = prevprime(b)
if p < a:
raise ValueError("no primes exist in the specified range")
return p
def primorial(n, nth=True):
"""
Returns the product of the first n primes (default) or
the primes less than or equal to n (when ``nth=False``).
Examples
========
>>> from sympy.ntheory.generate import primorial, randprime, primerange
>>> from sympy import factorint, Mul, primefactors, sqrt
>>> primorial(4) # the first 4 primes are 2, 3, 5, 7
210
>>> primorial(4, nth=False) # primes <= 4 are 2 and 3
6
>>> primorial(1)
2
>>> primorial(1, nth=False)
1
>>> primorial(sqrt(101), nth=False)
210
One can argue that the primes are infinite since if you take
a set of primes and multiply them together (e.g. the primorial) and
then add or subtract 1, the result cannot be divided by any of the
original factors, hence either 1 or more new primes must divide this
product of primes.
In this case, the number itself is a new prime:
>>> factorint(primorial(4) + 1)
{211: 1}
In this case two new primes are the factors:
>>> factorint(primorial(4) - 1)
{11: 1, 19: 1}
Here, some primes smaller and larger than the primes multiplied together
are obtained:
>>> p = list(primerange(10, 20))
>>> sorted(set(primefactors(Mul(*p) + 1)).difference(set(p)))
[2, 5, 31, 149]
See Also
========
primerange : Generate all primes in a given range
"""
if nth:
n = as_int(n)
else:
n = int(n)
if n < 1:
raise ValueError("primorial argument must be >= 1")
p = 1
if nth:
for i in range(1, n + 1):
p *= prime(i)
else:
for i in primerange(2, n + 1):
p *= i
return p
def cycle_length(f, x0, nmax=None, values=False):
"""For a given iterated sequence, return a generator that gives
the length of the iterated cycle (lambda) and the length of terms
before the cycle begins (mu); if ``values`` is True then the
terms of the sequence will be returned instead. The sequence is
started with value ``x0``.
Note: more than the first lambda + mu terms may be returned and this
is the cost of cycle detection with Brent's method; there are, however,
generally fewer terms calculated than would have been calculated if the
proper ending point were determined, e.g. by using Floyd's method.
>>> from sympy.ntheory.generate import cycle_length
This will yield successive values of i <-- func(i):
>>> def iter(func, i):
... while 1:
... ii = func(i)
... yield ii
... i = ii
...
A function is defined:
>>> func = lambda i: (i**2 + 1) % 51
and given a seed of 4 and the mu and lambda terms calculated:
>>> next(cycle_length(func, 4))
(6, 2)
We can see what is meant by looking at the output:
>>> n = cycle_length(func, 4, values=True)
>>> list(ni for ni in n)
[17, 35, 2, 5, 26, 14, 44, 50, 2, 5, 26, 14]
There are 6 repeating values after the first 2.
If a sequence is suspected of being longer than you might wish, ``nmax``
can be used to exit early (and mu will be returned as None):
>>> next(cycle_length(func, 4, nmax = 4))
(4, None)
>>> [ni for ni in cycle_length(func, 4, nmax = 4, values=True)]
[17, 35, 2, 5]
Code modified from:
https://en.wikipedia.org/wiki/Cycle_detection.
"""
nmax = int(nmax or 0)
# main phase: search successive powers of two
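    # (Brent's algorithm: the tortoise teleports to the hare's position each
    # time the search window, a power of two, doubles; lam counts the hare's
    # steps since the last teleport and ends up equal to the cycle length.)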
power = lam = 1
tortoise, hare = x0, f(x0) # f(x0) is the element/node next to x0.
i = 0
while tortoise != hare and (not nmax or i < nmax):
i += 1
if power == lam: # time to start a new power of two?
tortoise = hare
power *= 2
lam = 0
if values:
yield hare
hare = f(hare)
lam += 1
if nmax and i == nmax:
if values:
return
else:
yield nmax, None
return
if not values:
# Find the position of the first repetition of length lambda
mu = 0
tortoise = hare = x0
for i in range(lam):
hare = f(hare)
while tortoise != hare:
tortoise = f(tortoise)
hare = f(hare)
mu += 1
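        # Adjust mu to count the terms before the cycle begins, excluding
        # the seed x0 itself (consistent with the values=True output, which
        # also starts after x0).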
if mu:
mu -= 1
yield lam, mu
def composite(nth):
""" Return the nth composite number, with the composite numbers indexed as
    composite(1) = 4, composite(2) = 6, etc.
Examples
========
>>> from sympy import composite
>>> composite(36)
52
>>> composite(1)
4
>>> composite(17737)
20000
See Also
========
sympy.ntheory.primetest.isprime : Test if n is prime
primerange : Generate all primes in a given range
primepi : Return the number of primes less than or equal to n
prime : Return the nth prime
compositepi : Return the number of positive composite numbers less than or equal to n
"""
n = as_int(nth)
if n < 1:
raise ValueError("nth must be a positive integer; composite(1) == 4")
composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18]
if n <= 10:
return composite_arr[n - 1]
a, b = 4, sieve._list[-1]
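    # There are x - primepi(x) - 1 composites <= x (the -1 accounts for 1,
    # which is neither prime nor composite), so if the nth composite lies
    # within the sieve we can binary search for it directly.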
if n <= b - primepi(b) - 1:
while a < b - 1:
mid = (a + b) >> 1
if mid - primepi(mid) - 1 > n:
b = mid
else:
a = mid
if isprime(a):
a -= 1
return a
from sympy.functions.special.error_functions import li
from sympy.functions.elementary.exponential import log
a = 4 # Lower bound for binary search
b = int(n*(log(n) + log(log(n)))) # Upper bound for the search.
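    # li(x) approximates primepi(x), so mid - li(mid) - 1 approximates the
    # composite count; the correction loop below removes the remaining
    # error exactly.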
while a < b:
mid = (a + b) >> 1
if mid - li(mid) - 1 > n:
b = mid
else:
a = mid + 1
n_composites = a - primepi(a) - 1
while n_composites > n:
if not isprime(a):
n_composites -= 1
a -= 1
if isprime(a):
a -= 1
return a
def compositepi(n):
""" Return the number of positive composite numbers less than or equal to n.
The first positive composite is 4, i.e. compositepi(4) = 1.
Examples
========
>>> from sympy import compositepi
>>> compositepi(25)
15
>>> compositepi(1000)
831
See Also
========
sympy.ntheory.primetest.isprime : Test if n is prime
primerange : Generate all primes in a given range
prime : Return the nth prime
primepi : Return the number of primes less than or equal to n
composite : Return the nth composite number
"""
n = int(n)
if n < 4:
return 0
return n - primepi(n) - 1
| kaushik94/sympy | sympy/ntheory/generate.py | Python | bsd-3-clause | 28,626 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "android_webview/browser/net/aw_url_request_job_factory.h"
#include "net/base/net_errors.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_job_factory_impl.h"
#include "net/url_request/url_request_job_manager.h"
using net::NetworkDelegate;
using net::URLRequest;
using net::URLRequestJob;
namespace android_webview {
AwURLRequestJobFactory::AwURLRequestJobFactory()
: next_factory_(new net::URLRequestJobFactoryImpl()) {
}
AwURLRequestJobFactory::~AwURLRequestJobFactory() {
}
bool AwURLRequestJobFactory::IsHandledProtocol(
const std::string& scheme) const {
  // This introduces a dependency on the URLRequestJobManager
  // implementation. The assumption is that if true is returned from this
  // method, it is still valid to return NULL from
  // MaybeCreateJobWithProtocolHandler, in which case the
  // URLRequestJobManager will try to create the URLRequestJob using its
  // set of built-in handlers.
return true;
}
bool AwURLRequestJobFactory::IsHandledURL(const GURL& url) const {
return true;
}
URLRequestJob* AwURLRequestJobFactory::MaybeCreateJobWithProtocolHandler(
const std::string& scheme,
URLRequest* request,
NetworkDelegate* network_delegate) const {
URLRequestJob* job = next_factory_->MaybeCreateJobWithProtocolHandler(
scheme, request, network_delegate);
if (job)
return job;
  // If the URLRequestJobManager supports the scheme, NULL should be returned
  // from this method. In that case the built-in handlers in
  // URLRequestJobManager will then be used to create the job.
if (net::URLRequestJobManager::GetInstance()->SupportsScheme(scheme))
return NULL;
return new net::URLRequestErrorJob(
request, network_delegate, net::ERR_UNKNOWN_URL_SCHEME);
}
bool AwURLRequestJobFactory::SetProtocolHandler(
const std::string& scheme,
ProtocolHandler* protocol_handler) {
return next_factory_->SetProtocolHandler(scheme, protocol_handler);
}
void AwURLRequestJobFactory::AddInterceptor(Interceptor* interceptor) {
next_factory_->AddInterceptor(interceptor);
}
URLRequestJob* AwURLRequestJobFactory::MaybeCreateJobWithInterceptor(
URLRequest* request, NetworkDelegate* network_delegate) const {
return next_factory_->MaybeCreateJobWithInterceptor(
request, network_delegate);
}
URLRequestJob* AwURLRequestJobFactory::MaybeInterceptRedirect(
const GURL& location,
URLRequest* request,
NetworkDelegate* network_delegate) const {
return next_factory_->MaybeInterceptRedirect(
location, request, network_delegate);
}
URLRequestJob* AwURLRequestJobFactory::MaybeInterceptResponse(
URLRequest* request,
NetworkDelegate* network_delegate) const {
return next_factory_->MaybeInterceptResponse(request, network_delegate);
}
} // namespace android_webview
| nacl-webkit/chrome_deps | android_webview/browser/net/aw_url_request_job_factory.cc | C++ | bsd-3-clause | 3,024 |
package com.tinkerpop.pipes.transform;
import com.tinkerpop.blueprints.Direction;
import com.tinkerpop.pipes.util.PipeHelper;
import java.util.Arrays;
/**
* BothEdgesPipe emits both the outgoing and incoming edges of a vertex.
*
* @author Marko A. Rodriguez (http://markorodriguez.com)
*/
public class BothEdgesPipe extends VerticesEdgesPipe {
public BothEdgesPipe(final String... labels) {
super(Direction.BOTH, labels);
}
public BothEdgesPipe(final int branchFactor, final String... labels) {
super(Direction.BOTH, branchFactor, labels);
}
public String toString() {
return (this.branchFactor == Integer.MAX_VALUE) ?
PipeHelper.makePipeString(this, Arrays.asList(this.labels)) :
PipeHelper.makePipeString(this, this.branchFactor, Arrays.asList(this.labels));
}
}
| whshev/pipes | src/main/java/com/tinkerpop/pipes/transform/BothEdgesPipe.java | Java | bsd-3-clause | 855 |
module SalesInvoiceSoapResponses
def sales_invoice_gettax_response(doc_code, line_item, shipment, time = Time.now)
{
transaction_id: "4314427373575624",
result_code: "Success",
doc_id: "56879220",
doc_type: "SalesInvoice",
doc_code: doc_code,
doc_date: time.to_date,
doc_status: "Saved",
reconciled: false,
timestamp: time,
total_amount: "110",
total_discount: "0",
total_exemption: "0",
total_taxable: "110",
total_tax: "9.77",
total_tax_calculated: "9.77",
hash_code: "0",
tax_lines: {
tax_line: [
{
no: SpreeAvatax::SalesShared.avatax_id(line_item),
tax_code: "P0000000",
taxability: true,
boundary_level: "Zip5",
exemption: "0",
discount: "0",
taxable: "10",
rate: "0.088750",
tax: "0.89",
tax_calculated: "0.89",
tax_included: false,
tax_details: {
tax_detail: [
{
country: "US",
region: "NY",
juris_type: "State",
juris_code: "36",
tax_type: "Sales",
base: "10",
taxable: "10",
rate: "0.040000",
tax: "0.4",
tax_calculated: "0.4",
non_taxable: "0",
exemption: "0",
juris_name: "NEW YORK",
tax_name: "NY STATE TAX",
tax_authority_type: "45",
tax_group: nil,
rate_type: "G",
state_assigned_no: nil
},
{
country: "US",
region: "NY",
juris_type: "City",
juris_code: "51000",
tax_type: "Sales",
base: "10",
taxable: "10",
rate: "0.045000",
tax: "0.45",
tax_calculated: "0.45",
non_taxable: "0",
exemption: "0",
juris_name: "NEW YORK CITY",
tax_name: "NY CITY TAX",
tax_authority_type: "45",
tax_group: nil,
rate_type: "G",
state_assigned_no: "NE 8081"
},
{
country: "US",
region: "NY",
juris_type: "Special",
juris_code: "359071",
tax_type: "Sales",
base: "10",
taxable: "10",
rate: "0.003750",
tax: "0.04",
tax_calculated: "0.04",
non_taxable: "0",
exemption: "0",
juris_name: "METROPOLITAN COMMUTER TRANSPORTATION DISTRICT",
tax_name: "NY SPECIAL TAX",
tax_authority_type: "45",
tax_group: nil,
rate_type: "G",
state_assigned_no: "NE 8061"
}
]
},
exempt_cert_id: "0",
tax_date: time.to_date,
reporting_date: time.to_date,
accounting_method: "Accrual"
},
{
no: SpreeAvatax::SalesShared.avatax_id(shipment),
tax_code: "FR020100",
taxability: true,
boundary_level: "Zip5",
exemption: "0",
discount: "0",
taxable: "100",
rate: "0.088750",
tax: "8.88",
tax_calculated: "8.88",
tax_included: false,
tax_details: {
tax_detail: [
{
country: "US",
region: "NY",
juris_type: "State",
juris_code: "36",
tax_type: "Sales",
base: "100",
taxable: "100",
rate: "0.040000",
tax: "4",
tax_calculated: "4",
non_taxable: "0",
exemption: "0",
juris_name: "NEW YORK",
tax_name: "NY STATE TAX",
tax_authority_type: "45",
tax_group: nil,
rate_type: "G",
state_assigned_no: nil
},
{
country: "US",
region: "NY",
juris_type: "City",
juris_code: "51000",
tax_type: "Sales",
base: "100",
taxable: "100",
rate: "0.045000",
tax: "4.5",
tax_calculated: "4.5",
non_taxable: "0",
exemption: "0",
juris_name: "NEW YORK CITY",
tax_name: "NY CITY TAX",
tax_authority_type: "45",
tax_group: nil,
rate_type: "G",
state_assigned_no: "NE 8081"
},
{
country: "US",
region: "NY",
juris_type: "Special",
juris_code: "359071",
tax_type: "Sales",
base: "100",
taxable: "100",
rate: "0.003750",
tax: "0.38",
tax_calculated: "0.38",
non_taxable: "0",
exemption: "0",
juris_name: "METROPOLITAN COMMUTER TRANSPORTATION DISTRICT",
tax_name: "NY SPECIAL TAX",
tax_authority_type: "45",
tax_group: nil,
rate_type: "G",
state_assigned_no: "NE 8061"
}
]
},
exempt_cert_id: "0",
tax_date: time.to_date,
reporting_date: time.to_date,
accounting_method: "Accrual"
}
]
},
tax_addresses: {
tax_address: {
address: "1234 Way",
address_code: "1",
boundary_level: "2",
city: "New York",
country: "US",
postal_code: "10010",
region: "NY",
tax_region_id: "2088629",
juris_code: "3600051000",
latitude: nil,
longitude: nil,
geocode_type: "ZIP5Centroid",
validate_status: "HouseNotOnStreet",
distance_to_boundary: "0"
}
},
locked: false,
adjustment_reason: "0",
adjustment_description: nil,
version: "1",
tax_date: time.to_date,
tax_summary: nil,
volatile_tax_rates: false,
messages: [
{
summary: nil,
details: nil,
helplink: nil,
refersto: nil,
severity: nil,
source: nil
}
]
}
end
def sales_invoice_posttax_response
{
transaction_id: "4314427475194657",
result_code: "Success",
doc_id: "56879220",
messages:[
{
summary: nil,
details: nil,
helplink: nil,
refersto: nil,
severity: nil,
source: nil,
},
],
}
end
def sales_invoice_canceltax_response
{
transaction_id: "4321919394664864",
result_code: "Success",
doc_id: "57305344",
messages: [
{
summary: nil,
details: nil,
helplink: nil,
refersto: nil,
severity: nil,
source: nil,
},
],
}
end
end
| jordan-brough/solidus_avatax | spec/support/sales_invoice_soap_responses.rb | Ruby | bsd-3-clause | 7,800 |
/* @flow */
/* Flow declarations for express requests and responses */
/* eslint-disable no-unused-vars */
declare class Request {
method: String;
body: Object;
query: Object;
}
declare class Response {
status: (code: Number) => Response;
set: (field: String, value: String) => Response;
send: (body: String) => void;
}
| jamiehodge/express-graphql | resources/interfaces/express.js | JavaScript | bsd-3-clause | 333 |
import base64
import logging
import string
import warnings
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core import signing
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.module_loading import import_string
from django.utils.translation import LANGUAGE_SESSION_KEY
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class UpdateError(Exception):
"""
Occurs if Django tries to update a session that was deleted.
"""
pass
class SessionBase:
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
__not_given = object()
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
self.serializer = import_string(settings.SESSION_SERIALIZER)
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
if key == LANGUAGE_SESSION_KEY:
warnings.warn(
'The user language will no longer be stored in '
'request.session in Django 4.0. Read it from '
'request.COOKIES[settings.LANGUAGE_COOKIE_NAME] instead.',
RemovedInDjango40Warning, stacklevel=2,
)
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
@property
def key_salt(self):
return 'django.contrib.sessions.' + self.__class__.__qualname__
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, default=__not_given):
self.modified = self.modified or key in self._session
args = () if default is self.__not_given else (default,)
return self._session.pop(key, *args)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
# RemovedInDjango40Warning: pre-Django 3.1 format will be invalid.
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Return the given session dictionary serialized and encoded as a string."
# RemovedInDjango40Warning: DEFAULT_HASHING_ALGORITHM will be removed.
if settings.DEFAULT_HASHING_ALGORITHM == 'sha1':
return self._legacy_encode(session_dict)
return signing.dumps(
session_dict, salt=self.key_salt, serializer=self.serializer,
compress=True,
)
def decode(self, session_data):
try:
return signing.loads(session_data, salt=self.key_salt, serializer=self.serializer)
# RemovedInDjango40Warning: when the deprecation ends, handle here
# exceptions similar to what _legacy_decode() does now.
except signing.BadSignature:
try:
# Return an empty session if data is not in the pre-Django 3.1
# format.
return self._legacy_decode(session_data)
except Exception:
logger = logging.getLogger('django.security.SuspiciousSession')
logger.warning('Session data corrupted')
return {}
except Exception:
return self._legacy_decode(session_data)
def _legacy_encode(self, session_dict):
# RemovedInDjango40Warning.
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b':' + serialized).decode('ascii')
def _legacy_decode(self, session_data):
# RemovedInDjango40Warning: pre-Django 3.1 format will be invalid.
encoded_data = base64.b64decode(session_data.encode('ascii'))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
logger.warning(str(e))
return {}
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Return True when there is no session_key and the session is empty."
try:
return not self._session_key and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Return session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _validate_session_key(self, key):
"""
Key must be truthy and at least 8 characters long. 8 characters is an
arbitrary lower bound for some minimal key security.
"""
return key and len(key) >= 8
def _get_session_key(self):
return self.__session_key
def _set_session_key(self, value):
"""
Validate session key on assignment. Invalid values will set to None.
"""
if self._validate_session_key(value):
self.__session_key = value
else:
self.__session_key = None
session_key = property(_get_session_key)
_session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily load session from storage (unless "no_load" is True, when only
an empty dict is stored) and store it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_session_cookie_age(self):
return settings.SESSION_COOKIE_AGE
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Make the difference between "expiry=None passed in kwargs" and
# "expiry not passed in kwargs", in order to guarantee not to trigger
# self.load() when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return self.get_session_cookie_age()
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
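        # days * 86400 + seconds is the timedelta in whole seconds
        # (microseconds are dropped).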
return delta.days * 86400 + delta.seconds
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
expiry = expiry or self.get_session_cookie_age()
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Set a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
def get_expire_at_browser_close(self):
"""
Return ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Remove the current session data from the database and regenerate the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Create a new session key, while retaining the current session data.
"""
data = self._session
key = self.session_key
self.create()
self._session_cache = data
if key:
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Return True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Create a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Save the session data. If 'must_create' is True, create a new session
object (or raise CreateError). Otherwise, only update an existing
object and don't create one (raise UpdateError if needed).
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Delete the session data under this key. If the key is None, use the
current session key value.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Load the session data and return a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
| wkschwartz/django | django/contrib/sessions/backends/base.py | Python | bsd-3-clause | 13,900 |
# -*- coding: utf-8 -*-
import os
CODE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_permdir():
return os.path.join(CODE_DIR, 'permdir')
def get_repo_root():
return get_permdir()
def get_tmpdir():
return os.path.join(CODE_DIR, 'tmpdir')
def init_permdir():
path = get_permdir()
if not os.path.exists(path):
os.makedirs(path)
init_permdir()
| douban/code | vilya/libs/permdir.py | Python | bsd-3-clause | 407 |
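# Backwards-compatibility shim: python-social-auth was split into several
# packages, and the actual backend implementation now lives in social-core;
# this module just re-exports it.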
from social_core.backends.upwork import UpworkOAuth
| cjltsod/python-social-auth | social/backends/upwork.py | Python | bsd-3-clause | 52 |
"""
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
import warnings
from django.template import loader, RequestContext
from django.template.context import _current_app_undefined
from django.template.engine import (
_context_instance_undefined, _dictionary_undefined, _dirs_undefined)
from django.http import HttpResponse, Http404
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.db.models.base import ModelBase
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.core import urlresolvers
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.functional import Promise
def render_to_response(template_name, context=None,
context_instance=_context_instance_undefined,
content_type=None, status=None, dirs=_dirs_undefined,
dictionary=_dictionary_undefined):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
if (context_instance is _context_instance_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
content = loader.render_to_string(template_name, context)
else:
# Some deprecated arguments were passed - use the legacy code path
content = loader.render_to_string(
template_name, context, context_instance, dirs, dictionary)
return HttpResponse(content, content_type, status)
def render(request, template_name, context=None,
context_instance=_context_instance_undefined,
content_type=None, status=None, current_app=_current_app_undefined,
dirs=_dirs_undefined, dictionary=_dictionary_undefined):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
Uses a RequestContext by default.
"""
if (context_instance is _context_instance_undefined
and current_app is _current_app_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
# In Django 2.0, request should become a positional argument.
content = loader.render_to_string(template_name, context, request=request)
else:
# Some deprecated arguments were passed - use the legacy code path
if context_instance is not _context_instance_undefined:
if current_app is not _current_app_undefined:
raise ValueError('If you provide a context_instance you must '
'set its current_app before calling render()')
else:
context_instance = RequestContext(request)
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of render is deprecated. "
"Set the current_app attribute of request instead.",
RemovedInDjango20Warning, stacklevel=2)
request.current_app = current_app
# Directly set the private attribute to avoid triggering the
# warning in RequestContext.__init__.
context_instance._current_app = current_app
content = loader.render_to_string(
template_name, context, context_instance, dirs, dictionary)
return HttpResponse(content, content_type, status)
def redirect(to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
By default issues a temporary redirect; pass permanent=True to issue a
permanent redirect
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Raises a ValueError if klass is not a Model, Manager, or QuerySet.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
elif isinstance(klass, ModelBase):
manager = klass._default_manager
else:
if isinstance(klass, type):
klass__name = klass.__name__
else:
klass__name = klass.__class__.__name__
raise ValueError("Object is of type '%s', but must be a Django Model, "
"Manager, or QuerySet" % klass__name)
return manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
    Uses get() to return an object, or raises an Http404 exception if the
    object does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
    Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
"""
    Uses filter() to return a list of objects, or raises an Http404 exception
    if the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
if isinstance(to, Promise):
# Expand the lazy instance, as it can cause issues when it is passed
# further to some Python functions like urlparse.
to = force_text(to)
if isinstance(to, six.string_types):
# Handle relative URLs
if any(to.startswith(path) for path in ('./', '../')):
return to
# Next try a reverse URL resolution.
try:
return urlresolvers.reverse(to, args=args, kwargs=kwargs)
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
| doismellburning/django | django/shortcuts.py | Python | bsd-3-clause | 7,865 |
from settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'denorm',
'HOST': 'localhost',
'USER': 'denorm',
'PASSWORD': 'denorm1',
}
}
| mjtamlyn/django-denorm | test_project/settings_mysql.py | Python | bsd-3-clause | 221 |
// transform/fmllr-raw.cc
// Copyright 2013 Johns Hopkins University (author: Daniel Povey)
// See ../../COPYING for clarification regarding multiple authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
#include <utility>
#include <vector>
using std::vector;
#include "transform/fmllr-raw.h"
#include "transform/fmllr-diag-gmm.h"
namespace kaldi {
FmllrRawAccs::FmllrRawAccs(int32 raw_dim, int32 model_dim,
const Matrix<BaseFloat> &full_transform)
: raw_dim_(raw_dim), model_dim_(model_dim) {
if (full_transform.NumCols() != full_transform.NumRows() &&
full_transform.NumCols() != full_transform.NumRows() + 1) {
KALDI_ERR << "Expecting full LDA+MLLT transform to be square or d by d+1 "
<< "(make sure you are including rejected rows).";
}
if (raw_dim <= 0 || full_transform.NumRows() % raw_dim != 0)
KALDI_ERR << "Raw feature dimension is invalid " << raw_dim
<< "(must be positive and divide feature dimension)";
int32 full_dim = full_transform.NumRows();
full_transform_ = full_transform.Range(0, full_dim, 0, full_dim);
transform_offset_.Resize(full_dim);
  if (full_transform.NumCols() == full_dim + 1)
    transform_offset_.CopyColFromMat(full_transform, full_dim);
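  // (full_dim + 1) * (full_dim + 2) / 2 is the element count of a packed
  // symmetric matrix of dimension full_dim + 1; each row of S_ stores one
  // such matrix flattened.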
int32 full_dim2 = ((full_dim + 1) * (full_dim + 2)) / 2;
count_ = 0.0;
temp_.Resize(full_dim + 1);
Q_.Resize(model_dim + 1, full_dim + 1);
S_.Resize(model_dim + 1, full_dim2);
single_frame_stats_.s.Resize(full_dim + 1);
single_frame_stats_.transformed_data.Resize(full_dim);
single_frame_stats_.count = 0.0;
single_frame_stats_.a.Resize(model_dim);
single_frame_stats_.b.Resize(model_dim);
}
bool FmllrRawAccs::DataHasChanged(const VectorBase<BaseFloat> &data) const {
KALDI_ASSERT(data.Dim() == FullDim());
return !data.ApproxEqual(single_frame_stats_.s.Range(0, FullDim()), 0.0);
}
void FmllrRawAccs::CommitSingleFrameStats() {
  // Commit the stats for this frame (stored in SingleFrameStats).
int32 model_dim = ModelDim(), full_dim = FullDim();
SingleFrameStats &stats = single_frame_stats_;
if (stats.count == 0.0) return;
count_ += stats.count;
// a_ext and b_ext are a and b extended with the count,
// which we'll later use to reconstruct the full stats for
// the rejected dimensions.
Vector<double> a_ext(model_dim + 1), b_ext(model_dim + 1);
a_ext.Range(0, model_dim).CopyFromVec(stats.a);
b_ext.Range(0, model_dim).CopyFromVec(stats.b);
a_ext(model_dim) = stats.count;
b_ext(model_dim) = stats.count;
Q_.AddVecVec(1.0, a_ext, Vector<double>(stats.s));
temp_.SetZero();
temp_.AddVec2(1.0, stats.s);
int32 full_dim2 = ((full_dim + 1) * (full_dim + 2)) / 2;
SubVector<double> temp_vec(temp_.Data(), full_dim2);
S_.AddVecVec(1.0, b_ext, temp_vec);
}
void FmllrRawAccs::InitSingleFrameStats(const VectorBase<BaseFloat> &data) {
SingleFrameStats &stats = single_frame_stats_;
int32 full_dim = FullDim();
KALDI_ASSERT(data.Dim() == full_dim);
stats.s.Range(0, full_dim).CopyFromVec(data);
stats.s(full_dim) = 1.0;
stats.transformed_data.AddMatVec(1.0, full_transform_, kNoTrans, data, 0.0);
stats.transformed_data.AddVec(1.0, transform_offset_);
stats.count = 0.0;
stats.a.SetZero();
stats.b.SetZero();
}
BaseFloat FmllrRawAccs::AccumulateForGmm(const DiagGmm &gmm,
const VectorBase<BaseFloat> &data,
BaseFloat weight) {
int32 model_dim = ModelDim(), full_dim = FullDim();
KALDI_ASSERT(data.Dim() == full_dim &&
"Expect raw, spliced data, which should have same dimension as "
"full transform.");
if (DataHasChanged(data)) {
// this is part of our mechanism to accumulate certain sub-parts of
// the computation for each frame, to avoid excessive compute.
CommitSingleFrameStats();
InitSingleFrameStats(data);
}
SingleFrameStats &stats = single_frame_stats_;
SubVector<BaseFloat> projected_data(stats.transformed_data, 0, model_dim);
int32 num_gauss = gmm.NumGauss();
Vector<BaseFloat> posterior(num_gauss);
BaseFloat log_like = gmm.ComponentPosteriors(projected_data, &posterior);
posterior.Scale(weight);
// Note: AccumulateFromPosteriors takes the original, spliced data,
// and returns the log-like of the rejected dimensions.
AccumulateFromPosteriors(gmm, data, posterior);
// Add the likelihood of the rejected dimensions to the objective function
// (assume zero-mean, unit-variance Gaussian; the LDA should have any offset
// required to ensure this).
if (full_dim > model_dim) {
SubVector<BaseFloat> rejected_data(stats.transformed_data, model_dim,
full_dim - model_dim);
log_like += -0.5 * (VecVec(rejected_data, rejected_data) +
(full_dim - model_dim) * M_LOG_2PI);
}
return log_like;
}
/*
// Extended comment here.
//
// Let x_t(i) be the fully processed feature, dimension i (with fMLLR
// transform and LDA transform), but *without* any offset term from the LDA,
// which it's more convenient to view as an offset in the model.
//
// For a given dimension i (either accepted or rejected), the auxf can
// be expressed as a quadratic function of x_t(i). We ultimately will want to
// express x_t(i) as a linear function of the parameters of the linearized
// fMLLR transform matrix. Some notation:
//   Let l be the linearized transform matrix, i.e. the concatenation of the
//   m rows, each of length m+1, of the fMLLR transform.
//   Let n be the number of frames we splice together each time.
//   Let s_t be the spliced-together features on time t, with a one appended;
//   it will have n blocks each of size m, followed by a 1 (dim is n*m + 1).
//
// x(i) [note, this is the feature without any LDA offset] is bilinear in the
// transform matrix and the features, so:
//
//   x(i) = l^T M_i s_t, where s_t is the spliced features on time t, with a
//   1 appended [we need to compute M_i, but we know the function is bilinear
//   so it exists].
//
// The auxf can be written as:
//   F = sum_i sum_t a_{ti} x(i) - 0.5 b_{ti} x(i)^2
//     = sum_i l^T M_i q_i - 0.5 l^T M_i S_i M_i^T l
// where
//   q_i = sum_t a_{ti} s_t, and
//   S_i = sum_t b_{ti} s_t s_t^T
// [Note that we only need to store S_i for the model dim plus one, because
// all the rejected dimensions have the same value.]
//
// We define a matrix Q whose rows are q_d, with
//   Q = \sum_t d_t s_t^T
// [The Q we actually store as stats will use a modified form of d that
// has a 1 for all dimensions past the model dim, to avoid redundancy;
// we'll reconstruct the true Q from this later on.]
//
// What is M_i? Working it out is a little tedious.
// Note: each M_i (for i = 0 ... full_dim - 1) is of
// dimension (raw_dim*(raw_dim+1)) by full_dim + 1.
//
// We want to express x(i) [we forget the subscript "t" sometimes]
// as a bilinear function of l and s_t.
// We have x(i) = l^T M_i s.
//
// The (j,k)'th component of M_i is the term in x(i) that corresponds to the
// j'th component of l and the k'th of s.
// Before defining M_i, let us define N_i, where l^T N_i s will equal the
// spliced and transformed pre-LDA features of dimension i. The N's have the
// same dimensions as the M's.
//
// We'll first define the j,k'th component of N_i, as this is easier; we'll
// then define the M_i as combinations of the N_i.
//
// For a given i, j and k, the value of n_{i,j,k} will be as follows:
//   We first decompose index j into j1, j2 (both functions of the original
//   index j), where j1 corresponds to the row-index of the fMLLR transform,
//   j2 to the col-index.
//   We next decompose i into i1, i2, where i1 corresponds to the splicing
//   number (0...n-1), and i2 corresponds to the cepstral index.
//
//   If (j1 != i2) then n_{ijk} == 0.
//
//   Elsif k corresponds to the last element [i.e. k == m * n], then this
//   n_{ijk} corresponds to the effect of the j'th component of l for zero
//   input, so:
//     If j2 == m (i.e. this is the offset term in the fMLLR matrix), then
//       n_{ijk} = 1.0,
//     Else
//       n_{ijk} = 0.0
//     Fi
//
//   Else:
//     Decompose k into k1, k2, where k1 = 0...n-1 is the splicing index, and
//     k2 = 0...m-1 is the cepstral index.
//     If k1 != i1 then
//       n_{ijk} = 0.0
//     elsif k2 != j2 then
//       n_{ijk} = 0.0
//     else
//       n_{ijk} = 1.0
//     fi
//   Endif
//
// Now, M_i will be defined as sum_j T_{ij} N_j, where T_{ij} are the
// elements of the LDA+MLLT transform (but excluding any linear offset, which
// gets accounted for by c_i, above).
//
// Now suppose we want to express the auxiliary function in a simpler form
// as l^T v - 0.5 l^T W l, where v and W are the "simple" linear and
// quadratic stats; we can do so with:
//   v = \sum_i M_i q_i
// and
//   W = \sum_i M_i S_i M_i^T
*/
void FmllrRawAccs::AccumulateFromPosteriors(
const DiagGmm &diag_gmm, const VectorBase<BaseFloat> &data,
const VectorBase<BaseFloat> &posterior) {
// The user may call this function directly, even though we also
// call it from AccumulateForGmm(), so check again:
if (DataHasChanged(data)) {
CommitSingleFrameStats();
InitSingleFrameStats(data);
}
int32 model_dim = ModelDim();
SingleFrameStats &stats = single_frame_stats_;
// The quantities a and b describe the diagonal auxiliary function
// for each of the retained dimensions in the transformed space--
// in the format F = \sum_d alpha(d) x(d) -0.5 beta(d) x(d)^2,
// where x(d) is the d'th dimensional fully processed feature.
// For d, see the comment-- it's alpha processed to take into
// account any offset in the LDA. Note that it's a reference.
//
Vector<double> a(model_dim), b(model_dim);
int32 num_comp = diag_gmm.NumGauss();
double count = 0.0; // data-count contribution from this frame.
// Note: we could do this using matrix-matrix operations instead of
// row by row. In the end it won't really matter as this is not
// the slowest part of the computation.
for (size_t m = 0; m < num_comp; m++) {
BaseFloat this_post = posterior(m);
if (this_post != 0.0) {
count += this_post;
a.AddVec(this_post, diag_gmm.means_invvars().Row(m));
b.AddVec(this_post, diag_gmm.inv_vars().Row(m));
}
}
// Correct "a" for any offset term in the LDA transform-- we view it as
// the opposite offset in the model [note: we'll handle the rejected
// dimensions
// in update time.] Here, multiplying the element of "b" (which is the
// weighted inv-vars) by transform_offset_, and subtracting the result from
// a, is like subtracting the transform-offset from the original means
// (because a contains the means times inv-vars_.
Vector<double> offset(transform_offset_.Range(0, model_dim));
a.AddVecVec(-1.0, b, offset, 1.0);
stats.a.AddVec(1.0, a);
stats.b.AddVec(1.0, b);
stats.count += count;
}
void FmllrRawAccs::Update(const FmllrRawOptions &opts,
MatrixBase<BaseFloat> *raw_fmllr_mat,
BaseFloat *objf_impr, BaseFloat *count) {
// First commit any pending stats from the last frame.
if (single_frame_stats_.count != 0.0) CommitSingleFrameStats();
if (this->count_ < opts.min_count) {
KALDI_WARN << "Not updating (raw) fMLLR since count " << this->count_
<< " is less than min count " << opts.min_count;
*objf_impr = 0.0;
*count = this->count_;
return;
}
KALDI_ASSERT(raw_fmllr_mat->NumRows() == RawDim() &&
raw_fmllr_mat->NumCols() == RawDim() + 1 &&
!raw_fmllr_mat->IsZero());
Matrix<double> fmllr_mat(
*raw_fmllr_mat); // temporary, double-precision version
// of matrix.
Matrix<double> linear_stats; // like K in diagonal update.
std::vector<SpMatrix<double> > diag_stats; // like G in diagonal update.
// Note: we will invert these.
std::vector<std::vector<Matrix<double> > > off_diag_stats; // these will
// contribute to the linear term.
Vector<double> simple_linear_stats;
SpMatrix<double> simple_quadratic_stats;
ConvertToSimpleStats(&simple_linear_stats, &simple_quadratic_stats);
ConvertToPerRowStats(simple_linear_stats, simple_quadratic_stats,
&linear_stats, &diag_stats, &off_diag_stats);
try {
for (size_t i = 0; i < diag_stats.size(); i++) {
diag_stats[i].Invert();
}
} catch (...) {
KALDI_WARN << "Error inverting stats matrices for fMLLR "
<< "[min-count too small? Bad data?], not updating.";
return;
}
int32 raw_dim = RawDim(), splice_width = SpliceWidth();
double effective_beta = count_ * splice_width; // We "count" the determinant
// splice_width times in the objective function.
double auxf_orig =
GetAuxf(simple_linear_stats, simple_quadratic_stats, fmllr_mat);
for (int32 iter = 0; iter < opts.num_iters; iter++) {
for (int32 row = 0; row < raw_dim; row++) {
SubVector<double> this_row(fmllr_mat, row);
Vector<double> this_linear(raw_dim + 1); // Here, k_i is the linear term
// in the auxf expressed as a function of this row.
this_linear.CopyFromVec(linear_stats.Row(row));
for (int32 row2 = 0; row2 < raw_dim; row2++) {
if (row2 != row) {
if (row2 < row) {
this_linear.AddMatVec(-1.0, off_diag_stats[row][row2], kNoTrans,
fmllr_mat.Row(row2), 1.0);
} else {
// We won't have the element [row][row2] stored, but use symmetry.
this_linear.AddMatVec(-1.0, off_diag_stats[row2][row], kTrans,
fmllr_mat.Row(row2), 1.0);
}
}
}
FmllrInnerUpdate(diag_stats[row], this_linear, effective_beta, row,
&fmllr_mat);
}
if (GetVerboseLevel() >= 2) {
double cur_auxf = GetAuxf(simple_linear_stats, simple_quadratic_stats,
fmllr_mat),
auxf_change = cur_auxf - auxf_orig;
KALDI_VLOG(2) << "Updating raw fMLLR: objf improvement per frame was "
<< (auxf_change / this->count_) << " over " << this->count_
<< " frames, by the " << iter << "'th iteration";
}
}
double auxf_final =
GetAuxf(simple_linear_stats, simple_quadratic_stats, fmllr_mat),
auxf_change = auxf_final - auxf_orig;
*count = this->count_;
KALDI_VLOG(1) << "Updating raw fMLLR: objf improvement per frame was "
<< (auxf_change / this->count_) << " over " << this->count_
<< " frames.";
if (auxf_final > auxf_orig) {
*objf_impr = auxf_change;
*count = this->count_;
raw_fmllr_mat->CopyFromMat(fmllr_mat);
} else {
*objf_impr = 0.0;
// don't update "raw_fmllr_mat"
}
}
void FmllrRawAccs::SetZero() {
count_ = 0.0;
single_frame_stats_.count = 0.0;
single_frame_stats_.s.SetZero();
Q_.SetZero();
S_.SetZero();
}
// Compute the M_i quantities, needed in the update. This function could be
// greatly speeded up but I don't think it's the limiting factor.
void FmllrRawAccs::ComputeM(std::vector<Matrix<double> > *M) const {
int32 full_dim = FullDim(), raw_dim = RawDim(),
raw_dim2 = raw_dim * (raw_dim + 1);
M->resize(full_dim);
for (int32 i = 0; i < full_dim; i++) (*M)[i].Resize(raw_dim2, full_dim + 1);
// the N's are simpler matrices from which we'll interpolate the M's.
  // In this loop we imagine we are computing the vector of N's, but
// when we get each element, if it's nonzero we propagate it straight
// to the M's.
for (int32 i = 0; i < full_dim; i++) {
// i is index after fMLLR transform; i1 is splicing index,
// i2 is cepstral index.
int32 i1 = i / raw_dim, i2 = i % raw_dim;
for (int32 j = 0; j < raw_dim2; j++) {
// j1 is row-index of fMLLR transform, j2 is column-index
int32 j1 = j / (raw_dim + 1), j2 = j % (raw_dim + 1);
for (int32 k = 0; k < full_dim + 1; k++) {
BaseFloat n_ijk;
if (j1 != i2) {
n_ijk = 0.0;
} else if (k == full_dim) {
if (j2 == raw_dim) // offset term in fMLLR matrix.
n_ijk = 1.0;
else
n_ijk = 0.0;
} else {
        // k1 is splicing index, k2 is cepstral index.
int32 k1 = k / raw_dim, k2 = k % raw_dim;
if (k1 != i1 || k2 != j2)
n_ijk = 0.0;
else
n_ijk = 1.0;
}
if (n_ijk != 0.0)
for (int32 l = 0; l < full_dim; l++)
(*M)[l](j, k) += n_ijk * full_transform_(l, i);
}
}
}
}
void FmllrRawAccs::ConvertToSimpleStats(
Vector<double> *simple_linear_stats,
SpMatrix<double> *simple_quadratic_stats) const {
std::vector<Matrix<double> > M;
ComputeM(&M);
int32 full_dim = FullDim(), raw_dim = RawDim(), model_dim = ModelDim(),
raw_dim2 = raw_dim * (raw_dim + 1),
full_dim2 = ((full_dim + 1) * (full_dim + 2)) / 2;
simple_linear_stats->Resize(raw_dim2);
simple_quadratic_stats->Resize(raw_dim2);
for (int32 i = 0; i < full_dim; i++) {
Vector<double> q_i(full_dim + 1);
SpMatrix<double> S_i(full_dim + 1);
SubVector<double> S_i_vec(S_i.Data(), full_dim2);
if (i < model_dim) {
q_i.CopyFromVec(Q_.Row(i));
S_i_vec.CopyFromVec(S_.Row(i));
} else {
q_i.CopyFromVec(
Q_.Row(model_dim)); // The last row contains stats proportional
// to "count", which we need to modify to be correct.
q_i.Scale(
-transform_offset_(i)); // These stats are zero (corresponding to
// a zero-mean model) if there is no offset in the LDA transform. Note:
// the two statements above are the equivalent, for the rejected dims,
// of the statement "a.AddVecVec(-1.0, b, offset);" for the kept ones.
//
S_i_vec.CopyFromVec(S_.Row(model_dim)); // these are correct, and
// all the same (corresponds to unit variance).
}
// The equation v = \sum_i M_i q_i:
simple_linear_stats->AddMatVec(1.0, M[i], kNoTrans, q_i, 1.0);
// The equation W = \sum_i M_i S_i M_i^T
// Here, M[i] is quite sparse, so AddSmat2Sp will be faster.
simple_quadratic_stats->AddSmat2Sp(1.0, M[i], kNoTrans, S_i, 1.0);
}
}
// See header for comment.
void FmllrRawAccs::ConvertToPerRowStats(
const Vector<double> &simple_linear_stats,
const SpMatrix<double> &simple_quadratic_stats_sp,
Matrix<double> *linear_stats, std::vector<SpMatrix<double> > *diag_stats,
std::vector<std::vector<Matrix<double> > > *off_diag_stats) const {
// get it as a Matrix, which makes it easier to extract sub-parts.
Matrix<double> simple_quadratic_stats(simple_quadratic_stats_sp);
linear_stats->Resize(RawDim(), RawDim() + 1);
linear_stats->CopyRowsFromVec(simple_linear_stats);
diag_stats->resize(RawDim());
off_diag_stats->resize(RawDim());
// Set *diag_stats
int32 rd1 = RawDim() + 1;
for (int32 i = 0; i < RawDim(); i++) {
SubMatrix<double> this_diag(simple_quadratic_stats, i * rd1, rd1, i * rd1,
rd1);
(*diag_stats)[i].Resize(RawDim() + 1);
(*diag_stats)[i].CopyFromMat(this_diag, kTakeMean);
}
for (int32 i = 0; i < RawDim(); i++) {
(*off_diag_stats)[i].resize(i);
for (int32 j = 0; j < i; j++) {
SubMatrix<double> this_off_diag(simple_quadratic_stats, i * rd1, rd1,
j * rd1, rd1);
(*off_diag_stats)[i][j] = this_off_diag;
}
}
}
double FmllrRawAccs::GetAuxf(const Vector<double> &simple_linear_stats,
const SpMatrix<double> &simple_quadratic_stats,
const Matrix<double> &fmllr_mat) const {
// linearize transform...
  int32 raw_dim = RawDim(), splice_width = SpliceWidth();
Vector<double> fmllr_vec(raw_dim * (raw_dim + 1));
fmllr_vec.CopyRowsFromMat(fmllr_mat);
SubMatrix<double> square_part(fmllr_mat, 0, raw_dim, 0, raw_dim);
double logdet = square_part.LogDet();
return VecVec(fmllr_vec, simple_linear_stats) -
0.5 * VecSpVec(fmllr_vec, simple_quadratic_stats, fmllr_vec) +
         logdet * splice_width * count_;
}
} // namespace kaldi
| hoangt/djinn | tonic-suite/asr/src/transform/fmllr-raw.cc | C++ | bsd-3-clause | 21,344 |
// Copyright (C) 2015 Intel Corporation All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "config.h"
#if ENABLE(WEBCL)
#include "modules/webcl/WebCLContext.h"
#include "modules/webcl/WebCLObject.h"
namespace blink {
WebCLObject::~WebCLObject()
{
if (m_context)
m_context->untrackReleaseableWebCLObject(createWeakPtr());
}
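// The context keeps weak references to its child objects so that it can
// find and release any still-live objects (e.g. when the context itself is
// released) without the bookkeeping extending their lifetimes.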
WebCLObject::WebCLObject(WebCLContext* context)
: m_weakFactory(this)
, m_context(context)
{
ASSERT(m_context);
m_context->trackReleaseableWebCLObject(createWeakPtr());
}
WebCLObject::WebCLObject()
: m_weakFactory(this)
, m_context(nullptr)
{
}
WebCLContext* WebCLObject::context()
{
ASSERT(m_context);
return m_context;
}
void WebCLObject::setContext(WebCLContext* context)
{
m_context = context;
m_context->trackReleaseableWebCLObject(createWeakPtr());
}
} // namespace blink
#endif // ENABLE(WEBCL)
| XiaosongWei/blink-crosswalk | Source/modules/webcl/WebCLObject.cpp | C++ | bsd-3-clause | 970 |
#!/usr/bin/env python
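#
# Pulls EMAIL_ADDRESS indicators tagged COMPROMISED_CREDENTIAL from
# ThreatExchange and writes them out, one "email:password" pair per line.
# Note: despite the [OPTIONAL] help strings, get_results() raises unless
# both --since and --until are given.
#
# Example invocation (the epoch timestamps are illustrative only):
#   python get_compromised_credentials.py -s 1391813489 -u 1391856689 -o creds.txt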
import argparse
import time
from pytx import ThreatIndicator
from pytx.vocabulary import ThreatExchange as te
from pytx.vocabulary import ThreatType as tt
from pytx.vocabulary import Types as t
def get_results(options):
'''
Builds a query string based on the specified options and runs it.
'''
if options.since is None or options.until is None:
raise Exception('You must specify both "since" and "until" values')
results = ThreatIndicator.objects(threat_type=tt.COMPROMISED_CREDENTIAL, type_=t.EMAIL_ADDRESS, limit=options.limit,
fields=['indicator', 'passwords'], since=options.since, until=options.until)
return results
def process_result(handle, result):
'''
Process the threat indicators received from the server. This version
writes the indicators to the output file specified by 'handle', if any.
Indicators are written one per line.
'''
for password in result.passwords:
output = '%s:%s\n' % (result.indicator, password)
if handle is None:
print output,
else:
handle.write(output)
def run_query(options, handle):
start = int(time.time())
print 'READING %s%s' % (te.URL, te.THREAT_INDICATORS)
results = get_results(options)
count = 0
for result in results:
process_result(handle, result)
count += 1
end = int(time.time())
print ('SUCCESS: Got %d indicators in %d seconds' %
(count, end - start))
return
def get_args():
parser = argparse.ArgumentParser(description='Query ThreatExchange for Compromised Credentials')
parser.add_argument('-o', '--output', default='/dev/stdout',
help='[OPTIONAL] output file path.')
parser.add_argument('-s', '--since',
help='[OPTIONAL] Start time for search')
parser.add_argument('-u', '--until',
help='[OPTIONAL] End time for search')
parser.add_argument('-l', '--limit',
help='[OPTIONAL] Maximum number of results')
return parser.parse_args()
def main():
args = get_args()
with open(args.output, 'w') as fp:
run_query(args, fp)
if __name__ == '__main__':
main()
| arirubinstein/ThreatExchange | scripts/get_compromised_credentials.py | Python | bsd-3-clause | 2,285 |
// Copyright Peter Dimov 2001-2002
// Copyright Aleksey Gurtovoy 2001-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// *Preprocessed* version of the main "arg.hpp" header
// -- DO NOT modify by hand!
NDNBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_OPEN
template<> struct arg< -1 >
{
NDNBOOST_STATIC_CONSTANT(int, value = -1);
NDNBOOST_MPL_AUX_ARG_TYPEDEF(na, tag)
NDNBOOST_MPL_AUX_ARG_TYPEDEF(na, type)
template<
typename U1, typename U2, typename U3, typename U4, typename U5
>
struct apply
{
typedef U1 type;
NDNBOOST_MPL_AUX_ASSERT_NOT_NA(type);
};
};
template<> struct arg<1>
{
NDNBOOST_STATIC_CONSTANT(int, value = 1);
typedef arg<2> next;
NDNBOOST_MPL_AUX_ARG_TYPEDEF(na, tag)
NDNBOOST_MPL_AUX_ARG_TYPEDEF(na, type)
template<
typename U1, typename U2, typename U3, typename U4, typename U5
>
struct apply
{
typedef U1 type;
NDNBOOST_MPL_AUX_ASSERT_NOT_NA(type);
};
};
template<> struct arg<2>
{
NDNBOOST_STATIC_CONSTANT(int, value = 2);
typedef arg<3> next;
NDNBOOST_MPL_AUX_ARG_TYPEDEF(na, tag)
NDNBOOST_MPL_AUX_ARG_TYPEDEF(na, type)
template<
typename U1, typename U2, typename U3, typename U4, typename U5
>
struct apply
{
typedef U2 type;
NDNBOOST_MPL_AUX_ASSERT_NOT_NA(type);
};
};
template<> struct arg<3>
{
NDNBOOST_STATIC_CONSTANT(int, value = 3);
typedef arg<4> next;
NDNBOOST_MPL_AUX_ARG_TYPEDEF(na, tag)
NDNBOOST_MPL_AUX_ARG_TYPEDEF(na, type)
template<
typename U1, typename U2, typename U3, typename U4, typename U5
>
struct apply
{
typedef U3 type;
NDNBOOST_MPL_AUX_ASSERT_NOT_NA(type);
};
};
template<> struct arg<4>
{
NDNBOOST_STATIC_CONSTANT(int, value = 4);
typedef arg<5> next;
NDNBOOST_MPL_AUX_ARG_TYPEDEF(na, tag)
NDNBOOST_MPL_AUX_ARG_TYPEDEF(na, type)
template<
typename U1, typename U2, typename U3, typename U4, typename U5
>
struct apply
{
typedef U4 type;
NDNBOOST_MPL_AUX_ASSERT_NOT_NA(type);
};
};
template<> struct arg<5>
{
NDNBOOST_STATIC_CONSTANT(int, value = 5);
typedef arg<6> next;
NDNBOOST_MPL_AUX_ARG_TYPEDEF(na, tag)
NDNBOOST_MPL_AUX_ARG_TYPEDEF(na, type)
template<
typename U1, typename U2, typename U3, typename U4, typename U5
>
struct apply
{
typedef U5 type;
NDNBOOST_MPL_AUX_ASSERT_NOT_NA(type);
};
};
NDNBOOST_MPL_AUX_NONTYPE_ARITY_SPEC(1,int, arg)
NDNBOOST_MPL_AUX_ADL_BARRIER_NAMESPACE_CLOSE
| cawka/packaging-ndn-cpp-dev | include/ndnboost/mpl/aux_/preprocessed/bcc_pre590/arg.hpp | C++ | bsd-3-clause | 2,771 |