package runtime

import (
	"debug/dwarf"
	"unsafe"

	"github.com/go-delve/delve/pkg/dwarf/godwarf"
	dwarfreader "github.com/go-delve/delve/pkg/dwarf/reader"
)

type (
	Tree struct {
		godwarf.Entry
		typ      godwarf.Type
		Tag      dwarf.Tag
		Offset   dwarf.Offset
		Children []*Tree
	}
)

//go:linkname entryToTreeInternal github.com/go-delve/delve/pkg/dwarf/godwarf.entryToTreeInternal
func entryToTreeInternal(entry *dwarf.Entry) *Tree

//go:linkname loadTreeChildren github.com/go-delve/delve/pkg/dwarf/godwarf.loadTreeChildren
func loadTreeChildren(e *dwarf.Entry, rdr *dwarf.Reader) ([]*Tree, error)

func LoadTree(off dwarf.Offset, dw *dwarf.Data) (*godwarf.Tree, error) {
	rdr := dw.Reader()
	rdr.Seek(off)
	e, err := rdr.Next()
	if err != nil {
		return nil, err
	}
	r := entryToTreeInternal(e)
	r.Children, err = loadTreeChildren(e, rdr)
	if err != nil {
		return nil, err
	}
	tree := (*godwarf.Tree)(unsafe.Pointer(r))
	tree.Children = *(*[]*godwarf.Tree)(unsafe.Pointer(&r.Children))
	return tree, nil
}

func DwarfTree(dw *dwarf.Data) (map[string]*godwarf.Tree, error) {
	reader := dwarfreader.New(dw)
	ts := make(map[string]*godwarf.Tree)
	for entry, err := reader.Next(); entry != nil; entry, err = reader.Next() {
		if err != nil {
			return nil, err
		}
		if entry.Tag != dwarf.TagSubprogram {
			continue
		}
		tree, err := LoadTree(entry.Offset, dw)
		if err != nil {
			return nil, err
		}
		if name, ok := tree.Entry.Val(dwarf.AttrName).(string); ok {
			ts[name] = tree
		}
	}
	return ts, nil
}
Reliability of the electrocortical response to gains and losses in the doors task
The ability to differentiate between rewards and losses is critical for motivated action, and aberrant reward and loss processing has been associated with psychopathology. The reward positivity (RewP) and feedback negativity (FN) are ERPs elicited by monetary gains and losses, respectively, and are promising individual difference measures. However, few studies have reported on the psychometric properties of the RewP and FN, characteristics that are crucial for valid individual difference measures. The current study examined the internal consistency and 1-week test-retest reliability of the RewP and FN as elicited by the doors task among 59 young adults. The RewP, FN, and their difference score (ΔRewP) all showed significant correlations between Time 1 and Time 2. The RewP and FN also achieved acceptable internal consistency at both time points within 20 trials, using both Cronbach's α and a generalizability-theory-derived dependability measure. Internal consistency for ΔRewP was notably weaker at both time points, which is expected for a difference score computed from two highly intercorrelated constituent scores. In conclusion, the RewP and FN have strong psychometric properties in a healthy adult sample. Future research is needed to assess the psychometric properties of these ERPs in different age cohorts and in clinical populations.
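The abstract does not include analysis code, but one common way to estimate the internal consistency of a trial-averaged ERP score is a permutation-based split-half correlation with Spearman-Brown correction. The Python sketch below is illustrative only; the function name and the (subjects x trials) input layout are assumptions, not details taken from the study.

import numpy as np

def split_half_reliability(trials, n_perm=1000, seed=0):
    """Permutation-based split-half internal consistency of a per-trial ERP score.

    trials: (n_subjects, n_trials) array of single-trial amplitudes
            (e.g. RewP amplitude on gain trials). Layout is an assumption.
    Returns the mean Spearman-Brown-corrected split-half correlation.
    """
    rng = np.random.default_rng(seed)
    n_subj, n_trials = trials.shape
    rs = []
    for _ in range(n_perm):
        idx = rng.permutation(n_trials)
        half1 = trials[:, idx[: n_trials // 2]].mean(axis=1)
        half2 = trials[:, idx[n_trials // 2 :]].mean(axis=1)
        r = np.corrcoef(half1, half2)[0, 1]
        rs.append(2 * r / (1 + r))  # Spearman-Brown correction for halved length
    return float(np.mean(rs))

A generalizability-theory dependability coefficient, as also reported in the study, is computed differently (from variance components), but the split-half estimate above conveys the core idea of internal consistency for a trial-averaged ERP score.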
from abc import ABCMeta, abstractproperty

from .tuning import get_seq

(S, T, I3) = range(3)


def get_notes(root_note, size):
    notes = get_seq(root_note)
    res = [root_note]
    for sz in size:
        for c in range(sz):
            next(notes)
        res.append(next(notes))
    return res


class BaseSeq(object):
    __metaclass__ = ABCMeta

    def __init__(self, root_note):
        self.root_note = root_note
        self.notes = set(get_notes(root_note, self.size))

    def __unicode__(self):
        return '%s %s' % (self.root_note.replace('#', u'\u266f'), self.cname)

    def nop(self):
        pass

    name = cname = size = abstractproperty(nop)


class Major(BaseSeq):
    name = cname = 'Major Scale'
    size = (T, T, S, T, T, T, S)


class Minor(BaseSeq):
    name = cname = 'Minor Scale'
    size = (T, S, T, T, S, T, T)


class Blues(BaseSeq):
    name = cname = 'Blues Scale'
    size = (I3, T, S, S, I3, T)


class PentatonicMajor(BaseSeq):
    name = 'Pentatonic Major'
    cname = 'Major Pentatonic Scale'
    size = (T, T, I3, T, I3)


class PentatonicMinor(BaseSeq):
    name = 'Pentatonic Minor'
    cname = 'Minor Pentatonic Scale'
    size = (I3, T, T, I3, T)


SCALES = (Major, Minor, PentatonicMajor, PentatonicMinor, Blues)
package net.farlands.sanctuary.command.player;

import net.farlands.sanctuary.FarLands;
import net.farlands.sanctuary.command.Category;
import net.farlands.sanctuary.command.PlayerCommand;
import net.farlands.sanctuary.data.Rank;
import net.farlands.sanctuary.data.struct.OfflineFLPlayer;
import net.farlands.sanctuary.util.ComponentColor;
import org.bukkit.command.BlockCommandSender;
import org.bukkit.command.CommandSender;
import org.bukkit.command.ConsoleCommandSender;
import org.bukkit.entity.Player;

import java.util.Arrays;

public class CommandShrug extends PlayerCommand {

    public CommandShrug() {
        super(
            Rank.INITIATE,
            Category.CHAT,
            "Append text emojis to the end of your message.",
            "/" + String.join("|", TextEmote.commands()) + " [action]",
            true,
            "shrug",
            TextEmote.commands() // Aliases
        );
    }

    @Override
    public boolean execute(Player sender, String[] args) {
        OfflineFLPlayer flp = FarLands.getDataHandler().getOfflineFLPlayer(sender);
        String emote;
        try {
            emote = TextEmote.valueOf(args[0].toUpperCase()).getValue();
        } catch (IllegalArgumentException e) {
            // Invalid emote; shouldn't ever happen since aliases map to enum names, but just to be safe
            return false;
        }
        flp.chat(args.length == 1 ? emote : joinArgsBeyond(0, " ", args).trim() + " " + emote);
        return true;
    }

    @Override
    public boolean canUse(CommandSender sender) {
        if (!(sender instanceof BlockCommandSender
                || sender instanceof ConsoleCommandSender
                || !FarLands.getDataHandler().getOfflineFLPlayer(sender).isMuted())) {
            sender.sendMessage(ComponentColor.red("You cannot use this command while muted."));
            return false;
        }
        return super.canUse(sender);
    }

    public enum TextEmote {
        TABLEFLIP("(╯°□°)╯︵ ┻━┻"),
        UNFLIP("┬─┬ ノ( ゜-゜ノ)"),
        DAB("ㄥ(⸝ ، ⸍ )‾‾‾‾‾"),
        SHRUG("¯\\_(ツ)_/¯"), // ¯\_(ツ)_/¯
        TM("™", false); // :tm: -> ™

        public static TextEmote[] values = values();

        private final String value;
        private final boolean isCommand;

        TextEmote(String value) {
            this(value, true);
        }

        TextEmote(String value, boolean isCommand) {
            this.value = value;
            this.isCommand = isCommand;
        }

        public String getValue() {
            return this.value;
        }

        public boolean isCommand() {
            return this.isCommand;
        }

        public static String[] commands() {
            return Arrays.stream(values())
                .filter(TextEmote::isCommand)
                .map(Enum::name)
                .map(String::toLowerCase)
                .toArray(String[]::new);
        }
    }
}
/*********************************************************************************************************************
 *  Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
 *  with the License. A copy of the License is located at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES
 *  OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions
 *  and limitations under the License.
 *********************************************************************************************************************/
import { injectable, inject } from 'inversify';
import { logger } from '../../utils/logger.util';
import { TYPES } from '../../di/types';
import { DocumentClient } from 'aws-sdk/clients/dynamodb';
import { createDelimitedAttribute, PkType, expandDelimitedAttribute } from '../../utils/pkUtils.util';
import { TargetItem, TargetTypeStrings, TargetItemFactory } from './targets.models';

@injectable()
export class TargetDao {

    private _cachedDc: AWS.DynamoDB.DocumentClient;

    public constructor(
        @inject('aws.dynamoDb.tables.eventConfig.name') private eventConfigTable: string,
        @inject(TYPES.CachableDocumentClientFactory) cachableDocumentClientFactory: () => AWS.DynamoDB.DocumentClient
    ) {
        this._cachedDc = cachableDocumentClientFactory();
    }

    /**
     * Builds the put-item attribute map for a Target DynamoDB item:
     *   Target(s): pk='S-{subscriptionId}', sk='ST-{targetType}-{targetId}'
     */
    public buildPutItemAttributeMap(item: TargetItem, eventSourceId: string, principal: string, principalValue: string): DocumentClient.PutItemInputAttributeMap {
        logger.debug(`target.dao buildPutItemAttributeMap: item:${JSON.stringify(item)}, eventSourceId:${eventSourceId}, principal:${principal}, principalValue:${principalValue}`);

        const putItemAttributeMap: DocumentClient.PutItemInputAttributeMap = {
            pk: createDelimitedAttribute(PkType.Subscription, item.subscriptionId),
            sk: createDelimitedAttribute(PkType.SubscriptionTarget, item.targetType, item.getId()),
            gsi2Key: createDelimitedAttribute(PkType.EventSource, eventSourceId, principal, principalValue),
            gsi2Sort: createDelimitedAttribute(PkType.Subscription, item.subscriptionId, PkType.SubscriptionTarget, item.targetType, item.getId())
        };

        // Copy across all defined properties of the item.
        for (const prop of Object.keys(item)) {
            const value = item[prop];
            if (value !== undefined) {
                putItemAttributeMap[prop] = item[prop];
            }
        }

        logger.debug(`target.dao buildPutItemAttributeMap: exit:${JSON.stringify(putItemAttributeMap)}`);
        return putItemAttributeMap;
    }

    /**
     * Creates the Target DynamoDB item:
     *   Target(s): pk='S-{subscriptionId}', sk='ST-{targetType}-{targetId}'
     */
    public async create(item: TargetItem, eventSourceId: string, principal: string, principalValue: string): Promise<void> {
        logger.debug(`target.dao create: item:${JSON.stringify(item)}, eventSourceId:${eventSourceId}, principal:${principal}, principalValue:${principalValue}`);

        const putItemAttributeMap: DocumentClient.PutItemInputAttributeMap = this.buildPutItemAttributeMap(item, eventSourceId, principal, principalValue);

        await this._cachedDc.put({
            TableName: this.eventConfigTable,
            Item: putItemAttributeMap
        }).promise();

        logger.debug(`target.dao create: exit:`);
    }

    public async delete(subscriptionId: string, targetType: string, targetId: string): Promise<void> {
        logger.debug(`target.dao delete: subscriptionId:${subscriptionId}, targetType:${targetType}, targetId:${targetId}`);

        const pk = createDelimitedAttribute(PkType.Subscription, subscriptionId);
        const sk = createDelimitedAttribute(PkType.SubscriptionTarget, targetType, targetId);

        const params: DocumentClient.DeleteItemInput = {
            TableName: this.eventConfigTable,
            Key: { pk, sk }
        };

        await this._cachedDc.delete(params).promise();

        logger.debug(`target.dao delete: exit:`);
    }

    public async get<T extends TargetItem>(subscriptionId: string, targetType: string, targetId: string): Promise<T> {
        logger.debug(`target.dao get: subscriptionId:${subscriptionId}, targetType:${targetType}, targetId:${targetId}`);

        const pk = createDelimitedAttribute(PkType.Subscription, subscriptionId);
        const sk = createDelimitedAttribute(PkType.SubscriptionTarget, targetType, targetId);

        const params: DocumentClient.GetItemInput = {
            TableName: this.eventConfigTable,
            Key: { pk, sk }
        };

        const data = await this._cachedDc.get(params).promise();
        const item = this.assemble(data);

        logger.debug(`target.dao get: exit:${JSON.stringify(item)}`);
        return item as T;
    }

    public async update(item: TargetItem): Promise<void> {
        logger.debug(`target.dao update: in: item:${JSON.stringify(item)}`);

        const pk = createDelimitedAttribute(PkType.Subscription, item.subscriptionId);
        const sk = createDelimitedAttribute(PkType.SubscriptionTarget, item.targetType, item.getId());

        const params = {
            TableName: this.eventConfigTable,
            Key: { pk, sk },
            UpdateExpression: '',
            ExpressionAttributeValues: {}
        };

        // Build the update expression from the item's own properties.
        Object.keys(item).forEach(k => {
            if (item.hasOwnProperty(k)) {
                if (params.UpdateExpression === '') {
                    params.UpdateExpression += 'set ';
                } else {
                    params.UpdateExpression += ', ';
                }
                params.UpdateExpression += `${k} = :${k}`;
                params.ExpressionAttributeValues[`:${k}`] = item[k];
            }
        });

        await this._cachedDc.update(params).promise();

        logger.debug(`target.dao update: exit:`);
    }

    public assemble<T extends TargetItem>(data: AWS.DynamoDB.DocumentClient.AttributeMap): T {
        logger.debug(`target.dao assemble: in data: ${JSON.stringify(data)}`);

        const subscriptionId = expandDelimitedAttribute(data.Item['pk'])[1];
        const sk = <string>data.Item['sk'];
        const targetType = expandDelimitedAttribute(sk)[1] as TargetTypeStrings;

        const t = TargetItemFactory.getTargetItem(targetType);
        t.subscriptionId = subscriptionId;

        // Copy across all remaining attributes, skipping the DynamoDB key fields.
        Object.keys(data.Item)
            .filter(k => k !== 'pk' && k !== 'sk' && k !== 'gsi2Key' && k !== 'gsi2Sort')
            .forEach(k => t[k] = data.Item[k]);

        logger.debug(`target.dao assemble: exit:${JSON.stringify(t)}`);
        return t as T;
    }
}
Development of a Cyclotron Magnet
We developed a cyclotron magnet at the Institute of Modern Physics, Chinese Academy of Sciences (IMP, CAS); the whole system includes one main magnet, one solenoid, and two quadrupoles, and is used to accelerate an H- beam. This paper describes the magnetic field design of the cyclotron; several shimming methods, including pole-face shimming and side shimming, are used to achieve the isochronous magnetic field required for the H- beam. The final optimization results show that the error between the simulated and theoretical values is small. For comparison, a detailed magnetic field measurement of the cyclotron was carried out; the results show that the isochronous deviations between the measured and theoretical values are less than 5 G at most radii, which satisfies the design requirement.
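As an illustration of the quoted figure of merit, the following Python sketch computes a radial isochronous-deviation profile in gauss from measured and theoretical field maps and checks it against the 5 G tolerance. It is a minimal sketch under assumed conventions (tesla-valued maps sampled on a radius-by-azimuth grid); the paper's actual analysis pipeline is not described in the abstract.

import numpy as np

def isochronous_deviation(b_measured, b_theory):
    """Return the azimuthally averaged field deviation, in gauss, at each radius.

    b_measured, b_theory: (n_radii, n_azimuth) field maps in tesla (assumed layout).
    """
    avg_meas = b_measured.mean(axis=1)  # average field over azimuth at each radius
    avg_theo = b_theory.mean(axis=1)
    return (avg_meas - avg_theo) * 1e4  # 1 T = 10^4 G

# Example check against the 5 G tolerance quoted in the abstract
# (b_meas and b_theo are hypothetical field-map arrays):
# dev = isochronous_deviation(b_meas, b_theo)
# assert np.all(np.abs(dev) < 5.0)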
/**
 * Transforms the given class name to a class.
 *
 * @param className the fully qualified class name, may be <code>null</code>
 * @return the class, or <code>null</code> if it cannot be resolved
 */
public Class<?> transformToClass(String className) {
    if (className == null) {
        return null;
    }
    try {
        return Class.forName(className);
    } catch (ClassNotFoundException e) {
        // The name may refer to an inner class (e.g. "Outer.Inner"); try resolving it as one.
        return tryInnerClassResolving(className);
    }
}
/**
 * IMPORTANT: To use Icon component make sure to follow this guide:
 * https://akveo.github.io/react-native-ui-kitten/docs/guides/icon-packages
 */

import React from 'react';
import { StyleSheet } from 'react-native';
import { Datepicker, Icon, Layout } from '@ui-kitten/components';

const CalendarIcon = (style) => (
  <Icon {...style} name='calendar'/>
);

export const DatepickerWithIconShowcase = () => {
  const [date, setDate] = React.useState(null);

  return (
    <Layout style={styles.container}>
      <Datepicker
        placeholder='Pick Date'
        date={date}
        onSelect={setDate}
        icon={CalendarIcon}
      />
    </Layout>
  );
};

const styles = StyleSheet.create({
  container: {
    minHeight: 376,
  },
});
/* Generated by RuntimeBrowser
   Image: /System/Library/PrivateFrameworks/SAObjects.framework/SAObjects
 */

@interface SAIntentGroupUnsupportedIntentResponse : SAIntentGroupIntentInvocationResponse

+ (id)unsupportedIntentResponse;
+ (id)unsupportedIntentResponseWithDictionary:(id)arg1 context:(id)arg2;

- (id)encodedClassName;
- (id)groupIdentifier;
- (bool)requiresResponse;

@end
"""Adding initial tables Revision ID: 4f310004f218 Revises: Create Date: 2015-10-23 16:08:21.396455 """ # revision identifiers, used by Alembic. revision = '4f310004f218' down_revision = None branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table('reservations', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.PrimaryKeyConstraint('id') ) op.create_table('servers', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('name', sa.String(), nullable=True), sa.Column('cpu_count', sa.Integer(), nullable=True), sa.Column('local_drive_capacity', sa.Integer(), nullable=True), sa.Column('psu_capacity', sa.Integer(), nullable=True), sa.Column('psu_size', sa.String(length=36), nullable=True), sa.Column('memory_mb', sa.Integer(), nullable=True), sa.Column('cpu_architecture', sa.String(), nullable=True), sa.Column('driver_name', sa.String(), nullable=True), sa.Column('deploy_kernel', sa.String(), nullable=True), sa.Column('deploy_ramdisk', sa.String(), nullable=True), sa.Column('ipmi_address', sa.String(), nullable=True), sa.Column('ipmi_password', sa.String(), nullable=True), sa.Column('impi_username', sa.String(), nullable=True), sa.Column('impi_priv_level', sa.String(), nullable=True), sa.Column('ipmi_mac_address', sa.String(), nullable=True), sa.Column('reservation_id', sa.Integer(), nullable=True), sa.Column('deployed', sa.Boolean(), nullable=True), sa.ForeignKeyConstraint(['reservation_id'], ['reservations.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('ipmi_mac_address', name='uniq_servers0impmimacaddress'), sa.UniqueConstraint('name', name='uniq_servers0name'), sa.UniqueConstraint('uuid', name='uniq_servers0uuid') ) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_table('servers') op.drop_table('reservations') ### end Alembic commands ###
/*
 * Copyright 2004-2006 <NAME>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.asteriskjava.manager.event;

/**
 * Abstract base class for RTP statistics events.<p>
 *
 * @author srt
 * @version $Id$
 * @since 1.0.0
 */
public abstract class AbstractRtpStatEvent extends ManagerEvent {
    private static final long serialVersionUID = 1L;

    private Long ssrc;
    private Long lostPackets;
    private Double jitter;

    public AbstractRtpStatEvent(Object source) {
        super(source);
    }

    /**
     * Returns the synchronization source identifier that uniquely identifies the source of a stream.
     *
     * @return the synchronization source identifier.
     */
    public Long getSsrc() {
        return ssrc;
    }

    public void setSsrc(Long ssrc) {
        this.ssrc = ssrc;
    }

    /**
     * Returns the number of lost packets.
     *
     * @return the number of lost packets.
     */
    public Long getLostPackets() {
        return lostPackets;
    }

    public void setLostPackets(Long lostPackets) {
        this.lostPackets = lostPackets;
    }

    public Double getJitter() {
        return jitter;
    }

    public void setJitter(Double jitter) {
        this.jitter = jitter;
    }
}
Real-time communications in a computer-controlled workcell
A computer-integrated manufacturing (CIM) system is composed of several workcells, each of which contains robots, numerical-control machines, sensors, and a transport mechanism. The author considers a communication subsystem that is designed to support real-time control and coordination of devices in each CIM cell. The concept of a poll number is proposed to control access to the intracell bus. The bus access mechanism with the poll number is intended to minimize the probability of real-time messages missing their deadlines. Use of a poll number provides not only decentralized control of the intracell bus but also a high degree of flexibility in scheduling messages. The performance of the bus access mechanism with a poll number is analyzed and compared with that of a token bus, which is widely used in CIM systems such as MAP (Manufacturing Automation Protocol) networks. The probability of a real-time message missing its deadline in a token bus is found to be much higher than that of the proposed mechanism.
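The abstract does not specify how a poll number is computed, so the following Python sketch only illustrates the general idea of deadline-driven bus arbitration: each pending message carries a priority derived from its deadline, and the station holding the most urgent message wins the poll. All names and the earliest-deadline-first rule are assumptions for illustration, not the paper's actual protocol.

import heapq

def schedule_by_poll_number(messages, now=0.0):
    """Grant intracell-bus access in order of a deadline-derived poll number.

    messages: list of (station, deadline, tx_time) tuples (hypothetical format).
    Returns the transmission order and which messages miss their deadlines.
    """
    # Here the poll number is simply the absolute deadline: the tighter the
    # deadline, the sooner the station wins the poll (earliest-deadline-first).
    queue = [(deadline, station, tx_time) for station, deadline, tx_time in messages]
    heapq.heapify(queue)
    t, order, missed = now, [], []
    while queue:
        deadline, station, tx_time = heapq.heappop(queue)
        t += tx_time
        order.append(station)
        if t > deadline:
            missed.append(station)
    return order, missed

# Example: three stations contending for the bus.
order, missed = schedule_by_poll_number(
    [("robot", 5.0, 2.0), ("sensor", 3.0, 1.0), ("nc_machine", 9.0, 4.0)])

In a token bus, by contrast, access order is fixed by the logical ring regardless of urgency, which is why a tight-deadline message can be forced to wait a full token rotation.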
import { fabloVersion, getVersionFromSchemaUrl } from "./config";

it("should get version from schema URL", () => {
  const url = "https://github.com/softwaremill/fablo/releases/download/0.0.1/schema.json";
  const version = getVersionFromSchemaUrl(url);
  expect(version).toEqual("0.0.1");
});

it("should get current version in case of missing schema URL", () => {
  const version = getVersionFromSchemaUrl(undefined);
  expect(version).toEqual(fabloVersion);
});
// ------------------------------------------------------------------------------
/// <auto-generated>
///     This code was generated by a tool.
///
///     Changes to this file may cause incorrect behavior and will be lost if
///     the code is regenerated.
/// </auto-generated>
// ------------------------------------------------------------------------------
// tslint:disable
/* eslint-disable */

import { Guid, Convert, SimpleObject, SimpleDate, ObservableSimpleObject, ObservableSimpleDate } from 'luxite/system';
import * as async from 'luxite/async';
import { OData } from 'luxite/framework/odata';
import { Environment } from 'luxite/environment';
import { Core } from 'luxite/framework/framework';
import * as dto from '../dto/authorization.generated';
import * as mockdto from '../../mocked-dto';

export let getCurrentPrincipalAsyncFunc = _getCurrentPrincipal();
export let getSecurityOperationsAsyncFunc = _getSecurityOperations();

function _getCurrentPrincipal(): async.AsyncFunc2<dto.PrincipalFullDTO, dto.PrincipalObservableFullDTO, dto.PrincipalFullDTO, dto.PrincipalObservableFullDTO> {
    return new async.AsyncFunc2(() => {
        let baseParameters = {};
        let service = Environment.current.context.facadeFactory.createAuthService<dto.PrincipalFullDTO, dto.PrincipalObservableFullDTO, dto.PrincipalFullDTO, dto.PrincipalObservableFullDTO>();
        return service.getData('Principal/GetCurrentPrincipal', { plain: dto.PrincipalFullDTO, observable: dto.PrincipalObservableFullDTO }, baseParameters);
    });
}

function _getSecurityOperations(): async.AsyncFunc2<Array<string>, Array<string>, string, string> {
    return new async.AsyncFunc2(() => {
        let baseParameters = {};
        let service = Environment.current.context.facadeFactory.createAuthService<Array<string>, Array<string>, string, string>();
        return service.getData('Operation/GetSecurityOperations', { plain: SimpleObject, observable: ObservableSimpleObject }, baseParameters);
    });
}
# Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import threading

import netaddr

from touchdown.core import argument, plan, resource, serializers

from . import IniFile


class Allocations(resource.Resource):
    resource_name = "ip_allocations"
    name = argument.String()
    network = argument.IPNetwork()
    config = argument.Resource(IniFile)


class Describe(plan.Plan):
    resource = Allocations
    name = "describe"

    def network(self):
        return serializers.maybe(self.resource.network).render(
            self.runner, self.resource
        )

    def get_actions(self):
        conf = self.runner.get_service(self.resource.config, "describe")
        self.object = {}
        for key, value in conf.walk(self.resource.name):
            self.object[key] = value
        self.runner.get_service(self.resource, "ip_allocator").load(
            self.network(), self.object
        )
        return []


class IpAllocator(plan.Plan):
    """
    Given an ipaddress.ip_network, manage allocating it into smaller
    allocations
    """

    resource = Allocations
    name = "ip_allocator"

    def __init__(self, *args, **kwargs):
        super(IpAllocator, self).__init__(*args, **kwargs)
        self.allocation_lock = threading.Lock()

    def load(self, network, state):
        """
        Given a list of allocations that have already been applied, ensure
        that `self.allocations` and `self.free` is correct.
        """
        network_set = netaddr.IPSet([network])
        state = {k: v for (k, v) in state.items() if netaddr.IPNetwork(v) in network}
        state_set = netaddr.IPSet(state.values())

        with self.allocation_lock:
            self.allocations = state
            self.free = collections.defaultdict(list)
            # Whatever is left of the network after removing the existing
            # allocations is free, bucketed by prefix length.
            for cidr in (network_set - state_set).iter_cidrs():
                self.free[cidr.prefixlen].append(cidr)

    def allocate(self, name, prefixlen):
        network = self.runner.get_service(self.resource, "describe").network()

        if prefixlen < int(network.prefixlen):
            raise ValueError(
                "Cannot fit /{} inside /{}".format(prefixlen, network.prefixlen)
            )

        with self.allocation_lock:
            # Find the smallest free block that can hold a /prefixlen.
            for i in range(prefixlen, int(network.prefixlen) - 1, -1):
                if self.free.get(i, None):
                    selected = self.free[i].pop()
                    break
            else:
                raise ValueError(
                    "There is not enough space left to allocate a /{}".format(prefixlen)
                )

            # Split the selected block until it is exactly /prefixlen,
            # returning the leftover halves to the free pool.
            while int(selected.prefixlen) < prefixlen:
                selected, leftover = selected.subnet(selected.prefixlen + 1)
                self.free[int(leftover.prefixlen)] = [leftover]

            self.allocations[name] = selected

        return selected
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns

from .provider import FrontierProvider

urlpatterns = default_urlpatterns(FrontierProvider)
/******************************************************************************
 * Copyright (c) 2011 Marvell International, Ltd. All Rights Reserved
 *
 *                         Marvell Confidential
 ******************************************************************************/

#include <string.h>

#include "tx_api.h"
#include "tcl.h"
#include "tclHash.h"
#include "strfmt.h"
#include "cmd_proc_api.h"
#include "cmd_var_api.h"
#include "cmd_private.h"

typedef struct __var_t {
    struct __var_t *next;
    char *name;
    char *value;
} _var_t;

static _var_t *_vars = NULL;

/* -------------------------------------------------------------------------- */
static void _access(int get_nRel)
{
    static TX_SEMAPHORE _sem;
    static int _initted = 0;
    if (!_initted) {
        tx_semaphore_create(&_sem, "cmd_var", 1);
        _initted = 1;
    }
    if (get_nRel) tx_semaphore_get(&_sem, TX_WAIT_FOREVER);
    else          tx_semaphore_put(&_sem);
}

/* -------------------------------------------------------------------------- */
static _var_t* _findvar(const char* name)
{
    _var_t* v;
    if (!name || !name[0]) return NULL;
    for (v = _vars; v; v = v->next)
        if (!strcmp(v->name, name)) return v;
    return NULL;
}

/* -------------------------------------------------------------------------- */
int cmd_defvar(const char* name, const char* val)
{
    _var_t* var;
    if (!name || !*name || !val) return CMD_VAR_BAD_PARAM;
    _access(1);
    if (!(var = _findvar(name))) {
        if (!(var = (_var_t*)cmd_malloc(sizeof(_var_t))) ||
            !(var->name = (char*)cmd_malloc(strlen(name)+1)) ||
            !(var->value = (char*)cmd_malloc(strlen(val)+1))) {
            _access(0);
            if (var && var->name) cmd_free(var->name);
            if (var) cmd_free(var);
            return CMD_VAR_MEMORY; /* Out of memory of some sort */
        }
        strcpy(var->name, name);
        strcpy(var->value, val);
        var->next = _vars;
        _vars = var;
    }
    _access(0);
    return CMD_VAR_OK;
}

/* -------------------------------------------------------------------------- */
int cmd_setvar(const char* name, const char* val)
{
    _var_t* var;
    if (!name || !*name || !val) return CMD_VAR_BAD_PARAM;
    _access(1);
    if (!(var = _findvar(name))) {
        _access(0);
        return cmd_defvar(name, val);
    }
    if (strlen(var->value) < strlen(val)) {
        char* n = (char*)cmd_malloc(strlen(val)+1);
        if (!n) {
            _access(0);
            return CMD_VAR_MEMORY;
        }
        cmd_free(var->value);
        var->value = n;
    }
    strcpy(var->value, val);
    _access(0);
    return CMD_VAR_OK;
}

/* -------------------------------------------------------------------------- */
int cmd_getvar(const char* name, char* curval, int maxlen)
{
    _var_t* var;
    if (!name || !*name || !curval || !maxlen) return CMD_VAR_BAD_PARAM;
    _access(1);
    if (!(var = _findvar(name))) {
        _access(0);
        return CMD_VAR_NOTFOUND;
    }
    if ((strlen(var->value)+1) > maxlen) {
        _access(0);
        return CMD_VAR_MEMORY;
    }
    strcpy(curval, var->value);
    _access(0);
    return CMD_VAR_OK;
}

/* -------------------------------------------------------------------------- */
int cmd_incvar(const char* name, int val, int* newval)
{
    _var_t* var;
    if (!name || !*name) return CMD_VAR_BAD_PARAM;
    _access(1);
    if (!(var = _findvar(name))) {
        _access(0);
        return CMD_VAR_NOTFOUND;
    }
    if (val) {
        char valstr[15]; /* More than enough for any 32bit number and NULL */
        val += cmd_atoi(var->value);
        strfmt(valstr, "%d", val);
        if (strlen(var->value) < strlen(valstr)) {
            char* n = (char*)cmd_malloc(strlen(valstr)+1);
            if (!n) {
                _access(0);
                return CMD_VAR_MEMORY;
            }
            cmd_free(var->value);
            var->value = n;
        }
        strcpy(var->value, valstr);
    }
    else val = cmd_atoi(var->value);
    _access(0);
    if (newval) *newval = val;
    return CMD_VAR_OK;
}

/* -------------------------------------------------------------------------- */
/* JSA - it is peculiar that this API has no way to report errors */
int cmd_getint(const char* name)
{
    int val;
    if (CMD_VAR_OK != cmd_incvar(name, 0, &val)) return 0;
    return val;
}

/* -------------------------------------------------------------------------- */
static int _cmd_defvar(int argc, char* argv[])
{
    if ((argc < 2) || (argc > 3)) return CMD_USAGE_ERROR;
    if (cmd_defvar(argv[1], (argc>2)?argv[2]:"")) return CMD_ERROR;
    return CMD_OK;
}

/* -------------------------------------------------------------------------- */
static int _cmd_getvar(int argc, char* argv[])
{
    _var_t* var;
    if (argc != 2) return CMD_USAGE_ERROR;
    _access(1);
    if ((var = _findvar(argv[1]))) {
        cmd_append_result("%s", var->value);
        _access(0);
        return CMD_OK;
    }
    else {
        _access(0);
        cmd_append_result("I cannot find a variable named '%s'", argv[1]);
        return CMD_ERROR;
    }
}

/* -------------------------------------------------------------------------- */
static int _cmd_setvar(int argc, char* argv[])
{
    if (argc != 3) return CMD_USAGE_ERROR;
    if (cmd_setvar(argv[1], argv[2])) return CMD_ERROR;
    else cmd_append_result("%s", argv[2]);
    return CMD_OK;
}

/* -------------------------------------------------------------------------- */
static int _cmd_incvar(int argc, char* argv[])
{
    int r, val = 1, new;
    if (argc < 2) return CMD_USAGE_ERROR;
    else if (argc == 3) val = cmd_atoi(argv[2]);
    else if (argc > 3) return CMD_USAGE_ERROR;
    if (*(argv[0]) == 'd') val *= -1; /* This was a decvar and not incvar */
    if ((r = cmd_incvar(argv[1], val, &new))) {
        cmd_append_result("Error adjusting variable by %d (%d)", val, r);
        return CMD_ERROR;
    }
    cmd_append_result("%d", new);
    return CMD_OK;
}

/* -------------------------------------------------------------------------- */
void cmd_var_init(void)
{
    _access(1); /* Take and release once to force creation of the semaphore */
    _access(0);
    cmd_register("defvar",
                 "Define a shared variable",
                 "<varname> [<initval>=\"\"]",
                 "Shared variables are available to every CMD interpreter in "
                 "the system as well as to C programs. A variable can be "
                 "defined with a default value.",
                 _cmd_defvar);
    cmd_register("getvar",
                 "Get the value of a shared variable",
                 "<varname>", NULL, _cmd_getvar);
    cmd_register("setvar",
                 "Set a new value for a shared variable",
                 "<varname> <value>", NULL, _cmd_setvar);
    cmd_register("incvar",
                 "Increment a shared variable by 1 or more",
                 "<varname> [<value>=1]", NULL, _cmd_incvar);
    cmd_register("decvar",
                 "Decrement a shared variable by 1 or more",
                 "<varname> [<value>=1]", NULL, _cmd_incvar);
}
/**
 * grove: DecisionForest.tpp
 * Copyright (c) Torr Vision Group, University of Oxford, 2017. All rights reserved.
 */

#include "DecisionForest.h"

#include <algorithm>
#include <fstream>
#include <iomanip>
#include <iostream>

#include <boost/lexical_cast.hpp>

#ifdef WITH_SCOREFORESTS
#include <Learner.hpp>
#endif

#include <itmx/base/MemoryBlockFactory.h>

#include <tvgutil/numbers/RandomNumberGenerator.h>

// Whether or not to replace the pre-computed feature indices and thresholds with random ones.
#define RANDOM_FEATURES 0

namespace grove {

//#################### CONSTRUCTORS ####################

template <typename DescriptorType, int TreeCount>
DecisionForest<DescriptorType,TreeCount>::DecisionForest()
: m_nbTotalLeaves(0)
{}

template <typename DescriptorType, int TreeCount>
DecisionForest<DescriptorType,TreeCount>::DecisionForest(const std::string& filename)
{
  load_structure_from_file(filename);
}

#ifdef WITH_SCOREFORESTS
template <typename DescriptorType, int TreeCount>
DecisionForest<DescriptorType,TreeCount>::DecisionForest(const EnsembleLearner& pretrainedForest)
{
  // Convert list of nodes into an appropriate image.
  const uint32_t nbTrees = pretrainedForest.GetNbTrees();
  const uint32_t maxNbNodes = pretrainedForest.GetMaxNbNodesInAnyLearner();

  if(nbTrees != get_nb_trees())
  {
    throw std::runtime_error("Number of trees in the loaded forest different from the instantiation of GPUForest.");
  }

  // Allocate the texture to store the nodes.
  const itmx::MemoryBlockFactory& mbf = itmx::MemoryBlockFactory::instance();
  m_nodeImage = mbf.make_image<NodeEntry>(Vector2i(nbTrees, maxNbNodes));
  m_nodeImage->Clear();

  // Fill the nodes.
  NodeEntry *forestData = m_nodeImage->GetData(MEMORYDEVICE_CPU);
  uint32_t totalNbLeaves = 0;

  for(uint32_t treeIdx = 0; treeIdx < nbTrees; ++treeIdx)
  {
    const Learner *tree = pretrainedForest.GetTree(treeIdx);
    const uint32_t nbNodes = tree->GetNbNodes();

    // Bug in ScoreForests: tree->GetNbLeaves() always returns 1 for trees that have been loaded from a file because
    // the base learner class does not store the leaves and the DTBP class does not perform the loading (is done at the
    // base class level).
    // const int nbLeaves = tree->GetNbLeaves();

    // We have to count the number of leaves in each tree
    uint32_t nbLeavesBefore = totalNbLeaves;

    // Recursive call: we set the first free entry to 1, since we reserve 0 for the root of the tree.
    convert_node(tree, 0, treeIdx, nbTrees, 0, 1, forestData, totalNbLeaves);

    uint32_t nbLeaves = totalNbLeaves - nbLeavesBefore;
    std::cout << "Converted tree " << treeIdx << ", had " << nbNodes << " nodes and " << nbLeaves << " leaves." << std::endl;

    m_nbNodesPerTree.push_back(nbNodes);
    m_nbLeavesPerTree.push_back(nbLeaves);
  }

  // NOPs if we use the CPU only implementation
  m_nodeImage->UpdateDeviceFromHost();
}
#endif

//#################### DESTRUCTOR ####################

template <typename DescriptorType, int TreeCount>
DecisionForest<DescriptorType,TreeCount>::~DecisionForest()
{}

//#################### PUBLIC MEMBER FUNCTIONS ####################

template <typename DescriptorType, int TreeCount>
uint32_t DecisionForest<DescriptorType,TreeCount>::get_nb_leaves() const
{
  uint32_t nbLeaves = 0;
  for(uint32_t i = 0; i < get_nb_trees(); ++i)
  {
    nbLeaves += get_nb_leaves_in_tree(i);
  }
  return nbLeaves;
}

template <typename DescriptorType, int TreeCount>
uint32_t DecisionForest<DescriptorType,TreeCount>::get_nb_leaves_in_tree(uint32_t treeIdx) const
{
  if(treeIdx < get_nb_trees()) return m_nbLeavesPerTree[treeIdx];
  else throw std::invalid_argument("Invalid tree index");
}

template <typename DescriptorType, int TreeCount>
uint32_t DecisionForest<DescriptorType,TreeCount>::get_nb_nodes_in_tree(uint32_t treeIdx) const
{
  if(treeIdx < get_nb_trees()) return m_nbNodesPerTree[treeIdx];
  else throw std::invalid_argument("Invalid tree index");
}

template <typename DescriptorType, int TreeCount>
uint32_t DecisionForest<DescriptorType,TreeCount>::get_nb_trees() const
{
  return TREE_COUNT;
}

template <typename DescriptorType, int TreeCount>
void DecisionForest<DescriptorType,TreeCount>::load_structure_from_file(const std::string& filename)
{
  // Clear the current forest.
  m_nodeImage.reset();
  m_nbNodesPerTree.clear();
  m_nbLeavesPerTree.clear();
  m_nbTotalLeaves = 0;

  std::ifstream in(filename.c_str());
  if(!in) throw std::runtime_error("Couldn't load a forest from: " + filename);

  // Check that the number of trees is the same as the template instantiation.
  uint32_t nbTrees;
  in >> nbTrees;

  if(!in || nbTrees != get_nb_trees())
  {
    throw std::runtime_error(
      "Number of trees of the loaded forest is incorrect. Should be " +
      boost::lexical_cast<std::string>(get_nb_trees()) + " - Read: " +
      boost::lexical_cast<std::string>(nbTrees)
    );
  }

  // Used to allocate the indexing texture (height = the maximum number of nodes, width = nbTrees).
  uint32_t maxNbNodes = 0;

  // For each tree, first read the number of nodes, then the number of leaves.
  for(uint32_t i = 0; i < nbTrees; ++i)
  {
    uint32_t nbNodes, nbLeaves;
    in >> nbNodes >> nbLeaves;

    if(!in) throw std::runtime_error("Error reading the dimensions of tree: " + boost::lexical_cast<std::string>(i));

    m_nbNodesPerTree.push_back(nbNodes);
    m_nbLeavesPerTree.push_back(nbLeaves);

    maxNbNodes = std::max(nbNodes, maxNbNodes);
    m_nbTotalLeaves += nbLeaves;
  }

  std::cout << "Loading a forest with " << nbTrees << " trees.\n";
  for(uint32_t i = 0; i < nbTrees; ++i)
  {
    std::cout << "\tTree " << i << ": " << m_nbNodesPerTree[i] << " nodes and " << m_nbLeavesPerTree[i] << " leaves.\n";
  }

  // Allocate and clear the node image.
  const itmx::MemoryBlockFactory& mbf = itmx::MemoryBlockFactory::instance();
  m_nodeImage = mbf.make_image<NodeEntry>(Vector2i(nbTrees, maxNbNodes));
  m_nodeImage->Clear();

#if RANDOM_FEATURES
  tvgutil::RandomNumberGenerator rng(42);
#endif

  // Read all the nodes from the file.
  NodeEntry *forestNodes = m_nodeImage->GetData(MEMORYDEVICE_CPU);
  for(uint32_t treeIdx = 0; treeIdx < nbTrees; ++treeIdx)
  {
    for(uint32_t nodeIdx = 0; nodeIdx < m_nbNodesPerTree[treeIdx]; ++nodeIdx)
    {
      NodeEntry &node = forestNodes[nodeIdx * nbTrees + treeIdx];
      in >> node.leftChildIdx >> node.leafIdx >> node.featureIdx >> node.featureThreshold;

      if(!in)
      {
        throw std::runtime_error(
          "Error reading node " + boost::lexical_cast<std::string>(nodeIdx) +
          " of tree " + boost::lexical_cast<std::string>(treeIdx)
        );
      }

#if RANDOM_FEATURES
      // The magic numbers mimic the distribution found in the pre-trained office forest.
      bool depthFeature = rng.generate_real_from_uniform(0.f, 1.f) < 0.3886f;

      if(depthFeature)
      {
        node.featureIdx = rng.generate_int_from_uniform(0, 127);

        float depthMu = 20.09f;
        float depthSigma = 947.24f;
        node.featureThreshold = rng.generate_from_gaussian(depthMu, depthSigma);
      }
      else
      {
        node.featureIdx = rng.generate_int_from_uniform(128, 255);

        float rgbMu = -2.85f;
        float rgbSigma = 72.98f;
        node.featureThreshold = rng.generate_from_gaussian(rgbMu, rgbSigma);
      }

//      int minRGBFeature = -100;
//      int maxRGBFeature = 100;
//      int minDepthFeature = -600;
//      int maxDepthFeature = 600;
//      node.featureIdx = rng.generate_int_from_uniform(0, RGBDPatchFeature::FEATURE_SIZE - 1);
//      if(node.featureIdx < RGBDPatchFeature::RGB_OFFSET)
//      {
//        node.featureThreshold = rng.generate_int_from_uniform(minDepthFeature, maxDepthFeature);
//      }
//      else
//      {
//        node.featureThreshold = rng.generate_int_from_uniform(minRGBFeature, maxRGBFeature);
//      }
#endif
    }
  }

  // Ensure that the node image is available on the GPU (if we're using it).
  m_nodeImage->UpdateDeviceFromHost();
}

template <typename DescriptorType, int TreeCount>
void DecisionForest<DescriptorType,TreeCount>::save_structure_to_file(const std::string& filename) const
{
  std::ofstream out(filename.c_str());

  // Write the number of trees.
  const uint32_t nbTrees = get_nb_trees();
  out << nbTrees << '\n';

  // For each tree, first write the number of nodes, then the number of leaves.
  for(uint32_t i = 0; i < nbTrees; ++i)
  {
    out << m_nbNodesPerTree[i] << ' ' << m_nbLeavesPerTree[i] << '\n';
  }

  // Then, for each tree, dump its nodes.
  const NodeEntry *forestNodes = m_nodeImage->GetData(MEMORYDEVICE_CPU);
  for(uint32_t treeIdx = 0; treeIdx < nbTrees; ++treeIdx)
  {
    for(uint32_t nodeIdx = 0; nodeIdx < m_nbNodesPerTree[treeIdx]; ++nodeIdx)
    {
      const NodeEntry& node = forestNodes[nodeIdx * nbTrees + treeIdx];
      out << node.leftChildIdx << ' ' << node.leafIdx << ' ' << node.featureIdx << ' '
          << std::setprecision(7) << node.featureThreshold << '\n';
    }
  }

  if(!out) throw std::runtime_error("Error saving the forest to a file: " + filename);
}

//#################### PRIVATE MEMBER FUNCTIONS ####################

#ifdef WITH_SCOREFORESTS
template <typename DescriptorType, int TreeCount>
int DecisionForest<DescriptorType,TreeCount>::convert_node(const Learner *tree, uint32_t nodeIdx, uint32_t treeIdx, uint32_t nbTrees,
                                                           uint32_t outputIdx, uint32_t outputFirstFreeIdx, NodeEntry *outputNodes, uint32_t& outputNbLeaves)
{
  const Node *node = tree->GetNode(nodeIdx);
  NodeEntry& outputNode = outputNodes[outputIdx * nbTrees + treeIdx];

  // The assumption is that outputIdx is already reserved for the current node.
  if(node->IsALeaf())
  {
    outputNode.leftChildIdx = -1; // Is a leaf
    outputNode.featureIdx = 0;
    outputNode.featureThreshold = 0.f;
    // outputFirstFreeIdx does not change

    // Post-increment to get the current leaf index.
    outputNode.leafIdx = outputNbLeaves++;
  }
  else
  {
    outputNode.leafIdx = -1; // Not a leaf

    // Reserve 2 entries for the child nodes.
    outputNode.leftChildIdx = outputFirstFreeIdx++;
    const uint32_t rightChildIdx = outputFirstFreeIdx++; // No need to store it in the texture since it's always leftChildIdx + 1

    // Use the ScoreForests cast to get the split parameters.
    const InnerNode *innerNode = ToInnerNode(node);
    std::vector<float> params = innerNode->GetFeature()->GetParameters();

    outputNode.featureIdx = params[1];
    outputNode.featureThreshold = params[2];

    // Recursively convert the left child and its descendants.
    outputFirstFreeIdx = convert_node(
      tree, node->GetLeftChildIndex(), treeIdx, nbTrees, outputNode.leftChildIdx, outputFirstFreeIdx, outputNodes, outputNbLeaves
    );

    // Same for right child and descendants.
    outputFirstFreeIdx = convert_node(
      tree, node->GetRightChildIndex(), treeIdx, nbTrees, rightChildIdx, outputFirstFreeIdx, outputNodes, outputNbLeaves
    );
  }

  return outputFirstFreeIdx;
}
#endif

}
def send_array(self, json_array, pdef):
    for offset, value in enumerate(
            record[field['name']]
            for record in json_array
            for field in pdef.array_points):
        if value not in ['', None]:
            self.send_command(self.send_direct_operate_command,
                              pdef,
                              value,
                              index=pdef.index + offset)
from .document import Implementation, Template
from .data_objects import BaseDataObject
from .data_proxy import missing
from .exceptions import DocumentDefinitionError, AbstractDocumentError


__all__ = (
    'EmbeddedDocumentTemplate',
    'EmbeddedDocument',
    'EmbeddedDocumentOpts',
    'EmbeddedDocumentImplementation'
)


class EmbeddedDocumentTemplate(Template):
    """
    Base class to define a umongo embedded document.

    .. note::
        Once defined, this class must be registered inside a
        :class:`umongo.instance.BaseInstance` to obtain its corresponding
        :class:`umongo.embedded_document.EmbeddedDocumentImplementation`.
    """
    pass


EmbeddedDocument = EmbeddedDocumentTemplate
"Shortcut to EmbeddedDocumentTemplate"


class EmbeddedDocumentOpts:
    """
    Configuration for an :class:`umongo.embedded_document.EmbeddedDocument`.

    Should be passed as a Meta class to the :class:`Document`

    .. code-block:: python

        @instance.register
        class MyEmbeddedDoc(EmbeddedDocument):
            class Meta:
                abstract = True

        assert MyEmbeddedDoc.opts.abstract == True

    ==================== ====================== ===========
    attribute            configurable in Meta   description
    ==================== ====================== ===========
    template             no                     Origin template of the embedded document
    instance             no                     Implementation's instance
    abstract             yes                    Document has no collection and can only be inherited
    allow_inheritance    yes                    Allow the document to be subclassed
    is_child             no                     Document inherits from a non-abstract document
    strict               yes                    Don't accept unknown fields from mongo (default: True)
    offspring            no                     List of EmbeddedDocuments inheriting this one
    ==================== ====================== ===========
    """

    def __repr__(self):
        return ('<{ClassName}('
                'instance={self.instance}, '
                'template={self.template}, '
                'abstract={self.abstract}, '
                'allow_inheritance={self.allow_inheritance}, '
                'is_child={self.is_child}, '
                'strict={self.strict}, '
                'offspring={self.offspring})>'
                .format(ClassName=self.__class__.__name__, self=self))

    def __init__(self, instance, template, abstract=False, allow_inheritance=True,
                 is_child=False, strict=True, offspring=None):
        self.instance = instance
        self.template = template
        self.abstract = abstract
        self.allow_inheritance = allow_inheritance
        self.is_child = is_child
        self.strict = strict
        self.offspring = set(offspring) if offspring else set()
        if self.abstract and not self.allow_inheritance:
            raise DocumentDefinitionError("Abstract embedded document cannot disable inheritance")


class EmbeddedDocumentImplementation(Implementation, BaseDataObject):
    """
    Represent an embedded document once it has been implemented inside a
    :class:`umongo.instance.BaseInstance`.
    """

    __slots__ = ('_callback', '_data', '_modified')
    __real_attributes = None
    opts = EmbeddedDocumentOpts(None, EmbeddedDocumentTemplate, abstract=True)

    def __init__(self, **kwargs):
        super().__init__()
        if self.opts.abstract:
            raise AbstractDocumentError("Cannot instantiate an abstract EmbeddedDocument")
        self._data = self.DataProxy(kwargs)

    def __repr__(self):
        return '<object EmbeddedDocument %s.%s(%s)>' % (
            self.__module__, self.__class__.__name__, dict(self._data.items()))

    def __eq__(self, other):
        if isinstance(other, dict):
            return self._data == other
        elif hasattr(other, '_data'):
            return self._data == other._data
        return NotImplemented

    def is_modified(self):
        return self._data.is_modified()

    def clear_modified(self):
        self._data.clear_modified()

    def required_validate(self):
        self._data.required_validate()

    @classmethod
    def build_from_mongo(cls, data, use_cls=True):
        """
        Create an embedded document instance from MongoDB data

        :param data: data as retrieved from MongoDB
        :param use_cls: if the data contains a ``_cls`` field, use it to
            determine the EmbeddedDocument class to instantiate
        """
        # If a _cls is specified, we have to use this document class
        if use_cls and '_cls' in data:
            cls = cls.opts.instance.retrieve_embedded_document(data['_cls'])
        doc = cls()
        doc.from_mongo(data)
        return doc

    def from_mongo(self, data):
        self._data.from_mongo(data)

    def to_mongo(self, update=False):
        return self._data.to_mongo(update=update)

    def update(self, data):
        """
        Update the embedded document with the given data.
        """
        return self._data.update(data)

    def dump(self):
        """
        Dump the embedded document.
        """
        return self._data.dump()

    def items(self):
        return self._data.items()

    # Data-proxy accessor shortcuts

    def __getitem__(self, name):
        value = self._data.get(name)
        return value if value is not missing else None

    def __delitem__(self, name):
        self._data.delete(name)

    def __setitem__(self, name, value):
        self._data.set(name, value)

    def __setattr__(self, name, value):
        # Try to retrieve name among class's attributes and __slots__
        if not self.__real_attributes:
            # `dir(self)` result only depends on self's class so we can
            # compute it once and store it inside the class
            type(self).__real_attributes = dir(self)
        if name in self.__real_attributes:
            object.__setattr__(self, name, value)
        else:
            self._data.set(name, value, to_raise=AttributeError)

    def __getattr__(self, name):
        value = self._data.get(name, to_raise=AttributeError)
        return value if value is not missing else None

    def __delattr__(self, name):
        if not self.__real_attributes:
            type(self).__real_attributes = dir(self)
        if name in self.__real_attributes:
            object.__delattr__(self, name)
        else:
            self._data.delete(name, to_raise=AttributeError)
from __future__ import absolute_import

import six

from collections import defaultdict
from datetime import timedelta

from django.db.models import Q
from django.db.models.aggregates import Count
from django.utils import timezone

from sentry import options, roles, tsdb
from sentry.api.serializers import register, serialize, Serializer
from sentry.api.serializers.models.plugin import PluginSerializer
from sentry.api.serializers.models.team import get_org_roles, get_team_memberships
from sentry.app import env
from sentry.auth.superuser import is_active_superuser
from sentry.constants import StatsPeriod
from sentry.digests import backend as digests
from sentry.models import (
    Project, ProjectAvatar, ProjectBookmark, ProjectOption, ProjectPlatform,
    ProjectStatus, ProjectTeam, Release, ReleaseProjectEnvironment, Deploy,
    UserOption, DEFAULT_SUBJECT_TEMPLATE
)
from sentry.utils.data_filters import FilterTypes

STATUS_LABELS = {
    ProjectStatus.VISIBLE: 'active',
    ProjectStatus.HIDDEN: 'deleted',
    ProjectStatus.PENDING_DELETION: 'deleted',
    ProjectStatus.DELETION_IN_PROGRESS: 'deleted',
}

STATS_PERIOD_CHOICES = {
    '30d': StatsPeriod(30, timedelta(hours=24)),
    '14d': StatsPeriod(14, timedelta(hours=24)),
    '24h': StatsPeriod(24, timedelta(hours=1)),
}


@register(Project)
class ProjectSerializer(Serializer):
    """
    This is primarily used to summarize projects. We utilize it when doing
    bulk loads for things such as "show all projects for this organization",
    and its attributes should be kept to a minimum.
    """

    def __init__(self, environment_id=None, stats_period=None):
        if stats_period is not None:
            assert stats_period in STATS_PERIOD_CHOICES

        self.environment_id = environment_id
        self.stats_period = stats_period

    def get_access_by_project(self, item_list, user):
        request = env.request

        project_teams = list(
            ProjectTeam.objects.filter(
                project__in=item_list,
            ).select_related('team')
        )

        project_team_map = defaultdict(list)

        for pt in project_teams:
            project_team_map[pt.project_id].append(pt.team)

        team_memberships = get_team_memberships([pt.team for pt in project_teams], user)
        org_roles = get_org_roles([i.organization_id for i in item_list], user)

        is_superuser = (request and is_active_superuser(request) and request.user == user)
        result = {}
        for project in item_list:
            is_member = any(
                t.id in team_memberships for t in project_team_map.get(project.id, [])
            )
            org_role = org_roles.get(project.organization_id)
            if is_member:
                has_access = True
            elif is_superuser:
                has_access = True
            elif project.organization.flags.allow_joinleave:
                has_access = True
            elif org_role and roles.get(org_role).is_global:
                has_access = True
            else:
                has_access = False
            result[project] = {
                'is_member': is_member,
                'has_access': has_access,
            }
        return result

    def get_attrs(self, item_list, user):
        project_ids = [i.id for i in item_list]
        if user.is_authenticated() and item_list:
            bookmarks = set(
                ProjectBookmark.objects.filter(
                    user=user,
                    project_id__in=project_ids,
                ).values_list('project_id', flat=True)
            )
            user_options = {
                (u.project_id, u.key): u.value
                for u in UserOption.objects.filter(
                    Q(user=user, project__in=item_list, key='mail:alert') |
                    Q(user=user, key='subscribe_by_default', project__isnull=True)
                )
            }
            default_subscribe = (user_options.get(
                'subscribe_by_default', '1') == '1')
        else:
            bookmarks = set()
            user_options = {}
            default_subscribe = False

        if self.stats_period:
            # we need to compute stats at 1d (1h resolution), and 14d
            project_ids = [o.id for o in item_list]

            segments, interval = STATS_PERIOD_CHOICES[self.stats_period]
            now = timezone.now()
            stats = tsdb.get_range(
                model=tsdb.models.project,
                keys=project_ids,
                end=now,
                start=now - ((segments - 1) * interval),
                rollup=int(interval.total_seconds()),
                environment_ids=self.environment_id and [self.environment_id],
            )
        else:
            stats = None

        avatars = {a.project_id: a for a in ProjectAvatar.objects.filter(project__in=item_list)}

        project_ids = [i.id for i in item_list]
        platforms = ProjectPlatform.objects.filter(
            project_id__in=project_ids,
        ).values_list('project_id', 'platform')
        platforms_by_project = defaultdict(list)
        for project_id, platform in platforms:
            platforms_by_project[project_id].append(platform)

        result = self.get_access_by_project(item_list, user)
        for item in item_list:
            result[item].update({
                'is_bookmarked': item.id in bookmarks,
                'is_subscribed': bool(user_options.get(
                    (item.id, 'mail:alert'),
                    default_subscribe,
                )),
                'avatar': avatars.get(item.id),
                'platforms': platforms_by_project[item.id]
            })
            if stats:
                result[item]['stats'] = stats[item.id]
        return result

    def get_feature_list(self, obj, user):
        from sentry import features
        from sentry.features.base import ProjectFeature

        # Retrieve all registered organization features
        project_features = features.all(feature_type=ProjectFeature).keys()

        feature_list = set()
        for feature_name in project_features:
            if not feature_name.startswith('projects:'):
                continue
            if features.has(feature_name, obj, actor=user):
                # Remove the project scope prefix
                feature_list.add(feature_name[len('projects:'):])

        if obj.flags.has_releases:
            feature_list.add('releases')

        return feature_list

    def serialize(self, obj, attrs, user):
        feature_list = self.get_feature_list(obj, user)

        status_label = STATUS_LABELS.get(obj.status, 'unknown')

        if attrs.get('avatar'):
            avatar = {
                'avatarType': attrs['avatar'].get_avatar_type_display(),
                'avatarUuid': attrs['avatar'].ident if attrs['avatar'].file_id else None
            }
        else:
            avatar = {'avatarType': 'letter_avatar', 'avatarUuid': None}

        context = {
            'id': six.text_type(obj.id),
            'slug': obj.slug,
            'name': obj.name,
            'isPublic': obj.public,
            'isBookmarked': attrs['is_bookmarked'],
            'color': obj.color,
            'dateCreated': obj.date_added,
            'firstEvent': obj.first_event,
            'features': feature_list,
            'status': status_label,
            'platform': obj.platform,
            'isInternal': obj.is_internal_project(),
            'isMember': attrs['is_member'],
            'hasAccess': attrs['has_access'],
            'avatar': avatar,
        }
        if 'stats' in attrs:
            context['stats'] = attrs['stats']
        return context


class ProjectWithOrganizationSerializer(ProjectSerializer):
    def get_attrs(self, item_list, user):
        attrs = super(ProjectWithOrganizationSerializer, self).get_attrs(item_list, user)

        orgs = {d['id']: d for d in serialize(
            list(set(i.organization for i in item_list)), user)}
        for item in item_list:
            attrs[item]['organization'] = orgs[six.text_type(item.organization_id)]
        return attrs

    def serialize(self, obj, attrs, user):
        data = super(ProjectWithOrganizationSerializer, self).serialize(obj, attrs, user)
        data['organization'] = attrs['organization']
        return data


class ProjectWithTeamSerializer(ProjectSerializer):
    def get_attrs(self, item_list, user):
        attrs = super(ProjectWithTeamSerializer, self).get_attrs(item_list, user)

        project_teams = list(ProjectTeam.objects.filter(
            project__in=item_list,
        ).select_related('team'))

        teams = {pt.team_id: {
            'id': six.text_type(pt.team.id),
            'slug': pt.team.slug,
            'name': pt.team.name,
        } for pt in project_teams}

        teams_by_project_id = defaultdict(list)
        for pt in project_teams:
            teams_by_project_id[pt.project_id].append(teams[pt.team_id])

        for item in item_list:
            attrs[item]['teams'] = teams_by_project_id[item.id]
        return attrs

    def serialize(self, obj, attrs, user):
        data = super(ProjectWithTeamSerializer, self).serialize(obj, attrs, user)
        # TODO(jess): remove this when this is deprecated
        try:
            data['team'] = attrs['teams'][0]
        except IndexError:
            pass
        data['teams'] = attrs['teams']
        return data


class ProjectSummarySerializer(ProjectWithTeamSerializer):
    def get_attrs(self, item_list, user):
        attrs = super(ProjectSummarySerializer, self).get_attrs(item_list, user)

        release_project_envs = list(ReleaseProjectEnvironment.objects.filter(
            project__in=item_list,
            last_deploy_id__isnull=False
        ).values('release__version', 'environment__name', 'last_deploy_id', 'project__id'))

        deploys = dict(
            Deploy.objects.filter(
                id__in=[rpe['last_deploy_id'] for rpe in release_project_envs]
            ).values_list('id', 'date_finished'))

        deploys_by_project = defaultdict(dict)

        for rpe in release_project_envs:
            env_name = rpe['environment__name']
            project_id = rpe['project__id']
            date_finished = deploys[rpe['last_deploy_id']]

            if (
                env_name not in deploys_by_project[project_id] or
                deploys_by_project[project_id][env_name]['dateFinished'] < date_finished
            ):
                deploys_by_project[project_id][env_name] = {
                    'version': rpe['release__version'],
                    'dateFinished': date_finished
                }

        latest_release_list = bulk_fetch_project_latest_releases(item_list)
        latest_releases = {
            r.actual_project_id: d
            for r, d in zip(latest_release_list, serialize(latest_release_list, user))
        }

        for item in item_list:
            attrs[item]['latest_release'] = latest_releases.get(item.id)
            attrs[item]['deploys'] = deploys_by_project.get(item.id)

        return attrs

    def serialize(self, obj, attrs, user):
        feature_list = self.get_feature_list(obj, user)
        context = {
            'team': attrs['teams'][0] if attrs['teams'] else None,
            'teams': attrs['teams'],
            'id': six.text_type(obj.id),
            'name': obj.name,
            'slug': obj.slug,
            'isBookmarked': attrs['is_bookmarked'],
            'isMember': attrs['is_member'],
            'hasAccess': attrs['has_access'],
            'dateCreated': obj.date_added,
            'features': feature_list,
            'firstEvent': obj.first_event,
            'platform': obj.platform,
            'platforms': attrs['platforms'],
            'latestDeploys': attrs['deploys'],
            'latestRelease': attrs['latest_release'],
        }
        if 'stats' in attrs:
            context['stats'] = attrs['stats']
        return context


def bulk_fetch_project_latest_releases(projects):
    """
    Fetches the latest release for each of the passed projects

    :param projects:
    :return: List of Releases, each with an additional `actual_project_id`
        attribute representing the project that they're the latest release
        for. If no release found, no entry will be returned for the given
        project.
    """
    return list(Release.objects.raw(
        u"""
        SELECT lr.project_id as actual_project_id, r.*
        FROM (
            SELECT (
                SELECT lrr.id
                FROM sentry_release lrr
                JOIN sentry_release_project lrp
                ON lrp.release_id = lrr.id
                WHERE lrp.project_id = p.id
                ORDER BY COALESCE(lrr.date_released, lrr.date_added) DESC
                LIMIT 1
            ) as release_id,
            p.id as project_id
            FROM sentry_project p
            WHERE p.id IN ({})
        ) as lr
        JOIN sentry_release r
        ON r.id = lr.release_id
        """.format(', '.join(six.text_type(i.id) for i in projects)),
    ))


class DetailedProjectSerializer(ProjectWithTeamSerializer):
    OPTION_KEYS = frozenset(
        [
            'sentry:origins',
            'sentry:resolve_age',
            'sentry:scrub_data',
            'sentry:scrub_defaults',
            'sentry:safe_fields',
            'sentry:store_crash_reports',
            'sentry:sensitive_fields',
            'sentry:csp_ignored_sources_defaults',
            'sentry:csp_ignored_sources',
            'sentry:default_environment',
            'sentry:reprocessing_active',
            'sentry:blacklisted_ips',
            'sentry:releases',
            'sentry:error_messages',
            'sentry:scrape_javascript',
            'sentry:token',
            'sentry:token_header',
            'sentry:verify_ssl',
            'sentry:scrub_ip_address',
            'sentry:relay_pii_config',
            'feedback:branding',
            'digests:mail:minimum_delay',
            'digests:mail:maximum_delay',
            'mail:subject_prefix',
            'mail:subject_template',
        ]
    )

    def get_attrs(self, item_list, user):
        attrs = super(DetailedProjectSerializer, self).get_attrs(item_list, user)

        project_ids = [i.id for i in item_list]

        num_issues_projects = Project.objects.filter(
            id__in=project_ids
        ).annotate(num_issues=Count('processingissue')) \
            .values_list('id', 'num_issues')

        processing_issues_by_project = {}
        for project_id, num_issues in num_issues_projects:
            processing_issues_by_project[project_id] = num_issues

        queryset = ProjectOption.objects.filter(
            project__in=item_list,
            key__in=self.OPTION_KEYS,
        )
        options_by_project = defaultdict(dict)
        for option in queryset.iterator():
            options_by_project[option.project_id][option.key] = option.value

        orgs = {d['id']: d for d in serialize(
            list(set(i.organization for i in item_list)), user)}

        latest_release_list = bulk_fetch_project_latest_releases(item_list)
        latest_releases = {
            r.actual_project_id: d
            for r, d in zip(latest_release_list, serialize(latest_release_list, user))
        }

        for item in item_list:
            attrs[item].update(
                {
                    'latest_release': latest_releases.get(item.id),
                    'org': orgs[six.text_type(item.organization_id)],
                    'options': options_by_project[item.id],
                    'processing_issues': processing_issues_by_project.get(item.id, 0),
                }
            )
        return attrs

    def serialize(self, obj, attrs, user):
        from clims.services import ioc
        data = super(DetailedProjectSerializer, self).serialize(obj, attrs, user)
        data.update(
            {
                'latestRelease': attrs['latest_release'],
                'options': {
                    'sentry:csp_ignored_sources_defaults':
                        bool(attrs['options'].get('sentry:csp_ignored_sources_defaults', True)),
                    'sentry:csp_ignored_sources':
                        '\n'.join(attrs['options'].get('sentry:csp_ignored_sources', []) or []),
                    'sentry:reprocessing_active':
                        bool(attrs['options'].get('sentry:reprocessing_active', False)),
                    'filters:blacklisted_ips':
                        '\n'.join(attrs['options'].get('sentry:blacklisted_ips', [])),
                    u'filters:{}'.format(FilterTypes.RELEASES):
                        '\n'.join(attrs['options'].get(u'sentry:{}'.format(FilterTypes.RELEASES), [])),
                    u'filters:{}'.format(FilterTypes.ERROR_MESSAGES):
                        '\n'.join(attrs['options'].get(u'sentry:{}'.format(FilterTypes.ERROR_MESSAGES), [])),
                    'feedback:branding': attrs['options'].get('feedback:branding', '1') == '1',
                },
                'digestsMinDelay': attrs['options'].get(
                    'digests:mail:minimum_delay',
                    digests.minimum_delay,
                ),
                'digestsMaxDelay': attrs['options'].get(
                    'digests:mail:maximum_delay',
                    digests.maximum_delay,
                ),
                'subjectPrefix': attrs['options'].get(
                    'mail:subject_prefix', options.get('mail.subject-prefix')),
                'allowedDomains': attrs['options'].get('sentry:origins', ['*']),
                'resolveAge': int(attrs['options'].get('sentry:resolve_age', 0)),
                'dataScrubber': bool(attrs['options'].get('sentry:scrub_data', True)),
                'dataScrubberDefaults': bool(attrs['options'].get('sentry:scrub_defaults', True)),
                'safeFields': attrs['options'].get('sentry:safe_fields', []),
                'storeCrashReports': bool(attrs['options'].get('sentry:store_crash_reports', False)),
                'sensitiveFields': attrs['options'].get('sentry:sensitive_fields', []),
                'subjectTemplate': attrs['options'].get(
                    'mail:subject_template') or DEFAULT_SUBJECT_TEMPLATE.template,
                'securityToken': attrs['options'].get('sentry:token') or obj.get_security_token(),
                'securityTokenHeader': attrs['options'].get('sentry:token_header'),
                'verifySSL': bool(attrs['options'].get('sentry:verify_ssl', False)),
                'scrubIPAddresses': bool(attrs['options'].get('sentry:scrub_ip_address', False)),
                'scrapeJavaScript': bool(attrs['options'].get('sentry:scrape_javascript', True)),
                'organization': attrs['org'],
                'plugins': serialize(
                    [
                        plugin for plugin in ioc.app.plugins.configurable_for_project(obj, version=None)
                        if plugin.has_project_conf()
                    ],
                    user,
                    PluginSerializer(obj)
                ),
                'platforms': attrs['platforms'],
                'processingIssues': attrs['processing_issues'],
                'defaultEnvironment': attrs['options'].get('sentry:default_environment'),
                'relayPiiConfig': attrs['options'].get('sentry:relay_pii_config'),
            }
        )
        return data


class SharedProjectSerializer(Serializer):
    def serialize(self, obj, attrs, user):
        from sentry import features

        feature_list = []
        for feature in ():
            if features.has('projects:' + feature, obj, actor=user):
                feature_list.append(feature)

        return {
            'slug': obj.slug,
            'name': obj.name,
            'color': obj.color,
            'features': feature_list,
            'organization': {
                'slug': obj.organization.slug,
                'name': obj.organization.name,
            },
        }
import { BrowserModule } from '@angular/platform-browser';
import { NgModule } from '@angular/core';
import { environment } from '../environments/environment';
import { FormsModule, ReactiveFormsModule } from '@angular/forms';
import { HttpClientModule, HTTP_INTERCEPTORS } from '@angular/common/http';
import { EffectsModule } from '@ngrx/effects';
import { StoreModule } from '@ngrx/store';
import { StoreDevtoolsModule } from '@ngrx/store-devtools';
import * as fromStore from './store';
import { AppRoutingModule } from './app-routing.module';
import { AppComponent } from './app.component';
import { ChatInboxComponent } from './chat-inbox/chat-inbox.component';
import { LoginComponent } from './login/login.component';
import { HomeComponent } from './home/home.component';
import { JwtInterceptor } from './util/jwt.interceptor';
import { ErrorInterceptor } from './util/error.interceptor';
import { AuthenticationService, ChatService, UserService } from './services';

@NgModule({
  declarations: [
    AppComponent,
    // ChatInboxComponent was listed twice; a component may only be declared once
    ChatInboxComponent,
    LoginComponent,
    HomeComponent
  ],
  imports: [
    BrowserModule,
    AppRoutingModule,
    FormsModule,
    ReactiveFormsModule,
    HttpClientModule,
    StoreModule.forRoot({ auth: fromStore.authReducer, user: fromStore.usersReducer }),
    EffectsModule.forRoot([...fromStore.featureEffects]),
    StoreDevtoolsModule.instrument({ maxAge: 25, logOnly: environment.production }),
  ],
  providers: [
    AuthenticationService,
    ChatService,
    UserService,
    { provide: HTTP_INTERCEPTORS, useClass: JwtInterceptor, multi: true },
    { provide: HTTP_INTERCEPTORS, useClass: ErrorInterceptor, multi: true }
  ],
  bootstrap: [AppComponent]
})
export class AppModule { }
import { UnitTestTree } from '@angular-devkit/schematics/testing'; import { copyFileFromPWA, createApplication, createSchematicRunner } from '../utils/testHelper'; import { PWAAddressFormConfigurationOptionsSchema as Options } from './schema'; describe('Address Form Configuration Schematic', () => { const schematicRunner = createSchematicRunner(); const defaultOptions: Options = { project: 'bar', countryCode: 'EX', }; let appTree: UnitTestTree; beforeEach(async () => { appTree = await createApplication(schematicRunner) .pipe(copyFileFromPWA('src/app/shared/formly-address-forms/formly-address-forms.module.ts')) .toPromise(); }); it('should create an address form configuration and register it in the module', async () => { const options = { ...defaultOptions }; const tree = await schematicRunner.runSchematicAsync('address-form-configuration', options, appTree).toPromise(); const files = tree.files.filter(x => x.search('formly-address-forms') >= 0); expect(files).toContain('/src/app/shared/formly-address-forms/configurations/ex/address-form-ex.configuration.ts'); expect(files).toContain('/src/app/shared/formly-address-forms/formly-address-forms.module.ts'); expect(tree.readContent('/src/app/shared/formly-address-forms/formly-address-forms.module.ts')).toContain( '{ provide: ADDRESS_FORM_CONFIGURATION, useClass: AddressFormEXConfiguration, multi: true }' ); }); });
Due to severe articular destruction of the glenohumeral joint with corresponding soft tissue changes, arthrodesis has been performed in 24 patients since 1973. After an average period of 5.4 years, 22 patients were reviewed. Primary bone consolidation was achieved in all cases. During the observation period, one of the following additional operations had to be performed on the involved shoulder in 7 cases: subcapital osteotomy due to excessive abduction (2 cases), resection of the acromio-clavicular joint due to painful osteoarthritis (2 cases), partial resection of a laterally projecting acromion (1 case), and removal of a proximally protruding fixation plate (4 cases). 18 patients experienced a marked improvement after arthrodesis. No improvement in 2 cases and a deterioration in 2 cases were the result of suboptimal positioning of the arthrodesis. A position of 20° of abduction, 20° to 30° of flexion and 45° of internal rotation proved to be functionally the most beneficial. Patients are pain-free only if the optimal position of the arthrodesis is achieved. Excessive abduction and flexion were generally experienced as unpleasant. Deviation in the rotational position leads to functional restriction. The remaining function after arthrodesis is often overrated.
Type 2 Diabetes Mellitus with Diabetic Gastroparesis and Occasional Liver Dysfunction Treated by Low Carbohydrate Diet Diabetes mellitus (DM) has wide-ranging neurological complications, which commonly include upper gastrointestinal (GI) symptoms, impaired motility, impaired gastric emptying (GE) and diabetic gastroparesis (DG). The patient was a 64-year-old man with type 2 diabetes (T2D) of 22 years' duration. The patient weighed 74 kg, with body mass index (BMI) 23.6 kg/m2, hemoglobin A1c (HbA1c) 9.2%, ankle brachial index (ABI) 1.19/1.23, AST 25 U/L, ALT 23 U/L, GGT 48 U/L (<86), a normal chest X-ray, and a negative electrocardiogram (ECG). When the patient was treated with a low carbohydrate diet (LCD), a significant reduction in body weight and HbA1c was observed. Abdominal computerized tomography (CT) revealed multiple gallstones, a dilated common bile duct and impaired GE, indicating DG. On endoscopic examination, a large amount of food residue was found in the stomach after 13 hours of fasting, due to DG. Treatment for DG was initiated with mosapride citrate hydrate. During the clinical course, occasional liver dysfunction was observed twice, associated with elevation of AST to 196 U/L and GGT to 373 U/L without symptoms, indicating cholestasis-type dysfunction. Several possible triggers may be involved in these episodes, such as gallstones, an enlarged stomach volume due to DG, overeating, overdrinking, and other factors. This report will hopefully become a useful reference for developing diabetic practice and research.
Alexandria Real Estate Equities Business Most of Alexandria's properties are offices and laboratories, and it focuses on renting to life science and technology companies. As of December 31, 2018, Alexandria had 22 million square feet of operating space located in more than 230 properties. Tenants include Pfizer, Google, Eli Lilly and GlaxoSmithKline. It has properties in various regions throughout the United States, including Greater Boston, San Francisco, New York City, San Diego, Seattle, Maryland, and Research Triangle. As of December 31, 2018, 37% of rental revenue was from properties in the Greater Boston area, 23% was from properties in the San Francisco area, and 16% was from properties in the San Diego area. Alexandria tries to locate its properties around universities, as opposed to more distant suburban locations. Alexandria also has a venture capital arm, Alexandria Venture Investments, which primarily invests in life sciences firms. History Alexandria was founded in 1994 by Joel Marcus. It was named after Alexandria, Egypt, because of that city's connection to science. It originally began as a collaboration between Marcus and Jacobs Engineering to provide laboratories and office space to biotech firms. Its first purchase was of four buildings in San Diego. It completed an IPO in 1997, raising $155 million. In 2017, it joined the S&P 500. Properties Alexandria has a cluster of properties in Kendall Square in Cambridge, Massachusetts, and is a major landlord in that neighborhood. Its original investment in the area was in 2002, and it has invested at least $2.3 billion in properties in the neighborhood since then. Its largest campus is the Alexandria Center at Kendall Square, and other campuses include Alexandria Technology Square and the Alexandria Center at One Kendall Square. The Alexandria Center for Life Science in Manhattan has a number of tenants in the biotech industry. Alexandria has a number of properties in the University City neighborhood of San Diego. Alexandria was the developer of the West Tower of the MaRS Discovery District in Toronto, which began construction in 2007. The company stopped construction in 2008, due to the economic downturn, and was bought out by the Ontario government in 2011.
#pragma once

#include <unordered_set>

#include <util/voxel_grid.hpp>
#include <env/scenario_component.hpp>
#include <scenarios/platforms.hpp>

namespace Megaverse {

struct CoordRange {
    int min, max;
};

inline CoordRange startEndCoord(int bboxMin, int bboxMax, int direction)
{
    if (direction == 1)
        return {bboxMax + 1, bboxMax + 1};
    else if (direction == -1)
        return {bboxMin - 1, bboxMin - 1};
    else
        return {bboxMin, bboxMax};
}

using Boxes = std::vector<BoundingBox>;

struct BBoxInfo {
public:
    BBoxInfo() = default;
    BBoxInfo(uint8_t type, ColorRgb color)
        : type{type}
        , color{color}
    {}

    bool operator <(const BBoxInfo &info) const
    {
        return type == info.type ? color < info.color : type < info.type;
    }

public:
    uint8_t type{};
    ColorRgb color{};
};

// comment this to disable voxel layout optimization, i.e. for ablation study
#define OPTIMIZE_VOXEL_LAYOUT

/**
 * Environments that use voxel grids for layouts or runtime checks should include this component.
 * @tparam VoxelT data stored in each non-empty voxel cell.
 */
template<typename VoxelT>
class VoxelGridComponent : public ScenarioComponent {
public:
    explicit VoxelGridComponent(Scenario &scenario, int maxVoxelsXYZ = 100, float minX = 0, float minY = 0, float minZ = 0, float voxelSize = 1)
        : ScenarioComponent{scenario}
        , grid{size_t(maxVoxelsXYZ), {minX, minY, minZ}, voxelSize}
    {
    }

    void reset(Env &, Env::EnvState &) override { grid.clear(); }

    void addPlatform(const Platform &p, ColorRgb layoutColor, ColorRgb wallColor, bool drawWalls = true)
    {
        for (auto &bb : p.layoutBoxes)
            addBoundingBox(bb.boundingBox(), VoxelState::generateType(true, true), TERRAIN_NONE, layoutColor);
        for (auto &bb : p.wallBoxes)
            addBoundingBox(bb.boundingBox(), VoxelState::generateType(true, drawWalls), TERRAIN_NONE, wallColor);
        for (auto &[terrainType, v] : p.terrainBoxes)
            for (auto &bb : v)
                addTerrainBoundingBox(bb.boundingBox(), terrainType);
    }

    template<typename... Args>
    void addBoundingBox(const BoundingBox &bb, Args&&... args)
    {
        for (int x = bb.min.x(); x < bb.max.x(); ++x)
            for (int y = bb.min.y(); y < bb.max.y(); ++y)
                for (int z = bb.min.z(); z < bb.max.z(); ++z)
                    grid.set({x, y, z}, makeVoxel<VoxelT>(std::forward<Args>(args)...));
    }

    template<typename... Args>
    void addTerrainBoundingBox(const BoundingBox &bb, int terrain)
    {
        for (int x = bb.min.x(); x < bb.max.x(); ++x)
            for (int y = bb.min.y(); y < bb.max.y(); ++y)
                for (int z = bb.min.z(); z < bb.max.z(); ++z) {
                    const VoxelCoords coords{x, y, z};
                    if (!grid.hasVoxel(coords))
                        grid.set({x, y, z}, VoxelT());

                    grid.get(coords)->terrain |= terrain;
                }
    }

    std::map<BBoxInfo, Boxes> toBoundingBoxes()
    {
        const static Magnum::Vector3i directions[] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};

        std::unordered_set<VoxelCoords> visited;
        const auto gridHashMap = grid.getHashMap();

        std::map<BBoxInfo, Boxes> boxesByVoxelType;

        for (auto it : gridHashMap) {
            const auto &coord = it.first;
            const auto &voxel = it.second;
            const auto voxelType = voxel.voxelType;
            const auto color = voxel.color;

            if (visited.count(coord)) {
                // already processed this voxel
                continue;
            }

            visited.emplace(coord);

            BoundingBox bbox{coord, coord};
            std::vector<VoxelCoords> expansion;

#ifdef OPTIMIZE_VOXEL_LAYOUT
            // try to expand the parallelepiped in every direction as far as we can
            for (auto direction : directions) {
                for (int sign = -1; sign <= 1; sign += 2) {
                    auto d = direction * sign;
                    bool canExpand = true;

                    // expanding in a specific direction as far as we can
                    while (true) {
                        const auto xlim = startEndCoord(bbox.min.x(), bbox.max.x(), d.x());
                        const auto ylim = startEndCoord(bbox.min.y(), bbox.max.y(), d.y());
                        const auto zlim = startEndCoord(bbox.min.z(), bbox.max.z(), d.z());

                        expansion.clear();

                        for (auto x = xlim.min; x <= xlim.max; ++x)
                            for (auto y = ylim.min; y <= ylim.max; ++y)
                                for (auto z = zlim.min; z <= zlim.max; ++z) {
                                    const VoxelCoords coords{x, y, z};
                                    const auto v = grid.get(coords);
                                    if (!v || v->voxelType != voxelType || v->color != color || visited.count(coords)) {
                                        // we could not expand in this direction
                                        canExpand = false;
                                        goto afterLoop;
                                    }

                                    expansion.emplace_back(coords);
                                }

                        afterLoop:

                        if (!canExpand)
                            break;

                        for (auto newVoxelCoord : expansion) {
                            visited.emplace(newVoxelCoord);
                            bbox.addPoint(newVoxelCoord);
                        }
                    }
                }
            }
#else
            UNUSED(directions);
#endif

            // finished expanding in all possible directions
            // the bounding box defines the parallelepiped completely filled by solid voxels
            // we can draw only this parallelepiped (8 vertices) instead of drawing individual voxels, saving a ton of time
            boxesByVoxelType[{voxelType, color}].emplace_back(bbox);
        }

        return boxesByVoxelType;
    }

public:
    VoxelGrid<VoxelT> grid;
};

}
package io.github.ulisse1996.jaorm.entity.event;

public interface PrePersist<X extends Exception> {

    void prePersist() throws X;
}
package com.dapeng.qq4j.demo.demo1;

import com.dapeng.qq4j.face.IMsgHandler;

/**
 *
 * @author jinpeng.zou
 * @created: February 11, 2018
 *
 */
public class MyTest {

    public static void main(String[] args) {
        String qrPath = "D://qq4j"; // path where the login QR code image is saved; create this directory locally first
        IMsgHandler msgHandler = new SimpleDemo(); // a class implementing the IMsgHandler interface
        // Wechat wechat = new Wechat(msgHandler, qrPath); // [inject]
        // wechat.start(); // start the service; a QR code image is generated under qrPath -- scan it to log in.
        //                 // Note the QR code expires if not scanned in time; it refreshes automatically on expiry,
        //                 // so you may need to reopen the image.
    }
}
You will hear pundits analyze the New Hampshire primaries and conclude that the political “extremes” are now gaining in American politics – that the Democrats have moved to the left and the Republicans have moved to the right, and the “center” will not hold. Baloney. The truth is that the putative “center” – where the Democratic Leadership Council and Bill Clinton’s “triangulation” of the 1990s found refuge, where George W. Bush and his corporate buddies and neoconservative advisers held sway, and where Barack Obama’s Treasury Department granted Wall Street banks huge bailouts but didn’t rescue desperate homeowners – did a job on the rest of America, and is now facing a reckoning. The “extremes” are not gaining ground. The anti-establishment ground forces of the American people are gaining. Some are so fed up they’re following an authoritarian bigot. Others, more wisely, are signing up for a “political revolution” to take back America from the moneyed interests. That’s the real choice ahead.
Hallux Valgus Correction with Proximal Metatarsal Osteotomy: Two-year Follow-up We evaluated the results of 33 feet in 23 patients who underwent a basilar crescentic osteotomy with a modified McBride procedure with a minimum 24-month follow-up. The average hallux valgus angle improved from 37.5° to 13.8° and the intermetatarsal 1-2 angle from 14.9° to 4.7°. The angle of declination of the first metatarsal was found to have dorsiflexed an average of 6.2°. Unfortunately, osteotomies secured with staples dorsiflexed to a greater degree. Bilateral foot surgery produced results similar to those with unilateral procedures. Four of our patients developed a hallux varus (range 2-8°); however, none were dissatisfied at the time of evaluation. Although this bunion procedure resulted in more prolonged swelling and pain than a distal osteotomy, it should be considered for more complex deformities to avoid the failure that a distal metatarsal osteotomy might produce given a high 1-2 intermetatarsal angle or a high hallux valgus angle.
package org.jabref.gui.preftabs;

import javafx.scene.text.Font;

/**
 * This class is used to save the font size of all the controls in the preferences dialog
 */
public class FontSize {

    public static Font bigFont = new Font(14);
    public static Font smallFont = new Font(10);
}
import { AxiosResponse } from "axios"; import ApiClient from "@/services/ApiClient"; import HubSpotFormSubmissionApiClient from "@/services/HubSpotFormSubmissionApiClient"; import { HSFormData, HSFormDataPayload, HSSuccessResponse } from "@/models/models"; export default { getForm(): Promise<AxiosResponse<HSFormData>> { return ApiClient.get(`/crm/form/${import.meta.env.VITE_APP_HUBSPOT_CONTACT_US_FORM_ID}`); }, submitForm(formData: HSFormDataPayload): Promise<AxiosResponse<HSSuccessResponse>> { return HubSpotFormSubmissionApiClient.post( `/${import.meta.env.VITE_APP_HUBSPOT_CONTACT_US_FORM_PORTAL_ID}/${ import.meta.env.VITE_APP_HUBSPOT_CONTACT_US_FORM_ID }`, formData ); } };
The Wrist Joint in Rheumatoid Arthritis Hand function is dependent upon the stability of the wrist joint [1]. The wrist joint is thus a key joint of the upper limb, and must be pain-free for normal hand function and adequate strength of grip [2]. Wrist involvement is frequently characteristic of rheumatoid arthritis, with tenosynovitis of the wrist extensor tendons evident in the early stages of the disease [3]. Severe hand deformities with ulnar deviation of the metacarpo-phalangeal joints, subluxation of the carpals and ruptured tendons are the result of extensive wrist disease. Rheumatoid disease of the wrist and its effect on progressive hand deformity has been the subject of numerous investigations [4,5,6,7,8]. The purpose of this paper is to review the literature on the rheumatoid wrist in view of the current occupational therapy practice of splinting the affected wrists for functional positioning and the prevention of further wrist deterioration.
def fps(self, *args): return tuple(_fp_constify(self.struct, arg) for arg in args)
M & Co on Broad Street will be inviting people to watch the show on Tuesday, October 16, at 7pm; tickets cost £6. The show will feature local models who will parade down the catwalk to display womenswear, menswear and children’s clothes in day and party styles. This year The Youth Counselling Project aims to provide therapy for around 40 students. The service is operated through Seaford schools, whose teams identify around 40 children who would benefit from spending time with trained counsellors. Each child receives 10 sessions, costing £400 per package. A number of other events are being held to help raise the money. Hairdresser Penny Jenner is holding a charity event on Wednesday, November 14 at her salon, Sassy Hair and Beauty, in the High Street. Clients will be asked to make a donation instead of paying the salon. For more information on how to fundraise for this charity visit http://tycp.org.uk/you-can-help.html.
Investigation of voltage and potential gradient of arc column in fluorocarbon gas and its gas mixtures Because of its high dielectric strength, good partial discharge capability, low global warming potential (GWP), zero ozone depletion potential (ODP) and non-toxicity, fluorocarbon gas is a possible substitute for SF6 gas, and can hopefully be used in electrical equipment such as GIL and C-GIS. Arc voltage and potential gradient are useful in arc modelling and in the computational investigation of gas-insulated apparatus, such as cubicle-type gas-insulated switchgear (C-GIS), and the potential gradient can be used to estimate the arc energy generated in a short-circuit current fault. In this study, the voltage and potential gradient of the arc column in fluorocarbon gas and its gas mixtures with different fluorocarbon ratios were measured. The experiment was performed on a single-frequency oscillating loop. It is shown that the arc voltage of fluorocarbon gas and its gas mixtures increases with arc length. Moreover, the arc potential gradient was obtained from the arc voltage and arc length. The arc voltage gradient decreases with peak arc current. Besides, as the mixture ratio of fluorocarbon gas increases, the arc voltage gradient becomes lower. Finally, these results are compared with SF6 gas: the arc voltage gradient of fluorocarbon gas is higher than that of SF6 gas. These results will provide a useful reference for future studies of fluorocarbon gas mixture C-GIS.
Ten Years of New Labour: Workplace Learning, Social Partnership and Union Revitalization in Britain The establishment of a role in workplace learning has been perceived as one of the achievements of trade unions under New Labour. This article analyses the part the Trades Union Congress (TUC) has played in public policy since 1997. It examines its attempts to influence government and develop social partnership and statutory backing for vocational training. It assesses its degree of success and considers whether the TUC's role is best characterized in terms of social partnership or as a rediscovery of the unions' public administration function. It reviews the literature which suggests that involvement in learning stimulates union revitalization. The article concludes that the TUC has failed to attain significant influence over public policy. Rather it has delivered policy determined by government with priority accorded to employer predilections. A public administration role focused on the Union Learning Fund has provided the TUC with a new, secondary function, which provides some compensation for the failure of its primary agenda. Nonetheless, on the evidence, involvement in workplace learning appears an implausible path to union revitalization.
package AddProduct;

import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.FindBy;
import org.openqa.selenium.support.How;
import org.openqa.selenium.support.PageFactory;

public class AddProductElements {

    WebDriver driver;

    @FindBy(how = How.XPATH, using = "//a[@href=' /shopieasy/admin/product/addProduct']")
    WebElement AddProduct;

    @FindBy(how = How.XPATH, using = "//*[@id='productName']")
    WebElement ProductName;

    @FindBy(how = How.XPATH, using = "//*[@id='productDescription']")
    WebElement ProductDescription;

    @FindBy(how = How.XPATH, using = "//*[@id='productCategory1']")
    WebElement ProductCategory;

    @FindBy(how = How.XPATH, using = "//*[@id='productManufacturer']")
    WebElement ProductManufacturer;

    @FindBy(how = How.XPATH, using = "//*[@id='productPrice']")
    WebElement ProductPrice;

    @FindBy(how = How.XPATH, using = "//*[@id='unitStock']")
    WebElement UnitStock;

    @FindBy(how = How.XPATH, using = "//button[@type='submit']")
    WebElement AddProductSubmit;

    public AddProductElements(WebDriver driver) {
        this.driver = driver;
        // Required for the @FindBy annotations to take effect; without this
        // call the WebElement fields remain null.
        PageFactory.initElements(driver, this);
    }
}
Contrasting features of hydroclimatic teleconnections and the predictability of seasonal rainfall over east and west Japan

Hydroclimatic teleconnections between global sea surface temperature (SST) anomaly fields and monthly rainfall over east and west Japan (divided along 138°E longitude) are identified for summer (June-August) and winter (December-February) using the concept of the global climate pattern (GCP). The analysis indicates that the hydroclimatic teleconnections over both regions vary at both intra- and inter-seasonal time scales. In addition, the teleconnections over the two regions have differing origins. The teleconnection features associated with rainfall anomalies over west Japan have origins in the tropical Pacific and Indian oceans, whereas those over east Japan are associated with high-latitude SST anomalies. The early summer (winter) rainfall over west Japan is linked to the El Niño Modoki (La Niña Modoki) phenomena, whereas the early summer and winter rainfall anomalies over east Japan are associated with the SST anomaly over the eastern subtropical Pacific and South Pacific oceans, respectively. Having identified the teleconnections, two prediction model approaches were developed to forecast the rainfall over both east and west Japan: a machine-learning approach, namely support vector regression (SVR), and a hybrid graphical modelling/C-Vine copula (GM-Copula) approach. The predictors were derived from the monthly SST anomalies at different lags (1-6 months); whereas the hidden, nonlinear relationship was well captured by the SVR approach, the complex association was decidedly better captured by the GM-Copula approach. Hence, it is recommended for forecasting the rainfall over east and west Japan.

The variability of rainfall over East Asia, especially over Japan, is linked to sea surface temperature (SST) anomalies in the Pacific through Pacific-Japan (PJ) and East Asia-Pacific (EAP) teleconnections (Nitta, 1987; Feng and Hu, 2004; Huang, 2004). Studies also show statistically significant correlations between the Southern Oscillation Index (SOI) and monthly precipitation over East Asia; however, the association varies significantly from region to region. The impact of global climatic forcings, such as the ENSO, is well established for the low-latitude areas around the Pacific. However, the connection between El Niño and unusual conditions in mid-latitudes needs further understanding. The present study attempted to further the understanding of the teleconnections over the mid-latitudes, especially those affecting the precipitation over Japan, using the concept of the global climate pattern (GCP) (Chanda and Maity, 2015a). The climate of Japan is affected by several modes of climate variability. For example, the summer can be cooler and rainier during El Niño years, according to the Japan Meteorological Agency (JMA; https://www.jma.go.jp/jma/index.html). The IOD is also known to affect rainfall variability over Japan (Saji and Yamagata, 2003). The other modes of climate variability known to influence the rainfall in this region are the PDO, North Pacific Index (NPI) and DMI. However, it is difficult to explain completely the influence of large-scale climatic indices, as the regional impacts are determined by intrinsically complex mechanisms. In general, the interannual rainfall variability in Japan is locked to seasons. However, below- and above-normal rainfall events over east and west Japan are not concurrent, neither are their causal agents identical.
Furthermore, the rainfall patterns in the two seasons, June-August (summer) and December-February (winter), are quite distinct, and the hydroclimatic precursors of month-wise rainfall could be distinct even within the same season. Thus, inter- and intra-seasonal rainfall variations in the regions of east and west Japan require an individual assessment of the global hydroclimatic association in order to identify the relevant precursors and improve the prediction performance. This forms the focus of the present study. The JMA currently provides seasonal forecasts in terms of the probability of below-normal, normal and above-normal precipitation (or temperature) at the monthly, three-monthly and warm/cold seasonal scale using both dynamical methods, such as the Ensemble Prediction System JMA/MRI-CPS2, as well as statistical and empirical techniques. Different statistical and machine-learning methods have been used to carry out rainfall prediction in different regions. Methods such as step-wise regression, canonical correlation, artificial neural networks and genetic programming have also been used to develop prediction models at different spatiotemporal scales (Kane, 2006; Ashok and Saji, 2007; Kashid and Maity, 2012). A machine-learning technique such as support vector regression (SVR) has been effectively used for several hydrological predictions (Bhagwat and Maity, 2012; Zakaria and Shabri, 2012; Hosseini and Mahjouri, 2016). For a detailed review of the applications of different machine-learning approaches in hydrology, see Raghavendra and Deka. However, a limitation of most existing models is their inability to identify the complex and dynamic associations among the large-scale climatic indices and rainfall. In fact, some of the variables in the entire set of the predictor pool may often provide redundant information for rainfall prediction. Thus, prioritizing the relevant inputs from the pool of possible predictors, through a conditional independence structure among the variables, is necessary. A graphical modelling (GM) approach can be effectively used in this regard because it offers a conditional independence structure for parsimonious predictor selection. In the present study, an alternative approach is developed involving (a) the identification of the hydroclimatic teleconnections specifically associated with season-wise monthly rainfall; and (b) the use of such information as effective predictors in advanced data-driven approaches. The objectives of the present study are: to extract the hydroclimatic teleconnection features from global SST fields that influence inter- and intra-seasonal rainfall variability in Japan (east and west) using the concept of the GCP; and to use the hydroclimatic teleconnection information for the development of a model for the season-wise prediction of monthly rainfall for Japan (east and west). The GCP approach provides a methodology to extract global hydroclimatic precursors from several zones to obtain a comprehensive predictor pool. Next, two different methods are adopted for the prediction of regional rainfall. The first is based on a machine-learning approach, namely the SVR, where all the predictors identified through the GCP are used irrespective of redundant information. In the second approach, the prediction is attempted using a hybrid graphical modelling/C-Vine copula (GM-Copula) approach by pruning the predictors using the conditional independence structure.
Both approaches have merits, and their potential needs to be assessed for the prediction of rainfall over east and west Japan. The paper is organized as follows. Section 2 presents the study area and data used in the analysis. Section 3 presents the methodological approach. Section 4 provides the results and discussions. Section 5 concludes.

| STUDY AREA AND DATA
East Japan (lying between 32°N and 46°N and between 138°E and 144°E) and west Japan (lying between 26°N and 40°N and between 126°E and 138°E) are the domains of current interest. Figure 1 shows the map and spatial extents of the two study domains. As such, there is no physical explanation for dividing east and west Japan through 138°E longitude except that the nature of rainfall over east and west Japan is different. Various methods have been used by researchers to identify regions with spatially coherent rainfall patterns in different countries (Lee and Julien, 2016). In Japan, Ohba et al. classified the weather patterns during the Baiu season using a self-organizing map (SOM). The present study shows a difference in the precipitation to the east and west of 138°E. West Japan experiences higher rainfall. Also, the intense high-frequency rainfall of 150 mm/day is mostly confined to west Japan. In general, the climate of Japan is different over these two regions. For details, see the webpage of the JMA (http://www.data.jma.go.jp/gmd/cpd/longfcst/en/tourist.html, accessed June 2019). Furthermore, as per the Köppen-Geiger climate classification, Japan has three climatic zones: temperate without a dry season and with a hot summer (Cfa) (a humid subtropical climate), covering most parts of west Japan; cold without a dry season and with a hot summer (Dfa) (a humid continental climate); and cold without a dry season and with a warm summer (Dfb) (a humid continental climate). The climatic zones Dfa and Dfb cover most parts of east Japan and Hokkaido. Following this classification, the line dividing the climatic zones Cfa and Dfa/Dfb travels approximately along 138°E. Thus, 138°E longitude is used as a separation line that approximately divides the country into east and west regions.

Monthly rainfall for east and west Japan is obtained from "Asian Precipitation-Highly-Resolved Observational Data Integration Towards Evaluation of Water Resources" (APHRODITE), which uses a high-density, quality-controlled station network. The below- and above-normal precipitation events for the chosen study areas are identified by standardizing the monthly rainfall and characterizing it in terms of the standardized precipitation anomaly index (SPAI) (Chanda and Maity, 2015b) using the monthly rainfall data for the period 1979-2015. The SST data used are the National Oceanic and Atmospheric Administration (NOAA) Extended Reconstructed V5 data, which are available at a spatial resolution of 2° latitude × 2° longitude from 1948 to the present. These are derived from the International Comprehensive Ocean-Atmosphere Dataset (ICOADS). The data set for the period 1979-2015 was used.

Figure 1. Study area map showing the latitude/longitude extents of east and west Japan

| METHODOLOGY
The analysis was carried out for two seasons, namely boreal summer (summer, hereafter; June-August) and boreal winter (winter, hereafter; December-February), considering the two regions of Japan (east and west). Thus, there are a total of six parallel analyses for each region since both seasons have three months each.
| Categorization of monthly rainfall
To identify the below- and above-normal monthly rainfall, it was necessary to standardize the monthly precipitation. For this purpose, the SPAI is used, which is suitable for characterizing below- and above-normal rainfall events for periodic as well as non-periodic precipitation series (Chanda and Maity, 2015b). To obtain the index values, the month-wise precipitation anomalies were calculated as

y_{i,j} = (x_{i,j} - x̄_j) / s_j

where y_{i,j} is the precipitation anomaly for the i-th year and j-th time step of the year; x_{i,j} is the precipitation for the i-th year and j-th time step of the year; and x̄_j and s_j are the long-term mean and standard deviation of precipitation for the j-th time step of the year. The anomalies were fitted to an empirical probability distribution (a gamma distribution is not used as it cannot take negative values) and then transformed to normal variates giving index values between −∞ and ∞. Values < 0 indicate the dry side, while values > 0 indicate the wet side.

For each of east and west Japan, the SPAI (at a one-month temporal scale) is calculated using monthly rainfall data from 1979 to 2015, considering 1979-2008 (30 years) as the base period for calculating x̄_j and s_j. The two seasons comprising summer and winter were then analysed. Considering each month of summer and winter, SPAI values < −0.8 are designated as below-normal events, those between −0.8 and 0.8 are designated as normal events, while those > 0.8 are designated as above-normal events. This categorization is motivated by the US Drought Monitor classification (https://droughtmonitor.unl.edu/) based on thresholds of the Standardized Precipitation Index (SPI) (McKee et al., 1993, 1995).

Figure 2. (a) 1-6 months lagged global fields of the sea surface temperature (SST) anomaly difference obtained by subtracting the mean global field associated with above-normal rainfall events from the mean global field associated with below-normal rainfall events in June over west Japan; (b) same as for (a), but in July over west Japan; and (c) same as for (a), but in August over west Japan

| Global climate pattern (GCP)
The GCP approach was applied in a previous study for the Indian subcontinent. It was observed that the use of a comprehensive GCP as the input improved the future categorization of above- and below-normal rainfall events compared with the use of known existing teleconnection patterns (Chanda and Maity, 2015a). In the present study, the global SST fields were inspected at several preceding time steps (1-6 months) for each below- and above-normal rainfall event in the concerned study area. The composites of the SST anomaly fields associated with all below-normal rainfall events were obtained during the base period, that is, 1979-2008. Thus, the mean anomaly field associated with below-normal rainfall events for different temporal lags is obtained. A similar technique is applied to obtain the mean anomaly field for above-normal rainfall events at different temporal lags. For any given lag, the mean anomaly field of above-normal rainfall events is spatially subtracted (grid-point wise) from the mean anomaly field of below-normal rainfall events to obtain the mean anomaly difference field, which reveals the climatic signals/patterns. Such patterns are obtained for lags of 1-6 months, considering the length of the expected teleconnections. The patterns revealed by the global fields of anomaly differences are inspected to select the influential zones; a compact sketch of these two steps (standardization and compositing) is given below.
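As a concrete illustration of the two steps just described, the following is a minimal sketch (not the authors' code). Here monthly_rainfall is a hypothetical (n_years, 12) array used for the month-wise standardization underlying the SPAI, sst_anom is a (n_months, n_lat, n_lon) array of monthly SST anomaly fields, and below_months/above_months are assumed integer time indices of the identified event months:

import numpy as np

def standardized_anomaly(monthly_rainfall):
    # monthly_rainfall: (n_years, 12), restricted to the base period by the caller;
    # month-wise standardization, the first step of the SPAI
    mean = monthly_rainfall.mean(axis=0)
    std = monthly_rainfall.std(axis=0, ddof=1)
    return (monthly_rainfall - mean) / std

def composite_difference(sst_anom, below_months, above_months, lag):
    # Mean SST anomaly field preceding below-normal events minus the mean
    # field preceding above-normal events, each taken `lag` months ahead.
    below = sst_anom[np.asarray(below_months) - lag].mean(axis=0)
    above = sst_anom[np.asarray(above_months) - lag].mean(axis=0)
    return below - above  # (n_lat, n_lon) anomaly difference field

Mapping the returned field for lags of 1-6 months reproduces the kind of anomaly difference maps shown in Figures 2-5.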
Regular, rectangular zones are selected for convenience in further processing, although the regions with significant (at the 5% level) anomaly difference are of course irregular in shape. In order to avoid dilution of the signals, the regular zones are selected from the core of the significant irregular regions. The average SST anomaly from each zone at a certain lag comprises one predictor variable. Thus, the possible predictor pool to be used for the prediction of monthly rainfall is the SST anomaly data from different zones with different lags. In general, the predictors with lower lags (such as 1-3 months) may be more useful in a prediction model due to their immediacy. However, the prediction lead time will be longer if the information comes from a pattern with higher lags. Thus, a trade-off must be worked out between the two. In the present study, it is observed from the plots that at lag 1, distinct signals are perceptible for both cases (below and above normal). Hence, the anomaly zones from a one-month lag are mostly selected. When some strong pattern is visible at a higher lag, then that zone is also considered in the predictor pool. However, the zones at higher lags are not considered if that signature is already incorporated from lower lags.

Figure 3. (a) 1-6 months lagged global fields of the sea surface temperature (SST) anomaly difference obtained by subtracting the mean global field associated with above-normal rainfall events from the mean global field associated with below-normal rainfall events in December over west Japan; (b) same as for (a), but in January over west Japan; and (c) same as for (a), but in February over west Japan

Figure 4. (a) 1-6 months lagged global fields of the sea surface temperature (SST) anomaly difference obtained by subtracting the mean global field associated with above-normal rainfall events from the mean global field associated with below-normal rainfall events in June over east Japan; (b) same as for (a), but in July over east Japan; and (c) same as for (a), but in August over east Japan

| Development of the prediction model
3.3.1 | SVR approach
Support vector machine (SVM), which is a machine-learning approach, can be used for both classification and regression problems. The SVM for regression, also known as the SVR, has been previously used for several hydrological applications (Choy and Chan, 2003; Yu and Liong, 2007). For details of the methodology of the SVR, see Maity et al. In the present study, the SVR is used to predict the monthly rainfall using the identified predictor variables, which consist of the SST anomalies from distinct zones for specific lags. While fitting the SVR model to the training data, the goal is to find a function y = f(x) such that any observation (y) does not deviate from the predicted value (ŷ) by more than a threshold value, known as the ε-margin, for the corresponding input/predictor data (x). Also, the function should be as flat as possible so as not to overfit the data. In this process, a positive numeric value, known as the regularization parameter (C), is optimized. The regularization parameter controls the penalty imposed on observations that lie outside the ε-margin. In the present study, both C and ε are optimized for different seasons and regions; a brief sketch of this tuning step is given below.
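A minimal sketch of the tuning step, assuming scikit-learn and hypothetical arrays X (lagged zone-averaged SST anomalies, one column per selected zone) and y (monthly rainfall of the target month and region); the grid values are illustrative, not the ones used in the paper:

import numpy as np
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

def fit_svr(X, y):
    # Scale the predictors, then jointly optimize C and epsilon by grid
    # search under k-fold cross-validation, as the paper does per season/region.
    pipe = make_pipeline(StandardScaler(), SVR(kernel="rbf"))
    grid = {
        "svr__C": [0.1, 1.0, 10.0, 100.0],        # regularization parameter C
        "svr__epsilon": [0.01, 0.1, 0.5, 1.0],    # width of the eps-insensitive margin
    }
    search = GridSearchCV(pipe, grid, cv=KFold(n_splits=3),
                          scoring="neg_root_mean_squared_error")
    search.fit(X, y)
    return search.best_estimator_

The fitted estimator can then be evaluated on a held-out fold with best_model.predict(X_test).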
| GM-Copula approach
The GM approach, as stated above, provides a conditional independence structure, also referred to as the graph structure, among the predictor and predictand variables. A graph consists of a set of nodes and edges, where each variable is a node and each edge is associated with a pair of nodes. The graph structure can be used to prioritize the inputs for the prediction model, hence reducing the redundancy of the model (Jordan, 2004; Bang-Jensen and Gutin, 2007; Whittaker, 2009). The conditional independence structure provides information on dependent (directly connected/parent to the target variable), independent (not connected to the target variable) and conditionally dependent (not directly connected to the target variable) predictors with respect to the target variable. The graph structure among the predictors (the SST anomalies from distinct zones for specific lags) and the target variable (monthly rainfall) is developed using the maximum likelihood approach. In this approach, initially a fully interconnected graph structure (also referred to as a saturated model) is considered, where all the nodes are connected to each other. Next, the edge exclusion deviance (EED) is used to test whether an edge can be eliminated from the saturated model. The threshold of the EED is 3.84 (at a 5% significance level with 1 degree of freedom), so the edges for which the EED < 3.84 are excluded. To check the acceptability of the obtained graph structure at a particular confidence level, a test statistic, known as the deviance, can be used (Dutta and Maity, 2018). Next, the predictors directly connected (parent variables) to the target variable are used to develop the prediction model, discarding the independent and conditionally independent variables as identified by the graph structure. The prediction model is developed using the C-Vine copula approach, in which a sequence of trees is identified to develop the conditional distribution of the target variable given the parents (Xiao, 2011). The selection of each tree is based on a maximum spanning tree algorithm, where edge weights are chosen to reflect the dependencies, and the final tree can be used for the prediction of the target variable given the input variables (Dutta and Maity, 2018).

Figure 5. (a) 1-6 months lagged global fields of the sea surface temperature (SST) anomaly difference obtained by subtracting the mean global field associated with above-normal rainfall events from the mean global field associated with below-normal rainfall events in December over east Japan; (b) same as for (a), but in January over east Japan; and (c) same as for (a), but in February over east Japan

| Threefold cross-validation
In the present study, threefold cross-validation is used to test the prediction performance of the developed model. The available data are divided into three sets (Set 1: 1979-1991, Set 2: 1992-2003 and Set 3: 2004-2015). Each threefold cross-validation is performed by considering two sets as the development period and the remaining set as the testing period.
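For the EED-based edge pruning described above, a compact sketch under Gaussian graphical-model assumptions follows (not the authors' code); data is a hypothetical array with the monthly rainfall in column 0 and the candidate SST predictors in the remaining columns. It uses the standard identity that the edge exclusion deviance for an edge equals -n ln(1 - r^2), where r is the partial correlation of the pair given all other variables:

import numpy as np

def edge_exclusion_deviances(data):
    # data: (n_samples, n_vars); partial correlations come from the inverse
    # sample covariance (precision) matrix of the variables.
    n = data.shape[0]
    prec = np.linalg.inv(np.cov(data, rowvar=False))
    d = np.sqrt(np.diag(prec))
    partial = -prec / np.outer(d, d)   # pairwise partial correlations
    np.fill_diagonal(partial, 0.0)     # diagonal is irrelevant for edges
    return -n * np.log(1.0 - partial ** 2)

def retained_edges(data, threshold=3.84):  # chi-square(1) critical value, 5% level
    eed = edge_exclusion_deviances(data)
    keep = eed >= threshold
    np.fill_diagonal(keep, False)
    return keep

With this layout, retained_edges(data)[0, 1:] flags the parents of the target variable, which are the inputs passed on to the copula model.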
| RESULTS AND DISCUSSION
In order to extract the global hydroclimatic signals, below- and above-normal monthly rainfall events in the two regions of Japan during 1979-2008 are identified in terms of the SPAI. The months and years of below- and above-normal rainfall are reported in Table A1 (west Japan) and Table A2 (east Japan) in Appendix S1 in the additional supporting information. Note that the number of observed events varies from three to eight for different cases. These are indeed few in number, which is due to the short data length. However, global composites are developed with the SST anomaly difference fields for below- and above-normal events in each of the six months (Figures 2a-c to 5a-c). Thus, for each event, several predictors (regions) are identified across the globe. Similar plots are also prepared at the seasonal scale for comparison. However, these are presented only in Figures A1-A4 in Appendix S1, as it is observed that the signals are more prominent at the monthly time scales. Global general circulation model (GCM) experiments are underway to understand the teleconnections from the identified SST regions, and the results will be reported elsewhere. In the present study, the identified hydroclimatic signals are further used for the prediction of monthly rainfall in both seasons using the SVR- and GM-Copula-based approaches.

| West Japan: Hydroclimatic teleconnection for monthly rainfall in summer
The global fields of the SST anomaly difference between the below- and above-normal rainfall events in June-August are shown in Figure 2a-c, respectively, for west Japan at lags of 1-6 months. When comparing Figure 2a-c with the corresponding fields for seasonal rainfall (see Figure A1 in Appendix S1 in the additional supporting information), it can be observed that a distinct boomerang pattern of positive anomaly difference, usually associated with the El Niño/Southern Oscillation and evident in Figure A1 in Appendix S1, is even more distinct in Figure 2a (June). This pattern is also visible for events in July (Figure 2b), but not so much for events in August (Figure 2c). In June (Figure 2a), strong negative anomaly differences are observed in the southern Indian and southern Atlantic oceans, and also in the western Pacific, around 30°-40°N, up to a lag of five months, indicative of Pacific decadal variability. It is remarkable that these strong patterns are largely weakened in July (Figure 2b) and become completely opposite in nature (marked by positive anomaly differences in the southern Indian and Atlantic and western Pacific oceans) in August (Figure 2c). Overall, it may be observed that early summer (June) rainfall in west Japan is linked to central Pacific (CP) El Niño events (also known as El Niño Modoki) rather than the classic eastern Pacific (EP) El Niño. This is in agreement with previous observations of anomalous rainfall in East Asia during the EP El Niño in response to phase changes in the AMO (Yuan and Yang, 2012).

| West Japan: Hydroclimatic teleconnection for monthly rainfall in winter
When comparing Figure 3a-c with Figure A2 in Appendix S1 in the additional supporting information, more distinct features are observed in the month-wise analysis than in the seasonal analysis. Figure 3a shows that rainfall anomalies in early winter are linked to a pattern of colder CP SST anomalies, flanked by warmer west and east Pacific SST anomalies, representative of the La Niña Modoki phenomenon. On the other hand, events in January (Figure 3b) are associated with strong negative anomaly differences in the Indian Ocean rather than the Pacific Ocean, which may be related to the transition between El Niño and La Niña events. None of the Indo-Pacific SST features are too distinct for events in February (Figure 3c), which are associated more with negative anomaly differences in the tropical Atlantic Ocean in the Southern Hemisphere.
| East Japan: Hydroclimatic teleconnection for monthly rainfall in summer
It has been observed that, compared with the season-wise SST composites (see Figure A3 in Appendix S1 in the additional supporting information), the features in the month-wise SST composites (Figure 4a-c) are sharper and change quickly from month to month. There is a signature of a positive SST anomaly difference in the eastern subtropical Pacific Ocean, particularly in June and July. In June (Figure 4a), negative anomalies are prominent in the eastern Equatorial regions, while in July (Figure 4b), positive anomalies are seen in higher latitudes in the Southern Hemisphere. In August (Figure 4c), the anomaly feature is seen over a smaller spatial extent and is almost imperceptible beyond three months, unlike that for June and July. The positive SST anomaly difference in the western Pacific Ocean, near Indonesia and Papua New Guinea, as well as in the southern Indian Ocean (which may be indicative of the IOD formation), is strong for June (Figure 4a), but it gradually weakens and fades through July (Figure 4b) and August (Figure 4c).

Table 3. Season-wise performance statistics between the observed and support vector regression (SVR) predicted rainfall during the model development and testing periods considering threefold (Sets 1-3) cross-validation for east and west Japan

| East Japan: Hydroclimatic teleconnection for monthly rainfall in winter
It is observed that extensive positive anomaly differences in the south Pacific Ocean are associated with rainfall in December (Figure 5a) and February (Figure 5c), but not in January (Figure 5b). The signature of La Niña Modoki (cold anomalies in the CP, flanked by warm anomalies on both sides) is also evident. In the case of January, strong negative anomaly differences are noticed in the tropical Indian Ocean, which weaken after lag 3. Rainfall in February is associated with positive anomaly differences in the Indian Ocean, which peak at a four-month lag and weaken beyond that. Thus, the SST anomaly differences associated with the intra-seasonal events are remarkably contrasting, which explains the dilution of signals at the seasonal scale (December-February) (see Figure A4 in Appendix S1 in the additional supporting information). Next, the zones of importance are selected from the global climate fields for each of the six months from the anomaly difference fields of below- and above-normal rainfall. These are presented for east and west Japan in Tables 1 and 2, respectively. Another point is also important with respect to all the regions explained so far. In general, the SST anomaly regions near the Equator or in the tropical regions of the Pacific and Indian oceans (Table 1) affect west Japan through the PJ teleconnection. Considering east Japan, the SST anomaly regions are mostly located in the high latitudes (Table 2), so waves along the 200 hPa jet or the fluctuations of the subtropical high in the north Pacific mostly affect the region.

Figure 6. Comparison of the observed and support vector regression (SVR) predicted season-wise monthly rainfall for east and west Japan during the model testing period considering threefold (Sets 1-3) cross-validation. The correlation co-efficient (R) obtained for each case is also provided with the scatter plots.
That said, possible links between the patterns revealed by the plots and known teleconnections have been noted wherever possible; however, not all of the teleconnection patterns could be attributed to known large-scale circulation phenomena. There could be a bigger pool of atmospheric-oceanic climatic precursors which are not yet well known. Further investigation of these identified features is required, which is beyond the scope of the present study. However, the predictive potential of the identified teleconnection patterns is investigated here.

| Prediction model and performances
Two different approaches are adopted to assess the predictability of season-wise monthly rainfall: a machine-learning approach, namely the SVR, and a GM-Copula approach. The performances of these models are explored and discussed separately.

| SVR model
The strong SST anomaly signals identified in the discussion above (Tables 1 and 2) are used to develop the SVR prediction system. In order to optimize the model, both C and ε are optimized for different seasons and regions. If the training is optimal, the model performance during training and testing should be comparable. A threefold cross-validation is also carried out to check the model performance for different folds, as mentioned in the methodology. The results are presented in Table 3. The testing periods for each fold are identified as Set 1, Set 2 and Set 3, respectively. The SVR-predicted monthly rainfall is compared with the observed rainfall for summer and winter for all three folds (Sets 1-3), again as mentioned in the methodology. The scatter plots for all these cases are shown in Figure 6. The performance statistics, namely the correlation co-efficient (R), root mean squared error (RMSE), degree of agreement (Dr), Nash-Sutcliffe efficiency (NSE) and co-efficient of determination (R2), are presented in Table 3. The absence of overfitting is apparent from the results for almost all combinations of region, season and fold. It is observed that a reasonably good prediction performance is obtained using the SVR approach, although the SVR model is found to capture only the long-term monthly mean rainfall. In general, the performance is slightly better during the summer, but poorer in winter, for both east and west Japan.

Figure 7. Graph structures obtained for each month of summer and winter for west Japan. In each case, V1 is the target variable (rainfall of the month) and V2, V3, ..., V5 are SST1, SST2, ..., SST4, respectively (see Table 1 for details). The parents of the target variable are used for the prediction of monthly rainfall.

In the above-mentioned analysis, all SST zones identified through the GCP are used for the prediction of rainfall. In order to assess the prediction skill of individual predictors, another analysis is carried out where prediction models are developed using an individual predictor. The model performance for a typical month (June) and region (east Japan) for all three folds is presented in Table A3 in Appendix S1 in the additional supporting information. This typical example is taken up as a substantial number of SST zones are identified to influence the rainfall in June for east Japan. The results obtained using the individual inputs indicate significant variation in performance between the different folds (see Table A3 in Appendix S1). However, when using all the inputs (the last row of Table A3) for prediction, the model provides better and almost uniform performance across all the folds (considering all performance statistics). This is because the independent inputs each have a hydroclimatic association with the target, and this is how they are selected. They may be different manifestations of the same physical mechanism. In other words, there could be redundant information from multiple inputs, and it is clear that individual predictive potentials are not sufficient to identify the less informative and/or redundant inputs. Here lies the benefit of the GM, which effectively identifies the most informative inputs while sifting out the redundant ones. Hence, the GM-Copula hybrid model is adopted and the prediction performance is compared.
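For reference, the skill scores reported in Tables 3 and 4 can be computed as follows; this is a sketch, and Dr is assumed here to be Willmott's refined index of agreement, since the paper does not spell out its formula:

import numpy as np

def performance_stats(obs, pred):
    # obs, pred: 1-D arrays of observed and predicted monthly rainfall
    obs = np.asarray(obs, dtype=float)
    pred = np.asarray(pred, dtype=float)
    err = pred - obs
    r = np.corrcoef(obs, pred)[0, 1]
    rmse = np.sqrt(np.mean(err ** 2))
    nse = 1.0 - np.sum(err ** 2) / np.sum((obs - obs.mean()) ** 2)
    a = np.sum(np.abs(err))
    b = 2.0 * np.sum(np.abs(obs - obs.mean()))
    dr = 1.0 - a / b if a <= b else b / a - 1.0  # Willmott's refined index (assumed definition of Dr)
    # R2 is taken as the square of the correlation, which is how it is
    # commonly reported alongside NSE (an assumption, not stated in the paper)
    return {"R": r, "RMSE": rmse, "Dr": dr, "NSE": nse, "R2": r ** 2}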
However, while using all the inputs (the last row of Table A3) for prediction, the model provides better and almost uniform performances across all the folds (considering all performance statistics). This is because the independent inputs each have a hydroclimatic association with the target, and this is how these are selected. They may be the different manifestations of same physical mechanism. In other words, there could be redundant information from multiple inputs, and it is clear that individual predictive potentials are not sufficient to identify the less informative and/or redundant inputs. Here lies the benefit of the GM that effectively identifies the most informative inputs while sifting out the redundant ones. Hence, the GM-Copula hybrid model is adopted and the prediction performance is compared. F I G U R E 8 Graph structures obtained for each month for summer and winter for east Japan. In each case, V1 is the target variable (rainfall of the month) and V2, V3,, V10 are the SST1, SST2,, SST9, respectively (see Table 2 for details). The parents of the target variable are used for the prediction of monthly rainfall F I G U R E 9 Comparison of the observed and hybrid graphical modelling/C-Vine copula (GM-Copula) predicted season-wise monthly rainfall for summer considering west Japan: (a) time-series plot of the observed and predicted rainfall during the model testing period considering threefold (Sets 1-3) cross-validation; (b) scatter plot of the observed and predicted rainfall along with the correlation co-efficient (R) and co-efficient of variation (CV); and (c) box plot showing the variation in the observed and predicted rainfall for each month in summer 4.5.2 | GM-Copula hybrid model Using the identified SST zones (Tables 1 and 2), six graph structures are developed for each region (east and west Japan) and input variables (highlighted in bold) are selected based on the conditional independence among the variables. The graph structure obtained for each month, explaining the complete dependence structure among all the variables, for both the regions is shown in Figures 7 and 8, respectively. Selected input variables are different for each month of analysis with varying degree of associations. Based on the graph structures obtained for each month, probabilistic models are developed for prediction of the target variable (monthly rainfall) using the parents of the target variable as identified from the graph structure. The SST regions discarded from the input set are either "conditionally independent" or "independent" considering the monthly rainfall in east and west Japan. Note that only two to three predictors are in use considering both the seasons. Pruning down the input variables helps to avoid redundancy in the model as the same information may be provided by multiple variables, increasing the complexity of the model without improving the performance. 
After selection of the input variables, a C-Vine copula is used for the prediction of the monthly rainfall given the parent variables. As mentioned above, the prediction model is validated using threefold cross-validation. The season-wise monthly observed and predicted rainfalls for the different regions are compared using the results obtained for each fold (Figure 9 and Figure A5 in Appendix S1 in the additional supporting information, and Figure 10 and Figure A6 in Appendix S1). The prediction performance metrics are shown in Table 4.

FIGURE 10 Comparison of the observed and hybrid graphical modelling/C-Vine copula (GM-Copula) predicted season-wise monthly rainfall for winter for east Japan: (a) time-series plot of the observed and predicted rainfall during the model testing period considering threefold (Sets 1-3) cross-validation; (b) scatter plot of the observed and predicted rainfall along with the correlation coefficient (R) and coefficient of variation (CV); and (c) box plot showing the variation in the observed and predicted rainfall for each month in winter.

For both regions, better prediction performance is obtained for the summer. This is perhaps due to a stronger association between June-August rainfall and the SST anomalies (Figure 9 and Figure A6 in Appendix S1). The scatter plots depict good performance in predicting monthly rainfall, as the majority of the observed-predicted data points are distributed around the 1:1 line. The box plots show that the prediction model successfully captures the mean rainfall, and the variation is also appropriately captured for west Japan. In winter, the predictions are in general underestimated for both west and east Japan (Figure 10 and Figure A5 in Appendix S1). Certain peak values (February 1985, February 1990, January 1998, and so on) could not be captured by the model. However, the mean and variance, as shown by the box plot, are very well captured for west Japan (see Figure A5 in Appendix S1). The prediction performance is also satisfactory for east Japan (Figure 10). Comparing the observed rainfall during model development and testing for the third fold, it may be seen that the range and variation of rainfall during this season exhibit drastic temporal variations. These variations may be considered the reason for the slightly poorer performance of the model for the third fold in east Japan. Overall, however, the prediction model provides satisfactory performance: it appropriately captures the association between the variables and satisfactorily predicts the monthly rainfall for both east and west Japan. Thus, it is observed that the prediction performance is decidedly better for the GM-based approach during the testing period. It is therefore concluded that establishing a conditional dependence structure over the predictor pool is an important step in resolving the complexity and dimensionality of the model, something that may not be achieved by machine-learning algorithms alone. Note that periodic scrutiny may be necessary to update the model in order to cater for time-varying characteristics in the hydroclimatic teleconnection, if any (Rajeevan, 2001; Rajeevan et al., 2012; Dutta and Maity, 2018). The proposed methodology is applicable to any other geographical region; however, the extent of useful hydroclimatic information for seasonal prediction is expected to vary depending on the seasonal characteristics of regional rainfall and the areal extent of the study area.
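For reference, the performance statistics quoted in Tables 3 and 4 can be computed as below. The Dr variant is assumed to be the refined index of agreement of Willmott et al. (2012); the other formulas are standard.

import numpy as np

def scores(obs, sim):
    obs, sim = np.asarray(obs, float), np.asarray(sim, float)
    r = np.corrcoef(obs, sim)[0, 1]
    rmse = np.sqrt(np.mean((sim - obs) ** 2))
    nse = 1 - np.sum((sim - obs) ** 2) / np.sum((obs - obs.mean()) ** 2)
    a = np.sum(np.abs(sim - obs))
    b = 2 * np.sum(np.abs(obs - obs.mean()))
    dr = 1 - a / b if a <= b else b / a - 1   # refined index of agreement
    return {"R": r, "RMSE": rmse, "Dr": dr, "NSE": nse, "R2": r ** 2}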
5 | CONCLUSIONS

The study reveals the features of the hydroclimatic teleconnection between global sea surface temperature (SST) fields and rainfall over east and west Japan.

TABLE 4 Season-wise performance statistics between the observed and hybrid graphical modelling/C-Vine copula (GM-Copula) predicted rainfall during the model testing period considering threefold (Sets 1-3) cross-validation for east and west Japan.

The analysis reveals that the rainfall anomalies over west Japan are influenced by teleconnections originating in the tropical Pacific and Indian oceans, whereas the rainfall anomalies over east Japan are associated with high-latitude SST anomalies. The El Niño Modoki (La Niña Modoki) phenomena are found to influence the early summer (winter) rainfall over west Japan, whereas the early summer (June) and winter (December) rainfall over east Japan is associated with positive SST anomaly differences in the eastern subtropical Pacific and south Pacific oceans, respectively. In the present study, using the global climate pattern (GCP) approach, many teleconnection patterns influencing the rainfall of east and west Japan are identified. These go beyond the traditional teleconnection patterns due to the El Niño-Southern Oscillation (ENSO), El Niño Modoki, Atlantic Multidecadal Oscillation (AMO), Indian Ocean Dipole (IOD), and so on. The identified teleconnections could be beneficial in improving the prediction of rainfall over west and east Japan. The predictive potential of the identified teleconnection patterns for monthly rainfall variation in Japan is assessed. Prediction models are developed based on a machine-learning technique, support vector regression (SVR), and a hybrid graphical modelling/C-Vine copula (GM-Copula) approach, using the teleconnections identified through the GCP approach. The potential of SVR is appreciable, but the model based on the GM-Copula has superior performance in predicting the rainfall over west and east Japan. This is perhaps due to the conditional independence structure among the variables, which helps to prune the redundant information in the predictor pool and, finally, to develop a prediction model using the pruned predictor sets. Satisfactory performance of the prediction model is obtained for both regions and for all months of both seasons, with slightly better performance in summer. The results will be highly beneficial for operational forecasting of the monthly variation of rainfall over east and west Japan.
/// <summary>
///     Creates and enqueues a report containing the name and value pair of a Device Twin
///     reported property. The report is not actually sent immediately, but it is sent on the
///     next invocation of AzureIoT_DoPeriodicTasks().
/// </summary>
void AzureIoT_TwinReportState(const char *propertyName, size_t propertyValue)
{
    if (iothubClientHandle == NULL) {
        LogMessage("ERROR: client not initialized\n");
        return;
    }

    char *reportedPropertiesString = NULL;
    JSON_Value *reportedPropertiesRootJson = json_value_init_object();
    if (reportedPropertiesRootJson == NULL) {
        LogMessage("ERROR: could not create the JSON_Value for Device Twin reporting.\n");
        return;
    }

    JSON_Object *reportedPropertiesJson = json_value_get_object(reportedPropertiesRootJson);
    if (reportedPropertiesJson == NULL) {
        LogMessage("ERROR: could not get the JSON_Object for Device Twin reporting.\n");
        goto cleanup;
    }

    if (JSONSuccess !=
        json_object_set_number(reportedPropertiesJson, propertyName, propertyValue)) {
        LogMessage("ERROR: could not set the property value for Device Twin reporting.\n");
        goto cleanup;
    }

    reportedPropertiesString = json_serialize_to_string(reportedPropertiesRootJson);
    if (reportedPropertiesString == NULL) {
        LogMessage(
            "ERROR: could not serialize the JSON payload to string for Device "
            "Twin reporting.\n");
        goto cleanup;
    }

    if (IoTHubDeviceClient_LL_SendReportedState(
            iothubClientHandle, (unsigned char *)reportedPropertiesString,
            strlen(reportedPropertiesString), reportStatusCallback, 0) != IOTHUB_CLIENT_OK) {
        LogMessage("ERROR: failed to set reported property '%s'.\n", propertyName);
    } else {
        // propertyValue is a size_t, so use %zu (not %d) to avoid undefined behavior
        LogMessage("INFO: Set reported property '%s' to value %zu.\n", propertyName,
                   propertyValue);
    }

cleanup:
    if (reportedPropertiesRootJson != NULL) {
        json_value_free(reportedPropertiesRootJson);
    }
    if (reportedPropertiesString != NULL) {
        json_free_serialized_string(reportedPropertiesString);
    }
}
/*
 * Copyright (c) 2021 Hanbings / <NAME>.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.hanbings.moemark.service;

import io.hanbings.moemark.Server;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;

@SuppressWarnings("unused")
public class ConfigService {
    String path;

    public ConfigService() {}

    @SuppressWarnings("ResultOfMethodCallIgnored")
    public ConfigService(String path) {
        this.path = path;
        File file = new File(path);
        // Check whether the config file already exists
        if (file.exists()) {
            LoggerService.info("Config found.");
        } else {
            LoggerService.warn("Config not exists, create it ...");
            try {
                file.createNewFile();
                OutputStream outputStream = new FileOutputStream(file);
                outputStream.write(Objects.requireNonNull(Server.class
                        .getClassLoader()
                        .getResourceAsStream("server.properties"))
                        .readAllBytes());
                outputStream.flush();
                outputStream.close();
                LoggerService.error("Created config. The server will stop; edit server.properties to configure it.");
                // Exit here so the configuration file can be filled in first
                System.exit(0);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    // Read the value for the given key
    public String get(String key) {
        Properties properties = new Properties();
        try {
            InputStream inputStream = new BufferedInputStream(new FileInputStream(path));
            properties.load(inputStream);
            String value = properties.getProperty(key) + "";
            if (("null").equals(value)) {
                LoggerService.warn("Config " + key + " read failed.");
                return null;
            }
            inputStream.close();
            return value;
        } catch (IOException e) {
            LoggerService.warn("Config " + key + " read failed.");
            return null;
        }
    }

    // Read all entries from the properties file
    public Map<String, String> get() {
        Map<String, String> map = new HashMap<>();
        try {
            Properties properties = new Properties();
            InputStream inputStream = new BufferedInputStream(new FileInputStream(path));
            properties.load(inputStream);
            // Enumerate the property names found in the config file
            Enumeration<?> enumeration = properties.propertyNames();
            while (enumeration.hasMoreElements()) {
                String strKey = (String) enumeration.nextElement();
                String strValue = properties.getProperty(strKey);
                map.put(strKey, strValue);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return map;
    }

    // Write a single property entry
    public void write(String pKey, String pValue) throws IOException {
        Properties properties = new Properties();
        InputStream inputStream = new FileInputStream(path);
        // Load the property list (key/value pairs) from the input stream
        properties.load(inputStream);
        // setProperty delegates to Hashtable.put but enforces String keys and values;
        // the return value is the result of that put call.
        OutputStream outputStream = new FileOutputStream(path);
        properties.setProperty(pKey, pValue);
        // Write the property list to the output stream in a format
        // suitable for loading back into a Properties table with load
        properties.store(outputStream, "Update " + pKey + " name");
        outputStream.flush();
        inputStream.close();
        outputStream.close();
    }

    // Batch-write property entries
    public void write(List<String> list, Map<String, String> map) throws IOException {
        Properties properties = new Properties();
        InputStream inputStream = new FileInputStream(path);
        InputStreamReader inputStreamReader = new InputStreamReader(inputStream, StandardCharsets.UTF_8);
        // Load the property list (key/value pairs) from the input stream
        properties.load(inputStreamReader);
        OutputStream outputStream = new FileOutputStream(path);
        for (String strings : list) {
            properties.setProperty(strings, map.get(strings));
        }
        properties.store(outputStream, "Update");
        outputStream.flush();
        inputStream.close();
        outputStream.close();
    }
}
Q: How does the CPU actually retrieve data from memory when you call a variable in a programming language?
As I understand it from the internet sources available to me, when you declare and initialize a variable in Java, you are allocating this data, say an 8-byte float, to a particular memory cell in the RAM, with the address specified as a row and column number. Now suppose I want to access this memory location and print the float out. As a programmer, I will clearly not write out the binary representation of the float's memory location for the CPU to process; rather, I will call the variable name I declared before. When this variable name gets transformed into binary code and sent to the CPU, how does the CPU know what memory location the variable is referencing? For that to work, doesn't the memory location of the variable have to be stored directly on the CPU? If not, you have to store the memory location of the float in some memory cell as well; in other words, when you call the variable, you first have to fetch from the RAM the memory address the variable represents, and then use that information to go back to the RAM to retrieve the float. But for the CPU to get to the memory address of the memory address of the float, it still either has to store that address directly on the CPU or store it in the RAM, and the same story goes on recursively. What is really happening when you call a variable? How does the CPU know the memory address associated with the variable in the first place without referring to the RAM? Because if the CPU is referring to the RAM, that means it has already gotten the memory address from somewhere.

A: Your assumption that the variable is sent to the CPU is wrong. Every program must be compiled to some machine code (or it will be interpreted, but that roughly comes down to the same thing). The idea of compilation is to translate the human-readable code into a series of bits, which can then be read by the processor. To get an idea of what those series of bits do, you could take a look at assembly, which is not much more than a human-readable encoding of the CPU's instruction set. Now the trick is that the compiler builds a so-called symbol table, where it keeps track of all variables that have been defined by the programmer(s). Once it knows about all of the variables, it also knows how much RAM it needs (not considering dynamic allocation, of course) and thus can allocate a certain space in memory. All variables are then mapped to some memory address, or, if the compiler is smart enough, certain (temporary) variables may live only in registers on the CPU to save some memory (not completely sure about that last part though). The compiler thus creates a series of bits in which each variable is replaced by the correct register or memory address, and the CPU does not know or care about any variables. For dynamically allocated memory things get a bit trickier, but eventually it comes down to memory addresses being stored in variables, which are handled by the compiler again. I hope I understood your question correctly and did not spread too much nonsense (if I did, please correct me), as I am not an authority whatsoever in this area, but that is roughly my view of these things.
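To see the answer's symbol-table point concretely, here is a small CPython illustration (bytecode rather than native machine code, but the principle is the same: names are resolved to numbered slots before anything executes):

import dis

def f():
    x = 3.14        # 'x' exists only at compile time
    return x * 2    # the compiled code loads slot 0, not the name "x"

print(f.__code__.co_varnames)   # ('x',) -- the compiler's symbol-table analogue
dis.dis(f)                      # shows LOAD_FAST 0 (x): an index, not a name lookup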
Where’s Balochistan’s slice of democracy?
Democracy is a gift, one that continues to benefit humans in their everyday experiences. Democracy was secured after a thorough, continuous tug of war between autocrats and commoners, stretched across decades. This gift seems to have eluded Balochistan for a long time now. In fact, the evolution of governance structures from different forms of autocratic government to present-day democracy …
Frank Close is professor of theoretical physics at the University of Oxford, and Fellow of Exeter College, Oxford. He is a high energy particle physicist and has published some 200 research papers, specializing in the quark structure of nuclear particles, with a particular interest in glueballs. He is the author of several popular books on physics, including Antimatter, Neutrino, and Nothing - A Very Short Introduction (all published by Oxford University Press). His latest book is The Infinity Puzzle (Basic Books), the story of half a century of discoveries that have led to the Large Hadron Collider.
Better to Bend than to Break: Sharing Supply Risk Using the Supply-Flexibility Contract
Problem definition: We analyze a contract in which a supplier who is exposed to disruption risk offers a supply-flexibility contract, comprising a wholesale price and a minimum-delivery fraction (flexibility fraction), to a buyer facing random demand. The supplier is allowed to deviate below the order quantity by at most the flexibility fraction. The supplier's regular production is subject to random disruption, but she has access to a reliable expedited supply source at a higher marginal cost. Academic/practical relevance: Despite the prevalence of supply-flexibility contracts in practice, to the best of our knowledge there is no previous academic literature examining their optimal design. As such, the level of flexibility in practice is usually set on an ad-hoc basis, with buyers typically reluctant to share risk with suppliers. Our analysis of supply-flexibility contracts informs practice in two ways: first, using analytically supported arguments, it educates managers on the effects of their decisions on economic outcomes; second, it shows that the supply-flexibility contract benefits both the supplier and the buyer, regardless of which player chooses how supply risk is allocated in the supply chain. Methodology: Non-cooperative game theory, non-convex optimization. Results: We derive the supplier-led optimal contract and show that supply chain efficiency improves relative to the price-only contract. More interestingly, even though the buyer lets the supplier decide how the two share supply risk, the profits of both players increase with the introduction of flexibility into the contract. Further, supply flexibility may be even more valuable for the buyer than for the supplier. Interestingly, the flexibility fraction is not monotone in supplier reliability, and a more reliable supplier may even prefer to transfer more risk to the buyer. The robustness of these findings is established through two extensions: one in which we study a buyer-led contract (i.e., the buyer chooses the flexibility fraction), and another in which the expedited supply option is available to both the supplier and the buyer. Managerial implications: The supply-flexibility contract is mutually beneficial for both players and yet retains all the advantages of the price-only contract: it is easy to implement, it requires minimal operational and administrative burden, and there is evidence of the use of such contracts in practice. While our focus is not on supply chain coordination, we note that the combination of two mechanisms (the supply-flexibility contract derived in this paper to share supply risk and a buyback contract to share demand risk) yields a coordinating contract.
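To make the flexibility-fraction mechanism concrete, here is a stylized Monte Carlo toy. It is not the paper's model: the uniform demand, the disruption probability, and the assumption that a disrupted supplier delivers exactly the minimum (1 - f) * q are all invented for illustration.

import random

def buyer_fill_rate(q, f, p_disrupt=0.2, trials=100_000, seed=1):
    random.seed(seed)
    served = 0.0
    for _ in range(trials):
        demand = random.uniform(50, 150)
        delivered = (1 - f) * q if random.random() < p_disrupt else q
        served += min(demand, delivered) / demand
    return served / trials

for f in (0.0, 0.1, 0.3):   # a larger flexibility fraction shifts more supply risk to the buyer
    print(f, round(buyer_fill_rate(q=100, f=f), 3))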
EFFECT OF DIFFERENT ESTRUS SYNCHRONIZATION ON SERUM E2, P4, FSH AND LH DURING DIFFERENT ESTRUS PERIODS AND PREGNANCY IN EWES

This research was designed to determine the effect of different estrus synchronization protocols on serum E2, P4, FSH and LH during different estrus periods and pregnancy in Karadi and Arabi ewes. The study used 30 ewes of each breed, aged 2-4 years. Ewes from each breed were equally and randomly distributed into three groups. The first group was treated with vaginal sponges saturated with 60 mg of medroxyprogesterone acetate for a period of fourteen days (T1). The second group received the MAP treatment and was injected with a dose of 300 IU/head of hCG (T2). The third group received the MAP treatment and was injected with a dose of 75 µg/head of GnRH (T3). The results showed significant differences in estrogen concentration among the reproductive stages, with the highest level in the estrus phase (60.96 pg/ml). Progesterone concentration also differed significantly among the reproductive stages, with the highest concentration in the pregnancy period (16.33 pg/ml). FSH concentration differed significantly among the stages, with the highest concentration in the estrus period (3.243 mIU/ml), and the highest serum LH was recorded in the estrus period (5.230 mIU/ml). The study thus concludes that these hormonal treatments may be effective in inducing follicular growth and ovulation and may ultimately increase the pregnancy rate.

INTRODUCTION

The majority of sheep reproduction management procedures focus on inducing and synchronizing estrus and ovulation to allow for out-of-season and/or synchronized lambing. Studies in sheep have considered management practices that improve the productive efficiency of herds in a technically and economically sound way, with the intention of reducing the pharmacological manipulation of animals. These methodologies are founded on an understanding of reproductive events, sociosexual factors and the effects of nutrition, because at present reproductive management protocols are based on the application of exogenous hormones that either simulate the action of a corpus luteum (CL), such as progestogens (P4), or eliminate it in order to induce a follicular phase and ovulation, such as prostaglandins (PGF2α). hCG and its receptor, LH/CGR, are expressed in numerous regions of the reproductive tract, in both gonadal and extragonadal tissues, stimulating oocyte maturation, fertilization, implantation and early embryo development, according to a review of the worldwide literature and research investigations. Furthermore, hCG appears to have a role in solid organ transplantation as an anti-rejection substance. hCG has also been preferred over human menopausal gonadotrophin (hMG) because of its greater availability and lower cost. As a result, hCG was previously used to induce ovulation in sheep during the anoestrus season. The primary neuropeptide controlling reproductive function in all vertebrate species is gonadotropin-releasing hormone (GnRH). Ovarian steroids secreted by mature ovarian follicles control the pulsatile pattern of GnRH release from the hypothalamus, which stimulates a preovulatory surge of luteinizing hormone (LH) from the anterior pituitary gland in females of spontaneously ovulating species, such as sheep. GnRH injection causes an LH peak within a short time and shortens the interval to ovulation.
The use of gonadotropin-releasing hormone (GnRH) or the male effect in PGF2α-based protocols has been reported previously. Human chorionic gonadotropin (hCG) and gonadotropin-releasing hormone (GnRH) have been given to ewes of several breeds (Akkaraman, fat-tailed, Afshari, and Booroola-Merino crossbred) to improve reproductive performance (conception, lambing, twinning rate, and litter size). In Afshari × Booroola-Merino crossbred ewes, post-mating hCG-treated groups had a higher plasma progesterone concentration. Injection of GnRH on the day of oestrus, or at the time of mating and 7 or 9 days later, increased serum progesterone concentration. The focus of this study is to determine the effect of different estrus synchronization protocols on serum estrogen, progesterone, FSH and LH during different estrus periods and pregnancy in Karadi and Arabi ewes in the Erbil area.

MATERIALS AND METHODS

Animals and experimental design

The experiment was carried out on a private farm in Trpespyan / Erbil governorate / Kurdistan region / Iraq, between 20/7/2020 and 21/1/2021. The experiment included sixty Karadi and Arabi ewes (30 of each breed), aged 2-4 years, with a live body weight of 50 ± 5 kg. Each breed was randomly subdivided into three groups (10 ewes per group). All animals were kept under the same management and nutritional conditions prevailing on the farm: concentrated fodder (600 g/head/day) was provided in two main meals, morning and evening; mineral salt blocks were suspended inside the barn; straw and water were freely available; and the animals were allowed to graze in the morning and evening. In addition, ewes received weekly doses of vitamin AD3 to compensate for any vitamin deficiencies during the experiment. The first group (T1) was treated with vaginal sponges saturated with 60 mg of medroxyprogesterone acetate (MAP), inserted for a period of fourteen days. The second group (T2) received the same treatment as the first and, after sponge withdrawal, was injected with a dose of 300 IU/head of human chorionic gonadotropin (hCG). The third group (T3) received the same MAP treatment and, after sponge withdrawal, was injected with a dose of 75 µg/head of GnRH.

Blood sampling and hormonal analysis

Blood samples were collected from the ewes via jugular venipuncture using non-heparinized vacutainer tubes during the various reproductive phases at 11.30 a.m. Blood samples were centrifuged (at 3000 rpm for 15 min), and plasma was separated and stored at -20 °C until analysis. Hormones (estrogen, progesterone, LH and FSH) were measured by double-antibody enzyme immunoassay using Modular Analytics E170 and cobas e 602 analyzers (Strasse 116, D-68305 Mannheim) according to the method of , during the various phases (luteal, estrus, pregnancy and after parturition).

Estrus detection and mating

Fertile Karadi and Arabi rams were introduced to the ewes in each experimental group (one ram per five ewes) for estrus detection and mating, starting on the day of sponge withdrawal; rams were rotated among the ewe groups to avoid a sire/group confounding effect. Painted-breast fertile rams remained with the ewes for five days, and ewes with marked rumps were considered mated.
Statistical analysis

The General Linear Model (GLM) procedure of the SAS (2005) statistical program was used to analyze the collected data and diagnose the significant effects of the factors affecting the studied traits; the experiment was designed as a factorial CRD. Duncan's multiple range test was used to test the differences between the subclasses of each factor.

RESULTS AND DISCUSSION

Estrogen concentration

From Table 1, it can be concluded that estrogen concentration differed significantly among the reproductive stages. In the current study, the estrus phase showed the highest serum estrogen level (60.96 pg/ml), significantly (p ≤ 0.01) higher than the other three stages (37.73, 35.16 and 12.53 pg/ml, respectively). Similar to our findings, Sangeetha and Rameshkumar reported a high level of serum estrogen in the ovulatory phase. In the luteal phase, the highest concentration of estrogen was seen in group T3 (38.30 pg/ml), with significant (p ≤ 0.01) differences in comparison with the other groups, T1 (37.20 pg/ml) and T2 (37.70 pg/ml). This may be related to the fact that the major regulator of the reproductive axis is gonadotropin-releasing hormone (GnRH): its pulsatile secretion influences both endocrine function and gamete maturation in the gonads by determining the pattern of secretion of the gonadotropins follicle-stimulating hormone and luteinizing hormone, and it stimulates the secretion of estrogen. This result is in agreement with previous studies. Furthermore, breed had no significant effect on estrogen concentration in general, despite arithmetically higher values in the Karadi at all stages; in the Karadi breed, estrogen concentration in estrus (61.07 pg/ml) was non-significantly higher than in the Arabi breed. This difference may be attributed to variation in the genetic abilities of the individuals of the two breeds. In the estrus period, estrogen concentration increased in all treatment groups, with the highest concentration in the T3 group (61.90 pg/ml) and the lowest in the T1 group (59.70 pg/ml). Estrus is associated with maximal ovarian follicular growth and increased estrogen release. Estrogen is a female sex hormone, produced by the ovary, that causes behavioral estrus in females. Gonadotropin-releasing hormone (GnRH) stimulates the secretion of estrogen, which is closely linked to the activity of the follicles in the ovaries. Estrogen is generated by the internal theca cells in the antrum of the follicle and is then absorbed and transported to the target organs via the bloodstream. In the pregnancy period, estrogen concentration decreased in comparison with the estrus period; even so, the highest concentration was noticed in the T3 group (36.10 pg/ml), with significant differences (p ≤ 0.01) from the other groups, T1 (34.40 pg/ml) and T2 (35.00 pg/ml). After parturition, estrogen concentration decreased markedly in comparison with the other reproductive stages, with the lowest concentration in the T1 group (11.90 pg/ml) and the highest in the T3 group (13.20 pg/ml), without any significant differences among treatments. These results agree with a previous study, which concluded that estrogen concentration increases during pregnancy and declines after birth.
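A rough sketch of the factorial analysis described under "Statistical analysis" above; statsmodels is assumed in place of SAS, and Tukey's HSD stands in for Duncan's multiple range test, which statsmodels does not provide.

import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.multicomp import pairwise_tukeyhsd

def analyze(df):
    # df columns: hormone (numeric), treatment (T1/T2/T3), breed (Karadi/Arabi)
    model = smf.ols("hormone ~ C(treatment) * C(breed)", data=df).fit()
    print(sm.stats.anova_lm(model, typ=2))                    # factorial-CRD ANOVA
    print(pairwise_tukeyhsd(df["hormone"], df["treatment"]))  # subclass comparisons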
Progesterone concentration

According to the statistical analysis shown in Table 2, progesterone concentration differed significantly among the reproductive stages, with the highest concentration in the pregnancy period (16.33 pg/ml), significantly (p ≤ 0.01) higher than in the other three stages (7.133, 0.803 and 0.940 pg/ml, respectively); this result is in accordance with the finding of . The results showed variation in progesterone levels. Progesterone concentrations did not differ significantly among treatments T1, T2 and T3 at the different stages; in the pregnancy stage they averaged 15.80, 16.10 and 16.50 pg/ml, respectively, declining after parturition to 0.900, 0.950 and 0.970 pg/ml in T1, T2 and T3, respectively. In the luteal phase, the progesterone level increases because of increased secretion from the corpus luteum; in the animals that ovulated, the plasma progesterone concentration either remained basal or rose to a lower level than that found during the luteal phase of the cycle. These observations agree with the results of the present study. Other experiments have shown comparable results using intravaginal sponges containing progestagens and hCG injection, with or without the addition of prostaglandins. The ability of hCG injection on day 7 post-estrus to stimulate accessory corpora lutea development and increase progesterone in female goats has been effectively reported. The hCG luteotrophic action was revealed later after breeding, according to plasma progesterone concentrations obtained in hCG-treated mice in a prior study (45 days). Luteotrophic activity has also been previously observed by . Premature luteal regression in goats occurred at rates of 57.5, 0.0 and 37.0% in the respective treatment groups, including those treated with hCG and GnRH. In an earlier study, the progesterone concentration in peripheral plasma was measured sequentially in individual ewes during the estrous cycle and during gestation and parturition, in intact and in ovariectomized ewes. During the estrous cycle, progesterone concentration was lowest up to 48 h after the onset of estrus. After estrus, the progesterone level began to rise, reaching a peak on the 10th day; after a decline, the level rose to a second peak on the 14th or 15th day. Three to four days before the next onset of estrus, the concentration dropped sharply over a period of 48 h to a low basal level. During early pregnancy, the plasma progesterone concentration remained fairly constant at a level similar to the maximum found during the cycle. A sharp rise started around the 80th day, reaching 15-20 ng/ml around the 110th day. This was followed by a second peak and then a decline in the plasma progesterone concentration before parturition, although the time at which this began was variable; even on the day of parturition, the level was generally 3 ng/ml. A basal level of 0.5 ng/ml was reached within 24 h after parturition. The increased progesterone concentration in the group treated with hCG may be explained by the ability of hCG to bind the LH receptor, stimulating progesterone synthesis. It has been suggested that ewes receiving hCG had more corpora lutea and higher serum progesterone, through increased gene expression in the maternal endometrium and promotion of proangiogenic factors in fetal extraembryonic membranes. Supplementing livestock with hCG may boost P4 levels and improve reproductive efficiency owing to the augmented CL number per ewe.
This may explain the increased level of progesterone during the pregnancy period in comparison with the other reproductive periods. In cyclic ewes, eCG given after progestagen treatment reduces the interval to the onset of estrus compared with ewes given only progestagen. Gonadotropin enhances estrogen concentration and induces estrus and the LH surge. Furthermore, breed did not have a significant effect on estrogen concentration in general, although the Karadi surpassed the Arabi in all periods; this difference may be attributed to variation in the genetic abilities of the individuals of the two breeds.

FSH concentration

The results illustrated in Table 3 indicate variation in FSH concentration among the treated groups. In the luteal phase, FSH concentration increased non-significantly in the three treatment protocols, T1, T2 and T3, averaging 0.810, 0.830 and 0.880 mIU/ml, respectively. In the estrus phase, FSH concentration increased significantly in the T3 group, to 3.400 mIU/ml, compared with T1 (3.120 mIU/ml) and T2 (3.210 mIU/ml). Furthermore, no significant changes in FSH concentration were noticed among the groups in the pregnancy period, although FSH concentration remained highest in T3 compared with T1 and T2. After parturition, FSH concentrations dropped in all groups, with only slight, non-significant differences among the three treatments, averaging 0.039, 0.043 and 0.048 mIU/ml, respectively. Similar observations were reported by Theofanakis et al. and by Sangeetha and Rameshkumar. The increased level of FSH in the luteal phase in the treated groups may be related to the use of progesterone to shorten the synchronization procedure by imitating the activity of the corpus luteum, or to a combination of both. These preliminary data in ewes showed that hCG treatment increased prolificacy but lowered fertility. More recent data indicate that ewes treated with hCG, in spite of showing more synchronized ovulation than controls without gonadotrophin treatment, had a lower pregnancy rate after artificial insemination. The increased FSH in the estrus phase may be related to the effect of hCG, which is known to stimulate ovulation mediated by increases in FSH and LH. Serum hCG concentrations decreased significantly after delivery, with an estimated maximal first half-life of 6.6 hr. Serum FSH decreased only slightly in the first two days after birth, then increased to near the initial level by postpartum day five. Serum FSH concentrations were within the normal (premenopausal adult female) range in both lactating and non-lactating subjects at 6 weeks postpartum. Those findings are consistent with the findings of the present research. The highest concentrations of FSH in the different reproductive stages in the group treated with GnRH may be related to the regulatory effect of GnRH on FSH gene transcription. The use of hCG to promote ovulation during estrus synchronization in ewes may reduce fertility rates, possibly because of the high frequency of abnormal follicular development patterns, disruptions and ovulation delays in treated females, as well as the formation of follicular cysts. These findings preclude their practical application for inducing ovulation concomitantly with estrus synchronization procedures.
More recent data support that ewes treated with hCG, in spite of inducing more synchronized ovulation than controls without gonadotrophin treatment, had a lower pregnancy rate after timed artificial insemination (TAI). The concentration of FSH differed significantly among the reproductive stages, with the highest concentration in the estrus period (3.243 mIU/ml), significantly (p ≤ 0.01) higher than in the other three stages (0.840, 0.773 and 0.043 mIU/ml, respectively). This result is in accordance with the findings of Sangeetha and Rameshkumar and of Kohno et al. Increased FSH in the estrus phase may be related to the effect of hCG, which is known to stimulate ovulation mediated by increasing FSH. Breed did not have a significant effect on FSH concentration in general, although the Karadi surpassed the Arabi in all periods. This difference may be attributed to variation in the genetic abilities of the individuals of the two breeds.

CONCLUSION

Estrus synchronization using hCG and, to a greater extent, GnRH in combination with progesterone may be effective in stimulating reproductive hormones, thereby inducing follicular growth and ovulation, and may ultimately increase pregnancy rate and production.
// availablePrefixes returns the total number of smallest allocatable prefixes
// (two bits longer than the address-family bit length allows at maximum, e.g.
// /30s for IPv4) together with the list of currently free prefix ranges.
func (p *Prefix) availablePrefixes() (uint64, []string) {
	prefix, err := netaddr.ParseIPPrefix(p.Cidr)
	if err != nil {
		return 0, nil
	}
	var ipset netaddr.IPSetBuilder
	ipset.AddPrefix(prefix)
	// Punch out every child prefix that is already allocated.
	for cp, available := range p.availableChildPrefixes {
		if available {
			continue
		}
		ipprefix, err := netaddr.ParseIPPrefix(cp)
		if err != nil {
			continue
		}
		ipset.RemovePrefix(ipprefix)
	}

	// Count how many smallest-size prefixes fit into the remaining free ranges.
	maxBits := prefix.IP.BitLen() - 2
	pfxs := ipset.IPSet().Prefixes()
	totalAvailable := uint64(0)
	availablePrefixes := []string{}
	for _, pfx := range pfxs {
		totalAvailable += 1 << (maxBits - pfx.Bits)
		availablePrefixes = append(availablePrefixes, pfx.String())
	}
	// Clamp so callers that store the count in an int32 cannot overflow.
	if totalAvailable > math.MaxInt32 {
		totalAvailable = math.MaxInt32
	}
	return totalAvailable, availablePrefixes
}
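For comparison, a rough Python analogue of the same accounting, using the standard-library ipaddress module (IPv4 assumed; slots are counted two bits below the address-family maximum, mirroring BitLen() - 2 in the Go code).

import ipaddress

def available_prefixes(cidr, allocated):
    free = [ipaddress.ip_network(cidr)]
    for child in map(ipaddress.ip_network, allocated):
        nxt = []
        for f in free:
            if child == f:
                continue                              # block fully consumed
            if child.subnet_of(f):
                nxt.extend(f.address_exclude(child))  # punch the child out
            else:
                nxt.append(f)
        free = nxt
    max_bits = (free[0].max_prefixlen if free else 32) - 2
    total = sum(1 << (max_bits - f.prefixlen) for f in free)
    return total, [str(f) for f in free]

print(available_prefixes("192.168.0.0/24", ["192.168.0.0/26"]))
# (48, ['192.168.0.64/26', '192.168.0.128/25']) -- 16 + 32 free /30-sized slots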
// Section 14
// Overloaded insertion and extraction operators
#include <iostream>
#include "Mystring.h"

using namespace std;

int main() {
    Mystring larry {"Larry"};
    Mystring moe {"Moe"};
    Mystring curly;

    cout << "Enter the third stooge's first name: ";
    cin >> curly;

    cout << "The three stooges are " << larry << ", " << moe << ", and " << curly << endl;

    cout << "\nEnter the three stooges names separated by a space: ";
    cin >> larry >> moe >> curly;

    cout << "The three stooges are " << larry << ", " << moe << ", and " << curly << endl;

    return 0;
}
n = int(input())
s = input()
li = []
# Try every cut position: s[:i+1] is the left piece, s[i+1:] is the right piece.
for i in range(n - 1):
    li.append(len(set(s[:i + 1]) & set(s[i + 1:])))
print(max(li) if li else 0)
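The same logic as a reusable function; the check below uses what appears to be the task's standard sample (cutting "aabbca" into "aab" and "bca", which share the letters a and b):

def max_common_letters(s):
    # Maximize |distinct(left) & distinct(right)| over all cuts into two non-empty parts.
    return max((len(set(s[:i]) & set(s[i:])) for i in range(1, len(s))), default=0)

assert max_common_letters("aabbca") == 2   # "aab" | "bca" -> {a, b}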
Can fermented food fight off colds? There are many time-honored ways to preserve vegetables, such as canning or freezing. But one method actually helps foster healthy gut bacteria, which can boost the immune system and aid in digestion. That honor goes to fermentation, which is still a relatively unstudied scientific field. In order to get those benefits, though, Moise noted the foods must be raw and unpasteurized before fermenting to ensure the cultured microbes are still alive. At a biological level, the healthy bacteria contained in fermented vegetables multiply and colonize the gut’s existing bacteria, crowding out any harmful bacteria and staving off diseases (and colds) before they happen, says Addie Rose Holland, co-founder of Greenfield-based Real Pickles, which sells naturally fermented vegetables throughout the northeast. Over time, healthy bacteria that’s already on the vegetable breaks down some of the vegetable’s sugars into lactic acid, which acts as a natural preservative. If raw vegetables are left to sit in a barrel without oxygen for about 8 months to a year, “in a lot of cases, the fermented version of the vegetable is more nutritious than the raw version,” said Holland, 39, noting they also add salt — which kills certain types of bacteria that can inhibit fermentation; gets the process started faster; and adds flavor. Holland said that scientific studies have shown there’s more vitamin C in fermented cabbage than in raw cabbage. “Through (the bacteria’s) processing of the fresh vegetable, they’re creating compounds and nutrients and enzymes that our body doesn’t produce on its own,” Holland said. While there are supplements that can deliver specific strains of healthy bacteria, fermented foods — which also include products like yogurt, vinegar, hard cider, and craft beer — can be better because “if you’re eating fermented foods you’re getting huge diversity,” Holland said. And that’s on top of the nutrients raw vegetables already contain. Additionally, although it’s known that healthy bacteria is good for health, Holland said scientists haven’t identified which bacterial strains are the best for gut health, or how to enhance those strains in fermented foods. At least in part, Holland suggested that the research gap stems from the fact that fermented foods fell out of favor in America in recent decades and were replaced by vinegar pickles — which are preserved with boiled vinegar and salt and don’t have to be refrigerated. But even though vinegar pickles might be easier to stock, they don’t have the same health benefits because they’re not fermented, she said. More recently, as people have realized the health benefits of fermented foods, Holland says they’re enjoying a resurgence. When she and husband Dan Rosenberg, 42, started Real Pickles in 2001, Holland said they were one of only a handful of businesses nationwide producing fermented pickles. Rosenberg, who discovered fermenting at a farming conference at Hampshire College, and Holland, who also works at the North East Climate Science Center at the University of Massachusetts Amherst, both have degrees in geology. Each year, Holland says they process about 300,000 pounds of organic vegetables from area farms like Atlas Farm, Red Fire Farm, Chamutka Farm, Kitchen Garden Farm. Holland noted they buy their products from farms that don’t use harmful chemicals because they believe a better vegetable makes for a better, more healthful, fermented veggie. 
These days, their products — fermented beets, cucumbers, cabbage — are sold in supermarkets including Whole Foods and Big Y across New England, and in New York, New Jersey, and Pennsylvania. “There were a few years when we were still one of the only businesses doing this, and it was really hard for us to keep up with the demand,” she said, noting they’ve intentionally kept their business small, and recently converted it into a worker-owned co-op. That cultural renaissance is transitioning into renewed scientific interest. Locally, the nature of bacteria and microbes in fermented vegetables was the focus of a recent study by researchers at the University of Massachusetts Amherst. “We were interested in fermented foods and beverages in general … Is there a risk for disease-causing bugs (in fermented foods)? How can we predict product outcomes?” said David Sela, assistant professor of food science at the University of Massachusetts Amherst. Sela facilitated the study along with undergraduate student Jonah Einson and research fellow Asha Rani, and others from the U.S. Food and Drug Administration’s Center for Food Safety and Applied Nutrition. The study, which was conducted at Real Pickles and took about a year and a half to perform, looked at microbiome communities in the Greenfield facility. The researchers also collected data on the vegetable’s microbes at specific times during the fermentation process. Sela said they found a distinct difference in microbiome communities between the area where raw food is processed and the fermenting room. While this wasn’t a surprise, Sela said the data they collected and sifted through could lead to future studies and a better understanding of how to enhance nutrition in fermented foods, and possibly decrease the amount of food that’s spoiled, among other things. “There is much more work that needs to be done. We’re looking forward to playing our part, as small as it may be, and supporting the community that emerges,” Sela said. For the greatest health benefits, Moise noted that most traditional diets incorporate small portions of fermented foods in every meal, along with other types of foods that are high in fiber. “I encourage my patients to incorporate complex carbohydrates such as legumes and beans, cooked whole grains, as well as non-starchy vegetables like asparagus, onions, garlic, leeks, dandelion greens, all of which contain prebiotic fiber that feed probiotic bacteria,” she said. Looking ahead, Holland says she’s hoping that future studies can quantify the impact that organic farming methods, compared with those that rely on chemical sprays, have on the final fermented product.
Tansley Review No. 18 Mechanisms of resistance and pathogenic specialization in rust-wheat interactions. Forms of resistance and pathogenic specialization in interactions between wheat and its stem and leaf rust fungi have been selected for analysis with two hypotheses in mind. One is that phytoalexin formation is a universal defence mechanism in plants; the other is that resistance is elicited specifically by products of avirulent strains of pathogens. A search for phytoalexins in wheat, using a range of extraction and separation procedures and a number of forms of expression of resistance, has indicated the presence of several inhibitors of fungal development but has yielded very little evidence for post-infectional increases in the amounts of these inhibitors. No evidence for the formation of phytoalexins in wheat has been obtained. Hypersensitivity has been confirmed as being closely associated with the expression of most forms of resistance studied. Recent work with some very rapidly expressed forms of resistance to rust fungi has implicated lignification as a component of active defence in wheat, a finding in agreement with those of other workers. The expression of resistance conferred by the Lr20 gene towards avirulent strains of the leaf rust fungus was the principal subject of explorations for elicitors. Histological observations of the sequence of events in fungal and wheat cells suggested the action of rust strain- and Lr20-specific elicitors in bringing about hypersensitivity. Experimental manipulations involving heat treatments or the use of epidermis-free segments of leaves incubated on infected mesophylls gave results consistent with this interpretation. Attempts to extract specific elicitors from infected leaves failed but yielded non-specific elicitors. Non-specific elicitors have been obtained in intercellular washing fluids of leaves infected with strains of both the leaf and stem rust fungi. They have also been obtained from extracts of germ tubes of both species of fungus. Although not specific for any known resistance gene, the elicitors are selective for a character determined by a factor on chromosome 5A in some wheat cultivars. A role for the non-specific elicitors as incompatibility factors between rust fungi and wheat is suggested. A revised hypothesis is presented in order to explain the evolution of pathogenesis in rust fungi towards wheat, and it is discussed in relation to concepts of basic compatibility between pathogens and their hosts, as proposed also by others.
#!/usr/bin/python3 import os from formula import formula path = os.environ.get("RIT_INPUT_PATH") formula.Run(path)
/** * If description string starts with /fname=<filename>, load * description from file instead */ void tcDatabaseObject::LoadFileDescription() { if (mzDescription.size() < 10) return; wxString s(mzDescription.c_str()); wxString filename; if (!s.StartsWith("/fname=", &filename)) return; filename.Prepend("database/"); std::ifstream in_stream(filename.ToAscii()); if (!in_stream) { fprintf(stderr, "tcDatabaseObject::LoadFileDescription -- Bad file name (%s) in description field for %s\n", filename.c_str(), mzClass.c_str()); return; } std::stringstream ss; ss << in_stream.rdbuf(); mzDescription = ss.str(); }
Identification of two functional xyloglucan galactosyltransferase homologs BrMUR3 and BoMUR3 in brassicaceous vegetables

Xyloglucan (XyG) is the predominant hemicellulose in the primary cell walls of most dicotyledonous plants. Current models of these walls predict that XyG interacts with cellulose microfibrils to provide the wall with the rigidity and strength necessary to maintain cell integrity. Remodeling of this network is required to allow cell elongation and plant growth. In this study, homologs of Arabidopsis thaliana MURUS3 (MUR3), which encodes a XyG-specific galactosyltransferase, were obtained from Brassica rapa (BrMUR3) and Brassica oleracea (BoMUR3). Genetic complementation showed that BrMUR3 and BoMUR3 rescue the phenotypic defects of the mur3-3 mutant. Xyloglucan subunit composition analysis provided evidence that BrMUR3 and BoMUR3 encode a galactosyltransferase, which transfers a galactose residue onto XyG chains. The detection of XXFG and XLFG XyG subunits (restoration of fucosylated side chains) in mur3-3 mutants overexpressing BrMUR3 or BoMUR3 shows that the Brassica and Arabidopsis MUR3 proteins are comparable, as both add Gal to the third xylosyl residue of the XXXG subunit. Our results provide additional information for functional dissection and evolutionary analysis of MUR3 genes derived from brassicaceous species.

INTRODUCTION

Xyloglucan (XyG) is present in the primary cell walls of land plants including gymnosperms, angiosperms, monilophytes, lycophytes, hornworts, mosses and liverworts (;;Popper, 2008;Popper & Fry, 2003). It has a 1,4-linked β-glucan backbone that is substituted at O-6 to varying extents with α-Xyl residues. XyG also serves as a storage polysaccharide in the seeds of several plant species. In the primary wall, XyG is believed to associate with cellulose to prevent aggregation of cellulose microfibrils and thereby enable cellulose to interact with other cell wall components. Xyloglucan subunit structures are described by a single-letter nomenclature (;). For example, an unsubstituted backbone Glc residue is given the letter G, whereas side chains with xylosyl residues attached to Glc are indicated by the letter X. The addition of Gal, Ara, or Xyl to this Xyl residue is denoted by L, S (or D) and U, respectively. The addition of a Fuc residue to the Gal or Ara is shown as F and E, respectively. The XyG of many seed-bearing plants has an XXXG-type structure in which three consecutive backbone Glc residues are substituted. In the model plant A. thaliana, the relative abundance of XXXG, XXFG, XLFG and XXLG is 1.0:1.0:1.7:0.3 (;Von ). Mutations affecting XyG glycosyltransferase genes in Arabidopsis change the structure or content of XyG. For example, the XLFG and XLXG subunits are present only at low abundance in the XyG of the xlt2 mutant; however, the phenotypes of xlt2 and wild type do not differ substantially (). The abundance of XyG in the walls of the xxt1 and xxt2 mutants, which have no visible morphological defects, is decreased by approximately 10% and 32%, respectively (). These results suggest that structural variation or reduced abundance of XyG does not necessarily alter a plant's phenotype. Indeed, the xxt1xxt2 double mutant, which has no detectable XyG in its cell wall, has only modest phenotypes including short root hairs and hypocotyls together with slightly reduced growth (;Park & Cosgrove, 2012). By contrast, elimination of AtMUR3 results in severe phenotypic changes.
These plants have a cabbage-like phenotype with short petioles, curled rosette leaves, short etiolated hypocotyls and an endomembrane aggregation phenotype (;;;). The Arabidopsis MUR3 gene encodes a protein that is evolutionarily related to animal exostosins, which are encoded by tumor-suppressor genes with roles in human bone growth. Arabidopsis MUR3 transfers a galactosyl residue to the xylose adjacent to the unbranched glucose residue of XXXG and XLXG to form the XXLG and XLLG subunits (;). The mur3-1 and mur3-2 mutants, which have an S470L or an A290V single point mutation, respectively, in the MUR3 protein, are deficient in the α-L-fucosyl-(1→2)-β-D-galactosyl side chain (the XXFG and XLFG subunits) but have phenotypes similar to wild type plants (;). Further studies confirmed that mur3-1 and mur3-2 are leaky mutants with discernible MUR3 activity, as their XyG contains small amounts of XXFG and XLFG subunits (). Subsequently, two T-DNA insertion knock-out mutants, mur3-3 and mur3-7, were identified and shown to have a dwarf cabbage-like phenotype (;;). The XyG from mur3-3 and mur3-7 plants contains only XXXG and XLXG subunits (). This indicated that the absence of F side chains results in the abnormal phenotype of mur3-3 or mur3-7. However, the cabbage-like phenotype of mur3-3 is rescued in the xxt2mur3-3 and xxt5mur3-3 double mutants, which also produce XyG comprised of only XXXG and XLXG subunits (). The abundance of XLXG was almost double that in mur3-3 XyG, which led us to suggest that a decrease in XyG galactosylation, rather than the absence of the F side chain, is responsible for the mur3-3 cabbage-like phenotype (). Indeed, overexpressing AtXLT2 in mur3-3 (35Spro:XLT2:mur3-3) resulted in a wild-type phenotype and a XyG in which XLXG accounted for up to 85% of the subunits (). A previous study reported that rice MUR3 (OsMUR3; Os03g05110) is functionally equivalent to AtMUR3. However, overexpression of OsMUR3 in the xlt2mur3-1 mutant rescued the dwarf phenotype only when XyG galactosylation was 81%; three transgenic lines that produced XyG with 100% galactosylation did not (Liu, Paulitz & Pauly, 2015). Thus, XyG functions normally when the degree of XyG galactosylation lies within a certain range. Functional MUR3 homologous genes have also been identified in eucalyptus, tomato, and sorghum (;;). These homologs were reported to be functionally equivalent to AtMUR3, although differences in activity were discernible. For example, SlMUR3 (Sl09g064470) catalyzed galactosylation at the second xylose of the XXXG motif, which the AtMUR3 gene product does not. In sorghum, overexpression of two genes homologous to AtMUR3 (GT47_2 and GT47_7) only partially rescued the dwarf cabbage-like phenotype of mur3-3 (). Here, we report two homologous AtMUR3 genes, from Brassica rapa and Brassica oleracea, that rescue the mur3-3 phenotype.

Gene identification and phylogenetic analysis

The protein sequence of Arabidopsis MUR3 was used as the query in BLAST analyses against the NCBI database with the Protein BLAST tool (https://blast.ncbi.nlm.nih.gov) to identify MUR3 homologs in other plant species. Tobacco MUR3 homologs were identified using http://solgenomics.net/organism/Solanum_lycopersicum/genome. Clustal W software was used to align the full-length protein sequences. MEGA 6 software () was used to construct a phylogenetic tree with the Neighbor-Joining (NJ) method.

Gene cloning and transformation

The coding sequences of candidate AtMUR3 homologous genes were amplified with the primers listed in Supplemental File 1.
The full-length cDNAs were cloned directly into the pCAMBIA1300 overexpression vector () with the Seamless Assembly Cloning Kit (C5891; Clone Smarter, Houston, TX, USA). Agrobacterium tumefaciens strain GV3101 was used to introduce the constructs into mur3-3 (At2g20370; Salk_141953) by the dip infiltration method (Clough & Bent, 1998). Transgenic plants were selected on one-half-strength Murashige and Skoog (1/2 MS) (Murashige & Skoog, 1962) plates containing 15 mg mL−1 hygromycin. The T2 lines were used for subsequent analysis.

Plant growth conditions, phenotyping and genotyping

Arabidopsis plants were grown at 19 °C on soil with a 16-h-light/8-h-dark cycle in an environment-controlled growth chamber (). Seeds were surface-sterilized and hypocotyl growth was determined as described (). Images of hypocotyls and adult plants were obtained with a Canon 5D Mark III digital camera. Hypocotyl lengths and plant heights were measured and analyzed using ImageJ software (Abramoff, Magalhaes & Ram, 2004). To identify the MUR3 gene background in transgenic plants, DNA was extracted from Arabidopsis rosette leaves using the EasyPure Plant Genomic DNA Kit (EE111-01; TransGen, China). Total RNA was extracted from Arabidopsis rosette leaf, stem, hypocotyl and mature root using the EasyPure Plant RNA Kit (ER301-01; TransGen, China). The TransScript One-Step gDNA Removal and cDNA Synthesis SuperMix Kit (AT311-02; TransGen, China) was used to synthesize first-strand cDNA. This cDNA was then used to determine the expression level of the homologous MUR3 genes in transgenic plants by semi-quantitative RT-PCR. The Arabidopsis ACTIN gene was used as a reference. The primers used are listed in Supplemental File 1.

Monosaccharide composition analysis

Rosette leaves of 4-week-old Arabidopsis plants (WT, mur3-3, and independent complemented lines for the BrMUR3 and BoMUR3 genes) were collected and used for cell wall monosaccharide composition analysis as described (). The alcohol-insoluble residues (AIRs) were then prepared (). In brief, leaf powder was sequentially extracted for 30 min each with aq. 70%, 80% and then 100% alcohol. The final residue was suspended for 2 h at 37 °C in acetone, filtered and air-dried. The AIR was destarched using α-amylase and amyloglucosidase (Sigma-Aldrich, St. Louis, MO, USA). The AIR was hydrolyzed for 2 h at 120 °C with 2 M trifluoroacetic acid (TFA) to generate free monosaccharides (;). The hydrolysates were then reacted for 30 min at 70 °C with 1-phenyl-3-methyl-5-pyrazolone (PMP). The mixture was extracted three times with chloroform. The PMP-monosaccharides were analyzed with a Waters high-performance liquid chromatography (HPLC) system, a 2489 UV-visible detector and a Thermo ODS-2 C18 column (4.6 × 250 mm) (). Three biological replicates were employed per sample.

MALDI-TOF MS analysis

To determine the XyG subunit composition of WT, mur3-3 and complemented transgenic plants, the destarched AIRs were treated with 4 M KOH (). The 4 M KOH-soluble material was neutralized, dialyzed and freeze-dried. The 4 M KOH-soluble material in 50 mM ammonium formate (pH 5) was then treated with a XyG-specific endoglucanase (XEG, two units). Ethanol was added to 70% (v/v). The precipitate that formed was removed by centrifugation and the soluble fraction was concentrated to dryness by rotary evaporation. The dried residue was dissolved in water and freeze-dried repeatedly to ensure removal of ammonium formate.
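As an aside for readers unfamiliar with the letter code, the following illustrative sketch (not part of the paper's methods) decodes the subunit names used throughout and estimates the sodiated ion masses that MALDI-TOF spectra of XEG digests typically show; the residue masses are standard monoisotopic values, and the results should be treated as approximate.

SIDE = {"G": [], "X": ["Xyl"], "L": ["Xyl", "Gal"], "F": ["Xyl", "Gal", "Fuc"]}
RES = {"Glc": 162.0528, "Xyl": 132.0423, "Gal": 162.0528, "Fuc": 146.0579}
H2O, NA = 18.0106, 22.9898

def sodiated_mass(code):                  # e.g. "XXFG"
    sugars = ["Glc"] * len(code)          # one backbone Glc per letter
    for letter in code:
        sugars += SIDE[letter]            # side-chain sugars per the nomenclature
    return sum(RES[s] for s in sugars) + H2O + NA

for sub in ("XXXG", "XXLG", "XXFG", "XLFG"):
    print(sub, round(sodiated_mass(sub), 1))
# XXXG 1085.3, XXLG 1247.4, XXFG 1393.4, XLFG 1555.5 [M+Na]+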
A Bruker Microflex spectrometer and workstation (Bruker, Billerica, MA, USA) were used for positive ion mode MALDI-TOF MS analysis. XyG oligosaccharide solutions (~1 mg/mL, 5 mL) were mixed with 10 mM NaCl (5 mL). An aliquot (1 mL) of this mixture was then added to an equal volume of 0.1 M dihydroxybenzoic acid on the MALDI target plate. The mixture was concentrated to dryness with warm air. Mass spectra were collected by summing spectra from at least 200 laser shots.

Identification of MUR3 homologous genes and phylogenetic analysis
Nineteen MUR3 homologous genes were identified in three monocots and 16 dicots (Supplemental File 2). Their predicted amino acid sequences, together with the sequence of the Arabidopsis MUR3 protein, were used to construct a phylogenetic tree, which was divided into two clades. One of the clades contained only Nicotiana tabacum MUR3; the other sequences all clustered into a second clade (Fig. 1). The MUR3 homologs identified from Camelina sativa, Eutrema salsugineum, Raphanus sativus, B. rapa, B. oleracea and B. napus were most closely related to that of Arabidopsis thaliana. The mur3-3 mutants complemented with BrMUR3 and BoMUR3 had their size restored to normal compared to WT (Figs. 2A-2D). The height of adult mur3-3 mutants is nearly 50% less than that of WT; the two complemented lines rescued this dwarf defect to WT levels (Figs. 2E-2I). Additionally, the shorter hypocotyl phenotype of the mur3-3 mutant was also restored by overexpression of BrMUR3 and BoMUR3 (Figs. 2J-2P). These results indicate that BrMUR3 and BoMUR3 function equivalently to AtMUR3.

Primary cell wall monosaccharide composition of BrMUR3 and BoMUR3 complemented plants
We next determined whether the expression of BrMUR3 and BoMUR3 altered the monosaccharide composition of the mur3-3 cell wall. The fucose content is substantially reduced in the leaves of mur3-3 compared with wild type (Table 1). The galactose content of mur3-3 leaves is also decreased. No significant differences in the relative abundances of arabinose, xylose, mannose, glucose, rhamnose and glucuronic acid were detected. The fucose and galactose contents were restored to wild type levels by ectopic overexpression of BrMUR3 and BoMUR3 in mur3-3 (Table 1). These results strongly suggest that the BrMUR3 and BoMUR3 proteins have galactosyltransferase activity. Overexpression of the B. rapa and B. oleracea MUR3 homologs in mur3-3 led to the formation of XyG containing XXFG and XLFG subunits (Figs. 4C and 4D). No new oligosaccharide structures were discernible. In Arabidopsis the F side chain is formed by adding a fucosyl residue to the Gal introduced by MUR3; this Gal is attached to the third Xyl, and fucosylation of the Gal attached to the second Xyl is rarely if ever observed. Thus, BrMUR3, BoMUR3 and AtMUR3 likely encode proteins with the same activity and substrate specificity. Furthermore, overexpression of BrMUR3 and BoMUR3 rescues the cabbage-like phenotype of mur3-3 (Figs. 2A-2D). We conclude that BrMUR3 and BoMUR3 are functionally equivalent to MUR3 and are orthologs of AtMUR3. Our results also show that XyG biosynthesis in brassicaceous vegetables involves MUR3-like genes. The AtMUR3 protein was shown using in vitro activity assays to be a galactosyltransferase that specifically adds a Gal residue to the third position of the XXXG motif in XyG. Homologs of AtMUR3 are present in many angiosperms, including nasturtium and eucalyptus.
The Eucalyptus grandis MUR3 protein has been reported to overgalactosylate XyG, suggesting it has enzymatic activities that differ from those of AtMUR3. Mutant plants lacking the MUR3 gene and the xlt2mur3-1 double mutant are dwarfed and have curled rosette leaves. This phenotype is likely the consequence of a reduction of XyG galactosylation. However, when these mutants are crossed with the xxt1xxt2 double mutant, which produces no XyG, the homozygous offspring (xxt1xxt2mur3-3) produce no XyG and have a normal phenotype. These studies provide convincing evidence that normal growth is affected by altering the structure of XyG rather than by eliminating XyG; the altered XyG is thus a dysfunctional molecule. Previous studies reported that overexpressing OsMUR3, tomato MUR3 (SlMUR3), or AtXLT2 in xlt2mur3-1 or mur3-3 rescued the "cabbage-like" phenotype of the mutants (Liu, Paulitz & Pauly, 2015). However, the dwarf phenotype persisted in OsMUR3 overexpression lines that showed overgalactosylation of XyG (Liu, Paulitz & Pauly, 2015). Overexpression of the E. grandis or tomato MUR3 gene in mur3 or xlt2mur3-1 mutants also resulted in hypergalactosylated XyG (though not complete galactosylation); however, no dwarfed plants were reported among those transgenic plants. Thus, it appears that the phenotypic effect of altered XyG galactosylation varies with species. The factors that regulate the activity of XyG glycosyltransferases are not known. Presumably these activities must be coordinated, since the structure of XyG is maintained within a plant. Brassica rapa, B. oleracea and Arabidopsis are members of the family Brassicaceae, which typically produce XXXG-type XyG. In this study, we have provided evidence that the MUR3 genes from different Brassica species encode enzymes with functions similar to that of AtMUR3. The AtMUR3 homolog identified in tomato (SlMUR3) also encodes a protein with similar XyG galactosyltransferase specificity. However, it is notable that minor amounts of XLLG and XLFG were detected, in addition to XXXG, XXLG and XXFG, in the transgenic plants (xlt2mur3-1). Thus, SlMUR3 may also catalyze the addition of Gal to the second position of XXXG. Advances in our understanding of the catalytic mechanism and acceptor specificity of the MUR3 proteins will be facilitated by solving their crystal structures. The availability of functional recombinant versions of these GTs, together with diverse acceptor molecules, is also required to explore and define their substrate specificity. Such studies will form the basis for the production of GTs with new specificities and provide the opportunity to engineer polysaccharides with tailored functionalities.

CONCLUSION
Understanding the functional significance and genetic basis of plant cell wall polysaccharide structural diversity remains a major challenge. Only a limited number of carbohydrate-active enzymes involved in wall synthesis have been characterized in detail. Our study provides additional galactosyltransferases from brassicaceous species for investigating plant cell wall biosynthesis.

ADDITIONAL INFORMATION AND DECLARATIONS
Funding
This work was supported by the National Natural Science Foundation of China (31900276, 31670302, 31570670 and 31470291), the Doctor Foundation of Shandong (ZR2019BC073), the First Class Grassland Science Discipline Program of Shandong Province, and the Taishan Scholar Program of Shandong (to G.Z.).
The funders had no role in study design, data collection and analysis, decision to publish, or preparation of the manuscript. Grant Disclosures The following grant information was disclosed by the authors: National Natural Science Foundation of China: 31900276, 31670302, 31570670 and 31470291. Doctor Foundation of Shandong: ZR2019BC073. The First Class Grassland Science Discipline Program of Shandong Province. Taishan Scholar Program of Shandong.
// Getdirinfo reads metadata for up to lim files from dir, at revision rev, // into an array. // Files are read in lexicographical order, starting at position off. // A negative lim means to read until the end. // Getdirinfo returns the array and an error, if any. func (c *Conn) Getdirinfo(dir string, rev int64, off, lim int) (a []FileInfo, err error) { names, err := c.Getdir(dir, rev, off, lim) if err != nil { return nil, err } if dir != "/" { dir += "/" } a = make([]FileInfo, len(names)) for i, name := range names { var fp *FileInfo fp, err = c.Statinfo(rev, dir+name) if err != nil { a[i].Name = name } else { a[i] = *fp } } return }
In a partially completed apartment complex not far from the Lebanese capital of Beirut, 4-year-old Zacharia Delly, the son of Syrian refugees, lies semi-comatose on a tattered foam mat surrounded by his mother and four siblings. His head is swollen to twice normal size, lashed with angry purple veins made visible by his baldness. One eye, open but unseeing, protrudes grotesquely from a crust of dried blood and pus, displaced by a tumor that startled his family with the rapidity and maliciousness of its growth. His twin sister Sidra gently pulls aside a wool blanket felted with age to expose his skeletal limbs. She strokes his foot and worriedly examines a new gray stain creeping up his shin, the latest manifestation of a vicious cancer that is consuming her brother from the inside out. With her short brown curls, dimpled cheeks and giggles, she is a constant reminder of what Zacharia once was: a bright-eyed toddler who loved hugs more than toys and never left his mother’s side. That is, before war and disease intersected to cut down a life far before its time. Every tragedy has its if-only moments. Those split-second decisions, looked upon in hindsight that, if taken differently, may have had the power to save a life. For Feryal Delly, a housewife from Homs, Syria, that moment came one day last summer when she was scheduled to take her son Zacharia to his second chemotherapy appointment in Damascus. Zacharia had been diagnosed with neuroblastoma, a common childhood cancer, but the hospital in Homs had been destroyed, so she had to take him to one in Damascus, a two-hour bus ride away. His doctor there was pleased with the first round of chemo and prescribed seven more sessions, two weeks apart. But war stalked Syria, and the road to Damascus was treacherous with checkpoints, both rebel-run and regime. The route was often rocketed, and civilians were frequently detained. Delly’s parents urged her to stay home. It would be 20 days before the fighting calmed enough for her to risk the journey again. By then Zacharia had missed two appointments, and the cancer, which started near his kidneys, had begun to spread. Delly is convinced that it was her decision to stay home that day that made all the difference. “I wanted to take him to the hospital, but I was afraid,” she says, sobbing. “I failed him.” The three-year war in Syria has taken more than 140,000 lives and driven nearly 9 million from their homes. It has destroyed schools, orphanages and places of worship. But perhaps most egregiously, it has ravaged a government-funded health care system that was once the envy of the Arab world. According to a new report by Save the Children, some 60% of Syria’s hospitals have been damaged or destroyed, and half its doctors have fled. Lifesaving medicine is in short supply, and in some cases patients have asked to be knocked out by metal bars rather than go through surgery without anesthesia. The few hospitals still operating in Damascus are all but inaccessible. Once manageable chronic diseases like diabetes and cancer have turned into death sentences. Since the start of the conflict, says the report, 200,000 Syrians have died from chronic illnesses because of a lack of access to treatment and drugs. As a result, thousands of families, including Zacharia’s, have fled to Lebanon for the care they could not receive at home. 
When it became clear that fighting would derail yet another chemotherapy appointment, his family packed for a short trip to Beirut, where they hoped treatment would be easier to find. But the crossing was arduous, and by the time they made it, they had missed the treatment window and tumorous lesions had sprouted on Zacharia’s head. Lebanon was hardly the refuge they had anticipated. While the country boasts some of the finest medical institutes in the Middle East, nearly 90% are privately run, and most of those are for profit. One hospital, known for its children’s cancer ward, turned Zacharia away because the family couldn’t afford the fees. The last photo Delly has of Zacharia standing shows him in front of the hospital’s gaily decorated Christmas tree with his arm around his sister. A few days later he collapsed: tumors had invaded his spinal column. He would never walk again. Days of frantic searching brought Zacharia’s case to the attention of Dr. Elie Bechara, a children’s cancer specialist at Beirut’s Lebanese Hospital Geitaoui. But the deferred chemotherapy treatments had taken their toll. “There is one golden rule in treating these kinds of cancer: delay is not good,” he says. “Even a small delay can make a big difference.” When Bechara examined Zacharia, his heart nearly broke. His body was so riddled with tumors that only a bone-marrow transplant and experimental immunotherapy, now being tested in the U.S., could make a difference. But the treatment is prohibitively expensive, and the chances of success dismally low. “The only thing we could offer at that point was palliative care” — making him as comfortable as possible as cancer wins the war — says Bechara. Bechara estimates that Syrians currently occupy 75% of the beds in his hospital. Many can, and do, pay. But Lebanon is likely to play host to hundreds and perhaps thousands more cases like Zacharia’s as Syria’s health care system nears total collapse. The U.N. body that looks after refugees, UNHCR, has spent tens of millions of dollars treating the Syrian refugees that have already crossed the border. But funds are limited, and as the numbers of refugees flowing into Lebanon increase — 1.5 million, more than a third of the Lebanese population, are expected to have registered by the end of the year — costs will rise. With such limited resources, UNHCR is forced to choose between funding preventative care that can save thousands of lives and spending thousands of dollars to save one life. “Lebanon is the size of Connecticut,” says Ninette Kelley, UNHCR’s representative in Lebanon. “Now just imagine what the priorities would be if a million refugees came to Connecticut and needed to use the health care system.” Last year UNHCR covered medical treatment for 41,500 refugees in Lebanon, but each of those cases was judged on specific criteria: the cost of the intervention against the chances of a positive outcome. Open-heart surgery, hip replacements and emergency dialysis might make the cut. But Zacharia, with his advanced state of cancer, did not meet the threshold. “People’s lives are being saved every day from the treatments we are able to provide. It’s just that the need has greatly outstripped the resources,” says Kelley. “That is what makes the situation we are in today so difficult.” Zacharia, she adds, is the face of a much bigger issue: the toll Syria’s war is taking on the health care system. 
“It is tragic that this child, who, but for violence in Syria, would have been able to continue treatment at home and live a long and prosperous life, is cut down at the age of 4. Now his family, who has just lost their home, has to cope with the loss of this tiny child. How brutal is that?” Back in her UNHCR-funded apartment, Delly, the mother, looks on helplessly as Zacharia struggles to breathe. His teeth have been hurting him, and he gnaws his thumb in his sleep. “The doctor in Damascus warned me not to miss an appointment,” she says as she attempts to control her sobs. “He said anything might happen to Zacharia — he could lose his hearing, his sight, his ability to walk. But what could I do? The road was unsafe, and I could have been taken. What would have happened to my other children then?” Sidra, Zacharia’s twin, springs from her brother’s side to wipe the tears from her mother’s face. Delly’s sister Manal attempts to stop a downward spiral of guilt that she appears to have seen a few times before. “Zacharia isn’t sick because of you. He is sick because of this horrible war. If there hadn’t been war, he wouldn’t have missed his treatments and maybe he would have lived another 10 years.” Delly nods reluctantly and looks over at Zacharia’s heaving chest. As guilty as it makes her feel, as long as she can imagine the scenario where Zacharia got to his chemotherapy in time, she can imagine him alive. Letting go of blaming herself means accepting that he is about to die. “I just want to see him play with his sister one more time,” she says.
Detecting malicious peers in a reputation-based peer-to-peer system In this paper we propose a reputation management scheme for partially decentralized peer-to-peer systems. The reputation scheme helps build trust between peers based on their past experiences and on feedback from other peers. Our system is novel in that it is able to detect not only malicious peers sending inauthentic files but also malicious peers that lie in their feedback. To detect those peers, we introduce the new concept of suspicious transactions. The simulation results show that the proposed scheme is able to effectively detect malicious peers and isolate them from the system, hence reducing the number of inauthentic uploads and increasing peers' satisfaction.
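The abstract describes the mechanism only at a high level. As a rough illustration of the idea, the toy sketch below flags a rater's feedback as a "suspicious transaction" when it contradicts the sender's accumulated reputation, and isolates peers either for bad uploads or for repeatedly suspicious feedback. The thresholds, the update rule and all names are illustrative assumptions, not the authors' actual scheme.

from collections import defaultdict

reputation = defaultdict(lambda: 0.5)   # prior trust assigned to unknown peers
suspicious = defaultdict(int)           # suspicious-feedback count per rater

def report_transaction(rater: str, sender: str, authentic: bool) -> None:
    # Feedback that contradicts the sender's track record is "suspicious":
    # it may be an honest report about a newly bad peer, or a lie.
    if authentic != (reputation[sender] >= 0.5):
        suspicious[rater] += 1
    # Simple exponential update of the sender's reputation (assumed rule).
    weight, target = 0.1, (1.0 if authentic else 0.0)
    reputation[sender] += weight * (target - reputation[sender])

def is_malicious(peer: str, max_suspicious: int = 5) -> bool:
    # Isolate peers that upload inauthentic files (low reputation) or that
    # accumulate too many suspicious reports (likely lying in feedback).
    return reputation[peer] < 0.2 or suspicious[peer] > max_suspicious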
import { Directive, ElementRef, HostListener } from '@angular/core'; @Directive({ selector: '[appHoover]' }) export class HooverDirective { constructor(private elRef: ElementRef) {} @HostListener('mouseover') onMouseOver() { this.changeBackgroundColor('green'); this.changeTextColor('white'); } @HostListener('mouseleave') onMouseLeave() { this.changeBackgroundColor('white'); this.changeTextColor('black'); } private changeBackgroundColor(color: string) { this.elRef.nativeElement.style.backgroundColor = color; } private changeTextColor(color: string) { this.elRef.nativeElement.style.color = color; } }
Characterization of the human granulocyte-macrophage colony-stimulating factor gene promoter: an AP1 complex and an Sp1-related complex transactivate the promoter activity that is suppressed by a YY1 complex It is well documented that a repeated CATT element in the human granulocyte-macrophage colony-stimulating factor (GM-CSF) gene promoter is required for promoter activity. However, the transcription factors that are able to transactivate this enhancer element remain unidentified. Recently, we have found that nuclear factor YY1 can interact with the enhancer element. Here, we report that in addition to YY1, two other nuclear factors have been identified in the DNA-protein complexes formed by the CATT oligonucleotide and the Jurkat T-cell nuclear protein. One of these factors is AP1, and the other one is an Sp1-related protein. Results from transient transfection of Jurkat T cells have revealed that formation of both AP1 and the Sp1-related complex is required for the full enhancer activity of the CATT element. This result is supported by cotransfection of a c-jun expression vector and mutational analysis of the AP1 site or the Sp1-related protein binding site. In contrast, formation of the YY1 complex suppresses enhancer activity, since deletion of the YY1 complex induces an augmentation of the enhancer activity and overexpression of YY1 results in an attenuation of the enhancer activity. Results from the mechanism study have revealed that YY1 is able to inhibit transactivation mediated by either AP1 or the Sp1-related protein, and YY1 suppressive activity is DNA binding dependent. Taken together, these data support the ideas that AP1 and the Sp1-related nuclear protein are required for transactivation of the human GM-CSF gene promoter and that YY1 can suppress transactivation of the promoter even under inducible conditions.
Economic and Financial Viability Plan for the Entrepreneurship of a Lyric Theatre Low-Cost Company: The Case of Zarzuela in Spain Abstract Taking into account that one of the major handicaps when undertaking a venture in the cultural sector is the volatility of costs and demand, this paper aims to implement a viability plan for the entrepreneurship of a low-cost lyric theatre company dedicated to performing Zarzuela, a Spanish lyric genre akin to opera. Firstly, cultural sector data are analysed in terms of supply and demand; secondly, a viability plan is carried out for three years. According to the results, it is concluded that this type of venture is very risky without grants or other financial resources, owing to the variability of demand and to rising unit costs: artistic performance has a fixed production technology that cannot absorb the technical progress of the rest of the economy.
A new approach to parton recombination in a QCD evolution equation
Parton recombination is reconsidered in perturbation theory without using the AGK cutting rules in the leading order of the recombination. We use time-ordered perturbation theory to sum the cut diagrams, which are neglected in the GLR evolution equation. We present a set of new evolution equations including parton recombination.

Introduction
Parton recombination as a new higher-twist phenomenon was first discussed in the QCD evolution process by Gribov, Levin and Ryskin and by Mueller and Qiu in their pioneering works. This evolution equation is called the GLR equation. An interesting effect of parton recombination is screening or shadowing. At higher number densities of partons, for example in the small-x region, the gluons can overlap spatially and annihilate. Therefore, one expects that the growth of the gluon density with $Q^2$ will be suppressed by gluon recombination. These suppression factors, i.e., the negative contributions due to gluon recombination, are calculated in the GLR equation using the AGK (Abramovsky, Gribov, Kancheli) cutting rules. Assuming that the AGK cutting rules are valid in deep inelastic scattering (DIS) in the small-x region, one finds that the relative weights of cuts through two, one and zero ladders are 2 : −4 : 1, as illustrated in fig. 1. Owing to these quantitative predictions of the suppression of parton number densities at small x, the GLR equation was extensively used to explore the structure of the nucleon and new perturbative QCD (PQCD) effects in past years. However, the applications of the AGK cutting rules in the GLR equation have some drawbacks. For example, the cut lines break the correlation between the recombining partons according to the AGK cutting rules in the GLR equation (fig. 1). As we will show in this work, the correlation among the initial partons in the QCD recombination equation should be preserved. On the other hand, two-to-two parton processes may be associated with IR divergences just as the one-to-two processes in the Dokshitzer-Gribov-Lipatov-Altarelli-Parisi (DGLAP) evolution equation are. Although the AGK cutting rules provide simple relations between the cross sections of hadron-hadron interactions for different types of reggeon cuts, the sum of cut graphs according to the AGK cutting rules cannot cancel the IR divergences. The reason is that the difference between the contributions of the positive graph and the negative graph is only a weight, according to the AGK cutting rules. As we know, virtual diagrams are necessary for the cancellation of IR divergences in the DGLAP equation. We will show in this work that the above-mentioned IR divergences in two-to-two processes can also be canceled by the sum of virtual diagrams, which are neglected in the GLR equation. In this work, we reconsider parton recombination in the QCD evolution equation without the AGK cutting rules. To this end, we first point out that a new scale (the recombination scale) exists in the parton recombination processes. We will give a definition of the recombination order of the process. Then, we propose the bare probe-vertex approximation. We find that several more diagrams, which are neglected in the GLR equation, should be included in the QCD equation of parton fusion. We try to find a simple way to calculate those cut diagrams in time-ordered perturbation theory (TOPT).
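For orientation, the GLR equation referred to above is usually quoted in the literature in the following small-x form (shown for context with the standard N_c = 3 coefficient; this is the textbook GLR-MQ expression, not an equation reproduced from this paper):

\frac{\partial^2 xg(x,Q^2)}{\partial\ln(1/x)\,\partial\ln Q^2}
  = \frac{3\alpha_s}{\pi}\,xg(x,Q^2)
  - \frac{81}{16}\,\frac{\alpha_s^2}{Q^2R^2}\left[xg(x,Q^2)\right]^2 ,

where $R$ is the correlation radius discussed below, and the nonlinear term encodes the gluon recombination whose cut structure is at issue in this paper.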
Through a new derivation of the DGLAP evolution equation, we present simple connections among the different cut diagrams. We shall show that both shadowing-antishadowing and momentum conservation are natural results of the theory. As an interesting result, our new equation has a different structure from the GLR equation. The outline of the paper is the following. In section 2 we give some definitions related to the parton model. In section 3 the sum of the cut diagrams in the bare probe-vertex approximation is proposed. In section 4 we give a new derivation of the DGLAP equation. Through this example, we try to show the connections among the relevant cut graphs at the leading recombination order. The new evolution equations incorporating parton recombination are derived in sections 5-7. Section 8 contains the discussion and concluding remarks.

Definitions
According to PQCD, a parton can always independently split into two partons. However, beyond the PQCD dynamics, the recombination of partons depends on the overlap probability of their wave functions. Therefore, we need a new physical quantity to characterize parton recombination. For example, consider an amplitude with two initial partons. According to dimensional analysis, the hadronic part of the amplitude should contain a factor $\sim 1/R$, where $R$ has dimension of length. Now we incorporate the factor $1/Q$ arising from the partonic part of the amplitude to form a dimensionless quantity $1/(RQ)$. We call this the recombination factor. For example, the recombination factor of the process with two initial correlated partons is $1/(RQ)^2$. One can schematically think of $1/(RQ)^2$ as the overlap probability of two partons, where $1/Q$ is the scale of a parton at momentum transfer $Q^2$ and $R$ is the maximum correlation length of two partons. Usually, $R$ is regarded as the scale of the target or the scale of the "hot spots", if they exist in the proton. This definition can be generalized to the case of amplitudes including $m$ initial partons; the recombination factor in this case is $1/(RQ)^{m-1}$ if the fusing partons are paired. In this paper, we consider only the recombination processes at the leading-order level, that is, at $1/(RQ)^2$ and $\alpha_s^2$. Therefore, we choose to study the basic amplitudes $M_{\gamma^* p \to klX}$ shown in figs. 2(a)-(c), with the recombination factors $1/(RQ)^0$, $1/(RQ)^1$ and $1/(RQ)^2$, respectively. In fig. 2 we have omitted the distinction of the parton flavors; the dark circles indicate QCD interactions among the correlating partons. We shall perform the calculations at a given order $1/(RQ)^m$ and order $\alpha_s^n$ in two steps. First, we calculate the process at the order of $1/(RQ)^2$. In this case, the dark circles in fig. 2 are regarded as elemental sub-processes. The reason is that decomposing the circle part would break the parton correlation and reduce the order of the recombination. The second step is to calculate the sub-processes at order $\alpha_s^2$ in PQCD. We will use TOPT in this work. Usually, TOPT is equivalent to the standard covariant perturbation theory. We shall call this TOPT the normal TOPT (NTOPT), where the time lines divide every basic vertex along the time order. In TOPT, the internal lines and the virtual particles are expressed by external lines and effective real particles, respectively.
Therefore, TOPT can also be used to describe amplitudes involving complex vertices, where the part between two neighboring time lines can contain a complex vertex. We define such a TOPT as an anomalous TOPT (ATOPT). Obviously, ATOPT is not equivalent to NTOPT: they have different energy deficits. On the other hand, there is an energy-momentum correlation between two neighboring complex vertices in ATOPT; therefore, the vertex in ATOPT is not really factorized. We take the physical axial gauge, where the light-like vector $n$ fixes the gauge as $n \cdot A = 0$, $A$ being the gluon field. The parton number densities are defined within the parton model description of photon-nucleon DIS (fig. 3a) as in eq. (2.1), where $q(x_1)dx_1$ is the number of quarks carrying momentum fraction between $x_1$ and $x_1 + dx_1$, with $x_1 = k \cdot n / p \cdot n$. Formula (2.1) means that the interaction of a virtual photon with the proton can be factorized into the soft part $q(x_1)$ and the hard part of the subprocess $\gamma^* k \to k'$. According to the parton model, eq. (2.2) holds, where $x_B = Q^2/(2p \cdot q)$ and $C_q$ is a coefficient depending on $x_B$ and $Q^2$. The quark density can then be defined as in eq. (2.3). On the other hand, using TOPT in the cut graph of fig. 3b, we obtain eq. (2.4). Comparing eqs. (2.1) with (2.4), we get the definitions of the quark number density (2.5) and the bare probe-parton vertex (2.6) in TOPT form.

Bare probe-vertex approximation
As we know, the emission or absorption of quanta with zero momentum may be associated with infrared (IR) divergences. However, the singular terms provide the leading contributions to the DIS processes. Therefore, a correct theory is IR-safe: IR divergences are canceled, while the leading contributions are retained. One way to attain these two goals is to sum over cut diagrams belonging to the same time-ordered uncut graph, since these graphs have similar singular structure but may come with opposite signs. Deep inelastic scattering structure functions are the imaginary parts of the amplitudes for the forward 'Compton' scattering of the target with a probe. Using the time-ordered perturbative expansion of the statement of the unitarity of the S-matrix, one can prove that the structure functions are associated with the sum of cut diagrams. These different cut graphs represent the various possible sub-partonic processes required by the unitarity of the perturbative S-matrix. Therefore, the sum of cut graphs is necessary not only for infrared safety, but also for collecting the leading contributions and restoring unitarity. The interesting and important question is: what are the minimum cut diagrams that must be summed for an IR-safe calculation of an inclusive DIS process at a given order (for example, order $\alpha_s^2/(RQ)^2$ in this work)? To answer this question, let us consider a general inclusive DIS process on a target N. One can choose the cut diagrams according to the following program: G(N) stands for the time-ordered uncut diagram of the target N without probe vertices. We sum over the possible cut diagrams of G(N) as in eq. (3.1), where $G_\alpha$ is the cut diagram with cut line $\alpha$; $L_\alpha$ and $R_\alpha$ are the sub-graphs on the left and right of the cut line; and the subscript "I" means that we consider only those cut graphs which have the same observed quantities (that is, the same structure of the intermediate state) and which keep the original correlation among the initial partons in G(N). We use the probe to observe the parton distributions inside the target in DIS. Of course, we cannot control the probing positions.
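For orientation, eqs. (2.1)-(2.3) referred to above presumably take the standard parton-model form (a hedged reconstruction; the notation follows the surrounding text, and the coefficient $C_q$ absorbs the quark charges):

d\sigma^{\gamma^* p} = \sum_q \int_0^1 dx_1\, q(x_1)\, d\hat{\sigma}^{\,\gamma^* q}(x_1),
\qquad
F_2(x_B,Q^2) = \sum_q C_q\, x_B\, q(x_B),
\qquad
x_B = \frac{Q^2}{2\,p\cdot q}.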
In principle, in- and out-probe lines can be attached to the left- and right-hand sides of the cut line in all possible ways. Let $G_\alpha(\text{probe}+N)$ stand for the cut diagrams of the probe-target system, where the additional index labels the probe-parton vertices. Thus, we shall sum over the set (3.2). Obviously, the sum (3.2) is much larger than that for G(N). Now we try to find some approximation to the sum (3.2). As we know, the leading logarithmic approximation (LLA) is a good approximation for IR-safeness in the DGLAP equation at order $\alpha_s$. In this approximation, some of the renormalization effects are neglected in the physical gauge and the probe vertex retains the bare-vertex form as in (2.6). In this case, eq. (3.3) holds; thus, the contributions from the nonlocal interactions of the probe with partons are neglected at the leading approximation. We need only sum that part of the cut graphs in which the bare probe vertex $\delta(x - x_B)$ connects with the cut line, that is, the sum (3.4). We call (3.4) the bare probe-vertex approximation. We find that this approximation is also a satisfactory approximation in the DIS processes with parton recombination. In fact, our interest is in the modifications that parton recombination makes to the DGLAP equation, which has a probabilistic interpretation in the LLA. We shall show that the bare probe-vertex approximation is necessary for keeping the probability picture of the new evolution equation.

Rederivation of the DGLAP equation
We know that several methods can be used to derive the DGLAP evolution equations; however, the following new method illustrates more clearly the simple relations among the cut diagrams in the sum (3.4). For simplicity, we consider only the non-singlet case. However, we will see that the contribution of an intermediate state can be replaced by $d^3l$, which comes from the contribution of the loop and contributes $d\ln l_\perp^2$. Therefore, all the processes of fig. 4 have the same intermediate-state structure. We proceed along the lines of earlier work. The change of the valence-quark number density caused by gluon radiation can be written as in eq. (4.1) (see fig. 4a), where the cross section is factorized into the soft part and the hard part $H(\gamma^* l \to \gamma^* l)$ according to the factorization theorem. Using TOPT we obtain the hard part in the same form as eq. (2.5), leading to eq. (4.4). In eq. (4.4) we inserted a factor of unity and defined $P_{qq}$ as the parton splitting function for the non-singlet part. Now let us consider figs. 4b and 4c. As for the real diagram, the contributions of fig. 4b to the change of the valence-quark number density are given by eq. (4.9), where the factor of 1/2 was explained as an effect of the renormalization in earlier work: the virtual part of fig. 4b corresponds to the renormalization of a parton propagator, and only half of the probe vertex connects with this parton line. This is equivalent to multiplying the virtual process by an extra factor of 1/2. Evaluating the hard part in eq. (4.9), we therefore obtain eq. (4.12). The two terms on the right-hand side of eq. (4.12) have a simple interpretation: the positive contribution arises from the splitting of higher-momentum quarks, while the negative contribution results from the loss of quarks due to their gluon radiation. The result (4.12) is the same as the probability form of the DGLAP equation for the non-singlet part in the literature.
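The probability form of the non-singlet DGLAP equation that eq. (4.12) arrives at is, in its standard shape (a reconstruction of the textbook expression, not a quotation of this paper's own formula):

\frac{d\,q_{NS}(x,Q^2)}{d\ln Q^2}
 = \frac{\alpha_s}{2\pi}\int_x^1\frac{dy}{y}\,P_{qq}\!\left(\frac{x}{y}\right)q_{NS}(y,Q^2)
 - \frac{\alpha_s}{2\pi}\,q_{NS}(x,Q^2)\int_0^1 dz\,P_{qq}(z),

with the first (positive) term describing the gain from splitting of higher-momentum quarks and the second (negative) term the loss through gluon radiation, exactly as interpreted above.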
However, the new derivation clearly shows the following interesting property of the inclusive DIS processes: the contributions of the cut diagrams which belong to the same time-ordered uncut graph in the sum (3.4) have an identical integral kernel (it is the parton splitting function in eq. (4.12)). This is a reason we use the TOPT form to perform our calculations. We shall examine this connection further in the parton recombination processes.

Leading recombination approximation
So far we considered processes contributing to the usual DGLAP equation. We now go on to include parton recombination processes. The recombination processes contributing at leading order come from the terms $|M^{(2-2)}_{\gamma^* p \to klX}|^2$ and $2M^{(1-2)}_{\gamma^* p \to klX}[M^{(3-2)}_{\gamma^* p \to klX}]^*$, and the cut diagrams according to (3.4). In this section we regard the partons as scalar particles (i.e., the $\phi^3$ model). The results can easily be generalized to the case of QCD partons; this will be done later, in section 7. We consider the process of fig. 5, where figs. 5c-f are virtual. The contribution of the real diagram fig. 5a is given by eq. (5.1) for the scalar parton; the hard part is given by eq. (5.2). We define the parton correlation function (PCF) $f(x_1, x_2; x'_1, x'_2)$ and, correspondingly, the parton recombination function $P^{(2-2)}$. We shall discuss the PCF and the parton recombination function in sections 6 and 7, respectively. Therefore, we have eq. (5.6), where we have inserted a factor of unity. This is the evolution equation from fig. 5a. Similarly, using factorization in DIS, we obtain the contribution of fig. 5c. Now the PCF is defined analogously; however, we have the condition that the PCFs with cuts at different places are the same on the light-cone (fig. 6). The hard part is given by eq. (5.7), where the factor of 1/2 in (5.7) is needed for the cancellation of IR divergences and for momentum conservation, as we shall discuss shortly. One can re-understand this factor as follows: only half of the probe vertex connects with the partonic matrix in figs. 5c-f, as well as in figs. 4b-c, and the square root of the parton density accepts the contributions of the partonic processes through a parton line; that is, eq. (5.11). Since, for the given initial partons, we have eq. (5.12), we can conclude that the negative sign in eq. (5.10) arises from the sign of the energy deficit. In consequence, we obtain eq. (5.14). Comparing eqs. (5.14) with (5.6), we see that the same recombination function appears in both cases. We can calculate the contributions of figs. 5a-f in TOPT using the same method, and finally obtain the full hard contribution. Obviously, eq. (5.14) contains the momentum conservation condition. This completes the discussion of the first type of diagrams. As the next step, we discuss the interference terms $2M^{(1-2)}_{\gamma^* p \to klX}[M^{(3-2)}_{\gamma^* p \to klX}]^*$ shown in fig. 7. Proceeding similarly, we obtain the contributions from the interference processes in fig. 7 as eq. (5.17). In eq. (5.17), $P_{inter} = 0$ or $1$ implies that the interference processes are inhibited or exhibited, respectively. Now an interesting observation is that the negative sign occurs because $(l_\perp^2/(2x_l p) - l_\perp^2/(2x'_l p))$ changes its sign in going from $x_l < x'_l$ to $x_l > x'_l$ in fig. 8. The contributions of the vertices A and B have the same form, since the momenta of the partons a and b take the corresponding forms. The symmetry factor arises from the following symmetry: if both final partons are gluons or both are quarks (we do not distinguish quarks and antiquarks in this work), the corresponding virtual diagrams in figs. 7c-f are symmetric under the exchange of these two partons.
However, this symmetry is lost for the quark channel; in that case the symmetry factor takes the value 1/2, and otherwise it is 1. It might seem that there are different energy deficits among these contributions; however, they really share the same factor, arising from the term $(l_\perp^2/(2x_3 p) + l_\perp^2/(2x_4 p))^{-1}$. Therefore, one can make the corresponding replacements, since the $x_i$ are scaling variables. The final results from fig. 7 are given in eq. (5.21); obviously, the momentum conservation condition is also satisfied in eq. (5.21).

Parton correlation functions
In general, the parton density is a concept that is only defined at the twist-2 level; it can be expressed in terms of the product of the initial and final hadronic wave functions with the same parton configuration. The parton correlation function $f(x_1, x_2; x'_1, x'_2)$ is a generalization of the parton density beyond the leading twist. It has not yet been experimentally observed. In this section, therefore, we shall try to construct the connection between the parton correlation function and the parton density. For example, consider the correlation function for the case $x'_1 = x_1$ and $x'_2 = x_2$ in eq. (5.8); this is the number density of two partons, i.e., the probability of simultaneously finding two partons carrying fractions $x_1$ and $x_2$ of the proton momentum, respectively. In the quantum-mechanics approximation, we can represent this two-parton density in terms of the wave function of two partons in the proton. Similarly, we express the parton correlation function as the product of the initial and final hadron wave functions with different parton momenta. In general, the two-parton number density can be approximated as in eq. (6.5), where $R^2_{ab}(x_1, x_2)$ is the momentum correlation of the two initial partons, and $q_a(x_1)$ and $q_b(x_2)$ are the parton number densities. In order to estimate the value of $R^2_{ab}$ in eq. (6.5), we consider the process shown in fig. 9. Time-reversal invariance requires a relation in which A and B are the contributions of the hard parts in fig. 9. Therefore, since $R^2_{ab}$ is expressible in terms of hard parts, eq. (6.9) indicates that it is calculable within PQCD.

New evolution equations
We now apply the method used to describe scalar partons in section 5 to the realistic case of partons (quarks and gluons) interacting within QCD. In consequence, we have the following new twist-4 evolution equations for GG → qq and GG → GG, respectively. (a) GG → qq. In the contribution to the evolution equation for gluons, the factor 1/2 in the last factor arises from symmetry considerations, just as in eq. (5.20). However, this symmetry is broken by the cut in the real diagrams in the corresponding equation for quarks. (b) GG → GG. The contribution to the evolution equation for gluons is given correspondingly. We discuss the cases $P_{inter} = 1$ and $P_{inter} = 0$ separately. A: $P_{inter} = 1$. We can take $P_{inter} = 1$ if there is no reason to forbid three-parton recombination in the interference terms in the nucleon. Thus, we have eqs. (7.4) and (7.5). Equations (7.4) and (7.5) predict that the shadowing effect in the quark distributions is stronger than that in the gluon distribution, since there are two shadowing sources for quarks but only one shadowing source for gluons. B: $P_{inter} = 0$. This means that the interference terms are forbidden. An example of such a case is radiation recombination in a nucleus. We consider the recombination of partons which originate from different nucleons in a nucleus. A single parton cannot escape from the confinement region of a nucleon unless it forms a colour-singlet cluster with other partons.
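For reference, the factorized approximation of the two-parton density described around eq. (6.5) can be sketched as

f(x_1,x_2;x_1,x_2)\;\approx\;R^2_{ab}(x_1,x_2)\,q_a(x_1)\,q_b(x_2),

so that the momentum correlation $R^2_{ab}$ carries all of the information beyond the simple product of single-parton densities (a reconstruction from the text's description, not the paper's exact rendering).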
We define the probability that a parton leaks out of the confinement volume as w. Thus, eqs. (7.6) and (7.7) follow, and we can neglect the interference processes (7.7) because of the confinement condition w < 1. In this case, we have another form of the evolution equation, eqs. (7.8) and (7.9), with the integrations $dx_1\,dx_2\,dx_3\,dx_4\,d\Delta$. Now the sign of the right-hand side of eq. (7.9) is positive. This means that the shadowing in the quark distribution is weaker than that in the gluon distribution. In principle, we can calculate the parton recombination functions at order $O(\alpha_s^2)$ for every parton flavor over the whole x region. However, in the majority of cases parton recombination happens among the gluons at small x. For simplicity, we consider in this work only the case where all partons are gluons with small x values; we will discuss the recombination of partons in a general x range elsewhere. In this approximation, we can use the results of Mueller and Qiu in the calculations of the real process of fig. 1a of their work. Thus, the contributions of figs. 6a,b to $P^{(2-2)}_4$ come from the t- and u-channels as well as from their interference terms. One obtains eq. (7.10) under an assumption on the kinematics; eq. (7.10) then evaluates to the same result as that of Mueller and Qiu. If we take the gluon correlation function to have the factorized form, we obtain the simplified evolution equation arising from GG → GG in the small-x region, eq. (7.13). Note that the modifications of figs. 5a-b are related to $G(x_B, Q^2)$ in eq. (7.13) but not to $x_B G(x_B, Q^2)$, according to eq. (5.1) of our work. However, these real diagrams, figs. 5a-b (or fig. 1a of Mueller and Qiu), are regarded there as modifications of $x_B G(x_B, Q^2)$. Therefore, equation (7.13) differs from the GLR equation in its dependence on $x_1$ and $x_B$. In consequence, the new evolution equations for $P_{inter} = 1$ are given in eq. (7.14). Here, we have an extra conservation law, since the corresponding integral vanishes for any function $f(x_1)$. On the other hand, the new equation takes a different form if $P_{inter} = 0$.

Discussions and conclusions
The following interesting components of the new evolution equation derived in this paper are highlighted: 1. Through the derivations of sections 4 and 5, it seems there is an interesting "cutting rule" in DIS: the contributions of the cut diagrams in the sum (3.4) have an identical integral kernel, differing only by the factors given in eq. (8.1). The various terms appearing in the cutting rule (8.1) can be described in terms of the general structure (8.2) of the cut diagrams $G_\alpha(N)$ in TOPT, where $N_G$ is an overall numerator-and-symmetry factor that is independent of the cut; $i$ and $i+1$ (or $j$ and $j+1$) label the time-ordered lines at the left (or right) vertices; and the factor $\delta(\sum_f x_f)$ expresses the conservation of longitudinal momentum at the vertex. (a) The sign in the first factor of (8.1) is determined by the energy deficits in (8.2). For example, if a vertex passes through the cut line, the corresponding energy deficit changes its sign, as we have seen, for example, in (4.11). (b) The second factor takes the value 1/2 if the probe vertex is inserted in the initial line, as shown in (5.11). (c) The factor $\delta(x - x_B)$ is the direct result of the sum (3.4). (d) When the cut line moves its position, the contributions of the final states in (8.2) change the momentum symbols but do not change the structure of the intermediate state, according to the sum condition I in (3.4).
We also note that a virtual parton line carries a 4-dimensional integral and a real parton only a 3-dimensional integral in the covariant perturbation theory; in TOPT, however, since the virtual partons are replaced by effective real partons, the above-mentioned differences are contained in the energy deficits. In particular, when the cut line moves from cutting a loop d-d to cutting the line a-c in the process c → dd → e, we have similar expressions in (8.2). Thus, as the cut line moves from cutting a loop to not cutting that loop, the integral kernel keeps the same form; the difference lies only in the cutting positions. (e) The cut line can also cut the nonperturbative matrix elements with multiple initial partons. The reason is that an initial parton line on the light-cone can be moved from one side of the cut to the other (see fig. 6). Thus, we can use the same correlation function to represent the different hadronic parts and keep the same integral kernel. Because of this important property, the sum (3.4) shall include more complex cut diagrams when we study parton fusion or recombination. (f) Finally, the matrix can be factorized to obtain the probability interpretation; the reason is that we used the bare probe-vertex approximation, and the coefficient $C_q$ in (2.2) (or C in (5.2)) is canceled in the calculations at this approximation. 2. The singular terms in eq. (7.11) are canceled due to the symmetry of the small-x approximation. However, a soft initial parton may also give rise to IR divergences in the parton recombination process in the general case. We now show that such IR divergences can be canceled by the same method as in the DGLAP equation. For example, take $x_2 = 0$ in fig. 10; this implies $p_2 = 0$. Since the unpolarized structure functions only involve contributions from terms of even twist, we have $x_4 = 0$ and $x_1 = x'_1 = x_3$. According to (5.14), we find that the IR divergences can be canceled point-by-point at the IR pole. 3. Obviously, the new evolution equations (7.4), (7.5), (7.8) and (7.9) are different from the GLR equation. It is interesting that the properties and structure of the simplified low-x form (7.14) are similar to a modified GLR equation obtained earlier. However, the two equations really have different theoretical bases. The GLR equation and its modified form are based upon the AGK cutting rules. They sum three kinds of diagrams: cutting two ladders, one ladder and zero ladders, respectively. The first (see fig. 1a) is identical to our figs. 5a,b; however, the cut lines in the latter two figures (figs. 1b,c) break parton recombination, and these processes should be inhibited. In conclusion, parton recombination in a QCD evolution equation has been investigated using perturbation theory without the AGK cutting rules. The contributions from different cut and interference diagrams are summed, and infrared safety and momentum conservation are established. Time-ordered perturbation theory is developed to establish the connections among different cut diagrams. As a consequence, a new nonlinear evolution equation is derived on a different basis from the GLR equation. Furthermore, this new evolution equation is more detailed in structure than the GLR equation.

Figure Captions
Fig. 2 The basic amplitudes $M_{\gamma^* p \to klX}$. The dark circles denote the PQCD interaction with the correlation of the initial partons.
Fig. 3 Naive parton model of DIS.
Fig. 4 The leading-order splitting processes in DIS.
Fig. 5 The diagrams contributing to the leading recombination order from $|M^{(2-2)}_{\gamma^* p \to klX}|^2$.
Fig. 6 Identical hadronic parts in different cut graphs.
Fig. 7 The diagrams contributing to the leading recombination order from $2M^{(1-2)}_{\gamma^* p \to klX}[M^{(3-2)}_{\gamma^* p \to klX}]^*$.
Fig. 8 A diagrammatic illustration of the relation between the recombination functions P.
Amba Enterprises 20-04-2019 Amba Enterprises Ltd has informed BSE that the meeting of the Board of Directors of the Company is scheduled on 20/04/2019, inter alia, to consider and approve: This is to inform you that pursuant to Regulation 29 of SEBI (Listing Obligations and Disclosure Requirements) Regulations, 2015, a meeting of the Board of Directors of the Company will be held on Saturday, April 20, 2019 at the Company's Registered Office to consider the appointment of the Company Secretary and Chief Financial Officer of the Company. Ambassador Intra 20-04-2019 Ambassador Intra Holdings Ltd has informed BSE that the meeting of the Board of Directors of the Company is scheduled on 20/04/2019, inter alia, to consider and approve: This is to inform you that a meeting of the Board of Directors of the Company will be held on Saturday, April 20, 2019 at 03:00 PM at the registered office of the Company at 1093/1, 305, Sur Mount Complex, B/h. Iscon Mandir, S. G. Highway Road, Satellite, Jodhpur, Ahmedabad - 380059, Gujarat, inter alia, to consider and approve the following: To consider the appointment of the Company Secretary of the Company. Any other matter with the permission of the Chairman. Please take note of the same. Fiberweb (India) 20-04-2019 Fiberweb (India) Ltd has informed BSE that the meeting of the Board of Directors of the Company is scheduled on 20/04/2019, inter alia, to consider and approve: 1. To grant leave of absence, if any. 2. To confirm the minutes of the previous Board Meeting held on 30/03/2019. 3. To appoint an Executive Director of the Company. 4. Any other matter with the permission of the Chair. In compliance with the SEBI (Listing Obligations and Disclosure Requirements) Regulations, 2015, we are enclosing herewith the advertisement published in the newspapers, namely Savera India Times (Daman), The Free Press Journal and Navshakti, dated 17th April, 2019, for the meeting of the Board of Directors to be held on 20th April, 2019. HDFC Bank 20-04-2019 HDFC Bank Ltd has informed BSE that the meeting of the Board of Directors of the Company is scheduled on 20/04/2019, inter alia, to consider and approve: We wish to inform you that a meeting of the Board of Directors of HDFC Bank Limited will be held on Saturday, 20th April, 2019 to inter alia consider the audited financial results for the quarter and year ending 31st March, 2019 along with the consolidated accounts for the year ending 31st March, 2019 and the recommendation of dividend, if any. The trading window for dealing in securities of the Bank will remain closed from 25th March, 2019 to 22nd April, 2019 (both days inclusive) for the designated employees, directors, etc., pursuant to the applicable SEBI Regulations. This is for your information and appropriate dissemination. Indag Rubber 20-04-2019 Indag Rubber Ltd has informed BSE that the meeting of the Board of Directors of the Company is scheduled on 20/04/2019, inter alia, to consider and approve: (a) the audited financial results for the quarter and year ended March 31, 2019; (b) the recommendation of a final dividend for the financial year 2018-2019. Further, in pursuance of the SEBI (Prohibition of Insider Trading) Regulations, 2015, the trading window was closed on April 03, 2019 and would remain closed till April 22, 2019 (both days inclusive).
package com.shemich.mariobros.Sprites; import com.badlogic.gdx.Gdx; import com.badlogic.gdx.audio.Sound; import com.badlogic.gdx.maps.tiled.TiledMap; import com.badlogic.gdx.maps.tiled.TiledMapTileSet; import com.badlogic.gdx.math.Rectangle; import com.badlogic.gdx.physics.box2d.World; import com.shemich.mariobros.MarioBros; import com.shemich.mariobros.Scenes.Hud; import com.shemich.mariobros.Screens.PlayScreen; public class Coin extends InteractiveTileObject { private static TiledMapTileSet tileSet; private final int BLANK_COIN = 28; public Coin(PlayScreen screen, Rectangle bounds) { super(screen,bounds); tileSet = map.getTileSets().getTileSet("tileset_gutter"); fixture.setUserData(this); setCategoryFilter(MarioBros.COIN_BIT); } @Override public void onHeadHit() { Gdx.app.log("Coin","Collision"); if (getCell().getTile().getId() == BLANK_COIN) MarioBros.manager.get("audio/sounds/bump.wav", Sound.class).play(); else MarioBros.manager.get("audio/sounds/coin.wav",Sound.class).play(); getCell().setTile(tileSet.getTile(BLANK_COIN)); Hud.addScore(100); } }
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import functools
import inspect
import time
import typing
from typing import Any


def argtype_check(f):  # type: ignore
    # Decorator that checks, at call time, that each argument matches the
    # method's type annotations (Union, List, Dict and plain types).
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):  # type: ignore
        sig = inspect.signature(f)
        for k, v in sig.bind(self, *args, **kwargs).arguments.items():
            annot = sig.parameters[k].annotation

            # Union case
            if hasattr(annot, "__origin__") and annot.__origin__ is typing.Union:
                if type(v) not in annot.__args__:
                    msg = "`{}` should be provided as {}, got {}"
                    request_types = "/".join(map(lambda x: x.__name__, annot.__args__))
                    raise TypeError(msg.format(k, request_types, type(v).__name__))

            # List case
            #
            # NOTE: `typing.List[x].__origin__` is `typing.List` in Python 3.6,
            # but it is `list` in Python 3.7+
            elif hasattr(annot, "__origin__") and \
                    annot.__origin__ in (list, typing.List):
                request_elem_type = annot.__args__[0]
                if type(v) is not list:
                    msg = "`{}` should be provided as list[{}], got {}"
                    raise TypeError(msg.format(k, request_elem_type.__name__, type(v).__name__))
                if request_elem_type is not typing.Any:
                    unmatched_elem_types = \
                        list(filter(lambda x: type(x) is not request_elem_type, v))
                    if len(unmatched_elem_types) > 0:
                        msg = "`{}` should be provided as list[{}], got {} in elements"
                        raise TypeError(msg.format(
                            k, request_elem_type.__name__,
                            type(unmatched_elem_types[0]).__name__))

            # Dict case
            #
            # NOTE: `typing.Dict[k, v].__origin__` is `typing.Dict` in Python 3.6,
            # but it is `dict` in Python 3.7+
            elif hasattr(annot, "__origin__") and \
                    annot.__origin__ in (dict, typing.Dict):
                request_key_type, request_value_type = annot.__args__
                if type(v) is not dict:
                    msg = "`{}` should be provided as dict[{},{}], got {}"
                    raise TypeError(msg.format(
                        k, request_key_type.__name__,
                        request_value_type.__name__,
                        type(v).__name__))
                if request_key_type is not typing.Any:
                    unmatched_key_types = \
                        list(filter(lambda x: type(x) is not request_key_type, v.keys()))
                    if len(unmatched_key_types) > 0:
                        msg = "`{}` should be provided as dict[{},{}], got {} in keys"
                        raise TypeError(msg.format(
                            k, request_key_type.__name__,
                            request_value_type.__name__,
                            type(unmatched_key_types[0]).__name__))
                # NOTE: the original guarded this branch on the *key* type;
                # values must be checked against the value type annotation.
                if request_value_type is not typing.Any:
                    unmatched_value_types = \
                        list(filter(lambda x: type(x) is not request_value_type, v.values()))
                    if len(unmatched_value_types) > 0:
                        msg = "`{}` should be provided as dict[{},{}], got {} in values"
                        raise TypeError(msg.format(
                            k, request_key_type.__name__,
                            request_value_type.__name__,
                            type(unmatched_value_types[0]).__name__))

            # Other regular cases
            elif annot is not inspect._empty:
                assert not hasattr(annot, "__origin__"), \
                    "generics are not expected to reach this path"
                if annot not in [type(v), typing.Any] and not isinstance(v, annot):
                    msg = "`{}` should be provided as {}, got {}"
                    raise TypeError(msg.format(k, annot.__name__, type(v).__name__))

        return f(self, *args, **kwargs)

    return wrapper


def setup_logger() -> Any:
    from logging import getLogger, NullHandler, INFO
    logger = getLogger(__name__)
    logger.setLevel(INFO)
    logger.addHandler(NullHandler())
    return logger


def elapsed_time(f):  # type: ignore
    # Decorator that returns (result, wall-clock seconds) for each call.
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):  # type: ignore
        start_time = time.time()
        ret = f(self, *args, **kwargs)
        return ret, time.time() - start_time

    return wrapper
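A minimal usage sketch of the argtype_check decorator above; the Estimator class and its annotations are hypothetical:

import typing

class Estimator:
    @argtype_check
    def fit(self, name: str, options: typing.List[int]) -> str:
        return "{}: {}".format(name, options)

est = Estimator()
est.fit("model", [1, 2, 3])    # passes: arguments match the annotations
est.fit("model", [1, "2", 3])  # raises TypeError:
                               # `options` should be provided as list[int], got str in elements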
Indianapolis, Indiana - Indiana lawmakers are expected to debate a bill which could allow business owners to refuse to serve the LGBT community based on their religious beliefs. Senator Scott Schneider is expected to introduce the measure in the upcoming session that begins Tuesday. "The focus has been on same-sex marriage because that's the hot topic right now," Schneider told The Indianapolis Star. "It's important to have some religious freedom and protection." Micah Clark, executive director of the American Family Association (AFA), a group which opposes gay rights and is lobbying for the bill's passage, said that the bill would allow small business owners to refuse service to gay couples based on their religious beliefs. "The freedom of conscience bill is really about limiting government's ability to squelch freedom of religion, conscience or speech," he said. One version of the proposal states that Indiana may not burden a person's right to the exercise of religion unless it can prove that its actions are "essential to further a compelling governmental interest and is the least restrictive means of furthering" that interest. A person would also be able to claim religion as a defense in court. A federal appeals court's ruling striking down Indiana's gay marriage ban took effect in October.
class Solution:
    def solve(self, nums):
        """Return True if nums is a rotation of 1..n in increasing or
        decreasing order, e.g. [3, 4, 5, 1, 2] or [2, 1, 5, 4, 3]."""
        n = len(nums)
        if 1 not in nums:
            return False

        # Check for an increasing run 1, 2, ..., n starting at the index of 1,
        # wrapping around the end of the list.
        i = nums.index(1)
        if all(nums[(i + j + 1) % n] == nums[(i + j) % n] + 1 for j in range(n - 1)):
            return True

        # Otherwise check for a decreasing run n, n-1, ..., 1 starting at the
        # index of n, again wrapping around.
        if n not in nums:
            return False
        i = nums.index(n)
        return all(nums[(i + j + 1) % n] == nums[(i + j) % n] - 1 for j in range(n - 1))
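A quick usage sketch (the inputs are hypothetical, not from the original source):

s = Solution()
print(s.solve([3, 4, 5, 1, 2]))  # True: rotation of the increasing run 1..5
print(s.solve([2, 1, 5, 4, 3]))  # True: rotation of the decreasing run 5..1
print(s.solve([1, 2, 4, 3]))     # False: consecutive in neither direction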
Differentiation of learning disabled children from normal children using four coordination tasks. Fifty 8-year-old children, 25 classified as normal and 25 as learning disabled, participated in a study to determine whether they could be differentiated into their respective groups by using four tasks from the Devereux Test of Extremity Coordination: opposition, foot patting, finger wiggling, and heel-toe walking with the eyes closed. Each child received numerical scores based on the number of times he could perform a task in 10 seconds. A stepwise discriminant function analysis revealed that two tasks, opposition and foot patting, were significant discriminating variables. A resulting discriminant function prediction equation showed that, according to the results of the tasks tested, 78 percent of the sample had been correctly classified by previous methods.
The Coalition's vote is up and Labor's is down. Tony Abbott's approval rating is up and Bill Shorten's is down. Therefore, the Abbott advocates will argue, the voters are giving Abbott another chance and so should the Liberal Party caucus. The reason, they'll say, is that Australians like a fighter, and Abbott has proved to be a tough one. But look a little further. Seventy-two per cent of voters say that Abbott does not have the confidence of his own party. In other words, the people believe that Abbott lacks the basic qualification to remain leader. "They have read the writing on the wall for Mr Abbott," says the Fairfax pollster, Ipsos' Jess Elgood. This is consistent with the odds in the betting markets. Sportingbet says Abbott has a 75 per cent chance of being removed from the leadership before the next election. "Our punters are convinced that it is only a matter of time before Mr Turnbull has the nation's top job," according to Sportingbet's Andrew Brown. Elgood has a similar reading of her poll data: "It possibly indicates that the voters have already moved on from Mr Abbott. "But they have not despaired of the Liberal Party," anticipating a change of leader. This is the central point. After the Liberal Party spill motion, the people see Abbott as being on the way out. Public support continues to build for the two leading candidates to replace him. Asked to choose their preferred prime minister, 39 per cent nominated Turnbull, up by 4 percentage points in a month. Another 24 per cent chose Julie Bishop, also up by 4. Abbott ranked third, 19 per cent, down by a point, with only half the support enjoyed by Turnbull. Tellingly, Abbott's supporters of last recourse, avowed conservative voters, are also leaving him. Among self-identifying Coalition voters, the share choosing Abbott as preferred PM fell 3 percentage points, from 41 per cent to 38. And those preferring Turnbull rose by 6 points, from 24 per cent to 30. "Coalition voters have been sitting loyally with Abbott," observes Elgood, "but slowly they are drifting away." As Australia's collective expectation moves to a Turnbull prime ministership, Bill Shorten becomes a less attractive alternative. This explains the sharp fall in his popularity. Abbott made him popular by default; the prospect of Turnbull is making him less so. The claim that Abbott might be able to recover also overlooks the big historical finding of today's poll. On 10 positive leader attributes, the poll finds Abbott's ratings are "all negative, all at historical lows," reports Elgood. Worse yet, Abbott rates lower on eight of the 10 than all four of his immediate predecessors. When he moved a censure motion against Julia Gillard in 2011, Abbott said: "We have a Prime Minister who is both incompetent and utterly untrustworthy." Today's poll shows that Australia finds Abbott to be less competent and just as untrustworthy. By his own criteria, Abbott is a worse prime minister than Gillard. "It's hard to see his way back on these numbers," concludes Elgood. The evidence of the poll is that Abbott's is the prime ministership from Weekend at Bernie's. The people propping up his prime ministership may be proclaiming it alive and well, but today's poll results show that the electorate knows otherwise.
/**
 * Template method for creating and configuring the table.
 */
protected void initTable() {
    fTable = tableProvider.get();
    setTableCellRenderers();
    setTableCellEditors();
    fTable.addTableChangeListener(this);
}
Evaluation of airway blocks versus general anaesthesia for diagnostic direct laryngoscopy and biopsy for carcinoma larynx. A prospective randomised study of 100 patients divided into two groups was done to compare regional airway nerve blocks with general anaesthesia, evaluating intra-operative haemodynamic changes and the level of postoperative analgesia and sedation in both groups. In group I, a whole-airway block comprising bilateral superior laryngeal nerve block with bilateral glossopharyngeal block and recurrent laryngeal nerve block was given, and in group II general anaesthesia was given. The mean duration was 27 +/- 5 minutes in all cases; all patients were of ASA grade 3 or 4. Baseline and pre-operative values of pulse and blood pressure were noted and were recorded at 0, 5, 7, 9, 10 and 15 minutes. Postoperative sedation and VAS scores were recorded at 0, 5, 15 and 30 minutes initially and then hourly. The present study showed significant haemodynamic changes in group II, with a significant rise in mean arterial pressure and pulse rate during the peri-operative period, whereas in group I mean arterial pressure and pulse rate remained stable peri-operatively. Postoperative analgesia was significantly greater in group I and lasted longer than in group II, and patients were calmer and less agitated as assessed by the sedation score; in group II most of the patients required postoperative nebulisation, whereas in group I no patient needed nebulisation. In conclusion, we suggest that a regional airway block for anaesthesia in short procedures of the upper airways, and in cases of a predicted difficult airway where a safe airway must be secured, can be a very useful alternative to general anaesthesia.
Office versus ambulatory heart rate in the prediction of the cardiovascular risk. Numerous studies have shown that a high heart rate is associated with high blood pressure and various metabolic abnormalities, and that it is prospectively related to the development of hypertension and atherosclerosis. Almost all these data have been obtained from resting heart rate measured in the clinic, which is a highly variable clinical parameter. Ambulatory heart rate might afford greater precision of measurement than can be achieved using clinic measurement, as is suggested by recent data obtained in our laboratory. However, the lack of association of ambulatory heart rate with blood pressure and metabolic abnormalities suggests that it might be less predictive of cardiovascular morbidity than is clinic heart rate. A high heart rate might also induce the development of atherosclerotic lesions via hemodynamic disturbances. In this respect, heart rate recorded over the 24 h should be more representative of the whole-day arterial stress than are casual measurements. However, recent data indicate that the progression of coronary lesions in patients who have suffered myocardial infarction is predicted by the minimum heart rate rather than by average 24 h values. Only prospective studies based on the measurement both of clinic and of ambulatory heart rates will clarify the respective roles of these two clinical entities.
package com.shankasur.ecommerce.model;

public class ProductReview {

    private long reviewId;
    private String review;
    private Customer customer;

    public ProductReview() {
    }

    public ProductReview(String review, Customer customer) {
        this.review = review;
        this.customer = customer;
    }

    public long getReviewId() {
        return reviewId;
    }

    public void setReviewId(long reviewId) {
        this.reviewId = reviewId;
    }

    public String getReview() {
        return review;
    }

    public void setReview(String review) {
        this.review = review;
    }

    public Customer getCustomer() {
        return customer;
    }

    public void setCustomer(Customer customer) {
        this.customer = customer;
    }
}
The hepatobiliary-like excretory function of the placenta. A review. In the adult, several endogenous compounds, such as bile acids and biliary pigments, as well as many xenobiotics are mainly biotransformed and eliminated by the hepatobiliary system. However, because this function is immature in the foetus, this role is carried out by the placenta during the intrauterine life. This review describes current knowledge of the trophoblastic machinery responsible for this function, which includes transport and metabolic processes, similar in part to those existing in the mature liver. Because many of the studies reviewed here were conducted on human or rat near-term placentae, two aspects should be borne in mind: (i) although both types of placenta are haemochorial, profound species-specific differences at the structural, molecular and functional levels do exist, and (ii) the placenta is an organ undergoing continuous developmental changes, including its hepatobiliary-like excretory function.
The German federal migrant agency has admitted that it is letting in migrants even when it has full knowledge that the passports and documentation they carry have been forged. A new report suggests that the German agency in control of migration, the Federal Office for Migration and Refugees (BAMF), knew that passports used by migrants who flooded into the country last year were fake – but let the migrants attempt to claim asylum anyway. Die Welt reports that BAMF had processed some 217,465 passports, birth certificates, and driver’s licenses of asylum seekers and that 2,273 of these documents had been forged. According to German law, the penalty for forging documentation – especially passports and travel visas – is five years in prison. So far, no migrants have been arrested. Law enforcement in Germany is extremely concerned by the number of migrants who have entered the country on false documentation. The Federation of German Detectives (BDK) has called on BAMF to inform them when forged documents are found, with vice chairman of the BDK Michael Bohl saying that it wasn’t up to the discretion of BAMF to decide if the law had been broken or not. Mr. Bohl went on to stress that Islamic State, which has already smuggled in fighters during the migrant crisis, could use fake passports to create bank accounts and set up a finance network in Germany. He warned that migrants could wire their government refugee benefits to the terrorist organisation. The evidence of admitted migrants with fake passports also flies in the face of the German rules on granting asylum. According to the asylum regulations, if an asylum seeker who is undergoing the asylum process has either withheld information about their nationality or has tried to deceive authorities, then their asylum claim must be immediately rejected. Islamic State has already been accused of using fake passports in order to sneak its fighters into other countries. In the U.S., the group is said to have forged Syrian passports back in December of last year in an attempt to infiltrate the country. Earlier this year French Interior Minister Bernard Cazeneuve said that Islamic State had created an entire industry revolving around the creation of fake passports and other documents. According to the minister, the terror group had created forgeries of passports from Syria, Libya, and Iraq, where it still holds territory.
Cherished Album information Released in 1977, Cherished was the last Cher album produced by Snuff Garrett. Although The Cher Show was a top 10 ratings hit, the 1975-77 period was unsuccessful for her, and Cherished did no better than its predecessors, Stars and I'd Rather Believe in You. The album sold very little, failed in the charts and was ignored by critics and fans. Cher was also dissatisfied with the final results of the album; in an interview, she said that she never enjoyed making it and only did so because of her contract deal with Warner Bros. The style of the record recalls past hits "Dark Lady" and "Half Breed". Cherished is also Cher's first album without her name on the cover, because the title of the album is a pun on her name. Two unsuccessful singles were released. The first was "Pirate", which reached #93 in Billboard. This song was also the first track on some versions of I'd Rather Believe in You, and in Australia the track was retitled "Images". A follow-up to "Pirate", "War Paint and Soft Feathers", did not chart. As with her other two Warner Bros. releases, Cherished has never had a legitimate reissue in any format. According to Billboard, Cher owned this album's master rights and Warner had no right to reissue it.
Dancing Shiva: Brandis slams National Gallery of Australia over $5m purchase of Indian artefact The Attorney-General and the Minister for the Arts, George Brandis, has strongly criticised the National Gallery of Australia (NGA) for its decision in 2008 to buy a $5 million Indian artefact, the Shiva Nataraja, from disgraced art dealer Subhash Kapoor. Senator Brandis was commenting on a leaked internal review from the NGA that has been obtained by the ABC's Four Corners program. Speaking exclusively to Four Corners, Senator Brandis said: "Plainly what that report revealed is that the due diligence standards of the NGA, which are very high - in fact are world's best practice - were not in my view sufficiently complied with on this particular occasion." "The decision to acquire the object ... came in my view at a time when there was a sufficient level of doubt about the provenance of the object that the decision to recommend to the council the acquisition of the object at that time was incautious," he said. The Attorney-General's comments directly contradict assurances made by the gallery's director, Ron Radford, and by the current chairman of the NGA's council, Allan Myers, endorsing the level of due diligence carried out by the gallery. "As best I can judge, it was thorough, diligent and exhaustive," Mr Myers told Four Corners. "It's like many things; one goes through a process and at the end of that process one has to reach a judgement." And in a recent interview with the ABC, Mr Radford said: "Negotiations went on for a year as we were testing whether it had been stolen from anywhere or its provenance and we were checking all of that with great thoroughness." Expert denies advising gallery to buy Shiva However, evidence is mounting that the Shiva was stolen from a temple in the southern Indian region of Tamil Nadu. Highly damaging, fresh revelations have been made on Four Corners, including that the sole expert who, the NGA says, it consulted when deciding whether or not to purchase the Shiva, categorically denies giving advice on the purchase to the NGA. For months, the NGA has refused to reveal the identity of the expert, but Four Corners has now named him as Dr Ramachandran Nagaswamy, an acknowledged expert in Chola bronzes. Mr Myers told Four Corners Dr Nagaswamy had told the NGA the Shiva they were hoping to buy was "a piece of outstanding quality. He knew of no reason to suspect its provenance". But Dr Nagaswamy flatly denies this. He rejects the gallery's claim he spoke to the senior curator of Asian art, Robyn Maxwell, or that he offered any advice to the gallery to buy the object. Four Corners has established that no formal report was written by Dr Nagaswamy supporting the decision to buy the sculpture. The NGA says it wrote an email to Dr Nagaswamy on January 31, 2008, and, after receiving no response, followed that with a further email on February 6, 2008. The gallery says it received an email back the same day, with a number to contact Dr Nagaswamy on, and that he was finally reached at a telephone number in Delhi. During that conversation, between Ms Maxwell and Dr Nagaswamy, Four Corners has been told, his advice on the proposed purchase of the Shiva was to "go for it". But this is emphatically denied by Dr Nagaswamy.
He has told Four Corners the email response to Ms Maxwell, which was written on February 6, 2008, came from his son Mohan who lives in Miami, and that he himself has "absolutely no recollection" of having a telephone conversation in Delhi or anywhere else, with Ms Maxwell. He denies ever recommending the gallery purchase the Shiva. Dr Nagaswamy says his own rules on checking the provenance of an object and recommending its purchase are strict. "We have always authorised committees consisting of more than three experts to examine all aspects and their views are properly recorded and attested by their signatures before further actions. I know my rules well and do not deviate," he told the ABC. The NGA's internal review, obtained exclusively by Four Corners, contains further revelations about the checks which were made by the gallery, and the provenance it relied on in purchasing 21 other items from Kapoor. Several of the items list the previous owners as a woman, Raj Mehgoub, and her husband, Abdulla Mehgoub. But the documents naming them as former owners of the Shiva, and of another sculpture in the Art Gallery of New South Wales, were almost certainly faked. Four of the objects are listed as previously belonging to a woman called Selina Mohamed, who is the former girlfriend of Kapoor and who has been charged in New York with four counts of criminal possession of stolen property and one count of conspiracy. One of the items bought by the National Gallery from Kapoor in 2003 was a Seated Jina sculpture, dating from the 12th century. At the time, the gallery was given a letter of provenance signed by Raj Mehgoub. But in its confidential internal review, the gallery now says it believes the sculpture was bought by Kapoor at a Christie's auction in London in 2002. It says the Christie's sale "supports the possibility that the sculpture was legitimately acquired", but acknowledges that "the information suggests the letter signed by Mr Mehgoub was fraudulent". Documentation on Shiva purchase 'at best, thin' The Attorney-General's comments today relate to confidential written legal advice from cultural heritage lawyer Shane Simpson, of Simpsons Solicitors, on the proposed purchase of the Dancing Shiva. The advice was delivered to the NGA on January 13, 2008, just six weeks before the $5 million purchase was approved by the council of the gallery. In answering the question of whether legal title was established, Mr Simpson wrote: "On the evidence presented so far, this cannot be confirmed. The available evidence is minimal and inadequate investigations have been carried out. There needs to be much deeper enquiry made before title can be confirmed." Mr Simpson's report was damning. In relation to the purchase of the Shiva, he said: "The documentation is, at best, thin." "When the purchase price is high, the gallery must be aware that there is an inherent risk in the purchase," he warned. "There is no evidence that provides any clue as to the origin of the object." He told the gallery there were four likely possibilities, one of which was that "it was stolen from the original source (for example, a temple)". "How did the Shiva originally come to the market? If it was excavated, was it legally excavated? If it came from a temple ... well, you get the idea," he added.
"I am unable to determine whether or not the object was legally or illegally exported from India. "The absence of official documentation suggests that the object was exported without compliance with that legislation." Mr Simpson advised the gallery "with a high degree of certainty" that "there could be no successful claim made on the basis of the means of export of the work from India". "That is not to say that the Indian government could not make submissions to the Australian Government for the return of the work - simply that it would have no right to seek legal redress pursuant to the UNESCO Convention, the Indian legislation or the Protection of Movable Cultural Heritage Act 1986," he said. But he added: "If there are problems in respect of any of the above, what is the likelihood that the NGA would face highly public restitution proceedings?" As revealed on Four Corners, this is now occurring. The Indian High Commission has lodged a formal request with the Attorney-General's Department asking for the Shiva to be returned to India, where Kapoor is in prison awaiting trial on charges relating to the smuggling of looted artefacts. Police in India have told Four Corners they want the Shiva returned to be presented as material evidence at his trial. "This is a matter that will be a subject of discussion," Senator Brandis said. "I've raised it with my colleague, Julie Bishop, the Minister for Foreign Affairs, and it will be a matter of discussion between the governments." Four Corners understands discussions with the NGA are being held this week. Topics: library-museum-and-gallery, arts-and-entertainment, visual-art, law-crime-and-justice, corruption, fraud-and-corporate-crime, canberra-2600, australia, india, asia First posted
A new poll suggests nearly three-quarters of Edmonton homeowners are tired of increasing municipal taxes. Prosperity Edmonton said 73 per cent of those who participated in its Leger poll wanted a hold on tax increases, and 51 per cent believed the city should hold off on any new municipal spending, “even if that means the city can’t improve the services it offers or begin providing new services.” Eighty-two per cent of respondents also said taxes will be an important factor in their voting decisions for the next municipal election, the poll suggests. The association said the poll measured homeowners’ opinions on city spending, taxation rates and other related issues. City of Edmonton administration has recommended a 3.3 per cent tax hike for 2019. It would mean the average household would pay an extra $80 next year. Nearly half of the additional funds would go towards law enforcement. The poll also indicated 63 per cent of respondents supported operating new recreation centres on a revenue-neutral model — possibly by contracting a third party to operate the facilities — as a way to decrease spending and avoid tax increases. Prosperity Edmonton is a group of business associations from various sectors, including restaurants, retail outlets and construction, that was formed to represent its members and press for a different tax and policy climate. The association has also been vocal about its opposition to the rise of property taxes for businesses, with some businesses experiencing an increase of over $100,000 in the last two years. In June, Chamber of Commerce president and CEO Janet Riopel said the increases have made it difficult for businesses to survive. In September, Mayor Don Iveson unveiled his five-point plan for what he called “Edmonton’s toughest budget in a decade.” It included a locally based levy that would be charged to developers and then passed along to new home buyers in new neighbourhoods, which Iveson said would relieve pressure on Edmontonians by not increasing taxes to homeowners and businesses that will not get a benefit from a development. This Leger poll was conducted on behalf of Prosperity Edmonton between Oct. 23 and Nov. 5, 2018, using a sample of 502 Edmonton homeowners aged 18+ from Leger’s Computer Aided Web Interviewing system. Data was weighted by quadrant, gender and age. Leger said as a non-random online survey, a margin of error is not reported. Had the data for the general population been collected using a probability sample, results for a sample size of 502 would be statistically accurate to within ±4.4 percentage points, 19 times out of 20.
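The ±4.4 figure quoted above is what the standard margin-of-error formula for a simple random sample gives at the 95% confidence level; a quick check (a sketch, using the conventional z = 1.96 and the worst-case proportion p = 0.5):

import math

n = 502   # sample size reported by Leger
z = 1.96  # z-score for 95% confidence ("19 times out of 20")
p = 0.5   # worst-case proportion, which maximizes the margin of error

moe = z * math.sqrt(p * (1 - p) / n)
print(f"margin of error: +/-{moe * 100:.1f} percentage points")  # -> +/-4.4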
Originally Published: March 1, 2018 6:01 a.m. Richard Arthur Tomlinson, 78, a resident of Dewey, Arizona, born Sept. 9, 1939, in Allentown, Pennsylvania, died Feb. 20, 2018, in Prescott, Arizona. Services will be at 11 a.m. March 3, 2018, at Emmanuel Lutheran Church, 7763 E. Long Look, Prescott Valley, Arizona, 86314. Ruffner-Wakelin Funeral Homes and Crematory assisted the family with arrangements. Please log on to www.ruffnerwakelin.com to sign the online guestbook and share a memory with the family.
import android.annotation.SuppressLint;
import android.annotation.TargetApi;
import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
import android.content.pm.ApplicationInfo;
import android.content.pm.PackageManager;
import android.database.Cursor;
import android.net.Uri;
import android.os.Build;
import android.provider.BaseColumns;
import android.provider.ContactsContract;
import android.provider.Settings;
import android.text.TextUtils;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

// DataUtil and Logger are project-local helpers; their imports depend on the package layout.

/**
 * Created by Oleg Tarashkevich on 17.05.16.
 */
public final class TechUtil {

    private static String advertisingId;

    /** Heuristic check for emulator builds based on Build.* fingerprints. */
    public static boolean isRunningOnEmulator() {
        boolean result = Build.FINGERPRINT.startsWith("generic")
                || Build.FINGERPRINT.startsWith("unknown")
                || Build.MODEL.contains("google_sdk")
                || Build.MODEL.contains("Emulator")
                || Build.MODEL.contains("Android SDK built for x86")
                || Build.MANUFACTURER.contains("Genymotion");
        if (result) return true;
        result = Build.BRAND.startsWith("generic") && Build.DEVICE.startsWith("generic");
        if (result) return true;
        return "google_sdk".equals(Build.PRODUCT);
    }

    /** Looks for an `su` binary in the usual locations to detect rooted devices. */
    public boolean isRooted() {
        String[] places = {"/sbin/", "/system/bin/", "/system/xbin/", "/data/local/xbin/",
                "/data/local/bin/", "/system/sd/xbin/", "/system/bin/failsafe/", "/data/local/"};
        for (String where : places) {
            if (new File(where + "su").exists()) {
                return true;
            }
        }
        return false;
    }

    public static String getDeviceModel() {
        return Build.MANUFACTURER + " - " + Build.MODEL;
    }

    public static String getAndroidID() {
        return Settings.Secure.getString(
                DataUtil.getInstance().getContext().getContentResolver(),
                Settings.Secure.ANDROID_ID);
    }

    /** Returns the cached advertising id; callers must populate it elsewhere. */
    public static String getAdvertisingId() {
        return advertisingId;
    }

    public String getAndroidVersion() {
        return Build.VERSION.SDK_INT + " (" + Build.VERSION.RELEASE + ") root: " + isRooted();
    }

    public int getVersionSDK() {
        return Build.VERSION.SDK_INT;
    }

    /** Returns the primary ABI of the device. */
    @TargetApi(Build.VERSION_CODES.LOLLIPOP)
    public static String getBuildVersion() {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
            return Build.SUPPORTED_ABIS[0];
        }
        return Build.CPU_ABI;
    }

    /** Starts a phone call; requires the CALL_PHONE permission. */
    @SuppressLint("MissingPermission")
    public static void phone(String number) {
        if (!TextUtils.isEmpty(number)) {
            Intent callIntent = new Intent(Intent.ACTION_CALL, Uri.parse("tel:" + number));
            callIntent.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
            try {
                DataUtil.getInstance().getContext().startActivity(callIntent);
            } catch (Throwable e) {
                Logger.e(e);
            }
        }
    }

    /**
     * Enumerates structured names from the contacts provider. Note that the
     * StructuredName table carries no phone column, so this method cannot filter
     * by the given number; it returns the first display name found. Number-based
     * lookups should use getContactDisplayNameByNumber() instead.
     */
    public static String displayName(String phoneNumber) {
        String contactName = "";
        try {
            List<Person> persons = new ArrayList<>();
            if (!TextUtils.isEmpty(phoneNumber)) {
                ContentResolver cr = DataUtil.getInstance().getContext().getContentResolver();
                String whereName = ContactsContract.Data.MIMETYPE + " = ?";
                String[] whereNameParams = new String[]{
                        ContactsContract.CommonDataKinds.StructuredName.CONTENT_ITEM_TYPE};
                Cursor cursor = cr.query(ContactsContract.Data.CONTENT_URI, null, whereName,
                        whereNameParams, ContactsContract.CommonDataKinds.StructuredName.GIVEN_NAME);
                while (cursor.moveToNext()) {
                    Person person = new Person();
                    person.given = cursor.getString(cursor.getColumnIndex(
                            ContactsContract.CommonDataKinds.StructuredName.GIVEN_NAME));
                    person.middle = cursor.getString(cursor.getColumnIndex(
                            ContactsContract.CommonDataKinds.StructuredName.MIDDLE_NAME));
                    person.family = cursor.getString(cursor.getColumnIndex(
                            ContactsContract.CommonDataKinds.StructuredName.FAMILY_NAME));
                    person.display = cursor.getString(cursor.getColumnIndex(
                            ContactsContract.CommonDataKinds.StructuredName.DISPLAY_NAME));
                    persons.add(person);
                }
                cursor.close();
                if (!persons.isEmpty()) {
                    contactName = persons.get(0).display;
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return contactName;
    }

    /** Resolves a phone number to a contact display name via PhoneLookup. */
    public static String getContactDisplayNameByNumber(String number) {
        String name = "?";
        try {
            Uri uri = Uri.withAppendedPath(ContactsContract.PhoneLookup.CONTENT_FILTER_URI,
                    Uri.encode(number));
            ContentResolver contentResolver = DataUtil.getInstance().getContext().getContentResolver();
            Cursor contactLookup = contentResolver.query(uri,
                    new String[]{BaseColumns._ID, ContactsContract.PhoneLookup.DISPLAY_NAME},
                    null, null, null);
            try {
                if (contactLookup != null && contactLookup.getCount() > 0) {
                    contactLookup.moveToNext();
                    name = contactLookup.getString(contactLookup.getColumnIndex(
                            ContactsContract.Data.DISPLAY_NAME));
                }
            } finally {
                if (contactLookup != null) {
                    contactLookup.close();
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return name;
    }

    /** Walks every contact and logs phones, emails, notes, postal addresses, IMs and organizations. */
    public void readContacts() {
        ContentResolver cr = DataUtil.getInstance().getContext().getContentResolver();
        Cursor cur = cr.query(ContactsContract.Contacts.CONTENT_URI, null, null, null, null);
        if (cur.getCount() > 0) {
            while (cur.moveToNext()) {
                String id = cur.getString(cur.getColumnIndex(ContactsContract.Contacts._ID));
                String name = cur.getString(cur.getColumnIndex(ContactsContract.Contacts.DISPLAY_NAME));
                if (Integer.parseInt(cur.getString(cur.getColumnIndex(
                        ContactsContract.Contacts.HAS_PHONE_NUMBER))) > 0) {
                    System.out.println("name : " + name + ", ID : " + id);

                    // Get the phone numbers
                    Cursor pCur = cr.query(ContactsContract.CommonDataKinds.Phone.CONTENT_URI, null,
                            ContactsContract.CommonDataKinds.Phone.CONTACT_ID + " = ?",
                            new String[]{id}, null);
                    while (pCur.moveToNext()) {
                        String phone = pCur.getString(
                                pCur.getColumnIndex(ContactsContract.CommonDataKinds.Phone.NUMBER));
                        System.out.println("phone " + phone);
                    }
                    pCur.close();

                    // Get email addresses and their types
                    Cursor emailCur = cr.query(ContactsContract.CommonDataKinds.Email.CONTENT_URI, null,
                            ContactsContract.CommonDataKinds.Email.CONTACT_ID + " = ?",
                            new String[]{id}, null);
                    while (emailCur.moveToNext()) {
                        String email = emailCur.getString(
                                emailCur.getColumnIndex(ContactsContract.CommonDataKinds.Email.DATA));
                        String emailType = emailCur.getString(
                                emailCur.getColumnIndex(ContactsContract.CommonDataKinds.Email.TYPE));
                        System.out.println("Email " + email + " Email Type : " + emailType);
                    }
                    emailCur.close();

                    // Get note
                    String noteWhere = ContactsContract.Data.CONTACT_ID + " = ? AND "
                            + ContactsContract.Data.MIMETYPE + " = ?";
                    String[] noteWhereParams = new String[]{id,
                            ContactsContract.CommonDataKinds.Note.CONTENT_ITEM_TYPE};
                    Cursor noteCur = cr.query(ContactsContract.Data.CONTENT_URI, null,
                            noteWhere, noteWhereParams, null);
                    if (noteCur.moveToFirst()) {
                        String note = noteCur.getString(
                                noteCur.getColumnIndex(ContactsContract.CommonDataKinds.Note.NOTE));
                        System.out.println("Note " + note);
                    }
                    noteCur.close();

                    // Get postal address, restricted to this contact's StructuredPostal rows
                    String addrWhere = ContactsContract.Data.CONTACT_ID + " = ? AND "
                            + ContactsContract.Data.MIMETYPE + " = ?";
                    String[] addrWhereParams = new String[]{id,
                            ContactsContract.CommonDataKinds.StructuredPostal.CONTENT_ITEM_TYPE};
                    Cursor addrCur = cr.query(ContactsContract.Data.CONTENT_URI, null,
                            addrWhere, addrWhereParams, null);
                    while (addrCur.moveToNext()) {
                        String poBox = addrCur.getString(addrCur.getColumnIndex(
                                ContactsContract.CommonDataKinds.StructuredPostal.POBOX));
                        String street = addrCur.getString(addrCur.getColumnIndex(
                                ContactsContract.CommonDataKinds.StructuredPostal.STREET));
                        String city = addrCur.getString(addrCur.getColumnIndex(
                                ContactsContract.CommonDataKinds.StructuredPostal.CITY));
                        String state = addrCur.getString(addrCur.getColumnIndex(
                                ContactsContract.CommonDataKinds.StructuredPostal.REGION));
                        String postalCode = addrCur.getString(addrCur.getColumnIndex(
                                ContactsContract.CommonDataKinds.StructuredPostal.POSTCODE));
                        String country = addrCur.getString(addrCur.getColumnIndex(
                                ContactsContract.CommonDataKinds.StructuredPostal.COUNTRY));
                        String type = addrCur.getString(addrCur.getColumnIndex(
                                ContactsContract.CommonDataKinds.StructuredPostal.TYPE));
                        // Do something with these....
                    }
                    addrCur.close();

                    // Get instant messenger entries
                    String imWhere = ContactsContract.Data.CONTACT_ID + " = ? AND "
                            + ContactsContract.Data.MIMETYPE + " = ?";
                    String[] imWhereParams = new String[]{id,
                            ContactsContract.CommonDataKinds.Im.CONTENT_ITEM_TYPE};
                    Cursor imCur = cr.query(ContactsContract.Data.CONTENT_URI, null,
                            imWhere, imWhereParams, null);
                    if (imCur.moveToFirst()) {
                        String imName = imCur.getString(
                                imCur.getColumnIndex(ContactsContract.CommonDataKinds.Im.DATA));
                        String imType = imCur.getString(
                                imCur.getColumnIndex(ContactsContract.CommonDataKinds.Im.TYPE));
                    }
                    imCur.close();

                    // Get organizations
                    String orgWhere = ContactsContract.Data.CONTACT_ID + " = ? AND "
                            + ContactsContract.Data.MIMETYPE + " = ?";
                    String[] orgWhereParams = new String[]{id,
                            ContactsContract.CommonDataKinds.Organization.CONTENT_ITEM_TYPE};
                    Cursor orgCur = cr.query(ContactsContract.Data.CONTENT_URI, null,
                            orgWhere, orgWhereParams, null);
                    if (orgCur.moveToFirst()) {
                        String orgName = orgCur.getString(orgCur.getColumnIndex(
                                ContactsContract.CommonDataKinds.Organization.DATA));
                        String title = orgCur.getString(orgCur.getColumnIndex(
                                ContactsContract.CommonDataKinds.Organization.TITLE));
                    }
                    orgCur.close();
                }
            }
        }
        cur.close();
    }

    public static boolean hasPackage(Context context, String packageName) {
        boolean hasPackage = false;
        try {
            PackageManager pm = context.getPackageManager();
            List<ApplicationInfo> apps = pm.getInstalledApplications(0);
            for (ApplicationInfo app : apps) {
                if (app.packageName.equalsIgnoreCase(packageName)) {
                    hasPackage = true;
                    break;
                }
            }
        } catch (Throwable e) {
            e.printStackTrace();
        }
        return hasPackage;
    }

    public static class Person {
        String name;
        String given;
        String middle;
        String family;
        String display;
    }
}
Determination of the myocardial performance index in deteriorating grades of intrauterine growth restriction and its link to adverse outcomes The aim of this study is to determine the fetal modified myocardial performance index (ModMPI) and E-wave/A-wave peak velocities (E/A ratio) in deteriorating grades of intrauterine growth restriction (IUGR) and its link to adverse outcomes defined as perinatal death, hypoxic ischemic encephalopathy, neonatal resuscitation, neonatal cord pH <7.15, intraventricular hemorrhage and bronchopulmonary dysplasia.
Q: spin eigenstates representation in QM In an exercise in a Quantum Mechanics text (Sakurai, Modern Quantum Mechanics) I completed, I showed that the eigenstates $| \mathbf{S} \cdot \hat{n}; + \rangle$ of $$\mathbf{S} \cdot \hat{n} | \mathbf{S} \cdot \hat{n}; + \rangle = \left(\frac{\hbar}{2}\right)| \mathbf{S} \cdot \hat{n}; + \rangle$$ are as follows $$| \mathbf{S} \cdot \hat{n}; + \rangle = \cos\left(\frac{\beta}{2}\right)| + \rangle + \sin\left(\frac{\beta}{2}\right)e^{i \alpha}|-\rangle.$$ Further it states that given that $\alpha = 0$ we have the normalized eigenstate $$\left(\frac{1 + \cos \beta}{2}\right)^{1/2}\dbinom{1}{\frac{\sin \beta}{1+\cos \beta}}.$$ Can anyone see where this comes from? As I see it, if $\alpha = 0$ then, given that $|+ \rangle = \dbinom{1}{0}$ and $|- \rangle = \dbinom{0}{1}$, we have $| \mathbf{S} \cdot \hat{n}; + \rangle = \dbinom{\cos(\frac{\beta}{2})}{\sin(\frac{\beta}{2})}$, which is already normalized. What am I missing? Thanks. A: To get the eigenvector in the form as given, use the following two identities: $\cos(\beta/2) = ((1+\cos\beta)/2)^{1/2}$ and $\sin(\beta/2) = ((1-\cos\beta)/2)^{1/2}$. To get unity as the first component, you need to factor out the $\cos$ term, and after some simplification you should get the desired vector form. As for why one should go for a more complicated expression when both have the property of normalisation, it seems that the advantage of the second form is in the $\beta$ factor: you just need to insert the given angle (whatever the context of the angle) and not divide by 2. (This helps with mental calculations for typical angles like $\pi, \pi/2$, but yeah, I see that doesn't hold much ground.) There is also the 1 element, which might make calculations easier when dealing with matrices (action of operators).
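To spell out the factoring step, using only the half-angle identities above: $$\dbinom{\cos(\beta/2)}{\sin(\beta/2)} = \cos\left(\frac{\beta}{2}\right)\dbinom{1}{\tan(\beta/2)} = \left(\frac{1 + \cos \beta}{2}\right)^{1/2}\dbinom{1}{\frac{\sin \beta}{1+\cos \beta}},$$ since $\tan(\beta/2) = \frac{\sin(\beta/2)}{\cos(\beta/2)} = \frac{2\sin(\beta/2)\cos(\beta/2)}{2\cos^2(\beta/2)} = \frac{\sin\beta}{1+\cos\beta}$ by the double-angle formulas. The two column vectors therefore describe the same normalized state.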
/**
 * Stops and releases the cloud anchor manager and removes all placed rewards.
 */
private void destroySession() {
    if (cloudAnchorManager != null) {
        cloudAnchorManager.stop();
        cloudAnchorManager = null;
    }
    clearVisuals();
}
1. Technical Field This disclosure generally relates to the field of luminaires, and more particularly to dissipation of the heat generated by the ballast electronics of a luminaire. 2. Description of the Related Art With the increasing trend toward energy conservation and for various other reasons, including replacement of gas-vapor lamps, solid-state lighting has become more and more popular as the source of illumination in a wide range of applications. As generally known, solid-state lighting refers to a type of lighting that emits light from a solid object, such as a block of semiconductor, rather than from a vacuum or gas tube as is the case in traditional lighting. Examples of solid-state lighting include light-emitting diodes (LEDs), organic light-emitting diodes (OLEDs), and polymer light-emitting diodes (PLEDs). Solid-state lighting as compared to traditional lighting generates visible light with reduced parasitic energy dissipation in the form of reduced heat generation. Further, solid-state lighting tends to have an increased lifespan compared to traditional lighting. This is because, due to its solid-state nature, solid-state lighting provides greater resistance to shock, vibration, and wear. An LED lamp is a type of solid-state lighting that utilizes LEDs as a source of illumination, and typically has clusters of LEDs in a suitable housing. The LEDs in an LED lamp typically have very low dynamic resistance, with the same voltage drop for widely-varying currents. Thus, the LEDs cannot be connected directly to most power sources, such as the 120-volt AC mains commonly available in the U.S., without causing damage to the LEDs. Consequently, an electronic ballast is used to transform the high voltage and current from the AC mains into a typically lower voltage with a regulated current. The electronic ballasts used in LED lamps have a typical conversion efficiency of 75%-95%, and more typically 85%. This means that 5%-25% of the energy used by a solid-state luminaire is wasted as heat generated by the electronic ballast. This heat must be removed from the electronic ballast to prevent premature failure of the electronic components of the ballast. In a high-flux luminaire of, for example, 40 watts, about 8.8 watts of waste heat must be removed. However, a passive cooling method using heat-sink fins will not likely be able to keep the temperature rise of the electronic components within safe limits if the ballast is installed in a recessed “can light” or security light type of luminaire. This is because, with such enclosed lamp mounting spaces, there is insufficient airflow to safely cool the electronic ballast. There is, therefore, a need for an active cooling method and apparatus to more effectively remove the heat generated by the electronic ballast in solid-state lighting, such as an LED lamp, to keep the temperature of the electronic components of the ballast within safe limits.
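As a sanity check on the figures quoted above (a sketch, not from the disclosure itself: it treats the 40 watts as the luminaire's total input power, which together with the quoted 8.8 watts of waste heat implies a ballast efficiency of 78%, inside the stated 75%-95% range):

def ballast_waste_heat(input_power_w, efficiency):
    # Heat dissipated by the ballast, with input_power_w taken as the
    # total power drawn by the luminaire (an assumption, see above).
    return input_power_w * (1.0 - efficiency)

# A 40 W luminaire with a 78% efficient ballast, within the quoted
# 75%-95% range, yields the ~8.8 W waste-heat figure from the text.
print(ballast_waste_heat(40.0, 0.78))  # 8.8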
From Open Access to Open Science: innovation in scholarly communication Open Science is often presented as the overarching umbrella of Open Access and Open Data, amongst others. In this article, we start from the finding that, for many years, Open Access to knowledge and research has been a generally accepted principle in Latin America. The Open Access movement in Latin America can even be considered a global pioneer with regards to raising the visibility of research output through channels outside the traditional scholarly communication model. In terms of Open Data, the LEARN project has come to the conclusion that, while important challenges remain in terms of actual implementations of policies, institutions in Latin America (and the Caribbean) are generally well aware of the importance of research data management (RDM) and seem to accept the general principle of openness of data from publicly funded research. This makes Latin America, in principle, well positioned to move towards Open Science, as a way to rethink the traditional scholarly communication model, and explore more innovative, transparent and inclusive ways to exchange research output on a regional and global level.
Uniform price versus discriminatory auctions in bond markets: An experimental analysis based on a multi-agent system The uniform price auction and the discriminatory price auction are both used in bond issuance. Which auction has higher market efficiency is still an unresolved problem. In this paper, we compare uniform price and discriminatory price auctions in simulated experiments. The results show that the uniform price auction is the better choice in the long term, while the discriminatory price auction is efficient as a short-term policy.
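The abstract does not include the authors' simulation code; as an illustration of the comparison being described, here is a minimal, self-contained sketch (all parameters hypothetical) of issuer revenue under the two pricing rules in one sealed-bid round:

import random

def auction_revenue(bids, supply, uniform=True):
    # Sell `supply` identical bonds to the highest bidders.
    # Uniform pricing charges every winner the lowest accepted (stop-out) bid;
    # discriminatory (pay-as-bid) pricing charges each winner their own bid.
    winners = sorted(bids, reverse=True)[:supply]
    if uniform:
        return min(winners) * len(winners)
    return sum(winners)

random.seed(1)
bids = [round(random.uniform(95, 105), 2) for _ in range(20)]  # hypothetical bids
print("uniform:       ", auction_revenue(bids, supply=10, uniform=True))
print("discriminatory:", auction_revenue(bids, supply=10, uniform=False))

In practice bidders shade their bids differently under the two rules, which is presumably what the paper's learning agents capture; this sketch holds the bids fixed and only contrasts the pricing mechanics.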
<gh_stars>0 import sys sys.path.append('/net/wujial/py-R-FCN/caffe/python') sys.path.append('/net/wujial/py-R-FCN/lib') import numpy as np from datasets.factory import get_imdb imdb = get_imdb('voc_2007_test') path_to_detections = "/net/wujial/py-R-FCN/output/rfcn_alt_opt_5step_ohem/voc_2007_test/resnet50_rfcn_mask_ohem_iter_80000/detections.pkl" all_boxes = np.load(path_to_detections) output_dir = '.' imdb.evaluate_detections(all_boxes, output_dir)
The present invention relates to a video signal detector for use with a color television receiver wherein an image signal motion between two consecutive frames is detected, and more particularly to a motion detector suitable for detecting a change (motion) of chrominance signal components of an image using a composite color television signal. In an NTSC system television receiver, the phase of a chrominance sub-carrier is inverted between 1st and 2nd consecutive frames when a video signal of a still image is received. If a composite video signal of a 1st frame delayed by one frame period is added to a composite video signal of a 2nd frame, the chrominance sub-carriers of the 1st and 2nd frames are cancelled out so that a luminance signal can be obtained. If a composite video signal delayed by one frame period is subtracted from a composite video signal of a 2nd frame, a luminance signal is removed and a chrominance signal can be separated. In this manner, in the case of a video signal of a still image, cross-components such as cross-color and cross-luminance, i.e., hanging dots, are substantially eliminated, enabling high image quality in a television receiver. However, there is no correlation between 1st and 2nd frame video signals of a moving image. Therefore, if such processing of composite video signals between frames is performed when a television receiver receives video signals of a moving image, cross-components contained in the luminance signal or the chrominance signal actually increase. As a result, the quality of an image reproduced on a picture tube deteriorates. In view of this, if there is a motion of an image, the processing of video signals between frames is suspended, and in-field processing of video signals, such as processing of video signals between lines, is used instead. To this end, a detector for detecting a motion of an image is needed. A conventional detector is described in JP-A No. 58-115995. This detector, however, does not take into consideration the detection of motion in video signals whose luminance signal levels do not change between two frames but whose hues and saturations differ, namely video signals having a motion in chrominance. Thus, this detector has a low sensitivity for detecting a motion in chrominance, resulting in large hanging dots over the whole image area having a chrominance motion.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MixedInstancesPolicySpec) DeepCopyInto(out *MixedInstancesPolicySpec) { *out = *in if in.Strategy != nil { in, out := &in.Strategy, &out.Strategy *out = new(string) **out = **in } if in.SpotPools != nil { in, out := &in.SpotPools, &out.SpotPools *out = new(int64) **out = **in } if in.BaseCapacity != nil { in, out := &in.BaseCapacity, &out.BaseCapacity *out = new(int64) **out = **in } if in.SpotRatio != nil { in, out := &in.SpotRatio, &out.SpotRatio *out = new(intstr.IntOrString) **out = **in } if in.InstancePool != nil { in, out := &in.InstancePool, &out.InstancePool *out = new(string) **out = **in } if in.InstanceTypes != nil { in, out := &in.InstanceTypes, &out.InstanceTypes *out = make([]*InstanceTypeSpec, len(*in)) for i := range *in { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] *out = new(InstanceTypeSpec) **out = **in } } } }
Usefulness of COVID-19 screen-and-test approach in pregnant women: an experience from a country with low COVID-19 burden Abstract Objectives Information on the usefulness of screen-and-test strategies of pregnant women for severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2) is lacking. Methods We retrospectively reviewed the Ljubljana Maternity Hospital database and searched for pregnant women, who were admitted to the hospital between March 15 and May 16, 2020, for a planned procedure or hospitalization. Their medical records were examined and SARS-CoV-2 test results were retrieved. Results During the two-month period analyzed, there were a total of 265 scheduled admissions of pregnant women to our hospital. Two hundred two (76.2%) were tested for SARS-CoV-2 1 day prior to admission. All tested negative for SARS-CoV-2 RNA, regardless of having coronavirus disease 2019 (COVID-19)-compatible signs or symptoms (n=28) or not (n=174). Conclusions In a population with a low SARS-CoV-2 burden, usefulness of universal testing of pregnant women before admission to the hospital is limited. We recommend that obstetric units in regions with low SARS-CoV-2 burden enforce rational use of personal protective equipment and diligent screening protocols using targeted questionnaires, whereas SARS-CoV-2 laboratory testing should be performed only in screen-positives: those with high clinical suspicion of COVID-19 and/or suspected epidemiological history.
Niche Partitioning in Theropod Dinosaurs: Diet and Habitat Preference in Predators from the Uppermost Cedar Mountain Formation (Utah, U.S.A.) We explore hypothetical ecologies to explain diversity among predatory dinosaurs in North America's medial Cretaceous, based on occurrence, tooth morphology, and stable isotope analysis. The Mussentuchit local fauna, Utah, USA, is among the best-known terrestrial vertebrate assemblages from the Cretaceous. Study samples include teeth from six microvertebrate sites, ranging in depositional setting from distal floodplain to channel lags. We recognize four theropod morphotypes: a comparatively large theropod (morph 1), a medium-sized dromaeosaurid (morph 2), a small dromaeosaurid (morph 3), and a tooth-morph similar to the genus Richardoestesia (morph 4). These four morphotypes vary significantly in mean size, from 15.1 mm in the largest theropod to 3.7 mm in Richardoestesia. Further, tooth representation from two of the best-sampled microsites (representing a channel/splay and floodplain deposit) shows differing patterns of abundance, with morphs 1 and 3 having roughly the same abundance in both sites, while morph 2 was more abundant in the floodplain setting and morph 4 was more abundant in the channel/splay. Stable isotope analysis (δ13C; δ18O) of tooth carbonate from the theropod morphotypes, goniopholidid crocodilians, and matrix (to test for diagenesis) from these sites was also performed. The theropods show modest differences in δ13C values between each other, with carbonate from the teeth of morphs 1, 3, and 4 being enriched in 13C in the channel/splay relative to the floodplain environments, possibly indicative of dietary plasticity in these species. We hypothesize that these data indicate that the Mussentuchit theropods had different niches within the predator guild, suggesting plausible means by which ecospace was divided among the predatory dinosaurs of the Mussentuchit local fauna. obvious adaptations associated with particular diets, and that many taxa lack representation by anatomically informative fossils. Without further lines of evidence, determining the precise trophic ecology of these dinosaurs is problematic beyond characterization as "generalist predator", a label that could reasonably be attributed to any sharp-toothed species. In all well-sampled Late Cretaceous terrestrial faunas, multiple small- to medium-sized theropods coexisted 6. These faunas include dromaeosaurs, troodontids, and small (or juvenile) tyrannosaurs, each possessing clear anatomical specialization for predatory lifestyles. Adaptations, such as sharp, recurved teeth and long sickle-shaped claws with enlarged basal tubercles (for attachment of flexor tendons), broadly suggest that these theropods would be in direct competition if they preyed upon the same species. This high diversity of competitors seemingly contradicts the Competitive Exclusion Principle 7, which states that species occupying the same niche cannot exist indefinitely in the same environment. In order to maintain such high diversity, these predatory species would have had to minimize competition amongst themselves through behavioral or spatial dietary segregation. Modern animals provide good examples of how niche partitioning can be manifested, primarily by specializing in different prey items (e.g. 8), feeding at different times of day (e.g. 9), or occupying different subenvironments (e.g. 10).
By adopting one (or more) of these strategies, similar organisms can coexist without directly competing for resources. Thus, it stands to reason that in diverse theropod communities, we would expect to see some level of niche partitioning following the strategies observed in modern assemblages. Determining behavior from the fossil record is a difficult task. It can be assumed that animals are adapted to a specific lifestyle, allowing for broad generalizations about the animal's ecology based on morphology alone. For example, the sharp teeth and long legs of dromaeosaurids leave little doubt that these animals were cursorial predators; however, what they were eating cannot be precisely determined by morphology alone. In the best-case scenario, animals are preserved in the act of feeding or with stomach contents still in place. Famous examples such as the fighting dinosaurs, a Velociraptor mongoliensis and Protoceratops andrewsi preserved in an eternal predator-prey struggle 11, unambiguously show that this trophic relationship occurred at least once. Even further, trackways occasionally show evidence of presumed hunting or pack behavior 12. Fossils of this ecological caliber are rare, leaving paleoecologists to rely on more indirect proxies for ancient behavior; namely, through geochemical or taphonomic evidence. To date, few of these ecological studies have focused on theropods, and when performed most focus on the large or enigmatic species (e.g. 13,14). Unsurprisingly, it is mainly in these large species that strong evidence for niche partitioning has been presented 15. In this study, we investigate the realized dietary niche of theropods from relatively well-sampled micro-bonebeds, derived from the Late Cretaceous Mussentuchit Member (herein referred to as MM) of the Cedar Mountain Formation of Utah. Through morphological, taphonomic, and geochemical techniques we investigated the dietary and habitat preferences of superficially similar species, in order to more fully understand the Mussentuchit ecosystem, as well as the ecology of many of these poorly-known theropods. Geologic Setting The MM is the uppermost unit of the Cedar Mountain Formation exposed on the western side of the San Rafael Swell anticline in Emery County, Utah (Fig. 1). This unit is composed of terrestrial sediments, varying between sandstones, mudstones, and altered volcanic ash layers; the latter of which comprise much of the grey, smectite-rich badlands characteristic of the unit 16,17. The MM formed as shed sediments deposited in the foreland basin during the Pavant thrust event, dated from 98.3 ± 0.1 Ma 18 to 96.7 ± 0.5 Ma 19, placing the member in the Cenomanian stage of the Late Cretaceous. The unit is generally highly fossiliferous (as compared to the rest of the Cedar Mountain Formation), with a diverse fauna composed of freshwater fishes, lissamphibians, lepidosaurs, crocodilians, dinosaurs, and mammals 19,20. The samples analyzed here come from six micro-bonebeds collected by crews from the Sam Noble Museum in the Mussentuchit Wash and Short Canyon areas of Emery County, Utah 17,21,22. Stratigraphically, localities V235, V694, and V794 are located near the same level, above a marker ash bed, 15 m from the base of the overlying Naturita Formation (formerly referred to as the Dakota Formation in the region 23), while V695 is found immediately below this same layer. V868 and V239 are found nearer to the contact of the MM with the overlying Naturita Formation.
Though they differ in depositional setting (see Table 1), Goldberg 22 showed that faunal composition between the sites was not dramatically affected by differences in taphonomic history. Further, specimens between sites tend to show a similar minor degree of wear attributable to transportation, indicating that hydraulic transport and reworking was minimal. Most importantly for our purposes, previous isotopic analyses from this same sample demonstrate the expected variation for modern fauna, implying that biogenic signals were not lost due to diagenetic alteration 17,24. Descriptions Tooth morphotypes. In total, 866 small- to medium-sized theropod teeth from the MM were analyzed and found to represent at least seven different morphotypes, many of which have been identified elsewhere 19,21,25. Here we recognize the following morphotypes in our sample sites: Morphotype 1. These teeth are the tallest of the morphotypes analyzed, with the largest being approximately 35 mm in crown height (Fig. 2A 1-3). These teeth are not as laterally compressed as other morphotypes and possess denticles on both carinae. The denticles on the anterior carina are proximodistally short, apicodistally wide, and oval in cross-section; the denticles on the posterior carina are proximodistally long and chisel-shaped. These teeth are likely the same as "Theropod A" of Fiorillo 25. The teeth are of uncertain origin, showing similarities with tyrannosaurs (previously reported from the unit by Cifelli et al. 21), but lack the basally-deflected blood groove diagnostic of this group 26. These teeth are similar in size and appearance to those described as morphotype 1 by Krumenacker et al. 27; these authors tentatively refer these teeth to a moderately-sized tyrannosauroid or basal tetanuran. The only large-bodied theropod currently known from the MM is Siats meekerorum 28; unfortunately, the holotype (and only definitively known specimen) contains no teeth. Regardless, among the huge number of theropod teeth recovered from the MM, none is large enough to belong to an adult S. meekerorum. Morphotype 2. Reaching heights up to 15.69 mm, these teeth are generally shorter than morphotype 1, but taller than the other morphotypes (Fig. 2B 1-3). The lingual side is often flattened, while the labial side is more inflated, forming a slight D in cross section. The anterior carina rarely possesses denticles, and curves lingually as it travels towards the base, a feature seen most prominently in lateral teeth of Dromaeosaurus, as well as anterior teeth of other dromaeosaurids 29. The posterior denticles are relatively elongate and rounded on the ends. These teeth match the description for Dromaeosaurinae teeth by Fiorillo 25, and those figured by Garrison et al. 19. Morphotype 3. Morphotype 3 is composed of teeth that are generally small and relatively recurved as compared to morphotypes 1 and 2 (Fig. 2C 1-3). These teeth possess denticles on both carinae, with the anterior denticles being wide, low, and rounded; while the posterior denticles are taller, and point slightly apically. Unlike morphotype 2, the anterior carina never curves lingually. The large, upturned posterior denticles with subequal-sized anterior denticles identify these teeth as belonging to a small species of dromaeosaurid 6. Morphologically similar, but relatively larger teeth are also known from the contemporaneous Wayan Formation of Idaho 27.

Figure 1. Map of the western interior during the Cenomanian (grey is highlands, green is lowlands, and blue is water) and a pullout of Emery County, Utah with the Mussentuchit Member exposure and microsites analyzed in this study. Based on maps and data from Cifelli et al. 21.

Morphotype 4. […] most lack them completely. Some of these specimens show a slight lingual curvature on the anterior carina, indicating that these may be anteriorly-located teeth. These teeth are nearly identical to those identified as tall variations of cf. Richardoestesia isosceles from the Santonian Milk River Formation of Canada 30 (Fig. 3G). Due in part to substantial variation known for Richardoestesia teeth 26, we cannot confidently assign these teeth beyond the generic level. Morphotype 5. Triangular teeth with relatively small denticles on both carinae (9-10 per mm) were first identified in the MM by Cifelli et al. 21 and subsequently described by Garrison et al. 19 (Fig. 2E 1-3). These teeth match most closely those described as cf. Richardoestesia sp., though their small, rounded denticles and isosceles-triangular shape in lateral view are unquestionably similar to those of the enigmatic, and possibly non-theropodan 31, Richardoestesia isosceles from the Late Cretaceous Aguja Formation of Texas 32. Similar triangular teeth are known from the contemporaneous Woodbine Formation of Texas, which have also been attributed to Richardoestesia 33. It is possible that morphs 4 and 5 represent teeth from a single species, but without any associated material this is simply conjecture. Morphotype 6. These relatively tall teeth with one flattened side and longitudinal grooves are similar to Paronychodon specimens described from other Late Cretaceous formations (Fig. 2F 1-3). Conversely, the Paronychodon specimens from the MM typically have large, rounded denticles on the posterior carina, while those in geologically younger formations often lack denticles entirely 6. Currie et al. 26 hypothesized that these teeth do not belong to a unique taxon, but instead represent malformed dromaeosaur teeth. Without more complete material it is difficult for us to contribute to this discussion; however, the general scarcity of morph 6 relative to other theropod teeth supports the assertion that these teeth are malformations. Morphotype 7. […] 25. These teeth are often very short (less than 5 mm long), with the smallest containing as few as seven denticles. These diminutive specimens superficially appear most similar to Troodon teeth and were originally identified as such by Cifelli et al. 21 and Goldberg 22; however, morphologically similar, but larger, teeth retain the same number of denticles per mm, implying that they belong to the same species differing only in ontogenetic status or alveolar position. These larger teeth have relatively smaller denticles than those of most Late Cretaceous troodontids (e.g. Fig. 2H of Larson and Currie 6); though more associated material is needed to unequivocally dismiss this identification. Some specimens appear to bear longitudinal grooves, similar to those of Morph 6, but to a lesser degree. These individuals may also belong to the same species as Morph 6, which themselves may belong to one of the other morphotypes. Though the MM theropods are a seemingly diverse assemblage, it is likely that multiple morphotypes belong to a single species.
For the remainder of this study we focused on four of the most common and morphologically discrete morphotypes (morphs 1-4; 309 specimens of the subsample) to maximize the likelihood that we analyzed distinct species. The taxonomic identifications are tentative, owing to the lack of more complete material from the MM. Regardless, these identifications are peripheral to our main concern: the relative differences in distribution and inferred diet.

Results and Discussion

Ordination. A discriminant analysis of the four morphotypes conducted on 77 complete teeth found that 83.12% of all teeth were correctly identified. In this analysis, axes 1 and 2 account for 61.5% and 32.22% of the maximum discrimination, respectively, and the biplot shows the strongest contributions from the anterior and posterior denticle counts, largely in the direction of axis 2 (Fig. 3). Morph 4 was the most consistently identified morph, with 93.3% of specimens correctly placed and only one specimen misidentified as belonging to morph 2. Morph 3 was identified correctly 90.9% of the time, with two out of the 22 specimens misidentified as belonging to morph 1. Morph 2 was identified correctly 80.8% of the time, with morph 4 being the most commonly mistaken morph (11.5%) and morphs 3 and 1 each predicted once (3.8%). Morph 1 was the most poorly predicted, with only a 64.3% success rate; the other five out of the 14 specimens were identified as morph 2. Following Larson and Currie 6, hit ratios between 75 and 100% can be considered to mark quantitatively distinct morphs, as opposed to Hammer and Harper 34, who limit this threshold to 90% and above. Using either guideline, morphs 3 and 4 can be differentiated based on these measurements alone, while morph 2 can only be recognized under the more liberal categorization. In neither case is the analysis sufficient to identify morph 1 using measurements alone. However, based on its inflated widths, denticle shape, and lack of a lingually-curved anterior carina, it is sufficiently likely that morph 1 can be recognized based on qualitative characteristics. We tentatively accept the overall hit ratio as indicating that all four morphs are sufficiently different to consider them separate taxonomic entities.

Size and inferred diet. The four morphotypes vary significantly in mean size, from 15.1 mm in morph 1 (5.2-34.7 mm) to 3.7 mm in morph 4 (2.1-7.6 mm), arranged into multiple size classes (Fig. 4). The results of a Kruskal-Wallis test show that the four morphs have unequal medians (p < 0.01), indicating that, regardless of overlap between the smallest and largest teeth of any two morphs, there is a distinct difference in size among the four species. This size diversity would presumably translate into differential trophic abilities for each morph.

Figure 3. Discriminant analysis for the four most distinct, best-sampled morphotypes (morph 1 is blue, morph 2 is green, morph 3 is yellow, and morph 4 is red). Axes 1 and 2 account for 93.72% of the maximum discrimination. CH is crown height, BL is basal length, PD is posterior denticle count, and AD is anterior denticle count.

The largest teeth from morph 1 are comparable in size to those of gracile tyrannosaurids from the Late Cretaceous, such as the 4 to 5 m-long Alioramus altai 35. Hypothetically, this morph would have been capable of feeding on varying-sized prey, including the other, smaller morphotypes.
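As an aside for readers who want to reproduce the flavor of the ordination above, the following minimal sketch shows how an overall hit ratio and per-morph identification rates can be computed from tooth measurements. It uses scikit-learn rather than the PAST3 package used in this study, resubstitution rather than cross-validation, and invented placeholder measurements, not the study's data:

    # Sketch of a discriminant-analysis "hit ratio" check on tooth
    # measurements. All numbers are invented placeholders.
    import numpy as np
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    from sklearn.metrics import confusion_matrix

    rng = np.random.default_rng(0)
    # Columns: crown height, basal length, anterior and posterior denticle
    # counts (size variables could be log-transformed, as in the study).
    centers = ([15, 8, 4, 5], [10, 6, 5, 6], [5, 3, 7, 8], [4, 2, 9, 10])
    X = np.vstack([rng.normal(loc=c, scale=1.0, size=(20, 4)) for c in centers])
    y = np.repeat(["morph1", "morph2", "morph3", "morph4"], 20)

    lda = LinearDiscriminantAnalysis()
    pred = lda.fit(X, y).predict(X)

    print("overall hit ratio:", (pred == y).mean())
    cm = confusion_matrix(y, pred, labels=["morph1", "morph2", "morph3", "morph4"])
    print("per-morph hit rates:", cm.diagonal() / cm.sum(axis=1))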
Morph 2 has the next largest teeth, with crown heights (up to 15.7 mm) often exceeding those of Deinonychus antirrhopus, a 3 m-long dromaeosaurid from the Aptian/Albian of North America 36 whose teeth rarely surpass 15 mm in height 37. Strong evidence exists that Deinonychus antirrhopus habitually fed upon the iguanodontian Tenontosaurus tilletti 5,38. This trophic relationship led to the widely-touted hypothesis that, like modern canids, Deinonychus could have used pack-hunting strategies to catch and dispatch larger prey 39, although this hypothesis has recently been challenged (see 40 for further discussion). Morph 2's superior size, as well as the possibility of pack-hunting behavior, indicates that this species would have been a relatively uninhibited predator, though like Deinonychus it may have specialized in hunting ornithopod dinosaurs (such as the ubiquitous Eolambia caroljonesa). Morph 3, unlike morphs 1 or 2, would have faced more dietary limitations due to its small size. This small dromaeosaurid has teeth ranging from 1.7 to 7.6 mm tall, roughly equivalent to teeth from the 1 to 2 m-long Late Cretaceous Bambiraptor feinbergi 41. This small size likely limited the species to feeding upon smaller prey, such as baby dinosaurs, mammals, reptiles, amphibians, and fish. Smaller still is morph 4, which we tentatively assign to the genus Richardoestesia. This genus was originally named on the basis of lower jaws and numerous isolated teeth discovered in Campanian-age sediments of western Canada 26. Richardoestesia teeth are thought to belong to a piscivorous species, based on the genus's elongate dentary, high tooth count, and nearly straight crowns with minute denticles 26,29. Further, apical wear patterns on Richardoestesia teeth from the latest Cretaceous are consistent with a fish-eating diet 29. Given what is known about this genus, it is possible that morph 4 had a diet similar to that of modern wading birds (such as ardeids), consisting mainly of fish supplemented opportunistically by small mammals, reptiles, amphibians, and invertebrates 42,43.

Taphonomy. Among the four depositional environments represented across the six microsites (Table 1), there are distinct differences in the relative abundances of the morphotypes. Unfortunately, small sample sizes at many of the microsites prevent a robust characterization of their respective assemblages. To address this limitation, sites were grouped and analyzed based on their inferred depositional setting (Table 1; Fig. 5A). In the grouped data sets, specimens belonging to morph 1 (the largest theropod) are found in lower abundances than the other three morphotypes, making up approximately 14.3% of the teeth across the channel and floodplain settings and never accounting for more than 16% at any single microsite. Conversely, teeth assigned to morph 3 (the small dromaeosaur) are the most consistently abundant, composing between 31.9% (in floodplains) and 58.8% (in splay/floodplains) of the observed population. Unlike the previous two examples, fossils belonging to morph 2 (the medium dromaeosaur) are unequally distributed and show a general trend of increasing relative abundance moving distally from the channels. This morph makes up only 23.8% of the channel-deposit census, while composing 40.8% of the floodplain census. In direct contrast, morph 4 (Richardoestesia sp.)
shows an increased relative abundance moving from the floodplains (12.9%) to the channels (28.6%), with the largest abundance in the splay/channel environments (35.4%). When constrained to only the microsites with substantial sample sizes (Fig. 5B,C), morph 1 is about twice as common in the floodplain environment (V695; 15.6%) as in the splay/channel (V794; 7.7%), while morph 2 teeth are more than three times as common in the floodplain (41.5% vs. 12.5%). Morph 3 shows little difference between the two sites (30.4% vs. 38.5%), and morph 4 increases more than three-fold from the floodplain to the splay/channel (12.6% vs. 41.3%). These differences are significantly non-random (χ2 = 40.57, d.f. = 3, p < 0.01). Initial comparisons between the two microsites (V794 and V695) would seemingly indicate a bias based on size, possibly due to hydrologic sorting. To test this possibility, we looked at the distribution of teeth of the much more abundant goniopholidid crocodilians from both sites. In general, V695 (n = 222) has the tallest teeth (range 0.85 to 30.43 mm; mean = 6.16 mm, median = 4.36 mm), while V794 (n = 184) has the smallest (0.65 to 22.45 mm; mean = 6.08 mm, median = 5.65 mm). However, both sites show a strong bias toward the smallest teeth, and the two sites were not significantly different under a two-tailed Mann-Whitney U-test (U = 18600, p = 0.12). In contrast, the theropod data, when pooled, show a significant difference in size: V695 (n = 68) contains significantly taller teeth than V794 (n = 53) (U = 869, p < 0.01). This result, however, is unsurprising given the higher abundance of the largest theropod's (morph 1) teeth at V695 relative to V794. To test for size differences within theropod groups, we performed the same analysis for morph 3 (n = 25), the most abundant theropod at both sites. As in the goniopholidids, morph 3 shows a higher mean tooth height in V695 (4.50 mm; median = 4.36 mm) than in V794 (4.07 mm; median = 3.78 mm); however, a Mann-Whitney U-test showed no significant difference between these populations (U = 244.5; p = 0.19). These distributions suggest that hydrologic sorting is an unlikely explanation for the distribution of theropod morphs among the site samples (see also Goldberg 22 ). An alternative hypothesis is that the distribution of theropods in these sites is controlled by stratigraphy rather than by the environments in which the sites formed. Although all four morphotypes are found in the stratigraphically lowest (V868 and V695) and highest (V239) sites, the distribution may not be consistent. Indeed, relative abundances at V868 are more similar to those of V695, a site at a stratigraphically similar position, than to those of V794, a site reflecting the same depositional environment 22. Conversely, the stratigraphically equivalent V694, V235, and V794 vary in depositional setting and show few similarities in their respective morphotype abundances, particularly with respect to morphs 2 and 4. Given the small sample sizes from these sites (excluding V794 and V695) and their limited vertical distribution, stratigraphic occurrence is insufficient to explain theropod distribution and abundance among the sites. Instead, we hypothesize that, at least in part, the distribution of the MM theropod morphotypes is a result of the behavior of the organisms, causing preferential burial in certain environments.
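For illustration, here is a minimal sketch of the two tests used in this section, with invented counts and tooth heights standing in for the study's data:

    # Chi-squared test on morphotype counts between two sites, and a
    # Mann-Whitney U-test on crown heights; all values are placeholders.
    import numpy as np
    from scipy.stats import chi2_contingency, mannwhitneyu

    # Rows: sites (e.g. V695, V794); columns: morphs 1-4 (invented counts).
    counts = np.array([[10, 27, 20, 8],
                       [4, 6, 20, 21]])
    chi2, p, dof, _ = chi2_contingency(counts)
    print(f"chi2 = {chi2:.2f}, d.f. = {dof}, p = {p:.3g}")

    # Crown heights (mm) of one morph at the two sites (invented values).
    rng = np.random.default_rng(1)
    heights_a = rng.normal(4.5, 1.2, 25)
    heights_b = rng.normal(4.1, 1.2, 25)
    u, p = mannwhitneyu(heights_a, heights_b, alternative="two-sided")
    print(f"U = {u:.1f}, p = {p:.3g}")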
Habitat preference, as inferred from depositional setting, is consistent with the morphology of the respective tooth types, such as an affinity for the aquatic parts of the fluvial system in the hypothetically piscivorous Richardoestesia teeth (morph 4) and a penchant for floodplain settings in the proto-typical theropod teeth of the medium dromaeosaurid (morph 2). These findings also generally agree with predictions based on morphology alone.

Geochemical analysis. Stable isotope analyses of tooth-associated carbonate (δ13C and δ18O) were conducted on all four morphotypes from both V695 and V794 (the largest microsites, representing a floodplain and a channel/splay environment, respectively), as well as on samples of small goniopholidid crocodilian teeth and rock matrix. The δ18O values showed no statistical difference between morphotypes, but did differ between the matrix and all morphotypes at V794 (p < 0.05) (Table 2). However, differences in δ13C values between morphotypes are somewhat more significant (Table 3; Fig. 6). Among the morphotypes in V695, morph 4 (Richardoestesia sp.; n = 6, mean δ13C = −4.38‰, SD = 1.15) and morph 2 (medium dromaeosaur; n = 12, mean δ13C = −3.18‰, SD = 1.07) showed the largest difference (1.2‰; p = 0.08). Goniopholidids in V695 had mean δ13C values similar to morph 4 (n = 10, mean δ13C = −4.31‰, SD = 1.09), but were depleted in 13C relative to morph 3 (small dromaeosaur; n = 10, mean δ13C = −3.75‰, SD = 1.41) and morph 1 (largest theropod; n = 11, mean δ13C = −3.62‰, SD = 1.29), respectively. In V794, goniopholidids retain a relatively low mean (n = 10, mean δ13C = −4.55‰, SD = 0.67) that is significantly depleted in 13C relative to all of the theropod morphotypes (p < 0.05). Unlike in V695, however, morph 2 (n = 8) has the lowest mean (δ13C = −3.03‰, SD = 1.52), followed by morph 4 (n = 13, mean δ13C = −2.72‰, SD = 1.66), morph 3 (n = 10, mean δ13C = −2.38‰, SD = 1.32), and morph 1 (n = 7, mean δ13C = −1.93‰, SD = 1.80), respectively. In all morphotypes except morph 3, variance increased from V695 to V794. Although many factors could explain the high variability observed between analyses, we reject the hypothesis that the results are entirely a product of diagenesis. Mineral alteration has undoubtedly taken place to some extent, but we note that dietary, behavioral, ecological, and environmental factors can have similarly substantial effects on the isotopic signature of a given sample 45. For example, the significant differences between the δ13C and δ18O of the matrix and the teeth, as well as between the δ13C of goniopholidid teeth and theropods in V794, show that the sample as a whole has not been isotopically homogenized by groundwater replacement. In addition, the carnivorous theropods in this study show δ13C values enriched relative to those reported from Late Cretaceous hadrosaur tooth enamel 46; this comparison is imperfect, but nonetheless promising, given the variation in sample material and preparation between the two studies. For herbivores, differences in δ13C values are commonly inherited from the base of the food chain, contemporaneously reflecting differences in photosynthetic pathways between plant types (C3 vs. C4 carbon fixation). The Mussentuchit ecosystem likely lacked appreciable C4 producers 47, and thus the majority of δ13C differentiation at the base of the food chain would have been caused by variation in the uptake and retention of CO2. In this system, the differences between microsites may represent varying degrees of canopy cover.
Closed-canopy forests, in general, tend to exhibit lower carbon isotope ratios due to the effects of plant respiration and decomposition near the forest floor 48. Carbon in tooth carbonate is derived from ingested organics and modified through fractionation based on the consumer's metabolic processes 1. These differences are further exaggerated with each step up the food chain, as predators are often enriched by approximately 1‰ relative to their prey 49. δ18O in tooth carbonate is derived from ingested water and varies with the source of the water and the body temperature of the organism 1. These data indicate that the theropods, as a whole, did not differ substantially in bulk diet or ingested water; however, given their significant differences in size and shape, the subtle isotopic variation between morphotypes is still worthy of discussion. δ18O showed no noticeable trend, indicating that all organisms (here, theropods and goniopholidids) used similar water sources. This is a somewhat surprising result, since Suarez et al. 17 found that theropod phosphate δ18O in V794 (mean δ18O = 18.3 ± 1.0‰) differed from that of goniopholidids (mean δ18O = 16.3 ± 1.0‰). Differences in the relative stability of the phosphate mineral and in the size classes sampled (only presumed immature crocodilians) may explain the differences between the studies. δ13C yielded more interesting results, with goniopholidids being, on average, the most isotopically depleted. In modern environments δ13C reflects relative environmental cover and trophic level, meaning that these small crocodilians were likely eating trophically low organisms and/or organisms living in a well-vegetated environment. Modern young crocodilians all transition through a similar ontogenetic dietary progression, with the youngest individuals relying heavily on invertebrates, which are gradually replaced by a diet of fish (e.g. 50 ), followed by (depending on the species and environment) an additional switch to a diet including mammalian and reptilian components 51. Isotopically, this is reflected in the tissues, as demonstrated by Radloff et al. 52 in Crocodylus niloticus. Scute keratin samples showed at least two transitions in δ13C correlated with the length of the individual (SVL, measured from the tip of the snout to the end of the first scale row after the cloaca). The smallest animals hypothetically ate primarily invertebrates and showed a wide-ranging isotopic signature with a mean of approximately −21‰. This δ13C trend decreased linearly as the animals grew, to an average of −25‰ at approximately 130 cm SVL, likely reflecting an increasing proportion of fish in the diet of these individuals. Finally, at 240 cm SVL, the δ13C values dramatically increase to approximately −16‰, resulting from an increasing dietary proportion of C4-consuming mammals at larger body sizes. If young goniopholidids were ecologically similar to living crocodilians, then we can assume that these depleted isotopic signals represent animals feeding on small, trophically low, and possibly aquatic fauna (e.g. small fish and crustaceans). In V695, theropod morph 4 has a δ13C mean and range similar to those of the immature goniopholidids, implying a similar dietary type for both animals. In V794, morph 4 is more enriched than the goniopholidids, but still remains more depleted on average than morphs 3 and 1, and further contains the most-depleted sample analyzed for any morph (−6.4‰).
Morph 2 shows the largest relative change between sites (from most enriched on average to least). Between the sites, the absolute differences are roughly equal for the goniopholidids (±0.10‰) and morph 2 (±0.11‰), but more substantial for morphs 3 (±1.37‰), 4 (±1.66‰), and 1 (±1.68‰). Ecologically, these differences can be explained by a shift toward higher-trophic-level foods for each of these morphs, a possible result of longer food chains in more aquatic environments. More interesting, however, is the observation that morph 2 is relatively depleted in environments in which it is poorly represented, possibly indicating a lack of dietary plasticity in this species and a reliance upon food sources more common in the floodplain setting. Tantalizing as it may be to speculate on a behavioral basis for this disparity, we take caution here not to over-interpret the isotopic results. Taken as a whole, and given the high variability observed in these analyses, we interpret these results as suggestive, rather than indicative, of dietary niche partitioning between these theropods.

Conclusions

Piscivory is hypothesized to have been a relatively common form of dietary partitioning in theropod dinosaurs. Many species of theropods have morphological features consistent with modern fish-eating species, some to an absurd degree, such as the procumbent front teeth of the noasaurid Masiakasaurus knopfleri 53. Evidence beyond morphology is tentative but supportive for many of these species. For example, at least one specimen of the four-winged dromaeosaurid Microraptor gui preserves fish remains within its gut region 54; however, other specimens preserve the remains of a mammal and a bird 55,56, indicating that this species was a more generalist predator and not primarily feeding upon fish. Spinosaurid theropods show the morphological adaptations and geochemical signal of aquatic predators 57. Indeed, gut contents from the spinosaurid Baryonyx walkeri preserve acid-etched fish scales, but as in the Microraptor example, B. walkeri also preserves the remains of a juvenile iguanodontian 58. Further, an embedded spinosaur tooth in a pterosaur vertebra indicates that this group also fed on prey items other than fish 59. Like these examples, the species investigated here do not show obligate dietary or behavioral patterns tied to a single prey item or environment. Instead we see subtle, but nonetheless diagnostic, patterns associated with differing lifestyles. Richardoestesia teeth (morph 4) are by far the most distinct, with a hypothesized preference (based on distribution among sites) for channel environments, and a variable isotopic signature consistent with a 13C-depleted diet. In contrast, the medium dromaeosaur (morph 2) is found more often in floodplain settings, but varies only slightly isotopically depending on the environment in which it is found. Morph 1, the largest theropod, shows low abundance in both environments and has a geochemical signal consistent with an animal higher on the food chain. Last is morph 3, a small dromaeosaur, which is found in high abundance in all environments and maintains relatively diverse δ13C values, typical of a small-bodied opportunistic predator. Morphs 1 and 2 have the lowest abundances near channel deposits.
Although it is almost certain that these species would frequent any available water source at least temporarily, the data seemingly indicate that this was not a place of substantial tooth loss or, by inference, a preferred habitat. One possible explanation for this distribution is that these medium-sized theropods were competitively excluded from near-water settings by large-bodied coelognathosuchian crocodilians. Some of these species potentially reached sizes over 5 m 20, and their teeth are among the most commonly encountered vertebrate fossils in the MM 19. Alternative hypotheses, ranging from poor pedal traction near water to overgrown hunting terrain, could also be viable explanations for this distribution. In summary, we have shown here that it is possible to recover an ecological signal from theropod fossils without direct evidence from preserved stomach contents or feeding events. Through morphological, taphonomic, and geochemical proxies, we recognize that at least one species of MM theropod (morph 4, Richardoestesia sp.) habitually lived and fed in aquatic environments, presumably specialized for a diet of small vertebrates (Fig. 7). Previous hypotheses of piscivory in this species are tentatively supported, though an obligate diet of fish or aquatic organisms can likely be dismissed. Individually, each line of investigation (morphology, depositional environment, and carbon isotopes) is inconclusive; collectively, however, the results are consistent with niche partitioning within this medial Cretaceous theropod community.

Methods

The first step of this analysis was to identify visually distinct tooth morphotypes from the bonebeds. Once morphs were recognized, specimens were identified and grouped using a Nikon SMZ-10A dissecting microscope. Incomplete specimens were not included in the analysis unless they could be assigned to one of the morphotypes with a high degree of confidence. Specimens were then measured, using hand calipers, for crown height, fore-aft basal length, basal width, and denticles per mm (rounded to the nearest whole denticle) at the curve of both the posterior and anterior carinae 6. In order to assess the validity of the provisional (subjectively established) morphological groupings, the measurements (with length, width, and height data log-transformed to control for size) were analyzed with a discriminant analysis in PAST3 60. Next, to determine trophic ecology, we used a multi-tiered approach. First, tooth size was compared using a Kruskal-Wallis test of crown height for all complete specimens, to determine whether statistically significant differences in crown height exist between the morphs. Next, we ran a χ2-test to compare the distribution of morphotypes between sites and to determine whether any showed a preference for a particular depositional setting, following Lyson and Longrich 61. To test for taphonomic sorting bias, crown heights of the most abundant tooth type (morph 3) and of goniopholidid crocodilians were compared between two sites using Mann-Whitney tests for equal medians. Finally, we determined the stable isotope composition of carbonate associated with tooth enamel and/or dentin (δ13C and δ18O) to approximate the diet and habitat of each morph.
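As an illustration of the first test in this battery, here is a minimal sketch of a Kruskal-Wallis comparison of crown heights across four morphs; the heights are invented placeholders, not the study's measurements:

    # Kruskal-Wallis test on crown height across four morphotypes.
    import numpy as np
    from scipy.stats import kruskal

    rng = np.random.default_rng(3)
    # Log-normal heights roughly mimic right-skewed tooth-size distributions.
    heights = [rng.lognormal(mean=mu, sigma=0.4, size=30)
               for mu in (2.7, 2.2, 1.5, 1.3)]  # one array per morph
    h, p = kruskal(*heights)
    print(f"H = {h:.2f}, p = {p:.3g}")  # small p -> at least one median differs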
Specimens for geochemical analysis were taken from incomplete, but morphologically distinguishable, specimens of each morphotype, as well as from small (<10 mm tall) goniopholidid teeth and matrix samples for comparison. Ideally, samples would be taken only from enamel (see 45 ); however, many of the teeth were too small or had insufficient enamel for analysis alone. Dentin has more pore space than enamel and is more susceptible to groundwater alteration; however, given that these samples come from the same microsites (and thus share the same diagenetic history), we assume that including dentin mainly increases type II error, reducing our power to detect real differences rather than creating spurious ones. By including this material, we acknowledge that the values obtained are a coarse approximation of diet and habitat. Isotope samples were treated following a modified technique of Koch et al. 62. Powdered specimens were first treated for one day with a 2% solution of NaOCl, washed five times with distilled water, and allowed to dry overnight, followed by a three-day treatment with a 0.1 M acetic acid solution. Next, the samples were washed 10 times with distilled water and allowed to dry for three days. The carbonate samples were analyzed for their stable carbon and oxygen isotope compositions as follows. Approximately 200-300 μg of each carbonate sample was loaded into a 12 ml borosilicate exetainer vial (Labco 938 W), which was sealed with a butyl rubber septum cap. The vials were then placed in a thermostated sample tray heated to 50 °C and flushed with ultra-high-purity He (99.999%) for 360 seconds, using a Thermo Gas Bench II equipped with a PAL autosampler flushing needle, to remove the air. Then 0.4 ml of 100% phosphoric acid was manually injected into the vials with a syringe, and the reaction was allowed to proceed at 50 °C for two hours. The vials were then sampled with the PAL measurement needle, and the headspace CO2 was analyzed for δ13C and δ18O using a Thermo Delta V Plus isotope ratio mass spectrometer. The carbon and oxygen isotopic compositions are expressed in standard delta notation 63:

$\delta^{13}\mathrm{C} = \frac{R(^{13}\mathrm{C}/^{12}\mathrm{C})_{P}}{R(^{13}\mathrm{C}/^{12}\mathrm{C})_{\mathrm{VPDB}}} - 1$, where $R(^{13}\mathrm{C}/^{12}\mathrm{C})_{P} = N(^{13}\mathrm{C})_{P}/N(^{12}\mathrm{C})_{P}$ is the ratio of the number of $^{13}$C and $^{12}$C atoms in sample P, and equivalent parameters apply for VPDB; and

$\delta^{18}\mathrm{O} = \frac{R(^{18}\mathrm{O}/^{16}\mathrm{O})_{P}}{R(^{18}\mathrm{O}/^{16}\mathrm{O})_{\mathrm{VPDB}}} - 1$, where $R(^{18}\mathrm{O}/^{16}\mathrm{O})_{P} = N(^{18}\mathrm{O})_{P}/N(^{16}\mathrm{O})_{P}$ is the ratio of the number of $^{18}$O and $^{16}$O atoms in sample P, and equivalent parameters apply for VPDB.

The δ13C values of the calcite samples are reported relative to VPDB on a scale normalized such that the δ13C of NBS18 is −5.01‰ 64. The isotopic data were finally compared using pairwise Mann-Whitney tests.

Data Availability
Data are available through the supplementary data file.
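To make the delta notation above concrete, here is a minimal sketch of the computation; the VPDB 13C/12C ratio used is a commonly quoted approximate value, assumed here rather than taken from this paper:

    # delta = R_sample / R_reference - 1, reported in per mil (x 1000).
    R_VPDB_13C = 0.011180  # approximate 13C/12C of the VPDB reference (assumed)

    def delta13C_permil(n13, n12):
        """delta-13C of a sample from atom counts N(13C) and N(12C)."""
        r_sample = n13 / n12
        return (r_sample / R_VPDB_13C - 1.0) * 1000.0

    # A sample slightly depleted in 13C relative to VPDB:
    print(delta13C_permil(0.011130 * 1e6, 1e6))  # about -4.5 per mil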
/**
 * @file dmx_demo.c
 * @author bdeary (<EMAIL>)
 * @brief A demonstration of a DMX512 system for the purpose of end-to-end
 *        testing of a microprocessor-based system independent of the micro-p.
 * @date 2020-09-20
 *
 * Copyright 2020, WetDesigns
 *
 * @details This file contains the setup and main loop of the system.
 *          It sits on top of the micro_p_sim structure that simulates a
 *          microprocessor with some current issues related to isr exclusion.
 *
 *          In general it creates a framework to test a DMX receiver and
 *          associated device processing (processing the dmx slots into one or
 *          more device responses or translation to a second communication
 *          channel).
 *
 *          dmx_demo.h derives its DdModel_t from the ModelBase_t defined in
 *          micro_p_sim, which has the isr_vector table pointer and functions
 *          for setup and loop (similar to other micro-P frameworks like
 *          Arduino).
 */
#include <config.h>
#include "dmx_demo.h"
#include <pthread.h>
#include <time.h>
#include <genQ.h>
#include <genPool.h>
#include <isr_comm.h>
#include <rs485_dmx.h>
#include <stdio.h>

// This is the instantiation of the isr vector table and the isr flags
void_func_t isr_table[TOTAL_ISRs];
uint8_t isr_flags[TOTAL_ISRs];

// a signal that an isr triggered
DefineGenQ(isrQ_, uint8_t, 20);
DefineGenQWrappers(isrQ_, uint8_t);
DefineDoubleIsrObjectQueue(dev1_ipt, dev1_buf_t, 3, NULL);
DefineDoubleIsrObjectQueue(dev2_ipt, dev2_buf_t, 3, NULL);

// This is the instantiation of the user-derived class from the ModelBase_t
// class: compile-time setup, the only opportunity to assign const variables
// in our model.
DdModel_t dd_model = (DdModel_t)
{
    // derived model variables
    .dmx512_receiver.first_device=&dd_model.dev1, // these are immutable
    .dmx512_receiver.slot0=0,                     // these are immutable
    .dev1.next_dev=&dd_model.dev2,
    .dev2.next_dev=NULL,
    .dev1.ipt = &dev1_ipt_instance,
    .dev2.ipt = &dev2_ipt_instance,
    // provide the isr vector table
    .model_base.isr_table_size=TOTAL_ISRs,
    .model_base.isr_table=isr_table,
    .model_base.isr_flags=isr_flags,
    // this sets how ISRs are processed in the sim
    .model_base.in_isr=1, // This blocks user main until Isr runs at least once
    .model_base.isr_are_prioritized=0, // changes how ISRs trigger other ISRs
    .model_base.isr_auto_clear=1, // frees isr handlers from clearing their flags
};

// the user ms tick isr handler, called every ms in user time
void ms_isr(void)
{
    dd_model.ms_tick++;
}

#define BYTES_PER_SIM 14
#define SIM_TIME (DMX_BYTES_PER_SEC / BYTES_PER_SIM)
uint8_t dmx_sim_buffer[BYTES_PER_SIM];
size_t dmx_sim_len;
int sim_slot = -1;

// wrapper ISRs that feed the DMX receiver: one for the break condition,
// one for a block of received serial bytes
void dmx1_isr_b_warpper(void)
{
    DMX_process_break(&dd_model.dmx512_receiver);
    dd_model.frame++;
}

void dmx1_isr_s_warpper(void)
{
    DMX_use_raw_data(&dd_model.dmx512_receiver, dmx_sim_buffer, dmx_sim_len);
}

/////// The following four functions define the user's model //////
ModelBase_t *DD_setup(ModelBase_t *model) // the user setup code
{
    DdModel_t *self = (DdModel_t*)model;
    model->isr_table[MS_ISR] = ms_isr;
    self->ms_tick = 0;
    model->isr_table[DMX_BREAK] = dmx1_isr_b_warpper;
    model->isr_table[DMX_SERIAL] = dmx1_isr_s_warpper;
    DMX_receiver_reset(&dd_model.dmx512_receiver);
    Status_t status;
    void *working_buffer;
    // pre-load the drivers with all available empty buffers
    while(NULL != (working_buffer = Allocate_dev1_ipt_Object()))
    {
        status = Send_dev1_ipt_Object(working_buffer);
    }
    while(NULL != (working_buffer = Allocate_dev2_ipt_Object()))
    {
        status = Send_dev2_ipt_Object(working_buffer);
    }
    return model;
}

void Dev1_background(ModelBase_t *model)
{
    DdModel_t *self = (DdModel_t*)model;
    Status_t status;
    // keep the driver supplied with empty buffers
    dev1_buf_t *workspace = Allocate_dev1_ipt_Object();
    if(workspace) status = Send_dev1_ipt_Object(workspace);
    status = Receive_dev1_ipt_Object(&workspace);
    if(status == Status_OK) // if Ok then data was received
    {
        // process the data
        printf("Dev1 = 0x");
        for(int i=0; i < SARR_LEN(workspace->buf); i++)
        {
            printf("%02x", workspace->buf[i]);
        }
        printf("--> %u\n", self->ms_tick);
        // return the workspace to the driver
        status = Send_dev1_ipt_Object(workspace);
    }
}

void Dev2_background(ModelBase_t *model)
{
    Status_t status;
    DdModel_t *self = (DdModel_t*)model;
    // keep the driver supplied with empty buffers
    dev2_buf_t *workspace = Allocate_dev2_ipt_Object();
    if(workspace) status = Send_dev2_ipt_Object(workspace);
    status = Receive_dev2_ipt_Object(&workspace);
    if(status == Status_OK) // if Ok then data was received
    {
        // process the data
        printf("Dev2 = 0x");
        for(int i=0; i < SARR_LEN(workspace->buf); i++)
        {
            printf("%02x", workspace->buf[i]);
        }
        printf("--> %u\n", self->ms_tick);
        // return the workspace to the driver
        status = Send_dev2_ipt_Object(workspace);
    }
}

#if 0
#define CLOCKS_PER_MS (CLOCKS_PER_SEC/1000)
#define CLOCKS_PER_SIM (CLOCKS_PER_SEC / SIM_TIME)
#else
#define CLOCKS_PER_MS 100
#define CLOCKS_PER_SIM 45
#endif

ModelBase_t *DD_isr_stimulus(ModelBase_t *model)
{
    DdModel_t *self = (DdModel_t*)model;
    clock_t clk = clock();
    // Do any user stimulus generation or get external commands.
    // Here simulate the 1 ms tick interrupt.
    static clock_t last=0;
    if(last)
    {
        if (clk - last > CLOCKS_PER_MS)
        {
            last += CLOCKS_PER_MS;
            model->isr_flags[MS_ISR] = 1; // trigger the ISR
        }
    }
    else
    {
        last = clock();
    }
    // and simulate the DMX line: a break, then the slots delivered in
    // BYTES_PER_SIM-sized chunks
    static clock_t dmx_last=0;
    if(dmx_last)
    {
        if (clk - dmx_last > CLOCKS_PER_SIM)
        {
            dmx_last += CLOCKS_PER_SIM;
            if(sim_slot < 0)
            {
                model->isr_flags[DMX_BREAK] = 1; // trigger a break
                sim_slot = 0;
            }
            else
            {
                static int sim_shift;
                for(int i=0; i < SARR_LEN(dmx_sim_buffer); i++, sim_slot++)
                {
                    if(sim_slot == 0)
                    {
                        dmx_sim_buffer[0] = 0;
                    }
                    else
                    {
                        dmx_sim_buffer[i] = sim_slot + sim_shift;
                    }
                }
                dmx_sim_len = SARR_LEN(dmx_sim_buffer);
                if(sim_slot > 500)
                {
                    sim_slot = -1;
                    sim_shift = (sim_shift + 1) % 5;
                }
                model->isr_flags[DMX_SERIAL] = 1;
            }
        }
    }
    else
    {
        dmx_last = clock();
    }
    return model;
}

// the user background loop
ModelBase_t *DD_main(ModelBase_t *model)
{
    DdModel_t *self = (DdModel_t*)model;
    // do background processing here.
    Dev1_background(model);
    Dev2_background(model);
    // if model is returned as NULL, should start abort() processes
    return model;
}

// This is background-level diagnostics, runs after each user DD_main call
ModelBase_t *DD_diag(ModelBase_t *model)
{
    DdModel_t *self = (DdModel_t*)model;
    // also has the ability to invoke abort() by returning NULL.
    return model;
}

// This is the user-provided function to set up the model with the
// above functions and any initializations or invariants of the user data
// model. Runs once before DD_setup above and injects the user data model
// into micro_p_sim.
ModelBase_t *Micro_p_sim_init(void)
{
    // set all our simulation invariants at run time, vs the compile-time
    // setup above.
    dd_model.model_base.setup=DD_setup;
    dd_model.model_base.main_loop=DD_main;
    dd_model.model_base.diagnostics=DD_diag;
    dd_model.model_base.isr_stimulus=DD_isr_stimulus;
    dd_model.model_base.in_isr = 1;
    dd_model.model_base.tick=0;
    dd_model.model_base.main_tick=0;
    // any other user init goes here
    // dev 1 setup
    dd_model.dev1.next_dev=&dd_model.dev2;
    dd_model.dev1.first_slot=DEV1_FIRST;
    dd_model.dev1.slots=DEV1_SLOTS;
    dd_model.dev1.ipt=&dev1_ipt_instance; // the queue instance defined above
    // dev 2 setup
    dd_model.dev2.next_dev=NULL;
    dd_model.dev2.first_slot=DEV2_FIRST;
    dd_model.dev2.slots=DEV2_SLOTS;
    dd_model.dev2.ipt=&dev2_ipt_instance; // the queue instance defined above
    dd_model.dmx512_receiver.slot_cnt=0;
    // return pointer to base class object
    return (ModelBase_t*)&dd_model;
}

#ifndef TEST
int main(int argc, char const *argv[])
{
    Micro_p_sim_main(&dd_model);
    return 0;
}
#endif
def _combine_posteriors_with_renorm(self, score_breakdown_raw, renorm_factors):
    """Combine predictor scores for each target word after renormalization.

    Subtracts the per-predictor renormalization factor from each raw
    (log) score, combines the shifted scores with
    ``self.combi_predictor_method``, and returns both the combined
    scores and the adjusted per-predictor score breakdown.
    """
    n_predictors = len(self.predictors)
    combined = {}
    score_breakdown = {}
    for trgt_word, preds_raw in score_breakdown_raw.items():
        preds = [(preds_raw[idx][0] - renorm_factors[idx], preds_raw[idx][1])
                 for idx in range(n_predictors)]
        combined[trgt_word] = self.combi_predictor_method(preds)
        score_breakdown[trgt_word] = preds
    return combined, score_breakdown
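As a toy illustration of the renormalization step above (the combination function and all values here are invented stand-ins, not the class's actual predictors):

    # Each predictor's raw (log) score is shifted by that predictor's
    # renormalization factor before the scores are combined. In the real
    # class the combination is self.combi_predictor_method.
    def combine_linear(preds):
        # preds: list of (log_score, predictor_weight) pairs
        return sum(score * weight for score, weight in preds)

    score_breakdown_raw = {
        "cat": [(-1.2, 0.7), (-0.9, 0.3)],  # (raw log score, weight) per predictor
        "dog": [(-2.0, 0.7), (-0.4, 0.3)],
    }
    renorm_factors = [-0.1, -0.3]  # per-predictor log normalization constants

    combined = {}
    for word, preds_raw in score_breakdown_raw.items():
        preds = [(s - renorm_factors[i], w) for i, (s, w) in enumerate(preds_raw)]
        combined[word] = combine_linear(preds)
    print(combined)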
Statistical Analysis Does not Support a Human Influence on Climate Wigley et al. have suggested a novel statistical approach for detecting an anthropogenic influence on climate. Their claim is based on the difference they find between the autocorrelation of the (observed) temperature record and that of an unforced climate model (i.e., one in which greenhouse-gas levels do not rise). We examine their analysis in greater detail and find that their conclusion is not valid.
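As an illustration of the kind of statistic at issue, here is a minimal sketch comparing lag-1 autocorrelation between two synthetic series (not the authors' data or method):

    # Compare lag-1 autocorrelation of an "observed-like" series and an
    # "unforced-model-like" series; both are synthetic AR(1) noise.
    import numpy as np

    def lag1_autocorr(x):
        x = x - x.mean()
        return float(np.dot(x[:-1], x[1:]) / np.dot(x, x))

    rng = np.random.default_rng(0)

    def ar1(phi, n=150):
        x = np.zeros(n)
        for t in range(1, n):
            x[t] = phi * x[t - 1] + rng.normal()
        return x

    obs_like = ar1(0.6)    # stand-in for the observed temperature record
    model_like = ar1(0.3)  # stand-in for the unforced model run
    print(lag1_autocorr(obs_like), lag1_autocorr(model_like))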
<gh_stars>0 /* Copyright (C) 2006, 2007 Sony Computer Entertainment Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Sony Computer Entertainment Inc nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef VECTORMATH_NEON_VECTOR_HPP #define VECTORMATH_NEON_VECTOR_HPP namespace Vectormath { namespace Neon { // ======================================================== // VecIdx // ======================================================== #ifdef VECTORMATH_NO_SCALAR_CAST inline VecIdx::operator FloatInVec() const { return FloatInVec(ref, i); } inline float VecIdx::getAsFloat() const #else inline VecIdx::operator float() const #endif { return ((float *)&ref)[i]; } inline float VecIdx::operator = (float scalar) { sseVecSetElement(ref, scalar, i); return scalar; } inline FloatInVec VecIdx::operator = (const FloatInVec & scalar) { ref = sseVecInsert(ref, scalar.get128(), i); return scalar; } inline FloatInVec VecIdx::operator = (const VecIdx & scalar) { return *this = FloatInVec(scalar.ref, scalar.i); } inline FloatInVec VecIdx::operator *= (float scalar) { return *this *= FloatInVec(scalar); } inline FloatInVec VecIdx::operator *= (const FloatInVec & scalar) { return *this = FloatInVec(ref, i) * scalar; } inline FloatInVec VecIdx::operator /= (float scalar) { return *this /= FloatInVec(scalar); } inline FloatInVec VecIdx::operator /= (const FloatInVec & scalar) { return *this = FloatInVec(ref, i) / scalar; } inline FloatInVec VecIdx::operator += (float scalar) { return *this += FloatInVec(scalar); } inline FloatInVec VecIdx::operator += (const FloatInVec & scalar) { return *this = FloatInVec(ref, i) + scalar; } inline FloatInVec VecIdx::operator -= (float scalar) { return *this -= FloatInVec(scalar); } inline FloatInVec VecIdx::operator -= (const FloatInVec & scalar) { return *this = FloatInVec(ref, i) - scalar; } // ======================================================== // Vector3 // ======================================================== inline Vector3::Vector3(float _x, float _y, float _z) { mVec128 = _mm_setr_ps(_x, _y, _z, 0.0f); } inline Vector3::Vector3(const FloatInVec & _x, const FloatInVec & _y, const FloatInVec & _z) { const __m128 xz = _mm_unpacklo_ps(_x.get128(), 
_z.get128()); mVec128 = _mm_unpacklo_ps(xz, _y.get128()); } inline Vector3::Vector3(const Point3 & pnt) { mVec128 = pnt.get128(); } inline Vector3::Vector3(float scalar) { mVec128 = FloatInVec(scalar).get128(); } inline Vector3::Vector3(const FloatInVec & scalar) { mVec128 = scalar.get128(); } inline Vector3::Vector3(__m128 vf4) { mVec128 = vf4; } inline const Vector3 Vector3::xAxis() { return Vector3(sseUnitVec1000()); } inline const Vector3 Vector3::yAxis() { return Vector3(sseUnitVec0100()); } inline const Vector3 Vector3::zAxis() { return Vector3(sseUnitVec0010()); } inline const Vector3 lerp(float t, const Vector3 & vec0, const Vector3 & vec1) { return lerp(FloatInVec(t), vec0, vec1); } inline const Vector3 lerp(const FloatInVec & t, const Vector3 & vec0, const Vector3 & vec1) { return (vec0 + ((vec1 - vec0) * t)); } inline const Vector3 slerp(float t, const Vector3 & unitVec0, const Vector3 & unitVec1) { return slerp(FloatInVec(t), unitVec0, unitVec1); } inline const Vector3 slerp(const FloatInVec & t, const Vector3 & unitVec0, const Vector3 & unitVec1) { __m128 scales, scale0, scale1, cosAngle, angle, tttt, oneMinusT, angles, sines; cosAngle = sseVecDot3(unitVec0.get128(), unitVec1.get128()); __m128 selectMask = _mm_cmpgt_ps(_mm_set1_ps(VECTORMATH_SLERP_TOL), cosAngle); angle = sseACosf(cosAngle); tttt = t.get128(); oneMinusT = _mm_sub_ps(_mm_set1_ps(1.0f), tttt); angles = _mm_unpacklo_ps(_mm_set1_ps(1.0f), tttt); // angles = 1, t, 1, t angles = _mm_unpacklo_ps(angles, oneMinusT); // angles = 1, 1-t, t, 1-t angles = _mm_mul_ps(angles, angle); sines = sseSinf(angles); scales = _mm_div_ps(sines, sseSplat(sines, 0)); scale0 = sseSelect(oneMinusT, sseSplat(scales, 1), selectMask); scale1 = sseSelect(tttt, sseSplat(scales, 2), selectMask); return Vector3(sseMAdd(unitVec0.get128(), scale0, _mm_mul_ps(unitVec1.get128(), scale1))); } inline __m128 Vector3::get128() const { return mVec128; } inline void storeXYZ(const Vector3 & vec, __m128 * quad) { __m128 dstVec = *quad; VECTORMATH_ALIGNED(unsigned int sw[4]) = { 0, 0, 0, 0xFFFFFFFF }; dstVec = sseSelect(vec.get128(), dstVec, sw); *quad = dstVec; } inline void loadXYZArray(Vector3 & vec0, Vector3 & vec1, Vector3 & vec2, Vector3 & vec3, const __m128 * threeQuads) { const float * quads = (const float *)threeQuads; vec0 = Vector3(_mm_load_ps(quads)); vec1 = Vector3(_mm_loadu_ps(quads + 3)); vec2 = Vector3(_mm_loadu_ps(quads + 6)); vec3 = Vector3(_mm_loadu_ps(quads + 9)); } inline void storeXYZArray(const Vector3 & vec0, const Vector3 & vec1, const Vector3 & vec2, const Vector3 & vec3, __m128 * threeQuads) { __m128 xxxx = _mm_shuffle_ps(vec1.get128(), vec1.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128 zzzz = _mm_shuffle_ps(vec2.get128(), vec2.get128(), _MM_SHUFFLE(2, 2, 2, 2)); VECTORMATH_ALIGNED(unsigned int xsw[4]) = { 0, 0, 0, 0xFFFFFFFF }; VECTORMATH_ALIGNED(unsigned int zsw[4]) = { 0xFFFFFFFF, 0, 0, 0 }; threeQuads[0] = sseSelect(vec0.get128(), xxxx, xsw); threeQuads[1] = _mm_shuffle_ps(vec1.get128(), vec2.get128(), _MM_SHUFFLE(1, 0, 2, 1)); threeQuads[2] = sseSelect(_mm_shuffle_ps(vec3.get128(), vec3.get128(), _MM_SHUFFLE(2, 1, 0, 3)), zzzz, zsw); } inline Vector3 & Vector3::operator = (const Vector3 & vec) { mVec128 = vec.mVec128; return *this; } inline Vector3 & Vector3::setX(float _x) { sseVecSetElement(mVec128, _x, 0); return *this; } inline Vector3 & Vector3::setX(const FloatInVec & _x) { mVec128 = sseVecInsert(mVec128, _x.get128(), 0); return *this; } inline const FloatInVec Vector3::getX() const { return FloatInVec(mVec128, 0); } 
inline Vector3 & Vector3::setY(float _y) { sseVecSetElement(mVec128, _y, 1); return *this; } inline Vector3 & Vector3::setY(const FloatInVec & _y) { mVec128 = sseVecInsert(mVec128, _y.get128(), 1); return *this; } inline const FloatInVec Vector3::getY() const { return FloatInVec(mVec128, 1); } inline Vector3 & Vector3::setZ(float _z) { sseVecSetElement(mVec128, _z, 2); return *this; } inline Vector3 & Vector3::setZ(const FloatInVec & _z) { mVec128 = sseVecInsert(mVec128, _z.get128(), 2); return *this; } inline const FloatInVec Vector3::getZ() const { return FloatInVec(mVec128, 2); } inline Vector3 & Vector3::setW(float _w) { sseVecSetElement(mVec128, _w, 3); return *this; } inline Vector3 & Vector3::setW(const FloatInVec & _w) { mVec128 = sseVecInsert(mVec128, _w.get128(), 3); return *this; } inline const FloatInVec Vector3::getW() const { return FloatInVec(mVec128, 3); } inline Vector3 & Vector3::setElem(int idx, float value) { sseVecSetElement(mVec128, value, idx); return *this; } inline Vector3 & Vector3::setElem(int idx, const FloatInVec & value) { mVec128 = sseVecInsert(mVec128, value.get128(), idx); return *this; } inline const FloatInVec Vector3::getElem(int idx) const { return FloatInVec(mVec128, idx); } inline VecIdx Vector3::operator[](int idx) { return VecIdx(mVec128, idx); } inline const FloatInVec Vector3::operator[](int idx) const { return FloatInVec(mVec128, idx); } inline const Vector3 Vector3::operator + (const Vector3 & vec) const { return Vector3(_mm_add_ps(mVec128, vec.mVec128)); } inline const Vector3 Vector3::operator - (const Vector3 & vec) const { return Vector3(_mm_sub_ps(mVec128, vec.mVec128)); } inline const Point3 Vector3::operator + (const Point3 & pnt) const { return Point3(_mm_add_ps(mVec128, pnt.get128())); } inline const Vector3 Vector3::operator * (float scalar) const { return *this * FloatInVec(scalar); } inline const Vector3 Vector3::operator * (const FloatInVec & scalar) const { return Vector3(_mm_mul_ps(mVec128, scalar.get128())); } inline Vector3 & Vector3::operator += (const Vector3 & vec) { *this = *this + vec; return *this; } inline Vector3 & Vector3::operator -= (const Vector3 & vec) { *this = *this - vec; return *this; } inline Vector3 & Vector3::operator *= (float scalar) { *this = *this * scalar; return *this; } inline Vector3 & Vector3::operator *= (const FloatInVec & scalar) { *this = *this * scalar; return *this; } inline const Vector3 Vector3::operator / (float scalar) const { return *this / FloatInVec(scalar); } inline const Vector3 Vector3::operator / (const FloatInVec & scalar) const { return Vector3(_mm_div_ps(mVec128, scalar.get128())); } inline Vector3 & Vector3::operator /= (float scalar) { *this = *this / scalar; return *this; } inline Vector3 & Vector3::operator /= (const FloatInVec & scalar) { *this = *this / scalar; return *this; } inline const Vector3 Vector3::operator - () const { return Vector3(_mm_sub_ps(_mm_setzero_ps(), mVec128)); } inline const Vector3 operator * (float scalar, const Vector3 & vec) { return FloatInVec(scalar) * vec; } inline const Vector3 operator * (const FloatInVec & scalar, const Vector3 & vec) { return vec * scalar; } inline const Vector3 mulPerElem(const Vector3 & vec0, const Vector3 & vec1) { return Vector3(_mm_mul_ps(vec0.get128(), vec1.get128())); } inline const Vector3 divPerElem(const Vector3 & vec0, const Vector3 & vec1) { return Vector3(_mm_div_ps(vec0.get128(), vec1.get128())); } inline const Vector3 recipPerElem(const Vector3 & vec) { return Vector3(_mm_rcp_ps(vec.get128())); } inline const 
Vector3 absPerElem(const Vector3 & vec) { return Vector3(sseFabsf(vec.get128())); } inline const Vector3 copySignPerElem(const Vector3 & vec0, const Vector3 & vec1) { const __m128 vmask = sseUintToM128(0x7FFFFFFF); return Vector3(_mm_or_ps( _mm_and_ps(vmask, vec0.get128()), // Value _mm_andnot_ps(vmask, vec1.get128()))); // Signs } inline const Vector3 maxPerElem(const Vector3 & vec0, const Vector3 & vec1) { return Vector3(_mm_max_ps(vec0.get128(), vec1.get128())); } inline const FloatInVec maxElem(const Vector3 & vec) { return FloatInVec(_mm_max_ps(_mm_max_ps(sseSplat(vec.get128(), 0), sseSplat(vec.get128(), 1)), sseSplat(vec.get128(), 2))); } inline const Vector3 minPerElem(const Vector3 & vec0, const Vector3 & vec1) { return Vector3(_mm_min_ps(vec0.get128(), vec1.get128())); } inline const FloatInVec minElem(const Vector3 & vec) { return FloatInVec(_mm_min_ps(_mm_min_ps(sseSplat(vec.get128(), 0), sseSplat(vec.get128(), 1)), sseSplat(vec.get128(), 2))); } inline const FloatInVec sum(const Vector3 & vec) { return FloatInVec(_mm_add_ps(_mm_add_ps(sseSplat(vec.get128(), 0), sseSplat(vec.get128(), 1)), sseSplat(vec.get128(), 2))); } inline const FloatInVec dot(const Vector3 & vec0, const Vector3 & vec1) { return FloatInVec(sseVecDot3(vec0.get128(), vec1.get128()), 0); } inline const FloatInVec lengthSqr(const Vector3 & vec) { return FloatInVec(sseVecDot3(vec.get128(), vec.get128()), 0); } inline const FloatInVec length(const Vector3 & vec) { return FloatInVec(_mm_sqrt_ps(sseVecDot3(vec.get128(), vec.get128())), 0); } inline const Vector3 normalizeApprox(const Vector3 & vec) { return Vector3(_mm_mul_ps(vec.get128(), _mm_rsqrt_ps(sseVecDot3(vec.get128(), vec.get128())))); } inline const Vector3 normalize(const Vector3 & vec) { return Vector3(_mm_mul_ps(vec.get128(), sseNewtonrapsonRSqrtf(sseVecDot3(vec.get128(), vec.get128())))); } inline const Vector3 cross(const Vector3 & vec0, const Vector3 & vec1) { return Vector3(sseVecCross(vec0.get128(), vec1.get128())); } inline const Vector3 select(const Vector3 & vec0, const Vector3 & vec1, bool select1) { return select(vec0, vec1, BoolInVec(select1)); } inline const Vector3 select(const Vector3 & vec0, const Vector3 & vec1, const BoolInVec & select1) { return Vector3(sseSelect(vec0.get128(), vec1.get128(), select1.get128())); } inline const Vector3 xorPerElem(const Vector3& a, const FloatInVec b) { return Vector3(_mm_xor_ps(a.get128(), b.get128())); } inline const Vector3 sqrtPerElem(const Vector3 & vec) { return Vector3(_mm_sqrt_ps(vec.get128())); } inline const Vector3 rSqrtEstNR(const Vector3& v) { const __m128 nr = _mm_rsqrt_ps(v.get128()); // Do one more Newton-Raphson step to improve precision. 
const __m128 muls = _mm_mul_ps(_mm_mul_ps(v.get128(), nr), nr); return Vector3(_mm_mul_ps(_mm_mul_ps(_mm_set_ps1(.5f), nr), _mm_sub_ps(_mm_set_ps1(3.f), muls))); } inline bool isNormalizedEst(const Vector3& v) { const __m128 max = _mm_set_ss(1.f + kNormalizationToleranceEstSq); const __m128 min = _mm_set_ss(1.f - kNormalizationToleranceEstSq); const __m128 dot = sseVecDot3(v.get128(), v.get128()); const __m128 dotx000 = _mm_move_ss(_mm_setzero_ps(), dot); return (_mm_movemask_ps( _mm_and_ps(_mm_cmplt_ss(dotx000, max), _mm_cmpgt_ss(dotx000, min))) & 0x1) == 0x1; } #ifdef VECTORMATH_DEBUG inline void print(const Vector3 & vec) { SSEFloat tmp; tmp.m128 = vec.get128(); std::printf("( %f %f %f )\n", tmp.f[0], tmp.f[1], tmp.f[2]); } inline void print(const Vector3 & vec, const char * name) { SSEFloat tmp; tmp.m128 = vec.get128(); std::printf("%s: ( %f %f %f )\n", name, tmp.f[0], tmp.f[1], tmp.f[2]); } #endif // VECTORMATH_DEBUG // ======================================================== // Vector4 // ======================================================== inline Vector4::Vector4(float _x, float _y, float _z, float _w) { mVec128 = _mm_setr_ps(_x, _y, _z, _w); } inline Vector4::Vector4(const FloatInVec & _x, const FloatInVec & _y, const FloatInVec & _z, const FloatInVec & _w) { mVec128 = _mm_unpacklo_ps( _mm_unpacklo_ps(_x.get128(), _z.get128()), _mm_unpacklo_ps(_y.get128(), _w.get128())); } inline Vector4::Vector4(const Vector3 & xyz, float _w) { mVec128 = xyz.get128(); sseVecSetElement(mVec128, _w, 3); } inline Vector4::Vector4(const Vector3 & xyz, const FloatInVec & _w) { mVec128 = xyz.get128(); mVec128 = sseVecInsert(mVec128, _w.get128(), 3); } inline Vector4::Vector4(const Vector3 & vec) { mVec128 = vec.get128(); mVec128 = sseVecInsert(mVec128, _mm_setzero_ps(), 3); } inline Vector4::Vector4(const Point3 & pnt) { mVec128 = pnt.get128(); mVec128 = sseVecInsert(mVec128, _mm_set1_ps(1.0f), 3); } inline Vector4::Vector4(const Quat & quat) { mVec128 = quat.get128(); } inline Vector4::Vector4(float scalar) { mVec128 = FloatInVec(scalar).get128(); } inline Vector4::Vector4(const FloatInVec & scalar) { mVec128 = scalar.get128(); } inline Vector4::Vector4(__m128 vf4) { mVec128 = vf4; } //========================================= #ConfettiMathExtensionsBegin ================================================ //========================================= #ConfettiAnimationMathExtensionsBegin ======================================= inline const Vector4 Vector4::fromVector4Int(const Vector4Int vecInt) { Vector4 ret = {}; ret.mVec128 = _mm_cvtepi32_ps(vecInt); return ret; } //========================================= #ConfettiAnimationMathExtensionsEnd ======================================= //========================================= #ConfettiMathExtensionsEnd ================================================ inline const Vector4 Vector4::xAxis() { return Vector4(sseUnitVec1000()); } inline const Vector4 Vector4::yAxis() { return Vector4(sseUnitVec0100()); } inline const Vector4 Vector4::zAxis() { return Vector4(sseUnitVec0010()); } inline const Vector4 Vector4::wAxis() { return Vector4(sseUnitVec0001()); } //========================================= #ConfettiMathExtensionsBegin ================================================ //========================================= #ConfettiAnimationMathExtensionsBegin ======================================= inline const Vector4 Vector4::zero() { return Vector4(_mm_setr_ps(0.0f, 0.0f, 0.0f, 0.0f)); } inline const Vector4 Vector4::one() { return Vector4(_mm_setr_ps(1.0f, 
1.0f, 1.0f, 1.0f)); } //========================================= #ConfettiAnimationMathExtensionsEnd ======================================= //========================================= #ConfettiMathExtensionsEnd ================================================ inline const Vector4 lerp(float t, const Vector4 & vec0, const Vector4 & vec1) { return lerp(FloatInVec(t), vec0, vec1); } inline const Vector4 lerp(const FloatInVec & t, const Vector4 & vec0, const Vector4 & vec1) { return (vec0 + ((vec1 - vec0) * t)); } inline const Vector4 slerp(float t, const Vector4 & unitVec0, const Vector4 & unitVec1) { return slerp(FloatInVec(t), unitVec0, unitVec1); } inline const Vector4 slerp(const FloatInVec & t, const Vector4 & unitVec0, const Vector4 & unitVec1) { __m128 scales, scale0, scale1, cosAngle, angle, tttt, oneMinusT, angles, sines; cosAngle = sseVecDot4(unitVec0.get128(), unitVec1.get128()); __m128 selectMask = _mm_cmpgt_ps(_mm_set1_ps(VECTORMATH_SLERP_TOL), cosAngle); angle = sseACosf(cosAngle); tttt = t.get128(); oneMinusT = _mm_sub_ps(_mm_set1_ps(1.0f), tttt); angles = _mm_unpacklo_ps(_mm_set1_ps(1.0f), tttt); // angles = 1, t, 1, t angles = _mm_unpacklo_ps(angles, oneMinusT); // angles = 1, 1-t, t, 1-t angles = _mm_mul_ps(angles, angle); sines = sseSinf(angles); scales = _mm_div_ps(sines, sseSplat(sines, 0)); scale0 = sseSelect(oneMinusT, sseSplat(scales, 1), selectMask); scale1 = sseSelect(tttt, sseSplat(scales, 2), selectMask); return Vector4(sseMAdd(unitVec0.get128(), scale0, _mm_mul_ps(unitVec1.get128(), scale1))); } inline __m128 Vector4::get128() const { return mVec128; } inline Vector4 & Vector4::operator = (const Vector4 & vec) { mVec128 = vec.mVec128; return *this; } inline Vector4 & Vector4::setXYZ(const Vector3 & vec) { VECTORMATH_ALIGNED(unsigned int sw[4]) = { 0, 0, 0, 0xFFFFFFFF }; mVec128 = sseSelect(vec.get128(), mVec128, sw); return *this; } inline const Vector3 Vector4::getXYZ() const { return Vector3(mVec128); } inline Vector4 & Vector4::setX(float _x) { sseVecSetElement(mVec128, _x, 0); return *this; } inline Vector4 & Vector4::setX(const FloatInVec & _x) { mVec128 = sseVecInsert(mVec128, _x.get128(), 0); return *this; } inline const FloatInVec Vector4::getX() const { return FloatInVec(mVec128, 0); } inline Vector4 & Vector4::setY(float _y) { sseVecSetElement(mVec128, _y, 1); return *this; } inline Vector4 & Vector4::setY(const FloatInVec & _y) { mVec128 = sseVecInsert(mVec128, _y.get128(), 1); return *this; } inline const FloatInVec Vector4::getY() const { return FloatInVec(mVec128, 1); } inline Vector4 & Vector4::setZ(float _z) { sseVecSetElement(mVec128, _z, 2); return *this; } inline Vector4 & Vector4::setZ(const FloatInVec & _z) { mVec128 = sseVecInsert(mVec128, _z.get128(), 2); return *this; } inline const FloatInVec Vector4::getZ() const { return FloatInVec(mVec128, 2); } inline Vector4 & Vector4::setW(float _w) { sseVecSetElement(mVec128, _w, 3); return *this; } inline Vector4 & Vector4::setW(const FloatInVec & _w) { mVec128 = sseVecInsert(mVec128, _w.get128(), 3); return *this; } inline const FloatInVec Vector4::getW() const { return FloatInVec(mVec128, 3); } inline Vector4 & Vector4::setElem(int idx, float value) { sseVecSetElement(mVec128, value, idx); return *this; } inline Vector4 & Vector4::setElem(int idx, const FloatInVec & value) { mVec128 = sseVecInsert(mVec128, value.get128(), idx); return *this; } inline const FloatInVec Vector4::getElem(int idx) const { return FloatInVec(mVec128, idx); } inline VecIdx Vector4::operator[](int idx) { return 
VecIdx(mVec128, idx); } inline const FloatInVec Vector4::operator[](int idx) const { return FloatInVec(mVec128, idx); } inline const Vector4 Vector4::operator + (const Vector4 & vec) const { return Vector4(_mm_add_ps(mVec128, vec.mVec128)); } inline const Vector4 Vector4::operator - (const Vector4 & vec) const { return Vector4(_mm_sub_ps(mVec128, vec.mVec128)); } inline const Vector4 Vector4::operator * (float scalar) const { return *this * FloatInVec(scalar); } inline const Vector4 Vector4::operator * (const FloatInVec & scalar) const { return Vector4(_mm_mul_ps(mVec128, scalar.get128())); } inline Vector4 & Vector4::operator += (const Vector4 & vec) { *this = *this + vec; return *this; } inline Vector4 & Vector4::operator -= (const Vector4 & vec) { *this = *this - vec; return *this; } inline Vector4 & Vector4::operator *= (float scalar) { *this = *this * scalar; return *this; } inline Vector4 & Vector4::operator *= (const FloatInVec & scalar) { *this = *this * scalar; return *this; } inline const Vector4 Vector4::operator / (float scalar) const { return *this / FloatInVec(scalar); } inline const Vector4 Vector4::operator / (const FloatInVec & scalar) const { return Vector4(_mm_div_ps(mVec128, scalar.get128())); } inline Vector4 & Vector4::operator /= (float scalar) { *this = *this / scalar; return *this; } inline Vector4 & Vector4::operator /= (const FloatInVec & scalar) { *this = *this / scalar; return *this; } inline const Vector4 Vector4::operator - () const { return Vector4(_mm_sub_ps(_mm_setzero_ps(), mVec128)); } inline const Vector4 operator * (float scalar, const Vector4 & vec) { return FloatInVec(scalar) * vec; } inline const Vector4 operator * (const FloatInVec & scalar, const Vector4 & vec) { return vec * scalar; } inline const Vector4 mulPerElem(const Vector4 & vec0, const Vector4 & vec1) { return Vector4(_mm_mul_ps(vec0.get128(), vec1.get128())); } inline const Vector4 divPerElem(const Vector4 & vec0, const Vector4 & vec1) { return Vector4(_mm_div_ps(vec0.get128(), vec1.get128())); } inline const Vector4 recipPerElem(const Vector4 & vec) { return Vector4(_mm_rcp_ps(vec.get128())); } //========================================= #ConfettiMathExtensionsBegin ================================================ //========================================= #ConfettiAnimationMathExtensionsBegin ======================================= inline const Vector4 sqrtPerElem(const Vector4 & vec) { return Vector4(_mm_sqrt_ps(vec.get128())); } inline const Vector4 rsqrtPerElem(const Vector4 & vec) { return Vector4(_mm_rsqrt_ps(vec.get128())); } inline const Vector4 rcpEst(const Vector4& v) { return Vector4(_mm_rcp_ps(v.get128())); } inline const Vector4 rSqrtEst(const Vector4& v) { return Vector4(_mm_rsqrt_ps(v.get128())); } inline const Vector4 rSqrtEstNR(const Vector4& v) { const __m128 nr = _mm_rsqrt_ps(v.get128()); // Do one more Newton-Raphson step to improve precision. 
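// The step below computes x' = x * (3 - v * x * x) / 2, the standard
// Newton-Raphson refinement for 1/sqrt(v); it roughly doubles the ~12 bits
// of precision of the raw _mm_rsqrt_ps estimate.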
const __m128 muls = _mm_mul_ps(_mm_mul_ps(v.get128(), nr), nr); return Vector4(_mm_mul_ps(_mm_mul_ps(_mm_set_ps1(.5f), nr), _mm_sub_ps(_mm_set_ps1(3.f), muls))); } inline const Vector4 aCos(const Vector4& arg) { return Vector4(sseACosf(arg.get128())); } //========================================= #ConfettiAnimationMathExtensionsEnd ======================================= //========================================= #ConfettiMathExtensionsEnd ================================================ inline const Vector4 absPerElem(const Vector4 & vec) { return Vector4(sseFabsf(vec.get128())); } inline const Vector4 copySignPerElem(const Vector4 & vec0, const Vector4 & vec1) { const __m128 vmask = sseUintToM128(0x7FFFFFFF); return Vector4(_mm_or_ps( _mm_and_ps(vmask, vec0.get128()), // Value _mm_andnot_ps(vmask, vec1.get128()))); // Signs } inline const Vector4 maxPerElem(const Vector4 & vec0, const Vector4 & vec1) { return Vector4(_mm_max_ps(vec0.get128(), vec1.get128())); } inline const FloatInVec maxElem(const Vector4 & vec) { return FloatInVec(_mm_max_ps( _mm_max_ps(sseSplat(vec.get128(), 0), sseSplat(vec.get128(), 1)), _mm_max_ps(sseSplat(vec.get128(), 2), sseSplat(vec.get128(), 3)))); } inline const Vector4 minPerElem(const Vector4 & vec0, const Vector4 & vec1) { return Vector4(_mm_min_ps(vec0.get128(), vec1.get128())); } inline const FloatInVec minElem(const Vector4 & vec) { return FloatInVec(_mm_min_ps( _mm_min_ps(sseSplat(vec.get128(), 0), sseSplat(vec.get128(), 1)), _mm_min_ps(sseSplat(vec.get128(), 2), sseSplat(vec.get128(), 3)))); } inline const FloatInVec sum(const Vector4 & vec) { return FloatInVec(_mm_add_ps( _mm_add_ps(sseSplat(vec.get128(), 0), sseSplat(vec.get128(), 1)), _mm_add_ps(sseSplat(vec.get128(), 2), sseSplat(vec.get128(), 3)))); } inline const FloatInVec dot(const Vector4 & vec0, const Vector4 & vec1) { return FloatInVec(sseVecDot4(vec0.get128(), vec1.get128()), 0); } inline const FloatInVec lengthSqr(const Vector4 & vec) { return FloatInVec(sseVecDot4(vec.get128(), vec.get128()), 0); } inline const FloatInVec length(const Vector4 & vec) { return FloatInVec(_mm_sqrt_ps(sseVecDot4(vec.get128(), vec.get128())), 0); } inline const Vector4 normalizeApprox(const Vector4 & vec) { return Vector4(_mm_mul_ps(vec.get128(), _mm_rsqrt_ps(sseVecDot4(vec.get128(), vec.get128())))); } inline const Vector4 normalize(const Vector4 & vec) { return Vector4(_mm_mul_ps(vec.get128(), sseNewtonrapsonRSqrtf(sseVecDot4(vec.get128(), vec.get128())))); } inline const Vector4 select(const Vector4 & vec0, const Vector4 & vec1, bool select1) { return select(vec0, vec1, BoolInVec(select1)); } inline const Vector4 select(const Vector4 & vec0, const Vector4 & vec1, const BoolInVec & select1) { return Vector4(sseSelect(vec0.get128(), vec1.get128(), select1.get128())); } //========================================= #ConfettiMathExtensionsBegin ================================================ //========================================= #ConfettiAnimationMathExtensionsBegin ======================================= inline const Vector4Int cmpEq(const Vector4& a, const Vector4& b) { return _mm_castps_si128(_mm_cmpeq_ps(a.get128(), b.get128())); } inline const Vector4Int cmpNotEq(const Vector4& a, const Vector4& b) { return _mm_castps_si128(_mm_cmpneq_ps(a.get128(), b.get128())); } inline const Vector4Int cmpLt(const Vector4& a, const Vector4& b) { return _mm_castps_si128(_mm_cmplt_ps(a.get128(), b.get128())); } inline const Vector4Int cmpLe(const Vector4& a, const Vector4& b) { return 
_mm_castps_si128(_mm_cmple_ps(a.get128(), b.get128())); } inline const Vector4Int cmpGt(const Vector4& a, const Vector4& b) { return _mm_castps_si128(_mm_cmpgt_ps(a.get128(), b.get128())); } inline const Vector4Int cmpGe(const Vector4& a, const Vector4& b) { return _mm_castps_si128(_mm_cmpge_ps(a.get128(), b.get128())); } inline const Vector4Int signBit(const Vector4& v) { return _mm_slli_epi32(_mm_srli_epi32(_mm_castps_si128(v.get128()), 31), 31); } inline const Vector4 xorPerElem(const Vector4& a, const Vector4Int b) { return Vector4(_mm_xor_ps(a.get128(), _mm_castsi128_ps(b))); } inline const Vector4 orPerElem(const Vector4& a, const Vector4Int b) { return Vector4(_mm_or_ps(a.get128(), _mm_castsi128_ps(b))); } inline const Vector4 orPerElem(const Vector4& a, const Vector4& b) { return Vector4(_mm_or_ps(a.get128(), b.get128())); } inline const Vector4 andPerElem(const Vector4& a, const Vector4Int b) { return Vector4(_mm_and_ps(a.get128(), _mm_castsi128_ps(b))); } inline const Vector4 halfToFloat(const Vector4Int vecInt) { const __m128i mask_nosign = _mm_set1_epi32(0x7fff); const __m128 magic = _mm_castsi128_ps(_mm_set1_epi32((254 - 15) << 23)); const __m128i was_infnan = _mm_set1_epi32(0x7bff); const __m128 exp_infnan = _mm_castsi128_ps(_mm_set1_epi32(255 << 23)); const __m128i expmant = _mm_and_si128(mask_nosign, vecInt); const __m128i shifted = _mm_slli_epi32(expmant, 13); const __m128 scaled = _mm_mul_ps(_mm_castsi128_ps(shifted), magic); const __m128i b_wasinfnan = _mm_cmpgt_epi32(expmant, was_infnan); const __m128i sign = _mm_slli_epi32(_mm_xor_si128(vecInt, expmant), 16); const __m128 infnanexp = _mm_and_ps(_mm_castsi128_ps(b_wasinfnan), exp_infnan); const __m128 sign_inf = _mm_or_ps(_mm_castsi128_ps(sign), infnanexp); return Vector4(_mm_or_ps(scaled, sign_inf)); } inline void transpose3x4(const Vector4 in[3], Vector4 out[4]) { const __m128 zero = _mm_setzero_ps(); const __m128 temp0 = _mm_unpacklo_ps(in[0].get128(), in[1].get128()); const __m128 temp1 = _mm_unpacklo_ps(in[2].get128(), zero); const __m128 temp2 = _mm_unpackhi_ps(in[0].get128(), in[1].get128()); const __m128 temp3 = _mm_unpackhi_ps(in[2].get128(), zero); out[0] = Vector4(_mm_movelh_ps(temp0, temp1)); out[1] = Vector4(_mm_movehl_ps(temp1, temp0)); out[2] = Vector4(_mm_movelh_ps(temp2, temp3)); out[3] = Vector4(_mm_movehl_ps(temp3, temp2)); } inline void transpose4x4(const Vector4 in[4], Vector4 out[4]) { const __m128 tmp0 = _mm_unpacklo_ps(in[0].get128(), in[2].get128()); const __m128 tmp1 = _mm_unpacklo_ps(in[1].get128(), in[3].get128()); const __m128 tmp2 = _mm_unpackhi_ps(in[0].get128(), in[2].get128()); const __m128 tmp3 = _mm_unpackhi_ps(in[1].get128(), in[3].get128()); out[0] = Vector4(_mm_unpacklo_ps(tmp0, tmp1)); out[1] = Vector4(_mm_unpackhi_ps(tmp0, tmp1)); out[2] = Vector4(_mm_unpacklo_ps(tmp2, tmp3)); out[3] = Vector4(_mm_unpackhi_ps(tmp2, tmp3)); } //CONFFX_TEST_BEGIN inline void transpose4x3(const Vector4 in[4], Vector4 out[4]) { const __m128 tmp0 = _mm_unpacklo_ps(in[0].get128(), in[2].get128()); const __m128 tmp1 = _mm_unpacklo_ps(in[1].get128(), in[3].get128()); const __m128 tmp2 = _mm_unpackhi_ps(in[0].get128(), in[2].get128()); const __m128 tmp3 = _mm_unpackhi_ps(in[1].get128(), in[3].get128()); out[0] = Vector4(_mm_unpacklo_ps(tmp0, tmp1)); out[1] = Vector4(_mm_unpackhi_ps(tmp0, tmp1)); out[2] = Vector4(_mm_unpacklo_ps(tmp2, tmp3)); } //CONFFX__TEST_END inline void transpose16x16(const Vector4 in[16], Vector4 out[16]) { const __m128 tmp0 = _mm_unpacklo_ps(in[0].get128(), in[2].get128()); const 
__m128 tmp1 = _mm_unpacklo_ps(in[1].get128() , in[3].get128()); const __m128 tmp2 = _mm_unpackhi_ps(in[0].get128() , in[2].get128()); const __m128 tmp3 = _mm_unpackhi_ps(in[1].get128() , in[3].get128()); const __m128 tmp4 = _mm_unpacklo_ps(in[4].get128() , in[6].get128()); const __m128 tmp5 = _mm_unpacklo_ps(in[5].get128() , in[7].get128()); const __m128 tmp6 = _mm_unpackhi_ps(in[4].get128() , in[6].get128()); const __m128 tmp7 = _mm_unpackhi_ps(in[5].get128() , in[7].get128()); const __m128 tmp8 = _mm_unpacklo_ps(in[8].get128() , in[10].get128()); const __m128 tmp9 = _mm_unpacklo_ps(in[9].get128() , in[11].get128()); const __m128 tmp10 = _mm_unpackhi_ps(in[8].get128(), in[10].get128()); const __m128 tmp11 = _mm_unpackhi_ps(in[9].get128(), in[11].get128()); const __m128 tmp12 = _mm_unpacklo_ps(in[12].get128(), in[14].get128()); const __m128 tmp13 = _mm_unpacklo_ps(in[13].get128(), in[15].get128()); const __m128 tmp14 = _mm_unpackhi_ps(in[12].get128(), in[14].get128()); const __m128 tmp15 = _mm_unpackhi_ps(in[13].get128(), in[15].get128()); out[0] = Vector4(_mm_unpacklo_ps(tmp0, tmp1)); out[1] = Vector4(_mm_unpacklo_ps(tmp4, tmp5)); out[2] = Vector4(_mm_unpacklo_ps(tmp8, tmp9)); out[3] = Vector4(_mm_unpacklo_ps(tmp12, tmp13)); out[4] = Vector4(_mm_unpackhi_ps(tmp0, tmp1)); out[5] = Vector4(_mm_unpackhi_ps(tmp4, tmp5)); out[6] = Vector4(_mm_unpackhi_ps(tmp8, tmp9)); out[7] = Vector4(_mm_unpackhi_ps(tmp12, tmp13)); out[8] = Vector4(_mm_unpacklo_ps(tmp2, tmp3)); out[9] = Vector4(_mm_unpacklo_ps(tmp6, tmp7)); out[10] = Vector4(_mm_unpacklo_ps(tmp10, tmp11)); out[11] = Vector4(_mm_unpacklo_ps(tmp14, tmp15)); out[12] = Vector4(_mm_unpackhi_ps(tmp2, tmp3)); out[13] = Vector4(_mm_unpackhi_ps(tmp6, tmp7)); out[14] = Vector4(_mm_unpackhi_ps(tmp10, tmp11)); out[15] = Vector4(_mm_unpackhi_ps(tmp14, tmp15)); } inline void storePtrU(const Vector4& v, float* f) { _mm_storeu_ps(f, v.get128()); } inline void store3PtrU(const Vector4& v, float* f) { _mm_store_ss(f + 0, v.get128()); const __m128 a = _mm_shuffle_ps(v.get128(), v.get128(), _MM_SHUFFLE(1, 1, 1, 1)); _mm_store_ss(f + 1, a); _mm_store_ss(f + 2, _mm_movehl_ps(v.get128(), v.get128())); } //========================================= #ConfettiAnimationMathExtensionsEnd ======================================= //========================================= #ConfettiMathExtensionsEnd ================================================ #ifdef VECTORMATH_DEBUG inline void print(const Vector4 & vec) { SSEFloat tmp; tmp.m128 = vec.get128(); std::printf("( %f %f %f %f )\n", tmp.f[0], tmp.f[1], tmp.f[2], tmp.f[3]); } inline void print(const Vector4 & vec, const char * name) { SSEFloat tmp; tmp.m128 = vec.get128(); std::printf("%s: ( %f %f %f %f )\n", name, tmp.f[0], tmp.f[1], tmp.f[2], tmp.f[3]); } #endif // VECTORMATH_DEBUG // ======================================================== // Point3 // ======================================================== inline Point3::Point3(float _x, float _y, float _z) { mVec128 = _mm_setr_ps(_x, _y, _z, 0.0f); } inline Point3::Point3(const FloatInVec & _x, const FloatInVec & _y, const FloatInVec & _z) { mVec128 = _mm_unpacklo_ps(_mm_unpacklo_ps(_x.get128(), _z.get128()), _y.get128()); } inline Point3::Point3(const Vector3 & vec) { mVec128 = vec.get128(); } inline Point3::Point3(float scalar) { mVec128 = FloatInVec(scalar).get128(); } inline Point3::Point3(const FloatInVec & scalar) { mVec128 = scalar.get128(); } inline Point3::Point3(__m128 vf4) { mVec128 = vf4; } inline const Point3 lerp(float t, const Point3 & pnt0, const Point3 
& pnt1) { return lerp(FloatInVec(t), pnt0, pnt1); } inline const Point3 lerp(const FloatInVec & t, const Point3 & pnt0, const Point3 & pnt1) { return (pnt0 + ((pnt1 - pnt0) * t)); } inline __m128 Point3::get128() const { return mVec128; } inline void storeXYZ(const Point3 & pnt, __m128 * quad) { __m128 dstVec = *quad; VECTORMATH_ALIGNED(unsigned int sw[4]) = { 0, 0, 0, 0xFFFFFFFF }; dstVec = sseSelect(pnt.get128(), dstVec, sw); *quad = dstVec; } inline void loadXYZArray(Point3 & pnt0, Point3 & pnt1, Point3 & pnt2, Point3 & pnt3, const __m128 * threeQuads) { const float * quads = (const float *)threeQuads; pnt0 = Point3(_mm_load_ps(quads)); pnt1 = Point3(_mm_loadu_ps(quads + 3)); pnt2 = Point3(_mm_loadu_ps(quads + 6)); pnt3 = Point3(_mm_loadu_ps(quads + 9)); } inline void storeXYZArray(const Point3 & pnt0, const Point3 & pnt1, const Point3 & pnt2, const Point3 & pnt3, __m128 * threeQuads) { __m128 xxxx = _mm_shuffle_ps(pnt1.get128(), pnt1.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128 zzzz = _mm_shuffle_ps(pnt2.get128(), pnt2.get128(), _MM_SHUFFLE(2, 2, 2, 2)); VECTORMATH_ALIGNED(unsigned int xsw[4]) = { 0, 0, 0, 0xFFFFFFFF }; VECTORMATH_ALIGNED(unsigned int zsw[4]) = { 0xFFFFFFFF, 0, 0, 0 }; threeQuads[0] = sseSelect(pnt0.get128(), xxxx, xsw); threeQuads[1] = _mm_shuffle_ps(pnt1.get128(), pnt2.get128(), _MM_SHUFFLE(1, 0, 2, 1)); threeQuads[2] = sseSelect(_mm_shuffle_ps(pnt3.get128(), pnt3.get128(), _MM_SHUFFLE(2, 1, 0, 3)), zzzz, zsw); } inline Point3 & Point3::operator = (const Point3 & pnt) { mVec128 = pnt.mVec128; return *this; } inline Point3 & Point3::setX(float _x) { sseVecSetElement(mVec128, _x, 0); return *this; } inline Point3 & Point3::setX(const FloatInVec & _x) { mVec128 = sseVecInsert(mVec128, _x.get128(), 0); return *this; } inline const FloatInVec Point3::getX() const { return FloatInVec(mVec128, 0); } inline Point3 & Point3::setY(float _y) { sseVecSetElement(mVec128, _y, 1); return *this; } inline Point3 & Point3::setY(const FloatInVec & _y) { mVec128 = sseVecInsert(mVec128, _y.get128(), 1); return *this; } inline const FloatInVec Point3::getY() const { return FloatInVec(mVec128, 1); } inline Point3 & Point3::setZ(float _z) { sseVecSetElement(mVec128, _z, 2); return *this; } inline Point3 & Point3::setZ(const FloatInVec & _z) { mVec128 = sseVecInsert(mVec128, _z.get128(), 2); return *this; } inline const FloatInVec Point3::getZ() const { return FloatInVec(mVec128, 2); } inline Point3 & Point3::setW(float _w) { sseVecSetElement(mVec128, _w, 3); return *this; } inline Point3 & Point3::setW(const FloatInVec & _w) { mVec128 = sseVecInsert(mVec128, _w.get128(), 3); return *this; } inline const FloatInVec Point3::getW() const { return FloatInVec(mVec128, 3); } inline Point3 & Point3::setElem(int idx, float value) { sseVecSetElement(mVec128, value, idx); return *this; } inline Point3 & Point3::setElem(int idx, const FloatInVec & value) { mVec128 = sseVecInsert(mVec128, value.get128(), idx); return *this; } inline const FloatInVec Point3::getElem(int idx) const { return FloatInVec(mVec128, idx); } inline VecIdx Point3::operator[](int idx) { return VecIdx(mVec128, idx); } inline const FloatInVec Point3::operator[](int idx) const { return FloatInVec(mVec128, idx); } inline const Vector3 Point3::operator - (const Point3 & pnt) const { return Vector3(_mm_sub_ps(mVec128, pnt.mVec128)); } inline const Point3 Point3::operator + (const Vector3 & vec) const { return Point3(_mm_add_ps(mVec128, vec.get128())); } inline const Point3 Point3::operator - (const Vector3 & vec) const { return 
Point3(_mm_sub_ps(mVec128, vec.get128())); } inline Point3 & Point3::operator += (const Vector3 & vec) { *this = *this + vec; return *this; } inline Point3 & Point3::operator -= (const Vector3 & vec) { *this = *this - vec; return *this; } inline const Point3 mulPerElem(const Point3 & pnt0, const Point3 & pnt1) { return Point3(_mm_mul_ps(pnt0.get128(), pnt1.get128())); } inline const Point3 divPerElem(const Point3 & pnt0, const Point3 & pnt1) { return Point3(_mm_div_ps(pnt0.get128(), pnt1.get128())); } inline const Point3 recipPerElem(const Point3 & pnt) { return Point3(_mm_rcp_ps(pnt.get128())); } inline const Point3 absPerElem(const Point3 & pnt) { return Point3(sseFabsf(pnt.get128())); } inline const Point3 copySignPerElem(const Point3 & pnt0, const Point3 & pnt1) { const __m128 vmask = sseUintToM128(0x7FFFFFFF); return Point3(_mm_or_ps( _mm_and_ps(vmask, pnt0.get128()), // Value _mm_andnot_ps(vmask, pnt1.get128()))); // Signs } inline const Point3 maxPerElem(const Point3 & pnt0, const Point3 & pnt1) { return Point3(_mm_max_ps(pnt0.get128(), pnt1.get128())); } inline const FloatInVec maxElem(const Point3 & pnt) { return FloatInVec(_mm_max_ps(_mm_max_ps(sseSplat(pnt.get128(), 0), sseSplat(pnt.get128(), 1)), sseSplat(pnt.get128(), 2))); } inline const Point3 minPerElem(const Point3 & pnt0, const Point3 & pnt1) { return Point3(_mm_min_ps(pnt0.get128(), pnt1.get128())); } inline const FloatInVec minElem(const Point3 & pnt) { return FloatInVec(_mm_min_ps(_mm_min_ps(sseSplat(pnt.get128(), 0), sseSplat(pnt.get128(), 1)), sseSplat(pnt.get128(), 2))); } inline const FloatInVec sum(const Point3 & pnt) { return FloatInVec(_mm_add_ps(_mm_add_ps(sseSplat(pnt.get128(), 0), sseSplat(pnt.get128(), 1)), sseSplat(pnt.get128(), 2))); } inline const Point3 scale(const Point3 & pnt, float scaleVal) { return scale(pnt, FloatInVec(scaleVal)); } inline const Point3 scale(const Point3 & pnt, const FloatInVec & scaleVal) { return mulPerElem(pnt, Point3(scaleVal)); } inline const Point3 scale(const Point3 & pnt, const Vector3 & scaleVec) { return mulPerElem(pnt, Point3(scaleVec)); } inline const FloatInVec projection(const Point3 & pnt, const Vector3 & unitVec) { return FloatInVec(sseVecDot3(pnt.get128(), unitVec.get128()), 0); } inline const FloatInVec distSqrFromOrigin(const Point3 & pnt) { return lengthSqr(Vector3(pnt)); } inline const FloatInVec distFromOrigin(const Point3 & pnt) { return length(Vector3(pnt)); } inline const FloatInVec distSqr(const Point3 & pnt0, const Point3 & pnt1) { return lengthSqr((pnt1 - pnt0)); } inline const FloatInVec dist(const Point3 & pnt0, const Point3 & pnt1) { return length((pnt1 - pnt0)); } inline const Point3 select(const Point3 & pnt0, const Point3 & pnt1, bool select1) { return select(pnt0, pnt1, BoolInVec(select1)); } inline const Point3 select(const Point3 & pnt0, const Point3 & pnt1, const BoolInVec & select1) { return Point3(sseSelect(pnt0.get128(), pnt1.get128(), select1.get128())); } #ifdef VECTORMATH_DEBUG inline void print(const Point3 & pnt) { SSEFloat tmp; tmp.m128 = pnt.get128(); std::printf("( %f %f %f )\n", tmp.f[0], tmp.f[1], tmp.f[2]); } inline void print(const Point3 & pnt, const char * name) { SSEFloat tmp; tmp.m128 = pnt.get128(); std::printf("%s: ( %f %f %f )\n", name, tmp.f[0], tmp.f[1], tmp.f[2]); } #endif // VECTORMATH_DEBUG //========================================= #ConfettiMathExtensionsBegin ================================================ //========================================= #ConfettiAnimationMathExtensionsBegin 
======================================= #define SSE_SELECT_I(_b, _true, _false) \ _mm_xor_si128(_false, _mm_and_si128(_b, _mm_xor_si128(_true, _false))) #define SSE_SPLAT_I(_v, _i) \ _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(_v), _mm_castsi128_ps(_v), \ _MM_SHUFFLE(_i, _i, _i, _i))) // ======================================================== // Vector4Int // ======================================================== namespace vector4int { inline Vector4Int zero() { return _mm_setzero_si128(); } inline Vector4Int one() { const __m128i zero = _mm_setzero_si128(); const __m128i ffff = _mm_cmpeq_epi32(zero, zero); return _mm_srli_epi32(ffff, 31); } inline Vector4Int x_axis() { const __m128i zero = _mm_setzero_si128(); const __m128i ffff = _mm_cmpeq_epi32(zero, zero); return _mm_srli_si128(_mm_srli_epi32(ffff, 31), 12); } inline Vector4Int y_axis() { const __m128i zero = _mm_setzero_si128(); const __m128i ffff = _mm_cmpeq_epi32(zero, zero); return _mm_slli_si128(_mm_srli_si128(_mm_srli_epi32(ffff, 31), 12), 4); } inline Vector4Int z_axis() { const __m128i zero = _mm_setzero_si128(); const __m128i ffff = _mm_cmpeq_epi32(zero, zero); return _mm_slli_si128(_mm_srli_si128(_mm_srli_epi32(ffff, 31), 12), 8); } inline Vector4Int w_axis() { const __m128i zero = _mm_setzero_si128(); const __m128i ffff = _mm_cmpeq_epi32(zero, zero); return _mm_slli_si128(_mm_srli_epi32(ffff, 31), 12); } inline Vector4Int all_true() { return _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128()); } inline Vector4Int all_false() { return _mm_setzero_si128(); } inline Vector4Int mask_sign() { const __m128i ffff = _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128()); return _mm_slli_epi32(ffff, 31); } inline Vector4Int mask_not_sign() { const __m128i ffff = _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128()); return _mm_srli_epi32(ffff, 1); } inline Vector4Int mask_ffff() { return _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128()); } inline Vector4Int mask_0000() { return _mm_setzero_si128(); } inline Vector4Int mask_fff0() { const __m128i zero = _mm_setzero_si128(); const __m128i ffff = _mm_cmpeq_epi32(zero, zero); return _mm_srli_si128(ffff, 4); } inline Vector4Int mask_f000() { const __m128i zero = _mm_setzero_si128(); const __m128i ffff = _mm_cmpeq_epi32(zero, zero); return _mm_srli_si128(ffff, 12); } inline Vector4Int mask_0f00() { const __m128i zero = _mm_setzero_si128(); const __m128i ffff = _mm_cmpeq_epi32(zero, zero); return _mm_srli_si128(_mm_slli_si128(ffff, 12), 8); } inline Vector4Int mask_00f0() { const __m128i zero = _mm_setzero_si128(); const __m128i ffff = _mm_cmpeq_epi32(zero, zero); return _mm_srli_si128(_mm_slli_si128(ffff, 12), 4); } inline Vector4Int mask_000f() { const __m128i zero = _mm_setzero_si128(); const __m128i ffff = _mm_cmpeq_epi32(zero, zero); return _mm_slli_si128(ffff, 12); } inline Vector4Int Load(int _x, int _y, int _z, int _w) { return _mm_set_epi32(_w, _z, _y, _x); } inline Vector4Int LoadX(int _x) { return _mm_set_epi32(0, 0, 0, _x); } inline Vector4Int Load1(int _x) { return _mm_set1_epi32(_x); } inline Vector4Int Load(bool _x, bool _y, bool _z, bool _w) { return _mm_sub_epi32(_mm_setzero_si128(), _mm_set_epi32(_w, _z, _y, _x)); } inline Vector4Int LoadX(bool _x) { return _mm_sub_epi32(_mm_setzero_si128(), _mm_set_epi32(0, 0, 0, _x)); } inline Vector4Int Load1(bool _x) { return _mm_sub_epi32(_mm_setzero_si128(), _mm_set1_epi32(_x)); } inline Vector4Int LoadPtr(const int* _i) { return _mm_load_si128(reinterpret_cast<const __m128i*>(_i)); } inline Vector4Int 
LoadXPtr(const int* _i) { return _mm_cvtsi32_si128(*_i); } inline Vector4Int Load1Ptr(const int* _i) { return reinterpret_cast<Vector4Int>(_mm_shuffle_epi32( _mm_loadl_epi64(reinterpret_cast<const __m128i*>(_i)), _MM_SHUFFLE(0, 0, 0, 0))); } inline Vector4Int Load2Ptr(const int* _i) { return reinterpret_cast<Vector4Int>(_mm_loadl_epi64(reinterpret_cast<const __m128i*>(_i))); } inline Vector4Int Load3Ptr(const int* _i) { return reinterpret_cast<Vector4Int>(_mm_set_epi32(0, _i[2], _i[1], _i[0])); } inline Vector4Int LoadPtrU(const int* _i) { return reinterpret_cast<Vector4Int>(_mm_loadu_si128(reinterpret_cast<const __m128i*>(_i))); } inline Vector4Int LoadXPtrU(const int* _i) { return reinterpret_cast<Vector4Int>(_mm_cvtsi32_si128(*_i)); } inline Vector4Int Load1PtrU(const int* _i) { return reinterpret_cast<Vector4Int>(_mm_set1_epi32(*_i)); } inline Vector4Int Load2PtrU(const int* _i) { return reinterpret_cast<Vector4Int>(_mm_set_epi32(0, 0, _i[1], _i[0])); } inline Vector4Int Load3PtrU(const int* _i) { return reinterpret_cast<Vector4Int>(_mm_set_epi32(0, _i[2], _i[1], _i[0])); } inline Vector4Int FromFloatRound(const Vector4& _f) { return reinterpret_cast<Vector4Int>(_mm_cvtps_epi32(_f.get128())); } inline Vector4Int FromFloatTrunc(const Vector4& _f) { return reinterpret_cast<Vector4Int>(_mm_cvttps_epi32(_f.get128())); } } // namespace vector4int inline int GetX(const Vector4Int _v) { return _mm_cvtsi128_si32(_v); } inline int GetY(const Vector4Int _v) { return _mm_cvtsi128_si32(SSE_SPLAT_I(_v, 1)); } inline int GetZ(const Vector4Int _v) { return _mm_cvtsi128_si32(_mm_unpackhi_epi32(_v, _v)); } inline int GetW(const Vector4Int _v) { return _mm_cvtsi128_si32(SSE_SPLAT_I(_v, 3)); } inline Vector4Int SetX(const Vector4Int _v, int _i) { return _mm_castps_si128( _mm_move_ss(_mm_castsi128_ps(_v), _mm_castsi128_ps(_mm_set1_epi32(_i)))); } inline Vector4Int SetY(const Vector4Int _v, int _i) { const __m128 i = _mm_castsi128_ps(_mm_set1_epi32(_i)); const __m128 v = _mm_castsi128_ps(_v); const __m128 yxzw = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 2, 0, 1)); const __m128 fxzw = _mm_move_ss(yxzw, i); return _mm_castps_si128(_mm_shuffle_ps(fxzw, fxzw, _MM_SHUFFLE(3, 2, 0, 1))); } inline Vector4Int SetZ(const Vector4Int _v, int _i) { const __m128 i = _mm_castsi128_ps(_mm_set1_epi32(_i)); const __m128 v = _mm_castsi128_ps(_v); const __m128 yxzw = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 0, 1, 2)); const __m128 fxzw = _mm_move_ss(yxzw, i); return _mm_castps_si128(_mm_shuffle_ps(fxzw, fxzw, _MM_SHUFFLE(3, 0, 1, 2))); } inline Vector4Int SetW(const Vector4Int _v, int _i) { const __m128 i = _mm_castsi128_ps(_mm_set1_epi32(_i)); const __m128 v = _mm_castsi128_ps(_v); const __m128 yxzw = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 2, 1, 3)); const __m128 fxzw = _mm_move_ss(yxzw, i); return _mm_castps_si128(_mm_shuffle_ps(fxzw, fxzw, _MM_SHUFFLE(0, 2, 1, 3))); } inline Vector4Int SetI(const Vector4Int _v, int _ith, int _i) { union { Vector4Int ret; int af[4]; } u = {_v}; u.af[_ith] = _i; return u.ret; } inline void StorePtr(const Vector4Int _v, int* _i) { _mm_store_si128(reinterpret_cast<__m128i*>(_i), _v); } inline void Store1Ptr(const Vector4Int _v, int* _i) { *_i = _mm_cvtsi128_si32(_v); } inline void Store2Ptr(const Vector4Int _v, int* _i) { _i[0] = _mm_cvtsi128_si32(_v); _i[1] = _mm_cvtsi128_si32(SSE_SPLAT_I(_v, 1)); } inline void Store3Ptr(const Vector4Int _v, int* _i) { _i[0] = _mm_cvtsi128_si32(_v); _i[1] = _mm_cvtsi128_si32(SSE_SPLAT_I(_v, 1)); _i[2] = _mm_cvtsi128_si32(_mm_unpackhi_epi32(_v, _v)); } inline void 
StorePtrU(const Vector4Int _v, int* _i) { _mm_storeu_si128(reinterpret_cast<__m128i*>(_i), _v); } inline void Store1PtrU(const Vector4Int _v, int* _i) { *_i = _mm_cvtsi128_si32(_v); } inline void Store2PtrU(const Vector4Int _v, int* _i) { _i[0] = _mm_cvtsi128_si32(_v); _i[1] = _mm_cvtsi128_si32(SSE_SPLAT_I(_v, 1)); } inline void Store3PtrU(const Vector4Int _v, int* _i) { _i[0] = _mm_cvtsi128_si32(_v); _i[1] = _mm_cvtsi128_si32(SSE_SPLAT_I(_v, 1)); _i[2] = _mm_cvtsi128_si32(_mm_unpackhi_epi32(_v, _v)); } inline Vector4Int SplatX(const Vector4Int _a) { return SSE_SPLAT_I(_a, 0); } inline Vector4Int SplatY(const Vector4Int _a) { return SSE_SPLAT_I(_a, 1); } inline Vector4Int SplatZ(const Vector4Int _a) { return SSE_SPLAT_I(_a, 2); } inline Vector4Int SplatW(const Vector4Int _a) { return SSE_SPLAT_I(_a, 3); } inline int MoveMask(const Vector4Int _v) { return _mm_movemask_ps(_mm_castsi128_ps(_v)); } inline bool AreAllTrue(const Vector4Int _v) { return _mm_movemask_ps(_mm_castsi128_ps(_v)) == 0xf; } inline bool AreAllTrue3(const Vector4Int _v) { return (_mm_movemask_ps(_mm_castsi128_ps(_v)) & 0x7) == 0x7; } inline bool AreAllTrue2(const Vector4Int _v) { return (_mm_movemask_ps(_mm_castsi128_ps(_v)) & 0x3) == 0x3; } inline bool AreAllTrue1(const Vector4Int _v) { return (_mm_movemask_ps(_mm_castsi128_ps(_v)) & 0x1) == 0x1; } inline bool AreAllFalse(const Vector4Int _v) { return _mm_movemask_ps(_mm_castsi128_ps(_v)) == 0; } inline bool AreAllFalse3(const Vector4Int _v) { return (_mm_movemask_ps(_mm_castsi128_ps(_v)) & 0x7) == 0; } inline bool AreAllFalse2(const Vector4Int _v) { return (_mm_movemask_ps(_mm_castsi128_ps(_v)) & 0x3) == 0; } inline bool AreAllFalse1(const Vector4Int _v) { return (_mm_movemask_ps(_mm_castsi128_ps(_v)) & 0x1) == 0; } inline Vector4Int HAdd2(const Vector4Int _v) { const __m128i hadd = _mm_add_epi32(_v, SSE_SPLAT_I(_v, 1)); return _mm_castps_si128( _mm_move_ss(_mm_castsi128_ps(_v), _mm_castsi128_ps(hadd))); } inline Vector4Int HAdd3(const Vector4Int _v) { const __m128i hadd = _mm_add_epi32(_mm_add_epi32(_v, SSE_SPLAT_I(_v, 1)), _mm_unpackhi_epi32(_v, _v)); return _mm_castps_si128( _mm_move_ss(_mm_castsi128_ps(_v), _mm_castsi128_ps(hadd))); } inline Vector4Int HAdd4(const Vector4Int _v) { const __m128 v = _mm_castsi128_ps(_v); const __m128i haddxyzw = _mm_add_epi32(_v, _mm_castps_si128(_mm_movehl_ps(v, v))); return _mm_castps_si128(_mm_move_ss( v, _mm_castsi128_ps(_mm_add_epi32(haddxyzw, SSE_SPLAT_I(haddxyzw, 1))))); } inline Vector4Int Abs(const Vector4Int _v) { const __m128i zero = _mm_setzero_si128(); return SSE_SELECT_I(_mm_cmplt_epi32(_v, zero), _mm_sub_epi32(zero, _v), _v); } inline Vector4Int Sign(const Vector4Int _v) { return _mm_slli_epi32(_mm_srli_epi32(_v, 31), 31); } inline Vector4Int Min(const Vector4Int _a, const Vector4Int _b) { // SSE4 _mm_min_epi32 return SSE_SELECT_I(_mm_cmplt_epi32(_a, _b), _a, _b); } inline Vector4Int Max(const Vector4Int _a, const Vector4Int _b) { // SSE4 _mm_max_epi32 return SSE_SELECT_I(_mm_cmpgt_epi32(_a, _b), _a, _b); } inline Vector4Int Min0(const Vector4Int _v) { // SSE4 _mm_min_epi32 const __m128i zero = _mm_setzero_si128(); return SSE_SELECT_I(_mm_cmplt_epi32(zero, _v), zero, _v); } inline Vector4Int Max0(const Vector4Int _v) { // SSE4 _mm_max_epi32 const __m128i zero = _mm_setzero_si128(); return SSE_SELECT_I(_mm_cmpgt_epi32(zero, _v), zero, _v); } inline Vector4Int Clamp(const Vector4Int _a, const Vector4Int _v, const Vector4Int _b) { // SSE4 _mm_min_epi32/_mm_max_epi32 const __m128i min = SSE_SELECT_I(_mm_cmplt_epi32(_v, _b), 
_v, _b); return SSE_SELECT_I(_mm_cmpgt_epi32(_a, min), _a, min); } inline Vector4Int Select(const Vector4Int _b, const Vector4Int _true, const Vector4Int _false) { return SSE_SELECT_I(_b, _true, _false); } inline Vector4Int And(const Vector4Int _a, const Vector4Int _b) { return _mm_and_si128(_a, _b); } inline Vector4Int And(const Vector4Int& a, const BoolInVec b) { return _mm_and_si128(a, _mm_castps_si128(b.get128())); } inline Vector4Int Or(const Vector4Int _a, const Vector4Int _b) { return _mm_or_si128(_a, _b); } inline Vector4Int Xor(const Vector4Int _a, const Vector4Int _b) { return _mm_xor_si128(_a, _b); } inline Vector4Int Not(const Vector4Int _v) { return _mm_andnot_si128( _v, _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128())); } inline Vector4Int ShiftL(const Vector4Int _v, int _bits) { return _mm_slli_epi32_dynamic(_v, _bits); } inline Vector4Int ShiftR(const Vector4Int _v, int _bits) { return _mm_srai_epi32_dynamic(_v, _bits); } inline Vector4Int ShiftRu(const Vector4Int _v, int _bits) { return _mm_srli_epi32_dynamic(_v, _bits); } inline Vector4Int CmpEq(const Vector4Int _a, const Vector4Int _b) { return _mm_cmpeq_epi32(_a, _b); } inline Vector4Int CmpNe(const Vector4Int _a, const Vector4Int _b) { return _mm_castps_si128( _mm_cmpneq_ps(_mm_castsi128_ps(_a), _mm_castsi128_ps(_b))); } inline Vector4Int CmpLt(const Vector4Int _a, const Vector4Int _b) { return _mm_cmplt_epi32(_a, _b); } inline Vector4Int CmpLe(const Vector4Int _a, const Vector4Int _b) { return _mm_andnot_si128( _mm_cmpgt_epi32(_a, _b), _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128())); } inline Vector4Int CmpGt(const Vector4Int _a, const Vector4Int _b) { return _mm_cmpgt_epi32(_a, _b); } inline Vector4Int CmpGe(const Vector4Int _a, const Vector4Int _b) { return _mm_andnot_si128( _mm_cmplt_epi32(_a, _b), _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128())); } //========================================= #ConfettiAnimationMathExtensionsEnd ======================================= // ======================================================== // IVecIdx // ======================================================== #ifdef VECTORMATH_NO_SCALAR_CAST inline int IVecIdx::getAsInt() const #else inline IVecIdx::operator int() const #endif { return ((int *)&ref)[i]; } inline int IVecIdx::operator = (int scalar) { ((int *)&(ref))[i] = scalar; return scalar; } inline int IVecIdx::operator = (const IVecIdx & scalar) { ((int *)&(ref))[i] = ((int *)&(scalar.ref))[scalar.i]; return *this; } inline int IVecIdx::operator *= (int scalar) { ((int *)&(ref))[i] *= scalar; return *this; } inline int IVecIdx::operator /= (int scalar) { ((int *)&(ref))[i] /= scalar; return *this; } inline int IVecIdx::operator += (int scalar) { ((int *)&(ref))[i] += scalar; return *this; } inline int IVecIdx::operator -= (int scalar) { ((int *)&(ref))[i] -= scalar; return *this; } // ======================================================== // IVector3 // ======================================================== inline IVector3::IVector3(int _x, int _y, int _z) { mVec128 = _mm_setr_epi32(_x, _y, _z, 0); } inline IVector3::IVector3(int scalar) { mVec128 = _mm_setr_epi32(scalar, scalar, scalar, 0); } inline IVector3::IVector3(__m128i vi4) { mVec128 = vi4; } inline const IVector3 IVector3::xAxis() { return IVector3(1, 0, 0); } inline const IVector3 IVector3::yAxis() { return IVector3(0, 1, 0); } inline const IVector3 IVector3::zAxis() { return IVector3(0, 0, 1); } inline __m128i IVector3::get128() const { return mVec128; } inline IVector3 & 
IVector3::operator = (const IVector3 & vec) { mVec128 = vec.mVec128; return *this; } inline IVector3 & IVector3::setX(int _x) { ((int*)(&mVec128))[0] = _x; return *this; } inline const int IVector3::getX() const { return ((int*)&mVec128)[0]; } inline IVector3 & IVector3::setY(int _y) { ((int*)(&mVec128))[1] = _y; return *this; } inline const int IVector3::getY() const { return ((int*)&mVec128)[1]; } inline IVector3 & IVector3::setZ(int _z) { ((int*)(&mVec128))[2] = _z; return *this; } inline const int IVector3::getZ() const { return ((int*)&mVec128)[2]; } inline IVector3 & IVector3::setW(int _w) { ((int*)(&mVec128))[3] = _w; return *this; } inline const int IVector3::getW() const { return ((int*)&mVec128)[3]; } inline IVector3 & IVector3::setElem(int idx, int value) { ((int*)(&mVec128))[idx] = value; return *this; } inline const int IVector3::getElem(int idx) const { return ((int*)&mVec128)[idx]; } inline IVecIdx IVector3::operator[](int idx) { return IVecIdx(mVec128, idx); } inline const int IVector3::operator[](int idx) const { return ((int*)&mVec128)[idx]; } inline const IVector3 IVector3::operator + (const IVector3 & vec) const { return IVector3(_mm_add_epi32(mVec128, vec.mVec128)); } inline const IVector3 IVector3::operator - (const IVector3 & vec) const { return IVector3(_mm_sub_epi32(mVec128, vec.mVec128)); } inline const IVector3 IVector3::operator * (int scalar) const { return IVector3(_mm_mullo_epi32(_mm_set_epi32(scalar, scalar, scalar, scalar), mVec128)); } inline IVector3 & IVector3::operator += (const IVector3 & vec) { *this = *this + vec; return *this; } inline IVector3 & IVector3::operator -= (const IVector3 & vec) { *this = *this - vec; return *this; } inline IVector3 & IVector3::operator *= (int scalar) { *this = *this * scalar; return *this; } inline const IVector3 IVector3::operator / (int scalar) const { // No sse version exists int* vec = (int*)&mVec128; return IVector3(vec[0] / scalar, vec[1] / scalar, vec[2] / scalar); } inline IVector3 & IVector3::operator /= (int scalar) { *this = *this / scalar; return *this; } inline const IVector3 IVector3::operator - () const { return IVector3(_mm_sub_epi32(_mm_setzero_si128(), mVec128)); } inline const IVector3 operator * (int scalar, const IVector3 & vec) { return IVector3(_mm_mullo_epi32(_mm_set_epi32(scalar, scalar, scalar, scalar), vec.get128())); } inline const IVector3 mulPerElem(const IVector3 & vec0, const IVector3 & vec1) { return IVector3(_mm_mullo_epi32(vec0.get128(), vec1.get128())); } inline const IVector3 divPerElem(const IVector3 & vec0, const IVector3 & vec1) { // No sse version exists __m128i v0 = vec0.get128(); __m128i v1 = vec1.get128(); int* v0i = (int*)&v0; int* v1i = (int*)&v1; return IVector3(v0i[0] / v1i[0], v0i[1] / v1i[1], v0i[2] / v1i[2]); } inline const IVector3 absPerElem(const IVector3 & vec) { return IVector3(_mm_sign_epi32(vec.get128(), vec.get128())); } inline const IVector3 copySignPerElem(const IVector3 & vec0, const IVector3 & vec1) { const __m128i vmask = _mm_set1_epi32(0x7FFFFFFF); return IVector3(_mm_or_si128( _mm_and_si128(vmask, vec0.get128()), // Value _mm_andnot_si128(vmask, vec1.get128()))); // Signs } inline const IVector3 maxPerElem(const IVector3 & vec0, const IVector3 & vec1) { return IVector3(_mm_max_epi32(vec0.get128(), vec1.get128())); } inline const int maxElem(const IVector3 & vec) { __m128i s0 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128i s1 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(1, 1, 1, 1)); __m128i s2 = _mm_shuffle_epi32(vec.get128(), 
_MM_SHUFFLE(2, 2, 2, 2)); __m128i res = _mm_max_epi32(_mm_max_epi32(s0, s1), s2); return ((int*)&res)[0]; } inline const IVector3 minPerElem(const IVector3 & vec0, const IVector3 & vec1) { return IVector3(_mm_min_epi32(vec0.get128(), vec1.get128())); } inline const int minElem(const IVector3 & vec) { __m128i s0 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128i s1 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(1, 1, 1, 1)); __m128i s2 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(2, 2, 2, 2)); __m128i res = _mm_min_epi32(_mm_min_epi32(s0, s1), s2); return ((int*)&res)[0]; } inline const int sum(const IVector3 & vec) { __m128i s0 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128i s1 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(1, 1, 1, 1)); __m128i s2 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(2, 2, 2, 2)); __m128i res = _mm_add_epi32(_mm_add_epi32(s0, s1), s2); return ((int*)&res)[0]; } #ifdef VECTORMATH_DEBUG inline void print(const IVector3 & vec) { SSEInt tmp; tmp.m128 = vec.get128(); std::printf("( %i %i %i )\n", tmp.i[0], tmp.i[1], tmp.i[2]); } inline void print(const IVector3 & vec, const char * name) { SSEInt tmp; tmp.m128 = vec.get128(); std::printf("%s: ( %i %i %i )\n", name, tmp.i[0], tmp.i[1], tmp.i[2]); } #endif // VECTORMATH_DEBUG // ======================================================== // UVector3 // ======================================================== inline UVector3::UVector3(uint _x, uint _y, uint _z) { mVec128 = _mm_setr_epi32(_x, _y, _z, 0); } inline UVector3::UVector3(uint scalar) { mVec128 = _mm_setr_epi32(scalar, scalar, scalar, 0); } inline UVector3::UVector3(__m128i vi4) { mVec128 = vi4; } inline const UVector3 UVector3::xAxis() { return UVector3(1, 0, 0); } inline const UVector3 UVector3::yAxis() { return UVector3(0, 1, 0); } inline const UVector3 UVector3::zAxis() { return UVector3(0, 0, 1); } inline __m128i UVector3::get128() const { return mVec128; } inline UVector3 & UVector3::operator = (const UVector3 & vec) { mVec128 = vec.mVec128; return *this; } inline UVector3 & UVector3::setX(uint _x) { ((uint*)(&mVec128))[0] = _x; return *this; } inline const uint UVector3::getX() const { return ((uint*)&mVec128)[0]; } inline UVector3 & UVector3::setY(uint _y) { ((uint*)(&mVec128))[1] = _y; return *this; } inline const uint UVector3::getY() const { return ((uint*)&mVec128)[1]; } inline UVector3 & UVector3::setZ(uint _z) { ((uint*)(&mVec128))[2] = _z; return *this; } inline const uint UVector3::getZ() const { return ((uint*)&mVec128)[2]; } inline UVector3 & UVector3::setW(uint _w) { ((uint*)(&mVec128))[3] = _w; return *this; } inline const uint UVector3::getW() const { return ((uint*)&mVec128)[3]; } inline UVector3 & UVector3::setElem(uint idx, uint value) { ((uint*)(&mVec128))[idx] = value; return *this; } inline const uint UVector3::getElem(uint idx) const { return ((uint*)&mVec128)[idx]; } inline IVecIdx UVector3::operator[](uint idx) { return IVecIdx(mVec128, idx); } inline const uint UVector3::operator[](uint idx) const { return ((uint*)&mVec128)[idx]; } inline const UVector3 UVector3::operator + (const UVector3 & vec) const { return UVector3(_mm_add_epi32(mVec128, vec.mVec128)); } inline const UVector3 UVector3::operator - (const UVector3 & vec) const { return UVector3(_mm_sub_epi32(mVec128, vec.mVec128)); } inline const UVector3 UVector3::operator * (uint scalar) const { return UVector3(_mm_mullo_epi32(_mm_set_epi32(scalar, scalar, scalar, scalar), mVec128)); } inline UVector3 & UVector3::operator += (const UVector3 & 
vec) { *this = *this + vec; return *this; } inline UVector3 & UVector3::operator -= (const UVector3 & vec) { *this = *this - vec; return *this; } inline UVector3 & UVector3::operator *= (uint scalar) { *this = *this * scalar; return *this; } inline const UVector3 UVector3::operator / (uint scalar) const { // No sse version exists uint* vec = (uint*)&mVec128; return UVector3(vec[0] / scalar, vec[1] / scalar, vec[2] / scalar); } inline UVector3 & UVector3::operator /= (uint scalar) { *this = *this / scalar; return *this; } inline const UVector3 operator * (uint scalar, const UVector3 & vec) { return UVector3(_mm_mullo_epi32(_mm_set_epi32(scalar, scalar, scalar, scalar), vec.get128())); } inline const UVector3 mulPerElem(const UVector3 & vec0, const UVector3 & vec1) { return UVector3(_mm_mullo_epi32(vec0.get128(), vec1.get128())); } inline const UVector3 divPerElem(const UVector3 & vec0, const UVector3 & vec1) { // No sse version exists __m128i v0 = vec0.get128(); __m128i v1 = vec1.get128(); uint* v0u = (uint*)&v0; uint* v1u = (uint*)&v1; return UVector3(v0u[0] / v1u[0], v0u[1] / v1u[1], v0u[2] / v1u[2]); } inline const UVector3 maxPerElem(const UVector3 & vec0, const UVector3 & vec1) { return UVector3(_mm_max_epu32(vec0.get128(), vec1.get128())); } inline const uint maxElem(const UVector3 & vec) { __m128i s0 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128i s1 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(1, 1, 1, 1)); __m128i s2 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(2, 2, 2, 2)); __m128i res = _mm_max_epu32(_mm_max_epu32(s0, s1), s2); return ((uint*)&res)[0]; } inline const UVector3 minPerElem(const UVector3 & vec0, const UVector3 & vec1) { return UVector3(_mm_min_epu32(vec0.get128(), vec1.get128())); } inline const uint minElem(const UVector3 & vec) { __m128i s0 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128i s1 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(1, 1, 1, 1)); __m128i s2 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(2, 2, 2, 2)); __m128i res = _mm_min_epu32(_mm_min_epu32(s0, s1), s2); return ((uint*)&res)[0]; } inline const uint sum(const UVector3 & vec) { __m128i s0 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128i s1 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(1, 1, 1, 1)); __m128i s2 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(2, 2, 2, 2)); __m128i res = _mm_add_epi32(_mm_add_epi32(s0, s1), s2); return ((uint*)&res)[0]; } #ifdef VECTORMATH_DEBUG inline void print(const UVector3 & vec) { SSEUint tmp; tmp.m128 = vec.get128(); std::printf("( %u %u %u )\n", tmp.u[0], tmp.u[1], tmp.u[2]); } inline void print(const UVector3 & vec, const char * name) { SSEUint tmp; tmp.m128 = vec.get128(); std::printf("%s: ( %u %u %u )\n", name, tmp.u[0], tmp.u[1], tmp.u[2]); } #endif // VECTORMATH_DEBUG // ======================================================== // IVector4 // ======================================================== inline IVector4::IVector4(int _x, int _y, int _z, int _w) { mVec128 = _mm_setr_epi32(_x, _y, _z, _w); } inline IVector4::IVector4(int scalar) { mVec128 = _mm_setr_epi32(scalar, scalar, scalar, scalar); } inline IVector4::IVector4(__m128i vi4) { mVec128 = vi4; } inline const IVector4 IVector4::xAxis() { return IVector4(1, 0, 0, 0); } inline const IVector4 IVector4::yAxis() { return IVector4(0, 1, 0, 0); } inline const IVector4 IVector4::zAxis() { return IVector4(0, 0, 1, 0); } inline const IVector4 IVector4::wAxis() { return IVector4(0, 0, 0, 1); } inline __m128i IVector4::get128() const { return 
mVec128; } inline IVector4 & IVector4::operator = (const IVector4 & vec) { mVec128 = vec.mVec128; return *this; } inline IVector4 & IVector4::setX(int _x) { ((int*)(&mVec128))[0] = _x; return *this; } inline const int IVector4::getX() const { return ((int*)&mVec128)[0]; } inline IVector4 & IVector4::setY(int _y) { ((int*)(&mVec128))[1] = _y; return *this; } inline const int IVector4::getY() const { return ((int*)&mVec128)[1]; } inline IVector4 & IVector4::setZ(int _z) { ((int*)(&mVec128))[2] = _z; return *this; } inline const int IVector4::getZ() const { return ((int*)&mVec128)[2]; } inline IVector4 & IVector4::setW(int _w) { ((int*)(&mVec128))[3] = _w; return *this; } inline const int IVector4::getW() const { return ((int*)&mVec128)[3]; } inline IVector4 & IVector4::setElem(int idx, int value) { ((int*)(&mVec128))[idx] = value; return *this; } inline const int IVector4::getElem(int idx) const { return ((int*)&mVec128)[idx]; } inline IVecIdx IVector4::operator[](int idx) { return IVecIdx(mVec128, idx); } inline const int IVector4::operator[](int idx) const { return ((int*)&mVec128)[idx]; } inline const IVector4 IVector4::operator + (const IVector4 & vec) const { return IVector4(_mm_add_epi32(mVec128, vec.mVec128)); } inline const IVector4 IVector4::operator - (const IVector4 & vec) const { return IVector4(_mm_sub_epi32(mVec128, vec.mVec128)); } inline const IVector4 IVector4::operator * (int scalar) const { return IVector4(_mm_mullo_epi32(_mm_set_epi32(scalar, scalar, scalar, scalar), mVec128)); } inline IVector4 & IVector4::operator += (const IVector4 & vec) { *this = *this + vec; return *this; } inline IVector4 & IVector4::operator -= (const IVector4 & vec) { *this = *this - vec; return *this; } inline IVector4 & IVector4::operator *= (int scalar) { *this = *this * scalar; return *this; } inline const IVector4 IVector4::operator / (int scalar) const { // No sse version exists int* vec = (int*)&mVec128; return IVector4(vec[0] / scalar, vec[1] / scalar, vec[2] / scalar, vec[3] / scalar); } inline IVector4 & IVector4::operator /= (int scalar) { *this = *this / scalar; return *this; } inline const IVector4 IVector4::operator - () const { return IVector4(_mm_sub_epi32(_mm_setzero_si128(), mVec128)); } inline const IVector4 operator * (int scalar, const IVector4 & vec) { return IVector4(_mm_mullo_epi32(_mm_set_epi32(scalar, scalar, scalar, scalar), vec.get128())); } inline const IVector4 mulPerElem(const IVector4 & vec0, const IVector4 & vec1) { return IVector4(_mm_mullo_epi32(vec0.get128(), vec1.get128())); } inline const IVector4 divPerElem(const IVector4 & vec0, const IVector4 & vec1) { // No sse version exists __m128i v0 = vec0.get128(); __m128i v1 = vec1.get128(); int* v0i = (int*)&v0; int* v1i = (int*)&v1; return IVector4(v0i[0] / v1i[0], v0i[1] / v1i[1], v0i[2] / v1i[2], v0i[3] / v1i[3]); } inline const IVector4 absPerElem(const IVector4 & vec) { return IVector4(_mm_sign_epi32(vec.get128(), vec.get128())); } inline const IVector4 copySignPerElem(const IVector4 & vec0, const IVector4 & vec1) { const __m128i vmask = _mm_set1_epi32(0x7FFFFFFF); return IVector4(_mm_or_si128( _mm_and_si128(vmask, vec0.get128()), // Value _mm_andnot_si128(vmask, vec1.get128()))); // Signs } inline const IVector4 maxPerElem(const IVector4 & vec0, const IVector4 & vec1) { return IVector4(_mm_max_epi32(vec0.get128(), vec1.get128())); } inline const int maxElem(const IVector4 & vec) { __m128i s0 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128i s1 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(1, 
1, 1, 1)); __m128i s2 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(2, 2, 2, 2)); __m128i s3 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(3, 3, 3, 3)); __m128i res = _mm_max_epi32(_mm_max_epi32(_mm_max_epi32(s0, s1), s2), s3); return ((int*)&res)[0]; } inline const IVector4 minPerElem(const IVector4 & vec0, const IVector4 & vec1) { return IVector4(_mm_min_epi32(vec0.get128(), vec1.get128())); } inline const int minElem(const IVector4 & vec) { __m128i s0 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128i s1 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(1, 1, 1, 1)); __m128i s2 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(2, 2, 2, 2)); __m128i s3 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(3, 3, 3, 3)); __m128i res = _mm_min_epi32(_mm_min_epi32(_mm_min_epi32(s0, s1), s2), s3); return ((int*)&res)[0]; } inline const int sum(const IVector4 & vec) { __m128i s0 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128i s1 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(1, 1, 1, 1)); __m128i s2 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(2, 2, 2, 2)); __m128i s3 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(3, 3, 3, 3)); __m128i res = _mm_add_epi32(_mm_add_epi32(_mm_add_epi32(s0, s1), s2), s3); return ((int*)&res)[0]; } #ifdef VECTORMATH_DEBUG inline void print(const IVector4 & vec) { SSEInt tmp; tmp.m128 = vec.get128(); std::printf("( %i %i %i %i )\n", tmp.i[0], tmp.i[1], tmp.i[2], tmp.i[3]); } inline void print(const IVector4 & vec, const char * name) { SSEInt tmp; tmp.m128 = vec.get128(); std::printf("%s: ( %i %i %i %i )\n", name, tmp.i[0], tmp.i[1], tmp.i[2], tmp.i[3]); } #endif // VECTORMATH_DEBUG // ======================================================== // UVector4 // ======================================================== inline UVector4::UVector4(uint _x, uint _y, uint _z, uint _w) { mVec128 = _mm_setr_epi32(_x, _y, _z, _w); } inline UVector4::UVector4(uint scalar) { mVec128 = _mm_setr_epi32(scalar, scalar, scalar, scalar); } inline UVector4::UVector4(__m128i vi4) { mVec128 = vi4; } inline const UVector4 UVector4::xAxis() { return UVector4(1, 0, 0, 0); } inline const UVector4 UVector4::yAxis() { return UVector4(0, 1, 0, 0); } inline const UVector4 UVector4::zAxis() { return UVector4(0, 0, 1, 0); } inline const UVector4 UVector4::wAxis() { return UVector4(0, 0, 0, 1); } inline __m128i UVector4::get128() const { return mVec128; } inline UVector4 & UVector4::operator = (const UVector4 & vec) { mVec128 = vec.mVec128; return *this; } inline UVector4 & UVector4::setX(uint _x) { ((uint*)(&mVec128))[0] = _x; return *this; } inline const uint UVector4::getX() const { return ((uint*)&mVec128)[0]; } inline UVector4 & UVector4::setY(uint _y) { ((uint*)(&mVec128))[1] = _y; return *this; } inline const uint UVector4::getY() const { return ((uint*)&mVec128)[1]; } inline UVector4 & UVector4::setZ(uint _z) { ((uint*)(&mVec128))[2] = _z; return *this; } inline const uint UVector4::getZ() const { return ((uint*)&mVec128)[2]; } inline UVector4 & UVector4::setW(uint _w) { ((uint*)(&mVec128))[3] = _w; return *this; } inline const uint UVector4::getW() const { return ((uint*)&mVec128)[3]; } inline UVector4 & UVector4::setElem(uint idx, uint value) { ((uint*)(&mVec128))[idx] = value; return *this; } inline const uint UVector4::getElem(uint idx) const { return ((uint*)&mVec128)[idx]; } inline IVecIdx UVector4::operator[](uint idx) { return IVecIdx(mVec128, idx); } inline const uint UVector4::operator[](uint idx) const { return ((uint*)&mVec128)[idx]; } inline const UVector4 
UVector4::operator + (const UVector4 & vec) const { return UVector4(_mm_add_epi32(mVec128, vec.mVec128)); } inline const UVector4 UVector4::operator - (const UVector4 & vec) const { return UVector4(_mm_sub_epi32(mVec128, vec.mVec128)); } inline const UVector4 UVector4::operator * (uint scalar) const { return UVector4(_mm_mullo_epi32(_mm_set_epi32(scalar, scalar, scalar, scalar), mVec128)); } inline UVector4 & UVector4::operator += (const UVector4 & vec) { *this = *this + vec; return *this; } inline UVector4 & UVector4::operator -= (const UVector4 & vec) { *this = *this - vec; return *this; } inline UVector4 & UVector4::operator *= (uint scalar) { *this = *this * scalar; return *this; } inline const UVector4 UVector4::operator / (uint scalar) const { // No sse version exists uint* vec = (uint*)&mVec128; return UVector4(vec[0] / scalar, vec[1] / scalar, vec[2] / scalar, vec[3] / scalar); } inline UVector4 & UVector4::operator /= (uint scalar) { *this = *this / scalar; return *this; } inline const UVector4 operator * (uint scalar, const UVector4 & vec) { return UVector4(_mm_mullo_epi32(_mm_set_epi32(scalar, scalar, scalar, scalar), vec.get128())); } inline const UVector4 mulPerElem(const UVector4 & vec0, const UVector4 & vec1) { return UVector4(_mm_mullo_epi32(vec0.get128(), vec1.get128())); } inline const UVector4 divPerElem(const UVector4 & vec0, const UVector4 & vec1) { // No sse version exists __m128i v0 = vec0.get128(); __m128i v1 = vec1.get128(); uint* v0u = (uint*)&v0; uint* v1u = (uint*)&v1; return UVector4(v0u[0] / v1u[0], v0u[1] / v1u[1], v0u[2] / v1u[2], v0u[3] / v1u[3]); } inline const UVector4 maxPerElem(const UVector4 & vec0, const UVector4 & vec1) { return UVector4(_mm_max_epu32(vec0.get128(), vec1.get128())); } inline const uint maxElem(const UVector4 & vec) { __m128i s0 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128i s1 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(1, 1, 1, 1)); __m128i s2 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(2, 2, 2, 2)); __m128i s3 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(3, 3, 3, 3)); __m128i res = _mm_max_epu32(_mm_max_epu32(_mm_max_epu32(s0, s1), s2), s3); return ((uint*)&res)[0]; } inline const UVector4 minPerElem(const UVector4 & vec0, const UVector4 & vec1) { return UVector4(_mm_min_epu32(vec0.get128(), vec1.get128())); } inline const uint minElem(const UVector4 & vec) { __m128i s0 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128i s1 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(1, 1, 1, 1)); __m128i s2 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(2, 2, 2, 2)); __m128i s3 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(3, 3, 3, 3)); __m128i res = _mm_min_epu32(_mm_min_epu32(_mm_min_epu32(s0, s1), s2), s3); return ((uint*)&res)[0]; } inline const uint sum(const UVector4 & vec) { __m128i s0 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(0, 0, 0, 0)); __m128i s1 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(1, 1, 1, 1)); __m128i s2 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(2, 2, 2, 2)); __m128i s3 = _mm_shuffle_epi32(vec.get128(), _MM_SHUFFLE(3, 3, 3, 3)); __m128i res = _mm_add_epi32(_mm_add_epi32(_mm_add_epi32(s0, s1), s2), s3); return ((uint*)&res)[0]; } #ifdef VECTORMATH_DEBUG inline void print(const UVector4 & vec) { SSEUint tmp; tmp.m128 = vec.get128(); std::printf("( %u %u %u %u )\n", tmp.u[0], tmp.u[1], tmp.u[2], tmp.u[3]); } inline void print(const UVector4 & vec, const char * name) { SSEUint tmp; tmp.m128 = vec.get128(); std::printf("%s: ( %u %u %u %u )\n", name, tmp.u[0], tmp.u[1], tmp.u[2], 
tmp.u[3]); } #endif // VECTORMATH_DEBUG //========================================= #ConfettiMathExtensionsEnd ================================================ } // namespace Neon } // namespace Vectormath #endif // VECTORMATH_NEON_VECTOR_HPP
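As a quick illustration of the Vector4 transpose helpers above, here is a minimal, self-contained usage sketch. The include path is hypothetical and the Vectormath::Neon namespace is inferred from the closing braces above, so both may need adjusting to the actual project layout:

// Hypothetical usage sketch -- the header path below is an assumption.
#include "vectormath_neon_vector.hpp"
#include <cstdio>

int main()
{
    using namespace Vectormath::Neon; // namespace inferred from the closing braces above

    const Vector4 rows[4] = {
        Vector4( 1.0f,  2.0f,  3.0f,  4.0f),
        Vector4( 5.0f,  6.0f,  7.0f,  8.0f),
        Vector4( 9.0f, 10.0f, 11.0f, 12.0f),
        Vector4(13.0f, 14.0f, 15.0f, 16.0f),
    };
    Vector4 cols[4];
    transpose4x4(rows, cols); // cols[0] now holds the first column: (1, 5, 9, 13)

    float first[4];
    storePtrU(cols[0], first); // unaligned store of the first transposed vector
    std::printf("%f %f %f %f\n", first[0], first[1], first[2], first[3]);
    return 0;
}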
import * as React from "react"; import { default as Component } from "."; export default { title: "@millifx/error-boundary", component: Component, }; export const GenericError = () => <Component />;
/* * Copyright 2019-2020 <NAME> and Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import {Component, OnInit} from '@angular/core'; import {ThemeService} from './main/theme.service'; import {MenuNode} from './toolbar/toolbar'; @Component({ selector: 'app-root', templateUrl: './app.component.html', styleUrls: ['./app.component.css'] }) export class AppComponent implements OnInit { /** * This defined the menu structure, it's passed to the {@link NestedTreeControl} * if on mobile, otherwise is read to create lots of buttons for desktop devices. */ public readonly MenuTree: MenuNode[] = [ { name: 'About', href: '/about' }, { name: 'Projects', href: '/projects' }, { name: 'Donate', href: '/donate' }, { name: 'Support', children: [ { name: 'Press Kit', href: '/support/press-kit' }, { name: 'Privacy Policy', href: '/support/privacy' } ] } ]; constructor( public themeService: ThemeService ) { console.log('%cHold up!', 'color: red; font-size: 64px;'); console.log('If someone\'s told you to open this panel or console to perform ' + 'commands or check something in your browser, chances are they\'re trying to access ' + 'sensitive information which could compromise your account or personal data. ' + 'Only continue if you know what you\'re doing!'); } ngOnInit(): void { this.themeService.loadTheme(); } }
def substrings(a, b, n):
    """Return the set of all length-n substrings of a that also occur in b.

    >>> sorted(substrings("abcd", "bcde", 2))
    ['bc', 'cd']
    """
    subs = set()
    if n > len(a):
        return subs
    if a == b and len(a) == n:
        return {a}  # was `[a]`; keep the return type consistently a set
    for i in range(0, len(a) - n + 1):
        substring = a[i:i + n]
        if substring in b:
            subs.add(substring)
    return subs
The verapamil transporter expressed in human alveolar epithelial cells (A549) does not interact with β2-receptor agonists. The affinity of different organs for verapamil is highly variable and organ-specific; the drug, for example, accumulates to high levels in lung tissue. A transporter recognising verapamil as a substrate has previously been identified in human retinal pigment epithelial (RPE) and in rat retinal capillary endothelial (TR-iBRB2) cells. This transporter is distinct from any of the cloned organic cation transporters. We therefore hypothesised that the verapamil transporter is also functionally expressed in the human respiratory mucosa, and tested the hypothesis that it interacts with pulmonary-administered cationic drugs such as β2-agonists. The uptake of verapamil was studied in A549 human alveolar epithelial cell monolayers at different times and concentrations, and the influence of extracellular proton concentration and various organic cations on verapamil uptake was determined. Verapamil uptake into A549 cells was time- and concentration-dependent, sensitive to pH, and had a Km value of 39.8 ± 8.2 µM. Uptake was also sensitive to inhibition by amantadine, quinidine and pyrilamine, but insensitive to other typical modulators of organic cation and choline transporters. Whilst we demonstrated functional activity of the elusive verapamil transporter at the lung epithelium, our data suggest that this transporter does not interact with β2-agonists at therapeutic concentrations.
Help stamp out psychiatrists. Life crises require sensitive and timely intervention if the individual is to show improvement. This article lists some of the major areas in which a family physician can assist a patient through a crisis, treating it as a positive occasion that makes the individual or family more receptive to change. Other support systems and the patient's family must be involved, and 'magic by mouth' should be minimized. Prevention can be practiced by recognizing the stressful situations that typically precede a crisis.