content
stringlengths
7
2.61M
/** * This class is generated by jOOQ */ package com.sivalabs.demo.jooq.domain.tables.records; import com.sivalabs.demo.jooq.domain.tables.Posts; import java.sql.Timestamp; import javax.annotation.Generated; import org.jooq.Field; import org.jooq.Record1; import org.jooq.Record4; import org.jooq.Row4; import org.jooq.impl.UpdatableRecordImpl; /** * This class is generated by jOOQ. */ @Generated( value = { "http://www.jooq.org", "jOOQ version:3.7.2" }, comments = "This class is generated by jOOQ" ) @SuppressWarnings({ "all", "unchecked", "rawtypes" }) public class PostsRecord extends UpdatableRecordImpl<PostsRecord> implements Record4<Integer, String, String, Timestamp> { private static final long serialVersionUID = -1319080636; /** * Setter for <code>PUBLIC.POSTS.ID</code>. */ public void setId(Integer value) { setValue(0, value); } /** * Getter for <code>PUBLIC.POSTS.ID</code>. */ public Integer getId() { return (Integer) getValue(0); } /** * Setter for <code>PUBLIC.POSTS.TITLE</code>. */ public void setTitle(String value) { setValue(1, value); } /** * Getter for <code>PUBLIC.POSTS.TITLE</code>. */ public String getTitle() { return (String) getValue(1); } /** * Setter for <code>PUBLIC.POSTS.CONTENT</code>. */ public void setContent(String value) { setValue(2, value); } /** * Getter for <code>PUBLIC.POSTS.CONTENT</code>. */ public String getContent() { return (String) getValue(2); } /** * Setter for <code>PUBLIC.POSTS.CREATED_ON</code>. */ public void setCreatedOn(Timestamp value) { setValue(3, value); } /** * Getter for <code>PUBLIC.POSTS.CREATED_ON</code>. 
*/ public Timestamp getCreatedOn() { return (Timestamp) getValue(3); } // ------------------------------------------------------------------------- // Primary key information // ------------------------------------------------------------------------- /** * {@inheritDoc} */ @Override public Record1<Integer> key() { return (Record1) super.key(); } // ------------------------------------------------------------------------- // Record4 type implementation // ------------------------------------------------------------------------- /** * {@inheritDoc} */ @Override public Row4<Integer, String, String, Timestamp> fieldsRow() { return (Row4) super.fieldsRow(); } /** * {@inheritDoc} */ @Override public Row4<Integer, String, String, Timestamp> valuesRow() { return (Row4) super.valuesRow(); } /** * {@inheritDoc} */ @Override public Field<Integer> field1() { return Posts.POSTS.ID; } /** * {@inheritDoc} */ @Override public Field<String> field2() { return Posts.POSTS.TITLE; } /** * {@inheritDoc} */ @Override public Field<String> field3() { return Posts.POSTS.CONTENT; } /** * {@inheritDoc} */ @Override public Field<Timestamp> field4() { return Posts.POSTS.CREATED_ON; } /** * {@inheritDoc} */ @Override public Integer value1() { return getId(); } /** * {@inheritDoc} */ @Override public String value2() { return getTitle(); } /** * {@inheritDoc} */ @Override public String value3() { return getContent(); } /** * {@inheritDoc} */ @Override public Timestamp value4() { return getCreatedOn(); } /** * {@inheritDoc} */ @Override public PostsRecord value1(Integer value) { setId(value); return this; } /** * {@inheritDoc} */ @Override public PostsRecord value2(String value) { setTitle(value); return this; } /** * {@inheritDoc} */ @Override public PostsRecord value3(String value) { setContent(value); return this; } /** * {@inheritDoc} */ @Override public PostsRecord value4(Timestamp value) { setCreatedOn(value); return this; } /** * {@inheritDoc} */ @Override public PostsRecord values(Integer 
value1, String value2, String value3, Timestamp value4) { value1(value1); value2(value2); value3(value3); value4(value4); return this; } // ------------------------------------------------------------------------- // Constructors // ------------------------------------------------------------------------- /** * Create a detached PostsRecord */ public PostsRecord() { super(Posts.POSTS); } /** * Create a detached, initialised PostsRecord */ public PostsRecord(Integer id, String title, String content, Timestamp createdOn) { super(Posts.POSTS); setValue(0, id); setValue(1, title); setValue(2, content); setValue(3, createdOn); } }
Light microscopic and electron microscopic changes in the livers of cats with extrahepatic bile duct obstruction. Light microscopic and transmission electron microscopic changes were studied in the livers of 6 cats at 25 to 54 days after their extrahepatic bile ducts were experimentally obstructed. Histologic findings included various degrees of bile duct dilatation, ductular proliferation, and peribiliary fibrosis. Concentric layers of dense, birefringent connective tissue surrounded midsized bile ducts. The degrees of bile duct proliferation, hepatocellular degeneration, and bile retention were similar in each cat, but the amount of periductular connective tissue increased with chronicity of bile duct obstruction. Ultrastructural changes included marked swelling of endoplasmic reticulum and mitochondrial inner compartments, marked dilatation and distention of the canaliculi by bile casts, and numerical reduction of canalicular microvilli. Microvilli were often swollen and blunt. Mitochondria were swollen and long, and the cristae lacked normal distribution and density. Cytoplasmic accumulations of granular, electron-dense material and concentrically laminated arrays of material indicative of bile substances were also observed.
A Quasi-public Sphere: Letters to the Editor in the German Democratic Republic Using the example of the German Democratic Republic, the present article argues that communist leadership established a non-public communication channel between politics, administrations, industry and the population that took on most of the functions of the non-existent public sphere: letters to the editor. By law, those letters were considered petitions. The editorial offices had to register and answer them in a timely manner or transmit them to the authorities for consideration. This policy of focusing on individual cases while avoiding public sphere levels of mass communication and public meetings had two advantages for the ruling communist party: Critics were satisfied and kept quiet and other people were left in the dark unless they heard rumours during encounters. Those in power accepted that the absence of a critical discursive space hampered the process of innovation and social change, setting the German Democratic Republic apart from countries with autonomous media systems. The petition solution could only work as long as the number of critical readers' letters remained within a reasonable limit, something that became impossible during the crisis of the late 1980s.
<reponame>macoolka/macoolka-doc<gh_stars>0 import en from './en.json'; import zh from './zh.json'; import buildApp, { MessageInfo } from 'macoolka-i18n'; import { defaultOption as typeI18NOption } from 'macoolka-type-model/lib/i18n'; export const defaultOption = { defaultLanguage: 'en', locale: 'en', languages: ['en', 'zh'], data: { en: { ...en, ...typeI18NOption.data.en, }, zh: { ...zh, ...typeI18NOption.data.zh, }, }, }; export type Message = MessageInfo<keyof typeof en | (keyof typeof typeI18NOption.data.en) , { value?: string, }>; export const buildI18N = buildApp<Message>(defaultOption); export default buildI18N;
Parties to a Crime and the Notion of a Complicity Object. By Carl Erik Herlitz. and groundwater contamination. Additional chapters provide useful case studies of the protection and management of discrete freshwater resources. Despite its title, the collection's focus is on water pollution control legislation rather than legal controls more generally. The majority of contributions come from private practitioners, and many papers provide useful insights into the practical operation of the relevant legislative provisions, particularly those relating to the practice and policy of enforcement, which provide some of the most interesting contributions. However, the value of other chapters comes more from the detail in which the relevant provisions are covered. Although this allows useful comparative observations to be made, it is a pity that the reader is denied the benefits of the practical experience and considered opinions of the authors, which would surely have come out in subsequent discussion. In this respect, some coverage of the ensuing debates, either as a postscript or in an extended introduction, might have been a valuable addition to the work. Equally, some means of cross-referencing issues between the chapters would also have been useful.
<reponame>AnkoGo123/gopher package rmq import ( "context" "fmt" "github.com/apache/rocketmq-client-go/v2" "github.com/apache/rocketmq-client-go/v2/primitive" "github.com/apache/rocketmq-client-go/v2/producer" "github.com/apache/rocketmq-client-go/v2/rlog" ) type Producer struct { Producer rocketmq.Producer serverName string conf *RocketMQConfig ops []producer.Option } func NewProducer(conf *RocketMQConfig) (p *Producer) { ops := defaultProducerOps(conf) if len(conf.ProducerOptions) > 0 { ops = append(ops, conf.ProducerOptions...) } p = &Producer{ Producer: nil, serverName: conf.EndPoint, conf: conf, ops: ops, } return p } // Conn connect to aliyun rocketmq func (p *Producer) Conn() (conn *Producer, err error) { if p.conf.LogLevel != "" { rlog.SetLogLevel(string(p.conf.LogLevel)) } defaultProducer, err := producer.NewDefaultProducer(p.ops...) if err != nil { return nil, err } p.Producer = defaultProducer if err = p.Producer.Start(); err != nil { return nil, err } return p, nil } func (p *Producer) Close() { if p.Producer != nil { _ = p.Producer.Shutdown() } } // SendSyncSingle 同步单条消息发送,对应消费 topic 的 MessageBatchMaxSize = 1时用 func (p *Producer) SendSyncSingle(c context.Context, message *primitive.Message) (result *primitive.SendResult, err error) { if p.Producer == nil { return nil, fmt.Errorf("[%s] is nil", p.serverName) } return p.Producer.SendSync(c, message) } // SendAsyncSingle 异步单条消息发送,对应消费 topic 的 MessageBatchMaxSize = 1时用 func (p *Producer) SendAsyncSingle(c context.Context, callback func(ctx context.Context, result *primitive.SendResult, err error), message *primitive.Message) (err error) { if p.Producer == nil { return fmt.Errorf("[%s] is nil", p.serverName) } if callback == nil { callback = func(ctx context.Context, result *primitive.SendResult, err error) {} } err = p.Producer.SendAsync(c, callback, message) if err != nil { return err } return nil } func (p *Producer) SendOneWaySingle(c context.Context, message *primitive.Message) (err error) { if 
p.Producer == nil { return fmt.Errorf("[%s] is nil", p.serverName) } return p.Producer.SendOneWay(c, message) }
from itertools import product
from logging import getLogger
from pathlib import Path
from typing import cast, Dict, Iterable, List, TYPE_CHECKING

from frictionless import Resource
from glom import glom, Iter, Invoke, Match, MatchError, Or, T
import pandas as pd

from friendly_data._types import _path_t
from friendly_data.converters import _reader, from_df, to_df
from friendly_data.dpkg import pkgindex
from friendly_data.dpkg import res_from_entry
from friendly_data.helpers import idx_lvl_values
from friendly_data.helpers import import_from
from friendly_data.helpers import filter_dict
from friendly_data.helpers import is_fmtstr
from friendly_data.io import dwim_file

if TYPE_CHECKING:
    from pyam import IamDataFrame

# weak dependency on pyam; damn plotly!
pyam = import_from("pyam", "")

logger = getLogger(__name__)


class IAMconv:
    """Converter class for IAMC data

    This class resolves index columns against the "semi-hierarchical"
    variables used in IAMC data, and separates them into individual datasets
    that are part of the datapackage.  It relies on the index file and index
    column definitions to do the disaggregation.  It also supports the
    reverse operation of aggregating multiple datasets into an IAMC dataset.

    TODO:
    - describe assumptions (e.g. case insensitive match) and fallbacks
      (e.g. missing title)
    - limitations (e.g. when no index column exists)

    FIXME:
    - basepath inconsistency
    - df/iamdf/csv inconsistency

    """

    # IAMC mandatory index columns: pyam's model/scenario/region/variable/unit
    # plus "year"
    _IAMC_IDX = pyam.IAMC_IDX + ["year"]

    @classmethod
    def _validate(cls, conf: Dict) -> Dict:
        """Validate the converter config; returns it unchanged on success."""
        # FIXME: check if file exists for user defined idxcols
        conf_match = Match(
            {
                "indices": {str: Or(str, int)},  # int for year
                str: object,  # fall through for other config keys
            }
        )
        try:
            return glom(conf, conf_match)
        except MatchError as err:
            logger.exception(
                f"{err.args[1]}: must define a dictionary of files pointing to idxcol"
                "definitions for IAMC conversion, or set a default value for one of:"
                f"{', '.join(cls._IAMC_IDX)}"
            )
            # re-raise without chaining so the glom traceback is not repeated
            raise err from None

    @classmethod
    def _warn_empty(cls, df: pd.DataFrame, entry: Dict):
        """Log a warning when a converted dataframe came out empty."""
        if df.empty:
            logger.warning(f"{entry['path']}: empty dataframe, check index entry")

    @classmethod
    def iamdf2df(cls, iamdf: "IamDataFrame") -> pd.DataFrame:
        """Convert :class:`pyam.IamDataFrame` to :class:`pandas.DataFrame`"""
        return iamdf.as_pandas().drop(columns="exclude").set_index(cls._IAMC_IDX)

    @classmethod
    def f2col(cls, fpath: _path_t) -> str:
        """Deduce column name from file name"""
        return Path(fpath).stem

    @classmethod
    def from_iamdf_simple(
        cls, iamdf: "IamDataFrame", basepath: _path_t, datapath: _path_t
    ) -> Resource:
        """Simple conversion to data package in IAM long format"""
        return from_df(cls.iamdf2df(iamdf), basepath, datapath)

    @classmethod
    def from_file(cls, confpath: _path_t, idxpath: _path_t, **kwargs) -> "IAMconv":
        """Create a mapping of IAMC indicator variables with index columns

        Parameters
        ----------
        confpath : Union[str, Path]
            Path to config file for IAMC <-> data package config file

        idxpath : Union[str, Path]
            Path to index file

        **kwargs
            Keyword arguments passed on to the pandas reader backend.

        Returns
        -------
        IAMconv

        """
        # index entries are resolved relative to the index file's directory
        basepath = Path(idxpath).parent
        conf = cls._validate(cast(Dict, dwim_file(confpath)))
        return cls(
            pkgindex.from_file(idxpath), conf["indices"], basepath=basepath, **kwargs
        )

    @classmethod
    def read_indices(cls, path: _path_t, basepath: _path_t, **kwargs) -> pd.Series:
        """Read index column definitions provided in config"""
        _lvls: pd.Series = _reader(
            Path(basepath) / path,
            usecols=["name", "iamc"],
            index_col="name",
            squeeze=True,
            **kwargs,
        )
        # fallback when iamc name is missing; capitalized name is the most common
        return _lvls.fillna({i: i.capitalize() for i in _lvls.index})

    def __init__(self, idx: pkgindex, indices: Dict, basepath: _path_t, **kwargs):
        """Converter initialised with a set of IAMC variable index column definitions

        Parameters
        ----------
        idx : `friendly_data.dpkg.pkgindex`
            Index of datasets with IAMC variable definitions

        indices : Dict[str, pd.Series]
            Index column definitions

        """
        # levels are for user defined idxcols, default for mandatory idxcols
        self.indices = {
            col: path_or_default
            if col in self._IAMC_IDX
            else self.read_indices(path_or_default, basepath, **kwargs)
            for col, path_or_default in indices.items()
        }
        # only keep index entries that carry an "iamc" variable definition
        self.res_idx = pkgindex(glom(idx, Iter().filter(T.get("iamc")).all()))
        self.basepath = Path(basepath)

    def index_levels(self, idxcols: Iterable) -> Dict[str, pd.Series]:
        # only for user defined idxcols
        return {col: self.indices[col] for col in idxcols if col not in self._IAMC_IDX}

    def _varwidx(self, entry: Dict, df: pd.DataFrame, basepath: _path_t) -> Resource:
        """Write a dataframe that includes index columns in the IAMC variable

        Parameters
        ----------
        entry : Dict
            Entry from the `friendly_data.dpkg.pkgindex`

        df : pd.DataFrame
            Data frame in IAMC format

        basepath : Union[str, Path]
            Data package base path

        Returns
        -------
        Resource
            The resource object pointing to the file that was written

        """
        _lvls = self.index_levels(entry["idxcols"])
        # do a case-insensitive match
        values = {
            entry["iamc"].format(**dict(zip(_lvls, vprod))).lower(): "|".join(kprod)
            for kprod, vprod in zip(
                glom(_lvls.values(), Invoke(product).star([T.index])),
                glom(_lvls.values(), Invoke(product).star([T.values])),
            )
        }
        _df = df.reset_index("variable").query("variable.str.lower() == list(@values)")
        self._warn_empty(_df, entry)
        # FIXME: maybe instead of str.split, put a tuple, and expand
        idxcols = _df.variable.str.lower().map(values).str.split("|", expand=True)
        idxcols.columns = _lvls.keys()
        # don't want to include _df["variable"] in results
        _df = (
            pd.concat([idxcols, _df["value"]], axis=1)
            .set_index(list(_lvls), append=True)
            .rename(columns={"value": self.f2col(entry["path"])})
        )
        # FIXME: maybe add a column spec in index entries
        return from_df(_df, basepath=basepath, datapath=entry["path"])

    def _varwnoidx(self, entry: Dict, df: pd.DataFrame, basepath: _path_t) -> Resource:
        """Write a dataframe that does not include index columns in the IAMC variable

        Parameters
        ----------
        entry : Dict
            Entry from the `friendly_data.dpkg.pkgindex`

        df : pd.DataFrame
            Data frame in IAMC format

        basepath : Union[str, Path]
            Data package base path

        Returns
        -------
        Resource
            The resource object pointing to the file that was written

        """
        value = entry["iamc"].lower()
        _df = (
            df.reset_index("variable")
            .query("variable.str.lower() == @value")
            .drop(columns="variable")
            .rename(columns={"value": self.f2col(entry["path"])})
        )
        self._warn_empty(_df, entry)
        return from_df(_df, basepath=basepath, datapath=entry["path"])

    def from_iamdf(self, iamdf: "IamDataFrame", basepath: _path_t) -> List[Resource]:
        """Write an IAMC dataframe

        Parameters
        ----------
        iamdf : pyam.IamDataFrame
            The IAMC data frame

        basepath : Union[str, Path]
            Data package base path

        Returns
        -------
        List[Resource]
            List of resource objects pointing to the files that were written

        """
        df = self.iamdf2df(iamdf)
        # format-string variables carry index columns; plain ones do not
        resources = [
            self._varwidx(entry, df, basepath)
            if is_fmtstr(entry["iamc"])
            else self._varwnoidx(entry, df, basepath)
            for entry in self.res_idx
        ]
        return resources

    def resolve_iamc_idxcol_defaults(self, df: pd.DataFrame):
        """Find missing IAMC indices and set them to the default value from config

        The IAMC format requires the following indices: `self._IAMC_IDX`; if
        any of them are missing, the corresponding index level is created,
        and the level values are set to a constant specified in the config.

        Parameters
        ----------
        df : pandas.DataFrame

        """
        defaults = filter_dict(self.indices, set(self._IAMC_IDX) - set(df.index.names))
        return df.assign(**defaults).set_index(list(defaults), append=True)

    def to_df(self, files: Iterable[_path_t], basepath: _path_t = "") -> pd.DataFrame:
        """Convert CSV files to IAMC format according to configuration in the index

        Parameters
        ----------
        files : Iterable[Union[str, Path]]
            List of files to collate and convert to IAMC

        basepath : Union[str, Path]
            Data package base path

        Returns
        -------
        DataFrame
            A ``pandas.DataFrame`` in IAMC format

        """
        dfs = []
        for fpath in files:
            # NOTE: res_from_entry requires: "path", "idxcols", "alias"; later
            # in the iteration, "iamc" is required
            _entries = [
                entry
                for entry in self.res_idx.records(["path", "idxcols", "alias", "iamc"])
                if f"{fpath}" == entry["path"]
            ]
            if _entries:
                entry = _entries[0]
                if len(_entries) > 1:
                    logger.warning(f"{entry['path']}: duplicate entries, picking first")
            else:
                continue
            df = to_df(res_from_entry(entry, basepath if basepath else self.basepath))
            df = self.resolve_iamc_idxcol_defaults(df)
            lvls = self.index_levels(df.index.names)
            if is_fmtstr(entry["iamc"]):
                # NOTE: need to calculate the subset of levels that are in the
                # current dataframe; this is b/c MultiIndex.set_levels accepts
                # a sequence of level values for every level
                # FIXME: check if data in file is consistent with index
                # definition
                _lvls = {col: lvls[col][idx_lvl_values(df.index, col)] for col in lvls}
                iamc_variable = (
                    df.index.set_levels(levels=_lvls.values(), level=_lvls.keys())
                    .to_frame()
                    .reset_index(list(_lvls), drop=True)
                    .apply(lambda r: entry["iamc"].format(**r.to_dict()), axis=1)
                )
            else:
                iamc_variable = entry["iamc"]
            useridxlvls = list(set(df.index.names) - set(self._IAMC_IDX))
            # ensure all user defined index columns are removed before concatenating
            df = (
                df.rename(columns={df.columns[-1]: "value"})
                .reset_index(useridxlvls, drop=True)
                .assign(variable=iamc_variable)
                .set_index("variable", append=True)
            )
            df.index = df.index.reorder_levels(self._IAMC_IDX)
            dfs.append(df)
        df = pd.concat(dfs, axis=0)
        if df.empty:
            logger.warning("empty data set, check config and index file")
        return df

    def to_csv(
        self,
        files: Iterable[_path_t],
        output: _path_t,
        basepath: _path_t = "",
        wide: bool = False,
    ):
        """Write converted IAMC data frame to a CSV file

        Parameters
        ----------
        files : Iterable[Union[str, Path]]
            List of files to collate and convert to IAMC

        output : Union[str, Path] (default: empty string)
            Path of the output CSV file; if empty, nothing is written to file.

        basepath : Union[str, Path]
            Data package base path

        wide : bool (default: False)
            Write the CSV in wide format (with years as columns)

        """
        df = self.to_df(files, basepath=basepath)
        if wide:
            # pyam writes the wide (years-as-columns) layout
            df = pyam.IamDataFrame(df)
        Path(output).parent.mkdir(parents=True, exist_ok=True)
        df.to_csv(output)
Electric cars are great for the environment, right? Well, yes, but their carbon footprint isn't exactly zero. You're still drawing that electricity from somewhere, mostly coal-fired power stations. How about a solar-powered vehicle, then? There aren't a lot of those around — for cars, the physics don't quite make sense yet. There's a solar-powered plane, though that's hardly practical for the average commuter. But you can now at least buy a solar-powered scooter — one where the platform itself doubles as a solar panel. The Solar Electric Scooter, an ingenious design by a small southern California startup, will go 20 miles on a single charge. If the day is too cloudy, you can still pull out the battery and plug it in. Its top speed is 15 miles an hour, but the scooter will take you there with tremendous torque, in 3.7 seconds. Better hold on. Right now, the scooter is being sold at a special rate of $1,500 via the WhenYouWish website, a Kickstarter-style funding service. If it goes into production, it'll cost closer to $2000. Those who order at the reduced price will get their scooter even if the whole project isn't funded. Which is handy, because the scooter is currently miles away from its $100,000 funding goal. Could you see yourself on this machine in your daily commute, a quick trip to the store, or any other use case? Let us know in the comments. Image courtesy WhenYouWish/Solar Electric Scooter Company
. Reducing postoperative morbidity and mortality is important not only for patients' outcome but for reduction of financial burden on society. Precise and accurate preoperative evaluation of surgical risk factors is crucial to plan appropriate postoperative allocation of medical resources. American Society of Anesthesiologists physical status is a traditional measure to describe preoperative risk of patients undergoing surgery. In the last decade, several scoring systems with better sensitivity and specificity were reported and validated. Charlson Age-comorbidity Index, Physiological and Operative Severity Score for the Enumeration of Mortality and Morbidity (POSSUM) are frequently used scoring systems. Several lines of evidence indicate that negligence of medical caregivers cause substantial numbers of errors to patients and often leads to severe complications or deaths. Full compliances to surgical checklists and implementation of medical team will help reduce these errors and lead to better patients' postoperative outcomes.
ANALYSIS/OPINION: This week I am going to do something unusual. I am going to enter into a conversation with another columnist. Doing so was not so unusual a few decades back. Bill Buckley and James Jackson Kilpatrick did it when provoked and it was always interesting. Yet today a columnist is a godlike figure. Today’s columnist communicates solely with Olympus, and the result is often a bit tedious. I propose to address the New York Post’s Michael Goodwin and congratulate him on noting that mainstream media (MSM) have passed yet another milepost in their decline. Michael wrote in his column this weekend that “Donald Trump may or may not fix his campaign, and Hillary Clinton may or may not become the first female president. But something else happening before our eyes is almost as important: the complete collapse of American journalism as we know it.” And he elaborated: “The frenzy to bury Trump is not limited to the Clinton campaign and the Obama White House. They are working hand in hand with what was considered the cream of the nation’s news organizations.” Michael writes for a Rupert Murdoch newspaper and I write for the good Times and The American Spectator. None is a member of the MSM, but I would venture that neither of us is as tyrannized into homogeneity as the writers for the MSM. In fact, there exists more diversity of opinion about Mr. Trump and Mrs. Clinton where we write than within the MSM. In our audience we trust our readers to decide for themselves. My only quibble with Michael is that I doubt the MSM had much credibility before it began its ambush of Mr. Trump, though for a certitude it has now gone beyond the point of no return. As he says, the MSM’s “reputations will likely never recover, nor will the standards. No future producer, editor, reporter or anchor can be expected to meet a test of fairness when that standard has been trashed in such willful and blatant fashion.” This is a historic moment. 
Mainstream Americans will continue their migration to the Internet. That is their alternative and in using it they will continue the steady bankruptcy of the news organizations of MSM. Their abandonment of the standards for fairness and accuracy is completely willful. They find themselves indignant by the looseness with which Mr. Trump says certain things. Most egregious was their hysteria over a sentence fragment about what Second Amendment advocates might or might not do in response to Mrs. Clinton’s attacks on the amendment. The MSM insisted he was encouraging violence. Among mature adults that response was considered preposterous. The MSM is aroused by Mr. Trump’s words but unbothered about actions that Mrs. Clinton has actually taken. They seem to think that Mr. Trump’s misfired jokes or loosely formulated statements are more dangerous to the commonweal than Mrs. Clinton’s decisions with her emails and her mendacious cover-ups. Evidence of what FBI Director James Comey called Mrs. Clinton’s “extremely careless” mishandling of classified documents does not arouse the MSM’s sense of alarm. Even evidence of repeated conflict of interest in her co-mingling of Foundation work and State Department work does not trouble the MSM. Their aphorism is not that actions speak louder than words but that words are more alarming than actions. Actually, Hillary Clinton’s server on which many of her actions were captured will be to her candidacy what Monica’s DNA bespattered dress was to Bill Clinton’s presidency. That is to say, her server will be remembered as high-tech proof that Hillary is a liar. Consider her polls. Sixty-eight percent of the electorate already consider her “untrustworthy,” and as of Monday evening, we are informed of the discovery of 14,900 more emails that she failed to turn over to the authorities. 
The MSM may think that Hillary’s actions do not reverberate as loudly with the American people as a handful of cavalier utterances from Donald, but I think they are mistaken. Her acts and the lies she has repeated to cover up for them are going to become increasingly consequential in this race. They will be repeated on the Internet, within the alternative media, in the conservative press, and — at least transiently — in the MSM itself. Lying about mishandled intelligence and repeated conflicts of interest may not be serious matters with the MSM. My guess is they are with the average American. Michael Goodwin has drawn our attention to an important historical development. With the trashing of Donald Trump and the celebration of a career criminal, the mainstream media have become passe. • R. Emmett Tyrrell Jr. is editor in chief of The American Spectator. He is author of “The Death of Liberalism,” published by Thomas Nelson Inc. Copyright © 2019 The Washington Times, LLC. Click here for reprint permission.
/*******************************************************************************
**NOTE** This code was generated by a tool and will occasionally be
overwritten. We welcome comments and issues regarding this code; they will be
addressed in the generation tool. If you wish to submit pull requests, please
do so for the templates in that tool.

This code was generated by Vipr (https://github.com/microsoft/vipr) using
the T4TemplateWriter (https://github.com/msopentech/vipr-t4templatewriter).

Copyright (c) Microsoft Corporation. All Rights Reserved.
Licensed under the Apache License 2.0; see LICENSE in the source repository
root for authoritative license information.
******************************************************************************/
package com.microsoft.services.outlook;

/**
 * The type Conversation Thread.
 */
public class ConversationThread extends Entity {

    public ConversationThread() {
        setODataType("#Microsoft.OutlookServices.ConversationThread");
    }

    private java.util.List<Recipient> ToRecipients = null;

    /**
     * Gets the To Recipients.
     *
     * @return the java.util.List<Recipient>
     */
    public java.util.List<Recipient> getToRecipients() {
        return this.ToRecipients;
    }

    /**
     * Sets the To Recipients.
     *
     * @param value the java.util.List<Recipient>
     */
    public void setToRecipients(java.util.List<Recipient> value) {
        this.ToRecipients = value;
        valueChanged("ToRecipients", value);
    }

    private String Topic;

    /**
     * Gets the Topic.
     *
     * @return the String
     */
    public String getTopic() {
        return this.Topic;
    }

    /**
     * Sets the Topic.
     *
     * @param value the String
     */
    public void setTopic(String value) {
        this.Topic = value;
        valueChanged("Topic", value);
    }

    private Boolean HasAttachments;

    /**
     * Gets the Has Attachments.
     *
     * @return the Boolean
     */
    public Boolean getHasAttachments() {
        return this.HasAttachments;
    }

    /**
     * Sets the Has Attachments.
     *
     * @param value the Boolean
     */
    public void setHasAttachments(Boolean value) {
        this.HasAttachments = value;
        valueChanged("HasAttachments", value);
    }

    private java.util.Calendar LastDeliveredDateTime;

    /**
     * Gets the Last Delivered Date Time.
     *
     * @return the java.util.Calendar
     */
    public java.util.Calendar getLastDeliveredDateTime() {
        return this.LastDeliveredDateTime;
    }

    /**
     * Sets the Last Delivered Date Time.
     *
     * @param value the java.util.Calendar
     */
    public void setLastDeliveredDateTime(java.util.Calendar value) {
        this.LastDeliveredDateTime = value;
        valueChanged("LastDeliveredDateTime", value);
    }

    private java.util.List<String> UniqueSenders = null;

    /**
     * Gets the Unique Senders.
     *
     * @return the java.util.List<String>
     */
    public java.util.List<String> getUniqueSenders() {
        return this.UniqueSenders;
    }

    /**
     * Sets the Unique Senders.
     *
     * @param value the java.util.List<String>
     */
    public void setUniqueSenders(java.util.List<String> value) {
        this.UniqueSenders = value;
        valueChanged("UniqueSenders", value);
    }

    private java.util.List<Recipient> CcRecipients = null;

    /**
     * Gets the Cc Recipients.
     *
     * @return the java.util.List<Recipient>
     */
    public java.util.List<Recipient> getCcRecipients() {
        return this.CcRecipients;
    }

    /**
     * Sets the Cc Recipients.
     *
     * @param value the java.util.List<Recipient>
     */
    public void setCcRecipients(java.util.List<Recipient> value) {
        this.CcRecipients = value;
        valueChanged("CcRecipients", value);
    }

    private String Preview;

    /**
     * Gets the Preview.
     *
     * @return the String
     */
    public String getPreview() {
        return this.Preview;
    }

    /**
     * Sets the Preview.
     *
     * @param value the String
     */
    public void setPreview(String value) {
        this.Preview = value;
        valueChanged("Preview", value);
    }

    private Boolean IsLocked;

    /**
     * Gets the Is Locked.
     *
     * @return the Boolean
     */
    public Boolean getIsLocked() {
        return this.IsLocked;
    }

    /**
     * Sets the Is Locked.
     *
     * @param value the Boolean
     */
    public void setIsLocked(Boolean value) {
        this.IsLocked = value;
        valueChanged("IsLocked", value);
    }

    private java.util.List<Post> Posts = null;

    /**
     * Gets the Posts.
     *
     * @return the java.util.List<Post>
     */
    public java.util.List<Post> getPosts() {
        return this.Posts;
    }

    /**
     * Sets the Posts.
     *
     * @param value the java.util.List<Post>
     */
    public void setPosts(java.util.List<Post> value) {
        this.Posts = value;
        valueChanged("Posts", value);
    }
}
Time of appearance and amplitudes of miniature end-plate potentials were studied statistically during the action of carbacholine on the rat soleus muscle. The facilitating effect of carbacholine on spontaneous quantal transmitter release did not change the time distribution of miniature end-plate potentials. It is suggested that this effect of carbacholine was due to an increase in the activity of releasing sites rather than to an increase in their number.
<gh_stars>1-10 import { MapDocumentNode, ProfileDocumentNode, SecurityScheme, SecurityValues, } from '@superfaceai/ast'; import { PerformError, Profile, Provider, Result, SuperfaceClient, SuperJson, UseCase, } from '@superfaceai/one-sdk'; import { ServiceSelector } from '@superfaceai/one-sdk/dist/private'; import { CompleteSuperfaceTestConfig } from '.'; /* eslint-disable @typescript-eslint/no-unsafe-return */ interface UseCaseOptions { isOk?: boolean; isErr?: boolean; result?: Result<unknown, PerformError>; } export const getUseCaseMock = jest.fn< UseCase, Parameters<(name: string, options?: UseCaseOptions) => UseCase> >((name: string, options?: UseCaseOptions) => ({ ...Object.create(UseCase.prototype), perform: jest.fn().mockResolvedValue({ isOk: options?.isOk !== undefined ? jest.fn().mockResolvedValue(options.isOk) : jest.fn(), isErr: options?.isErr !== undefined ? jest.fn().mockResolvedValue(options.isErr) : jest.fn(), unwrap: options?.result?.unwrap ?? jest.fn(), value: options?.result?.isOk() && options.result.value, error: options?.result?.isErr() && options.result.error, }), name, })); export interface ProfileOptions { version?: string; cacheKey?: string; } export const getProfileMock = jest.fn< Promise<Profile>, Parameters<(profileId: string, options?: ProfileOptions) => Promise<Profile>> >(async (profileId: string, options?: ProfileOptions) => ({ ...Object.create(Profile.prototype), client: jest.createMockFromModule<SuperfaceClient>( '@superfaceai/one-sdk/dist/client/client' ), configuration: { id: profileId ?? 'profile', version: options?.version ?? '1.0.0', cacheKey: options?.cacheKey ?? 
'', }, getUseCase: getUseCaseMock, })); export interface ProviderOptions { securityValues?: SecurityValues[]; } export const getProviderMock = jest.fn< Promise<Provider>, Parameters< (providerName: string, options?: ProviderOptions) => Promise<Provider> > >(async (providerName: string, options?: ProviderOptions) => ({ ...Object.create(Provider.prototype), configuration: { name: providerName, security: options?.securityValues ?? [], }, })); export interface SuperfaceClientOptions { superJson?: SuperJson; profileAst?: ProfileDocumentNode; mapAst?: MapDocumentNode; providerName?: string; configuration?: { baseUrl: string; securitySchemes?: SecurityScheme[]; parameters?: Record<string, string>; }; } const DEFAULT_SUPERJSON = new SuperJson({ profiles: { profile: { file: 'path/to/profile.supr', providers: { provider: { file: 'path/to/map.suma', }, }, }, }, providers: { provider: { file: 'path/to/provider.json', security: [], }, }, }); export const SuperfaceClientMock = jest.fn< SuperfaceClient, Parameters<(options?: SuperfaceClientOptions) => SuperfaceClient> >((options?: SuperfaceClientOptions) => ({ ...Object.create(SuperfaceClient.prototype), superJson: options?.superJson ?? DEFAULT_SUPERJSON, getProfile: getProfileMock, getProvider: getProviderMock, cacheBoundProfileProvider: jest.fn().mockReturnValue({ profileAst: options?.profileAst ?? {}, mapAst: options?.mapAst ?? {}, providerName: options?.providerName ?? 'provider', configuration: { services: ServiceSelector.withDefaultUrl( options?.configuration?.baseUrl ?? 'https://base.url' ), security: options?.configuration?.securitySchemes ?? 
[], parameters: options?.configuration?.parameters, }, }), })); export const getMockedSfConfig = async (options?: { superJson?: SuperJson; isOk?: boolean; isErr?: boolean; result?: Result<unknown, PerformError>; baseUrl?: string; securitySchemes?: SecurityScheme[]; securityValues?: SecurityValues[]; parameters?: Record<string, string>; }): Promise<CompleteSuperfaceTestConfig> => ({ client: new SuperfaceClientMock({ superJson: options?.superJson ?? DEFAULT_SUPERJSON, configuration: { baseUrl: options?.baseUrl ?? 'https://base.url', securitySchemes: options?.securitySchemes, parameters: options?.parameters, }, }), profile: await getProfileMock('profile'), provider: await getProviderMock('provider', { securityValues: options?.securityValues ?? [], }), useCase: getUseCaseMock('usecase', { isOk: options?.isOk ?? true, isErr: options?.isErr, result: options?.result, }), });
Members of John Ritter's on-screen family (Amy Davidson, Katey Sagal and Kaley Cuoco) struggle to cope in the aftermath of his sudden death. In the midst of last week's storm surrounding CBS and "The Reagans," you may have missed something sweet and lovely that happened over on ABC. On Tuesday night, the cast, producers and writers of "8 Simple Rules for Dating My Teenage Daughter" took their tragic loss of a co-star and friend and, in a nod to moving on, created honest, wonderful television. That's saying something. Considering the way television can bungle these things, the sitcom's return without John Ritter, whose sudden death of an aortic dissection in September shocked so many, could have been a lot worse. After Ritter's death, the cast and the network mourned, quite publicly, to strong ratings that, overnight, turned the network's modest hit into a legitimate contender bleeding viewers from its comedy competition on NBC. Having seen that power, the announcement that "8 Simple Rules" would continue was understandably met with a bit of cynicism. Some wondered if the Ritter-less series would exploit the beloved 54-year-old's death for the sake of ratings, drumming us over the head with ersatz sorrow. Others perhaps couldn't see the point of going on, citing past failures like "Chico and the Man." In that sitcom, principal star Freddie Prinze committed suicide in 1977, the show's third season. Though producers attempted to replace him with a younger star, the show died only a season later. The same fate befell "NewsRadio" when Phil Hartman was shot dead by his wife, who then turned the gun on herself, in 1998. In came Jon Lovitz, but the magic was gone. Television's even less forgiving now than it was then, which means that the revamped "8 Simple Rules" has a mountain to climb ahead of it. More than 20 million tuned in for the Hennessys' laugh-free return last week, making it a rare success in a lackluster November. Moreover, the attention was well-deserved.
The tears and mourning blurred the line between the characters' feelings and the cast's true emotions, creating an hour that felt tragically, realistically heartrending. The episode, which wasn't available for critics to review last week, brought home the random, stupefying nature of death as the cast and characters addressed this new gap in their lives. The writers could have fallen back on maudlin sap and fancy speeches, but commendably, they didn't. Instead, the Hennessys' sorrow was allowed to exist as hollow, uncharted space, exactly as it should be. Conveyed through a series of glimpses, the episode's deft writing showed how death swoops in and weighs down the simplest of conversations. Their final moments with their dad were typical gaffes and random moments -- Cate (Katey Sagal) laughing at her husband's lost socks, Rory (Martin Spanjers) tripping over Paul's shoes left on the stair, Bridget (Kaley Cuoco) and Kerry (Amy Davidson) lamenting their meaningless insults as he left -- all imbued with terrible importance after his death. James Garner's entry as Jim, Cate's dad, provided a rock-steady, somewhat grizzled counterpoint to the occasional outbursts of adolescent rage. And all speech became dumb, pointless, yet kind, as characters struggled to say and do the right thing, in a situation for which there is no foolproof emotional etiquette. "You know what his last words to me were?" Cate said to her kids. " 'Hasta la vista baby!' Such a cornball. He would say anything, he didn't care how silly he was. He just wanted to make us laugh. ... So, somebody's going to have to take up the cornball slack around here." A fitting statement, because Paul's funeral is over. Tonight at 8 on KOMO/4, the series takes the Hennessys back to their half-hour time slot, while the specter of Dad's death remains present. The laugh track is supposed to return, although to maintain the series' feeling, the formula's going to need some tinkering.
Perhaps a solution will be to incorporate Garner's grumpy old man into the mix, but he's more grump than cornball. Ritter was clearly the hub of "8 Simple Rules' " humor, and if Sagal takes up some of his paternal quirks for the sake of wringing out a few laughs, viewers might not buy it. Based on last week's efforts, however, I'm curious to tune in for a few more episodes. This season has proven that there's little rhyme or reason to why a show makes it or doesn't. In that kind of environment, an experiment like this will be worth monitoring. Last week "8 Simple Rules" showed what it can do at its best. Maybe the cast and crew can harness that drive and determination into creating success out of sadness. I have the distinct feeling you don't care in either of these cases, but The WB's cut "Tarzan's" vines. On the other hand, "Jake 2.0" has been picked up for a full season. We'll ponder that some other time. ... Back on the subject of "8 Simple Rules" and its challenges, CBS is airing "The Andy Griffith Reunion: Back to Mayberry" tonight at 8 and, yes, it is as warm as Aunt Bee's pie. Griffith hosts alongside Don Knotts, Ron Howard and Jim Nabors. ... CNBC's "The News With Brian Williams" is losing Brian Williams in January. Williams, who is due to take over as anchor of the "NBC Nightly News" in November 2004, will relinquish his role at the helm of the low-rated cable news show, NBC announced yesterday. CNBC will keep a nightly newscast with an anchor to be named later.
A pair of SEC squads meet in Columbia on Saturday afternoon attempting to win their first conference game of the season, as the Missouri Tigers play host to the Kentucky Wildcats. Kentucky is coming off one of its most impressive performances of the season last week in a 29-24 defeat to nationally-ranked Georgia, a game in which it led in the third quarter. While it may have been a moral victory for the Wildcats, the fact remains that they only have one actual victory on the season, a 47-14 defeat of Kent State on Sept. 8. They have won just four of their 20 conference games since the start of the 2010 season. Missouri’s transition from the Big 12 Conference to the SEC has not been a smooth one. All four of the Tigers’ losses this season have come in conference play, including most recently in blowout fashion versus top-ranked Alabama on Oct. 13, 42-10. The Wildcats have won both previous matchups in the all-time series with Missouri, but the two teams have not met since 1968. Kentucky has one of the worst offenses in the country, ranking 13th out of 14 SEC teams in both points per game (19.3) and yards per game (309.3). With Maxwell Smith and Patrick Towles out with injuries, the Wildcats are down to their third option at quarterback in Jalen Whitlow, who has played poorly, completing less than 47 percent of his passes for 368 yards with a touchdown and two interceptions. Morgan Newton has been even worse (10-of-32, three INTs), although he threw for a touchdown and ran for another against Georgia. The lack of continuity at quarterback has hindered La’Rod King’s effectiveness in recent weeks, but he still leads the team in all major receiving categories (35 receptions, 396 yards, four TDs). UK has also suffered injuries at running back, but has found some success with the tandem of Raymond Sanders (76 carries, 396 yards, three TDs) and Jonathan George (60 carries, 288 yards, three TDs). The defense (32.4 ppg, 426.6 ypg) has not picked up the slack for the offense. 
The unit has a difficult time getting off the field, as opponents convert 54 percent of their third down attempts and average more than 33 minutes of possession time. Avery Williamson (81 tackles) is among the top tacklers in the nation and he also has an interception, a forced fumble, and a fumble recovery. Alvin Dupree (6.5 TFL, 3.5 sacks), Collins Ukwu (three sacks, two QB hurries), and Donte Rumph (four TFL, three sacks) can all get after the quarterback. Missouri’s offense is only slightly better than Kentucky’s (23.1 ppg, 323.7 ypg), and it has also struggled with a lack of identity at the quarterback position. James Franklin (161 passing ypg, four TDs, two INTs) has been sidelined with an injury, but even in the five games he has appeared, he seems to have taken a step back from an outstanding 2011 season. Freshman Corbin Berkstresser hasn’t fared any better, completing less than 48 percent of his passes for 627 yards, three touchdowns and three interceptions. Kendial Lawrence has been a reliable performer in the UM backfield with 524 yards and five scores on 95 carries, but the team as a whole averages just 3.7 ypc and no other player has more than 120 rushing yards. Marcus Lucas (32 receptions, 327 yards, two TDs) and T.J. Moe (26 receptions, 250 yards, TD) headline a modest receiving corps. This off-season’s prized recruit Dorial Green-Beckham has not contributed much in his freshman year (seven receptions, 128 yards, TD). Perhaps the Tigers’ most dangerous weapon sees most of his time on special teams. Marcus Murphy leads the nation with four return touchdowns, which includes a 98-yard kickoff return against Alabama. After giving up a total of 31 points in its previous two games, the Missouri defense had no answer for Alabama last week, allowing 42 points and more than 530 yards of offense. The front seven has played well, with Sheldon Richardson, Kony Ealy, Michael Sam and Jimmy Burge all recording at least 5.5 tackles for loss and three sacks. 
Kip Edwards (24 tackles) has a nose for the football, recording two interceptions and three fumble recoveries.
""" Operation wrappers for piclient Some code cannot be created using py4j in python. Therefore following operation was created. Note that usually operation uses cache with Long keys, so it's impossible to use following statement correctly: cache.get(1) - 1 will be automatically interpreted into Java side as Integer value. """ import collections from enum import Enum from .generator_utils import AffinityCountKeyGeneratorBuilder, AffinityPartitionKeyGeneratorBuilder from ..piclient import get_gateway # Description of Ignite Transaction (concurrency, isolation, timeout, size and optional label) TxDescriptor = collections.namedtuple('TxDescriptor', 'concurrency isolation timeout size label', defaults=('PESSIMISTIC', 'REPEATABLE_READ', 0, 10000, None)) # Operation class names from piclient java part class Operation(Enum): affinity_operation = "AffinityOperation" async_operation = "ASyncOperation" broadcast_message_operation = "BroadcastMessageOperation" broke_data_entry_operation = "BrokeDataEntryOperation" create_change_entry_write_version_or_value_operation = "ChangeEntryWriteVersionOrValueOperation" clear_entry_locally_operation = "ClearEntryLocallyOperation" checksum_operation = "ChecksumOperation" cq_operation = "ContinuousQueryOperation" cpu_load_operation = "CpuLoadOperation" dbt_atomic_long_operation = "DistributedAtomicLongOperation" dbt_checksum_operation = "DistributedChecksumOperation" event_collect_operation = "EventCollectOperation" event_pending_operation = "EventPendingOperation" jmx_ports_operation = "GetJmxPortsOperation" kill_on_cp_operation = "KillOnCheckpointOperation" multicache_transfer_operation = "MultiCacheTransferTaskOperation" put_all_operation = "PutAllOperation" put_operation = "PutOperation" put_with_optional_remove_operation = "PutWithOptionalRemoveOperation" remove_operation = "RemoveOperation" shuffle_binary_operation = "ShuffleBinaryOperation" single_cache_transfer_operation = "SingleCacheTransferTaskOperation" 
start_dynamic_caches_operation = "StartDynamicCachesOperation" streamer_operation = "StreamerOperation" sum_operation = "SumOperation" tx_control_operation = "TransactionControlOperation" tx_put_operation = "TxPutOperation" wal_statistics_operation = "WalRecordsStatisticsCollector" starvation_in_fork_join_pool = "ForkJoinPoolStarvationOperation" throw_custom_error_operation = "ThrowCustomErrorOperation" create_tables_operation = "org.apache.ignite.piclient.operations.impl.combination.setup.CreateTablesOperation" create_indexes_operation = "org.apache.ignite.piclient.operations.impl.combination.setup.CreateIndexesOperation" wait_snapshot_end_operation = "org.apache.ignite.piclient.operations.impl.combination.setup.WaitForSnapshotsOperation" def create_async_operation(operation_method, *args, gateway=None, **kwargs): """ Wrap operation to AsyncOperation Evaluating will be started as soon as threads resource will be available on client (Default piclient started with cores/2 thread available) :param operation_method: operation method to async :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) return gateway.entry_point.getOperationsService() \ .createAsyncOperation(operation_method(*args, **kwargs, gateway=gateway)) def create_combine_operation(class_path, gateway=None): """ Run combine operations in org.apache.ignite.piclient.operations.impl.combination Endless operation, running even after exception :param class_path: full class path :param gateway: optional gateway (default taken from threading.current_thread) :return: operation """ gateway = get_gateway(gateway) operation = _create_operation(class_path, gateway=gateway) return operation def create_tables_operation(tables_count, values_count, gateway=None): """ Create tables operation Custom count of tables columns (intcol integer, numbcol number, varchcol varchar, varch2col varchar2, primary key (intcol, numbcol)) :param tables_count: tables 
count to create :param values_count: values count inserted in each table :param gateway: optional gateway (default taken from threading.current_thread) :return: operation """ gateway = get_gateway(gateway) operation = _create_operation(Operation.create_tables_operation.value, tables_count, values_count, gateway=gateway) return operation def create_indexes_operation(count, gateway=None): """ Create indexes in all existed tables :param count: amount of indexes to create :param gateway: optional gateway (default taken from threading.current_thread) :return: operation """ gateway = get_gateway(gateway) operation = _create_operation(Operation.create_indexes_operation.value, count, gateway=gateway) return operation def wait_snapshot_operation(wait_time, gateway=None): """ Wait until some snapshot operation will end Might be snapshot creation or snapshot restore :param wait_time: time to wait :param gateway: optional gateway (default taken from threading.current_thread) :return: operation """ gateway = get_gateway(gateway) operation = _create_operation(Operation.wait_snapshot_end_operation.value, wait_time, gateway=gateway) return operation def create_event_collect_operation(events, with_exception=None, gateway=None): """ Start operation that will collect all events and trace them into log Returns Collection<Map<String, String>> for each node there will be Map<node_UUID, event> Should be merged manually at this time :param events: events to collect (org.apache.ignite.IgniteEvents) :param with_exception: with exception? 
:param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.event_collect_operation.value, convert_to_java_array(gateway.jvm.java.lang.Integer, events, gateway), gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def create_event_pending_operation(events, with_exception=None, gateway=None): """ Start listener that will release execution flow when events happens :param events: events to wat (org.apache.ignite.IgniteEvents) :param with_exception: with exception? :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.event_pending_operation.value, convert_to_java_array(gateway.jvm.java.lang.Integer, events, gateway), gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def create_put_with_optional_remove_operation(cache_name, start, end, remove_probability, node_consistent_id=None, tx_description=None, use_monotonic_value=None, monotonic_value_seed=None, value_type=None, with_exception=None, gateway=None): """ Create may be put operation (put keys from start to end deleting some keys during put with deleteProb probability) :param with_exception: throw exception o execution or just print stacktrace :param value_type: cache value type :param cache_name: cache name :param start: start key :param end: end key :param tx_description: TxDescriptor allowed transactions :param use_monotonic_value False, put values will be equal to key (as usual for other operations), True, put values will be monotonically increasing starting from monotonic_value_seed. That helps catch key updates reordering issues. 
:param monotonic_value_seed :param remove_probability: delete inserted keys probability :param node_consistent_id: put all primary keys to node with given consistent id :return: number of keys inserted minus number of keys deleted during insertion :param gateway: optional gateway (default taken from threading.current_thread) """ gateway = get_gateway(gateway) operation = _create_operation(Operation.put_with_optional_remove_operation.value, cache_name, start, end, remove_probability, gateway=gateway) if node_consistent_id: operation.setKeyGenerator(gateway.jvm.org.apache.ignite.piclient.operations.generators.AffinityKeyGenerator( start, end, node_consistent_id, cache_name )) # there may be no transaction if tx_description: operation.setWithTransaction() _add_allowed_transactions_to_loading(operation, (tx_description,), gateway) if use_monotonic_value is not None: operation.setUseMonotonicValue(use_monotonic_value) if monotonic_value_seed is not None: operation.setMonotonicValueSeed(monotonic_value_seed) if value_type is not None: operation.setValueType(value_type) if with_exception is not None: operation.setWithException(with_exception) if value_type is not None: operation.setValueType(value_type) return operation def create_cpu_load_operation(load_factor, num_cores, thread_per_core, gateway=None): """ Provide CPU load on each server node in cluster :param load_factor: how cpu % should be used :param num_cores: how many % cores should be used :param thread_per_core: threads per core :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.cpu_load_operation.value, load_factor, num_cores, thread_per_core, gateway=gateway) return operation def create_affinity_operation(with_exception=None, gateway=None): """ Take affinity definition for each node :param with_exception: throw exception o execution or just print stacktrace :param gateway: optional gateway (default 
taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.affinity_operation.value, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def create_kill_on_checkpoint_operation(node_id, sleep, with_exception=None, gateway=None): """ Take affinity definition for each node :param node_id: node id to kill :param with_exception: throw exception o execution or just print stacktrace :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.kill_on_cp_operation.value, node_id, sleep, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def create_wal_records_operation(with_exception=None, gateway=None): """ Take affinity definition for each node :param with_exception: throw exception o execution or just print stacktrace :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.wal_statistics_operation.value, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def create_distributed_atomic_long(with_exception=None, gateway=None): """ Take affinity definition for each node :param with_exception: throw exception o execution or just print stacktrace :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.dbt_atomic_long_operation.value, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def create_message_operation(message, with_exception=None, gateway=None): """ Create message in each nodes log :param message to broadcast :param with_exception: throw exception o execution or just 
print stacktrace :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.broadcast_message_operation.value, message, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def create_distributed_checksum_operation(with_exception=None, gateway=None): """ Operation that calculate checksum over cache values :param with_exception: throw exception o execution or just print stacktrace :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.dbt_checksum_operation.value, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def start_dynamic_caches_operation(filepath=None, batch_size=500, with_exception=None, gateway=None): """ Operation that start caches list dynamic :param filepath: caches.xml path :param batch_size: batch to create :param with_exception: throw exception o execution or just print stacktrace :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.start_dynamic_caches_operation.value, filepath, batch_size, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def create_continuous_query_operation(cache_name, duration, scan_query=None, entry_update_listener=None, entry_filter_factory=None, with_exception=None, gateway=None): """ Operation that start caches list dynamic :param cache_name: cache name for ContinuousQuery :param duration: duration of running ContinuousQuery :param scan_query: default IgniteBiPredicatePositiveKey :param entry_update_listener: default CacheEntryUpdatedListenerLogUpdate :param entry_filter_factory: default CacheEntryEventFilterFactoryPositiveKey :param 
with_exception: throw exception o execution or just print stacktrace :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.cq_operation.value, cache_name, duration, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) if scan_query: operation.setScanQuery(scan_query) if entry_update_listener: operation.setEntryUpdatedListener(entry_update_listener) if entry_filter_factory: operation.setEventFilterFactory(entry_filter_factory) return operation def create_checksum_operation(cache_name, start=None, end=None, step=None, with_exception=None, gateway=None): """ Operation that calculate checksum over cache values :param cache_name: cache name to calculate checksum :param start: start key :param end: end key :param step: step :param with_exception: throw exception o execution or just print stacktrace :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.checksum_operation.value, cache_name, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) if start: operation.setStartKey(start) if end: operation.setEndKey(end) if step: operation.setStep(step) return operation def create_remove_operation(cache_name, start, end, with_exception=None, key_type=None, gateway=None): """ Create streamer operation It puts values into defined cache name and use Long key. :param with_exception: throw exception o execution or just print stacktrace :param cache_name: cache name :param start: start key :param end: end key :param key_type: key type (default Long, optional - see org.apache.ignite.piclient.model entries) e.g. 
'org.apache.ignite.piclient.model.AllTypes' :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.remove_operation.value, cache_name, start, end, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) if key_type: operation.setKeyType(key_type) return operation def create_streamer_operation(cache_name, start, end, with_exception=None, value_type=None, parallel_operations=None, buff_size=None, allow_overwrite=None, gateway=None): """ Create streamer operation It puts values into defined cache name and use Long key. :param with_exception: throw exception o execution or just print stacktrace :param cache_name: cache name :param start: start key :param end: end key :param value_type: value type (default Long, optional - see org.apache.ignite.piclient.model entries) e.g. 'org.apache.ignite.piclient.model.values.AllTypes' :param parallel_operations: number of parallel operations :param buff_size: buffer size :param allow_overwrite: allowOverwrite flag :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.streamer_operation.value, cache_name, start, end, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) if value_type: operation.setValueType(value_type) if parallel_operations: operation.setParallelOperations(parallel_operations) if buff_size: operation.setBuffSize(buff_size) if allow_overwrite: operation.setAllowOverwrite(allow_overwrite) return operation def create_put_all_operation(cache_name, start, end, batch_size, key_type=None, value_type=None, with_exception=None, gateway=None): """ Create streamer operation It puts values into defined cache name and use Long key. 
:param with_exception: throw exception o execution or just print stacktrace :param cache_name: cache name :param start: start key :param end: end key :param batch_size: batch size to put :param key_type: key type (default Long, optional - see org.apache.ignite.piclient.model entries) :param value_type: value type (default Long, optional - see org.apache.ignite.piclient.model entries) e.g. 'org.apache.ignite.piclient.model.values.AllTypes' :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.put_all_operation.value, cache_name, start, end, batch_size, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) if value_type: operation.setValueType(value_type) if key_type: operation.setKeyType(key_type) return operation def create_transactional_put_all_operation( values_to_cache, start, end, batch_size, tx_description=TxDescriptor(), with_exception=None, gateway=None): """ Create tx put all operation It puts values into defined cache name and use Long key. :param with_exception: throw exception o execution or just print stacktrace :param values_to_cache: cache name :param start: start key :param end: end key :param batch_size: batch put size :param tx_description: TxDescriptor object :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.tx_put_operation.value, values_to_cache, start, end, batch_size, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) _add_allowed_transactions_to_loading(operation, (tx_description,), gateway) return operation def create_sum_operation(cache_name, start, end, field_name, with_exception=None, gateway=None): """ Create streamer operation It puts values into defined cache name and use Long key. 
:param with_exception: throw exception o execution or just print stacktrace :param cache_name: cache name :param start: start key :param end: end key :param field_name: field to sum :param gateway: py4j gateway :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.sum_operation.value, cache_name, start, end, field_name, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def create_transaction_control_operation( cache_name, key, value, tx_description=TxDescriptor(), with_exception=None, gateway=None): """ Create transaction operation and return control :param tx_description: :param with_exception: throw exception o execution or just print stacktrace :param cache_name: cache name :param key: key to put :param value: value to put :param tx_description: TxDescriptor object :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.tx_control_operation.value, cache_name, key, value, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) _add_allowed_transactions_to_loading(operation, (tx_description,), gateway) return operation def create_broke_data_entry_operation(cache_name, key, is_primary, *broke_types, with_exception=None, gateway=None): """ Create broke partition operation (to touch idle_verify utility) Partition id will be automatically found for defined key (first will be taken) and returned as result Note that Long value should be used for cache :param with_exception: throw exception o execution or just print stacktrace :param cache_name: cache name :param key: key to broke :param is_primary: broke primary partition or backup :param broke_types: COUNTER/VALUE/INDEX or both :return: partitionId that was broken :param gateway: optional gateway (default taken from threading.current_thread) """ gateway = get_gateway(gateway) operation = 
_create_operation(Operation.broke_data_entry_operation.value, cache_name, key, is_primary, convert_to_java_array(gateway.jvm.java.lang.String, broke_types, gateway), gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def create_change_entry_write_version_or_value_operation(cache_name, key, is_primary, *broke_types, with_exception=None, gateway=None): """ Create broke data entry operation :param with_exception: throw exception o execution or just print stacktrace :param cache_name: cache name :param key: key to broke (only Long) :param is_primary: broke primary partition or backup :param broke_types: RANDOM_MINOR_WRITE_VERSION/CHANGE_VALUE or both :return: null :param gateway: optional gateway (default taken from threading.current_thread) """ gateway = get_gateway(gateway) operation = _create_operation(Operation.create_change_entry_write_version_or_value_operation.value, cache_name, key, is_primary, convert_to_java_array(gateway.jvm.java.lang.String, broke_types, gateway), gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def clear_entry_locally_operation(cache_name, key, is_primary, with_exception=None, gateway=None): """ Clear entry on node :param with_exception: throw exception o execution or just print stacktrace :param cache_name: cache name :param key: key to broke :param is_primary: broke primary partition or backup :return: null :param gateway: optional gateway (default taken from threading.current_thread) """ gateway = get_gateway(gateway) operation = _create_operation(Operation.clear_entry_locally_operation.value, cache_name, key, is_primary, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) return operation def create_account_runner_operation(cache_name, start, end, commit_possibility, allowed_transactions=None, delay=None, run_for_seconds=None, with_exception=None, gateway=None): """ Operation that 
transfer money from one account into another Used cache should contains 'org.apache.ignite.piclient.model.values.Account' values :param with_exception: throw exception o execution or just print stacktrace :param cache_name: cache name :param start: start key :param end: end key :param run_for_seconds: how long proceed this type of operations (in seconds) :param commit_possibility: commit possibility (values in [0,1]) :param allowed_transactions: list of TxDescriptor objects :param delay: delay between transactions (in milliseconds) :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.single_cache_transfer_operation.value, cache_name, start, end, commit_possibility, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) if run_for_seconds: operation.setRunForSeconds(run_for_seconds) if delay: operation.setDelay(delay) if with_exception is not None: operation.setWithException(with_exception) _add_allowed_transactions_to_loading(operation, allowed_transactions, gateway) return operation def create_cross_cache_account_runner_operation(cache_names, start, end, commit_possibility, allowed_transactions=None, delay=None, run_for_seconds=None, keys_count=None, collision_possibility=None, cache_load_map=None, with_exception=None, gateway=None): """ Operation that transfer money from one account into another between caches Used caches should contains 'org.apache.ignite.piclient.model.values.Account' values :param with_exception: throw exception o execution or just print stacktrace :param cache_names: cache names :param start: start key :param end: end key :param run_for_seconds: how long proceed this type of operations (in seconds) :param cache_load_map: map that contains additional loading desriptors: cache name -> { 'affinity_node_id': affinity node id (NOT NODE_ID in Ignite.app) 'include': does include node to affinity node 
'metric_postfix': metrics postfix that will be added to plot (tx_metrics list that passed to TransactionalLoading should be also modified) } :param keys_count: keys count to affinity count key generator :param collision_possibility: possibility to chose key that was already used with this possibility last one of 10 used keys in loading will be selected :param commit_possibility: commit possibility (values in [0,1]) :param allowed_transactions: list of TxDescriptor objects :param delay: delay between transactions (in milliseconds) :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.multicache_transfer_operation.value, convert_to_java_array(gateway.jvm.java.lang.String, cache_names, gateway), start, end, commit_possibility, gateway=gateway) if with_exception is not None: operation.setWithException(with_exception) if cache_load_map and keys_count: for cache in cache_names: if cache_load_map[cache]['key_generator_builder']: operation.setKeyGenerator(cache, cache_load_map[cache]['key_generator_builder'].set_gateway(gateway).build()) if cache_load_map[cache]['metric_postfix']: operation.setMetricPostfix(cache_load_map[cache]['metric_postfix']) if run_for_seconds: operation.setRunForSeconds(run_for_seconds) if delay: operation.setDelay(delay) if with_exception is not None: operation.setWithException(with_exception) _add_allowed_transactions_to_loading(operation, allowed_transactions, gateway) return operation def create_starvation_in_fork_join_pool(gateway=None): """ Provide starvation in JVM pool :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.starvation_in_fork_join_pool.value, gateway=gateway) return operation def create_throw_custom_error_operation(error_class='', node_id='', gateway=None): """ Provide call throw new java.lang.error_class call in 
specific JVM belongs to node with node_id :param error_class: error class to throw :param node_id: node_id that use for found specific JVM. :param gateway: optional gateway (default taken from threading.current_thread) :return: """ gateway = get_gateway(gateway) operation = _create_operation(Operation.throw_custom_error_operation.value, error_class, node_id, gateway=gateway) return operation def convert_to_java_array(java_type, python_array, gateway): typed_java_array = gateway.new_array(java_type, len(python_array)) for i, arg in enumerate(python_array): typed_java_array[i] = arg return typed_java_array def _add_allowed_transactions_to_loading(operation, allowed_transactions, gateway): """ Convert Python dictionary with tx_description objects to a java HashMap and pass it into addTransactionType() method to AbstractTransactionOperation objects :param operation: operation to pass allowed transaction :param allowed_transactions: list TxDescriptor objects :param gateway: gateway :return: """ if not allowed_transactions: allowed_transactions = ( TxDescriptor(), # default pessimistic repeatable_read tx TxDescriptor(concurrency='OPTIMISTIC', isolation='SERIALIZABLE') ) for allowed_transaction in allowed_transactions: hash_map = gateway.jvm.java.util.HashMap() hash_map['concurrency'] = '{}'.format(allowed_transaction.concurrency) hash_map['isolation'] = '{}'.format(allowed_transaction.isolation) hash_map['timeout'] = '{}'.format(allowed_transaction.timeout) hash_map['size'] = '{}'.format(allowed_transaction.size) if allowed_transaction.label: hash_map['label'] = '{}'.format(allowed_transaction.label) operation.addTransactionType(hash_map) def _create_operation(class_name, *args, gateway): object_class = gateway.jvm.java.lang.Object args_array = gateway.new_array(object_class, len(args)) for i, arg in enumerate(args): args_array[i] = arg return gateway.entry_point.getOperationsService().createOperation(class_name, args_array)
MANCHESTER UNITED face Crystal Palace at Selhurst Park this evening looking to end an appalling run of three consecutive defeats. United have lost to Everton, West Brom and Chelsea in recent weeks, and perhaps more worryingly - failed to score in any of them. Crystal Palace haven't fared much better. The Eagles have also lost their last three and are destined to finish in the bottom half. Manager Louis van Gaal has brought in Phil Jones and Luke Shaw as United look to halt their losing streak. Paddy McNair and Robin van Persie make way for the duo in south London, with the latter not even included on the bench. Record signing Angel di Maria is also absent from the squad with an injury. Palace make three changes with Pape Souare, captain Mile Jedinak and Glenn Murray all brought in. Could former Man United defender Nemanja Vidic be heading back to Old Trafford?
Outbreak of multidrug-resistant CTX-M-15-producing Enterobacter cloacae in a neonatal intensive care unit. Newborns are rarely infected by extended-spectrum β-lactamase (ESBL)-producing members of the Enterobacteriaceae. In a neonatal intensive care unit, 14 newborns were infected or colonized by CTX-M-15-producing Enterobacter cloacae. All seven infected patients had underlying medical conditions, and five of them were treated successfully with meropenem, whilst one untreated patient died. Paediatric infections caused by multidrug-resistant ESBL-producing Enterobacter cloacae constitute a critical clinical and epidemiological issue.
The concept, key technologies and applications of temporal-spatial information infrastructure Abstract Smart city is the development of digital city; as its main supporting technology, the digital city geo-spatial framework has to be upgraded to the temporal-spatial information infrastructure (TSII). First, this paper proposes the concept and basic framework of smart city and defines TSII as the processing, integration, mining analysis, and sharing of time-stamped geographic data, together with the related policy, regulations and standards, technology, facilities, mechanisms, and human resources. The framework has four components: the benchmark of time and space, temporal-spatial big data, the cloud service platform and the related supporting environment. Second, the temporal-spatial big data and cloud service platform are elaborated. Finally, an application of TSII constructed by the Xicheng District Planning Bureau in Beijing is introduced, which provides a useful reference for the construction of smart city. Introduction A city is not only an area in which the majority of active economic development occurs with the highest concentration of information, but also an area with the largest population and number of social management challenges (Liu, Gong, and Yu 2014). Urban modernization is the dominant driving force of social and economic development. To solve a series of problems in the development of urban modernization, scholars have proposed the concept of "digital city, " which originated from the "digital Earth. " In recent years, some scholars have also proposed the concept of the "smart city. " The Chinese Government pays significant attention to the construction of smart city, and the National New Urbanization Planning has proposed an information network broadband, informative planning and management, intelligent infrastructure, convenient public services, modern industry, and fine social management.
In 2014, the National Development and Reform Commission and other eight departments jointly issued "about promoting the guidance to the healthy construction of smart city" (referred to as Guidances in this paper) to strengthen coordination and promote healthy development. The National Development and Reform Commission, the Ministry of Science and Technology, Housing and Urban-Rural Development, Industry and Information Technology, and the National Geographic Information Bureau of Surveying and Mapping conducted exploratory trial work in their fields. Smart city is the continuation of digital city, which is under the impetus of the National Geographic Information Bureau of Surveying and Mapping in China. Beginning in 2006, more than 300 cities in China performed construction on the Digital City Geographic Framework. By the integration of the urban population and legal, economic, social, and cultural information in an information platform, this framework becomes an indispensable information resource and supports information sharing and exchange among all departments and industries in the city (). Compared with digital city, smart city is supported by the Internet of Things, big data, cloud computing, mobile Internet, and other new technologies. At the stage of the smart city construction, new requirements for the existing geo-spatial framework are proposed regarding interaction experience, real-time response, mobility, control, and autonomy. The geo-spatial framework needs to be updated to a temporal-spatial information infrastructure (TSII) (Gong and Wang 2013;). To promote the construction of smart city in China, this paper introduces the concepts, framework, main technologies, and application of TSII. The paper is organized as follows: Section 2 introduces the related basic concepts, including the smart city and the TSII, and its historical evolution. Section 3 describes the temporal-spatial big data of the TSII. 
The main technology of a spatial-temporal information cloud platform is elaborated in Section 4. An application is presented OPEN ACCESS Although these definitions substantially differ, the main purpose of smart city construction is to support intensive, intelligent, green, and low-carbon new urbanization development, and enable the market to have a decisive role in the allocation of resources. Smart city will motivate the government to collect material, information, and intelligence resources; promote the innovative application of a new generation of information technology; strengthen intelligent urban management and services; actively develop people's livelihoods; effectively improve the comprehensive carrying capacity and well-being of urban residents; and promote the quality and level of urbanization development. Based on the existing experiences of smart city construction both at home and abroad, each city has unique characteristics that are incorporated into a common total framework, and the construction stages and identified problems differ from one another (Li, Shao, and Yang 2012). As shown in Figure 1, the framework includes the perception layer, the network layer, the public facilities layer, the database layer, the cloud information platform layer, the wisdom application and user layer, and related system of Institution and Policy standards. in Section 5, and the paper is summarized in the last section. Smart city Smart city is termed via the use of new generation information technologies (such as the Internet of Things, cloud computing, big data, and geographic information integration) to promote city planning, construction, management, and service (Guidances). From a developing point of view, digital city is in the preliminary phase of urban information. And smart city is an intelligent digital city and the extension of a digital city (Li, Shao, and Yang 2011). 
From a technical point of view, digital city can be seamlessly connected to physical city via the Internet of Things. Sensory data are processed in real-time using big data, cloud computing, and mobile Internet technologies and subsequently provide intelligent services. From a composition point of view, smart city should include real-time information intelligence equipment, such as human senses, or a human behavior organ system and a response and disposal thematic system. Temporal-spatial information infrastructure We introduce infrastructure and spatial information infrastructure. Infrastructure usually refers to the following: social production, people who provide public services, material engineering facilities which are employed to ensure the normal order of a country or region, and social economic activities of a public service system. Spatial information infrastructure primarily refers to the spatial data framework, spatial data coordination, the management and distribution system, the spatial data exchange website, and the spatial data transfer standards. Based on the spatial data infrastructure and information highway, the digital Earth technology is extensively applied to the Earth's surface spatial data query, visualization, analysis, and knowledge mining (Liu, Gong, and Yu 2015;Liu, Zhang, and Gong 2014;Sui 2011). The concept of TSII denotes the processes, integration, mining analysis, and sharing of time-stamps geographic data. It also includes related policy, regulations and standards, technology, facilities, mechanism, and human resources. TSII is the foundation of smart city construction; its construction is one of the important responsibilities of the Surveying and Mapping Geographic Information Department. The Guidances defines the TSII as the "basis of unified geo-spatial framework, TSII collects geo-spatial data and related database, and all related departments and industries business information can overlap on it. 
" As shown in Figure 1, the TSII primarily includes three main components: Big data platform: gathering space-time information resources, integrate static real-time geographic information, and construct a temporal-spatial big data center. Cloud service platform: using a service bus to construct a cloud service platform to support ondemand applications. Supporting environment: the related policy, regulations, and standards. From geographic spatial framework to TSII Compared with digital city, smart city has three main characteristics: the main data in the digital city phase is static. However, dynamic information becomes the main data source in the smart city phase. "Physical city" and "digital city" are only connected in the smart city phase. Information analysis and knowledge generation are emphasized in the smart city phase, whereas city information sharing occurs in the digital city phase (Gong and Wang 2013). At the stage of digital city, the contents of the geospatial framework contain the benchmark of space, a geographic information database, and a geographic information service (Wang 2014). With the development of smart city construction, the geo-spatial framework needs to be updated to TSII. As shown in Figure 2, four corresponding promotions occur: promote the space datum to a space-time datum; promote the geographic information database to a temporal-spatial information database; promote the geographic information service to a temporal-spatial information cloud service; promote the dispersed cloud cluster supporting environment to an intensive cloud environment. The temporal-spatial big data and cloud platform are deployed in the same cloud environment, and information exchange is based on the service bus. Temporal-spatial big data platform In the smart city phase, temporal-spatial data include not only different versions of history data but also the location data and streaming data collected by the Internet of Things or an intelligent device. 
These geographic information data, which have time-stamps, are managed in a unified temporal-spatial big data platform. As shown in Figure 2, the temporal-spatial information management platform is primarily composed of data gathering, data processing, and data management. Multi-source and multi-heterogeneous structured and unstructured data are gathered, stored, processed, and integrated in this platform. Data gathering For static geographic data (including historical and vision geographic information data), offline copies are entered into a database from the department of surveying and mapping geographic information on a regular basis. For location data and streaming data from the Internet of Things device, the data are appended to a big data center by a multi-level dynamic service via wired or wireless Internet access. Real-time perception data are usually deployed in specific professional departments and their interpretation and metadata are added in real time by push or draw patterns. The source data are invoked as necessary to avoid information overload, which is also known as infobesity. Structured and unstructured static or streaming data must have time, space, and attribute characteristics. Time indicates the timeliness of data, space represents the space features, and attributes indicate the semantic information. Based on the existing geographic information database, static data information can be updated to temporal-spatial information by expansion and reorganization. For streaming data and multi-level summary data, data are imported in real time. Data processing Data are the core content of urban management. Although these data are employed in a variety of formats, unified pre-processing and fusion are required to satisfy various application requirements. Structured and unstructured temporal-spatial big data, which are and Gong 2014). 
For thematic information, according to the segmentation of gazetteer identification and based on ontology and word similarity, a local fuzzy method is used to achieve efficient, accurate, and practical gazetteer matching (Janowicz and Kessler 2008). Relying on the temporal-spatial benchmark, a name/address positioning and geometry matching method accomplishes the ordering and unstructured information processing. Data management Temporal-spatial data management primarily includes dynamic big data acquisition, analysis, and visualization, analog inference, and data mining. Data management tools can support data input and output, editing and processing, query, statistics, visualization, dynamic update, history data management, meta-data management, and security management. It supports different types of data fusion, long phase comparisons, and change information extraction, as well as the superposition of temporalspatial data classification, space-time analysis, time series analysis, and forecasting analysis. To satisfy the application requirements of smart city, a data management platform should also support analog inference and big data mining. Analog inference includes temporal-spatial process simulation and a decision-making plan. Taking an event or situation as the object, space-time process simulation retrieves presented online in real time, should undergo a serialization process both online and offline. Data processing mainly unifies format, temporal-spatial benchmark, and space serialization. A unified format refers to different geographic information data that can realize non-destructive format conversion and the topology relation will be established in the conversion process, in which contradictions are eliminated and the data become disorganized. In the process of unifying temporal-spatial benchmark, unifying the space benchmark is conducted first. Data matching and merging methods are employed to realize the multi-scale, vector, and image data update. 
Then, the time benchmark is unified to ensure the time consistency among different scale data-sets. Space serialization primarily refers to the establishment of coordinates for the space of structured data by a data matching method. The main process is as follows: Gazetteer feature extraction: some data with spatial coordinate information can be integrated by geometry matching (Mostern and Johnson 2008). For data without coordinate information, the property contains the address. Part of the data only contains some place names. Based on semantic and geographic ontology cognition, the place name feature can be extracted by combining Chinese word segmentation and data comparison. Data matching: for data with spatial location information, it can be performed by geometric feature matching (Liu, Zhang, Figure 3, the cloud platform contains the basic capabilities, such as Software as a Service (SASS), Platform as a Service (PASS), and Infrastructure as a Service (IAAS). With a knowledge database and resource library at its core, a cloud services system is constructed using a service bus with name/address matching and business flow engine to provide on-demand services for all types of business applications. Based on an existing geographic service platform, the cloud service system expands the elastic distribution of the computing and storage capacity by adding the common sensor access service, intelligent interpretation service and space-time analysis, and a simulated deduction service. In addition, the place/name address matching and business flow engine are added to the application. This section focuses on static geographical information data, dynamic data service, function service, a data matching engine, and a work flow engine. Static geographical data service Static geographical data primarily contains vectors, images, place names, addresses, building models, and other new products. 
These data are produced and published in the cloud platform in accordance with national standards. These services include web feature services, web map services, web covering services, three-dimensional web model services, three-dimensional terrain the related corresponding geographic object and the time, space and attribute content to realize the digital representation of scenes and events. By adjusting the main parameters, the decision-making plan of dynamic inference or manual intervention calculates the effects. Big data mining primarily employs a historical reasoning method, decision tree and genetic algorithm, cluster analysis, connection analysis, online analytical processing, neural network, discriminant analysis, and logical analysis, and then support vector machine, and Bayes' theory to perform an analysis of data mining tools and enable big custom routing data mining ability. Temporal-spatial cloud service platform Smart city temporal-spatial information cloud platform (referred to as the cloud platform) employs a ubiquitous network, sensing devices, and intelligent computing to collect various types of temporal-spatial information and form perception in real time. It is an important spatial information infrastructure for smart city construction, with which the decision-making is more intelligent and broader, and the information service is more spiritual, safer, and more reliable. A temporal-spatial information cloud platform uses cloud computing technology to provide geographic location data and interpret perception, function, and development service. Connect Things IntelliSense location services equipment to provide streaming data interpretation services. Multi-level summary data services for streaming data-set. Push and pull the multi-level summary data and the streaming data sharing services. Cloud management platform A cloud management platform is the core of the system. 
Its main tasks are as follows: Integrate the accessed large-scale data-set to provide services for government or industry users. As a basic data exchange platform, developers can employ data aggregation, transformation, processing, and a mining interface to construct their own application systems. In addition to geographic information services (such as data function, interfaces, and features services), it also provides infrastructure services capabilities, on-demand service capacity, and operation and maintenance monitoring capabilities. Infrastructure services capability: the host system can store the data and related functions to support the end user who is deployed in the cloud environment. Based on the cloud operating system, the related resources are dynamically allocated for high concurrent access, and each user can flexibly mobilize and release the resources to complete a task. A service-hosting environment, which originates from cloud computing centers, is part of the information infrastructure services and provides a unified portal to users via the aggregation service. data services, three-dimensional texture services, and address geocoding services. Dynamic real-time data service In smart city, a ground acquisition system (such as Radio Frequency Identification, cameras, and environmental sensors) and satellite technologies are combined to form a longitudinal three-dimensional cover and a horizontal comprehensive seamless sense perception network. A cloud platform provides dynamic accumulation of streaming data and a multi-level dynamic data service (Gong and Wang 2013). Real-time location service A TSII accesses sensor network data, which has the ability of spatial orientation, to provide real-time location services for different users. 
The infrastructure integrates the continuous running satellite positioning service into a cloud platform and provides a controlled and authorized rear differential GPS network service that is based on wired and wireless networks; incorporates the existing digital observing systems into a cloud platform and provides real-time data services to update historical geographic information; provides real-time location information and map-matching services. Streaming data and multi-level summary information Connect Things IntelliSense location services equipment to access real-time location information. information. User management consists of the list of users, user group management, role management, and audit approval. Data match engine As a bridge between the spatial information and other information, name/address matching engine is used to precisely position and help users to integrate thematic information (;Devogele, Parent, and Spaccapietra 1998). By the geometric and semantic matching method, large various sources of non-spatial data are integrated and analyzed to provide location business services (such as navigation). On-demand service: provide temporal data acquisition, perception and location, implement streaming data interpretation, develop interface and map features services, and use professional feature signature and knowledge databases to develop a certain learning capability knowledge engine. Based on natural language or main information in a human-machine collaborative environment, the end user can automatically or intelligently assemble and provide services on demand. Operation monitoring: the system should include platform settings, user management, business review, Internet monitoring, resource hosts, resource publishing, and other capabilities. The user can modify the basic information, database information, and server terminate instance, and data calls. 
Run monitoring consists of log monitoring services, graphical operation monitoring, real-time operation tracking service, and service instance state controlling. Application In the process of TSII construction, which adheres to the demand-and problem-oriented, livelihood issues, information sharing and business integration should be emphasized. A variety of construction and operation models exist, such as the PPP model, which can be employed by application sectors and jointly implemented with cloud platform construction and city maintenance Work flow engine In accordance with the logic and rules, work flow is a model that properly represents the entire work process, automatically calculates its implementation, and helps the user to customize the thematic system (). Rules library management primarily includes a predefined standardization rules module and its flow relationship, predefined process examples, and existing business processes, such as sample storage, analysis, edit, and return operations. Run service management consists of business process loading and interpretation, service instances creation, such as run, suspend, and resume, companies. We introduce an application of TSII, which is constructed by the Xicheng District in Beijing. By conducting a temporal database, temporal-spatial information cloud platform, supporting environment improvements, the Planning Bureau in Beijing uses cloud computing, database mining and networking technologies to construct a TSII to provide spatiotemporal big data management and service. The entire solution consists of a big data management platform, a cloud service platform, and numerous applications. The interface of the cloud platform is shown in Figure 4. The multi-source data are unified and managed by a big data management and service platform and the new data are imported, exported, and integrated into this platform. Figure 5(a) shows the main interface of the platform, and Figure 5(b) shows a data matching process. 
The non-spatial data might be mistaken for location information, thus, the integration and analysis are unified in the application. The user can match the plurality of address information every time. This system is extensively applied in public security, planning, land, transportation, environmental monitoring, social management, national parks, and other fields. Based on the temporal-spatial information intelligent assembly services of the cloud platform, a social service response management GIS system is developed in Figures 6(a) and (b). Conclusions To improve smart city construction, the Chinese State Bureau of Surveying and Mapping Geographic Information has promoted TSII construction in many cities. Aiming at reducing the construction costs, the existing achievement of the geo-spatial framework in digital city needs to be completely utilized. This paper proposes the concepts, main technologies, and framework of TSII and provides guidance and inspiration for people and companies in the process of TSII construction. Funding This study is supported by the Fundamental Research Project of the Chinese Academy of Surveying and Mapping . Notes on contributors Chengming Li is a researcher of the Chinese Academy of Surveying and Mapping. His research interests include smart city, cartographic generalization, and geographic information engineering and applications. Po Liu is an assistant researcher of the Chinese Academy of Surveying and Mapping. His research interests include virtual geographical environments, geographic information engineering and applications.
/* * Loads a user from shared preferences, returns null if no user found * returns user if user found. */ public UserModel load() { Gson gson = GeoChanGsonOffline.getGson(); SharedPreferences settings = getSharedPreferences(); String jsonString = settings.getString("theUser", null); if (jsonString == null) { return null; } else { UserModel theUser = gson.fromJson(jsonString, UserModel.class); return theUser; } }
import os
from copy import copy

import requests

from datadog_checks.base import AgentCheck
from datadog_checks.base.utils.subprocess_output import get_subprocess_output

# Timeout (seconds) for HTTP calls to the OctoPrint REST API.
TIMEOUT = 10
JOB_URL = "/api/job"
EXTRUDER_URL = "/api/printer/tool"
BED_URL = "/api/printer/bed"


class OctoPrintCheck(AgentCheck):
    """Datadog Agent check that reports OctoPrint job, extruder, bed, and
    Raspberry Pi core-temperature metrics via the OctoPrint REST API."""

    def __init__(self, name, init_config, instances):
        super(OctoPrintCheck, self).__init__(name, init_config, instances)
        # Per-instance configuration: base URL, API key, and extra tags.
        self.url = self.instance.get('url')
        self.octo_api_key = self.instance.get('octo_api_key')
        self.tags = self.instance.get('tags', [])
        self.log.debug('OctoPrint monitoring starting on %s', self.url)

    def get_rpi_core_temp(self):
        """Return the Raspberry Pi core temperature in degrees Celsius.

        Falls back to 0.0 when no temperature source is available or the
        vcgencmd tool cannot be used by the agent user.
        """
        if os.path.isfile("/usr/bin/vcgencmd"):
            temp, err, retcode = get_subprocess_output(
                ["/usr/bin/vcgencmd", "measure_temp"], self.log, raise_on_empty_output=True
            )
            self.log.debug('rpi core temp - temp: %s', temp)
            self.log.debug('rpi core temp - err: %s', err)
            self.log.debug('rpi core temp - retcode: %s', retcode)
            # vcgencmd prints e.g. "temp=48.3'C"; strip the wrapper text.
            temp = temp.replace("temp=", "").replace("'C", "")
            if temp.startswith("VCHI initialization failed"):
                self.log.info(
                    "Unable to get rPi temp. To resolve, add the 'video' group to the 'dd-agent' user"
                    " by running `sudo usermod -aG video dd-agent`"
                )
                temp = 0.0
        elif os.path.isfile("/sys/class/thermal/thermal_zone0/temp"):
            temp, err, retcode = get_subprocess_output(
                ["cat", "/sys/class/thermal/thermal_zone0/temp"], self.log, raise_on_empty_output=True
            )
            # BUGFIX: the sysfs file yields a *string* of millidegrees; the
            # original divided the string directly, raising a TypeError.
            temp = float(temp) / 1000
        else:
            self.log.info(
                "The command typically used to get the core temperature, /usr/bin/vcgencmd, "
                "is not available on this system."
            )
            temp = 0.0
        return float(temp)

    def seconds_to_minutes(self, seconds):
        """Convert a duration in seconds to whole minutes; None/0 maps to 0."""
        if not seconds:
            return 0
        else:
            return int(seconds / 60)

    # Get stats from REST API as json
    def get_api_info(self, path):
        """GET ``self.url + path`` with the configured API key; return parsed JSON."""
        url = self.url + path
        key = self.octo_api_key
        headers = {"X-Api-Key": key, "content-type": "application/json"}
        req = requests.get(url, timeout=TIMEOUT, headers=headers)
        return req.json()

    def check(self, instance):
        """Collect and submit all OctoPrint gauges for one check run."""
        tags = copy(self.tags)

        rpi_core_temp = self.get_rpi_core_temp()
        self.gauge("octoprint.rpi_core_temp", rpi_core_temp, tags=tags)

        # get job data
        job_info = self.get_api_info(JOB_URL)

        # Job State; states: Printing, Paused, Cancelled, Operational...
        state = job_info["state"]
        if state == "Operational":
            printer_state = 0
        elif state == "Paused":
            printer_state = 1
        elif state == "Printing":
            printer_state = 2
        else:
            printer_state = -1
        self.gauge("octoprint.printer_state", printer_state, tags=tags)

        # Print Job Percent Completed and Time Estimate if Job Active
        est_print_time = self.seconds_to_minutes(job_info["job"]["estimatedPrintTime"])
        if est_print_time > 0:
            pct_completed = job_info["progress"]["completion"]
            # BUGFIX: removed leftover debug print() of the est_print_time type.
            self.gauge("octoprint.est_print_time", est_print_time, tags=tags)
            self.gauge("octoprint.pct_completed", pct_completed, tags=tags)

        # Print Job Elapsed and Remaining Times
        print_job_time = self.seconds_to_minutes(job_info["progress"]["printTime"])
        print_job_time_left = self.seconds_to_minutes(job_info["progress"]["printTimeLeft"])
        self.gauge("octoprint.print_job_time", print_job_time, tags=tags)
        self.gauge("octoprint.print_job_time_left", print_job_time_left, tags=tags)

        # Extruder Temperatures (one gauge pair per tool)
        extruder_temps = self.get_api_info(EXTRUDER_URL)
        for key in extruder_temps.keys():
            tooltags = tags + ['toolname:' + key]
            current_tool_temp = extruder_temps[key]["actual"]
            target_tool_temp = extruder_temps[key]["target"]
            self.gauge("octoprint.current_tool_temp", current_tool_temp, tags=tooltags)
            self.gauge("octoprint.target_tool_temp", target_tool_temp, tags=tooltags)

        # Bed Temperatures (one gauge pair per bed)
        bed_temp = self.get_api_info(BED_URL)
        for key in bed_temp.keys():
            bedtags = tags + ['bedname:' + key]
            current_bed_temp = bed_temp[key]["actual"]
            target_bed_temp = bed_temp[key]["target"]
            self.gauge("octoprint.current_bed_temp", current_bed_temp, tags=bedtags)
            self.gauge("octoprint.target_bed_temp", target_bed_temp, tags=bedtags)
Effects of Three Commercially Available Plant Growth Regulators and One Plant Growth Enhancer on Pepper (Capsicum annuum L.) Yield and Pigment Content ABSTRACT Commercially available plant growth regulators (PGR) and plant growth enhancers (PGE) may be used to increase crop productivity and improve fertilizer and water use efficiency. Little independent field-scale research has been conducted concerning the benefit of these materials in actual production systems. This study was initiated to determine the efficacy of foliar PGR and PGE application in the production of paprika and cayenne cultivars of pepper (Capsicum annuum L.). Three PGRs and one PGE were foliarly applied to several production pepper fields in the Pecos River valley of southeastern New Mexico during the 1997 and 1998 growing seasons. The Reagan loam (fine-silty, mixed, superactive thermic Ustic Haplocalcid) soils were furrow irrigated with artesian groundwater, and individual replicated treatment blocks were a minimum 0.5 ha in size. All peppers were hand harvested. The paprika peppers were dried to constant weight, yields were determined, and paprika pigment extracts were analyzed spectrometrically. In 1997 and 1998, yield was increased by 15.1 to 24.2% in plants treated with some PGRs. PGE treatment resulted in significant paprika yield increases in 1997 and 1998 and a significant yield increase in cayenne in 1997, but did not have a significant effect on paprika pigment content. The results indicated that foliar application of PGRs and PGEs to pepper cultivars can enhance yields in commercial production fields.
Escaping introns in COI through cDNA barcoding of mushrooms: Pleurotus as a test case Abstract DNA barcoding involves the use of one or more short, standardized DNA fragments for the rapid identification of species. A 648-bp segment near the 5′ terminus of the mitochondrial cytochrome c oxidase subunit I (COI) gene has been adopted as the universal DNA barcode for members of the animal kingdom, but its utility in mushrooms is complicated by the frequent occurrence of large introns. As a consequence, ITS has been adopted as the standard DNA barcode marker for mushrooms despite several shortcomings. This study employed newly designed primers coupled with cDNA analysis to examine COI sequence diversity in six species of Pleurotus and compared these results with those for ITS. The ability of the COI gene to discriminate six species of Pleurotus, the commonly cultivated oyster mushroom, was examined by analysis of cDNA. The amplification success, sequence variation within and among species, and the ability to design effective primers were tested. We compared ITS sequences to their COI cDNA counterparts for all isolates. ITS discriminated between all six species, but some sequence results were uninterpretable because of length variation among ITS copies. By comparison, complete COI sequences were recovered from all but three individuals of Pleurotus giganteus, where only the 5′ region was obtained. The COI sequences permitted the resolution of all species when partial data were excluded for P. giganteus. Our results suggest that COI can be a useful barcode marker for mushrooms when cDNA analysis is adopted, permitting identifications in cases where ITS cannot be recovered, or offering higher resolution when fresh tissue is available. The suitability of this approach remains to be confirmed for other mushrooms. ITS has been adopted as the standard DNA barcode marker for mushrooms despite several shortcomings.
This study employed newly designed primers coupled with cDNA analysis to examine COI sequence diversity in six species of Pleurotus and compared these results with those for ITS. The ability of the COI gene to discriminate six species of Pleurotus, the commonly cultivated oyster mushroom, was examined by analysis of cDNA. The amplification success, sequence variation within and among species, and the ability to design effective primers was tested. We compared ITS sequences to their COI cDNA counterparts for all isolates. ITS discriminated between all six species, but some sequence results were uninterpretable, because of length variation among ITS copies. By comparison, a complete COI sequences were recovered from all but three individuals of Pleurotus giganteus where only the 5 region was obtained. The COI sequences permitted the resolution of all species when partial data was excluded for P. giganteus. Our results suggest that COI can be a useful barcode marker for mushrooms when cDNA analysis is adopted, permitting identifications in cases where ITS cannot be recovered or where it offers higher resolution when fresh tissue is. The suitability of this approach remains to be confirmed for other mushrooms. K E Y W O R D S COI, DNA barcoding, internal transcribed spacer, oyster mushrooms, taxonomic verification | INTRODUCTION DNA barcoding employs short, standardized DNA fragments for the rapid identification of species (Gilmore, Graefenhan, Louis Seize, & Seifert, 2009;Hebert, Cywinska, & Ball, 2003;Nguyen & Seifert, 2008;). This approach is particularly valuable for verifying species identification, and for the evaluation of taxonomic diversity in organisms with cryptic morphology such as fungi (Dentinger, Didukh, & Moncalvo, 2011). 
The use of molecular tools is essential for identifying and classifying the 90%-95% of undescribed fungi ; The ribosomal internal transcribed spacer (ITS), a highly variable region between the conserved sequences of the small subunit, 5.8S, and large subunit rRNA genes, has been adopted as the primary DNA barcode marker for fungi (Schoch, Seifert, Huhndorf, et al., 2012). The ideal DNA barcode region is easy to amplify and variable enough to discriminate species, a condition that is best met when variation within species is low and divergence between species is high, a situation which creates a "barcode gap" (;). A 648-bp segment near the 5 terminus of the mitochondrial cytochrome c oxidase subunit I (COI) gene has been adopted as the DNA barcode region for animals because its performance in species discrimination is high and it is usually easy to recover (). Contrary to animals, no single gene region has been found that serves as an ideal DNA barcode for fungi and plants. As a consequence, a multi-locus barcode approach has been adopted to improve resolution across plants and fungi (;), and ITS has been adopted as the standard barcode region for fungi (Avin, Bhassu, Shin, & Sabaratnam, 2012;Begerow, Nilsson, Unterseher, & Maier, 2010;Schoch, Seifert, Huhndorf, et al., 2012;Seifert, 2009) although studies have shown that this gene region often fails to distinguish closely related fungal species (Schoch, Seifert, Caldeira, et al., 2012). Despite the acceptance of ITS as the fungal barcode, length variation in this region makes sequence alignment difficult across divergent taxa (Dentinger et al.. Additional markers beyond ITS are needed for fungal barcoding, but finding suitable loci that can be easily amplified across the diversity of fungi remains a challenge (;). COI has potential to address this gap because alignment of this locus across a divergent set of taxa is trivial (). 
A few studies have compared the resolution of ITS and COI in sets of closely allied species. COI was more effective than ITS in Penicillium (), while COI and ITS were equally effective in Leohumicola, (Nguyen & Seifert, 2008). In the Agaricomycotina, COI and ITS generally delivered similar resolution, but the prevalence of introns resulted in COI not being recovered from many taxa (). Conversely, COI sequences showed low divergences in Fusarium () and Aspergillus (), although data interpretation was complicated by the apparent presence of multiple copies of COI, perhaps reflecting the recovery of nuclear pseudogenes. The strong performance of COI as a DNA barcode in animals () suggests the value of exploring its use as a marker in mushrooms. Similar to the multi-locus barcode approach used in plants, COI could be used in conjunction with ITS for the identification of fungal species. There is one barrier to the implementation; the prevalence of introns in the COI gene of many fungal species including mushrooms is well documented (Seifert, 2009;). For example, nine introns occur in Pleurotus ostreatus (Wang, Zeng, Hon, Zhang, & Leung, 2008), 19 in Agaricus bisporus (), 15 in Trametes cingulata (Haridas & Gantt, 2010) and four in Agrocybe aegerita (Gonzalez, Barroso, & Labarre, 1998). These introns are often long, leading to extreme variation in length of the COI gene from approximately 1,584 bp in species lacking introns to over 22 kb in those with many introns (;;Haridas & Gantt, 2010;). The presence of these introns impedes sequence recovery by conventional PCR (Seifert, 2009;), a factor which has supported the adoption of ITS as the sole DNA barcode for mushrooms ). Although COI seems to have the potential to reliably identify taxa, there is a need for more detailed study. 
In particular, given the prevalence of introns and the apparent occurrence of nuclear pseudogenes, it is critical to adopt RT-PCR to properly recover and evaluate the capacity of COI sequences to resolve fungal species. In this study, we examine the ability of the COI gene to discriminate six species of Pleurotus. We test amplification success, sequence variation within and among species, and the ability to design effective primers. We also recover ITS sequences from all isolates to allow their comparison with the sequences recovered through the analysis of cDNA from COI. | Sample collection The 24 strains examined in this study included representatives of six species of Pleurotus (Table 1). They were mostly obtained from mushroom farms in Malaysia or from the University of Malaya collection. A few isolates were newly collected from Malaysia, while others were imported from China or Iraq (Table 1). The species assignment for each isolate was verified by comparison of morphological traits of basidiocarps and mycelial cultures. | DNA and RNA extraction and cDNA synthesis Total genomic DNA was extracted from fresh mycelium by a rapid protocol (Avin, Bhassu, & Sabaratnam, 2013). Briefly, after adding sufficient 2% SDS buffer, the samples were homogenized at 65°C for 30 min. The mixture was purified twice with phenol: CHCl 3 : Isoamyl alcohol (25: 24: 1). DNA was precipitated with cold T A B L E 1 List of species and strains used in this study and length of amplicons for COI and ITS. Bold process IDs for the samples sequenced in this are also indicated and are publically available isopropanol, and then pelleted by centrifugation at 4°C for 15 min at 11,000 g. The resultant DNA pellet was dissolved in TE buffer and stored at -20°C. Total RNA was isolated from fresh mycelium using Trizol (Invitrogen, USA). Briefly, sufficient Trizol was added to the homogenized mycelia and incubated at 25°C for 15 min, then purified by chloroform. 
RNA was precipitated by cold ethanol and the pellet was washed twice with 70% ethanol. The RNA pellet was then dissolved in RNAase free water and stored at -80°C. Samples that did not successfully amplify in the first round of RT-PCR were re-extracted using Nucleospin ® RNA columns (Macherey-Nagel, Germany) following the manufacturers protocol. This included a DNAase treatment prior to elution in nuclease free water. Total cDNA was synthesized from the RNA extracts using an Access One Step RT-PCR system kit (Promega, USA). The first mixture was generated by gently mixing 1.0 l of total extracted RNA, 1.0 l of Oligo dt primer, and 3.0 l of Nuclease-free H 2 O that was incubated for 5 min at 70°C. The second mixture was prepared by mixing Mixtures I and II were then combined for each sample and incubated for: 5 min at 25°C, 60 min at 42°C, and 15 min at 70°C before being stored at -20°C. | Primer design The coding sequence of COI from the mitochondrial genome of P. ostreatus (19: EF204913) was used as a reference to design primers ( Figure 1). Several criteria, including the generation of proper length fragments (800-900 bp) with enough conserved sites in the binding regions were employed to design primers. NCBI Primer-BLAST was used to design primer pairs for two cDNA regions that spanned the coding sequence of COI (Rozen & Skaletsky, 2000;). Figure 1 shows the location and orientation of these primers on the open reading frames of COI. Primer ID, sequence and annealing temperatures are provided in Table 2. | PCR and reverse transcription (RT)-PCR conditions PCR amplification of the COI cDNA employed an initial denaturation at 95°C for 5 min; followed by 30 cycles with denaturation at 94°C, annealing at 55°C and extension at 72°C for 1 min; followed by a final Flexi DNA Polymerase (Promega, USA). 
We used genomic DNA to amplify and sequence the ITS region with primers ITS1 and ITS4 using standard protocols (White, Bruns, Lee, & Taylor, 1990), or with local primers ITS1-UM2 and ITS2-UM2 (Avin, Bhassu, Shin, & Vikineswary, 2014). Successfully amplified PCR products were purified using the Nucleospin Extract II Kit (Chemopharm), and bidirectionally sequenced using an ABI 3730XL automated sequencer. Sequences along with voucher information were deposited in the Barcode of Life Data System (BOLD Process IDs; CDB001-CDB024-15) (Ratnasingham & Hebert, 2007) and are publicly available in NCBI GenBank (Table 1). For both genes, ML trees were constructed in MEGA 6 () under the selected model; branch topology was optimized using extensive subtree pruning and regrafting (SPR) with branch swap filter selected. The stability of nodes was inferred by non-parametric bootstrapping, using 1,000 heuristic bootstrap pseudoreplicates. DNAsp ver. 5.10 was used to calculate the haplotype data file and genetic divergences (Librado & Rozas, 2009). To estimate the significance of variance within and among species, an AMOVA (analysis of molecular variance) was calculated using Arlequin ver. | RESULTS An interpretable ITS sequence was recovered from 20 of the 24 specimens, including at least one representative of each species with sequences varying in length from 592 to 625 bp (Table 1). A COI sequence was recovered from all specimens, but only a partial COI-3 sequence was obtained from specimens of P. giganteus. Near full length COI sequences were generated by aligning and assembling a consensus of the 5 and 3 reads for the five species with reads for both regions (Table 1 and 3). Because the COI sequences were generated from cDNA template generated by RT-PCR they lacked introns, while ITS was amplified using standard PCR (Figure 2 and Table 3). The percentage of variable sites for all six species was computed for both genes (Table 3). 
Across all 1,516 sites for COI, 76.8% were conserved, while 23.1% were variable with 12.3% being parsimony informative and 10.9% singletons. By comparison, 37.7% of the 715 ITS sites were conserved, while 55.2% were variable with 38.4% being parsimony informative, and 17.0% singletons (Table 3). Due to the indels in ITS, the mean divergence for all 20 sequences was higher for ITS (0.199) than for COI (0.059). Intra-specific divergences were generally slightly higher for ITS than COI, but so too were inter-specific divergences. Barcode gap analysis supports higher interspecific and intraspecific distances for ITS than COI. Both markers indicate P. ostreatus, P. eryngii, and P. pulmonarius are relatively close (Table 4) and fall under the 2% divergence threshold for COI and ITS (except P. eryngii). However, the use of the closely related mushrooms in our analysis with small sample sizes may explain the low divergence threshold (below 2%). The maximum intraspecific distance was greater for both COI and ITS in P. ostreatus. Otherwise, intraspecific distances were low for the remaining Pleurotus species with multiple representatives per species. Figure 2a-d shows ML trees for COI and ITS with bootstrap values for each node based on 1,000 replicates. ITS (Figure 2c,d) discriminated all six species with strong support, but sequences from four of eight specimens of P. ostreatus failed. COI sequences were recovered from all specimens, albeit just partial COI-5 sequences for P. giganteus. COI failed to distinguish between P. pulmonarius and P. giganteus when partial 5 sequences were included, but when partial sequences were excluded, COI distinguished between these two species with strong support (Figure 2). Overall, both markers readily distinguished between species with moderate to strong support. 
When sampling was improved for ITS with GenBank sequences, there was strong support for the monophyly of the six Pleurotus species, results confirming our morphological identifications (Figure 2d). | DISCUSSION In contrast to prior studies that failed to recover COI through conventional PCR-based approaches (;), the cDNA approach employed in this analysis recovered full COI sequences from all six species of oyster mushrooms (barring a few incomplete recoveries for P. giganteus). The past failures of T A B L E 2 List of the primers used for amplification of COI and ITS from Pleurotus species F I G U R E 2 COI and ITS phylogenetic analyses. (a-d) Phylogeny reconstruction based on maximum likelihood under a GTR+G model for COI and a TN92+I model for ITS. Numbers at the nodes indicate the percentage of bootstrap replicates supporting a given topology, although bootstrap values below 50% are not indicated. Sample IDs and species delimitations are indicated at the tips of the tree. One COI tree for Pleurotus giganteus is based on 759-bp COI-5′ fragments, while sequences for the other taxa were full length 1,516 bp. Twenty-two ITS sequences and two mitochondrial sequences were retrieved from GenBank; they are indicated in yellow standard PCR were undoubtedly due to the presence of several large introns in the COI gene of Pleurotus (;Seifert, 2009;). However, cDNA barcoding escapes this problem, generating amplicons that are easily aligned. The present study generated 1,516-bp COI sequences from 21 of the 24 specimens, failing only to recover full sequence information from the 3′ region of P. giganteus. Our failure to amplify the 3′ end of P. giganteus reflects the need to further optimize COI primers for Pleurotus (and other mushrooms) given the diagnostic ability of the 3′ end of this gene. Alternatively, more samples of different species should be sequenced and aligned to design appropriate primer pairs on the most conserved regions.
ITS sequences were recovered from all six species, but results from four of the 24 specimens were uninterpretable due to sequence length variation. Although the number of species examined in this study was small, the success of COI in discriminating these taxa justifies a larger-scale effort to validate the effectiveness of COI as a barcode for Pleurotus and other mushrooms. Paralogues (multiple copies) of COI and low success in species delimitation rate were reported in a study on the important pathogenic and commonly isolated Fusarium () and also in certain genera of the Agaricomycotina (). These paralogues likely represent nuclear encoded pseudogenes of COI () Schoch, Seifert, Huhndorf, et al. concluded that ribosomal markers (e.g., ITS) have fewer problems with PCR amplification than protein-coding markers (e.g., COI), the difficulties in generating a reliable alignment are an important drawback to the use of ITS as a DNA barcode marker (;). Furthermore, sequence variation among paralogues can result in uncertain base calls. Despite these caveats, the availability of ITS sequences from a large number of fungal species in GenBank is a major advantage that often outweighs the complications introduced by alignment problems. The current study suggests that the COI can be an additional barcode marker for particular taxonomic groups of fungi when ITS is unsuitable (e.g., some genera in Ascomycota or some species of mushrooms discussed in this study) or for examining fresh material through a cDNA based approach. However, this approach needs to be extended to determine its suitability for other fungi. Moreover, COI sequences generated phylogenetic groupings for Pleurotus similar to those for ITS while having the advantage of being easily aligned. 
These results justify the broader examination of cDNA-based analysis to test the potential of COI as a barcode marker that could complement ITS, in much the same fashion that two gene regions (rbcL, matK) have been adopted as the standard barcode regions for plants (). Future efforts should explore the use of COI in groups where ITS is unable to deliver species-level resolution. CONFLICT OF INTEREST None declared. DATA ACCESSIBILITY The DNA sequences are available in the Barcode of Life Data System (BOLD) and National Center for Biotechnology Information (NCBI) which have shown in Table 1. AUTHOR CONTRIBUTIONS Farhat A. Avin: designed research, performed research, analyzed data, wrote the paper; Subha Bhassu: project adviser, edited the paper, analysis adviser; Dr. Tan Yee Shin: project adviser, edited the paper, analysis adviser; Thomas W. A. Braukmann: analyzed data, edited the paper; Vikineswary Sabaratnam: project leader, project financial leader, project adviser, edited the paper; Paul Hebert: project adviser, edited the paper, analysis adviser.
Recorder's Court (Detroit) The Recorder's Court, in Detroit, Michigan was a state court of limited jurisdiction which had, for most of its history, exclusive jurisdiction over traffic and ordinance matters, and over all felony cases committed in the City of Detroit. Its jurisdiction did not extend to civil suits. It was merged into the Wayne County Circuit Court, the general jurisdiction court in Wayne County, following the pattern of the rest of the state of Michigan in October 1997. Origin It traces its roots to the Mayor's Court in Detroit, formed in 1824. This municipal court probably owed its name to the fact that from 1827 until 1857, the official name of the City of Detroit was "The Mayor, Recorder and Alderman of Detroit." Merger into Wayne County Circuit Court The merger of the Recorder's court and Wayne County (Third Judicial) Circuit Court was not without controversy. It was made pursuant to a 1997 state law which also consolidated the state's probate courts into a family court, a far less controversial change. A lawsuit brought by Richard Kuhn opposed the merger, but did not prevail. Prior to the merger, "judges of Recorder's Court were elected from Detroit, so unsurprisingly, most of them were African-American. Then Detroit Recorder's Court was abolished — or rather, it was merged with Wayne County Circuit Court. The Recorder's Court judges became Circuit Court judges, and have to run for re-election in Wayne County as a whole, which is predominantly white." At the time of its merger, and now as reconfigured as a part of the Wayne County Circuit Court, the court has been housed in the Frank Murphy Hall of Justice.
package edu.rosehulman.giraph.sample; import com.facebook.hiveio.input.HiveInputDescription; import com.facebook.hiveio.record.HiveReadableRecord; import com.facebook.hiveio.schema.HiveTableSchema; import org.apache.giraph.hive.input.vertex.SimpleNoEdgesHiveToVertex; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; public class LongTextVertex extends SimpleNoEdgesHiveToVertex<LongWritable, Text> { public LongWritable getVertexId(HiveReadableRecord hiveReadableRecord) { return new LongWritable(hiveReadableRecord.getLong(1)); } public Text getVertexValue(HiveReadableRecord hiveReadableRecord) { return new Text(hiveReadableRecord.getString(0)); } public void checkInput(HiveInputDescription hiveInputDescription, HiveTableSchema hiveTableSchema) { } }
<gh_stars>0 #include "UTWeaponLockerPlugin.h" #include "UnrealNetwork.h" #include "UTSquadAI.h" #include "UTWeaponLocker.h" #include "MessageLog.h" #include "UObjectToken.h" #include "UTWorldSettings.h" #include "UTPickupMessage.h" #include "UTWeap_BioRifle.h" #include "UTWeap_ShockRifle.h" #include "UTWeap_RocketLauncher.h" #include "UTWeap_LinkGun.h" #include "UTWeap_FlakCannon.h" #define LOCTEXT_NAMESPACE "UTWeaponLocker" static FString NewLine = FString(TEXT("\n")); static FString NewParagraph = FString(TEXT("\n\n")); FCollisionResponseParams WorldResponseParams = []() { FCollisionResponseParams Result(ECR_Ignore); Result.CollisionResponse.WorldStatic = ECR_Block; Result.CollisionResponse.WorldDynamic = ECR_Block; return Result; }(); AUTWeaponLocker::AUTWeaponLocker(const FObjectInitializer& ObjectInitializer) : Super(ObjectInitializer //.DoNotCreateDefaultSubobject(TEXT("TimerEffect")) // TimerEffect is not optional .DoNotCreateDefaultSubobject(TEXT("BaseEffect")) ) { //// Structure to hold one-time initialization //struct FConstructorStatics //{ // ConstructorHelpers::FObjectFinder<UStaticMesh> BaseMesh; // FConstructorStatics() // : BaseMesh(TEXT("Class'/UTWeaponLockerPlugin/Mesh/S_GP_Ons_Weapon_Locker.S_GP_Ons_Weapon_Locker'")) // { // } //}; //static FConstructorStatics ConstructorStatics; RootComponent = ObjectInitializer.CreateDefaultSubobject<USceneComponent, USceneComponent>(this, TEXT("DummyRoot"), false); if (Collision) { Collision->InitCapsuleSize(96.0f, 150.0f); Collision->Mobility = EComponentMobility::Movable; Collision->AttachParent = RootComponent; Collision->OnComponentBeginOverlap.AddDynamic(this, &AUTWeaponLocker::OnOverlapBegin); Collision->RelativeLocation.Z = 150.f; } BaseMesh = ObjectInitializer.CreateDefaultSubobject<UStaticMeshComponent>(this, TEXT("BaseMeshComp")); if (BaseMesh) { BaseMesh->SetCollisionEnabled(ECollisionEnabled::NoCollision); //BaseMesh->SetStaticMesh(ConstructorStatics.BaseMesh.Object); BaseMesh->AttachParent = 
RootComponent; } if (TimerEffect != NULL) { TimerEffect->Mobility = EComponentMobility::Movable; TimerEffect->AttachParent = RootComponent; TimerEffect->SetActive(false, false); } LockerRespawnTime = 30.f; LockerString = LOCTEXT("LockerString", "Weapon Locker"); LockerPositions.Add(FVector(20.f, -34.6f, 0.f)); LockerPositions.Add(FVector(-20.f, 34.6f, 0.f)); LockerPositions.Add(FVector(40.f, 0.f, 0.f)); LockerPositions.Add(FVector(-40.f, 0.f, 0.f)); LockerPositions.Add(FVector(20.f, 34.6f, 0.f)); LockerPositions.Add(FVector(-20.f, -34.6f, 0.f)); LockerRotations.Add(FRotator(0.f, 30.f, -5.f)); LockerRotations.Add(FRotator(0.f, 210.f, -5.f)); LockerRotations.Add(FRotator(0.f, 90.f, -5.f)); LockerRotations.Add(FRotator(0.f, 270.f, -5.f)); LockerRotations.Add(FRotator(0.f, 150.f, -5.f)); LockerRotations.Add(FRotator(0.f, 330.f, -5.f)); LockerFloatHeight = 84.f; WeaponLockerOffset = FVector(0.f, 0.f, 15.f); WeaponLockerRotation = FRotator(0.f, 0.f, 270.f); WeaponLockerScale3D = FVector(0.875, 0.80f, 0.875f); ProximityDistanceSquared = 1500000.0f; ScaleRate = 2.f; bClearCustomersOnReset = true; GlobalState = ObjectInitializer.CreateDefaultSubobject<UUTWeaponLockerState>(this, TEXT("StateGlobal")); FStateInfo PickupStateInfo(UUTWeaponLockerStatePickup::StaticClass(), true); States.Add(PickupStateInfo); FStateInfo DisabledStateInfo(UUTWeaponLockerStateDisabled::StaticClass()); States.Add(DisabledStateInfo); FStateInfo SleepingStateInfo(UUTWeaponLockerStateSleeping::StaticClass()); States.Add(SleepingStateInfo); // Structure to hold one-time initialization struct FConstructorStaticsWarn { ConstructorHelpers::FObjectFinder<UClass> Redeemer; ConstructorHelpers::FObjectFinder<UClass> Sniper; //ConstructorHelpers::FObjectFinder<UClass> Avril; FConstructorStaticsWarn() : Redeemer(TEXT("Class'/Game/RestrictedAssets/Weapons/Redeemer/BP_Redeemer.BP_Redeemer_C'")) , Sniper(TEXT("Class'/Game/RestrictedAssets/Weapons/Sniper/BP_Sniper.BP_Sniper_C'")) //, 
Avril(TEXT("Class'/Game/RestrictedAssets/Weapons/Avril/BP_Avril.BP_Avril_C'")) { } }; static FConstructorStaticsWarn ConstructorStaticsWarn; WarnIfInLocker.AddUnique(ConstructorStaticsWarn.Redeemer.Object); WarnIfInLocker.AddUnique(ConstructorStaticsWarn.Sniper.Object); //WarnIfInLocker.AddUnique(ConstructorStaticsWarn.Avril.Object); WarnIfInLocker.Remove(NULL); // Structure to hold one-time initialization struct FConstructorStaticsAmmo { ConstructorHelpers::FObjectFinder<UClass> Stinger; FConstructorStaticsAmmo() : Stinger(TEXT("Class'/Game/RestrictedAssets/Weapons/Minigun/BP_Minigun.BP_Minigun_C'")) { } }; static FConstructorStaticsAmmo ConstructorStaticsAmmo; // negative to multiple ammo by its absolute value WeaponLockerAmmo.Add(AUTWeap_BioRifle::StaticClass(), -2.f); WeaponLockerAmmo.Add(AUTWeap_ShockRifle::StaticClass(), -1.5f); WeaponLockerAmmo.Add(AUTWeap_RocketLauncher::StaticClass(), -2.f); WeaponLockerAmmo.Add(AUTWeap_LinkGun::StaticClass(), -2.f); WeaponLockerAmmo.Add(AUTWeap_FlakCannon::StaticClass(), -2.f); WeaponLockerAmmo.Add(ConstructorStaticsAmmo.Stinger.Object, -1.5f); // runtime vars initialisation MaxDesireability = 0.f; bIsActive = false; bIsDisabled = false; bIsSleeping = false; bScalingUp = false; CurrentWeaponScaleX = 0.f; bPlayerNearby = false; bForceNearbyPlayers = false; NextProximityCheckTime = 0.f; // initially replicate weapons bReplacementWeaponsDirty = true; } void AUTWeaponLocker::PreInitializeComponents() { Super::PreInitializeComponents(); if (!HasAnyFlags(RF_ClassDefaultObject)) { for (auto& StateInfo : States) { if (StateInfo.StateClass) { if (StateInfo.State == NULL) { StateInfo.State = NewObject<UUTWeaponLockerState>(this, StateInfo.StateClass); } if (StateInfo.bAuto) { AutoState = StateInfo.State; } if (StateInfo.StateName == FName(TEXT("Pickup"))) PickupState = StateInfo.State; else if (StateInfo.StateName == FName(TEXT("Disabled"))) DisabledState = StateInfo.State; else if (StateInfo.StateName == FName(TEXT("Sleeping"))) 
SleepingState = StateInfo.State; } } } if (InitialState == NULL && AutoState == NULL) { InitialState = GlobalState; } #if WITH_EDITOR CreateEditorPickupMeshes(); #endif } void AUTWeaponLocker::PostInitializeComponents() { Super::PostInitializeComponents(); ReceivePreBeginPlay(); WeaponsCopy = Weapons; } void AUTWeaponLocker::BeginPlay() { if (bIsDisabled) { return; } Super::BeginPlay(); InitializeWeapons(); SetInitialState(); } void AUTWeaponLocker::GetLifetimeReplicatedProps(TArray<class FLifetimeProperty>& OutLifetimeProps) const { Super::GetLifetimeReplicatedProps(OutLifetimeProps); DOREPLIFETIME(AUTWeaponLocker, LockerRespawnTime); DOREPLIFETIME(AUTWeaponLocker, bIsSleeping); DOREPLIFETIME(AUTWeaponLocker, bIsDisabled); DOREPLIFETIME_CONDITION(AUTWeaponLocker, Weapons, COND_None); DOREPLIFETIME_CONDITION(AUTWeaponLocker, ReplacementWeapons, COND_Custom); } void AUTWeaponLocker::PreReplication(IRepChangedPropertyTracker & ChangedPropertyTracker) { if (bReplacementWeaponsDirty) { DOREPLIFETIME_ACTIVE_OVERRIDE(AUTWeaponLocker, ReplacementWeapons, true); bReplacementWeaponsDirty = false; } Super::PreReplication(ChangedPropertyTracker); } void AUTWeaponLocker::Tick(float DeltaTime) { Super::Tick(DeltaTime); if (CurrentState) { CurrentState->Tick(DeltaTime); } } void AUTWeaponLocker::PostActorCreated() { Super::PostActorCreated(); // no need load the editor mesh when there is no editor, meshes are created dynamically in Tick #if WITH_EDITOR if (!IsRunningCommandlet()) { CreateEditorPickupMeshes(); } #endif } void AUTWeaponLocker::InitializeWeapons_Implementation() { // clear out null entries for (int32 i = 0; i < Weapons.Num(); i++) { if (Weapons[i].WeaponClass == NULL) { Weapons.RemoveAt(i, 1); i--; } } if (LockerWeapons.Num() > 0) { DestroyWeapons(); LockerWeapons.Empty(); } // initialize weapons MaxDesireability = 0.f; for (int32 i = 0; i < Weapons.Num(); i++) { // add desirability MaxDesireability += Weapons[i].WeaponClass.GetDefaultObject()->BaseAISelectRating; 
// create local weapon info object LockerWeapons.Add(FWeaponInfo(Weapons[i].WeaponClass)); } // force locker to re-create weapon meshes bPlayerNearby = false; } void AUTWeaponLocker::DestroyWeapons_Implementation() { UE_LOG(LogDebug, Verbose, TEXT("%s::DestroyWeapons"), *GetName()); for (int32 i = 0; i < LockerWeapons.Num(); i++) { if (LockerWeapons[i].PickupMesh != NULL) { UnregisterComponentTree(LockerWeapons[i].PickupMesh); LockerWeapons[i].PickupMesh = NULL; } } } void AUTWeaponLocker::CreatePickupMeshForSlot(UMeshComponent*& PickupMesh, int32 SlotIndex, TSubclassOf<AUTInventory> PickupInventoryType) { if (LockerPositions.IsValidIndex(SlotIndex)) { const FRotator RotationOffset = LockerRotations[SlotIndex] + WeaponLockerRotation; AUTPickupInventory::CreatePickupMesh(this, PickupMesh, PickupInventoryType, 0.f, RotationOffset, false); if (PickupMesh != NULL) { FVector LocationOffset(LockerPositions[SlotIndex] + WeaponLockerOffset); LocationOffset += FVector(0.f, 0.f, LockerFloatHeight); PickupMesh->SetRelativeLocation(LocationOffset); if (!WeaponLockerScale3D.IsZero()) { PickupMesh->SetRelativeScale3D(PickupMesh->RelativeScale3D * WeaponLockerScale3D); } } } } void AUTWeaponLocker::OnRep_Weapons_Implementation() { if (Weapons != WeaponsCopy) { WeaponsCopy = Weapons; InitializeWeapons(); } } void AUTWeaponLocker::OnRep_ReplacementWeapons() { for (int32 i = 0; i < ARRAY_COUNT(ReplacementWeapons); i++) { if (ReplacementWeapons[i].bReplaced) { if (i >= Weapons.Num()) { Weapons.SetNum(i + 1); } Weapons[i].WeaponClass = ReplacementWeapons[i].WeaponClass; if (LockerWeapons.IsValidIndex(i) && LockerWeapons[i].PickupMesh != NULL) { UnregisterComponentTree(LockerWeapons[i].PickupMesh); LockerWeapons[i].PickupMesh = NULL; } } } InitializeWeapons(); } void AUTWeaponLocker::OnRep_IsDisabled_Implementation() { SetInitialStateGlobal(); } void AUTWeaponLocker::Reset() { AActor::Reset(); if (!State.bActive) { WakeUp(); } if (!bIsDisabled && IsInState(SleepingState)) { 
GotoState(PickupState); } if (bClearCustomersOnReset) { // clear customers, so players are able to pick up weapons again on reset Customers.Empty(); } } void AUTWeaponLocker::Reset_Implementation() { static bool bRecursionGuard = false; if (!bRecursionGuard) { TGuardValue<bool> Guard(bRecursionGuard, true); AUTWeaponLocker::Reset(); } } bool AUTWeaponLocker::AllowPickupBy_Implementation(APawn* Other, bool bDefaultAllowPickup) { // TODO: vehicle consideration return bDefaultAllowPickup && Cast<AUTCharacter>(Other) != NULL && !((AUTCharacter*)Other)->IsRagdoll() && ((AUTCharacter*)Other)->bCanPickupItems && !HasCustomer(Other); } void AUTWeaponLocker::OnOverlapBegin(AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromSweep, const FHitResult& SweepHitResult) { APawn* P = Cast<APawn>(OtherActor); if (P == NULL || P->bTearOff || !AllowPickupBy(P, true)) { return; } else if (P->Controller == NULL) { // re-check later in case this Pawn is in the middle of spawning, exiting a vehicle, etc // and will have a Controller shortly GetWorldTimerManager().SetTimer(RecheckValidTouchHandle, this, &AUTWeaponLocker::RecheckValidTouch, 0.2f, false); return; } else { // make sure not touching through wall FVector TraceEnd = Collision ? 
Collision->GetComponentLocation() : GetActorLocation(); if (GetWorld()->LineTraceTestByChannel(P->GetActorLocation(), TraceEnd, ECC_Pawn, FCollisionQueryParams(), WorldResponseParams)) { GetWorldTimerManager().SetTimer(RecheckValidTouchHandle, this, &AUTWeaponLocker::RecheckValidTouch, 0.5f, false); return; } } GetWorldTimerManager().ClearTimer(RecheckValidTouchHandle); ProcessTouch(P); } void AUTWeaponLocker::ProcessTouch_Implementation(APawn* TouchedBy) { UE_LOG(LogDebug, Verbose, TEXT("%s::ProcessTouch - TouchedBy: %s"), *GetName(), *GetNameSafe(TouchedBy)); if (CurrentState && CurrentState->OverrideProcessTouch(TouchedBy)) { UE_LOG(LogDebug, Verbose, TEXT("%s::ProcessTouch - Overriden"), *GetName()); return; } Super::ProcessTouch_Implementation(TouchedBy); } void AUTWeaponLocker::CheckTouching() { UE_LOG(LogDebug, Verbose, TEXT("%s::CheckTouching"), *GetName()); bForceNearbyPlayers = true; GetWorldTimerManager().ClearTimer(CheckTouchingHandle); TArray<AActor*> Touching; GetOverlappingActors(Touching, APawn::StaticClass()); FHitResult UnusedHitResult; for (AActor* TouchingActor : Touching) { APawn* P = Cast<APawn>(TouchingActor); if (P != NULL && P->GetMovementComponent() != NULL) { OnOverlapBegin(P, Cast<UPrimitiveComponent>(P->GetMovementComponent()->UpdatedComponent), INDEX_NONE, false, UnusedHitResult); } } bForceNearbyPlayers = false; // see if we should reset the timer float NextCheckInterval = 0.0f; for (const auto& PrevCustomer : Customers) { NextCheckInterval = FMath::Max<float>(NextCheckInterval, PrevCustomer.NextPickupTime - GetWorld()->TimeSeconds); } if (NextCheckInterval > 0.0f) { GetWorldTimerManager().SetTimer(CheckTouchingHandle, this, &AUTWeaponLocker::CheckTouching, NextCheckInterval, false); } } void AUTWeaponLocker::RecheckValidTouch() { UE_LOG(LogDebug, Verbose, TEXT("%s::RecheckValidTouch"), *GetName()); CheckTouching(); } void AUTWeaponLocker::GiveTo_Implementation(APawn* Target) { UE_LOG(LogDebug, Verbose, TEXT("%s::GiveTo - Target: 
%s"), *GetName(), *GetNameSafe(Target)); HandlePickUpWeapons(Target, true); } void AUTWeaponLocker::AnnouncePickup(AUTCharacter* P, TSubclassOf<AUTInventory> NewInventoryType, AUTInventory* NewInventory/* = nullptr*/) { if (auto PC = Cast<APlayerController>(P->GetController())) { PC->ClientReceiveLocalizedMessage(UUTPickupMessage::StaticClass(), 0, P->PlayerState, NULL, NewInventoryType); } } void AUTWeaponLocker::HandlePickUpWeapons(AActor* Other, bool bHideWeapons) { UE_LOG(LogDebug, Verbose, TEXT("%s::HandlePickUpWeapons - Other: %s - bHideWeapons: %i"), *GetName(), *GetNameSafe(Other), (int)bHideWeapons); if (CurrentState) { CurrentState->HandlePickUpWeapons(Other, bHideWeapons); } } void AUTWeaponLocker::GiveLockerWeapons(AActor* Other, bool bHideWeapons) { UE_LOG(LogDebug, Verbose, TEXT("%s::GiveLockerWeapons - Other: %s - bHideWeapons: %i"), *GetName(), *GetNameSafe(Other), (int)bHideWeapons); AUTCharacter* Recipient = Cast<AUTCharacter>(Other); if (Recipient == NULL) return; #if WITH_EDITOR const UEnum* EnumPtr = FindObject<UEnum>(ANY_PACKAGE, TEXT("ENetRole"), true); UE_LOG(LogDebug, Verbose, TEXT("%s::GiveLockerWeapons - Role: %s"), *GetName(), EnumPtr ? *EnumPtr->GetDisplayNameText(Role.GetValue()).ToString() : *FString(TEXT(""))); #endif APawn* DriverPawn = Recipient->DrivenVehicle ? Recipient->DrivenVehicle : Recipient; if (DriverPawn && DriverPawn->IsLocallyControlled()) { if (bHideWeapons && bIsActive) { UE_LOG(LogDebug, Verbose, TEXT("%s::GiveLockerWeapons - Register player %s and start timer ShowActive in %i"), *GetName(), *GetNameSafe(DriverPawn->GetController()), LockerRespawnTime); // register local player by bind to the Died event in order // to track when the local player dies... 
to reset the locker RegisterLocalPlayer(DriverPawn->GetController()); ShowHidden(); GetWorldTimerManager().SetTimer(HideWeaponsHandle, this, &AUTWeaponLocker::ShowActive, LockerRespawnTime, false); } else { UE_LOG(LogDebug, Verbose, TEXT("%s::GiveLockerWeapons - Clear timer ShowActive"), *GetName()); GetWorldTimerManager().ClearTimer(HideWeaponsHandle); } } if (Role < ROLE_Authority) return; if (bHideWeapons && !AddCustomer(Recipient)) return; bool bWeaponAdded = false; AUTGameMode* UTGameMode = GetWorld()->GetAuthGameMode<AUTGameMode>(); for (int32 i = 0; i < Weapons.Num(); i++) { bool bAllowPickup = true; TSubclassOf<AUTWeapon> LocalInventoryType = Weapons[i].WeaponClass; if (LocalInventoryType && (UTGameMode == NULL || !UTGameMode->OverridePickupQuery(Recipient, LocalInventoryType, this, bAllowPickup) || bAllowPickup)) { AUTWeapon* Copy = Recipient->FindInventoryType<AUTWeapon>(LocalInventoryType, true); if (Copy == NULL || !Copy->StackPickup(NULL)) { FActorSpawnParameters Params; Params.SpawnCollisionHandlingOverride = ESpawnActorCollisionHandlingMethod::AlwaysSpawn; Params.Instigator = Recipient; Copy = GetWorld()->SpawnActor<AUTWeapon>(LocalInventoryType, GetActorLocation(), GetActorRotation(), Params); if (Copy) { Recipient->AddInventory(Copy, true); bWeaponAdded = true; } } if (Copy) { int32 LockerAmmo = GetLockerAmmo(LocalInventoryType); if (LockerAmmo - Copy->Ammo > 0) { Copy->AddAmmo(LockerAmmo - Copy->Ammo); } AnnouncePickup(Recipient, LocalInventoryType); if (Copy->PickupSound) { UUTGameplayStatics::UTPlaySound(GetWorld(), Copy->PickupSound, this, SRT_All, false, FVector::ZeroVector, NULL, Recipient, false); } } } } if (bWeaponAdded) { Recipient->DeactivateSpawnProtection(); } } void AUTWeaponLocker::RegisterLocalPlayer(AController* C) { if (C == NULL) return; if (AUTCharacter* UTChar = Cast<AUTCharacter>(C->GetPawn())) { UTChar->OnDied.AddUniqueDynamic(this, &AUTWeaponLocker::OnPawnDied); } } void 
AUTWeaponLocker::NotifyLocalPlayerDead(APlayerController* PC) { if (CurrentState) { CurrentState->NotifyLocalPlayerDead(PC); } } void AUTWeaponLocker::OnPawnDied(AController* Killer, const UDamageType* DamageType) { AUTPlayerController* LocalPC(nullptr); for (FConstPlayerControllerIterator Iterator = GetWorld()->GetPlayerControllerIterator(); Iterator; ++Iterator) { AUTPlayerController* PC = Cast<AUTPlayerController>(*Iterator); if (PC && PC->IsLocalPlayerController()) { auto UTChar = Cast<AUTCharacter>(PC->GetPawn()); if (UTChar == NULL || UTChar->Health <= 0) { LocalPC = PC; break; } } } if (LocalPC) { NotifyLocalPlayerDead(LocalPC); } } void UUTWeaponLockerStatePickup::NotifyLocalPlayerDead_Implementation(APlayerController* PC) { GetOuterAUTWeaponLocker()->ShowActive(); } void UUTWeaponLockerStatePickup::HandlePickUpWeapons_Implementation(AActor* Other, bool bHideWeapons) { UE_LOG(LogDebug, Verbose, TEXT("%s::HandlePickUpWeapons (Pickup) - Other: %s - bHideWeapons: %i"), *GetName(), *GetNameSafe(Other), (int)bHideWeapons); GetOuterAUTWeaponLocker()->GiveLockerWeapons(Other, bHideWeapons); if (APawn* P = Cast<APawn>(Other)) { GetOuterAUTWeaponLocker()->OnPickupStatusChange.Broadcast(GetOuterAUTWeaponLocker(), P, ELockerPickupStatus::Taken); } } void AUTWeaponLocker::SetPlayerNearby(APlayerController* PC, bool bNewPlayerNearby, bool bPlayEffects, bool bForce/* = false*/) { UE_LOG(LogDebug, Verbose, TEXT("%s::SetPlayerNearby - PC: %s - bNewPlayerNearby: %i - bPlayEffects: %i - bForce: %i"), *GetName(), *GetNameSafe(PC), (int)bNewPlayerNearby, (int)bPlayEffects, (int)bForce); if (bNewPlayerNearby != bPlayerNearby || bForce) { bPlayerNearby = bNewPlayerNearby; if (GetNetMode() == NM_DedicatedServer) { return; } if (bPlayerNearby) { UE_LOG(LogDebug, Verbose, TEXT("%s::SetPlayerNearby - Show weapons"), *GetName()); bScalingUp = true; CurrentWeaponScaleX = 0.1f; for (int32 i = 0; i < LockerWeapons.Num(); i++) { if (LockerWeapons[i].PickupMesh == NULL) { 
CreatePickupMeshForSlot(LockerWeapons[i].PickupMesh, i, LockerWeapons[i].WeaponClass); if (LockerWeapons[i].PickupMesh != NULL) { FVector NewScale = LockerWeapons[i].PickupMesh->GetComponentScale(); if (ScaleRate > 0.f) { LockerWeapons[i].DesiredScale3D = NewScale; NewScale.X *= 0.1; NewScale.Z *= 0.1; LockerWeapons[i].PickupMesh->SetWorldScale3D(NewScale); } } } if (LockerWeapons[i].PickupMesh != NULL) { LockerWeapons[i].PickupMesh->SetHiddenInGame(false); if (bPlayEffects) { const FRotator RotationOffset = LockerRotations.IsValidIndex(i) ? LockerRotations[i] : FRotator::ZeroRotator; UGameplayStatics::SpawnEmitterAtLocation(this, WeaponSpawnEffectTemplate, LockerWeapons[i].PickupMesh->GetComponentLocation(), RotationOffset); } } } if (ProximityEffect) { ProximityEffect->SetActive(true); } GetWorld()->GetTimerManager().ClearTimer(DestroyWeaponsHandle); } else { UE_LOG(LogDebug, Verbose, TEXT("%s::SetPlayerNearby - Hide weapons"), *GetName()); AUTWorldSettings* WS = Cast<AUTWorldSettings>(GetWorld()->GetWorldSettings()); bPlayEffects = bPlayEffects && (WS == NULL || WS->EffectIsRelevant(this, GetActorLocation(), true, true, 10000.f, 0.f)); bScalingUp = false; for (int32 i = 0; i < LockerWeapons.Num(); i++) { if (LockerWeapons[i].PickupMesh != NULL) { LockerWeapons[i].PickupMesh->SetHiddenInGame(true); if (bPlayEffects) { const FRotator RotationOffset = LockerRotations.IsValidIndex(i) ? 
LockerRotations[i] : FRotator::ZeroRotator;
					UGameplayStatics::SpawnEmitterAtLocation(this, WeaponSpawnEffectTemplate, LockerWeapons[i].PickupMesh->GetComponentLocation(), RotationOffset);
				}
			}
		}
		if (ProximityEffect)
		{
			ProximityEffect->DeactivateSystem();
		}
		// defer the actual mesh teardown so a player re-entering range shortly
		// afterwards does not churn mesh components
		GetWorldTimerManager().SetTimer(DestroyWeaponsHandle, this, &AUTWeaponLocker::DestroyWeapons, 5.f, false);
	}
	OnPlayerNearByChanged(PC, bPlayEffects);
}
}

// Changes the locker's respawn interval. CustomersAction controls existing
// customers: Clear forgets them all; Update shifts each customer's next-pickup
// time by the delta between the old and new interval. When bAutoSleep is set,
// sleeping is immediately re-evaluated via StartSleeping().
void AUTWeaponLocker::SetLockerRespawnTime(float NewLockerRespawnTime, bool bAutoSleep, TEnumAsByte<ELockerCustomerAction::Type> CustomersAction)
{
	float OldTime(LockerRespawnTime);
	LockerRespawnTime = NewLockerRespawnTime;
	switch (CustomersAction)
	{
	case ELockerCustomerAction::Clear:
		Customers.Empty();
		break;
	case ELockerCustomerAction::Update:
		for (auto& Customer : Customers)
		{
			Customer.NextPickupTime -= OldTime;
			Customer.NextPickupTime += NewLockerRespawnTime;
		}
		break;
	}
	OnLockerRespawnTimeSet(OldTime);
	// update locally
	if (GetNetMode() != NM_DedicatedServer)
	{
		OnRep_LockerRespawnTimeChanged(OldTime);
	}
	if (bAutoSleep)
	{
		StartSleeping();
	}
}

// Client notification when LockerRespawnTime replicates (also invoked locally
// by the setter on non-dedicated servers).
void AUTWeaponLocker::OnRep_LockerRespawnTimeChanged_Implementation(float OldLockerRespawnTime)
{
	// restart local timer to re-draw weapons
	if (GetWorldTimerManager().IsTimerActive(HideWeaponsHandle))
	{
		float Elapsed = GetWorldTimerManager().GetTimerElapsed(HideWeaponsHandle);
		// NOTE(review): this computation looks inverted — the remaining time
		// under the new interval would be (LockerRespawnTime - Elapsed), not
		// (Elapsed - LockerRespawnTime). As written the timer is only restarted
		// when more time has already elapsed than the new interval, and then
		// for the wrong duration. Confirm intent before relying on this path.
		float NewTimer = Elapsed - LockerRespawnTime;
		if (NewTimer > 0.f)
		{
			GetWorldTimerManager().SetTimer(HideWeaponsHandle, this, &AUTWeaponLocker::ShowActive, NewTimer, false);
		}
		else
		{
			GetWorldTimerManager().SetTimerForNextTick(this, &AUTWeaponLocker::ShowActive);
		}
	}
	// conditionally check whether to goto Sleep state on client only,
	// server is already calling it right when the value has changed through the setter
	if (Role < ROLE_Authority)
	{
		StartSleeping();
	}
}

// Registers P as a customer who just took weapons. Returns true when the
// pickup may proceed (new customer, or a previous entry whose cooldown has
// expired); returns false while P is still inside the cooldown window.
// Expired entries for other pawns are pruned as a side effect.
bool AUTWeaponLocker::AddCustomer(APawn* P)
{
	AUTCharacter* UTChar = Cast<AUTCharacter>(P);
	if (UTChar == NULL)
		return false;
	if (Customers.Num() > 0)
	{
		for (int32 i = 0; i <
Customers.Num(); i++) { if (Customers[i].NextPickupTime < GetWorld()->TimeSeconds) { if (Customers[i].P == P) { Customers[i].NextPickupTime = GetWorld()->TimeSeconds + LockerRespawnTime; return true; } Customers.RemoveAt(i, 1); i--; } else if (Customers[i].P == P) { return false; } } } FWeaponPickupCustomer PT(P, GetWorld()->TimeSeconds + LockerRespawnTime); Customers.Add(PT); return true; } bool AUTWeaponLocker::HasCustomer(APawn* TestPawn) { for (int32 i = Customers.Num() - 1; i >= 0; i--) { if (Customers[i].P == NULL || Customers[i].P->bTearOff || Customers[i].P->IsPendingKillPending()) { Customers.RemoveAt(i); } else if (Customers[i].P == TestPawn) { return (GetWorld()->TimeSeconds < Customers[i].NextPickupTime); } } return false; } void AUTWeaponLocker::AddWeapon(TSubclassOf<AUTWeapon> NewWeaponClass) { if (Role < ROLE_Authority) return; FWeaponEntry NewWeaponEntry(NewWeaponClass); Weapons.Add(NewWeaponEntry); InitializeWeapons(); } void AUTWeaponLocker::ReplaceWeapon(int32 Index, TSubclassOf<AUTWeapon> NewWeaponClass) { if (Role < ROLE_Authority) return; if (Index >= 0) { bReplacementWeaponsDirty = true; if (Index >= Weapons.Num()) { Weapons.SetNum(Index + 1); } Weapons[Index].WeaponClass = NewWeaponClass; if (Index < ARRAY_COUNT(ReplacementWeapons)) { ReplacementWeapons[Index].bReplaced = true; ReplacementWeapons[Index].WeaponClass = NewWeaponClass; } // update weapons locally if (GetNetMode() != NM_DedicatedServer) { OnRep_ReplacementWeapons(); } } } void AUTWeaponLocker::StartSleeping_Implementation() { UE_LOG(LogDebug, Verbose, TEXT("%s::StartSleeping"), *GetName()); // override original sleeping mechanism if (CurrentState) { CurrentState->StartSleeping(); } } void AUTWeaponLocker::ShowActive() { UE_LOG(LogDebug, Verbose, TEXT("%s::ShowActive"), *GetName()); if (CurrentState) { CurrentState->ShowActive(); } } // Note: This is only in LockerPickup state void UUTWeaponLockerStatePickup::ShowActive_Implementation() { UE_LOG(LogDebug, Verbose, 
TEXT("%s::ShowActive (Pickup)"), *GetName()); GetOuterAUTWeaponLocker()->bIsActive = true; GetOuterAUTWeaponLocker()->NextProximityCheckTime = 0.f; if (GetOuterAUTWeaponLocker()->AmbientEffect) { GetOuterAUTWeaponLocker()->AmbientEffect->SetTemplate(GetOuterAUTWeaponLocker()->ActiveEffectTemplate); } if (GetWorld()->GetNetMode() != NM_DedicatedServer) { GetOuterAUTWeaponLocker()->OnPickupStatusChange.Broadcast(GetOuterAUTWeaponLocker(), NULL, ELockerPickupStatus::Available); } if (GetWorld()->GetNetMode() == NM_Client) { UE_LOG(LogDebug, Verbose, TEXT("%s::ShowActive (Pickup) - Call CheckTouching on client"), *GetName()); GetOuterAUTWeaponLocker()->CheckTouching(); } } void AUTWeaponLocker::ShowHidden() { UE_LOG(LogDebug, Verbose, TEXT("%s::ShowHidden"), *GetName()); if (CurrentState) { CurrentState->ShowHidden(); } else { ShowHiddenGlobal(); } } void AUTWeaponLocker::ShowHiddenGlobal_Implementation() { UE_LOG(LogDebug, Verbose, TEXT("%s::ShowHiddenGlobal"), *GetName()); bIsActive = false; SetPlayerNearby(nullptr, false, false, bForceNearbyPlayers); if (AmbientEffect) { AmbientEffect->SetTemplate(InactiveEffectTemplate); } } void AUTWeaponLocker::SetInitialState() { if (CurrentState) { CurrentState->SetInitialState(); } else { SetInitialStateGlobal(); } } void AUTWeaponLocker::SetInitialStateGlobal_Implementation() { if (bIsDisabled) { GotoState(DisabledState); } else { GotoState(InitialState != NULL ? 
InitialState : AutoState); } } void AUTWeaponLocker::GotoState(UUTWeaponLockerState* NewState) { if (NewState == NULL || !NewState->IsIn(this)) { UE_LOG(LogDebug, Warning, TEXT("Attempt to send %s to invalid state %s"), *GetName(), *GetNameSafe(NewState)); NewState = GlobalState; } if (NewState) { if (CurrentState != NewState) { UUTWeaponLockerState* PrevState = CurrentState; if (CurrentState != NULL) { CurrentState->EndState(NewState); // NOTE: may trigger another GotoState() call } if (CurrentState == PrevState) { CurrentState = NewState; CurrentState->BeginState(PrevState); // NOTE: may trigger another GotoState() call StateChanged(); } } } } void AUTWeaponLocker::StateChanged_Implementation() { } float AUTWeaponLocker::BotDesireability_Implementation(APawn* Asker, float TotalDistance) { if (CurrentState) { return CurrentState->BotDesireability(Asker, TotalDistance); } return BotDesireabilityGlobal(Asker, TotalDistance); } float AUTWeaponLocker::BotDesireabilityGlobal_Implementation(APawn* Asker, float TotalDistance) { AUTCharacter* UTChar = Cast<AUTCharacter>(Asker); if (bHidden || Asker == NULL || UTChar == NULL) return 0.f; AUTBot* Bot = Cast<AUTBot>(Asker->Controller); if (Bot == NULL || HasCustomer(Asker)) return 0.f; float desire = 0.f; // see if bot already has a weapon of this type for (int32 i = 0; i < Weapons.Num(); i++) { if (Weapons[i].WeaponClass != NULL) { AUTWeapon* AlreadyHas = UTChar->FindInventoryType<AUTWeapon>(Weapons[i].WeaponClass, true); if (AlreadyHas == NULL) { desire += Weapons[i].WeaponClass.GetDefaultObject()->BaseAISelectRating; } else if (AlreadyHas->Ammo < AlreadyHas->GetClass()->GetDefaultObject<AUTWeapon>()->Ammo) // NeedAmmo() { desire += 0.15; } } } if (Bot->HuntingTarget && UTChar->GetWeapon() && (desire * 0.833 < UTChar->GetWeapon()->BaseAISelectRating - 0.1f)) { return 0.f; } // incentivize bot to get this weapon if it doesn't have a good weapon already if ((UTChar->GetWeapon() == NULL) || 
(UTChar->GetWeapon()->BaseAISelectRating < 0.5f))
	{
		return 2.f * desire;
	}
	return desire;
}

// Weight for a bot considering a detour to this locker while travelling
// elsewhere; mirrors the desire accumulation of BotDesireabilityGlobal but
// divides the result by the path distance.
float AUTWeaponLocker::DetourWeight_Implementation(APawn* Asker, float PathDistance)
{
	AUTCharacter* UTChar = Cast<AUTCharacter>(Asker);
	if (bHidden || Asker == NULL || UTChar == NULL)
		return 0.f;
	AUTBot* Bot = Cast<AUTBot>(Asker->Controller);
	if (Bot == NULL || HasCustomer(Asker))
		return 0.f;
	float desire = 0.f;
	// see if bot already has a weapon of this type
	// NOTE(review): unlike BotDesireabilityGlobal above, this loop does not
	// skip entries whose WeaponClass is NULL before dereferencing. Confirm
	// Weapons can never hold a NULL class here — InitializeWeapons strips
	// them, but ReplaceWeapon/SetNum can reintroduce NULL entries.
	for (int32 i = 0; i < Weapons.Num(); i++)
	{
		AUTWeapon* AlreadyHas = UTChar->FindInventoryType<AUTWeapon>(Weapons[i].WeaponClass, true);
		if (AlreadyHas == NULL)
		{
			desire += Weapons[i].WeaponClass.GetDefaultObject()->BaseAISelectRating;
		}
		else if (AlreadyHas->Ammo < AlreadyHas->GetClass()->GetDefaultObject<AUTWeapon>()->Ammo) // NeedAmmo()
		{
			desire += 0.15;
		}
	}
	float PathWeight = PathDistance;
	// bot on a high-priority squad objective: only a token detour weight
	if (Bot->GetSquad() && Bot->GetSquad()->HasHighPriorityObjective(Bot)
		&& ((UTChar->GetWeapon() && UTChar->GetWeapon()->BaseAISelectRating > 0.5) || (PathWeight > 400)))
	{
		return 0.2f / PathWeight;
	}
	return PathWeight != 0.f ?
desire / PathWeight : 0.f; } #if WITH_EDITOR void AUTWeaponLocker::OnConstruction(const FTransform& Transform) { Super::OnConstruction(Transform); CreateEditorPickupMeshes(); } void AUTWeaponLocker::PreEditUndo() { CleanupEditorPickupMeshes(); Super::PreEditUndo(); } void AUTWeaponLocker::PostEditUndo() { Super::PostEditUndo(); CreateEditorPickupMeshes(); } void AUTWeaponLocker::CleanupEditorPickupMeshes() { for (auto& EditorMesh : EditorMeshes) { if (EditorMesh) { UnregisterComponentTree(EditorMesh); EditorMesh->DestroyComponent(); EditorMesh = NULL; } } EditorMeshes.Empty(); OnEditorPickupMeshesCleanUp(); } void AUTWeaponLocker::CreateEditorPickupMeshes() { if (GetWorld() != NULL && GetWorld()->WorldType == EWorldType::Editor) { CleanupEditorPickupMeshes(); for (int32 i = 0; i < Weapons.Num() && i < LockerPositions.Num(); i++) { EditorMeshes.AddZeroed(1); if (Weapons[i].WeaponClass != NULL) { CreatePickupMeshForSlot(EditorMeshes[i], i, Weapons[i].WeaponClass); if (EditorMeshes[i] != NULL) { EditorMeshes[i]->SetHiddenInGame(true); } } } OnEditorPickupMeshesCreated(); } } void AUTWeaponLocker::CheckForErrors() { Super::CheckForErrors(); for (int32 i = 0; i < Weapons.Num(); i++) { FFormatNamedArguments Arguments; Arguments.Add(TEXT("ActorName"), FText::FromString(GetName())); FText Message = FText(); if (Weapons[i].WeaponClass == NULL) { Message = LOCTEXT("MapCheck_Message_WeaponClassInLockerEmpty", "Empty weapon class in weapon locker"); } else { Arguments.Add(TEXT("WeaponClass"), FText::FromString(Weapons[i].WeaponClass->GetName())); if (RespawnTime < Weapons[i].WeaponClass.GetDefaultObject()->RespawnTime) { static const FNumberFormattingOptions RespawnTimeFormat = FNumberFormattingOptions() .SetMinimumFractionalDigits(0) .SetMaximumFractionalDigits(1); Arguments.Add(TEXT("LockerRespawnTime"), FText::AsNumber(RespawnTime, &RespawnTimeFormat)); Arguments.Add(TEXT("WeaponRespawnTime"), FText::AsNumber(Weapons[i].WeaponClass.GetDefaultObject()->RespawnTime, 
&RespawnTimeFormat)); FMessageLog("MapCheck").Warning() ->AddToken(FUObjectToken::Create(this)) ->AddToken(FTextToken::Create(FText::Format(LOCTEXT("MapCheck_Message_WeaponClassRespawnTimeFaster", "{WeaponClass} is in weapon lockers which will respawn items each {LockerRespawnTime}s (instead of {WeaponRespawnTime}s)."), Arguments))); } TSubclassOf<AUTWeapon> WeaponSuper = Weapons[i].WeaponClass; while (WeaponSuper != NULL) { if (WarnIfInLocker.Find(WeaponSuper) != INDEX_NONE) { Arguments.Add(TEXT("ActorName"), FText::FromString(GetName())); Arguments.Add(TEXT("WarnClass"), FText::FromString(WeaponSuper->GetName())); Message = LOCTEXT("MapCheck_Message_WeaponClassInLockerWarning", "{WeaponClass} should not be in weapon lockers."); WeaponSuper = NULL; } else { WeaponSuper = WeaponSuper->GetSuperClass(); } } } if (!Message.IsEmpty()) { FMessageLog("MapCheck").Warning() ->AddToken(FUObjectToken::Create(this)) ->AddToken(FTextToken::Create(FText::Format(Message, Arguments))); } } TArray<FString> StateErrors; if (HasStateErrors(StateErrors)) { FFormatNamedArguments Arguments; Arguments.Add(TEXT("ActorName"), FText::FromString(GetName())); for (auto StateError : StateErrors) { FMessageLog("MapCheck").Warning() ->AddToken(FUObjectToken::Create(this)) ->AddToken(FTextToken::Create(FText::Format(FText::FromString(StateError), Arguments))); } } } void AUTWeaponLocker::PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent) { Super::PostEditChangeProperty(PropertyChangedEvent); const FName PropertyName = (PropertyChangedEvent.Property != nullptr) ? PropertyChangedEvent.Property->GetFName() : NAME_None; const FName MemberName = (PropertyChangedEvent.MemberProperty != nullptr) ? 
PropertyChangedEvent.MemberProperty->GetFName() : NAME_None; if (PropertyName != MemberName && MemberName == GET_MEMBER_NAME_CHECKED(AUTWeaponLocker, States)) { // count auto states TArray<FString> AutoStates; TArray<FString> NoNamesStates; TArray<FString> DupNamesStates; TArray<FName> StateNames; for (int32 i = 0; i < States.Num(); i++) { FString StateInfo = FString::Printf(TEXT("[%i] Name: %s State: %s"), i, *States[i].StateName.ToString(), *GetNameSafe(States[i].StateClass)); if (States[i].bAuto) { AutoStates.Add(StateInfo); } if (States[i].StateName.IsNone()) { NoNamesStates.Add(StateInfo); } else { if (StateNames.Find(States[i].StateName) > INDEX_NONE) { DupNamesStates.Add(StateInfo); } StateNames.Add(States[i].StateName); } } FString OptionalNewLine = NewLine; TArray<FString> MessageStrs; if (AutoStates.Num() > 1) { MessageStrs.Add(FString::Printf(TEXT("Multiple States have 'Auto' flag set: %s%s"), *OptionalNewLine, *FString::Join(AutoStates, *NewLine))); } if (NoNamesStates.Num() > 0) { MessageStrs.Add(FString::Printf(TEXT("Some States have no name: %s%s"), *OptionalNewLine, *FString::Join(NoNamesStates, *NewLine))); } if (DupNamesStates.Num() > 0) { MessageStrs.Add(FString::Printf(TEXT("Duplicate state names found: %s%s"), *OptionalNewLine, *FString::Join(DupNamesStates, *NewLine))); } if (MessageStrs.Num() > 0) { FMessageDialog::Open(EAppMsgType::Ok, FText::FromString(FString::Join(MessageStrs, *NewParagraph))); } } else if (MemberName == GET_MEMBER_NAME_CHECKED(AUTWeaponLocker, Weapons)) { CreateEditorPickupMeshes(); } } void AUTWeaponLocker::PostEditChangeChainProperty(FPropertyChangedChainEvent& PropertyChangedEvent) { const FName MemberPropertyName = PropertyChangedEvent.PropertyChain.GetActiveMemberNode()->GetValue()->GetFName(); const FName PropertyName = PropertyChangedEvent.PropertyChain.GetActiveNode()->GetValue()->GetFName(); if (PropertyChangedEvent.Property && PropertyName != MemberPropertyName && MemberPropertyName == 
GET_MEMBER_NAME_CHECKED(AUTWeaponLocker, States)) { if (PropertyChangedEvent.ChangeType == EPropertyChangeType::ValueSet) { const FName TailPropName = PropertyChangedEvent.PropertyChain.GetTail()->GetValue()->GetFName(); int32 NewArrayIndex = PropertyChangedEvent.GetArrayIndex(MemberPropertyName.ToString()); if (States.IsValidIndex(NewArrayIndex)) { if (TailPropName == GET_MEMBER_NAME_CHECKED(FStateInfo, StateClass)) { TSubclassOf<UScriptState> DefaultStateClass = States[NewArrayIndex].StateClass; if (DefaultStateClass && (States[NewArrayIndex].StateName.IsNone() || !States[NewArrayIndex].bUserChanged)) { States[NewArrayIndex].StateName = DefaultStateClass.GetDefaultObject()->DefaultStateName; } } else if (TailPropName == GET_MEMBER_NAME_CHECKED(FStateInfo, StateName)) { States[NewArrayIndex].bUserChanged = !States[NewArrayIndex].StateName.IsNone(); } } } } Super::PostEditChangeChainProperty(PropertyChangedEvent); } bool AUTWeaponLocker::HasStateErrors(TArray<FString>& StateErrors) { TArray<FString> AutoStates; TArray<FString> NoNamesStates; TArray<FString> DupNamesStates; TArray<FString> InvalidStates; TArray<FName> StateNames; for (int32 i = 0; i < States.Num(); i++) { FString StateInfo = FString::Printf(TEXT("[%i] Name: %s State: %s"), i, *States[i].StateName.ToString(), *GetNameSafe(States[i].StateClass)); if (States[i].bAuto) { AutoStates.Add(StateInfo); } if (States[i].StateName.IsNone()) { NoNamesStates.Add(StateInfo); } else { if (StateNames.Find(States[i].StateName) > INDEX_NONE) { DupNamesStates.Add(StateInfo); } StateNames.Add(States[i].StateName); } if (States[i].StateClass == NULL) { InvalidStates.Add(StateInfo); } } if (AutoStates.Num() > 1) { StateErrors.Add(FString(TEXT("Multiple states have 'Auto' flag set."))); } if (NoNamesStates.Num() > 0) { StateErrors.Add(FString(TEXT("Some states have no name."))); } if (DupNamesStates.Num() > 0) { StateErrors.Add(FString(TEXT("Duplicate state names found."))); } if (InvalidStates.Num() > 0) { 
StateErrors.Add(FString(TEXT("Some states have no class set."))); } return StateErrors.Num() > 0; } #endif // WITH_EDITOR void UUTWeaponLockerStateDisabled::SetInitialState_Implementation() { // override return; } void UUTWeaponLockerStateDisabled::BeginState_Implementation(const UUTWeaponLockerState* PrevState) { Super::BeginState_Implementation(PrevState); GetOuterAUTWeaponLocker()->SetActorHiddenInGame(true); GetOuterAUTWeaponLocker()->SetActorEnableCollision(false); GetOuterAUTWeaponLocker()->ShowHidden(); } float UUTWeaponLockerStateDisabled::BotDesireability_Implementation(APawn* Asker, float TotalDistance) { return 0.f; } bool UUTWeaponLockerStateDisabled::OverrideProcessTouch_Implementation(APawn* TouchedBy) { return true; } void UUTWeaponLockerStateSleeping::StartSleeping_Implementation() { } void UUTWeaponLockerStateSleeping::BeginState_Implementation(const UUTWeaponLockerState* PrevState) { Super::BeginState_Implementation(PrevState); GetOuterAUTWeaponLocker()->bForceNearbyPlayers = true; GetOuterAUTWeaponLocker()->SetActorEnableCollision(false); GetOuterAUTWeaponLocker()->ShowHidden(); GetOuterAUTWeaponLocker()->bIsSleeping = true; if (GetWorld()->GetNetMode() != NM_DedicatedServer) { GetOuterAUTWeaponLocker()->OnRep_IsSleeping(); } GetOuterAUTWeaponLocker()->bForceNearbyPlayers = false; } void UUTWeaponLockerStateSleeping::EndState_Implementation(const UUTWeaponLockerState* NextState) { Super::EndState_Implementation(NextState); GetOuterAUTWeaponLocker()->bIsSleeping = false; if (GetWorld()->GetNetMode() != NM_DedicatedServer) { GetOuterAUTWeaponLocker()->OnRep_IsSleeping(); } } float UUTWeaponLockerStateSleeping::BotDesireability_Implementation(APawn* Asker, float TotalDistance) { return 0.f; } bool UUTWeaponLockerStateSleeping::OverrideProcessTouch_Implementation(APawn* TouchedBy) { return true; } void UUTWeaponLockerStatePickup::BeginState_Implementation(const UUTWeaponLockerState* PrevState) { Super::BeginState_Implementation(PrevState); // allow 
to re-enable weapon locker GetOuterAUTWeaponLocker()->SetActorHiddenInGame(false); GetOuterAUTWeaponLocker()->SetActorEnableCollision(true); GetOuterAUTWeaponLocker()->ShowActive(); } bool UUTWeaponLockerStatePickup::OverrideProcessTouch_Implementation(APawn* TouchedBy) { UE_LOG(LogDebug, Verbose, TEXT("%s::OverrideProcessTouch (Pickup) - TouchedBy: %s"), *GetName(), *GetNameSafe(TouchedBy)); // handle client effects (hiding weapons in locker). // ProcessTouch is aborting on the cient machine and won't trigger GiveLockerWeapons // HandlePickUpWeapons is aborting itself and only hiding the weapons if (GetOuterAUTWeaponLocker()->Role < ROLE_Authority && GetOuterAUTWeaponLocker()->bIsActive && TouchedBy && TouchedBy->Controller && TouchedBy->Controller->IsLocalController()) { if (GetOuterAUTWeaponLocker()->AllowPickupBy(TouchedBy, true)) { UE_LOG(LogDebug, Verbose, TEXT("%s::OverrideProcessTouch (Pickup) - Handle client touch"), *GetName()); GetOuterAUTWeaponLocker()->HandlePickUpWeapons(TouchedBy, true); GetOuterAUTWeaponLocker()->StartSleeping(); } } return false; } void UUTWeaponLockerStatePickup::StartSleeping_Implementation() { UE_LOG(LogDebug, Verbose, TEXT("%s::StartSleeping (Pickup)"), *GetName()); if (auto WL = GetOuterAUTWeaponLocker()) { if (WL->LockerRespawnTime <= 0.f) { WL->GotoState(WL->SleepingState); } else if (WL->Role == ROLE_Authority) { const bool bTimerActive = WL->GetWorldTimerManager().IsTimerActive(*WL->GetCheckTouchingHandle()); UE_LOG(LogDebug, Verbose, TEXT("%s::StartSleeping - CheckTouching Timer active: %s"), *GetName(), bTimerActive ? 
TEXT("true") : TEXT("false")); if (!bTimerActive) { WL->GetWorldTimerManager().SetTimer(*WL->GetCheckTouchingHandle(), WL, &AUTWeaponLocker::CheckTouching, WL->LockerRespawnTime, false); } } } } void UUTWeaponLockerStatePickup::Tick_Implementation(float DeltaTime) { Super::Tick_Implementation(DeltaTime); auto WL = GetOuterAUTWeaponLocker(); if (WL == NULL) return; if (GetWorld()->GetNetMode() != NM_DedicatedServer) { if (GetWorld()->TimeSeconds > WL->NextProximityCheckTime) { APlayerController* NearbyPC = nullptr; bool bNewPlayerNearby = false; WL->NextProximityCheckTime = GetWorld()->TimeSeconds + 0.2f + 0.1f * FMath::FRand(); if (WL->bIsActive) { for (FLocalPlayerIterator It(GEngine, GetWorld()); It; ++It) { if (It->PlayerController != NULL && It->PlayerController->GetPawn()) { float Dist(FVector::DistSquared(WL->GetActorLocation(), It->PlayerController->GetPawn()->GetActorLocation())); if (Dist < WL->ProximityDistanceSquared) { UE_LOG(LogDebug, Verbose, TEXT("%s::Tick (Pickup) - %s is near this locker"), *GetName(), *GetNameSafe(It->PlayerController)); bNewPlayerNearby = true; NearbyPC = It->PlayerController; break; } } } } if (bNewPlayerNearby != WL->bPlayerNearby) { WL->SetPlayerNearby(NearbyPC, bNewPlayerNearby, true); } } if (WL->bScalingUp && WL->ScaleRate > 0.f) { WL->CurrentWeaponScaleX += DeltaTime * WL->ScaleRate; if (WL->CurrentWeaponScaleX >= 1.f) { WL->CurrentWeaponScaleX = 1.f; WL->bScalingUp = false; } for (int32 i = 0; i < WL->LockerWeapons.Num(); i++) { if (WL->LockerWeapons[i].PickupMesh != NULL) { FVector NewScale = WL->LockerWeapons[i].DesiredScale3D * WL->CurrentWeaponScaleX; NewScale.Y = WL->LockerWeapons[i].DesiredScale3D.Y; WL->LockerWeapons[i].PickupMesh->SetWorldScale3D(NewScale); } } } } } #undef LOCTEXT_NAMESPACE
/**
 * Copyright (c) 2017 Dell Inc., or its subsidiaries. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 */
package io.pravega.tools.pravegastreamstat.logs;

import io.pravega.common.io.StreamHelpers;
import io.pravega.common.util.BitConverter;
import io.pravega.common.util.ByteArraySegment;
import lombok.Data;
import lombok.Getter;
import io.pravega.tools.pravegastreamstat.service.PrintHelper;

import java.io.IOException;

/**
 * A single data frame read from the log. A frame consists of a fixed-size
 * header ({@link #HEADER_LENGTH} bytes) followed by a sequence of
 * {@link DataEntry} records, each carrying its own length-prefixed header.
 */
class DataFrame {

    // region static member

    /** Size of the frame header, in bytes. */
    private final static int HEADER_LENGTH = 6;

    // endregion

    // region instance variables

    /** Raw frame header bytes, as read from the log. */
    @Getter
    private byte[] header = new byte[HEADER_LENGTH];

    /** The frame payload (everything after the header). */
    private ByteArraySegment contents;

    /** Read cursor within {@link #contents}; advanced by {@link #getEntry()}. */
    private int currentPosition = 0;

    // endregion

    // region constructor

    /**
     * Creates a data frame from the log reader's read item.
     * If the payload cannot be read fully, the frame is treated as empty
     * (best-effort: a corrupt frame yields no entries instead of failing).
     *
     * @param item the read item given from the log reader.
     */
    DataFrame(LogReader.ReadItem item) {
        try {
            if (item.getPayload().read(header) != HEADER_LENGTH) {
                throw new IOException();
            }
            // Was hard-coded as "6"; use the named constant so the header size
            // is defined in exactly one place.
            contents = new ByteArraySegment(
                    StreamHelpers.readAll(item.getPayload(), item.getLength() - HEADER_LENGTH));
        } catch (IOException e) {
            // Deliberate swallow: a short/corrupt frame is represented as an
            // empty payload so callers simply see no entries.
            contents = new ByteArraySegment(new byte[0]);
        }
    }

    // endregion

    /**
     * Reads the next entry from the frame and advances the read cursor.
     *
     * @return the next {@link DataEntry}, or {@code null} when the frame is
     *         exhausted or the remaining bytes do not form a valid entry
     *         (corrupt frame).
     */
    DataEntry getEntry() {
        if (currentPosition >= contents.getLength()) {
            return null;
        }

        // Integrity check. This means that we have a corrupt frame.
        if (this.currentPosition + DataEntry.HEADER_SIZE > contents.getLength()) {
            return null;
        }

        // Determine the length of the next record and advance the position by
        // the appropriate amount of bytes.
        DataEntry entry = new DataEntry(this.contents.subSegment(this.currentPosition, DataEntry.HEADER_SIZE));
        this.currentPosition += DataEntry.HEADER_SIZE;

        // Integrity check. This means that we have a corrupt frame.
        if (this.currentPosition + entry.getEntryLength() > contents.getLength()
                || entry.getEntryLength() < 0) {
            return null;
        }

        // Get the result contents and advance the position.
        ByteArraySegment resultContents = this.contents.subSegment(this.currentPosition, entry.getEntryLength());
        this.currentPosition += entry.getEntryLength();
        entry.setData(resultContents);
        return entry;
    }

    /**
     * Holds one entry from the data frame: a parsed entry header plus
     * (once set via {@code setData}) the entry payload.
     */
    @Data
    public static class DataEntry {
        /** Entry header size: 4-byte length + 1 flags byte. */
        private static final int HEADER_SIZE = Integer.BYTES + Byte.BYTES;
        private static final int FLAGS_OFFSET = Integer.BYTES;
        private static final byte FIRST_ENTRY_MASK = 1;
        private static final byte LAST_ENTRY_MASK = 2;

        // Payload length as declared by the entry header. Kept boxed to
        // preserve the lombok-generated getter signature for existing callers.
        private Integer entryLength;
        private boolean firstRecordEntry;
        private boolean lastRecordEntry;
        private ByteArraySegment data;

        /**
         * Parses an entry header.
         *
         * @param header the {@value #HEADER_SIZE}-byte entry header.
         */
        DataEntry(ByteArraySegment header) {
            this.entryLength = BitConverter.readInt(header, 0);
            byte flags = header.get(FLAGS_OFFSET);
            this.firstRecordEntry = (flags & FIRST_ENTRY_MASK) == FIRST_ENTRY_MASK;
            this.lastRecordEntry = (flags & LAST_ENTRY_MASK) == LAST_ENTRY_MASK;
            this.data = null;
        }

        /**
         * Prints the information stored in the entry.
         * NOTE(review): assumes {@code data} has been set (via setData) before
         * this is called; otherwise {@code data.getLength()} throws NPE.
         */
        public void print() {
            PrintHelper.print(PrintHelper.Color.CYAN, String.format("DataFrameEntry: entryLength = %d, " +
                            "firstRecordEntry = %b, lastRecordEntry = %b, data = %d",
                    entryLength, firstRecordEntry, lastRecordEntry, data.getLength()));
            PrintHelper.println();
        }
    }
}
A new report on immigration detention released Wednesday by the American Immigration Council examines the most recent government data on the United States’ complex, sprawling network of facilities used to detain immigrants. The report, “The Landscape of Immigration Detention in the United States,” reveals that detained individuals were commonly held in facilities operated by private entities and located in remote areas, far away from basic community support structures and legal advocacy networks. As government officials and policymakers assess additional funding for Immigration and Customs Enforcement detention, this report details key aspects of the current U.S. detention system and raises important questions about the implications of further expansion efforts. The report draws on government and other records for the 355,729 individuals detained by ICE in fiscal year 2015, the year with the most recent publicly available data. ICE relied on 638 sites scattered throughout the United States to detain individuals, often moving them from one facility to another. About 67 percent of detained individuals were held in privately operated facilities, and 64 percent were confined in a remotely located facility. Detention length was significantly longer in privately operated facilities and in remotely located facilities. Over 48,800 detention facility-related grievances were reported by detainees and community members. The number of grievances was significantly higher in privately operated facilities and in remotely located facilities. Nearly half of detainees (48 percent) were confined at least once in a facility that was located more than 60 miles away from the nearest immigration attorney providing low- or no-cost removal defense services. The majority (60 percent) of adults who were detained were transferred at least once during their detention, leading to confinement in multiple locations.
<reponame>cccaaannn/SimpleBlog import Status from "./enums/Status"; import TokenType from "./enums/TokenType"; interface TokenPayload { id: string, status: Status, username: string, email: string, role: string, type: TokenType }; export { TokenPayload };
<reponame>torstenwerner/mastertheboss package org.acme.remote.service import io.undertow.Undertow; import io.undertow.util.Headers; public class UndertowServer { public static void main(String[] args) { Undertow server1 = Undertow.builder().addHttpListener(8090, "localhost").setHandler(exchange -> { exchange.getResponseHeaders() .put(Headers.CONTENT_TYPE, "text/plain"); exchange.getResponseSender().send("Hello Service 1"); }).build(); server1.start(); Undertow server2 = Undertow.builder().addHttpListener(8091, "localhost").setHandler(exchange -> { exchange.getResponseHeaders() .put(Headers.CONTENT_TYPE, "text/plain"); exchange.getResponseSender().send("Hello Service 2"); }).build(); server2.start(); } }
What is the best shape for a fuzzy set in function approximation? The choice of fuzzy set functions affects how well fuzzy systems approximate functions. The most common fuzzy sets are triangles, trapezoids, and Gaussian bell curves. We compared these sets with many others on a wide range of approximand functions in one, two, and three dimensions. Supervised learning tuned the IF-part set functions and the centroids and volumes of the THEN-part sets. We compared the set functions based on how closely the adaptive fuzzy system converged to the approximand. The sinc function sin(x)/x performed best or nearly best in most cases.
<filename>external/webkit/Source/WebKit2/UIProcess/API/qt/tests/util.h /* Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with this library; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ // Functions and macros that really need to be in QTestLib #include <QEventLoop> #include <QSignalSpy> #include <QTimer> #if !defined(TESTS_SOURCE_DIR) #define TESTS_SOURCE_DIR "" #endif /** * Starts an event loop that runs until the given signal is received. * Optionally the event loop * can return earlier on a timeout. 
* * \return \p true if the requested signal was received * \p false on timeout */ static bool waitForSignal(QObject* obj, const char* signal, int timeout = 10000) { QEventLoop loop; QObject::connect(obj, signal, &loop, SLOT(quit())); QTimer timer; QSignalSpy timeoutSpy(&timer, SIGNAL(timeout())); if (timeout > 0) { QObject::connect(&timer, SIGNAL(timeout()), &loop, SLOT(quit())); timer.setSingleShot(true); timer.start(timeout); } loop.exec(); return timeoutSpy.isEmpty(); } // Will try to wait for the condition while allowing event processing #define QTRY_VERIFY(__expr) \ do { \ const int __step = 50; \ const int __timeout = 5000; \ if (!(__expr)) { \ QTest::qWait(0); \ } \ for (int __i = 0; __i < __timeout && !(__expr); __i+=__step) { \ QTest::qWait(__step); \ } \ QVERIFY(__expr); \ } while(0) // Will try to wait for the condition while allowing event processing #define QTRY_COMPARE(__expr, __expected) \ do { \ const int __step = 50; \ const int __timeout = 5000; \ if ((__expr) != (__expected)) { \ QTest::qWait(0); \ } \ for (int __i = 0; __i < __timeout && ((__expr) != (__expected)); __i+=__step) { \ QTest::qWait(__step); \ } \ QCOMPARE(__expr, __expected); \ } while(0)
Physical activity event regularity and health outcome - 'Undiscovered country' in cohort accelerometer data The increasing use of wearable devices for measuring long-term activity data allows for detailed analyses of real-life behavioral patterns and for the identification of new parameters such as activity event regularity. Thus far, the medical relevance of this new regularity parameter is unknown. The objective of the research work for this paper is to investigate associations between activity regularity and resting systolic blood pressure, as an exemplary well-established cardiovascular risk factor. Using accelerometer and blood pressure data of N=5695 subjects from the NHANES 2005-6 cohort study, three characteristic physical activity parameters (regularity, duration and intensity) were computed and compared for the upper and lower quartiles of subjects with regard to their blood pressure values. Results show statistically significant differences in the parameters regularity (p<0.001) and duration (p=0.008) of physical activity events, but not in intensity (p=0.889). Results confirm that subjects with low resting systolic blood pressure not only are active for longer periods of time, but also are more regularly active. It also shows that low-intensity, short-lived physical activity (< 10 min.) is associated with health-related outcome parameters. More research is necessary to make full use of detailed activity behavior data, and in particular to uncover relations between physical activity patterns and health outcome.
/*
 * Copyright (c) 2018, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
 *
 * WSO2 Inc. licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.wso2.internal.apps.license.manager.client.utils;

import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.ssl.SSLContexts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.wso2.internal.apps.license.manager.client.exception.LicenseManagerException;

import java.io.IOException;
import java.io.InputStream;
import java.security.KeyManagementException;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.CertificateException;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLContext;

/**
 * Util functions which are required while executing the backend services for license manager.
 */
public class ServiceUtils {

    // Fixed copy-paste bug: logger was created for PropertyReader.class,
    // which mislabels every log line emitted from this class.
    private static final Logger log = LoggerFactory.getLogger(ServiceUtils.class);

    /**
     * Creates a trusted HTTP client to initiate a secure connection with the micro services.
     * The client authenticates with basic credentials and trusts the certificates in the
     * configured trust store.
     *
     * @return a configured {@link CloseableHttpClient}
     * @throws LicenseManagerException if the connection initiation fails
     */
    public static CloseableHttpClient createTrustedHttpClient() throws LicenseManagerException {

        PropertyReader properties = new PropertyReader();

        // Setting up basic authentication for all scopes.
        UsernamePasswordCredentials credentials =
                new UsernamePasswordCredentials(properties.getMicroServiceUsername(),
                        properties.getMicroServicePassword());
        CredentialsProvider provider = new BasicCredentialsProvider();
        provider.setCredentials(AuthScope.ANY, credentials);

        HttpClientBuilder httpClientBuilder = HttpClientBuilder.create();

        // Load the trust store from the classpath; try-with-resources closes the
        // stream (the original leaked it).
        // NOTE(review): getResourceAsStream may return null if the resource is
        // missing; KeyStore.load(null, pwd) then yields an empty trust store —
        // same behavior as before, but worth confirming the resource exists.
        try (InputStream trustStoreStream = Thread.currentThread().getContextClassLoader()
                .getResourceAsStream(properties.getTrustStoreServiceName())) {

            // Make the trusted connection.
            KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
            keyStore.load(trustStoreStream, properties.getTrustStoreServicePassword().toCharArray());
            SSLContext sslContext = SSLContexts.custom().loadTrustMaterial(keyStore, null).build();

            // WARNING: NoopHostnameVerifier disables hostname verification.
            // Only for staging tests — switch to the default verifier in production:
            // SSLConnectionSocketFactory sslSocketFactory = new SSLConnectionSocketFactory(sslContext);
            HostnameVerifier allowAllHosts = new NoopHostnameVerifier();
            SSLConnectionSocketFactory sslSocketFactory = new SSLConnectionSocketFactory(sslContext, allowAllHosts);
            httpClientBuilder.setSSLSocketFactory(sslSocketFactory);

            if (log.isDebugEnabled()) {
                log.debug("A secure connection is established with the micro service. ");
            }
            return httpClientBuilder.setDefaultCredentialsProvider(provider).build();
        } catch (KeyStoreException | CertificateException | NoSuchAlgorithmException | IOException |
                KeyManagementException e) {
            throw new LicenseManagerException("Failed to initiate the connection. ", e);
        }
    }
}
import ItemBonus from './ItemBonus'
import ItemSuffixType from '../enum/ItemSuffixType'

/**
 * A randomized item suffix: identifies the suffix, its type, and the
 * stat bonuses it grants.
 */
export default interface ItemSuffix {
  /** Unique id of this suffix. */
  id: number
  /** Category of the suffix (see ItemSuffixType). */
  type: ItemSuffixType
  /** Bonuses conferred by this suffix. */
  bonus: ItemBonus[]
}
Among the 109 respondents, the minimum and maximum ages were 18 and 50 years.
They were made aware of these by different organizations working in the field of HIV and AIDS, but they still intended to practise unsafe sex with the female sex workers.
<gh_stars>0 /* * Copyright (c) 2016, Alliance for Open Media. All rights reserved * * This source code is subject to the terms of the BSD 2 Clause License and * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License * was not distributed with this source code in the LICENSE file, you can * obtain it at www.aomedia.org/license/software. If the Alliance for Open * Media Patent License 1.0 was not distributed with this source code in the * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ #ifndef INCLUDE_LIBYUV_SCALE_ROW_H_ // NOLINT #define INCLUDE_LIBYUV_SCALE_ROW_H_ #include "libyuv/basic_types.h" #include "libyuv/scale.h" #ifdef __cplusplus namespace libyuv { extern "C" { #endif #if defined(__pnacl__) || defined(__CLR_VER) || \ (defined(__i386__) && !defined(__SSE2__)) #define LIBYUV_DISABLE_X86 #endif // Visual C 2012 required for AVX2. #if defined(_M_IX86) && !defined(__clang__) && \ defined(_MSC_VER) && _MSC_VER >= 1700 #define VISUALC_HAS_AVX2 1 #endif // VisualStudio >= 2012 // The following are available on all x86 platforms: #if !defined(LIBYUV_DISABLE_X86) && \ (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__)) #define HAS_FIXEDDIV1_X86 #define HAS_FIXEDDIV_X86 #define HAS_SCALEARGBCOLS_SSE2 #define HAS_SCALEARGBCOLSUP2_SSE2 #define HAS_SCALEARGBFILTERCOLS_SSSE3 #define HAS_SCALEARGBROWDOWN2_SSE2 #define HAS_SCALEARGBROWDOWNEVEN_SSE2 #define HAS_SCALECOLSUP2_SSE2 #define HAS_SCALEFILTERCOLS_SSSE3 #define HAS_SCALEROWDOWN2_SSE2 #define HAS_SCALEROWDOWN34_SSSE3 #define HAS_SCALEROWDOWN38_SSSE3 #define HAS_SCALEROWDOWN4_SSE2 #endif // The following are available on VS2012: #if !defined(LIBYUV_DISABLE_X86) && defined(VISUALC_HAS_AVX2) #define HAS_SCALEADDROW_AVX2 #define HAS_SCALEROWDOWN2_AVX2 #define HAS_SCALEROWDOWN4_AVX2 #endif // The following are available on Visual C: #if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && !defined(__clang__) #define HAS_SCALEADDROW_SSE2 #endif // The following are 
available on Neon platforms: #if !defined(LIBYUV_DISABLE_NEON) && !defined(__native_client__) && \ (defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__)) #define HAS_SCALEARGBCOLS_NEON #define HAS_SCALEARGBROWDOWN2_NEON #define HAS_SCALEARGBROWDOWNEVEN_NEON #define HAS_SCALEFILTERCOLS_NEON #define HAS_SCALEROWDOWN2_NEON #define HAS_SCALEROWDOWN34_NEON #define HAS_SCALEROWDOWN38_NEON #define HAS_SCALEROWDOWN4_NEON #define HAS_SCALEARGBFILTERCOLS_NEON #endif // The following are available on Mips platforms: #if !defined(LIBYUV_DISABLE_MIPS) && !defined(__native_client__) && \ defined(__mips__) && defined(__mips_dsp) && (__mips_dsp_rev >= 2) #define HAS_SCALEROWDOWN2_MIPS_DSPR2 #define HAS_SCALEROWDOWN4_MIPS_DSPR2 #define HAS_SCALEROWDOWN34_MIPS_DSPR2 #define HAS_SCALEROWDOWN38_MIPS_DSPR2 #endif // Scale ARGB vertically with bilinear interpolation. void ScalePlaneVertical(int src_height, int dst_width, int dst_height, int src_stride, int dst_stride, const uint8* src_argb, uint8* dst_argb, int x, int y, int dy, int bpp, enum FilterMode filtering); void ScalePlaneVertical_16(int src_height, int dst_width, int dst_height, int src_stride, int dst_stride, const uint16* src_argb, uint16* dst_argb, int x, int y, int dy, int wpp, enum FilterMode filtering); // Simplify the filtering based on scale factors. enum FilterMode ScaleFilterReduce(int src_width, int src_height, int dst_width, int dst_height, enum FilterMode filtering); // Divide num by div and return as 16.16 fixed point result. int FixedDiv_C(int num, int div); int FixedDiv_X86(int num, int div); // Divide num - 1 by div - 1 and return as 16.16 fixed point result. int FixedDiv1_C(int num, int div); int FixedDiv1_X86(int num, int div); #ifdef HAS_FIXEDDIV_X86 #define FixedDiv FixedDiv_X86 #define FixedDiv1 FixedDiv1_X86 #else #define FixedDiv FixedDiv_C #define FixedDiv1 FixedDiv1_C #endif // Compute slope values for stepping. 
void ScaleSlope(int src_width, int src_height, int dst_width, int dst_height, enum FilterMode filtering, int* x, int* y, int* dx, int* dy); void ScaleRowDown2_C(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown2_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* dst, int dst_width); void ScaleRowDown2Linear_C(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown2Linear_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* dst, int dst_width); void ScaleRowDown2Box_C(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown2Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* dst, int dst_width); void ScaleRowDown4_C(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown4_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* dst, int dst_width); void ScaleRowDown4Box_C(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown4Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* dst, int dst_width); void ScaleRowDown34_C(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown34_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* dst, int dst_width); void ScaleRowDown34_0_Box_C(const uint8* src_ptr, ptrdiff_t src_stride, uint8* d, int dst_width); void ScaleRowDown34_0_Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* d, int dst_width); void ScaleRowDown34_1_Box_C(const uint8* src_ptr, ptrdiff_t src_stride, uint8* d, int dst_width); void ScaleRowDown34_1_Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* d, int dst_width); void ScaleCols_C(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int x, int dx); void ScaleCols_16_C(uint16* dst_ptr, const uint16* src_ptr, int dst_width, int x, int dx); void ScaleColsUp2_C(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int, int); void 
ScaleColsUp2_16_C(uint16* dst_ptr, const uint16* src_ptr, int dst_width, int, int); void ScaleFilterCols_C(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int x, int dx); void ScaleFilterCols_16_C(uint16* dst_ptr, const uint16* src_ptr, int dst_width, int x, int dx); void ScaleFilterCols64_C(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int x, int dx); void ScaleFilterCols64_16_C(uint16* dst_ptr, const uint16* src_ptr, int dst_width, int x, int dx); void ScaleRowDown38_C(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown38_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* dst, int dst_width); void ScaleRowDown38_3_Box_C(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown38_3_Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* dst_ptr, int dst_width); void ScaleRowDown38_2_Box_C(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown38_2_Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* dst_ptr, int dst_width); void ScaleAddRow_C(const uint8* src_ptr, uint16* dst_ptr, int src_width); void ScaleAddRow_16_C(const uint16* src_ptr, uint32* dst_ptr, int src_width); void ScaleARGBRowDown2_C(const uint8* src_argb, ptrdiff_t src_stride, uint8* dst_argb, int dst_width); void ScaleARGBRowDown2Linear_C(const uint8* src_argb, ptrdiff_t src_stride, uint8* dst_argb, int dst_width); void ScaleARGBRowDown2Box_C(const uint8* src_argb, ptrdiff_t src_stride, uint8* dst_argb, int dst_width); void ScaleARGBRowDownEven_C(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width); void ScaleARGBRowDownEvenBox_C(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width); void ScaleARGBCols_C(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx); void ScaleARGBCols64_C(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx); void 
ScaleARGBColsUp2_C(uint8* dst_argb, const uint8* src_argb, int dst_width, int, int); void ScaleARGBFilterCols_C(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx); void ScaleARGBFilterCols64_C(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx); // Specialized scalers for x86. void ScaleRowDown2_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2Linear_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2Linear_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown34_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown34_1_Box_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown34_0_Box_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown38_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown38_3_Box_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown38_2_Box_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void 
ScaleRowDown2_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2Linear_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2Box_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2Linear_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2Box_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4Box_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4Box_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown34_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown34_1_Box_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown34_0_Box_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown38_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown38_3_Box_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown38_2_Box_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleAddRow_SSE2(const uint8* src_ptr, uint16* dst_ptr, int src_width); void ScaleAddRow_AVX2(const uint8* src_ptr, uint16* dst_ptr, int src_width); void ScaleAddRow_Any_SSE2(const uint8* src_ptr, uint16* dst_ptr, int src_width); void ScaleAddRow_Any_AVX2(const uint8* 
src_ptr, uint16* dst_ptr, int src_width); void ScaleFilterCols_SSSE3(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int x, int dx); void ScaleColsUp2_SSE2(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int x, int dx); // ARGB Column functions void ScaleARGBCols_SSE2(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx); void ScaleARGBFilterCols_SSSE3(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx); void ScaleARGBColsUp2_SSE2(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx); void ScaleARGBFilterCols_NEON(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx); void ScaleARGBCols_NEON(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx); void ScaleARGBFilterCols_Any_NEON(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx); void ScaleARGBCols_Any_NEON(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx); // ARGB Row functions void ScaleARGBRowDown2_SSE2(const uint8* src_argb, ptrdiff_t src_stride, uint8* dst_argb, int dst_width); void ScaleARGBRowDown2Linear_SSE2(const uint8* src_argb, ptrdiff_t src_stride, uint8* dst_argb, int dst_width); void ScaleARGBRowDown2Box_SSE2(const uint8* src_argb, ptrdiff_t src_stride, uint8* dst_argb, int dst_width); void ScaleARGBRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleARGBRowDown2Linear_NEON(const uint8* src_argb, ptrdiff_t src_stride, uint8* dst_argb, int dst_width); void ScaleARGBRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleARGBRowDown2_Any_SSE2(const uint8* src_argb, ptrdiff_t src_stride, uint8* dst_argb, int dst_width); void ScaleARGBRowDown2Linear_Any_SSE2(const uint8* src_argb, ptrdiff_t src_stride, uint8* dst_argb, int dst_width); void ScaleARGBRowDown2Box_Any_SSE2(const uint8* src_argb, ptrdiff_t src_stride, uint8* dst_argb, int dst_width); void ScaleARGBRowDown2_Any_NEON(const uint8* 
src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleARGBRowDown2Linear_Any_NEON(const uint8* src_argb, ptrdiff_t src_stride, uint8* dst_argb, int dst_width); void ScaleARGBRowDown2Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleARGBRowDownEven_SSE2(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width); void ScaleARGBRowDownEvenBox_SSE2(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width); void ScaleARGBRowDownEven_NEON(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width); void ScaleARGBRowDownEvenBox_NEON(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width); void ScaleARGBRowDownEven_Any_SSE2(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width); void ScaleARGBRowDownEvenBox_Any_SSE2(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width); void ScaleARGBRowDownEven_Any_NEON(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width); void ScaleARGBRowDownEvenBox_Any_NEON(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width); // ScaleRowDown2Box also used by planar functions // NEON downscalers with interpolation. // Note - not static due to reuse in convert for 444 to 420. void ScaleRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown2Linear_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown4_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); // Down scale from 4 to 3 pixels. 
Use the neon multilane read/write // to load up the every 4th pixel into a 4 different registers. // Point samples 32 pixels to 24 pixels. void ScaleRowDown34_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown34_0_Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown34_1_Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); // 32 -> 12 void ScaleRowDown38_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); // 32x3 -> 12x1 void ScaleRowDown38_3_Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); // 32x2 -> 12x1 void ScaleRowDown38_2_Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown2Linear_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown2Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown4_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown34_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown34_0_Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown34_1_Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); // 32 -> 12 void ScaleRowDown38_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); // 32x3 -> 12x1 void ScaleRowDown38_3_Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); // 32x2 -> 12x1 void ScaleRowDown38_2_Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, 
uint8* dst_ptr, int dst_width); void ScaleAddRow_NEON(const uint8* src_ptr, uint16* dst_ptr, int src_width); void ScaleAddRow_Any_NEON(const uint8* src_ptr, uint16* dst_ptr, int src_width); void ScaleFilterCols_NEON(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int x, int dx); void ScaleFilterCols_Any_NEON(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int x, int dx); void ScaleRowDown2_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown2Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown4_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown4Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown34_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown34_0_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* d, int dst_width); void ScaleRowDown34_1_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* d, int dst_width); void ScaleRowDown38_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown38_2_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown38_3_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); #ifdef __cplusplus } // extern "C" } // namespace libyuv #endif #endif // INCLUDE_LIBYUV_SCALE_ROW_H_ NOLINT
This application is the US national phase of International Application No. PCT/GB99/03055, filed 14 Sep. 1999, which designated the U.S., the entire content of which is hereby incorporated by reference. 1. Field of the Invention This invention relates to the fabrication of optical waveguides. 2. Discussion of Prior Art One known technique for fabricating optical waveguides is the so-called direct bonding (or direct interfacial bonding) technique. Direct bonding (DB) is a fabrication technique that uses the Van der Waals forces present when two atomically flat bodies approach each other to create a bond between two bodies. If the bodies are laminas of optical material having appropriate refractive indices, the material laminas can be joined to form waveguiding boundaries. In one established way to form such a bond the surfaces of two pieces of optical material are polished so as to be very flat (i.e. substantially flat at atomic dimensions). The crystalline structures of the two polished faces are preferably aligned with each other and the polished faces are pressed together. A heat treatment can be useful to encourage a pyroelectric effect and the exchange of electrons between the two surfaces. This gives rise to an electrostatic attraction between the two surfaces, which tends to expel any remaining air or liquid from between the two surfaces. A final annealing step can improve the bond strength further. A DB bond can be formed irrespective of the lattice constants and orientation of the bodies involved and causes no degradation of the crystalline microstructure of either material. By contacting surfaces in such a non-destructive way, DB preserves the bulk characteristics of each bonded material whilst avoiding possible problems caused by lattice defects, such as increased propagation loss and optical damage.
EP-0598395 describes forming an optical waveguide device by direct bonding of a support substrate and a low refractive index layer on a glass substrate, then etching the glass substrate. This invention provides an optical waveguide comprising at least a guiding lamina of optical material bonded by direct interfacial bonding to a superstructure lamina of optical material, in which regions of the guiding lamina have modified optical properties so as to define a light guiding path along the guiding lamina characterised in that the waveguide further comprises a second superstructure lamina bonded by direct interfacial bonding to the guiding lamina. The invention recognises and addresses the shortcomings of previous proposals for the use of DB structures in optical waveguides. In such previous proposals, a flat lamina of a material having a raised refractive index (forming a waveguide "core") is bonded between two laminas of material having a lower refractive index (forming a waveguide "superstructure"). While this provided a bulk guiding structure, the large lateral dimension of the flat "core" lamina meant that the arrangement was not useful for many waveguiding applications or as a single-mode waveguide. In contrast, in the invention, regions of the core lamina have modified optical properties so as to define a light guiding path along the core lamina. This can give a greatly increased flexibility of use and allow the guiding path to be much more tightly defined than in previous arrangements. Although the method is suitable for use with many types of materials, such as glasses, it is preferred that the core lamina is a ferroelectric material, allowing the modified regions to be generated by electrical poling. A particularly useful ferroelectric material having well-studied optical and electrical properties, is periodically poled lithium niobate (PPLN).
PPLN combines a large non-linear coefficient, a widely-controllable phase-matching wavelength, and zero walk-off characteristics that make it an ideal material to achieve quasi-phase matching (QPM) for non-linear frequency conversion. With recent improvements in the efficiency of second-harmonic generation (SHG) within PPLN substrates, it is recognised in the present invention that the use of such a material in an appropriate waveguide geometry formed using the invention can provide a realisation of various compact non-linear devices based on harmonic or parametric generation. The present method is particularly appropriate for use with PPLN, and has several advantages over other techniques for fabricating waveguides using PPLN such as the so-called "annealed proton exchange" technique and the "titanium indiffusion" technique, both of which act on a single PPLN crystal and modify the crystal near the surface in order to create regions of higher refractive index for optical confinement. Previous experiments investigating the bonding characteristics of PPLN have been directed towards fabricating thick multi-laminated stacks of the material for a large physical aperture, and thus high power applications. In contrast, creating a sufficiently thin lamina of PPLN increases the average pump intensity applied to the domain-inverted structure via optical confinement, and thus allows efficient SHG even at low pump powers. Fabrication of such a device is obtainable by bonding PPLN onto a suitable substrate before precision polishing down to waveguide dimensions, a method which has already been demonstrated in the production of LiNbO3 planar waveguides for electro-optic applications.
One of the primary attractions offered by this technique is that the non-linearity and domain characteristics of the PPLN structure after bonding should remain unchanged from the bulk material — a combination that annealed proton exchange and Ti indiffusion methods are close to achieving, but not yet at their full theoretical efficiencies. A further advantage of the present method is the extra flexibility available when designing devices, as combinations of multiple laminas with different material properties are now possible. Viewed from a second aspect this invention provides a method of fabricating an optical waveguide, the method comprising the steps of: (a) bonding, by direct interfacial bonding, a guiding lamina of optical material to a superstructure lamina of optical material; (b) before, during or after step (a), modifying optical properties of regions of the guiding lamina so as to define a light guiding path along the guiding lamina; characterised in that the method further comprises the steps of: (c) after steps (a) and (b), removing material from the guiding lamina to reduce the thickness of the guiding lamina (10); and (d) after step (c), bonding, by direct interfacial bonding, a further superstructure lamina (20) to the guiding lamina.
def localpaths(self, urldata, d):
    """Return candidate local paths for a file:// URL.

    urldata: fetch data object; only its ``decodedurl`` attribute is read.
    d: datastore providing ``getVar`` for 'FILESPATH' and 'DL_DIR'.

    Returns a list of paths: the URL's own path when it is absolute, otherwise
    the accumulated list of locations searched (the history reported by
    bb.utils.which, plus — as a last resort — a path under DL_DIR).
    """
    searched = []
    path = urldata.decodedurl
    newpath = path
    # Absolute paths are taken as-is; no FILESPATH search is performed.
    if path[0] == "/":
        return [path]
    filespath = d.getVar('FILESPATH')
    if filespath:
        logger.debug(2, "Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
        # history=True makes which() also report every location it examined;
        # we accumulate those so callers see the full search path.
        newpath, hist = bb.utils.which(filespath, path, history=True)
        searched.extend(hist)
    if (not newpath or not os.path.exists(newpath)) and path.find("*") != -1:
        # Wildcard URL that did not resolve to an existing file: search for
        # the FILESPATH directories themselves (".") and return the locations
        # that were examined instead of a concrete file.
        newpath, hist = bb.utils.which(filespath, ".", history=True)
        searched.extend(hist)
        logger.debug(2, "Searching for %s in path: %s" % (path, newpath))
        return searched
    if not os.path.exists(newpath):
        # Nothing found via FILESPATH: default to a location under DL_DIR,
        # pre-creating its parent directory so a later download can land there.
        dldirfile = os.path.join(d.getVar("DL_DIR"), path)
        logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path))
        bb.utils.mkdirhier(os.path.dirname(dldirfile))
        searched.append(dldirfile)
        return searched
    return searched
The ballsiest pop song since 'Biology' arrives... and it's come all the way from Denmark. At last! It's about time someone realised that the B-52's were (are?) a thrillingly innovative, idiosyncratic musical force, and Alphabeat have made the most of their eureka moment. 'Fantastic Six', their debut UK single, suggests the Danish sextet might have perfected the same grab-bag approach towards pop songwriting as their beehive-sporting heroes. Either way, it's time to slip into your dancing shoes. The array of pop culture references that 'Fantastic Six' crams into its three-and-a-half minutes is simply dazzling – 'Drive My Car', Wham!, 'Uptown Girl', Major Tom and Sesame Street are all in there, among many others – but the myriad influences are twisted into such unexpected shapes, lurching off in such strange directions, that the final product sounds effortlessly original. On the one hand it's a bit like High School Musical as imagined by the Hoxton crowd; on the other, it might just be the ballsiest pop song since 'Biology'.
Facilitators and barriers to behaviour change within a lifestyle program for women with obesity to prevent excess gestational weight gain: a mixed methods evaluation Background Maternal obesity is associated with health risks for women and their babies and is exacerbated by excess gestational weight gain. The aim of this study was to describe women's experiences and perspectives in attending a Healthy Pregnancy Service designed to optimise healthy lifestyle and support recommended gestational weight gain for women with obesity. Methods An explanatory sequential mixed methods study design utilised two questionnaires (completed in early and late pregnancy) to quantify feelings, motivation and satisfaction with the service, followed by semi-structured interviews that explored barriers and enablers of behaviour change. Data were analysed separately and then interpreted together. Results Overall, 49 women attending the service completed either questionnaire 1, 2 or both and were included in the analysis. Fourteen women were interviewed. Prior to pregnancy, many women had gained weight and attempted to lose weight independently, and reported they were highly motivated to achieve a healthy lifestyle. During pregnancy, diet changes were reported as easier to make and sustain than exercise changes. Satisfaction with the service was high. Key factors identified in qualitative analysis were: service support enabled change; motivation to change behaviour, social support, barriers to making change (intrinsic, extrinsic and clinic-related), post-partum lifestyle and needs. On integration of data, qualitative and quantitative findings aligned. Conclusions The Healthy Pregnancy service was valued by women. Barriers and enablers to the delivery of an integrated model of maternity care that supported healthy lifestyle and recommended gestational weight gain were identified.
These findings have informed and improved implementation and further scale up of this successful service model, integrating healthy lifestyle into routine antenatal care of women with obesity. Trial registration This trial is registered with the Australian New Zealand Clinical Trials Registry (no.12620000985987). Registration date 30/09/2020, retrospectively registered. http://www.anzctr.org.au/ Supplementary Information The online version contains supplementary material available at 10.1186/s12884-021-04034-7. Contributions to literature Randomised controlled trials designed to improve healthy lifestyle, limit gestational weight gain and improve maternal and infant outcomes are effective. The priority and remaining challenge is to implement these programs into usual maternal care Successful programs require dedicated, well-trained health professionals that can educate, empower and support women to make changes This mixed-methods study identifies barriers and enablers to the delivery of an integrated model of maternity care. These findings contribute to gaps in the literature and will inform and improve implementation and further scale-up of this successful service model Background Maternal obesity and excessive gestational weight gain (GWG) both independently contribute to adverse maternal and neonatal outcomes, as well as to increased risk of postpartum obesity development in mothers and their children. The National Academy of Medicine (previously Institute of Medicine, IOM) recommendations for healthy GWG are specific to a woman's prepregnancy BMI. A systematic review and metaanalysis of more than one million women demonstrated that almost half gained above GWG recommendations leading to adverse maternal and neonatal outcomes. Women above a healthy weight preconception had the highest prevalence of excess GWG. Pregnancy has long been considered 'a teachable moment' for optimised weight gain and obesity prevention. 
Interventions designed to improve healthy lifestyle, limit gestational weight gain and improve maternal and infant outcomes are effective, supported by level I evidence from a large meta-analysis. The priority and remaining challenge is to implement these programs used in randomised controlled trials (RCTs) into usual maternal care. Successful programs require dedicated, well-trained health professionals that can educate, empower and support women to make changes. Barriers include antenatal health professionals' lack of skills, time and confidence discussing sensitive issues, and women can feel stigmatised if staff are not well trained. Generally, women are motivated to make positive changes, but want support and direction from qualified health professionals (e.g. midwives, dieticians, allied health professionals). The Healthy Pregnancy service was established in 2015 at Monash Health, the largest health service in Australia, to care for women with a pre-pregnancy BMI of ≥35 kg/m 2. This co-designed antenatal service integrates an embedded, evidence-based lifestyle intervention, the HeLP-her Healthy Lifestyle in Pregnancy Intervention. The HeLP-her program intervention has been shown to be effective in reproductive aged women in multiple settings outside of pregnancy as well as in pregnancy, including women at increased risk of gestational diabetes in routine antenatal care and is also cost effective. The program is underpinned theoretically by The Social Cognitive Theory and promotes goal setting, self-monitoring, social support and problem solving. Here, in women with obesity at high risk of pregnancy complications, dedicated staff including a physician (endocrinologist) and health coach delivered the program, designed to address the barriers of time constraints and expertise of routine maternity health professionals.
The health coach is a qualified exercise physiologist with qualifications in behavioural neuroscience and expertise in motivational interviewing in reproductive aged women for weight gain prevention. This model of care project was designed to incorporate pragmatic implementation research, where evidence is generated in the context of usual clinical care. Mixed methods approaches in implementation research apply both quantitative and qualitative approaches to provide novel insights on implementation of models of care. Here, the aim of this study was to apply mixed methods to explore the experiences and perspectives of the women attending the integrated antenatal clinic and Healthy Pregnancy service, to understand the barriers and enablers to lifestyle change and to identify how this service can be improved to inform sustainable implementation and scale-up. Study design An explanatory sequential mixed methods study design was used. This two-phase design involved using two questionnaires completed by pregnant women at different time points during pregnancy, followed by qualitative interviews. Using this approach, the qualitative results aimed to expand and confirm findings from the quantitative phase and data was integrated in analysis. The consolidated criteria for reporting qualitative research (COREQ) 32-item checklist was used in planning and reporting. This study was approved by the Monash Health Ethics committee (RES-17-0000-313 L). Service setting This study is part of a broader pragmatic implementation trial (The Healthy Lifestyle in Pregnancy Project, HiPP) that evaluated the effect of a lifestyle intervention on gestational weight gain and maternal and infant outcomes in women with maternal obesity (Australian New Zealand Clinical Trials Registry: 12620000985987). The project was implemented within a large hospital network in metropolitan Melbourne, Australia, with approximately 10,000 live births per year.
Australia offers universal freely accessible healthcare and Monash Health is the largest health service nationally, situated in a catchment with a low socio-economic status (SES), diverse ethnic background population. The specific service provided care to women with a BMI of 35-43 kg/m 2 with approximately 200 live births per year. Women with a BMI of > 43 kg/m 2 were triaged to deliver at the tertiary hospital site to accommodate their additional needs as standard approach within the health service. The Healthy Pregnancy service embedded a patientled behaviour change lifestyle intervention delivered by a health Coach and a physician (intervention staff) over five sessions integrated with routine pregnancy care. The physician also managed medical complications in pregnancy including gestational diabetes. The intervention was largely delivered by the health coach and focused on behaviour change and self-management of weight, healthy diet and exercise. Skills were practised in goal setting, problem solving and relapse prevention, with the aim to achieve small, sustainable changes to healthy lifestyle behaviours. The sessions were one-on-one and between 20 and 40 min in duration. The first session was longer, with both education provision (i.e. dietary and physical activity guidelines for pregnancy, GWG recommendations) and behavioural skills discussed and practiced. The study design was cognisant of the clinical demands of midwives and obstetricians who are generally time-poor and lack the training and confidence to spend prolonged periods counselling women on healthy lifestyle. Therefore, midwives and obstetricians in the Healthy Pregnancy service did not deliver the behaviour change intervention, but were part of the team and were supportive of the program messages and reinforced this with women throughout. 
Women who attended the Healthy Pregnancy service between 2016 and 2018 were compared to standard care (those not receiving embedded lifestyle intervention) for the primary outcome of GWG and secondary outcome of maternal and infant outcomes and implementation knowledge. Detailed study design is described previously: the first intervention session coincided with the first medical review, typically between 12 and 18 weeks, and final session at ~36 weeks. Intervention uptake was 95%, and 87% of women attended 80% or more of the 5 sessions. Health professionals' perspectives of the service were also studied. Questionnaire design Questionnaires were developed to understand pregnant women's experience in attending the service to identify barriers and enablers to behaviour change. They were developed by a team with multidisciplinary expertise, including dietetics, exercise physiology, obstetrics, endocrinology and psychology. While the questionnaires were not piloted here, they were based on a priori questionnaires used in over a decade of research focused on obesity prevention in reproductive aged women. These include observational studies evaluating health related behaviours among women with gestational diabetes mellitus, polycystic ovary syndrome and type 2 diabetes as well as RCTs to limit gestational weight gain and prevent weight gain in non-pregnant women (studies performed by our group). Here, we used the validated self-management questionnaire for health-related diet and exercise behaviours by Sallis. To our knowledge, there is no standard or validated approach to evaluating aspects of care evaluated in our study, which therefore informed the use of our internally developed questionnaires here. Questionnaire one was completed at (or close to) the initial session (12-18 weeks). Questions assessed demographics, basic diet and physical activity, risk perception, motivation and readiness to change and self-management strategies.
Questionnaire two was completed at (or just after) the final session (36 weeks). Questions assessed satisfaction with service, changes made in pregnancy and corresponding barriers, and self-management strategies. Some questions included a statement (e.g. I think it is important to have a healthy lifestyle during pregnancy) with responses on a 5-point Likert scale. In keeping with a pragmatic clinical trial approach, we did not intend for all women at the Healthy Pregnancy Service to complete the questionnaires, but rather a percentage of these. Questionnaires were initially distributed by mail in early 2017; between April 2017 and February 2018, the questionnaires were distributed in person by a researcher/ clinician at the Healthy Pregnancy Service and completed in the clinic. Questionnaire 1 and 2 are in the Additional files 1 and 2. Qualitative interviews Semi-structured interviews (Additional file 3) were conducted with a sample of women attending the clinic to gain a deeper understanding of women's experience attending the service, and the barriers and enablers to behaviour change. Purposive sampling targeted women who were more than 31-32 weeks gestation, were representative of parous and nulliparous as well as those with and without GDM, and would have attended a substantial proportion of their intervention care. Women participating in the qualitative analysis could do so without participating in the quantitative component. Questions were developed based on a preliminary analysis of the questionnaires. Participants were recruited by RG (a female clinician-researcher) in person. Interviews were conducted over the phone by RG, who had postgraduate expertise in qualitative methods. RG worked in the clinic as a physician for 1 year prior to commencement of the research project and then ceased clinical work to focus on the research. Written informed consent was obtained from all participants prior to the interview.
The interviews were between 10 and 25 min duration and conducted in July and August 2017. Data from the interviews was audiotaped and transcribed verbatim by an independent transcribing service. Participant details were deidentified for anonymity. Interviews were collected until data saturation was reached, determined when no new ideas emerged from the interviews. Data analyses Quantitative data Quantitative data were analysed using STATA software, version 15.0. Categorical data were presented as frequency and percentage (n (%)). Continuous data were presented as mean (standard deviation). Responses to 5-point Likert-scaled questions (e.g. strongly agree, agree, somewhat agree, disagree, strongly disagree; or daily, weekly, monthly, occasionally, never; or always, very often, often, occasionally, never) were collapsed into 2 categories (agree/disagree; or regularly/rarely; or often/rarely) respectively. Mann Whitney test (Wilcoxon signed-rank) was used to compare responses to questions repeated in questionnaire 1 and 2. Qualitative data Transcripts were independently analysed and coded by two researchers (RG and CL) using the NVivo 12 software (QSR International Pty Ltd. 2018). Data was searched for concepts in relation to interview questions. Codes were grouped into themes using inductive analysis to meet the aims of the study, in a constant comparative manner using a generic approach as described by Patton and Harding. The objective was for the themes to be strongly linked to the data, so an inductive approach was chosen using raw data to derive the structure of analysis. Consensus regarding the emerging themes was reached between the two researchers. Integration of qualitative and quantitative data was performed with a mixed methods design using the approach described by Fetters et al. At the study design level, an explanatory sequential design was used; the quantitative data was collected and analysed first, informing the qualitative data collection and analysis.
At the interpretation and reporting level, integration occurred through narrative, with a contiguous approach. Quantitative phase Questionnaires were initially distributed by mail in early 2017, however the response rate was poor (14%). Between April 2017 and February 2018, the questionnaires were distributed in person and completed in the clinic (96% response rate). Of the 157 women attending the lifestyle intervention between 2016 and 2018, 58 women completed either questionnaire 1, 2 or both. After excluding 9 women (either did not attend lifestyle intervention clinic, BMI outside of range, moved to another health service), 49 women were included in the analysis: 44 completed questionnaire 1, 40 completed questionnaire 2 and 35 completed both questionnaire 1 and 2. The demographics of questionnaire 1 participants are shown in Table 1. Risk perceptions, health beliefs, stage of change Twenty-seven (63%) of women identified that 5-9 kg is the ideal weight gain in pregnancy (consistent with guideline recommendations), 14 (33%) thought 0-5 kg was appropriate. Twenty-six (65%) recognised that increased weight gain was not associated with more nutrients for the baby, whereas increased weight was reported as associated with big babies/macrosomia (17 (41%)), diabetes in pregnancy/gestational diabetes (25 (61%)) and high blood pressure in pregnancy (14 (34%)). Early in pregnancy, 42 (95%) agreed that a healthy lifestyle in pregnancy is important and 37 (84%) thought they were at risk of excess weight gain, whilst 43 (100%) believed they could manage healthy lifestyle and weight gain in pregnancy, and 43 (97%) intended to take actions to prevent excess weight gain. Readiness to change Motivation was assessed in Fig. 1. Participants rated importance/readiness/confidence in making healthy lifestyle changes during pregnancy regarding diet/physical activity (PA), and responses were on a scale of 0-10 (0 not at all, to completely 10). 
Questionnaire 2 Satisfaction with the healthy pregnancy service This is described in Fig. 2. Participants rated their satisfaction with information provided by the antenatal team and their relationship with the health professionals. Comparison of questionnaire 1 and 2 Selected responses to a 29-question survey of self-management strategies based on an adapted tool by Sallis completed early and late in pregnancy are compared in Table 2. Qualitative phase Fourteen women agreed to participate in an interview and all were interviewed. Women were a mix of nulliparous and parous, with and without GDM, and most had completed some post school education and were employed. The demographics of interview participants are shown in Table 3. Interview themes are summarised in Table 4 and described with example quotes below. THEME 1. Service support enabled change Most women felt that the service and the intervention staff (health coach and physician) enabled positive behaviour changes. Key strengths included developing rapport, delivering clear advice and providing awareness that created change. Sub-theme: Rapport with women Women described the intervention staff as interested in their well-being and acknowledged the comfortable environment created. Staff involved them in decision making and supported the decisions women made. "compared to other doctors, that they were really interested in what you have to say. They gave honest opinions, and feedbacks, um, which I really liked as well... I felt like very comfortable when I was talking to everyone, so, um, yeah, it was a very it's a very positive experience going through that clinic". (participant #1) "they would always asked me those questions. "Are you still going through those issues? Do you want to try and, um, find another another solution? Um, do you want to try and take another approach?" I was very pleased — they weren't pushing or pressuring, or anything. So they weren't persistent.
Um, which is also good because you can't really pressure someone into doing things" (participant #1) Sub-theme: Advice provided Women reported receiving clear information around diet, exercise and weighing goals that was easy to take on board and felt the level of support helped them make changes. The intervention team had realistic expectations of achievable goals and provided personalised advice. "I've obviously got restrictions on what I can do because I've got other health concerns but um they tried to work around that, you know, with things Women were able to identify changes that they made after planning with the intervention staff, and a number reported feeling more confident as a result of making changes. "I was more aware of what I was eating and portions. I find that when you buy food out as well, the portions are way too bigmy biggest example just the other day was I got one of those boxes of noodles that you getand it's probably three servings. I didn't realise thatbut when I put it in a bowl out of the boxit changes everything" (participant #4) "one of my cravings was ice cream and they told me to substitute it for frozen yogurt if I could. So I did that, and then the craving for ice cream kind of went away. So I cut that out as well. Also with like milk and cheese, they just said to choose the light option if I could, if I didn't mind the flavour and stuff like that. So I tried that as well and that was good." (participant #3) One participant who was pregnant with her third child, reflected that the intervention gave her tools to limit gestational weight gain that would have been helpful in her earlier pregnancies. "I never had a health coach with my first or second, and I wish I did. So with my first I put on 36 kilos. With my second I put on 19. I'm now on my third, and I've only put on seven. So I wish that back then, when I did have my daughter, and put on 36 kilos, that there was a health coach to show me the rights and wrongs." 
(participant #7) THEME 2. Drivers of motivation to behaviour change Women described different drivers to making behaviour change, depending on their personal experience. Subtheme: Women's own motivation Those with previous pregnancies or better health literacy came in with more experience and were able to implement changes more independently, sometimes initiating changes before attending the clinic. coming and all of that sort of thing. So yeah, I was fairly confident to start with. Um, seeing them each time and, you know, getting weighed and knowing that I'm definitely doing the right thing, that helped" (participant #5) "knowing that I was pregnant and that I had gestational diabetes (previously), and the chances of me having it again was higher, so making sure that we push forward with changing our diet early on." (participant #12) Some women described their motivation as being intrinsic, feeling that the responsibility of eating well lay within themselves. "all of these (changes) were up to me and it was up to me to make them work. I could gore down a whole pack of doughnuts if I wanted to but it was about controlling it. So I would be the one that would hold myself back if I slipped." (participant 4) Sub-theme: Potential health consequences for baby Women were motivated to make changes, believing that this impacted the health of their child. These feelings were heightened if they developed a complication in pregnancy, with an awareness that lack of action could affect the baby. The fear of experiencing an adverse outcome acted as a significant motivator. "it was more that I was aware that it wasn't just my own health that I was impacting, it was also the baby's, so I had to do these things to give him the best possible chance." (participant #4) "when I found out I had the diabetes. I made really big changes... when it was, you know, for a reason, it was like, "Yeah, I really do need to change that. And I can swap that; it's that easy. I can swap that." 
(participant #8) "at the start of my pregnancy like I came backI did Down Syndrome test, and it came back as a high risk. And then that kind of you know -and I just wanted to take care of my baby throughout this pregnancy. Like it was, it was a lot different to my first one." (participant #13) "I was a little bit fearful, and that's why I found I had to make those changes." (participant #11) Sub-theme: Being accountable to healthcare professionals Many women found the regular appointments with the intervention staff to check on their progress as a technique for keeping on track. "I did find the accountability very helpful, um, someone actually checking up on me and making sure I'm staying on track and I'm doing the right thing." (participant #14) "sometimes I would fall back into my old ways, and it would encourage me to you know like -she would like encourage me to do like better, and you know eat the proper foods, and exercise a bit more. Yeah. She was good". (participant #13) In contrast, a small number of women described fear of disapproval from the staff for not meeting desired weight goals. This could be seen as a barrier for these women. "sometimes I was really nervous that I was going to go in there and they would be like, "Oh no, you're putting on too much weight" or something like that and I was a little bit worried about that because I know that they weigh me and, you know, those are always really nervous times" (participant #4) THEME 3. Social support A number of women were very open with their family about lifestyle change, and in return received moral support from their family, their partner in particular. "I had that support from my husband, because you know, he does come around with me on these walks, so he's, um, a bit more encouraging with that. 
So you know, he said, "if you can't do half an hour, we'll walk for 20 minutes now, and then in an hour's time, we'll walk another 20 minutes or so."" (participant #1) "my mum's been doing it with me, so that's good and my dad's been really good with watching my daughter and stuff when we go out, and you know, if we can't take her with us... Um, so yeah, I mean, they've been really supportive in the fact that I need to do the you know, the extra exercise" (participant #7) In some circumstances, women's changes had a positive flow-on effect to other family members, with family making changes to the types of food purchased and participating in exercise. "my mother, um, she's started buying things that, you know, that I wouldn't have eaten before. So she'll have skinny milk in the fridge if I come over and things like that" (participant #4) "And my husband's been fantastic. He's changed uh pretty much a lot of the things that he eats himself to support me. So we don't have white bread in the house anymore. We have seeded bread and things like that. So um yeah, my family and friends have been fantastic. My husband is exercising much more now as well. Um, so he's uh more aware of himself in general. It's good." (participant #4) "my husband is very supportive of healthy eating. He wants us all to be healthy. Um, friends are fairly supportive. They've been coming swimming with me and yeah, yeah, my husband has also been coming swimming with me recently. Yeah, it's been good." (participant #9) THEME 4. Barriers to making change A number of barriers to making change were identified, being intrinsic, extrinsic, and clinic-related. Sub-theme: Intrinsic Intrinsic limitations for making change related to fatigue, medical problems making exercising more difficult, managing self-control and sensitivity discussing their weight. "there are days where I'm just so exhausted.
I'm like, "Oh my god, I just want to skip it", but I know that I really shouldn't" (participant #7) "I started going for walks like for about half an hour to 40 minutes a few times a week. But that stopped a little bit later, just due to pelvic pain and stuff like that. So I've just been doing shorter walks and getting in as much activity as I can." (participant #3) "making the changes can be really hard, definitely being pregnant. So when you want something, you want to eat that, it's like, "Oh no, sorry, you can't." I've got to go home instead, and make a sandwich" (participant #8) A few women described difficulty talking about their weight during the consultation. They identified this as a sensitive issue, due to previous negative experiences. "I've been bullied, through school and stuff. I'm really self-conscious about my bodyso, um, having to talk about it, it is um It's quite emotional." (participant #8) Overall, most women had a practical attitude towards discussing their weight, and saw this as a necessary step in making change. Staff understand that weight is a sensitive issue and therefore approach GWG with care, working with women and taking a nuanced approach to conversations about weight, depending on the level of sensitivity. "I've always been a bit chubbyand I used to be very sensitive about it but I'm kind of like past that point now so it's more now that I'm pregnant, I wanted to be as healthy as I can possibly beI knew that they were there to help basically so I just thought the best thing is to be as honest as possible rather than hide things." (participant #14) Sub-theme: Extrinsic External barriers related to time, inclement weather, work, other children and finances. "my time constraints with my work schedule, so it wasn't anything that could really change because I work shift work my meal times would be different throughout the day. So sometimes because I would be at work I wouldn't have time to have a snack when I should have had a snack." 
(participant #6) "I was overloading (at university) and then I went away and did placement. So um that was stress and yeah, that was a barrier to making changes and then after that was finished, I kind of was able to make changes and I had the energy to make changes" (participant #9) "Cost is always a factor, especially healthy food seems to be more expensive than junk food, which is really a pain. I'm like, "I love capsicums but they're so expensive."" (participant #9) Women with children acknowledged that caring for young children was an additional challenge, making exercise outside the home more difficult. "my 18 month old daughter really with the exercise and that but it's just with her, having her as well. Like it's been quite difficult to be able to get outside." (participant #13) Sub-theme: Clinic-related Some women commented that their relationship with healthcare professionals made them feel somewhat uncomfortable initially. Some women acknowledged this may be due to their underlying sensitivity about their weight and felt that staff may have had preconceived ideas about women's lifestyle. "I felt a bit judgedI felt that she just wanted me in, and just wanted me out. She didn't smile." (participant #8) "the physicianshe was a bit, I don't know, distant, I guess. I found it a little bit difficult to kind of connect with herI felt like I could make a decision if I wanted but um she was a bit resistant to what I was telling her to a certain extent, like her listening skills weren't as good as they should beit was probably just that I didn't really click with her when I first met her but, you know, after that I kind of figured out how to kind of get the information that I wanted." (participant #9) "I know this is very much my perception of it, and it wasn't ever intended -but it was very much because you're obese, and you've got a high BMI, you're going to be higher risk for gestational diabetes, and blah, blah, blah. 
But they were saying all that before they knew that my diet and my exercise were actually quite good and I didn't need to change my diet So I just -I kind of felt personally that they kind of assumed that you're going to have issues with trying to change your diet and all that sort of stuff." (participant #6) Women commented on the waiting time, the fact that the clinic only ran on one particular weekday, and the parking expense as barriers. "it was only Wednesday afternoon from one o'clock onwards that it was available. It didn't offer a lot of flexibility" (participant #2) "pay for parking is a bit dear, next door and just the waiting time sometimes. Like, not all the time, but the waiting time is a burden" (participant #1) Sub-theme: Sustainable lifestyle changes Women were able to identify lifestyle changes that could be sustained post-partum. Most felt that diet changes were more achievable than exercise, and recognised that intensity of changes may be reduced compared to pregnancy. "for suregetting out of the house with a newborn is the best thing for you in terms of, you know, not getting depression and whatnot, which I experienced last time." (participant #7) "the food changes, the majority of the time I can keep up but it would probably be like less umit won't be as full on as it is nowI'll be like a bit more relaxed about it definitely." (participant #14) "the main change was the eating habits and that sort of thing and they've pretty much stuck now so I'll keep going the way I am." (participant #5) Women without children were less confident in their ability to find time to exercise and cook healthy foods once their baby was born. "I hope that once the baby is born, I will stop being in pain and it'll be easier to cook um and maintain like a good balance of meat and veggies and carbohydrates in my diethopefully I'll be able to go back to walking the dogs." 
(participant #9) So I think um I would like to increase my exercise, again more once I'm not pregnant. The only difficulty will be that we'll be trying to do it with a newborn it will just be once again like finding time to do -go food shopping, and meal preps, and all that sort of stuff, which I will, I will do. It's just now how will I do this with a baby as well. (participant #6) Sub-theme: Support required to maintain lifestyle changes Some, but not all women were interested in receiving support around diet and exercise post-partum. More women expressed interest in support in a face-to-face setting rather than via email. Some preferred group settings and others felt more comfortable with individual settings. "talking to other mothers who have children; um, you know, finding other strategies of you know, what they're doing, how how it's worked for them, how it hasn't worked for them" (participant #1) "I think maybe diabetes wise, like I know I'm gestational, and I most likely may not have it after. But I think maybe ways to stop that from being something more permanent." (participant #11) "I think um especially exercising after you've had a baby, like knowing what you can do would be really beneficial for new mums, and making sure like little changes of interacting with your baby while you're making those healthy choices and stuff like that. Because you don't always know what you can and can't do physically after you've had the baby." (participant #12) Discussion In this mixed-methods study evaluating pregnant women's experiences of The Healthy Lifestyle in Pregnancy Project (HiPP), we identified patient perspective barriers and enablers for the implementation of an integrated healthy lifestyle intervention embedded in routine antenatal care for women with obesity. Overall, women have good risk perception and are motivated to make healthy lifestyle changes, but initially lack sufficient skills to implement them. 
Qualitative data identified themes of: service support enabled change; drivers of motivation to behaviour change; social support; barriers to making change and post-partum lifestyle and needs. Overall, qualitative and quantitative findings aligned. These learnings provide insight into important factors for improving the implementation model. In early pregnancy, 70% of women reported gaining weight in the year prior. This is comparable to an Australian study of women in preconception, showing 54% had weight gain in the previous 12 months. Here, the vast majority (80%) had attempted to lose weight, but had done so independently, with only 32% consulting a health professional. This concurs with findings from another preconception study, where few women had health checks prior to pregnancy to optimise their health and/or for weight management advice. Generally, women had reasonable risk perception, and were able to recognise the target weight gain and risks related to excess weight gain, consistent with existing literature. This may be partly explained by good background education levels and because women had completed their first midwife appointment (and possibly their first intervention session), where they would have received basic lifestyle information. Of interest, women entered pregnancy with high expectations, all (100%) believed that they could manage healthy lifestyles and weight in pregnancy and almost all (97%) intended to take actions to prevent excess weight gain. However, when directly questioned about their confidence regarding eating and physical activity, confidence was lower at 74 and 67% respectively. This may be related to the style of question, with scaled question format more likely to reflect the person's true feelings. Self-management strategy questions highlighted that women have good intentions for behaviour change but find implementation and relapse management difficult. 
This highlights that practising behaviour change is key to improving self-management and confidence, whilst reducing barriers. Additionally, women were more likely to be motivated to change and to keep track of their diet (55%) than exercise (41%). As both diet and exercise interventions offer benefits in pregnancy, interventions should aim to support enhancing both components. Later in pregnancy, women reported strong satisfaction with the service provided, with 88% reporting satisfaction with information provided about lifestyle and 90% describing a good relationship with their healthcare provider. The reported satisfaction is higher than that reported in Australia in routine antenatal care. This strong satisfaction may have contributed to improved self-management and improvement in diet (making time to prepare healthy meals, having food available for quick healthy meals, more likely to try new foods and recipes, and replacing snack foods with healthier alternatives). Additionally, women weighed themselves more regularly over the course of the intervention. Self-weighing has previously been shown to enhance intervention efficacy in the context of intervention support, but not in control groups without lifestyle support. Quantitative data showed that women have healthy lifestyle intentions, but in some cases lack sufficient skills and confidence to implement them, emphasising that pregnancy is a 'teachable moment'. Qualitative analysis explored how and why behaviour change was or was not made, and the strengths and weaknesses of the intervention. Key themes identified that the service clearly enabled change, with strong rapport between intervention staff and women. This confirms the overall high satisfaction women had with the service, with the majority of women having a good relationship with their health professional. Continuity of care with the same intervention staff promoted relationship building and trust, as described in other studies. 
This social support is an important technique effective in lifestyle pregnancy interventions. Influence of family was an important theme in behaviour change, with a stronger support system facilitating positive change, with demonstrated reach to other close family members also shown. With women previously reported to be the main influencers of family lifestyle behaviours, this has significant public health implications, with potential for wider beneficial effects beyond individual improvements to health behaviours. Exploratory studies have indicated that women desire clear, unambiguous and personalised strategies for making lifestyle changes in pregnancy. Confidence is considered a key element for behaviour change during pregnancy according to the Theory of Planned Behaviour. The challenging factor is how to enhance confidence and motivation to implement behaviour change. Here, women report that intervention staff largely provide this support, enabling success and a sense of achievement and improved confidence. When women saw they had healthy weight gain, this positively impacted their self-esteem, as previously described. Women noted that dietary changes were easier to sustain as the pregnancy progressed, compared with physical activity, across both quantitative and qualitative data highlighting the need for effective, supportive strategies to target realistic and achievable physical activity goals. Quantitative analysis demonstrated improved self-management behaviours and in the qualitative component, a key theme was that the clinic enabled behaviour change, identifying a good fit of data integration. Women identified a number of motivators for behaviour change, either intrinsic, due to concern regarding their baby's well-being, or extrinsic such as being accountable to health professionals, reflecting international research. Women also identified barriers to making change.
Intrinsic (fatigue, medical problems, self-control) and extrinsic (time, inclement weather, work, other children and finances) barriers are universally recognised in this field, as women are challenged to balance everyday demands. Here, the interviews expanded insights of barriers gleaned from quantitative analysis. In contrast to many qualitative studies of health professionals' experiences that describe reticence discussing obesity/gestational weight gain for fear of upsetting women, here very few women identified talking about their weight as a sensitive issue, and this may be due to intervention staff expertise in this area. Staff understand that weight is a sensitive issue and therefore approach GWG with care, working with women and taking a nuanced approach to conversations about weight, depending on the level of sensitivity. Reflecting on the relationship between healthcare providers and women, some described feeling judged in the initial stages, consistent with other studies. Some women speculated this may be related to stigma around their weight and feeling vulnerable to negative attitudes that have been described previously. These feelings were expressed in the interview and not in the questionnaires, highlighting the benefit of qualitative analysis. Women wish to feel understood and treated with respect and this feedback can expand learnings and be applied to improve the healthcare provider and recipient relationships. There was variation in the anticipated sustainability of post-partum lifestyle changes between women with and without other children. Women without children were less confident in their ability to find time to exercise. Some women wanted postnatal support in varying formats. Engaging women postpartum is difficult and these factors need to be incorporated into future implementation models. Evidence is emerging that engagement in pregnancy and continuation post-partum is more successful than isolated post-partum approaches. 
With health benefits for mother and child demonstrated with healthy lifestyle in preconception, pregnancy and postpartum, a continuum approach would be ideal to support women at this high-risk period. Overall, this study demonstrates that women are gaining weight preconception and appear very motivated at the commencement of pregnancy to improve lifestyle, but lack of confidence hampers their success. Women want uncomplicated, clear advice. This intervention is designed to implement small, achievable changes that keep expectations realistic and remove the overwhelming feeling of having to change everything at once, by focussing on what is important to the woman at the time. Practising these techniques enhances self-management, problem solving and self-efficacy and changes are associated with weight gain prevention which in turn improves confidence in women. Facilitating factors are social support and rapport with intervention staff. Pregnancy is a time where increased support is needed by women and this intervention assisted in promoting this both within and outside of the intervention which is likely to be another factor associated with its success. Strengths and limitations A strength of this research is the mixed-methods design, evaluating a pragmatic lifestyle intervention delivered embedded in routine maternity care, reflecting real world settings. By using quantitative and qualitative methods, we enriched our understanding of women's experiences. In most aspects, results were strongly aligned, with coherence of quantitative and qualitative findings, with more in-depth insights from the thematic analysis. Additionally, the study included women from a low SES, diverse ethnic background catchment, increasing generalisability. The findings complement those of our health professionals' perspectives. Together, the studies have a role in informing implementation and scale of evidence-based, cost-effective antenatal lifestyle interventions. 
Possible limitations include the researcher's prior clinical experience in the service, which may have influenced interpretation of participants' responses; however, thematic analysis was completed by two independent researchers. Additionally, these findings come from a single clinical service within a larger health setting, and their generalisability to other settings will need to be confirmed. Conclusions Overall, healthy lifestyle was a high priority for pregnant women with obesity. Positive pregnancy care and lifestyle intervention experiences were reported, including satisfaction and being well-supported and involved. Prior to the intervention, women were able to identify strategies they could use to manage their lifestyle, but had less confidence to implement these changes, with confidence bolstered by the intervention. Ultimately, embedding an effective lifestyle intervention into routine care with dedicated trained health professionals enabled women to feel confident and empowered to make changes. Women identified weaknesses and strengths in their pregnancy care experiences and suggested ideas for improved service provision. Combining these findings with health professional perspectives will inform the scale-up of effective guideline recommended interventions in pregnancy more broadly.
package com.mit.community.service;

import com.baomidou.mybatisplus.mapper.EntityWrapper;
import com.mit.community.entity.*;
import com.mit.community.module.pass.mapper.BaoJinMapper;
import com.mit.community.module.pass.mapper.PerceptionMapper;
import org.apache.commons.collections.map.HashedMap;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;

/**
 * Aggregates "perception" dashboard data for communities: vehicle in/out
 * statistics, warning counts over a trailing window, and device status
 * summaries.
 *
 * <p>All query methods receive a list of community codes; an empty list is
 * mutated to contain a single {@code null} element — presumably so the
 * mapper's IN clause is never empty (confirm against the mapper XML).
 */
@Service
public class PerceptionService {

    @Autowired
    private PerceptionMapper perceptionMapper;

    @Autowired
    private BaoJinMapper baoJinMapper;

    /**
     * Returns the most recent warning record for the given communities.
     *
     * @param communityCode community codes matched against {@code communityCode}
     * @return the latest {@link WarnInfo} by {@code gmt_create}, or {@code null}
     *         when none exists
     */
    public WarnInfo getWarnInfoByType(List<String> communityCode) {
        EntityWrapper<WarnInfo> wrapper = new EntityWrapper<>();
        wrapper.in("communityCode", communityCode);
        wrapper.orderBy("gmt_create", false);
        wrapper.last("LIMIT 1");
        List<WarnInfo> list = baoJinMapper.selectList(wrapper);
        return list.isEmpty() ? null : list.get(0);
    }

    /**
     * Builds the vehicle statistics chart series.
     *
     * @param type           1 = hourly series for the current day,
     *                       2 = daily series for the current month
     * @param communityCodes community codes to query; mutated with a single
     *                       {@code null} element when empty (see class doc)
     * @return a {@link CarPerception} whose series are: sr (queried with "进",
     *         i.e. entering), sc (queried with "出", i.e. leaving), bxq
     *         (local community) and wl; empty for any other {@code type}
     */
    public CarPerception getCarPerception(int type, List<String> communityCodes) {
        CarPerception carPerception = new CarPerception();
        if (communityCodes.isEmpty()) {
            communityCodes.add(null);
        }
        if (type == 1) { // hourly buckets for today, LIKE-pattern "yyyy-MM-dd%"
            String dateNowStr = new SimpleDateFormat("yyyy-MM-dd").format(new Date()) + "%";
            carPerception.setSr(toSeries(perceptionMapper.getSjByDay("进", dateNowStr, communityCodes), type));
            carPerception.setSc(toSeries(perceptionMapper.getSjByDay("出", dateNowStr, communityCodes), type));
            carPerception.setBxq(toSeries(perceptionMapper.getXqByDay(dateNowStr, communityCodes), type));
            // TODO(review): "wl" (outside vehicles) reuses the local-community
            // query; a dedicated mapper query is still needed (original note:
            // "这里需要修改" / "this needs to be changed").
            carPerception.setWl(toSeries(perceptionMapper.getXqByDay(dateNowStr, communityCodes), type));
        } else if (type == 2) { // daily buckets for the month, LIKE-pattern "yyyy-MM%"
            String dateNowStr = new SimpleDateFormat("yyyy-MM").format(new Date()) + "%";
            carPerception.setSr(toSeries(perceptionMapper.getSjByMonth("进", dateNowStr, communityCodes), type));
            carPerception.setSc(toSeries(perceptionMapper.getSjByMonth("出", dateNowStr, communityCodes), type));
            carPerception.setBxq(toSeries(perceptionMapper.getXqByMonth(dateNowStr, communityCodes), type));
            // TODO(review): same placeholder as above — needs its own query.
            carPerception.setWl(toSeries(perceptionMapper.getXqByMonth(dateNowStr, communityCodes), type));
        }
        return carPerception;
    }

    /** Pads raw mapper rows to a full time axis and sorts them chronologically. */
    private List<Current> toSeries(List<Current> raw, int type) {
        return map2List(padding(raw, type), type);
    }

    /**
     * Builds the warning statistics chart series over a trailing day window.
     *
     * @param type           1 = last 7 days, 2 = last 30 days
     * @param communityCodes community codes to query; mutated with a single
     *                       {@code null} element when empty (see class doc)
     * @return a {@link WarnPerception} with dc/xf/tx/mwg daily series; empty
     *         for any other {@code type}
     */
    public WarnPerception getWarnPerception(int type, List<String> communityCodes) {
        WarnPerception warnPerception = new WarnPerception();
        if (communityCodes.isEmpty()) {
            communityCodes.add(null);
        }
        String dateNowStr = new SimpleDateFormat("yyyy-MM-dd").format(new Date()); // today
        if (type == 1) { // 7-day window
            String dayBefore = getPastDate(7);
            warnPerception.setDc(toWarnSeries(perceptionMapper.getDcBySomeDay(dayBefore, dateNowStr, communityCodes), type));
            warnPerception.setXf(toWarnSeries(perceptionMapper.getXfBySomeDay(dayBefore, dateNowStr, communityCodes), type));
            // TODO(review): no mapper query is wired up for "tx" yet; an empty
            // (all-zero) series is charted.
            warnPerception.setTx(toWarnSeries(new ArrayList<Current>(), type));
            warnPerception.setMwg(toWarnSeries(perceptionMapper.getMwgBySomeDay(dayBefore, dateNowStr, communityCodes), type));
        } else if (type == 2) { // 30-day window
            // BUG FIX: this branch previously used getPastDate(7) (copy-paste
            // from the 7-day branch) while paddingWarn pads a 30-day axis, so
            // days 8-30 were always reported as zero even when data existed.
            String dayBefore = getPastDate(30);
            // TODO(review): the 30-day "dc" query was commented out in the
            // original; an empty (all-zero) series is charted until it exists.
            warnPerception.setDc(toWarnSeries(new ArrayList<Current>(), type));
            warnPerception.setXf(toWarnSeries(perceptionMapper.getXfBySomeDay(dayBefore, dateNowStr, communityCodes), type));
            warnPerception.setTx(toWarnSeries(new ArrayList<Current>(), type));
            warnPerception.setMwg(toWarnSeries(perceptionMapper.getMwgBySomeDay(dayBefore, dateNowStr, communityCodes), type));
        }
        return warnPerception;
    }

    /** Pads raw warning rows to the full day window and sorts them by date. */
    private List<Current> toWarnSeries(List<Current> raw, int type) {
        return map2ListWarn(paddingWarn(raw, type));
    }

    /**
     * Summarises device status counts per device category.
     *
     * <p>Each category map uses the keys "lx" (status 3), "zc" (status 1),
     * "yc" (status 2) and "gj" (warning count) — presumably pinyin for
     * 离线/正常/异常/告警 (offline/normal/abnormal/alarm); confirm against the
     * mapper SQL. Categories without backing queries yet (dc, sxj, mj) are
     * zero-filled.
     *
     * @param communityCodes community codes to query; mutated with a single
     *                       {@code null} element when empty (see class doc)
     */
    public DevicePerception getDevicePerception(List<String> communityCodes) {
        DevicePerception devicePerception = new DevicePerception();
        if (communityCodes.isEmpty()) {
            communityCodes.add(null);
        }

        Map<String, String> yg = new HashMap<>();
        yg.put("lx", String.valueOf(perceptionMapper.getYgStatusByStatus(3, communityCodes)));
        yg.put("zc", String.valueOf(perceptionMapper.getYgStatusByStatus(1, communityCodes)));
        yg.put("yc", String.valueOf(perceptionMapper.getYgStatusByStatus(2, communityCodes)));
        yg.put("gj", String.valueOf(perceptionMapper.getYgWarnByStatus(communityCodes)));
        devicePerception.setYg(yg);

        // TODO(review): no status queries exist for dc/sxj/mj yet — the
        // original hard-coded "0" for every bucket; preserved here.
        devicePerception.setDc(zeroStatusMap());

        Map<String, String> jg = new HashMap<>();
        jg.put("lx", String.valueOf(perceptionMapper.getJgStatusByStatus(3, communityCodes)));
        jg.put("zc", String.valueOf(perceptionMapper.getJgStatusByStatus(1, communityCodes)));
        jg.put("yc", String.valueOf(perceptionMapper.getJgStatusByStatus(2, communityCodes)));
        jg.put("gj", String.valueOf(perceptionMapper.getJgWarnByStatus(communityCodes)));
        devicePerception.setJg(jg);

        devicePerception.setSxj(zeroStatusMap());
        devicePerception.setMj(zeroStatusMap());
        return devicePerception;
    }

    /** Returns a status map with every bucket ("lx"/"zc"/"yc"/"gj") set to "0". */
    private static Map<String, String> zeroStatusMap() {
        Map<String, String> map = new HashMap<>();
        map.put("lx", "0");
        map.put("zc", "0");
        map.put("yc", "0");
        map.put("gj", "0");
        return map;
    }

    /** @return total "mj" device count for the communities (presumably 门禁/access control). */
    public int getMjCount(List<String> communityCodes) {
        if (communityCodes.isEmpty()) {
            communityCodes.add(null);
        }
        return perceptionMapper.getMjCount(communityCodes);
    }

    /** @return total "jg" device count for the communities. */
    public int getJgCount(List<String> communityCodes) {
        if (communityCodes.isEmpty()) {
            communityCodes.add(null);
        }
        return perceptionMapper.getJgCount(communityCodes);
    }

    /** @return total "yg" device count for the communities (presumably 烟感/smoke sensors). */
    public int getYgCount(List<String> communityCodes) {
        if (communityCodes.isEmpty()) {
            communityCodes.add(null);
        }
        return perceptionMapper.getYgCount(communityCodes);
    }

    /** @return the aggregate perception count for the communities. */
    public int getPerceptionService(List<String> communityCodes) {
        if (communityCodes.isEmpty()) {
            communityCodes.add(null);
        }
        return perceptionMapper.getPerceptionService(communityCodes);
    }

    /**
     * Converts a padded label->count map into chart points sorted in
     * ascending time order.
     *
     * @param map  bucket label -> count (as a decimal string)
     * @param type 1 = hourly labels like "13:00" (sorted by hour component),
     *             2 = month-day labels like "4-07" (sorted by day component)
     */
    public List<Current> map2List(Map<String, String> map, int type) {
        List<Current> list = new ArrayList<>();
        for (Map.Entry<String, String> entry : map.entrySet()) {
            Current point = new Current();
            point.setTime(entry.getKey());
            point.setCount(Integer.parseInt(entry.getValue()));
            list.add(point);
        }
        if (type == 1) {
            sortByNumericField(list, ":", 0); // "H:00" -> sort by hour
        } else if (type == 2) {
            sortByNumericField(list, "-", 1); // "M-dd" -> sort by day of month
        }
        return list;
    }

    /** Sorts points ascending by one numeric component of their time label. */
    private static void sortByNumericField(List<Current> list, final String separator, final int index) {
        Collections.sort(list, new Comparator<Current>() {
            @Override
            public int compare(Current p1, Current p2) {
                int a = Integer.parseInt(p1.getTime().split(separator)[index]);
                int b = Integer.parseInt(p2.getTime().split(separator)[index]);
                return Integer.compare(a, b);
            }
        });
    }

    /**
     * Converts a padded "yyyy-MM-dd" -> count map into chart points sorted by
     * date, then trims each label to its trailing "MM-dd" for display.
     */
    public List<Current> map2ListWarn(Map<String, String> map) {
        List<Current> list = new ArrayList<>();
        for (Map.Entry<String, String> entry : map.entrySet()) {
            Current point = new Current();
            point.setTime(entry.getKey());
            point.setCount(Integer.parseInt(entry.getValue()));
            list.add(point);
        }
        final DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
        Collections.sort(list, new Comparator<Current>() {
            @Override
            public int compare(Current p1, Current p2) {
                try {
                    return Long.compare(dateFormat.parse(p1.getTime()).getTime(),
                            dateFormat.parse(p2.getTime()).getTime());
                } catch (ParseException e) {
                    e.printStackTrace();
                    // Original behaviour: an unparseable label sorts as "smaller".
                    return -1;
                }
            }
        });
        // Keep only the last five characters ("MM-dd") for chart labels.
        for (Current point : list) {
            String time = point.getTime();
            point.setTime(time.substring(time.length() - 5));
        }
        return list;
    }

    /**
     * Pads sparse vehicle rows to a complete time axis so charts have no gaps.
     *
     * @param list rows from the mapper; for type 1 the time field is a bare
     *             hour number, for type 2 a day-of-month string (zero-padded
     *             per the mapper's DATE_FORMAT — confirm in the mapper XML)
     * @param type 1 = 24 hourly buckets "0:00".."23:00",
     *             2 = one bucket per day of the current month, "M-dd"
     * @return label -> count map with missing buckets filled with "0";
     *         empty for any other {@code type}
     */
    public Map<String, String> padding(List<Current> list, int type) {
        Map<String, String> sc = new HashMap<>();
        if (type == 1) {
            for (Current row : list) {
                sc.put(row.getTime() + ":00", String.valueOf(row.getCount()));
            }
            for (int hour = 0; hour < 24; hour++) {
                String key = hour + ":00";
                if (StringUtils.isBlank(sc.get(key))) {
                    sc.put(key, "0");
                }
            }
        } else if (type == 2) {
            Calendar calendar = Calendar.getInstance();
            calendar.set(Calendar.DATE, 1);
            calendar.roll(Calendar.DATE, -1); // jump to the last day of the month
            int daysInMonth = calendar.get(Calendar.DATE);
            int month = calendar.get(Calendar.MONTH) + 1; // Calendar months are 0-based
            for (Current row : list) {
                sc.put(month + "-" + row.getTime(), String.valueOf(row.getCount()));
            }
            for (int day = 1; day <= daysInMonth; day++) {
                String key = day < 10 ? month + "-0" + day : month + "-" + day;
                if (StringUtils.isBlank(sc.get(key))) {
                    sc.put(key, "0");
                }
            }
        }
        return sc;
    }

    /**
     * Pads sparse per-day warning counts (keys "yyyy-MM-dd") over a trailing
     * window of 7 (type 1) or 30 (type 2) days, filling missing days with "0".
     * For any other {@code type} the rows are returned without padding.
     */
    public Map<String, String> paddingWarn(List<Current> list, int type) {
        Map<String, String> sc = new HashMap<>();
        for (Current row : list) {
            sc.put(row.getTime(), String.valueOf(row.getCount()));
        }
        if (type == 1) {
            fillMissingDays(sc, 7);
        } else if (type == 2) {
            fillMissingDays(sc, 30);
        }
        return sc;
    }

    /** Ensures every date in the trailing window has an entry, defaulting to "0". */
    private static void fillMissingDays(Map<String, String> sc, int days) {
        for (String day : test(days)) {
            if (StringUtils.isBlank(sc.get(day))) {
                sc.put(day, "0");
            }
        }
    }

    /**
     * Returns the dates of the past {@code intervals} days formatted as
     * "yyyy-MM-dd", oldest first and ending with today.
     *
     * @param intervals size of the trailing window, in days
     */
    public static ArrayList<String> test(int intervals) {
        ArrayList<String> pastDaysList = new ArrayList<>();
        for (int i = intervals - 1; i >= 0; i--) {
            pastDaysList.add(getPastDate(i));
        }
        return pastDaysList;
    }

    /**
     * Returns the date {@code past} days before today, formatted "yyyy-MM-dd".
     */
    public static String getPastDate(int past) {
        Calendar calendar = Calendar.getInstance();
        calendar.set(Calendar.DAY_OF_YEAR, calendar.get(Calendar.DAY_OF_YEAR) - past);
        return new SimpleDateFormat("yyyy-MM-dd").format(calendar.getTime());
    }

    /**
     * Returns the date {@code past} days after today, formatted "yyyy-MM-dd".
     */
    public static String getFetureDate(int past) {
        Calendar calendar = Calendar.getInstance();
        calendar.set(Calendar.DAY_OF_YEAR, calendar.get(Calendar.DAY_OF_YEAR) + past);
        return new SimpleDateFormat("yyyy-MM-dd").format(calendar.getTime());
    }
}
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)

# Shared fixtures for the istio integration tests: mock check instances and
# the metric-name lists the assertions compare against.

CHECK_NAME = 'istio'

# --- Mock check instances -------------------------------------------------
# "use_openmetrics" selects the OpenMetrics (V2) code path; legacy instances
# omit it.

MOCK_V2_MESH_INSTANCE = {
    'istio_mesh_endpoint': 'http://localhost:15090/metrics',
    'use_openmetrics': True,
}

MOCK_V2_MESH_OVERRIDE_INSTANCE = {
    'istio_mesh_endpoint': 'http://localhost:15090/metrics',
    'use_openmetrics': True,
    'extra_metrics': [
        {'istio_request': {'name': 'request', 'type': 'counter_gauge'}},
        {'istio_tcp_connections_closed': {'name': 'tcp.connections_closed', 'type': 'counter_gauge'}},
        {'istio_tcp_connections_opened': {'name': 'tcp.connections_opened', 'type': 'counter_gauge'}},
        {'istio_tcp_received_bytes': {'name': 'tcp.received_bytes', 'type': 'counter_gauge'}},
        {'istio_tcp_sent_bytes': {'name': 'tcp.send_bytes', 'type': 'counter_gauge'}},
    ],
}

MOCK_LEGACY_MESH_INSTANCE = {
    'istio_mesh_endpoint': 'http://localhost:15090/metrics',
}

MOCK_V2_ISTIOD_INSTANCE = {
    'istiod_endpoint': 'http://localhost:8080/metrics',
    'use_openmetrics': True,
}

MOCK_LEGACY_ISTIOD_INSTANCE = {'istiod_endpoint': 'http://localhost:8080/metrics'}

# Labels dropped from mesh metrics via the exclude_labels config option.
CONFIG_EXCLUDE_LABELS = [
    "source_version",
    "destination_version",
    "source_canonical_revision",
    "destination_canonical_revision",
    "source_principal",
    "destination_principal",
    "source_cluster",
    "destination_cluster",
    "source_canonical_service",
    "destination_canonical_service",
    "source_workload_namespace",
    "destination_workload_namespace",
    "request_protocol",
    "connection_security_policy",
]

# --- Expected metrics: legacy (non-OpenMetrics) code path -----------------

LEGACY_MESH_METRICS = [
    'istio.mesh.request.count',
    'istio.mesh.request.size.count',
    'istio.mesh.request.size.sum',
    'istio.mesh.response.size.count',
    'istio.mesh.response.size.sum',
    # Counts submitted with `send_monotonic_with_gauge`
    'istio.mesh.request.count.total',
    'istio.mesh.request.size.count.total',
    'istio.mesh.request.size.sum.total',
    'istio.mesh.response.size.count.total',
    'istio.mesh.response.size.sum.total',
]

MESH_MERICS_1_5 = [
    'istio.mesh.request.duration.milliseconds.count',
    'istio.mesh.request.duration.milliseconds.sum',
    # TCP Metrics are supported in post 1.4 istio fixture
    'istio.mesh.tcp.connections_closed.total',
    'istio.mesh.tcp.connections_opened.total',
    'istio.mesh.tcp.received_bytes.total',
    'istio.mesh.tcp.send_bytes.total',
    # Counts submitted with `send_monotonic_with_gauge`
    'istio.mesh.request.duration.milliseconds.count.total',
    'istio.mesh.request.duration.milliseconds.sum.total',
    'istio.mesh.tcp.connections_closed.total.total',
    'istio.mesh.tcp.connections_opened.total.total',
    'istio.mesh.tcp.received_bytes.total.total',
    'istio.mesh.tcp.send_bytes.total.total',
]

# NOTE: the name above misspells "METRICS"; it is kept for backward
# compatibility with existing imports. New code should use this alias.
MESH_METRICS_1_5 = MESH_MERICS_1_5

# Raw exposition names -> check metric names for the mesh endpoint.
MESH_METRICS_MAPPER = {
    'istio_request_duration_milliseconds': 'request.duration.milliseconds',
    'istio_request_count': 'request.count',
    'istio_request_duration': 'request.duration',
    'istio_request_size': 'request.size',
    'istio_response_size': 'response.size',
    'istio_requests_total': 'request.count',
    'istio_request_duration_seconds': 'request.duration',
    'istio_request_bytes': 'request.size',
    'istio_response_bytes': 'response.size',
    'istio_tcp_connections_closed_total': 'tcp.connections_closed.total',
    'istio_tcp_connections_opened_total': 'tcp.connections_opened.total',
    'istio_tcp_received_bytes_total': 'tcp.received_bytes.total',
    'istio_tcp_sent_bytes_total': 'tcp.send_bytes.total',
    'istio_request_messages_total': 'request.messages.total',
    'istio_response_messages_total': 'response.messages.total',
}

ISTIOD_METRICS = [
    'istio.citadel.server.root_cert_expiry_timestamp',
    'istio.galley.endpoint_no_pod',
    'istio.galley.validation.config_update_error',
    'istio.galley.validation.config_update',
    'istio.galley.validation.failed',
    'istio.go.gc_duration_seconds.quantile',
    'istio.go.gc_duration_seconds.sum',
    'istio.go.gc_duration_seconds.count',
    'istio.go.goroutines',
    'istio.go.info',
    'istio.go.memstats.alloc_bytes',
    'istio.go.memstats.alloc_bytes_total',
    'istio.go.memstats.buck_hash_sys_bytes',
    'istio.go.memstats.frees_total',
    'istio.go.memstats.gc_cpu_fraction',
    'istio.go.memstats.gc_sys_bytes',
    'istio.go.memstats.heap_alloc_bytes',
    'istio.go.memstats.heap_idle_bytes',
    'istio.go.memstats.heap_inuse_bytes',
    'istio.go.memstats.heap_objects',
    'istio.go.memstats.heap_released_bytes',
    'istio.go.memstats.heap_sys_bytes',
    'istio.go.memstats.last_gc_time_seconds',
    'istio.go.memstats.lookups_total',
    'istio.go.memstats.mallocs_total',
    'istio.go.memstats.mcache_inuse_bytes',
    'istio.go.memstats.mcache_sys_bytes',
    'istio.go.memstats.mspan_inuse_bytes',
    'istio.go.memstats.mspan_sys_bytes',
    'istio.go.memstats.next_gc_bytes',
    'istio.go.memstats.other_sys_bytes',
    'istio.go.memstats.stack_inuse_bytes',
    'istio.go.memstats.stack_sys_bytes',
    'istio.go.memstats.sys_bytes',
    'istio.go.threads',
    'istio.grpc.server.handled_total',
    'istio.grpc.server.handling_seconds.count',
    'istio.grpc.server.handling_seconds.sum',
    'istio.grpc.server.msg_received_total',
    'istio.grpc.server.msg_sent_total',
    'istio.grpc.server.started_total',
    'istio.pilot.conflict.inbound_listener',
    'istio.pilot.conflict.outbound_listener.http_over_current_tcp',
    'istio.pilot.conflict.outbound_listener.http_over_https',
    'istio.pilot.conflict.outbound_listener.tcp_over_current_http',
    'istio.pilot.conflict.outbound_listener.tcp_over_current_tcp',
    'istio.pilot.destrule_subsets',
    'istio.pilot.duplicate_envoy_clusters',
    'istio.pilot.eds_no_instances',
    'istio.pilot.endpoint_not_ready',
    'istio.pilot.inbound_updates',
    'istio.pilot.k8s.cfg_events',
    'istio.pilot.k8s.reg_events',
    'istio.pilot.no_ip',
    'istio.pilot.proxy_convergence_time.count',
    'istio.pilot.proxy_convergence_time.sum',
    'istio.pilot.proxy_queue_time.count',
    'istio.pilot.proxy_queue_time.sum',
    'istio.pilot.push.triggers',
    'istio.pilot.services',
    'istio.pilot.virt_services',
    'istio.pilot.vservice_dup_domain',
    'istio.pilot.xds',
    'istio.pilot.xds.eds_all_locality_endpoints',
    'istio.pilot.xds.eds_instances',
    'istio.pilot.xds.push.time.count',
    'istio.pilot.xds.push.time.sum',
    'istio.pilot.xds.pushes',
    'istio.process.cpu_seconds_total',
    'istio.process.max_fds',
    'istio.process.open_fds',
    'istio.process.resident_memory_bytes',
    'istio.process.start_time_seconds',
    'istio.process.virtual_memory_bytes',
    'istio.process.virtual_memory_max_bytes',
    'istio.sidecar_injection.requests_total',
    'istio.sidecar_injection.success_total',
]

# --- Expected metrics: OpenMetrics (V2) code path -------------------------

V2_MESH_METRICS = [
    'istio.mesh.tcp.connections_closed.count',
    'istio.mesh.tcp.connections_opened.count',
    'istio.mesh.tcp.received_bytes.count',
    'istio.mesh.request.count',
    'istio.mesh.request.duration.milliseconds.bucket',
    'istio.mesh.request.duration.milliseconds.sum',
    'istio.mesh.request.duration.milliseconds.count',
    'istio.mesh.response.size.bucket',
    'istio.mesh.response.size.sum',
    'istio.mesh.response.size.count',
    'istio.mesh.request.size.bucket',
    'istio.mesh.request.size.sum',
    'istio.mesh.request.size.count',
    'istio.mesh.tcp.send_bytes.count',
]

# Extra names submitted when the counter_gauge remapping (see
# MOCK_V2_MESH_OVERRIDE_INSTANCE above) is configured.
V2_MESH_COUNTER_GAUGE = [
    'istio.mesh.tcp.connections_closed.total',
    'istio.mesh.tcp.connections_opened.total',
    'istio.mesh.tcp.received_bytes.total',
    'istio.mesh.tcp.send_bytes.total',
]

ISTIOD_V2_METRICS = [
    'istio.citadel.server.root_cert_expiry_timestamp',
    'istio.galley.endpoint_no_pod',
    'istio.galley.validation.config_update_error.count',
    'istio.galley.validation.config_update.count',
    'istio.galley.validation.failed.count',
    'istio.go.gc_duration_seconds.quantile',
    'istio.go.gc_duration_seconds.sum',
    'istio.go.gc_duration_seconds.count',
    'istio.go.goroutines',
    'istio.go.info',
    'istio.go.memstats.alloc_bytes',
    'istio.go.memstats.buck_hash_sys_bytes',
    'istio.go.memstats.frees.count',
    'istio.go.memstats.gc_cpu_fraction',
    'istio.go.memstats.gc_sys_bytes',
    'istio.go.memstats.heap_alloc_bytes',
    'istio.go.memstats.heap_idle_bytes',
    'istio.go.memstats.heap_inuse_bytes',
    'istio.go.memstats.heap_objects',
    'istio.go.memstats.heap_released_bytes',
    'istio.go.memstats.heap_sys_bytes',
    'istio.go.memstats.last_gc_time_seconds',
    'istio.go.memstats.lookups.count',
    'istio.go.memstats.mallocs.count',
    'istio.go.memstats.mcache_inuse_bytes',
    'istio.go.memstats.mcache_sys_bytes',
    'istio.go.memstats.mspan_inuse_bytes',
    'istio.go.memstats.mspan_sys_bytes',
    'istio.go.memstats.next_gc_bytes',
    'istio.go.memstats.other_sys_bytes',
    'istio.go.memstats.stack_inuse_bytes',
    'istio.go.memstats.stack_sys_bytes',
    'istio.go.memstats.sys_bytes',
    'istio.go.threads',
    'istio.grpc.server.handled.count',
    'istio.grpc.server.handling_seconds.bucket',
    'istio.grpc.server.handling_seconds.sum',
    'istio.grpc.server.handling_seconds.count',
    'istio.grpc.server.msg_received.count',
    'istio.grpc.server.msg_sent.count',
    'istio.grpc.server.started.count',
    'istio.pilot.conflict.inbound_listener',
    'istio.pilot.conflict.outbound_listener.http_over_current_tcp',
    'istio.pilot.conflict.outbound_listener.http_over_https',
    'istio.pilot.conflict.outbound_listener.tcp_over_current_http',
    'istio.pilot.conflict.outbound_listener.tcp_over_current_tcp',
    'istio.pilot.destrule_subsets',
    'istio.pilot.duplicate_envoy_clusters',
    'istio.pilot.eds_no_instances',
    'istio.pilot.endpoint_not_ready',
    'istio.pilot.inbound_updates.count',
    'istio.pilot.k8s.cfg_events.count',
    'istio.pilot.k8s.reg_events.count',
    'istio.pilot.no_ip',
    'istio.pilot.proxy_convergence_time.bucket',
    'istio.pilot.proxy_convergence_time.sum',
    'istio.pilot.proxy_convergence_time.count',
    'istio.pilot.proxy_queue_time.bucket',
    'istio.pilot.proxy_queue_time.sum',
    'istio.pilot.proxy_queue_time.count',
    'istio.pilot.push.triggers.count',
    'istio.pilot.services',
    'istio.pilot.virt_services',
    'istio.pilot.vservice_dup_domain',
    'istio.pilot.xds',
    'istio.pilot.xds.eds_all_locality_endpoints',
    'istio.pilot.xds.eds_instances',
    'istio.pilot.xds.push.time.bucket',
    'istio.pilot.xds.push.time.sum',
    'istio.pilot.xds.push.time.count',
    'istio.pilot.xds.pushes.count',
    'istio.process.cpu_seconds.count',
    'istio.process.max_fds',
    'istio.process.open_fds',
    'istio.process.resident_memory_bytes',
    'istio.process.start_time_seconds',
    'istio.process.virtual_memory_bytes',
    'istio.process.virtual_memory_max_bytes',
    'istio.sidecar_injection.requests.count',
    'istio.sidecar_injection.success.count',
]
// GetOnePost returns one post and error, if any func (m *DBModel) FindOnePost(id primitive.ObjectID) (*Post, error) { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() collection := m.DB.Collection("posts") var post Post filter := Post{ID: id} err := collection.FindOne(ctx, filter).Decode(&post) if err != nil { return nil, err } return &post, nil }
The design of a data acquisition system based on the MIC-2000 A data acquisition system was designed, which used the MIC-2000 industrial computer as the hardware and Windows CE as the operating system. The hardware components of the data acquisition system were introduced briefly. It was mainly explained how to design applications for the different I/O boards based on the Windows CE operating system. The performance of the designed system was tested successfully on a double-dimension tank. The control of the liquid level was realized effectively, and the design requirements were met completely.
Kevin Mayer 2008–2010: Youth career: gold medals at Youth (under-18) and Junior (under-20) World Championships He won the octathlon gold medal at the 2009 World Youth Championships and the decathlon gold medal at the 2010 World Junior Championships. 2011–2012: Gold medal at the Junior (under-20) European Championships and first Olympic Games He competed at the 2012 Summer Olympics, finishing 15th. 2013–2015: First senior medals at back-to-back European Championships, fourth place at the 2013 World Championships He won a silver medal in the heptathlon at the 2013 European Athletics Indoor Championships with a new personal best of 6297 points. At the 2013 European Cup Combined Events he topped the podium and set personal bests in the 100 metres (11.04 sec), long jump (7.63 m), shot put (14.95 m) and the discus throw (44.89 m). On 12 August 2015, Mayer announced his withdrawal from the 2015 World Championships because of a hamstring injury sustained in the end of July 2015. 2016: Second place at the Götzis Hypo-Meeting and Olympic silver medal, new personal best On 6 March 2016, Mayer announced his withdrawal from the 2016 World Indoor Championships because of a heel injury sustained during the hurdles race at the French Indoor Athletics Championships held at the end of February in Aubière. At the 2016 Rio Olympics, Mayer earned a silver medal with a new personal best of 8834 points, finishing behind only two-time gold medallist Ashton Eaton, who was the world record holder at the time. Highlights of his campaign include equalling or improving personal outdoor bests in four events (100 m, shot put, 400 m, pole vault) and performing seasonal bests in three others (long jump, high jump, 1500 m); he was also the best decathlete in two disciplines (the shot put, with 15.76 m and 836 points; and the pole vault, with 5.40 m and 1035 points, sharing first place with Thomas van der Plaetsen) and lead the standings at the end of the first day of the competition (i.e. 
after 5 out of 10 events). Furthermore, this performance ranks as the sixth-best personal best score in the men's decathlon, and showed a marked improvement in form for Mayer, bettering his former personal best of 8521 points (set during the aforementioned silver medal run at the 2014 European Athletics Championships) by 313 points and the French national record by 260 points. 2017: European Indoor Champion and World Champion In the run-up to the 2017 European Athletics Indoor Championships, Mayer's first combined events competition of the year was an indoor track and field triathlon (60m hurdles, shot put, long jump) at the National Indoor Meeting of Paris on 8 February. Mayer didn't receive a score in the 60m hurdles, but finished first in the other two events against the other two remaining participants, Bastien Auzeil and Gaël Querin. Nevertheless, he finished last (out of three), with 1652 points. His first heptathlon was at the French Elite Indoor Championships in Bordeaux (organized by the French Athletics Federation) on 18–19 February. While Mayer didn't get marks in the long jump, he finished in first place in the other five events he participated in. He decided not to participate in the final 1,000 metres and therefore finished the heptathlon without a score. At the 2017 European Athletics Indoor Championships in Belgrade on 4–5 March, his first international competition of the year, Mayer won the gold medal in the men's heptathlon with a new European record (6479 points), beating Jorge Ureña (silver, 6227 points) and Adam Helcelet (bronze, 6110 points). He achieved three indoor personal bests (in the long jump, 60m hurdles, and the pole vault) and finished third or better in six out of seven events (except the 1000m, where he finished fourth) and established personal indoor records in five (all but the shot put and the 1000m).
This score is also the second-best personal record score for a men's indoor heptathlon (behind Ashton Eaton's world record of 6645 points); Mayer beat the previous European indoor record, set by Roman Šebrle at the 2004 IAAF World Indoor Championships in Budapest, by 41 points. On 15 April, he had his first outdoor competition at a triathlon (200m, high jump, discus throw) in L'Étang-Salé, Réunion. Mayer won all three events, finishing in first place with 2642 points. Back in Europe, Mayer took part in his first decathlon of the year in his adopted hometown of Montpellier on 13–14 May. Despite not scoring a valid mark in two events in the first day, the long jump and the high jump, Mayer achieved a season best mark in the discus and the shot put. Afterwards, Mayer attended events in Valence and Paris over the next month and a half to prepare some disciplines for the summer season. He then took part in his last competition before the World Championships, the French Elite Outdoor Championships in Marseille, whose men's decathlon competition took place on 14–15 July. Mayer struggled with the rainy and windy conditions and the conditions of the track, eventually not finishing the competition after a few no marks. At the 2017 World Championships in Athletics in London, Mayer completed his first decathlon of the year and won his first World Championships gold medal with a world-leading score of 8768 points, ahead of Germans Rico Freimuth (silver, 8564 points) and Kai Kazmirek (bronze, 8488 points); this was also France's first international gold medal in the decathlon. Despite not coming first in any specific discipline, Mayer achieved new personal bests in the 100 metres (10.70s, 929 points), 400 metres (48.28s, 897 points), and the 110 metres hurdles (13.75, 1007 points). A setback in the pole vault – where he cleared his only mark at 5.10m only at the third and last try – 30 cm below his personal record – prevented him from breaking his personal record. 
In 2018, he broke Ashton Eaton's world record, establishing a new mark of 9126 points in Talence, France. Personal life Mayer was born on 10 February 1992 in Argenteuil, a commune in the northwest suburbs of Paris, to André and Carole Mayer. His paternal family and German surname have their origin in the northeastern region of Lorraine, where his father grew up; some of his relatives still live in the Moselle department next to the border with Germany. He has three brothers: Thibault, Thomas and Sébastien; the family was raised in La Roche-de-Glun, a small town by the Rhône river in the southeast of the country (Drôme department), where his parents still live. Mayer started practicing athletics at the sports association EA Tain-Tournon close to his hometown; after the club's merger with two other Drôme-based athletics associations in 2013, Mayer continued representing it through the new institution, EA Rhône Vercors 26-07. He trains at the CREPS Montpellier, a training center for high-performance athletes, since 2008; ever since moving to Montpellier, he has been coached by Bertrand Valcin. He studied for a fr:Diplôme universitaire de technologie en mesures physiques (a technological degree in physical measurements, including metrology and instrumentation) at the University of Montpellier-Sète.
Does getting put on hold make you slam down the phone in frustration? How likely you are to react like this may depend on where you live. Americans will likely waste more than 900 million hours waiting on hold this year, according to an analysis of more than four million phone calls from consumers to businesses released this week by mobile advertising analytics firm Marchex. And a survey by text-message service TalkTo found that more than half of Americans say they spend 10 to 20 minutes every week — or 43 days of their life — on hold. To consumers, this is incredibly irritating: One survey found that being put on hold was one of consumers’ top three phone pet peeves (the other two were automated attendants and the person on the other line having bad manners, or having a bad attitude). In some states, residents are far less patient with a customer service representative or assistant who puts them on hold. People who live in Kentucky are the fastest to hang up, followed by those in Ohio, North Carolina, New York and West Virginia, the Marchex analysis finds. Residents in these five states were more than twice as likely as those in the bottom to five to hang up when put on hold. Residents of Kentucky, North Carolina and West Virginia may be quick to hang up because they live in a culture where politeness is valued, and customer service agents often aren’t that mannerly, says John Busby, the senior vice president of marketing and consumer insights at Marchex; rather than confront them on this, they hang up. (Although, of course, many might be so polite they stay on the line.) On the other hand, New Yorkers may land on top of the list simply because they may, in fact, be less patient or have less time to spend on hold. Meanwhile, people in Louisiana, Colorado, Florida, Illinois and Minnesota will stay on hold the longest, hanging up less than half as often as those who live in the most impatient states. 
The good news: There are ways you can avoid getting put on hold for a long time. First, call in the morning. A study by customer service software firm ZenDesk found that between 9 a.m. and 11 a.m. (the earlier the better in this window), you will typically get the fastest response; wait until the afternoon, and response time triples. Second, use the technology available to make your experience better. First, consider a free app like LucyPhone, which will wait on hold for you and alert you when the customer service agent finally picks up. Next, check out sites like GetHuman and ContactHelp, which can provide you with alternate customer service contacts that may have less wait time. Finally, most companies now have many other ways to contact them for help — ranging from email to web chats — that won’t require you to wait on the phone, and many will even call you back. Experts say it’s also worth using social media to reach out to many companies.
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2013,2014,2017  Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sqlalchemy
from six import string_types

from aquilon.aqdb.types import StringEnum


class StringEnumColumn(sqlalchemy.types.TypeDecorator):
    """SQLAlchemy column type that persists StringEnum values as strings.

    Values are converted with the wrapped class's ``to_database()`` on the
    way into the database and ``from_database()`` on the way out.  ``None``
    and the empty string are normalized to ``None`` in both directions.
    """

    impl = sqlalchemy.types.String

    def __init__(self, cls, size, permissive_reads=False):
        """Wrap a StringEnum subclass in a VARCHAR(size) column.

        :param cls: the StringEnum subclass whose values this column stores
        :param size: maximum length of the stored string
        :param permissive_reads: passed through to ``from_database()``;
            presumably relaxes validation of values read back — confirm
            against StringEnum's implementation.
        :raises ValueError: if cls is not a StringEnum subclass
        """
        if not issubclass(cls, StringEnum):  # pragma: no cover
            # Fixed message: was the ungrammatical
            # "StringEnumColumn column's wrap StringEnum classes".
            raise ValueError("StringEnumColumn columns must wrap StringEnum subclasses")
        self._wrapped_class = cls
        self._permissive_reads = permissive_reads
        super(StringEnumColumn, self).__init__(size)

    def process_bind_param(self, value, dialect):
        # Return a value suitable for sending to the database;
        # empty strings collapse to NULL.
        if value is None or (isinstance(value, string_types) and value == ''):
            return None
        return self._wrapped_class.to_database(value)

    def process_result_value(self, value, dialect):
        # Return a value read from the database; NULL/empty map to None.
        if value is None or value == '':
            return None
        # Force value to be a string as it may well be unicode
        return self._wrapped_class.from_database(value, self._permissive_reads)
import React from 'react';
import { ILanguagePickerStrings } from './model';
import { LangTag, ScriptName } from './langPicker/types';
import { makeStyles, Theme, createStyles } from '@material-ui/core/styles';
import { List, ListItem, ListItemText, Typography } from '@material-ui/core';
import { debounce } from 'lodash';

const useStyles = makeStyles((theme: Theme) =>
  createStyles({
    root: {
      flexGrow: 1,
      maxWidth: 752,
    },
    paper: {
      backgroundColor: theme.palette.background.paper,
    },
    list: {
      overflowY: 'scroll',
    },
    title: {
      margin: theme.spacing(4, 0, 2),
    },
    firstLine: {
      display: 'flex',
    },
    grow: {
      flexGrow: 1,
    },
  })
);

interface IProps {
  // Indices into langTags of the tags to display, in display order.
  list: number[];
  // Callback invoked with the chosen tag on click / Enter / Space.
  choose: (tag: LangTag) => void;
  // When true, each item also shows a secondary detail line.
  secondary?: boolean;
  // Full language-tag table that `list` indexes into.
  langTags: LangTag[];
  // Lookup from 4-letter script subtag to human-readable script name.
  scriptName: ScriptName;
  // Localized UI strings ($1/$2 placeholders are substituted below).
  t: ILanguagePickerStrings;
}

/**
 * Scrollable list of language tags for a language picker.  Each row shows
 * the language name and its BCP-47 tag; rows are selectable by mouse or
 * keyboard (Space/Enter).  The list height tracks the window height.
 */
export function LanguageChoice(props: IProps) {
  const { list, langTags, scriptName, secondary, t, choose } = props;
  const classes = useStyles();
  const [dense] = React.useState(true);
  const [height, setHeight] = React.useState(window.innerHeight);

  const handleChoose = (tag: LangTag) => () => {
    choose(tag);
  };

  // Key codes 32/13 are Space and Enter.
  const handleKeydown = (tag: LangTag) => (e: any) => {
    if (e.keyCode === 32 || e.keyCode === 13) {
      choose(tag);
    }
  };

  // Track window height (debounced) so the list's max height can follow it.
  React.useEffect(() => {
    const handleResize = debounce(() => setHeight(window.innerHeight), 100);
    window.addEventListener('resize', handleResize);
    return () => {
      window.removeEventListener('resize', handleResize);
    };
  }, []);

  // Returns "in <script>" text when the tag carries a 4-letter script
  // subtag (e.g. "sr-Cyrl"), otherwise an empty string.
  const scriptDetail = (tag: LangTag) => {
    const tagParts = tag.tag.split('-');
    return tagParts.length > 1 && tagParts[1].length === 4
      ? t.inScript.replace('$1', scriptName[tagParts[1]])
      : '';
  };

  // Secondary line: "language of <region> <in script>" plus alternate names.
  const detail = (tag: LangTag) => {
    return (
      <>
        <Typography component={'span'}>
          {t.languageOf
            .replace('$1', tag.regionname ? tag.regionname : '')
            .replace('$2', scriptDetail(tag))}
        </Typography>
        <br />
        <Typography component={'span'}>
          {tag.names ? tag.names.join(', ') : ''}
        </Typography>
      </>
    );
  };

  // Render one ListItem per index in refList; name left, tag right.
  const langElems = (refList: number[], refTags: LangTag[]) => {
    return refList.map((r, i) => {
      const tag = refTags[r];
      return (
        <ListItem
          key={`${tag.tag} ${i}`}
          button
          onClick={handleChoose(tag)}
          onKeyDown={handleKeydown(tag)}
        >
          <ListItemText
            primary={
              <div className={classes.firstLine}>
                <Typography>{tag.name}</Typography>
                <div className={classes.grow}>{'\u00A0'}</div>
                <Typography>{tag.tag}</Typography>
              </div>
            }
            secondary={secondary ? detail(tag) : null}
          />
        </ListItem>
      );
    });
  };

  return (
    <div className={classes.root}>
      <div className={classes.paper}>
        <List
          dense={dense}
          className={classes.list}
          // 450 appears to reserve space for surrounding picker chrome,
          // with a 200px floor — TODO confirm against the parent layout.
          style={{ maxHeight: Math.max(height - 450, 200) }}
        >
          {langElems(list, langTags)}
        </List>
      </div>
    </div>
  );
}

export default LanguageChoice;
def DempsterShaferUncertainty(logits):
    """Dempster-Shafer-style uncertainty from classifier logits.

    Computes K / (K + sum_k exp(logit_k)) over the last axis, where K is
    the number of classes; larger values indicate less total evidence.

    Args:
        logits: tensor whose last dimension indexes classes.

    Returns:
        Tensor of uncertainties with the last dimension reduced away.
    """
    class_count = tf.cast(tf.shape(logits)[-1], dtype=logits.dtype)
    total_evidence = tf.reduce_sum(tf.exp(logits), axis=-1)
    return class_count / (total_evidence + class_count)
Q: Why do websites force the user to use a long and complicated password when brute force isn't possible? If you have a phone with a 4-digit PIN and the phone does not get locked no matter how many times you enter the wrong PIN, it makes sense to use 10 or 16 digits, as it will increase the time the attacker needs to crack the PIN and make it sound impossible to crack. However, these days even simple websites stop the user from trying random passwords after 3 or 5 attempts; to my understanding this kills the entire concept of brute force. There has to be something wrong with my way of thinking — what am I missing? A: Nowadays, the scenario password policies need to defend against is the theft of the complete password database. This is something which happens with alarming frequency even to very reputable websites. When the developers of the website follow common best practices, password databases usually don't contain the plaintext passwords but instead store a hash of every password. An attacker who has access to this data can perform an offline brute-force attack by calculating the hash values of common weak passwords and comparing them to the hashes in the database. To defend against this kind of attack, users need to be motivated to use strong passwords.
import json

import responses

from tests import load_fixture, temp_env
from unittest import TestCase


class TestParseArgs(TestCase):
    """CLI tests for webhook (un)registration on repos and orgs.

    All GitHub API traffic is mocked with the `responses` library; the
    assertions inspect `responses.calls` by position, so the order in
    which mocks are declared mirrors the order of requests the CLI makes.
    """

    def _do_parse(self, args):
        """Run the CLI parser against `args` with a freshly loaded config.

        The lintreview imports happen inside the method to force config
        regeneration between tests.
        """
        # force config regeneration between tests
        from lintreview.cli import parse_args
        from lintreview.config import load_config
        import lintreview.web as web
        web.config = load_config()
        web.app.config.update(web.config)
        parse_args(args)

    @responses.activate
    def test_can_create_repo_webhook_via_cli(self):
        """`register` POSTs a new repo webhook when none exists yet."""
        responses.add(
            responses.GET,
            'https://api.github.com/repos/markstory/lint-test',
            json=json.loads(load_fixture('repository.json')),
            status=200
        )
        # no hooks exist
        responses.add(
            responses.GET,
            'https://api.github.com/repos/markstory/lint-test/hooks?per_page=100',
            json=[],
            status=200
        )
        # allow for hook creation
        responses.add(
            responses.POST,
            'https://api.github.com/repos/markstory/lint-test/hooks',
            json=json.loads(load_fixture('webhook_list.json'))[0],
            status=201
        )
        mock_env = {
            'LINTREVIEW_SETTINGS': 'settings.sample.py',
            'LINTREVIEW_SERVER_NAME': 'example.com'
        }
        with temp_env(mock_env):
            self._do_parse(['register', '--user', 'cool-token',
                            'markstory', 'lint-test'])
        # Third request (index 2) is the hook-creation POST.
        webhook_creation_request = responses.calls[2].request
        self.assertEqual(webhook_creation_request.headers['Authorization'],
                         'token cool-token')
        self.assertEqual(
            webhook_creation_request.url,
            'https://api.github.com/repos/markstory/lint-test/hooks'
        )
        self.assertEqual(webhook_creation_request.method, responses.POST)
        self.assertEqual(
            json.loads(webhook_creation_request.body),
            {
                "name": "web",
                "config": {
                    "url": "http://example.com/review/start",
                    "content_type": "json"
                },
                "events": ["pull_request"],
                "active": True
            }
        )

    @responses.activate
    def test_create_repo_webhook_does_nothing_if_webhook_already_exists(self):
        """`register` is a no-op (no POST) when the hook already exists."""
        responses.add(
            responses.GET,
            'https://api.github.com/repos/markstory/lint-test',
            json=json.loads(load_fixture('repository.json')),
            status=200
        )
        # hook already exists
        responses.add(
            responses.GET,
            'https://api.github.com/repos/markstory/lint-test/hooks?per_page=100',
            json=json.loads(load_fixture('webhook_list.json')),
            status=200
        )
        mock_env = {
            'LINTREVIEW_SETTINGS': 'settings.sample.py',
            'LINTREVIEW_SERVER_NAME': 'example.com'
        }
        with temp_env(mock_env):
            self._do_parse(['register', '--user', 'cool-token',
                            'markstory', 'lint-test'])
        for request_call in responses.calls:
            self.assertNotEqual(request_call.request.method, responses.POST)

    @responses.activate
    def test_remove_repo_webhook_sends_correct_request_if_webhook_exists(self):
        """`unregister` DELETEs the existing repo hook by its id."""
        responses.add(
            responses.GET,
            'https://api.github.com/repos/markstory/lint-test',
            json=json.loads(load_fixture('repository.json')),
            status=200
        )
        webhook_list_json = json.loads(load_fixture('webhook_list.json'))
        # hook exists
        responses.add(
            responses.GET,
            'https://api.github.com/repos/markstory/lint-test/hooks?per_page=100',
            json=webhook_list_json,
            status=200
        )
        responses.add(
            responses.GET,
            'https://api.github.com/repos/markstory/lint-test/hooks/706986',
            json=webhook_list_json[0],
            status=200
        )
        responses.add(
            responses.DELETE,
            'https://api.github.com/repos/markstory/lint-test/hooks/706986',
            status=204
        )
        mock_env = {
            'LINTREVIEW_SETTINGS': 'settings.sample.py',
            'LINTREVIEW_SERVER_NAME': 'example.com'
        }
        with temp_env(mock_env):
            self._do_parse(['unregister', '--user', 'cool-token',
                            'markstory', 'lint-test'])
        # Fourth request (index 3) is the hook-deletion DELETE.
        webhook_deletion_request = responses.calls[3].request
        self.assertEqual(webhook_deletion_request.headers['Authorization'],
                         'token cool-token')
        self.assertEqual(
            webhook_deletion_request.url,
            'https://api.github.com/repos/markstory/lint-test/hooks/706986'
        )
        self.assertEqual(webhook_deletion_request.method, responses.DELETE)

    @responses.activate
    def test_remove_repo_webhook_blows_up_if_webhook_does_not_exist(self):
        """`unregister` exits with an error when no hook is registered."""
        responses.add(
            responses.GET,
            'https://api.github.com/repos/markstory/lint-test',
            json=json.loads(load_fixture('repository.json')),
            status=200
        )
        # hook does not exist
        responses.add(
            responses.GET,
            'https://api.github.com/repos/markstory/lint-test/hooks?per_page=100',
            json=[],
            status=200
        )
        mock_env = {
            'LINTREVIEW_SETTINGS': 'settings.sample.py',
            'LINTREVIEW_SERVER_NAME': 'example.com'
        }
        with temp_env(mock_env):
            with self.assertRaises(SystemExit):
                self._do_parse(['unregister', '--user', 'cool-token',
                                'markstory', 'lint-test'])

    @responses.activate
    def test_can_create_org_webhook_via_cli(self):
        """`org-register` POSTs a new org webhook when none exists yet."""
        responses.add(
            responses.GET,
            'https://api.github.com/orgs/github',
            json=json.loads(load_fixture('organization.json')),
            status=200
        )
        # no hooks exist
        responses.add(
            responses.GET,
            'https://api.github.com/orgs/github/hooks?per_page=100',
            json=[],
            status=200
        )
        # allow for hook creation
        responses.add(
            responses.POST,
            'https://api.github.com/orgs/github/hooks',
            json=json.loads(load_fixture('org_webhook_list.json'))[0],
            status=201
        )
        mock_env = {
            'LINTREVIEW_SETTINGS': 'settings.sample.py',
            'LINTREVIEW_SERVER_NAME': 'example.com'
        }
        with temp_env(mock_env):
            self._do_parse(['org-register', '--user', 'cool-token', 'github'])
        # Third request (index 2) is the hook-creation POST.
        webhook_creation_request = responses.calls[2].request
        self.assertEqual(webhook_creation_request.headers['Authorization'],
                         'token cool-token')
        self.assertEqual(webhook_creation_request.url,
                         'https://api.github.com/orgs/github/hooks')
        self.assertEqual(webhook_creation_request.method, responses.POST)
        self.assertEqual(
            json.loads(webhook_creation_request.body),
            {
                "name": "web",
                "config": {
                    "url": "http://example.com/review/start",
                    "content_type": "json"
                },
                "events": ["pull_request"],
                "active": True
            }
        )

    @responses.activate
    def test_create_org_webhook_does_nothing_if_webhook_already_exists(self):
        """`org-register` is a no-op (no POST) when the hook already exists."""
        responses.add(
            responses.GET,
            'https://api.github.com/orgs/github',
            json=json.loads(load_fixture('organization.json')),
            status=200
        )
        # hook already exists
        responses.add(
            responses.GET,
            'https://api.github.com/orgs/github/hooks?per_page=100',
            json=json.loads(load_fixture('org_webhook_list.json')),
            status=200
        )
        mock_env = {
            'LINTREVIEW_SETTINGS': 'settings.sample.py',
            'LINTREVIEW_SERVER_NAME': 'example.com'
        }
        with temp_env(mock_env):
            self._do_parse(['org-register', '--user', 'cool-token', 'github'])
        for request_call in responses.calls:
            self.assertNotEqual(request_call.request.method, responses.POST)

    @responses.activate
    def test_remove_org_webhook_sends_correct_request_if_webhook_exists(self):
        """`org-unregister` DELETEs the existing org hook by its id."""
        responses.add(
            responses.GET,
            'https://api.github.com/orgs/github',
            json=json.loads(load_fixture('organization.json')),
            status=200
        )
        webhook_list_json = json.loads(load_fixture('org_webhook_list.json'))
        # hook exists
        responses.add(
            responses.GET,
            'https://api.github.com/orgs/github/hooks?per_page=100',
            json=webhook_list_json,
            status=200
        )
        responses.add(
            responses.GET,
            'https://api.github.com/orgs/github/hooks/706986',
            json=webhook_list_json[0],
            status=200
        )
        responses.add(
            responses.DELETE,
            'https://api.github.com/orgs/github/hooks/706986',
            status=204
        )
        mock_env = {
            'LINTREVIEW_SETTINGS': 'settings.sample.py',
            'LINTREVIEW_SERVER_NAME': 'example.com'
        }
        with temp_env(mock_env):
            self._do_parse(['org-unregister', '--user', 'cool-token', 'github'])
        # Fourth request (index 3) is the hook-deletion DELETE.
        webhook_deletion_request = responses.calls[3].request
        self.assertEqual(webhook_deletion_request.headers['Authorization'],
                         'token cool-token')
        self.assertEqual(
            webhook_deletion_request.url,
            'https://api.github.com/orgs/github/hooks/706986'
        )
        self.assertEqual(webhook_deletion_request.method, responses.DELETE)

    @responses.activate
    def test_remove_org_webhook_blows_up_if_webhook_does_not_exist(self):
        """`org-unregister` exits with an error when no hook is registered."""
        responses.add(
            responses.GET,
            'https://api.github.com/orgs/github',
            json=json.loads(load_fixture('organization.json')),
            status=200
        )
        # hook does not exist
        responses.add(
            responses.GET,
            'https://api.github.com/orgs/github/hooks?per_page=100',
            json=[],
            status=200
        )
        mock_env = {
            'LINTREVIEW_SETTINGS': 'settings.sample.py',
            'LINTREVIEW_SERVER_NAME': 'example.com'
        }
        with temp_env(mock_env):
            with self.assertRaises(SystemExit):
                self._do_parse(['org-unregister', '--user', 'cool-token', 'github'])
Fast Intra-prediction Mode Decision Algorithm Based on Probability The spatial-domain redundancy is mainly eliminated by intra-prediction encoding. First, the traditional intra-prediction algorithms are introduced. Then, according to the correlation between the most likely mode and the best mode, a mode matrix is derived, and this mode matrix is used for early termination. The experimental results show that when we use this matrix in the proposed algorithm, we can achieve about a 9%-15% efficiency improvement with no image quality degradation; if we use the simple matrix, we can achieve about a 33%-48% computing performance improvement with a drop in PSNR of 0.03 dB.
<gh_stars>100-1000 /* Copyright 2015 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package executorinfo import ( "container/list" "errors" "github.com/mesos/mesos-go/mesosproto" ) // Cache is an LRU cache for executor info objects. // It is not safe for concurrent use. type Cache struct { maxEntries int ll *list.List cache map[string]*list.Element // by hostname } type entry struct { hostname string info *mesosproto.ExecutorInfo } // NewCache creates a new cache. // If maxEntries is zero, an error is being returned. func NewCache(maxEntries int) (*Cache, error) { if maxEntries <= 0 { return nil, errors.New("invalid maxEntries value") } return &Cache{ maxEntries: maxEntries, ll: list.New(), // least recently used sorted linked list cache: make(map[string]*list.Element), }, nil } // Add adds an executor info associated with the given hostname to the cache. func (c *Cache) Add(hostname string, e *mesosproto.ExecutorInfo) { if ee, ok := c.cache[hostname]; ok { c.ll.MoveToFront(ee) ee.Value.(*entry).info = e return } el := c.ll.PushFront(&entry{hostname, e}) c.cache[hostname] = el if c.ll.Len() > c.maxEntries { c.RemoveOldest() } } // Get looks up a hostname's executor info from the cache. func (c *Cache) Get(hostname string) (e *mesosproto.ExecutorInfo, ok bool) { if el, hit := c.cache[hostname]; hit { c.ll.MoveToFront(el) return el.Value.(*entry).info, true } return } // Remove removes the provided hostname from the cache. 
func (c *Cache) Remove(hostname string) { if el, hit := c.cache[hostname]; hit { c.removeElement(el) } } // RemoveOldest removes the oldest item from the cache. func (c *Cache) RemoveOldest() { oldest := c.ll.Back() if oldest != nil { c.removeElement(oldest) } } func (c *Cache) removeElement(el *list.Element) { c.ll.Remove(el) kv := el.Value.(*entry) delete(c.cache, kv.hostname) }
/**
 * Writes a log line to stdout and, when not running in command-line mode,
 * mirrors it in the on-screen log area, scrolling to the newest entry.
 *
 * @param name  the component that is logging
 * @param texto the message text
 */
public static void gera_log(String name, String texto) {
    final String line = name + ": " + texto;
    System.out.println(line);
    if (!cmdmode) {
        log.append(line + "\n");
        log.setCaretPosition(log.getText().length());
    }
}
1. Technology Field Embodiments relate generally to communications modules. More particularly, example embodiments relate to a bail release mechanism for removing communications modules from within receptacles. 2. Related Technology Communication modules, such as electronic or optoelectronic transceiver or transponder modules, are increasingly used in electronic and optoelectronic communication. Some modules are pluggable, which permits the module to be inserted into and removed from a receptacle of a host device, such as a host computer, switching hub, network router, or switch box. Some host devices include multiple receptacles and can therefore accommodate multiple modules simultaneously. Each module typically communicates with a printed circuit board of the host device by transmitting and/or receiving electrical signals to and/or from the host device printed circuit board. These electrical signals can also be transmitted by the module outside the host device as optical and/or electrical signals. In order for a module to be pluggable, various latching mechanisms have been developed to secure modules within host device receptacles and to release modules from within host device receptacles. One such latching mechanism requires the use of a de-latching sleeve between the module and the receptacle. De-latching sleeves can be undesirable as the sleeves can get caught between the module and the receptacle and/or the sliding action can cause excess friction and wear out the parts. Another latching mechanism requires the use of a forward-biased wedge that can be slid backwards to disengage the module from the receptacle. The de-latch action for these types of mechanisms can be awkward as one has to slide the wedge inwards and at the same time pull the module outward. Further, the forward biasing of the wedge can require the integration of a cumbersome spring or other biasing member into the module design. 
Yet another latching mechanism requires that one or more components on the module retract into the interior of the module, thereby disengaging from the receptacle and allowing removal of the module from the receptacle. However, space constraints within the module may prevent implementation of this solution. The subject matter claimed herein is not limited to embodiments that solve any disadvantages or that operate only in environments such as those described above. Rather, this background is only provided to illustrate one exemplary technology area where some embodiments described herein may be practiced.
extern crate tiny_http;
extern crate rustc_serialize;

use rustc_serialize::json::Json;
use tiny_http::{Server, Response};
use std::collections::HashMap;
use reqwest::Error;
use std::env;
use std::thread;
use std::string::String;
use std::clone::Clone;

// Strips the first and last character of `value` — presumably used to
// peel surrounding quotation marks off a JSON string rendering; TODO
// confirm callers never pass strings shorter than two characters.
fn rem_first_and_last(value: &str) -> &str {
    let mut chars = value.chars();
    chars.next();
    chars.next_back();
    chars.as_str()
}

// POSTs `data` as JSON to http://<socket>/<method> with a blocking
// reqwest client and returns the response body as text.
// Any transport or body-read error is propagated to the caller.
fn query_service(data: &HashMap<&str, String>, socket: &str, method: &str) -> std::result::Result<String, reqwest::Error> {
    let wordcount_client = reqwest::blocking::Client::new();
    let url: String = "http://".to_owned() + socket + "/" + method;
    let wordcount_response = match wordcount_client.post(&url)
        .json(data)
        .send() {
        Ok(response) => response,
        Err(e) => {
            return Err(e);
        }
    };
    let wordcount_result = match wordcount_response.text() {
        Ok(re) => re.to_string(),
        Err(e) => {
            return Err(e);
        }
    };
    Ok(wordcount_result)
}

// Gateway server: accepts HTTP requests and fans each one out to a
// wordcount service and a reverse service, concatenating both replies.
// Expects three CLI arguments: its own listen socket, the wordcount
// service socket, and the reverse service socket.
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() != 4 {
        panic!("Error. Input needs to contain: server socket, wordcount socket, reverse socket");
    }
    let server_socket = &args[1];
    let wordcount_socket = args[2].clone();
    let reverse_socket = args[3].clone();
    let server = Server::http(server_socket).unwrap();
    for mut request in server.incoming_requests() {
        // Clone the downstream sockets so the handler thread owns them.
        let wc_sock: String = wordcount_socket.clone();
        let re_sock: String = reverse_socket.clone();
        // One thread per request; join handles are intentionally dropped.
        let _ = thread::spawn(move || {
            match request.url() {
                "/run" => {
                    // Body is JSON with "text" (payload) and "mid"
                    // (numeric message id) fields.
                    let mut content = String::new();
                    request.as_reader().read_to_string(&mut content).unwrap();
                    let json: Json = content.parse().unwrap();
                    let obj = json.as_object().unwrap();
                    let text: String = obj.get("text").unwrap().to_string();
                    // Drop the quotes the Json rendering adds around strings.
                    let text: String = rem_first_and_last(&text).to_string();
                    let mid_string: String = obj.get("mid").unwrap().to_string();
                    let mid_strip = mid_string.replace("\"", "");
                    let mid = match mid_strip.parse::<u64>() {
                        Ok(i) => i,
                        Err(e) => {
                            panic!("http message id {:?}", e);
                        }
                    };
                    let mut map = HashMap::new();
                    map.insert("text", text.to_string());
                    map.insert("mid", mid.to_string());
                    println!("run wordcount");
                    // Downstream failures are reported inline in the
                    // response body rather than failing the request.
                    let wordcount_result = match query_service(&map, &wc_sock, "wordcount") {
                        Ok(result) => result,
                        Err(e) => {
                            println!("Error. wordcount http {}", e);
                            "wordcount error".to_string()
                        }
                    };
                    println!("run reverse");
                    let reverse_result = match query_service(&map, &re_sock, "reverse") {
                        Ok(r) => r,
                        Err(e) => {
                            println!("Error. reverse http {}", e);
                            "reverse error".to_string()
                        }
                    };
                    let response = Response::from_string(wordcount_result + "\n" + &reverse_result + "\n");
                    request.respond(response).expect("Method wordcount. Could not respond");
                },
                "/stop" => {
                    // Propagate the stop to both downstream services,
                    // then exit this handler thread.
                    println!("server stopped");
                    let wordcount_result = query_service(&HashMap::new(), &wc_sock, "stop").unwrap();
                    let reverse_result = query_service(&HashMap::new(), &re_sock, "stop").unwrap();
                    let response = Response::from_string(wordcount_result + "\n" + &reverse_result + "\n" + "gateway server stopped\n");
                    request.respond(response).expect("Method stop. Could not respond");
                    return;
                }
                _ => {
                    // Unknown route: echo the path back with a notice.
                    let response_msg = format!("Unknown method. {}\n", request.url());
                    println!("{}", response_msg);
                    let response = Response::from_string(response_msg);
                    request.respond(response).expect("Method unknown. Could not respond");
                }
            }
        });
    }
}
It doesn’t matter how you care to frame it, the Xbox One is struggling to compete with the PS4. A number of missteps both pre- and post-launch put Microsoft on its back foot, and the company has been desperately trying to get units moving ever since. The latest tactic? A price cut for the next two months. The Xbox One without Kinect currently costs $399, but from November 2nd that will be reduced by $50 to just $349 in the US. The $50 cut applies to any and all official bundles, meaning you can technically save up to $150 depending on which one you choose to pick up. Examples of the lower priced bundles include the console with Sunset Overdrive for $349. The Assassin’s Creed bundle for $349 without Kinect and $449 with, and the Call of Duty Advanced Warfare Bundle is $449, which sounds expensive until you realize it includes a 1TB hard drive, custom console case and controller, as well as the Zero Day edition of the game. All of the major US retailers are taking part in this promotion, including Amazon, Best Buy, GameStop, Microsoft Stores, Target, Toys ‘R Us and Walmart. It’s Microsoft’s attempt to get you to pick up the console now rather than waiting until Black Friday for a bargain. Also, by extending the pricing into the new year it means there’s a good chance Christmas money could be spent on the Xbox One. The new pricing comes into effect on November 2nd for all but the Call of Duty Advanced Warfare Bundle, which will be available November 3rd. It will then run until January 3rd, 2015, at which point the console should go back up to $399. I say should because if this promotion does its job and boosts sales significantly I can see Microsoft deciding to make $349 the permanent price of entry.
# Ask the user for a temperature in degrees Fahrenheit, convert it and show
# it in degrees Celsius: C = 5 * ((F - 32) / 9).


def fahrenheit_to_celsius(temp_f):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius."""
    return 5 * ((temp_f - 32) / 9)


if __name__ == "__main__":
    # float() instead of int(): accepts fractional inputs such as "98.6"
    # (int() would raise ValueError on them).
    tempF = float(input("temperatura em graus Fahrenheit: "))
    tempC = fahrenheit_to_celsius(tempF)
    print("{0} graus Fahrenheit em graus Celsius: {1}".format(tempF, tempC))
from django.contrib import admin

from .models import Produto


@admin.register(Produto)
class ProdutoAdmin(admin.ModelAdmin):
    """Admin configuration for Produto.

    Shows the product name, associated book and image as columns in the
    admin change-list view.
    """

    # Columns displayed in the admin list view.
    list_display = ('nome', 'livro', 'imagem')
package com.baiyi.opscloud.packer.sys.delegate; import com.baiyi.opscloud.common.util.ExtendUtil; import com.baiyi.opscloud.domain.annotation.DesensitizedMethod; import com.baiyi.opscloud.domain.param.IExtend; import com.baiyi.opscloud.domain.vo.sys.CredentialVO; import com.baiyi.opscloud.factory.credential.CredentialCustomerFactory; import com.baiyi.opscloud.packer.IWrapper; import lombok.RequiredArgsConstructor; import org.springframework.stereotype.Component; /** * @Author baiyi * @Date 2022/2/24 10:37 AM * @Version 1.0 */ @Component @RequiredArgsConstructor public class CredentialPackerDelegate implements IWrapper<CredentialVO.Credential> { @Override @DesensitizedMethod public void wrap(CredentialVO.Credential credential, IExtend iExtend) { if (!ExtendUtil.isExtend(iExtend)) return; credential.setQuantityUsed(CredentialCustomerFactory.countByCredentialId(credential.getId())); } }
The Supreme Court on Monday refused to revisit its order on collecting green tax from vehicles entering the Capital, saying it would invoke its special powers to save “Delhi and its people” from the city’s toxic air. “Day in and day out we read in the newspapers that Delhi tops the list of the most polluted cities in the world. People here dread to go out of their houses,” said a bench headed by Chief Justice of India HL Dattu, warning of contempt if the order was not implemented. The court was hearing an application in which the consortium tasked with collecting the environment compensation charge said it was not ready to assume the “humongous” additional responsibility and wanted out. SMYR Consortium — the municipal corporation-appointed concessionaire — also collects tax from goods vehicles entering Delhi. “We were really upset to know that the collection did not start on the date fixed by us. At one point we thought of issuing notice to the toll-tax operator also,” the Chief Justice of India told senior advocate Harish Salve, on whose plea the court ordered the green tax. The court, which has on several occasions expressed concerns about Delhi’s dirty air, had on October 12 ordered the cess — ranging from Rs 700 to Rs 1,300 — on diesel-guzzling trucks entering Delhi. The court had also said it would wait for four months before hearing pleas against the order. The collection was to start from November 1, but SMYR Consortium took five days to implement the order. The South Delhi municipal corporation is the nodal authority for collecting toll tax and signed a contract earlier this year with a private firm for the project. The concessionaire challenged the order, saying it was beyond the terms of the agreement it had with the civic body. “This is extremely excessive and improper. There is no application of mind to the contractual agreement which has been entered into,” senior counsel Shyam Divan said. 
The order, passed in its absence, was an added responsibility, Divan said, offering to withdraw from the toll-collecting contract. The case will now be heard on November 27. Particulate matter 2.5mm — the result of combustion from vehicles, power plants, and other industrial activities — is among the most common pollutants in the city and diesel its biggest contributor. First Published: Nov 17, 2015 11:21 IST
package setup import ( "context" "os" "github.com/solo-io/go-utils/contextutils" "github.com/solo-io/go-utils/stats" "github.com/solo-io/supergloo/pkg/api/clientset" configsetup "github.com/solo-io/supergloo/pkg/config/setup" installsetup "github.com/solo-io/supergloo/pkg/install/setup" registrationsetup "github.com/solo-io/supergloo/pkg/registration/setup" ) // customCtx and customErrHandler are expected to be passed by tests func Main(customCtx context.Context, customErrHandler func(error)) error { if os.Getenv("START_STATS_SERVER") != "" { stats.StartStatsServer() } rootCtx := createRootContext(customCtx) clientSet, err := clientset.ClientsetFromContext(rootCtx) if err != nil { return err } if err := installsetup.RunInstallEventLoop(rootCtx, clientSet, customErrHandler); err != nil { return err } if err := registrationsetup.RunRegistrationEventLoop(rootCtx, clientSet, customErrHandler); err != nil { return err } if err := configsetup.RunConfigEventLoop(rootCtx, clientSet, customErrHandler); err != nil { return err } <-rootCtx.Done() return nil } func createRootContext(customCtx context.Context) context.Context { rootCtx := customCtx if rootCtx == nil { rootCtx = context.Background() } rootCtx = contextutils.WithLogger(rootCtx, "supergloo") return rootCtx }
use super::*;
use crate::helpers::models::problem::*;
use crate::models::problem::{TravelTime, VehicleDetail, VehiclePlace};
use crate::models::solution::Route;

// Transport stub: every leg has zero duration; distances come from the
// `fake_routing` helper regardless of profile or route.
struct OnlyDistanceCost {}

impl TransportCost for OnlyDistanceCost {
    fn duration_approx(&self, _: &Profile, _: Location, _: Location) -> Duration {
        0.
    }

    fn distance_approx(&self, _: &Profile, from: Location, to: Location) -> Distance {
        fake_routing(from, to)
    }

    fn duration(&self, _: &Route, _: Location, _: Location, _: TravelTime) -> Duration {
        0.
    }

    fn distance(&self, _: &Route, from: Location, to: Location, _: TravelTime) -> Distance {
        fake_routing(from, to)
    }
}

impl Default for OnlyDistanceCost {
    fn default() -> Self {
        Self {}
    }
}

// Transport stub that post-processes the fake-routed distance with a
// caller-supplied function of the profile, so tests can make distances
// profile-dependent. Durations are always zero.
struct ProfileAwareTransportCost {
    func: Box<dyn Fn(&Profile, f64) -> f64 + Sync + Send>,
}

impl ProfileAwareTransportCost {
    pub fn new(func: Box<dyn Fn(&Profile, f64) -> f64 + Sync + Send>) -> ProfileAwareTransportCost {
        ProfileAwareTransportCost { func }
    }
}

impl TransportCost for ProfileAwareTransportCost {
    fn duration_approx(&self, _: &Profile, _: Location, _: Location) -> Duration {
        0.
    }

    fn distance_approx(&self, profile: &Profile, from: Location, to: Location) -> Distance {
        (self.func)(profile, fake_routing(from, to))
    }

    fn duration(&self, _: &Route, _: Location, _: Location, _: TravelTime) -> Duration {
        0.
    }

    fn distance(&self, route: &Route, from: Location, to: Location, _: TravelTime) -> Distance {
        // Non-approx variant takes the profile from the route's vehicle.
        (self.func)(&route.actor.vehicle.profile, fake_routing(from, to))
    }
}

// Transport stub returning the same constant for every duration/distance
// query; used to exercise negative cost values.
struct FixedTransportCost {
    duration_cost: f64,
    distance_cost: f64,
}

impl TransportCost for FixedTransportCost {
    fn duration_approx(&self, _: &Profile, _: Location, _: Location) -> Duration {
        self.duration_cost
    }

    fn distance_approx(&self, _: &Profile, _: Location, _: Location) -> Distance {
        self.distance_cost
    }

    fn duration(&self, _: &Route, _: Location, _: Location, _: TravelTime) -> Duration {
        self.duration_cost
    }

    fn distance(&self, _: &Route, _: Location, _: Location, _: TravelTime) -> Distance {
        self.distance_cost
    }
}

impl FixedTransportCost {
    pub fn new(duration_cost: f64, distance_cost: f64) -> Arc<dyn TransportCost + Send + Sync> {
        Arc::new(Self { duration_cost, distance_cost })
    }
}

// Profiles with index 2 get inverted distances (10 - d); all others use the
// plain fake-routed distance.
fn create_profile_aware_transport_cost() -> Arc<dyn TransportCost + Sync + Send> {
    Arc::new(ProfileAwareTransportCost::new(Box::new(|p, d| if p.index == 2 { 10.0 - d } else { d })))
}

fn create_only_distance_transport_cost() -> Arc<dyn TransportCost + Sync + Send> {
    Arc::new(OnlyDistanceCost::default())
}

// Uniform unit costs with a fixed charge of 10.
fn create_costs() -> Costs {
    Costs { fixed: 10.0, per_distance: 1.0, per_driving_time: 1.0, per_waiting_time: 1.0, per_service_time: 1.0 }
}

#[test]
fn all_returns_all_jobs() {
    let jobs = vec![Job::Single(Arc::new(test_single())), Job::Single(Arc::new(test_single()))];

    assert_eq!(Jobs::new(&test_fleet(), jobs, &create_only_distance_transport_cost()).all().count(), 2)
}

parameterized_test! {calculates_proper_cost_between_single_jobs, (left, right, expected), {
    assert_eq!(get_cost_between_jobs(&Profile::default(), &create_costs(), create_only_distance_transport_cost().as_ref(), &Job::Single(left), &Job::Single(right)), expected);
}}

// Jobs without a location contribute zero cost; jobs with multiple places
// use the cheapest pair of places.
calculates_proper_cost_between_single_jobs! {
    case1: (test_single_with_location(Some(0)), test_single_with_location(Some(10)), 10.0),
    case2: (test_single_with_location(Some(0)), test_single_with_location(None), 0.0),
    case3: (test_single_with_location(None), test_single_with_location(None), 0.0),
    case4: (test_single_with_location(Some(3)), test_single_with_locations(vec![Some(5), Some(2)]), 1.0),
    case5: (test_single_with_locations(vec![Some(2), Some(1)]), test_single_with_locations(vec![Some(10), Some(9)]), 7.0),
}

parameterized_test! {calculates_proper_cost_between_multi_jobs, (left, right, expected), {
    assert_eq!(get_cost_between_jobs(&Profile::default(), &create_costs(), create_only_distance_transport_cost().as_ref(), &Job::Multi(left), &Job::Multi(right)), expected);
}}

calculates_proper_cost_between_multi_jobs! {
    case1: (test_multi_job_with_locations(vec![vec![Some(1)], vec![Some(2)]]), test_multi_job_with_locations(vec![vec![Some(8)], vec![Some(9)]]), 6.0),
    case2: (test_multi_job_with_locations(vec![vec![Some(1)], vec![Some(2)]]), test_multi_job_with_locations(vec![vec![None], vec![Some(9)]]), 0.0),
    case3: (test_multi_job_with_locations(vec![vec![None], vec![None]]), test_multi_job_with_locations(vec![vec![None], vec![Some(9)]]), 0.0),
    case4: (test_multi_job_with_locations(vec![vec![None], vec![None]]), test_multi_job_with_locations(vec![vec![None], vec![None]]), 0.0),
}

parameterized_test! {returns_proper_job_neighbours, (index, expected), {
    returns_proper_job_neighbours_impl(index, expected.iter().map(|s| s.to_string()).collect());
}}

// Expected neighbour order under profile index 1 (plain distances; the
// index-2 inversion does not apply), sorted by proximity to the job at
// `index`.
returns_proper_job_neighbours! {
    case1: (0, vec!["s1", "s2", "s3", "s4"]),
    case2: (1, vec!["s0", "s2", "s3", "s4"]),
    case3: (2, vec!["s1", "s3", "s0", "s4"]),
    case4: (3, vec!["s2", "s4", "s1", "s0"]),
}

fn returns_proper_job_neighbours_impl(index: usize, expected: Vec<String>) {
    let p1 = Profile::new(1, None);
    // Two vehicles, both on profile p1, with default details.
    let fleet = FleetBuilder::default()
        .add_driver(test_driver())
        .add_vehicles(vec![
            VehicleBuilder::default().id("v1").profile(p1.clone()).details(vec![test_vehicle_detail()]).build(),
            VehicleBuilder::default().id("v2").profile(p1.clone()).details(vec![test_vehicle_detail()]).build(),
        ])
        .build();
    // Five single jobs at consecutive locations 0..=4.
    let species = vec![
        SingleBuilder::default().id("s0").location(Some(0)).build_as_job_ref(),
        SingleBuilder::default().id("s1").location(Some(1)).build_as_job_ref(),
        SingleBuilder::default().id("s2").location(Some(2)).build_as_job_ref(),
        SingleBuilder::default().id("s3").location(Some(3)).build_as_job_ref(),
        SingleBuilder::default().id("s4").location(Some(4)).build_as_job_ref(),
    ];

    let jobs = Jobs::new(&fleet, species.clone(), &create_profile_aware_transport_cost());

    let result: Vec<String> =
        jobs.neighbors(&p1, species.get(index).unwrap(), 0.0).map(|(j, _)| get_job_id(j).clone()).collect();

    assert_eq!(result, expected);
}

parameterized_test! {returns_proper_job_ranks, (index, profile, expected), {
    returns_proper_job_ranks_impl(index, profile, expected);
}}

// Rank is the distance from the closest vehicle start location to the job,
// evaluated for the given profile index (fake_routing appears to be the
// absolute location difference, consistent with these expectations).
returns_proper_job_ranks! {
    case1: (0, 1, 0.0),
    case2: (1, 1, 5.0),
    case3: (2, 1, 6.0),
    case4: (3, 1, 16.0),
    case5: (0, 3, 30.0),
    case6: (1, 3, 20.0),
    case7: (2, 3, 9.0),
    case8: (3, 3, 1.0),
}

fn returns_proper_job_ranks_impl(index: usize, profile_index: usize, expected: Distance) {
    let profile = Profile::new(profile_index, None);
    let p1 = Profile::new(1, None);
    let p3 = Profile::new(3, None);
    // Vehicle detail with a parameterized start location and a fixed end at 0.
    let create_vehicle_detail = |start_location: usize| VehicleDetail {
        start: Some(VehiclePlace { location: start_location, time: TimeInterval::default() }),
        end: Some(VehiclePlace { location: 0, time: TimeInterval::default() }),
    };
    // Profile 1 vehicles start at 0 and 15; the profile 3 vehicle starts at 30.
    let fleet = FleetBuilder::default()
        .add_driver(test_driver())
        .add_vehicles(vec![
            VehicleBuilder::default().id("v1_1").profile(p1.clone()).details(vec![create_vehicle_detail(0)]).build(),
            VehicleBuilder::default().id("v1_2").profile(p1).details(vec![create_vehicle_detail(15)]).build(),
            VehicleBuilder::default().id("v2_1").profile(p3).details(vec![create_vehicle_detail(30)]).build(),
        ])
        .build();
    let species = vec![
        SingleBuilder::default().id("s0").location(Some(0)).build_as_job_ref(),
        SingleBuilder::default().id("s1").location(Some(10)).build_as_job_ref(),
        SingleBuilder::default().id("s2").location(Some(21)).build_as_job_ref(),
        SingleBuilder::default().id("s3").location(Some(31)).build_as_job_ref(),
    ];

    let jobs = Jobs::new(&fleet, species.clone(), &create_profile_aware_transport_cost());

    let result = jobs.rank(&profile, species.get(index).unwrap());

    assert_eq!(result, expected);
}

#[test]
fn can_use_multi_job_bind_and_roots() {
    let job = test_multi_job_with_locations(vec![vec![Some(0)], vec![Some(1)]]);
    let jobs = vec![Job::Multi(job.clone())];
    let jobs = Jobs::new(&test_fleet(), jobs, &create_only_distance_transport_cost());

    // Rebuilding the multi job from one of its sub-jobs via `roots` yields a
    // distinct instance that is not indexed, hence no neighbours.
    let job = Job::Multi(Multi::roots(&job.jobs.first().unwrap()).unwrap());

    assert_eq!(jobs.neighbors(&Profile::default(), &job, 0.0).count(), 0);
}

parameterized_test! {can_handle_negative_distances_durations, (duration_cost, distance_cost), {
    can_handle_negative_distances_durations_impl(FixedTransportCost::new(duration_cost, distance_cost));
}}

can_handle_negative_distances_durations! {
    case01: (-1., 1.),
    case02: (1., -1.),
    case03: (-1., -1.),
    case04: (-1., 0.),
}

fn can_handle_negative_distances_durations_impl(transport_costs: Arc<dyn TransportCost + Send + Sync>) {
    let profile = Profile::default();
    let species = vec![
        SingleBuilder::default().id("s0").location(Some(0)).build_as_job_ref(),
        SingleBuilder::default().id("s1").location(Some(1)).build_as_job_ref(),
    ];

    let jobs = Jobs::new(&test_fleet(), species.clone(), &transport_costs);

    // Negative costs must be clamped to the UNREACHABLE_COST sentinel.
    for job in &species {
        assert!(jobs
            .neighbors(&profile, job, 0.0)
            .all(|(_, cost)| { (*cost - UNREACHABLE_COST).abs() < std::f64::EPSILON }));
    }
}
A federal jury convicted two Detroit police officers of felony extortion Monday, but cleared them of nine other charges in a five-week trial that accused them of arranging drug transactions with civilians so they could rob and extort them. David Hansberry and Bryan Watson, members of the now-disbanded narcotics unit of the Detroit Police Department, face 20 years in prison on charges of conspiracy to interfere with commerce by extortion and robbery. Hansberry, 35, and Watson, 47, were indicted in April 2015 by a federal grand jury on charges of carrying out traffic stops and fake arrests to allegedly steal drugs, money and property. Indicted but cleared in the case was Kevlin “Omar” Brown, a longtime friend of Hansberry. Defense attorneys for all three men declined comment after the verdict Monday afternoon. U.S. Attorney Barbara L. McQuade said the evidence showed Hansberry, who was a sergeant at the time, and Watson failed to log into evidence money and drugs seized during searches of homes. Instead, they split the proceeds and arranged for the sale of the drugs, sharing the proceeds generated by the sales. In one instance in July 2010, Hansberry and Watson participated in a drug seizure that netted more than $3 million, the largest cash seizure by the Detroit Police Department at that time. Only $2.2 million, however, was placed in the evidence room. “These defendants tarnished the badge that is worn with honor by their fellow officers, using their power as police officers to steal money and drugs from criminals who have no recourse,” McQuade said. “In addition to betraying their trust to uphold the law, these officers also put back out onto the streets the drugs that they had seized so that they could split the proceeds. 
Their greed caused them to poison our neighborhoods with drugs and to diminish public trust in police.” Detroit Police Chief James Craig said on Monday the vast majority of the men and women of the Detroit Police Department are honest and hard-working. “But these defendants betrayed their oath and their fellow officers. We are committed to the highest standards of integrity, and we will remove any officers who do not live up to those high standards,” Craig said. Hansberry, a 16-year veteran, and Watson, who spent 22 years on the police force, were suspended without pay after the indictments were filed last year. Prosecutors said in November they were concerned about the safety of witnesses in the case, and had to relocate them, according to court transcripts obtained by The News. One witness was shot while driving in Metro Detroit, prosecutors said. The allegations against the two cops came as Detroit Police were conducting an internal investigation into widespread wrongdoing in the Narcotics Section, which prompted Craig to disband the unit. Sentencing is Nov. 11 before U.S. District Judge Stephen J. Murphy III. Read or Share this story: http://detne.ws/29wemkD
/**
 * Purchases a sale listing on behalf of the given user.
 * Records the sale in the sales history, sends purchase notifications,
 * updates the seller's inventory and finally removes the listing itself.
 *
 * @param listingId ID of the sale listing to purchase
 * @param appUser   user performing the purchase
 */
public void buySaleListing(Integer listingId, AppUserDetails appUser) {
    var purchaser = userService.getUserByEmail(appUser.getUsername());
    SaleListing saleListing = retrieveListing(listingId);
    logger.info("User with ID: {} Request to buy Sale Listing with ID: {}",
            purchaser.getId(), saleListing.getId());

    // Record the sale before the listing is deleted below.
    var saleRecord = new Sale(saleListing);
    saleRecord.setBuyerId(purchaser.getId());
    saleHistoryRepository.save(saleRecord);

    sendPurchaseNotifications(saleListing, purchaser);
    updateInventoryItem(saleListing);
    saleListingRepository.delete(saleListing);
}
1. Field of the Invention The present invention relates to the field of display technology, and more particularly to a double-sided organic light-emitting diode (OLED) display device. 2. The Related Arts In the field of display technology, flat panel displays, such as liquid crystal displays (LCDs) and organic light-emitting diode (OLED) displays, have gradually taken the place of cathode ray tube (CRT) displays. The LCD shows various advantages, such as thin device body, low power consumption, and being free of radiation, and has been widely used in various applications, such as liquid crystal televisions, mobile phones, digital cameras, tablet computers, and display screens of notebook computers. The OLED display has various features, such as being self-luminous, high brightness, fast response, wide view angle, low power consumption, and being capable of flexible displaying, and is considered a “dream display” that has attracted the attention of major display manufacturers and is becoming the mainstream of the third-generation displays in the field of display technology. Heretofore, the LCD and OLED display are generally single-sided displays. Taking a contemporary OLED display as an example, there are generally several different types, including top emission, bottom emission, and transparent display, among which the top-emission OLED display and the bottom-emission OLED display are capable of only single-sided displaying, while the transparent OLED display, although allowing a user to observe images from opposite directions of the front and rear sides of the OLED display, is only capable of providing a normally displayed image from one direction, while the image observed from the opposite direction is displayed in a reversed way, so that no double-sided normal displaying can be achieved. For contemporary LCDs, being constrained to the displaying principle of the LCD, only single-sided displaying or transparent displaying is possible.
With the diversification of styles of electronic products, double-sided displaying is becoming a new feature of the next generation of display devices, particularly for display devices for applications in shop exhibition or outdoor displaying. However, double-sided display devices that are currently available are simply structured by combining two independent single-sided display devices in a back-to-back manner in order to display on two opposite sides. The combined structure is bulky and not beautiful, requires a high manufacturing cost, and does not meet the demand of general consumers for lightweight, compactness, and high cost-to-performance ratio.
The Application of FRD with Avalanche Capability for Improvement of Power Conversion Efficiency in Output Rectifier and PFC This paper shows that power efficiency in the power supply with PFC is improved drastically by using the new FRD with high avalanche capability. Firstly the new FRD is applied to the output rectifier of 3 kW output power in order to reduce the loss of it. High frequency switching loss decreases because the new FRD has faster trr. Secondly, we apply the new FRD to the PFC circuit. In the PFC circuit, we will show that turn-on loss of switching device such as MOSFET is dominant. The new FRD enables -di/dt condition to be high, then the total loss is reduced. For PFC loss reduction, it is important that MOSFET turn-on loss and FRD recovery loss have the adequate balance so as to make the total loss minimum. As the result of the above application, we achieve a great improvement of total power efficiency in the power supply from 86.5% to about 90%. We propose that this technology is one of the efficiency improvement methods and is effective in prevention of global warming.
<gh_stars>1-10 package sk.mrtn.pixi.client.interaction; import jsinterop.annotations.JsConstructor; import jsinterop.annotations.JsMethod; import jsinterop.annotations.JsProperty; import jsinterop.annotations.JsType; import sk.mrtn.pixi.client.DisplayObject; import sk.mrtn.pixi.client.Point; /** * Created by klaun on 22/08/16. */ @JsType(isNative = true, namespace = "PIXI.interaction") public class InteractionManager { /** * Should default browser actions automatically be prevented. (default true) */ @JsProperty public boolean autoPreventDefault; /** * The css style of the cursor that is being used */ @JsProperty public String currentCursorStyle; /** * Every update cursor will be reset to this value, * if some element wont override it in its hitTest */ @JsProperty public String defaultCursorStylestring; /** * An event data object to handle all the event tracking/dispatching */ @JsProperty public Object eventData; /** * As this frequency increases the interaction events will be checked more often. * default 10 */ @JsProperty public double interactionFrequency; /** * Tiny little interactiveData pool ! */ @JsProperty public InteractionData[] interactiveDataPoolArray; /** * The mouse data */ @JsProperty public InteractionData mouse; /** * The renderer this interaction manager works for. */ @JsProperty public Object renderer; /** * The current resolution / device pixel ratio. */ @JsProperty public double resolution; @JsConstructor public InteractionManager(Object renderer, Object options){} /** * Destroys the interaction manager */ @JsMethod public native void destroy(); /** * Maps x and y coords from a DOM object and maps them * correctly to the pixi view. The resulting value is stored in the point. * This takes into account the fact that the DOM element could * be scaled and positioned anywhere on the screen. 
* @param point the point that the result will be stored in * @param x the x coord of the position to map * @param y the y coord of the position to map */ @JsMethod public native void mapPositionToPoint(Point point, double x, double y); /** * TODO: test and create appropriate callback * This function is provides a neat way of crawling through the scene graph * and running a specified function on all interactive objects it finds. * It will also take care of hit testing the interactive objects and passes the hit across in the function. * @param point the point that is tested for collision * @param displayObject the displayObject that will be hit test (recurcsivly crawls its children) * @param func the function that will be called on each interactive object. The displayObject and hit will be passed to the function * @param hitTest (optional) this indicates if the objects inside should be hit test against the point * @param interactive (optional) Whether the displayObject is interactive * @return */ public native boolean processInteractive(Point point, DisplayObject displayObject, Object func, Boolean hitTest, Boolean interactive); /** * Updates the state of interactive objects. * Invoked by a throttled ticker update from * PIXI.ticker.shared. * @param deltaTime time delta since last tick */ public native void update(double deltaTime); }
/// Acquires an exclusive lock to the interior of the lock, if this lock has
/// not already been locked for reading; otherwise, returns `None`.
///
/// Performs a double-check: `is_locked` is read once before taking the inner
/// write lock (cheap early-out) and once again after acquiring it, to catch
/// a read-lock that raced in between. A `true` return from `is_locked` is
/// permanent, which is what makes the early-out sound.
pub fn try_write(&self) -> Option<FusedRwLockGuard<T>> {
    // Optimization, since a true return from self.is_locked is guaranteed to continue forever
    if !self.is_locked() {
        let guard = self.inner.write();
        // Re-check under the inner write lock: the fuse may have been set
        // between the first check and the acquisition above.
        if !self.is_locked() {
            Some(FusedRwLockGuard {
                _guard: guard,
                // Safety:
                // Because self.locked is not set, there are no readers. Other writers are excluded by the fact that the WriteGuard is held.
                inner: unsafe { &mut *self.object.get() },
            })
        } else {
            None
        }
    } else {
        None
    }
}
/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "apr_thread_proc.h"
#include "apr_thread_mutex.h"
#include "apr_thread_rwlock.h"
#include "apr_file_io.h"
#include "apr_errno.h"
#include "apr_general.h"
#include "apr_getopt.h"
#include "errno.h"
#include <stdio.h>
#include <stdlib.h>
#include "testutil.h"

#if !APR_HAS_THREADS
int main(void)
{
    printf("This program won't work on this platform because there is no "
           "support for threads.\n");
    return 0;
}
#else /* !APR_HAS_THREADS */

#define DEFAULT_MAX_COUNTER 1000000
#define MAX_THREADS 6

static int verbose = 0;
static long mutex_counter;                       /* shared counter guarded by the lock under test */
static long max_counter = DEFAULT_MAX_COUNTER;   /* iterations per thread (-c option) */

static apr_thread_mutex_t *thread_lock;
void * APR_THREAD_FUNC thread_mutex_func(apr_thread_t *thd, void *data);
/* NOTE: declarations and definitions now agree on apr_status_t (the
 * original declared apr_status_t but defined int). */
apr_status_t test_thread_mutex(int num_threads);        /* apr_thread_mutex_t */
apr_status_t test_thread_mutex_nested(int num_threads);

static apr_thread_rwlock_t *thread_rwlock;
void * APR_THREAD_FUNC thread_rwlock_func(apr_thread_t *thd, void *data);
apr_status_t test_thread_rwlock(int num_threads);       /* apr_thread_rwlock_t */

apr_pool_t *pool;
int i = 0;  /* loop counter shared with main(); unused global 'x' removed */

/* Worker: bump the shared counter max_counter times under the mutex. */
void * APR_THREAD_FUNC thread_mutex_func(apr_thread_t *thd, void *data)
{
    long i;  /* long, to match max_counter's type (was int: overflow risk) */

    for (i = 0; i < max_counter; i++) {
        apr_thread_mutex_lock(thread_lock);
        mutex_counter++;
        apr_thread_mutex_unlock(thread_lock);
    }
    return NULL;
}

/* Worker: bump the shared counter max_counter times under the rwlock. */
void * APR_THREAD_FUNC thread_rwlock_func(apr_thread_t *thd, void *data)
{
    long i;  /* long, to match max_counter's type (was int: overflow risk) */

    for (i = 0; i < max_counter; i++) {
        apr_thread_rwlock_wrlock(thread_rwlock);
        mutex_counter++;
        apr_thread_rwlock_unlock(thread_rwlock);
    }
    return NULL;
}

/* Time num_threads workers contending on an UNNESTED mutex. */
apr_status_t test_thread_mutex(int num_threads)
{
    apr_thread_t *t[MAX_THREADS];
    apr_status_t s[MAX_THREADS];
    apr_time_t time_start, time_stop;
    int i;

    mutex_counter = 0;

    printf("apr_thread_mutex_t Tests\n");
    printf("%-60s", "    Initializing the apr_thread_mutex_t (UNNESTED)");
    s[0] = apr_thread_mutex_create(&thread_lock, APR_THREAD_MUTEX_UNNESTED, pool);
    if (s[0] != APR_SUCCESS) {
        printf("Failed!\n");
        return s[0];
    }
    printf("OK\n");

    /* Hold the lock while spawning so all workers start together. */
    apr_thread_mutex_lock(thread_lock);
    /* set_concurrency(4)? -aaron */
    printf("    Starting %d threads    ", num_threads);
    for (i = 0; i < num_threads; ++i) {
        s[i] = apr_thread_create(&t[i], NULL, thread_mutex_func, NULL, pool);
        if (s[i] != APR_SUCCESS) {
            printf("Failed!\n");
            return s[i];
        }
    }
    printf("OK\n");

    time_start = apr_time_now();
    apr_thread_mutex_unlock(thread_lock);

    /* Wait for the workers; s[i] receives each thread's exit status. */
    for (i = 0; i < num_threads; ++i) {
        apr_thread_join(&s[i], t[i]);
    }

    time_stop = apr_time_now();
    printf("microseconds: %" APR_INT64_T_FMT " usec\n",
           (time_stop - time_start));
    if (mutex_counter != max_counter * num_threads)
        printf("error: counter = %ld\n", mutex_counter);

    return APR_SUCCESS;
}

/* Time num_threads workers contending on a NESTED (recursive) mutex. */
apr_status_t test_thread_mutex_nested(int num_threads)
{
    apr_thread_t *t[MAX_THREADS];
    apr_status_t s[MAX_THREADS];
    apr_time_t time_start, time_stop;
    int i;

    mutex_counter = 0;

    printf("apr_thread_mutex_t Tests\n");
    printf("%-60s", "    Initializing the apr_thread_mutex_t (NESTED)");
    s[0] = apr_thread_mutex_create(&thread_lock, APR_THREAD_MUTEX_NESTED, pool);
    if (s[0] != APR_SUCCESS) {
        printf("Failed!\n");
        return s[0];
    }
    printf("OK\n");

    /* Hold the lock while spawning so all workers start together. */
    apr_thread_mutex_lock(thread_lock);
    /* set_concurrency(4)? -aaron */
    printf("    Starting %d threads    ", num_threads);
    for (i = 0; i < num_threads; ++i) {
        s[i] = apr_thread_create(&t[i], NULL, thread_mutex_func, NULL, pool);
        if (s[i] != APR_SUCCESS) {
            printf("Failed!\n");
            return s[i];
        }
    }
    printf("OK\n");

    time_start = apr_time_now();
    apr_thread_mutex_unlock(thread_lock);

    /* Wait for the workers; s[i] receives each thread's exit status. */
    for (i = 0; i < num_threads; ++i) {
        apr_thread_join(&s[i], t[i]);
    }

    time_stop = apr_time_now();
    printf("microseconds: %" APR_INT64_T_FMT " usec\n",
           (time_stop - time_start));
    if (mutex_counter != max_counter * num_threads)
        printf("error: counter = %ld\n", mutex_counter);

    return APR_SUCCESS;
}

/* Time num_threads workers taking the write side of a rwlock. */
apr_status_t test_thread_rwlock(int num_threads)
{
    apr_thread_t *t[MAX_THREADS];
    apr_status_t s[MAX_THREADS];
    apr_time_t time_start, time_stop;
    int i;

    mutex_counter = 0;

    printf("apr_thread_rwlock_t Tests\n");
    printf("%-60s", "    Initializing the apr_thread_rwlock_t");
    s[0] = apr_thread_rwlock_create(&thread_rwlock, pool);
    if (s[0] != APR_SUCCESS) {
        printf("Failed!\n");
        return s[0];
    }
    printf("OK\n");

    /* Hold the write lock while spawning so all workers start together. */
    apr_thread_rwlock_wrlock(thread_rwlock);
    /* set_concurrency(4)? -aaron */
    printf("    Starting %d threads    ", num_threads);
    for (i = 0; i < num_threads; ++i) {
        s[i] = apr_thread_create(&t[i], NULL, thread_rwlock_func, NULL, pool);
        if (s[i] != APR_SUCCESS) {
            printf("Failed!\n");
            return s[i];
        }
    }
    printf("OK\n");

    time_start = apr_time_now();
    apr_thread_rwlock_unlock(thread_rwlock);

    /* Wait for the workers; s[i] receives each thread's exit status. */
    for (i = 0; i < num_threads; ++i) {
        apr_thread_join(&s[i], t[i]);
    }

    time_stop = apr_time_now();
    printf("microseconds: %" APR_INT64_T_FMT " usec\n",
           (time_stop - time_start));
    if (mutex_counter != max_counter * num_threads)
        printf("error: counter = %ld\n", mutex_counter);

    return APR_SUCCESS;
}

int main(int argc, const char * const *argv)
{
    apr_status_t rv;
    char errmsg[200];
    apr_getopt_t *opt;
    char optchar;
    const char *optarg;

    printf("APR Lock Performance Test\n==============\n\n");

    apr_initialize();
    atexit(apr_terminate);

    if (apr_pool_create(&pool, NULL) != APR_SUCCESS)
        exit(-1);

    if ((rv = apr_getopt_init(&opt, pool, argc, argv)) != APR_SUCCESS) {
        fprintf(stderr, "Could not set up to parse options: [%d] %s\n",
                rv, apr_strerror(rv, errmsg, sizeof errmsg));
        exit(-1);
    }

    /* -c N: iterations per thread; -v: verbose. */
    while ((rv = apr_getopt(opt, "c:v", &optchar, &optarg)) == APR_SUCCESS) {
        if (optchar == 'c') {
            max_counter = atol(optarg);
        }
        else if (optchar == 'v') {
            verbose = 1;
        }
    }

    if (rv != APR_SUCCESS && rv != APR_EOF) {
        fprintf(stderr, "Could not parse options: [%d] %s\n",
                rv, apr_strerror(rv, errmsg, sizeof errmsg));
        exit(-1);
    }

    /* Run every test at each thread count from 1 to MAX_THREADS. */
    for (i = 1; i <= MAX_THREADS; ++i) {
        if ((rv = test_thread_mutex(i)) != APR_SUCCESS) {
            fprintf(stderr, "thread_mutex test failed : [%d] %s\n",
                    rv, apr_strerror(rv, (char*)errmsg, 200));
            exit(-3);
        }

        if ((rv = test_thread_mutex_nested(i)) != APR_SUCCESS) {
            fprintf(stderr, "thread_mutex (NESTED) test failed : [%d] %s\n",
                    rv, apr_strerror(rv, (char*)errmsg, 200));
            exit(-4);
        }

        if ((rv = test_thread_rwlock(i)) != APR_SUCCESS) {
            fprintf(stderr, "thread_rwlock test failed : [%d] %s\n",
                    rv, apr_strerror(rv, (char*)errmsg, 200));
            exit(-6);
        }
    }

    return 0;
}

#endif /* !APR_HAS_THREADS */
An in Vitro Selection System for TNA — (3′→2′)-α-L-Threose nucleic acid (TNA) is an unnatural polymer that possesses the rare ability to base-pair with RNA, DNA, and itself. This feature, coupled with its chemical simplicity, makes TNA of interest as a possible progenitor of RNA during the early history of life. To evaluate the functional potential of TNA, we have developed a system for the in vitro selection of TNA. We identified the Therminator DNA polymerase as a remarkably efficient DNA-dependent TNA polymerase capable of polymerizing more than 50 tNTPs. We have also developed a method of covalently linking a DNA template to the TNA strand that it encodes, thus obviating the need for a TNA-dependent DNA polymerase during cycles of selection. (3′→2′)-α-L-Threose nucleic acid (TNA, Figure 1A) is an unnatural nucleic acid that was identified during an extensive evaluation of alternative sugar-phosphate backbones aimed at explaining the structure of the biological nucleic acids. 1,2 TNA possesses the ability to specifically base-pair with RNA, DNA, and itself. 2 This capability, together with the chemical simplicity of threose relative to ribose, suggests that TNA could have acted as an evolutionary competitor of RNA or even have preceded RNA as the genetic molecule of life. We are attempting to investigate the functional potential of TNA by implementing an in vitro selection scheme for TNA. 3,4 Here, we show that a mutant archaeal family B DNA polymerase is capable of polymerizing more than 50 nucleotides of TNA on a DNA template. We also demonstrate the display of single-stranded TNA covalently linked to its encoding duplex DNA, thus enabling the selection of functional TNA sequences and the amplification or recovery of the attached DNA. We and others have previously shown that certain family B archaeal DNA polymerases possess the ability to synthesize limited stretches of TNA on a DNA template.
5,6 Our recent synthesis of all four TNA triphosphates (tNTPs) enabled us to test polymerases for more extensive activity. 7 The Therminator DNA polymerase is an engineered exonuclease-deficient form of "9°N" DNA polymerase containing an A485L mutation. 8 It is capable of efficiently incorporating a wide spectrum of modified nucleotides. We tested the ability of this polymerase to accept tNTPs as substrates using a DNA primer/template construct containing a 50-nucleotide single-stranded template region in which all four DNA nucleobases were represented (Figure 1B). Since previous work had shown that pairing diaminopurine opposite thymine increases the efficiencies of both template-directed ligation and polymerization, 5,9 we used diaminopurine triphosphate (tDTP) instead of tATP. The Therminator polymerase catalyzed the synthesis of >20% full-length 50-nucleotide TNA product within 24 h (Figure 1C). We reasoned that if transcribed TNA could be covalently linked to its DNA template, we could perform functional selections for TNA molecules and rescue the successful genotypes by PCR amplification of the attached DNA. This approach is analogous to the selection of functional peptides and proteins by mRNA display; 10 the use of DNA display for peptide and PNA selections has also been proposed. 10,11 By starting with a library of single-stranded DNA hairpins, the 3′ end of each hairpin could act as a primer for TNA transcription across the randomized DNA template region (Figure 2A). A primer annealed to the loop region of the hairpin could then initiate strand-displacement synthesis, liberating the TNA strand to allow folding and linearizing the DNA template by making it double-stranded. To test this idea, we synthesized a single-stranded DNA hairpin and transcribed the 60-nucleotide single-stranded region using the Therminator DNA polymerase and tNTPs (Figure 2B). Native gel analysis of the reaction indicated efficient TNA transcription (Figure 2C).
Full-length transcripts were purified from the gel and subjected to a strand-displacement reaction with dNTPs, Therminator polymerase, and an end-labeled primer. With the hairpin construct in excess, most of the labeled primer was extended to full length, indicating very efficient strand displacement (Figure 2D). To verify that the resulting DNA-DNA duplexes were stable against strand invasion by TNA, the strand-displaced constructs were treated with the restriction enzyme BsrBI, which specifically cleaves 15 base pairs from the end of the DNA template. The strand-displaced products were completely digested by BsrBI. BbsI, a control enzyme without a site in the DNA sequence, showed no activity (Figure 2D). BsrBI did not cleave ssDNA (data not shown). In vitro selection for TNA function will only work if the TNA transcript is a reasonably faithful copy of its DNA template. The number of errors in a full-length TNA transcript is a function of both the misincorporation rate and the rate of primer extension from mismatched sites and is therefore time-dependent (since at longer times, full-length material accumulates by extension of strands that had paused at sites of misincorporation). To study the time-dependent fidelity of TNA synthesis by the Therminator polymerase, we examined the effect of omitting one tNTP at a time from short primer-extension reactions (Supporting Information Figure 1). For each reaction, the DNA template contained only one template site complementary to the omitted tNTP. This site was positioned 11 nucleotides downstream of the DNA primer terminus to ensure that fidelity was measured when the polymerase was fully contacting the TNA strand. A control reaction containing all four tNTPs was performed in parallel for each template. (Author affiliations: Howard Hughes Medical Institute and Massachusetts General Hospital; Boston College; § Chinese Academy of Sciences.)
We compared the amount of full-length product in each -tNTP reaction versus control reactions at multiple time points to estimate the fidelity for each base. This assay accounts for both the slow rate of misincorporation as well as the slow rate of extension from mismatched primer termini, but underestimates the actual fidelity because of the lack of competition between correct and incorrect nucleotides. These experiments suggest that the Therminator polymerase incorporates tGTP and tCTP with ∼99 and ∼98% accuracy, respectively, in full-length TNA strands, even at relatively long incubation times (Supporting Information Table 1). The fidelities for incorporation of tDTP and tTTP were much lower (∼94% at 1 h, 10-20% of the primer fully extended), presumably reflecting misincorporation of tGTP and tCTP because of wobble base-pairing. Although the overall fidelity of TNA synthesis is lower than previously measured DNA synthesis fidelities, 12 more than 10% of TNA pool molecules would be error-free in material purified when TNA synthesis had reached 10-20% as we would do during a selection (Supporting Information Table 2). The actual error rate in material synthesized in the presence of all four tNTPs, and with high D/G and T/C ratios, may be much lower. Since typical in vitro selection experiments involve enrichment factors of more than 100-1000-fold per round, even a 10-fold reduction due to incorrectly transcribed sequences would yield an acceptable enrichment factor of at least 10-100-fold per round. We have identified the Therminator DNA polymerase as an enzyme capable of reasonably efficient and faithful DNA-templated TNA polymerization. The DNA-display technique developed here for the selection of functional TNA molecules is uniquely suited for the directed evolution of unnatural nucleic acids because it does not require a polymerase that can copy the unnatural nucleic acid back into DNA.
Thus, any polymer that can be synthesized by a DNA-templated reaction, either enzymatically or nonenzymatically, 11 can be used for selection. We hope to investigate the potential of TNA for forming ligand-binding sites or catalytically active structures in order to evaluate its possible role as a progenitor of RNA.
Preferring Copies with No Originals: Does the Army Training Strategy Train to Fail? "You know, I know this steak doesn't exist. I know that when I put it in my mouth, the Matrix is telling my brain that it is juicy and delicious. After nine years, you know what I realize? Ignorance is bliss." --Cypher (From the motion picture The Matrix) THE U.S. ARMY spends a vast amount of energy, resources, and time on training, perpetually seeking improvements to forge a better force. The latest Army Training Strategy (October 2012) tasks our Army to "hold commanders responsible for training units and developing leaders through the development and execution of progressive, challenging, and realistic training." This implies a shared understanding of what training is realistic, and what is not. Although our training strategy employs the terms "training realism," "replication," "operational relevant training," and "adaptive" throughout the short document, it never defines or differentiates this lexicon. Without any contextual depth in these myriad concepts, is it possible that due to fundamental flaws in our training strategy we are unaware when we conduct unrealistic training instead? In other words, do we train to fail? This article does not suggest failure with respect to military trainers, tactics, operational or strategic level training objectives; one must look at an even bigger picture above all of these things. Our training centers are full of dynamic, dedicated military professionals who might take offense at the notion of "training to fail"; however if our overarching training philosophy is faulty, even the best efforts will not matter. To contemplate our training philosophy, can we consider on a holistic and ontological level how the Army approaches training, and how we "think about thinking" with respect to training? 
To bring some context to this abstract proposal, I introduce in this article several design concepts that draw from post-modern philosophical and sociological fields that help us consider whether our Army may inadvertently train to fail, and how it has effectively insulated itself from even questioning these institutionalisms. "Design" as it relates to military applications has a broad range of conceptual, holistic applications for dealing with complexity, although most services attempt to brand their own design approach for self-relevant concerns. Army design methodology does not include any of these concepts in U.S. Army doctrine nor does our training strategy specifically reference design theory. However, critical reflection and holistic, systemic approaches might illustrate our training shortfalls. To conduct this inquiry, we draw from philosopher Jean Baudrillard's concept of simulation and simulacra. We also reference sociologists Peter Berger and Thomas Luckmann's collaborative concept of "social knowledge construction," to demonstrate how the Army potentially trains in an approach that is in conflict with what we expect our training to accomplish. Are we spending our energies, resources, and time in training approaches that are detrimental to our overarching goals because they train us in the wrong ways? To return to the plot of the science fiction movie quoted at the beginning, shall we swallow the red pill and face uncomfortable truths, or swallow the blue pill and continue enjoying the false realities we create for ourselves through training the force toward national policy goals? The writers behind The Matrix were heavily influenced by Baudrillard's work on simulacra, which emphasizes a stark contrast between false "realities" that we as a society often prefer over the painful, bleak, and more challenging "real world" we tend to avoid. 
This proves useful in that while Baudrillard's work is relatively unknown, the Matrix movies are extremely popular in Western society and address the same existential concept. This article's introductory quote features a conversation between a treacherous character and an agent of the Matrix where the conspirator acknowledges his shared understanding that the steak he is eating within the Matrix is imaginary; it is "fake steak.
On Sunday, December 26, 2004 JACK DAVID GERTZ, beloved son of Dorothy Gertz (nee Bloom) and the late Morris Gertz, beloved brother of Eleanor Hoffman of Delray Beach, FL, brother-in-law of Leonard Hoffman, adored uncle of Steven and Susan Hoffman, Craig and Lisa Hoffman, Susan and Jonathan Shapiro and Nancy Hoffman, beloved great uncle of Dan, Cheri, Laura, Jacob, Sarah and Nathan. Services at SOL LEVINSON & BROS, INC., 8900 Reisterstown Rd. at Mt. Wilson Lane on Tuesday, December 28 at 10 A.M. Interment Hebrew Young Men Cemetery-5800 Windsor Mill Road. Please omit flowers. Memorial contributions in his memory may be directed to The A. R. C. of Baltimore, 7215 York Road (21212). In mourning at 449 Noah Court, Westminster, MD (21157) thru Thursday.
// validateEmbeddedTxBasic validates an EmbeddedTx based on things that don't // depend on the context. func validateEmbeddedTxBasic(etx types.EmbeddedTx) (err sdk.Error) { sigs := etx.Signatures if len(sigs) == 0 { return sdk.ErrUnauthorized("transaction missing signatures") } signerAddrs := etx.GetRequiredSigners() if len(sigs) != len(signerAddrs) { return sdk.ErrUnauthorized("invalid number of transaction signers") } return nil }
<filename>bus-starter/src/main/java/org/aoju/bus/starter/office/OfficeProviderService.java /********************************************************************************* * * * The MIT License (MIT) * * * * Copyright (c) 2015-2021 aoju.org and other contributors. * * * * Permission is hereby granted, free of charge, to any person obtaining a copy * * of this software and associated documentation files (the "Software"), to deal * * in the Software without restriction, including without limitation the rights * * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * * copies of the Software, and to permit persons to whom the Software is * * furnished to do so, subject to the following conditions: * * * * The above copyright notice and this permission notice shall be included in * * all copies or substantial portions of the Software. * * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * * THE SOFTWARE. 
* * * ********************************************************************************/ package org.aoju.bus.starter.office; import lombok.RequiredArgsConstructor; import org.aoju.bus.core.lang.exception.InstrumentException; import org.aoju.bus.office.Builder; import org.aoju.bus.office.Provider; import org.aoju.bus.office.Registry; import org.aoju.bus.office.provider.LocalOfficeProvider; import org.aoju.bus.office.provider.OnlineOfficeProvider; import org.springframework.stereotype.Component; /** * 文档在线预览服务提供 * * @author <NAME> * @version 6.2.3 * @since JDK 1.8+ */ @Component @RequiredArgsConstructor public class OfficeProviderService { public OfficeProviderService(Provider localProvider, Provider onlineProvider) { Registry.getInstance().register(Registry.LOCAL, localProvider); Registry.getInstance().register(Registry.ONLINE, onlineProvider); } public Provider require(String type) { if (Registry.getInstance().contains(type)) { if (Registry.LOCAL.equals(type)) { return (LocalOfficeProvider) Registry.getInstance().require(Registry.LOCAL); } if (Registry.ONLINE.equals(type)) { return (OnlineOfficeProvider) Registry.getInstance().require(Registry.ONLINE); } } throw new InstrumentException(Builder.FAILURE); } }
<reponame>ZVampirEM77/SSM /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.smartdata.hdfs.metric; import org.smartdata.metrics.FileAccessEvent; import org.smartdata.metrics.FileAccessEventCollector; import org.smartdata.metrics.FileAccessEventSource; public class NNMetricsAccessEventSource implements FileAccessEventSource { private final NNMetricsAccessEventCollector collector; public NNMetricsAccessEventSource() { this.collector = new NNMetricsAccessEventCollector(); } @Override public FileAccessEventCollector getCollector() { return this.collector; } @Override public void insertEventFromSmartClient(FileAccessEvent event) { // Do nothing. } @Override public void close() { this.collector.close(); } }
def fromfile(self, f):
    """Parse a GPX document from the open file object ``f``.

    Stores the document root element on ``self.gpx``, then hands every
    waypoint (``wpt``), track (``trk``) and route (``rte``) element to the
    corresponding ``parse_*`` handler, in that order.
    """
    # NOTE(review): relies on the module-level ``ns`` namespace prefix and
    # the ElementTree import from the enclosing module.
    self.gpx = ElementTree(file=f).getroot()
    for tag, handle in (
        ("wpt", self.parse_wpt),
        ("trk", self.parse_trk),
        ("rte", self.parse_rte),
    ):
        for node in self.gpx.findall(ns + tag):
            handle(node)
# August 2020, <NAME>

"""
Parsing of a CLI schema.
"""

__all__ = ("Arg", "NodeBase", "RootNode", "SubNode")

import typing
from typing import Dict, List, Optional


class Arg:
    """Schema arg: a single argument accepted by a CLI node."""

    def __init__(
        self,
        *,
        name: str,
        help_: str,
        command: Optional[str] = None,
        positional: bool = False,
        type_: typing.Type = str,
        enum: Optional[List] = None,
        default: Optional[typing.Any] = None,
    ):
        self.name = name
        self.help = help_
        self.command = command
        self.positional = positional
        self.type = type_
        self.enum = enum
        self.default = default

    @classmethod
    def from_dict(cls, data: Dict[str, typing.Any]) -> "Arg":
        """Build an Arg from a schema dict, mapping the reserved key names
        'help'/'type' onto the constructor's 'help_'/'type_' parameters."""
        kwargs = data.copy()
        kwargs["help_"] = kwargs.pop("help")
        kwargs["type_"] = cls._process_type_field(kwargs.pop("type", "string"))
        return cls(**kwargs)

    @staticmethod
    def _process_type_field(value: str) -> typing.Type:
        """Translate a schema type name into a Python type.

        :raises ValueError: if the name is not one of the accepted types.
        """
        # TODO: 'type' should be an enum rather than a Python type.
        accepted_types = {
            "integer": int,
            "string": str,
            "float": float,
            "flag": bool,
            "text": list,
        }
        if value in accepted_types:
            value = accepted_types[value]
        else:
            raise ValueError(
                "Unrecognised type {!r}, accepted types are: {}".format(
                    value, ", ".join(accepted_types)
                )
            )
        return value


class NodeBase:
    """Base class for nodes in the CLI schema tree."""

    def __init__(
        self,
        *,
        keyword: Optional[str] = None,
        help_: str,
        command: Optional[str] = None,
        args: Optional[List[Arg]] = None,
        subtree: Optional[List["NodeBase"]] = None,
    ):
        self.keyword = keyword
        self.help = help_
        self.command = command
        self.args = args if args else []
        self.subtree = subtree if subtree else []
        self.parent = None  # type: Optional[NodeBase]
        # BUGFIX: iterate the normalised list, not the raw parameter --
        # the original 'for x in subtree' raised TypeError when subtree=None.
        for x in self.subtree:
            x.parent = self

    @classmethod
    def from_dict(cls, data: Dict[str, typing.Any]) -> "NodeBase":
        """Build a node (of the invoking subclass) from a schema dict."""
        kwargs = data.copy()
        kwargs["help_"] = kwargs.pop("help")
        kwargs["args"] = cls._process_args_field(kwargs.pop("args", []))
        kwargs["subtree"] = cls._process_subtree_field(kwargs.pop("subtree", []))
        return cls(**kwargs)

    @staticmethod
    def _process_subtree_field(
        value: List[Dict[str, typing.Any]]
    ) -> List["NodeBase"]:
        return [SubNode.from_dict(x) for x in value]

    @staticmethod
    def _process_args_field(value: List[Dict[str, typing.Any]]) -> List[Arg]:
        return [Arg.from_dict(x) for x in value]


class RootNode(NodeBase):
    """Root schema node: never has a keyword."""

    def __init__(self, **kwargs):
        if "keyword" in kwargs:
            raise TypeError("__init__() got an unexpected keyword argument 'keyword'")
        super().__init__(**kwargs)

    def __repr__(self):
        return "<RootNode>"


class SubNode(NodeBase):
    """Sub schema node: a keyword-addressable child in the tree."""

    def __init__(self, *, keyword: str, **kwargs):
        kwargs["keyword"] = keyword
        super().__init__(**kwargs)

    def __repr__(self):
        # Walk up to the root, collecting keywords into a dotted path.
        keywords = []
        node = self
        # BUGFIX: also stop at a detached node (parent=None) -- the original
        # crashed with AttributeError for a SubNode not attached to a root.
        while node is not None and node.keyword:
            keywords.insert(0, node.keyword)
            node = node.parent
        return "<SubNode({})>".format(".".join(keywords))
# Reads integers until the user stops, keeping three lists: all values,
# only the even ones, and only the odd ones.
lista1 = []  # every value entered
lista2 = []  # even values
lista3 = []  # odd values
print()
print(f'{" 3 LISTAS ":=^40}')
while True:
    l1 = int(input('\nDigite um valor: '))
    lista1.append(l1)
    if l1 % 2 == 0:
        lista2.append(l1)
    else:
        # In Python, n % 2 is always 0 or 1 for ints (negatives included),
        # so this branch is exactly the odd case.
        lista3.append(l1)
    esc = ''
    while esc not in ('S', 'N'):
        # BUGFIX: the original did ...upper()[0], which raised IndexError when
        # the user just pressed Enter; slicing with [:1] yields '' instead,
        # which simply re-prompts.
        esc = input('Quer continuar? [S/N] ').strip().upper()[:1]
    if esc == 'N':
        break
print()
print('-=' * 21)
print(f'\nA lista completa de valores é:\n{lista1}')
print(f'\nA lista somente com pares é:\n{lista2}')
print(f'\nE a lista somente com ímpares é:\n{lista3}')
/** Processes a single node. Classes that implement InterfaceWorkflow should call this to process a node instead of node.process. */ func (workflow *Workflow) processNode(node nodes.InterfaceNode, workflowContext *nodes.WorkflowContext) (err error) { defer func() { if r := recover(); r != nil { err = errors.New(fmt.Sprintf("panic: %s", r)) } }() err = node.Process(node, workflowContext) return err }
# -*- coding: utf-8 -*-

"""Main module."""

import configparser
import os
import sys

from hoodex.plex import HoodexPlexServer

# Module-level defaults, populated by run_hoodex_loading_config_file().
PLEX_USER_NAME = ""
PLEX_PASSWORD = ""
PLEX_SERVER = ""
PLEX_LIBRARIES = ""

# Keys that must be present in the [hoodex] section of the config file.
REQUIRED_KEYS = ('PlexUserName', 'PlexPassword', 'PlexServer', 'PlexLibraries')


def run_hoodex_loading_config_file(config_file):
    """Load Plex connection settings from an INI file into the module globals.

    Exits the process with an error message if the file is missing, the
    [hoodex] section is absent, or any required key is not set.
    """
    print("Loading config file: {config}".format(config=config_file))

    if not os.path.isfile(config_file):
        sys.exit("Missing File: Unable to find config file {config_file}".format(config_file=config_file))

    global PLEX_USER_NAME, PLEX_PASSWORD, PLEX_SERVER, PLEX_LIBRARIES

    config = configparser.ConfigParser()
    config.read(filenames=config_file)

    # BUGFIX: config.items('hoodex') never returns None (it raises on a
    # missing section), so the original 'is None' check was dead code.
    if not config.has_section('hoodex'):
        sys.exit(
            "Bad Configuration: Config file {config_file} has missing hoodex section".format(config_file=config_file))
    # BUGFIX: the original condition was inverted -- it exited with
    # "missing keys" precisely when every key was one of the expected names.
    if not all(key in config['hoodex'] for key in REQUIRED_KEYS):
        sys.exit("Bad Configuration: Config file {config_file} has some missing keys".format(config_file=config_file))

    PLEX_USER_NAME = config['hoodex']['PlexUserName']
    # BUGFIX: restored the credential assignments that had been replaced by
    # invalid "<PASSWORD>" placeholders.
    PLEX_PASSWORD = config['hoodex']['PlexPassword']
    PLEX_SERVER = config['hoodex']['PlexServer']
    PLEX_LIBRARIES = config['hoodex']['PlexLibraries']


def run_hoodex(plex_user=None, plex_password=None, plex_server=None, plex_libraries=None):
    """Query each configured Plex library for its most recently added items.

    Any argument left as None falls back to the module-level value loaded
    from the config file. Returns a dict mapping library name to results.
    """
    if plex_user is None:
        plex_user = PLEX_USER_NAME
    if plex_password is None:
        plex_password = PLEX_PASSWORD
    if plex_server is None:
        plex_server = PLEX_SERVER
    if plex_libraries is None:
        plex_libraries = PLEX_LIBRARIES

    plex_server = HoodexPlexServer(user=plex_user, password=plex_password, server=plex_server)

    libraries_dict = {}
    for library in str(plex_libraries).split(','):
        libraries_dict[library] = plex_server.get_last_added(library_name=library)

    print(libraries_dict)
    return libraries_dict
// Copyright 2013 <NAME>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package imagick

/*
#cgo !no_pkgconfig pkg-config: MagickWand MagickCore
#include <wand/MagickWand.h>
*/
import "C"

// PreviewType identifies one of ImageMagick's built-in preview operations.
// Values are taken directly from the C PreviewType enumeration, so they can
// be passed straight through to MagickWand/MagickCore calls.
type PreviewType int

// One Go constant per C enum member; each name mirrors its C counterpart.
const (
	PREVIEW_UNDEFINED        PreviewType = C.UndefinedPreview
	PREVIEW_ROTATE           PreviewType = C.RotatePreview
	PREVIEW_SHEAR            PreviewType = C.ShearPreview
	PREVIEW_ROLL             PreviewType = C.RollPreview
	PREVIEW_HUE              PreviewType = C.HuePreview
	PREVIEW_SATURATION       PreviewType = C.SaturationPreview
	PREVIEW_BRIGHTNESS       PreviewType = C.BrightnessPreview
	PREVIEW_GAMMA            PreviewType = C.GammaPreview
	PREVIEW_SPIFF            PreviewType = C.SpiffPreview
	PREVIEW_DULL             PreviewType = C.DullPreview
	PREVIEW_GRAYSCALE        PreviewType = C.GrayscalePreview
	PREVIEW_QUANTIZE         PreviewType = C.QuantizePreview
	PREVIEW_DESPECKLE        PreviewType = C.DespecklePreview
	PREVIEW_REDUCE_NOISE     PreviewType = C.ReduceNoisePreview
	PREVIEW_ADD_NOISE        PreviewType = C.AddNoisePreview
	PREVIEW_SHARPEN          PreviewType = C.SharpenPreview
	PREVIEW_BLUR             PreviewType = C.BlurPreview
	PREVIEW_THRESHOLD        PreviewType = C.ThresholdPreview
	PREVIEW_EDGE_DETECT      PreviewType = C.EdgeDetectPreview
	PREVIEW_SPREAD           PreviewType = C.SpreadPreview
	PREVIEW_SOLARIZE         PreviewType = C.SolarizePreview
	PREVIEW_SHADE            PreviewType = C.ShadePreview
	PREVIEW_RAISE            PreviewType = C.RaisePreview
	PREVIEW_SEGMENT          PreviewType = C.SegmentPreview
	PREVIEW_SWIRL            PreviewType = C.SwirlPreview
	PREVIEW_IMPLODE          PreviewType = C.ImplodePreview
	PREVIEW_WAVE             PreviewType = C.WavePreview
	PREVIEW_OIL_PAINT        PreviewType = C.OilPaintPreview
	PREVIEW_CHARCOAL_DRAWING PreviewType = C.CharcoalDrawingPreview
	PREVIEW_JPEG             PreviewType = C.JPEGPreview
)
<reponame>perry-js/perry<gh_stars>10-100 import { IPerryReport, IPerryReportInfo, } from '@perry/perry-interfaces'; import aggregateReport from '.'; const reportInfo: IPerryReportInfo = { description: 'Testing this aggregateReport', screenshotUrl: 'http://urlscreenshot.com', title: 'Testing', }; const expectedReport: IPerryReport = { clicks: [], cookies: document.cookie, description: reportInfo.description, errors: [], logs: [], notify: [], recorder: [], screenshotUrl: reportInfo.screenshotUrl, title: reportInfo.title, warns: [], }; describe('aggregateReport', () => { it('should return an empty report by default', () => { expect(aggregateReport(reportInfo)).toEqual(expectedReport); }); });
/**
 * Write an array of bytes to a file.
 *
 * BUGFIX: uses try-with-resources so the stream is closed even when
 * {@code write} throws; the original leaked the descriptor on failure.
 *
 * @param fname file name
 * @param buf   array of bytes to write
 * @throws ExtractException if any I/O error occurs while opening, writing
 *                          or closing the file
 */
private static void writeFile(final String fname, final byte[] buf) throws ExtractException {
    try (FileOutputStream f = new FileOutputStream(fname)) {
        f.write(buf);
    } catch (IOException ex) {
        throw new ExtractException("IO exception while writing file " + fname);
    }
}
/**
 * Simple mutable data holder for a product.
 *
 * FIX: the original declared four private fields with no accessors, leaving
 * the class unusable from outside; standard getters/setters were added
 * (a backward-compatible extension -- the implicit no-arg constructor and
 * field set are unchanged).
 */
public class Produto {

    private String nome;   // product name
    private double preco;  // price
    private double volume; // volume
    private double peso;   // weight

    /** @return the product name */
    public String getNome() {
        return nome;
    }

    /** @param nome the product name */
    public void setNome(String nome) {
        this.nome = nome;
    }

    /** @return the price */
    public double getPreco() {
        return preco;
    }

    /** @param preco the price */
    public void setPreco(double preco) {
        this.preco = preco;
    }

    /** @return the volume */
    public double getVolume() {
        return volume;
    }

    /** @param volume the volume */
    public void setVolume(double volume) {
        this.volume = volume;
    }

    /** @return the weight */
    public double getPeso() {
        return peso;
    }

    /** @param peso the weight */
    public void setPeso(double peso) {
        this.peso = peso;
    }
}
#include <bits/stdc++.h>
using namespace std;

const int INF = 1e9 + 5; // NOTE(review): unused in this program
const int N = int(2e5) + 5;

int a[N]; // input array, 1-indexed

int main() {
	int n;
	cin >> n;
	// p[i] = {prefix sum of a[1..i+1], index i+1}; equal prefix sums at
	// indices j < i mean the subarray (j, i] sums to zero.
	vector <pair<long long, int>> p;
	long long d[N], sum = 0, e[N], l[N];
	for (int i = 1; i <= n; ++i) {
		cin >> a[i];
		sum += a[i];
		p.push_back({ sum,i });
	}
	// Sort by prefix-sum value so equal prefix sums become adjacent.
	sort(p.begin(), p.end());
	// l[idx] = smallest allowed left endpoint for a segment ending at idx:
	// one past the latest earlier index with the same prefix sum, or 1 if the
	// prefix sum itself is 0 (a zero-sum prefix), else 0 (no restriction).
	l[p[0].second] = (p[0].first == 0) ? 1 : 0;
	for (int i = 1; i < n; ++i) {
		if (p[i].first == p[i - 1].first) {
			l[p[i].second] = p[i - 1].second + 1;
		}
		else l[p[i].second] = (p[i].first == 0) ? 1 : 0;
	}
	// e[k] = running maximum of l[1..k]: the tightest left bound so far;
	// d[k] accumulates, per right endpoint k, the count of valid starts
	// (k - e[k]).  d[n] appears to count subarrays containing no zero-sum
	// subsegment -- verify against the original problem statement.
	e[0] = 0;
	d[0] = 0;
	for (int k = 0; k < n; ++k) {
		e[k + 1] = max(e[k], l[k + 1]);
		d[k + 1] = d[k] + k + 1 - e[k + 1];
	}
	cout << d[n] << "\n";
}
The present invention relates to a semiconductor device and, more particularly, to a semiconductor device having a contact area for connecting upper and lower conductive layers. FIG. 1 shows a conventional semiconductor device having a contact area for connecting upper and lower conductive layers. This semiconductor device has semiconductor substrate 1, insulating layer 2 formed on substrate 1 and having a thickness of, e.g., 4,000 Å (= 0.4 μm), lower conductive layer 3 formed on insulating layer 2 and having a thickness of, e.g., 3,000 to 4,000 Å, thick insulating layer 4 formed on layers 2 and 3 to expose part of layer 3 and having a thickness of, e.g., 7,000 Å, and upper conductive layer 5 formed on layer 4 and the exposed portion of lower conductive layer 3. The contact portion of lower and upper conductive layers 3 and 5 constitutes contact area 6. Layers 2 and 4 are made of the same insulating material such as SiO₂. Conventionally, when part of layer 3 is exposed, i.e., when a contact hole is formed in layer 4, an anisotropic etching method, e.g., a reactive ion etching method (RIE), is used to partially etch layer 4, so that the integrated circuit can be micropatterned. Along with the micronization of the integrated circuit pattern, the width and thickness of layer 3 must be reduced. Furthermore, in a static RAM, a polycrystalline silicon layer for forming lower conductive layer 3 must be selectively made very thin in order to make the selected part highly resistive and form a high-resistance element of the polycrystalline silicon layer as the load element of each memory cell. In this manner, as the integrated circuit is micropatterned and highly integrated, the thickness of layer 3 tends to be further reduced. This is also applied to an active element formed in part of the polycrystalline silicon layer.
When a contact hole is formed in layer 4 by the RIE method, the ratio of the etching rate of layer 4 to the etching rate of layer 3 (etching selectivity) is not very large, but usually less than 10. Accordingly, when lower conductive layer 3A having a very small thickness of, e.g., 500 Å, is formed, if part of insulating layer 4 is etched at a small etching selectivity and an etching time 1.5 times a normal etching time in consideration of an allowable error or processing margin, not only the part of layer 4 but also the corresponding portion of layer 3A is etched. In addition, part of insulating layer 2 is etched to partially expose substrate 1. In this state, when upper conductive layer 5A is formed, it is in direct contact with the exposed portion of substrate 1.
Immunity and immune suppression in human ovarian cancer. Clinical outcomes in ovarian cancer are heterogeneous, independent of common features such as stage, response to therapy and grade. This disparity in outcomes warrants further exploration into tumor and host characteristics. One compelling issue is the response of the patient's immune system to her ovarian cancer. Several studies have confirmed a prominent role for the immune system in modifying disease course. This has led to the identification and evaluation of novel immune-modulating therapeutic approaches such as vaccination and antibody therapy. Antitumor immunity, however, is often negated by immune suppression mechanisms present in the tumor microenvironment. Thus, in the future, research into immunotherapy targeting ovarian cancer will probably become increasingly focused on combination approaches that simultaneously augment immunity while preventing local immune suppression. In this article, we summarize important immunological issues that could influence ovarian cancer outcome, including tumor antigens, endogenous immune responses, immune escape and new and developing immunotherapeutic strategies.
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.jackrabbit.webdav.security.report;

import org.apache.jackrabbit.webdav.DavConstants;
import org.apache.jackrabbit.webdav.DavException;
import org.apache.jackrabbit.webdav.DavResource;
import org.apache.jackrabbit.webdav.DavServletResponse;
import org.apache.jackrabbit.webdav.property.DavPropertyName;
import org.apache.jackrabbit.webdav.property.DavPropertyNameSet;
import org.apache.jackrabbit.webdav.security.SecurityConstants;
import org.apache.jackrabbit.webdav.version.report.Report;
import org.apache.jackrabbit.webdav.version.report.ReportInfo;
import org.apache.jackrabbit.webdav.version.report.ReportType;
import org.apache.jackrabbit.webdav.xml.DomUtil;
import org.apache.jackrabbit.webdav.xml.Namespace;
import org.apache.jackrabbit.webdav.xml.XmlSerializable;
import org.w3c.dom.Document;
import org.w3c.dom.Element;

import java.util.HashSet;
import java.util.Set;

/**
 * <code>SearchablePropertyReport</code> identifies those properties that may be
 * searched using the {@link PrincipalSearchReport DAV:principal-property-search REPORT}.
 * This report must be supported on all collections identified in the value of
 * a DAV:principal-collection-set property.
 * <p>
 * The request body MUST be an empty DAV:principal-search-property-set element.
 * <p>
 * The response body MUST be a DAV:principal-search-property-set XML element
 * with the following structure:
 * <pre>
 * &lt;!ELEMENT principal-search-property-set (principal-search-property*) &gt;
 * &lt;!ELEMENT principal-search-property (prop, description) &gt;
 * prop: see RFC 2518, Section 12.11
 * &lt;!ELEMENT description #PCDATA &gt;
 * Human readable description. Note, that the language of the description must
 * be indicated by the xml:lang attribute.
 * </pre>
 *
 * Note that a DAV:principal-search-property XML element is required for each
 * property that may be searched with the DAV:principal-property-search REPORT.
 */
public class SearchablePropertyReport implements Report {

    /**
     * The report name
     */
    public static final String REPORT_NAME = "principal-search-property-set";

    /**
     * The report type
     */
    public static final ReportType REPORT_TYPE = ReportType.register(REPORT_NAME,
            SecurityConstants.NAMESPACE, SearchablePropertyReport.class);

    /**
     * Constant used for the DAV:principal-search-property-set response element.
     */
    public static final String XML_PRINCIPAL_SEARCH_PROPERTY_SET = "principal-search-property-set";

    /**
     * Set collecting the DAV:principal-search-property entries. A {@code Set}
     * is used so each searchable property name is reported only once.
     */
    private final Set<PrincipalSearchProperty> searchPropertySet = new HashSet<PrincipalSearchProperty>();

    /**
     * @see Report#getType()
     */
    public ReportType getType() {
        return REPORT_TYPE;
    }

    /**
     * @return false Status code of after a successful completion must be
     * {@link DavServletResponse#SC_OK 200 (ok)}.
     * @see Report#isMultiStatusReport()
     */
    public boolean isMultiStatusReport() {
        return false;
    }

    /**
     * Validates the target resource and the request info. The report is only
     * valid for this report type and for a Depth 0 request.
     *
     * @throws DavException with {@link DavServletResponse#SC_BAD_REQUEST} if
     * either argument is <code>null</code>, the report type does not match or
     * the Depth header exceeds 0.
     * @see Report#init(DavResource, ReportInfo)
     */
    public void init(DavResource resource, ReportInfo info) throws DavException {
        if (resource == null || info == null) {
            throw new DavException(DavServletResponse.SC_BAD_REQUEST,
                    "Unable to run report: WebDAV Resource and ReportInfo must not be null.");
        }
        if (!getType().isRequestedReportType(info)) {
            // FIX: the message previously contained a stray ";" after the
            // requested report name ("found: 'name;'.").
            throw new DavException(DavServletResponse.SC_BAD_REQUEST,
                    "Expected report type: '" + getType().getReportName()
                    + "', found: '" + info.getReportName() + "'.");
        }
        if (info.getDepth() > DavConstants.DEPTH_0) {
            throw new DavException(DavServletResponse.SC_BAD_REQUEST,
                    "Invalid Depth header: " + info.getDepth());
        }
    }

    /**
     * Builds the DAV:principal-search-property-set response element containing
     * one DAV:principal-search-property child per registered entry.
     *
     * @see Report#toXml(Document)
     */
    public Element toXml(Document document) {
        Element rootElem = DomUtil.createElement(document,
                XML_PRINCIPAL_SEARCH_PROPERTY_SET, SecurityConstants.NAMESPACE);
        for (PrincipalSearchProperty psp : searchPropertySet) {
            rootElem.appendChild(psp.toXml(document));
        }
        return rootElem;
    }

    //-----------------------------------------------------< implementation >---
    /**
     * Add a property name that should be listed in the DAV:principal-search-property-set.
     *
     * @param propName a property name that may be searched in the {@link PrincipalSearchReport}.
     * @param description Human readable description of the searchable property.
     * @param language defines in which language the description is written.
     * @throws IllegalArgumentException if propName is <code>null</code>.
     */
    public void addPrincipalSearchProperty(DavPropertyName propName,
                                           String description,
                                           String language) {
        searchPropertySet.add(new PrincipalSearchProperty(propName, description, language));
    }

    //--------------------------------------------------------< inner class >---
    /**
     * Inner class encapsulating the DAV:principal-search-property.
     * Declared <code>static</code>: it does not touch the enclosing report
     * instance, so it must not retain a hidden reference to it.
     */
    private static class PrincipalSearchProperty implements XmlSerializable {

        private static final String XML_PRINCIPAL_SEARCH_PROPERTY = "principal-search-property";
        private static final String XML_DESCRIPTION = "description";
        private static final String ATTR_LANG = "lang";

        private final DavPropertyName propName;
        private final String description;
        private final String language;
        private final int hashCode;

        private PrincipalSearchProperty(DavPropertyName propName,
                                        String description,
                                        String language) {
            if (propName == null) {
                throw new IllegalArgumentException("null is not a valid DavPropertyName for the DAV:principal-search-property.");
            }
            this.propName = propName;
            this.description = description;
            this.language = language;
            // cached: DavPropertyName is the sole identity of an entry
            hashCode = propName.hashCode();
        }

        /**
         * @see XmlSerializable#toXml(Document)
         */
        public Element toXml(Document document) {
            Element psElem = DomUtil.createElement(document,
                    XML_PRINCIPAL_SEARCH_PROPERTY, SecurityConstants.NAMESPACE);
            // create property set from the single property name
            DavPropertyNameSet pnSet = new DavPropertyNameSet();
            pnSet.add(propName);
            psElem.appendChild(pnSet.toXml(document));
            // append description if present
            if (description != null) {
                Element desc = DomUtil.addChildElement(psElem, XML_DESCRIPTION,
                        SecurityConstants.NAMESPACE, description);
                if (language != null) {
                    DomUtil.setAttribute(desc, ATTR_LANG, Namespace.XML_NAMESPACE, language);
                }
            }
            return psElem;
        }

        //---------------------------------------------------------< Object >---
        @Override
        public boolean equals(Object obj) {
            if (obj == this) {
                return true;
            }
            if (obj instanceof PrincipalSearchProperty) {
                PrincipalSearchProperty other = (PrincipalSearchProperty) obj;
                // FIX: compare the property name itself, not only the cached
                // hash code -- hash collisions must not make distinct
                // property names equal. Description/language are intentionally
                // ignored, as before.
                return propName.equals(other.propName);
            }
            return false;
        }

        @Override
        public int hashCode() {
            return hashCode;
        }
    }
}
// TODO: Possible covariant of the method 'translateConstraints' defined in the class 'ModelElementFacadeLogic' private String[] translateConstraints( final Collection<ConstraintFacade> constraints, final String translation) { String[] translatedExpressions = null; if (constraints != null && !constraints.isEmpty()) { translatedExpressions = new String[constraints.size()]; final Iterator<ConstraintFacade> constraintIt = constraints.iterator(); for (int ctr = 0; constraintIt.hasNext(); ctr++) { final ConstraintFacade constraint = constraintIt.next(); translatedExpressions[ctr] = constraint.getTranslation(translation); } } return translatedExpressions; }
In the Feb. 13 issue of The New York Review of Books, Timothy Garton Ash argued that anti-Europeanism is on the rise in the United States. This sentiment, he suggested, taps into a variety of cultural prejudices, including the notion that the American love of liberty is a kind of haven from European paternalism. "For millions of Americans, in the nineteenth and twentieth centuries, Europe was the place you escaped from," Ash wrote. It is true that past differences between the United States and Europe on questions of immigration and national identity persist today. Right-wing European politicians still employ anti-immigrant rhetoric -- rhetoric that is far less common or explicit in the United States. But on the question of asylum and refugees, the Bush administration is doing its best imitation of Fortress Europe. Which means that we may not be able to celebrate American attitudes toward immigration as being distinct from -- or better than -- Europe's for long. For months hundreds of Pakistanis per week tried to claim asylum in Canada in advance of a March 21 registration deadline requiring men from Pakistan and other countries to register with the Immigration and Naturalization Service (which has since been folded into the Department of Homeland Security). A similar scenario is playing out again this week, as a Friday registration deadline looms for Egyptians, Indonesians, Kuwaitis, Jordanians and Bangladeshis living in the United States. As I wrote in January, the registration program is punitive and more or less ineffective at stopping potential terrorists. Now many of those fleeing the registration program by heading north are being rejected by Canadian authorities, only to get arrested by American officials when they are forced back across the border. Hundreds of Pakistanis have already been deported or are in deportation hearings. (On March 12 alone, 103 Pakistanis were deported on a chartered plane.) 
But while much attention has been focused on the registration program, it is only the tip of the Bush administration's creeping nativism. Among the most disturbing developments is a new accord -- signed but not yet implemented -- between Canada and the United States that would make it impossible for an alien coming through one country to seek asylum in the other. So none of the foreign nationals currently heading north would be allowed to even apply for asylum in Canada under the new rules. This "Safe Third Country Agreement," as it is called, takes a page right out of the European Union's anti-immigrant playbook, as EU nations similarly reject outright any asylum seeker who has first passed through another EU country. The ostensible rationale for these laws is to prevent "asylum shopping," which occurs when foreigners go hunting for the country with the most lenient asylum rules. In reality, however, asylum seekers usually go to countries where they have family members or where expatriate communities already exist. Asylum shopping was at most a nominal concern during the U.S.-Canada negotiations that led to the Safe Third Country Agreement. In fact, the accord will likely increase the number of foreigners seeking asylum in the United States, as more asylum seekers currently pass through the United States en route to Canada than vice versa. And how does the Bush administration plan to deal with the extra asylum seekers? Deport them or throw them in jail, of course. (The Canadians are well aware of how tough Americans are on aliens, as our northern neighbors scuttled a similar, earlier agreement prior to September 11 because they thought that U.S. asylum policies were too draconian.) The administration is now pursuing a complementary border agreement with Mexico. President Bush has battened down the hatches in other ways as well. 
Since 9-11, the number of refugees -- refugees apply from outside the United States, as opposed to asylum seekers who apply once on American soil -- entering the country has dropped precipitously. The administration capped refugee admissions for fiscal year 2002 at 70,000, but only 27,058 actually made it in. To get an idea of how low that number actually is, consider that between 1996 (the last time there was a major change in immigration policy) and 2001, the average number of admitted refugees was about 75,000 per year -- and prior to 1996, it was even higher. The low numbers are as much the result of bureaucratic incompetence as willful obstructionism. During a period of a few months in 2002, for example, there was no money to administer the refugee program because it had accidentally been left out of the appropriations budget. But the increasing layers of security checks imposed on the refugee process are also to blame. Refugees simply aren't getting the newly required clearances needed to board a plane for the United States. Most of the delays have been a consequence of poor interagency communication, not enough government staffers or computers, and inadequate training. Security checks have taken so long that sometimes other approvals, such as medical clearances, expire while a refugee awaits security clearance. These delays have kept thousands of refugees already approved for resettlement from finding safe haven in the United States. The 12,000 Somali Bantu refugees approved earlier this year are a case in point: The Hebrew Immigrant Aid Society (HIAS), a refugee assistance group, estimates that of the 12,000 Bantus granted normal approval, only about 1,200 will actually get their security clearances and enter the United States by the end of this fiscal year. Addressing this problem is next to impossible because the new policies were designed by a secret task force that does not allow refugee advocates any input. 
The task force is composed of representatives of the many government departments -- the departments of State and Homeland Security, as well as the FBI and the CIA -- that handle refugee applications. No one department is in charge of refugee applications, nor is any one department responsible for fixing the inevitable mistakes that occur. "You still don't know who is ultimately responsible," says Gideon Aronoff, the Washington representative for HIAS, of the lack of bureaucratic accountability for expediting these clearances. Others echo Aronoff's frustration. "We just don't know what's going on and who's making decisions," says Ralston Deffenbaugh Jr., president of the Lutheran Immigration and Refugee Service. "And when we ask they say, 'We can't tell you.'" Though the official reason offered for this secrecy is that classified intelligence is at stake, the true reason is likely more mundane: The administration has an interest in keeping quiet the fact that new, post-9-11 security checks for refugees were poorly planned -- and put in place without enough personnel, training or money. No agency wants to take responsibility for actually approving someone's release, even though -- security checks aside -- refugees are already the most extensively screened individuals entering the United States. It is therefore always easier for officials to deny or at least stall applications because approving them carries a risk, small as it may be, that someone missed a piece of evidence. That's why those lucky applicants who are not initially denied often get stuck in a bureaucratic morass, as their applications get passed back and forth between the State Department, the Homeland Security Department, the FBI and the CIA. Backlogs and delays accumulate because no organization is willing to exercise leadership and actually approve the foreigners for entry. Without anyone to hold accountable for this excess caution, refugees languish in their home countries. 
"It's almost like it's this whole complicated series of rational bureaucratic decisions up and down the line that have gone mad in their final result," Deffenbaugh says of the security clearance process. Aronoff worries that security concerns have been carried too far. "We have to understand that there are other values as well," he says, "including the humanitarian values of refugee protection that can get lost if you're looking for absolute certainty." During the first half of this fiscal year (October to March), 8,860 refugees have arrived in America. That means the United States is accepting refugees at a fraction of the annual ceiling (70,000) the Bush administration itself has prescribed. It also means that private groups -- which the government pays to help resettle refugees -- may not be able to keep their operations going much longer. If the resettlement infrastructure collapses, it will be hard to build back up. A trickle of a few thousand refugees here and there will become the new norm -- and a program that used to set the United States apart from Europe will practically disappear.
/**
 * A TypeScript port of the Three.js CSS3DRenderer.
 *
 * Barrel module: re-exports the package's public API (CSS3DObject,
 * CSS3DSprite, CSS3DRenderer) from their implementation files so consumers
 * can import everything from the package root.
 *
 * @author iyinchao<<EMAIL>>
 *
 * Based on:
 * https://github.com/mrdoob/three.js/blob/r126/examples/js/renderers/CSS3DRenderer.js
 * https://github.com/ivee-tech/three-css3drenderer/blob/master/index.js
 *
 */
export {CSS3DObject} from './CSS3DObject';
export {CSS3DSprite} from './CSS3DSprite';
export {CSS3DRenderer} from './CSS3DRenderer';
#include<stdio.h>

/* Returns the larger of a and b. */
int max(int a,int b)
{
    return a >= b ? a : b;
}

/*
 * Two contestants solve the same n problems with point values p[] and
 * solving times t[]. Each problem is worth max(0, p[i] - c * elapsed)
 * where elapsed is the total time spent when it is finished. One solves
 * in order 0..n-1, the other in order n-1..0; print the winner.
 *
 * FIX: the original built prefix sums in-place with t2[i+1] += t2[i]
 * (writes t2[n] on the last iteration) and t[i-1] += t[i] (writes t[-1]
 * when i == 0) -- both out-of-bounds accesses / undefined behavior.
 * A running "elapsed" accumulator gives the same scores without the
 * extra arrays or the overruns.
 */
int main()
{
    int n, c, i;
    if (scanf("%d%d", &n, &c) != 2)
        return 1;
    int p[n], t[n];
    for (i = 0; i < n; i++)
        scanf("%d", &p[i]);
    for (i = 0; i < n; i++)
        scanf("%d", &t[i]);

    int s1 = 0, s2 = 0;
    int elapsed = 0;
    /* First contestant: problems in increasing order. */
    for (i = 0; i < n; i++) {
        elapsed += t[i];
        s1 += max(0, p[i] - c * elapsed);
    }
    /* Second contestant: problems in decreasing order. */
    elapsed = 0;
    for (i = n - 1; i >= 0; i--) {
        elapsed += t[i];
        s2 += max(0, p[i] - c * elapsed);
    }

    if (s1 > s2)
        printf("Limak");
    else if (s2 > s1)
        printf("Radewoosh");
    else
        printf("Tie");
    return 0;
}
// Firebase client configuration for the "wdashboard-painel" project.
// NOTE(review): apiKey is a redacted placeholder ("<KEY>") -- the real key
// must be supplied before this config is usable; avoid committing live
// credentials to source control.
export const FirebaseConfig = {
  apiKey: "<KEY>",
  authDomain: "wdashboard-painel.firebaseapp.com",
  databaseURL: "https://wdashboard-painel.firebaseio.com",
  projectId: "wdashboard-painel",
  storageBucket: "wdashboard-painel.appspot.com",
  messagingSenderId: "415367101356"
};
Atypical form of tako-tsubo cardiomyopathy (TTC) is associated with regional wall motion abnormalities in basal and/or middle segments or only middle segments with sparing of apical segments or apical and basal segments. We described a case of a 47-year-old female with an atypical form of TTC due to fast atrial fibrillation that converted into ventricular fibrillation in WPW syndrome. The echocardiogram made after direct current cardioversion revealed decreased left ventricular ejection fraction (LVEF 35%) with akinesis of inferior and posterior walls and anterior part of interventricular septum in the middle and the basal segments with hyperkinesis of apical segments. The biochemistry blood samples revealed elevated plasma levels of both troponin T - 0.35 ng/mL - and NT-proBNP - 3550 pg/mL. The ECG showed sinus rhythm 62 bpm, shortened PQ interval 100 ms, widened QRS duration - 115 ms with delta wave, prolonged QT interval - 520 ms, QS in leads: II, III, aVF. Negative T waves in leads: I, aVL and positive, symmetrical T waves in leads V1-V6. The coronarography revealed normal coronary arteries. The control echocardiography after 10 days showed normal LVEF 70%, without any wall motion abnormalities. TTC was recognised based on: a history of a sudden stress situation beforehand, ischaemic ECG changes, positive markers of myocardial injury, transient segmental wall motion abnormalities and normal coronary arteries. The ablation of the right postero-septal accessory pathway was successfully performed.
// AddPoint will add the given point to the lower or upper set as required and if // it's not a point that has already been seen func (pppc *PseudoPolygonPointCollector) AddPoint(pt geom.Point) error { if pppc.seen == nil { pppc.seen = make(map[geom.Point]bool) } if len(pppc.upperPoints) == 0 { pppc.upperPoints = append(pppc.upperPoints, pppc.Start) pppc.seen[pppc.Start] = true } if len(pppc.lowerPoints) == 0 { pppc.lowerPoints = append(pppc.lowerPoints, pppc.Start) pppc.seen[pppc.Start] = true } if pppc.seen[pt] { return nil } c := quadedge.Classify(pt, pppc.Start, pppc.End) switch c { case quadedge.LEFT: pppc.lowerPoints = append(pppc.lowerPoints, pt) case quadedge.RIGHT: pppc.upperPoints = append(pppc.upperPoints, pt) case quadedge.ORIGIN, quadedge.BETWEEN, quadedge.DESTINATION: default: if debug { log.Printf("Classification: %v -- %v, %v, %v", c, pt, pppc.Start, pppc.End) return ErrAssumptionFailed() } } return nil }
The ametropic human eye has refractive errors that in first approximation can be described in terms of a sphere, a cylinder, and an axis orientation. This is based on the assumption that a visual defect can be approximately corrected through a lens with simple surfaces such as toroids and spheres. This approximation may correct an error in the refraction of light rays that enter the center of the eye pupil. While it is customary to determine the refractive errors of the human eye by relying on the subjective refraction of the patient under examination when presenting to him a plurality of optotypes through lenses of different refractive power, so-called subjective refraction or manifest refraction, the possibility of measuring the refractive errors of the eye has now been available for several years with the so-called objective refraction. Moreover, it is possible to measure the refractive power of the eye over the entire pupil. The measurable errors include for example spherical aberration, coma, trefoil error, higher orders of spherical aberration, etc. In certain implementations, the objective refraction method is based on determining the wavefront of a propagating light bundle. The functional principle of a wavefront refractor is described in document U.S. Pat. No. 6,382,795 B1, which also includes a synopsis of a plurality of different variants. The refractive errors or imaging errors of the human eye can be mathematically described by means of so-called Zernike polynomials. The errors of the eye in regard to sphere, cylinder, and axis can be described, for example, through second-order Zernike polynomials. These errors are therefore often referred to as second-order aberrations or lower order aberrations. Further errors can be described through higher-order Zernike polynomials. Therefore, these errors are in general referred to as higher-order aberrations.
The information gained from a wavefront refractor can be used in the development of improved vision aids or improved eyesight correction methods. A well-known example for an eyesight correction method is the procedure of wavefront-guided refractive surgery. In this procedure, a volume of any desired geometry is removed from the surface of the cornea in order to correct refractive errors, including those of a higher order. In general, in order to determine an eyeglass prescription for visual aids, an eye care professional determines several parameters. In the case of spectacle lenses, for example, the most relevant ones are: refractive values, usually given in form of sphere, cylinder, and axis; fitting parameters, such as pupil distance, fitting height, pantoscopic angle, and others; and near vision addition, for example, in case of progressive lenses. For contact lenses, the set of parameters usually includes at least the refractive values, similar to spectacle lenses, and corneal curvature. A basic criterion for objective refraction algorithms was suggested to be that the objective refraction that most closely matches the subjective refraction data is considered best. This was for example suggested in document U.S. Pat. No. 7,857,451 B2. Document U.S. Pat. No. 7,857,451 B2 shows a method and system for determining the appropriate refraction prescription in the clinical optometry or ophthalmology setting. Data in the form of aberrometric input, patient history and other information, and/or other environmental data are used to optimize a real-world prescription for an individual's optic needs through the use of an equivalent quadratic fitting calculation or a simulated through-focus experiment. A corresponding disclosure is made in document WO 2013/058725 A1 of the same patent family. Wavefront aberration data is used to obtain the objective estimates of optimal second order corrections for wearers. 
These prescriptions obtained by objective refraction can sometimes vary significantly from the same wearer's prescription obtained via subjective refraction. This may be disadvantageous if the previous or new subjective prescription is judged superior. Document US 2005/0057723 A1 shows method of measuring eye refraction to achieve desired quality according to a selected vision characteristics comprising the steps of selecting a characteristic of vision to correlate to the desired quality of vision from a group of vision characteristics comprising acuity, Strehl ratio, contrast sensitivity, night vision, day vision, and depth of focus, dynamic refraction over a period of time during focus accommodation, and dynamic refraction over a period of time during pupil constriction and dilation; using wavefront aberration measurements to objectively measure the state of the eye refraction that defines the desired vision characteristic; and expressing the measured state of refraction with a mathematical function to enable correction of the pre-selected vision characteristic to achieve the desired quality of vision. The mathematical function of expression may be a Zernike polynomial having both second order and higher order terms or a function determined by spline mathematical calculations. The pre-selected desired vision characteristics may be determined using ray tracing technology. There remains a need in the art to determine an eyeglass prescription for an eye that only has a small optical difference to the prescription obtained via subjective refraction.
package merge.mavens;

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.List;

/**
 * Command-line utility that empties the main/test Java and resource source
 * directories of a target Maven project while leaving the directories
 * themselves in place.
 *
 * <p>The single program argument is the path of a configuration file whose
 * first line (consumed via {@code CopyToTempMaven.removeLine}) identifies the
 * target project.
 */
public class CleanMavenProject {

    public CleanMavenProject() {
    }

    /**
     * Reads the configuration file named by {@code args[0]}, resolves the
     * target {@link MavenProject} from its first line, and clears that
     * project's four source/resource directories.
     *
     * @param args args[0] is the configuration file path.
     * @throws IOException if the configuration file cannot be read.
     */
    public static void main(String[] args) throws IOException {
        String configFilePath = args[0];
        File file = new File(configFilePath);
        List<String> lines = Files.readAllLines(file.toPath(), StandardCharsets.UTF_8);
        // The first line names the target project; removeLine also strips it
        // from the list.
        String firstLine = CopyToTempMaven.removeLine(lines);
        MavenProject targetProject = new MavenProject(firstLine);

        clean(targetProject.getMainJavaDir());
        clean(targetProject.getTestJavaDir());
        clean(targetProject.getMainResourceDir());
        clean(targetProject.getTestResourceDir());
    }

    /**
     * Deletes the contents of {@code dir} -- files and subdirectory trees --
     * but keeps {@code dir} itself. Missing or non-listable directories are
     * ignored. (Replaces the former near-duplicate delete/deleteDir pair.)
     */
    private static void clean(File dir) {
        if (dir == null || !dir.exists()) {
            return;
        }
        File[] files = dir.listFiles();
        if (files == null) {
            return;
        }
        for (File file : files) {
            deleteRecursively(file);
        }
    }

    /**
     * Deletes {@code file}; when it is a directory, its contents are removed
     * first so the final {@link File#delete()} can succeed.
     */
    private static void deleteRecursively(File file) {
        File[] children = file.listFiles();
        if (children != null) {
            for (File child : children) {
                deleteRecursively(child);
            }
        }
        file.delete();
    }
}
/** * This class serves to display the calendar for a given Gregorian month. * * @author abhisheksa * @see <a href="https://artofmemory.com/blog/how-to-calculate-the-day-of-the-week-4203.html">Source</a> */ public class GregorianCalendarCalculator extends DayFromGregorianDateCalculator { public GregorianCalendarCalculator(int dateNo, int month, int year) { super(dateNo, month, year); } @SuppressWarnings("resource") public static void main(String args[]) { Scanner sc = new Scanner(System.in); int year, month, dateNo; while(true) { System.out.print("Enter an exact possible date exactly in the format of MM/YEAR: "); String dateStr = sc.next().trim(); //Stop the program if 0 is entered. if(dateStr.equals("0")) break; //Parsing dateNo, month, year from provided date year = Integer.parseInt(dateStr.substring(3)); month = Integer.parseInt(dateStr.substring(0,2)); dateNo = 1; GregorianCalendarCalculator dfgdCalculator = new GregorianCalendarCalculator(dateNo, month, year); int dateCode = dfgdCalculator.calculate(); if(dateCode >= 0) dfgdCalculator.printCalendar(dateCode); } } /** * Prints the calendar of the given month. * * @param startDateCode - Takes the calculated date code of 1st date of the month. */ private void printCalendar(int startDateCode) { System.out.println("\n\nSUN\tMON\tTUE\tWED\tTHU\tFRI\tSAT"); for(int index = 1; index <= startDateCode; index++) System.out.print("\t"); for(int currentDateNo = 1; currentDateNo <= (isLeapYear && month == 2? 29 : dayCountInMonths[month-1]); currentDateNo++) { if((currentDateNo + startDateCode) % 7 == 0) System.out.println(currentDateNo + "\t"); else System.out.print(currentDateNo + "\t"); } System.out.println("\n"); } }