content
stringlengths
7
2.61M
<gh_stars>1-10 /* * Copyright (c) 2001-2008 * DecisionSoft Limited. All rights reserved. * Copyright (c) 2004-2008 * Oracle. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * $Id$ */ #ifndef _QUERYPATHTREEGENERATOR_HPP #define _QUERYPATHTREEGENERATOR_HPP #include <map> #include <xqilla/optimizer/ASTVisitor.hpp> #include <xqilla/optimizer/QueryPathNode.hpp> #include <xqilla/context/impl/VariableStoreTemplate.hpp> #include <xqilla/framework/XPath2MemoryManagerImpl.hpp> class NodeTest; typedef std::map<const XMLCh *, QueryPathNode*> QPNMap; /** * Generates QueryPathNode trees of the paths * in the documents that will be navigated. 
*/ class XQILLA_API QueryPathTreeGenerator : public ASTVisitor { public: QueryPathTreeGenerator(DynamicContext *context, Optimizer *parent = 0); virtual ~QueryPathTreeGenerator() { varStore_.clear(); } class XQILLA_API PathResult { public: void join(const QueryPathNode::MVector &o); void join(const PathResult &o); void join(QueryPathNode *o); void markSubtreeValue() const; void markSubtreeResult() const; void markRoot() const; QueryPathNode::Vector returnPaths; }; protected: virtual QueryPathNode *createQueryPathNode(const NodeTest *nodeTest, QueryPathNode::Type type); virtual NodeTest *createNodeTest(const XMLCh *nodeType, const XMLCh *uri = 0, const XMLCh *name = 0); virtual void resetInternal(); virtual void optimize(XQQuery *query); virtual ASTNode *optimize(ASTNode *item); ALL_ASTVISITOR_METHODS(); void push(PathResult result); PathResult pop(); PathResult generate(ASTNode *item); void generateBuiltInStep(QueryPathNode *target, QueryPathNode &node, PathResult &result); void generateParentStep(QueryPathNode *target, QueryPathNode &node, PathResult &result); void generateSelfStep(QueryPathNode *target, QueryPathNode &node, PathResult &result); void generateAncestorStep(QueryPathNode *target, QueryPathNode &node, PathResult &result); void generateAncestorOrSelfStep(QueryPathNode *target, QueryPathNode &node, PathResult &result); void generateFollowingStep(QueryPathNode *target, QueryPathNode &node, PathResult &result); void generatePrecedingStep(QueryPathNode *target, QueryPathNode &node, PathResult &result); void generateSiblingStep(QueryPathNode *target, QueryPathNode &node, PathResult &result); void createAnyNodeResult(PathResult &result); const PathResult &getCurrentContext() const; void setCurrentContext(const PathResult &value); void setVariable(const XMLCh *uri, const XMLCh *name, const PathResult &value); PathResult copyNodes(const PathResult &r); XPath2MemoryManager *mm_; DynamicContext *context_; std::set<const ASTNode*> userFunctionStack_; typedef 
VarHashEntry<PathResult> VarStoreRef; typedef VariableStoreTemplate<PathResult> VarStore; XPath2MemoryManagerImpl varStoreMemMgr_; VarStore varStore_; ///< Memory owned by varStoreMemMgr_ std::vector<PathResult> results_; QPNMap projectionMap_; }; #endif
/*!
 *  Copyright (c) 2018 by Contributors
 * \file aocl_common.h
 * \brief AOCL common header
 */
#ifndef TVM_RUNTIME_OPENCL_AOCL_AOCL_COMMON_H_
#define TVM_RUNTIME_OPENCL_AOCL_AOCL_COMMON_H_

#include <memory>
#include "../opencl_common.h"

namespace tvm {
namespace runtime {
namespace cl {

/*!
 * \brief Process global AOCL workspace.
 *
 * Specialises the generic OpenCL workspace for Intel/Altera FPGA
 * (AOCL) devices by overriding device identification and the
 * per-thread entry accessor.
 */
class AOCLWorkspace final : public OpenCLWorkspace {
 public:
  // override OpenCL device API
  void Init() final;
  /*! \brief Whether \p ctx refers to an AOCL-managed device. */
  bool IsOpenCLDevice(TVMContext ctx) final;
  /*! \brief Per-thread entry for this workspace (see AOCLThreadEntry). */
  OpenCLThreadEntry* GetThreadEntry() final;
  // get the global workspace
  static const std::shared_ptr<OpenCLWorkspace>& Global();
};

/*! \brief Thread local workspace for AOCL */
class AOCLThreadEntry : public OpenCLThreadEntry {
 public:
  // constructor: binds this thread's entry to the kDLAOCL device type
  // and the process-global AOCL workspace.
  AOCLThreadEntry()
      : OpenCLThreadEntry(static_cast<DLDeviceType>(kDLAOCL), AOCLWorkspace::Global()) {}
  // get the global workspace
  static AOCLThreadEntry* ThreadLocal();
};
}  // namespace cl
}  // namespace runtime
}  // namespace tvm
#endif  // TVM_RUNTIME_OPENCL_AOCL_AOCL_COMMON_H_
TUSCALOOSA | Five events were concluded in the decathlon and four in the heptathlon Thursday as the Alabama Relays began at the Sam Bailey Track and Field Complex on the University of Alabama campus. In the decathlon, Daryl Brady of Mississippi State leads the field with 3,654 points after five events. Brady won the high jump with a leap of 6 feet, 8.75 inches for 850 points and then placed second in the long jump with a leap of 21-6 for another 709 points. In the heptathlon, Ashley Wilhelm, who entered the meet unattached, leads the field after four events with 3,327 points. A pair of Mississippi State Bulldogs follow, with Laquinta Aaron second (3,048 points) and Dorthy Youmans (3,021 points) third. On the men's side, Alabama has two participants. Sophomore Kyle Dyar is in seventh place with 3,124 points. Unattached, Justin Brady sits in 10th with 2,877 points. For the Crimson Tide women, freshman Rachel Robbs is the lone competitor. She is in 12th place with 2,223 points. Robbs set a personal best in the shot put en route to winning the event with an effort of 43-3.75. The second day of the Alabama Relays will begin at 9 this morning with the decathlon 110 hurdles. The women will kick off the day with the long jump at 10 a.m. The individual events will also begin, with the 10,000 meters at 5 p.m.
package cc.brino.Brpp.Utils; /* * Copyright (c) 2016 StarFruitBrasil * * Permission is hereby granted, free of charge, to any * person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the * Software without restriction, including without * limitation the rights to use, copy, modify, merge, * publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software * is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice * shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO * THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/

/**
 * Utility class for loading and exposing the keyword list from the
 * active language's JSON definition file.
 *
 * @author <NAME>
 * @contributors
 * @version 10/1/2017
 */

import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;

import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;

import cc.brino.Brpp.Pref.PrefManager;

public class JSONUtils {

	// Keyword list parsed from <path>/lib/ling/<lingua>.json; null until config() runs.
	private static JSONArray Keywords;

	/**
	 * Loads the "Keywords" array from the JSON file of the language
	 * selected in the user preferences ("lingua").
	 *
	 * @param path base installation directory containing lib/ling/
	 * @throws FileNotFoundException if the language file does not exist
	 * @throws IOException on read failure
	 * @throws ParseException if the file is not valid JSON
	 */
	public static void config(String path) throws FileNotFoundException, IOException, ParseException {
		JSONParser parser = new JSONParser();
		final Path file = Paths.get(path, "lib", "ling", PrefManager.getPref("lingua") + ".json");
		// try-with-resources: the original leaked the FileReader, which was
		// never closed after parsing.
		try (FileReader reader = new FileReader(file.toFile())) {
			Object obj = parser.parse(reader);
			JSONObject jsonObject = (JSONObject) obj;
			Keywords = (JSONArray) jsonObject.get("Keywords");
		}
	}

	/** @return the keyword array loaded by {@link #config(String)}, or null if not yet loaded */
	public static JSONArray getKeywords() {
		return Keywords;
	}
}
Modification of Very Low Density Lipoproteins Leads to Macrophage Scavenger Receptor Uptake and Cholesteryl Ester Deposition Chemically modified low density lipoproteins (LDL) are recognized by the macrophage scavenger receptor and can lead to substantial cholesteryl ester accumulation in cultured macrophages. Uptake of modified lipoproteins in vivo could contribute to foam cell formation during generation of the atherosclerotic plaque lesion. In the present study, modification of human pre-beta migrating very low density lipoprotein (VLDL) by acetylation led to recognition by the macrophage scavenger receptor as demonstrated in cross-competition experiments with acetylated LDL (ALDL). Recognition by this alternative binding site was associated with increased cholesterol delivery to human macrophages as assessed by suppression of LDL receptor activity, stimulation of cholesterol esterification rates, and accumulation of intracellular cholesteryl ester. Subfractionation of acetylated very low density lipoprotein (AVLDL) by ultracentrifugation in a discontinuous NaCI gradient demonstrated that AVLDL subfractions were equally effective in competing for 125I-ALDL uptake by macrophages when compared on the basis of particle number. These results suggest that modification of VLDL with subsequent recognition by the macrophage scavenger receptor may be a mechanism by which VLDL particles participate in macrophage cholesteryl ester overload.
Frog Fans!  We are two hours from first pitch against Coastal Carolina!  I just caught up with Bob Doyle who keeps up with their baseball team on their message board.  I emailed him an hour ago and he wrote back right away!  Everything but the questions are from Bob.  Small world, he used to live just up the road from me in suburban Chicago.  Enjoy! Every year there is an upstart program that takes their first trip to Omaha, this year it’s CCU. What does it mean for the program and the CCU fan base to make CWS? It is a huge ordeal for the program, the alumni, and the community. With Myrtle Beach being a transient town the fan support has struggled in all sports because most people (myself included) grew up cheering for a different team. I went to the “welcome back” greeting for the team after they swept LSU and I expected maybe 50 people to be there and there were probably 300. I know that doesn’t sound like many, but for CCU that is a lot. I was unable to go to the Send Off, but from what I heard and saw the number was around 600 people. The alumni of the program in particular are extremely excited and about 20-30 of them made the trip to Omaha from all parts of the country. They all love Gilley. CCU has beaten LSU and Florida to make it to this point. Not a small accomplishment! What makes this team tick? Who are two or three players we will likely know too well after tonight? The top of the lineup – Marks and Paez. They are the table setters like you probably saw in the LSU games. Marks is the sparkplug that gets the team going – whether it’s a big hit, a stolen base or just his attitude that gets the whole team going. Also, the senior leadership on the team. In my opinion it has given the team a sense of calmness in the post season. The players you will (hopefully) know all too well are AC (Alex Cunningham) our starting pitcher. He throws low to mid 90s and his off speed stuff has been improving throughout the year. 
He was our Friday night starter for about half the year and then Beckwith got the starting nod and has not looked back. In CF, Billy Cooke. He is absolutely amazing. He made a play against Georgia Tech that was insane. He also has all the alignments on a wrist pad similar to an NFL qb and calls out all the adjustments to the RF Owings and LF Marks – two seniors, and he is a sophomore. He is batting around .350 and seems to hit the ball hard every time. The other guy is our 3b Zach Remillard. He leads the team in HRs with 19 and can play a solid 3b. TCU went from the WAC CUSA MWC Big East to land in B12 over a 16 year period. What conference and division has CCU traveled? I believe they’re joining Sun Belt? You are correct CCU is joining the Sun Belt effective July 2016. We were a founding member in the Big South (1983), but outgrew the conference – especially in baseball and football. Baseball has had to schedule tough out of conference teams for years to counteract the RPI hit we take when playing Big South teams – the same with football. What should we know about the CCU fan base, student culture and school in general? Being a “young” school what has your success in sports done to unite the fan base? The CCU fan base is growing. Like you mentioned we are a young University. The alums who graduated around my time (2003) feel like we can start the change to making CCU the school that our kids grow up rooting for. It is something that just takes time unfortunately. The student support has grown over the past 10+ years. It’s no coincidence that football started right around that time as well. The success we have had in basketball (NCAA tourney 2 of the last 3 years), FCS Football (playoffs multiple years in a row), the sustained baseball success, and even our soccer team’s success (a few sweet 16 appearances in the last few years) has increased the student turn out.
I won’t lie to you, living close to the beach, 9 miles, gives the students a lot of things to do off campus and a lot of them choose to do that instead of going to a game, but as I mentioned earlier, the student support is growing. A few things to know about Coastal Carolina: 1) It is pronounced chan-tə-ˈklir, 2) Our school is growing by leaps and bounds. When I was a student there were less than 5k students. Now that number is 10,000. 3) A few famous alumni are PGA Tour player Dustin Johnson, actor Michael Kelly, and NFL FB Mike Tolbert. 4) With the sustained athletic success we have upgraded just about every athletic facility we have with soccer being next. Our baseball facilities are some of the best in the country. 5) Lastly, WE’RE GOING TO BEAT YOU! 😉 Haha had to throw that in there. This may provide some more detailed info on CCU baseball: Our Team: Offense: Yea, as many of you have posted, we have hit 96 HRs this year. Our ballpark is not extremely small – I believe it’s 320-330 down the lines, 360-370 in the gaps and 390 to CF with about a 30ft high wall. We also have 102 SB (10th in the nation) and rank 12th in the NCAA in sac bunts so yes we can play Gorilla Ball if we need to, or we can play get ’em on, over and in ball too just ask LSU. They thought our numbers were inflated. My favorite player to watch is our CF Billy Cooke. He is a sophomore and has speed to burn, and he is in charge of the outfield and makes all the adjustment calls…oh yea and our corner outfielders are seniors and take instructions from him. Pretty crazy. He has a little power, but always hits the ball hard – I think he is batting in the high .340s or low .350s. The rest of our offense is pretty evenly balanced. We will have 4 or 5 lefties in the lineup every game, our SS, Michael Paez, is a small guy but has power (14HRs I believe) and on the other side of the ball he can make plays that will leave your jaw on the floor, but then boot the easy 6-4-3 grounder from time to time.
Defense: Our defense has been improving all year long. Our outfield has been consistent all year long. Our RF Connor Owings has an above average arm and so does Cooke. I thought our LF and leadoff man Anthony Marks had a subpar arm, but he threw a runner out at the plate in our conference tourney which shocked me. Let’s see….3b Remillard has a great arm and can pick it, but he has the tendency to airmail it sometimes. Already talked about Paez at SS. Our 2b Seth Lancaster (he had the 2rbi hit in the 9th with 2 out vs NC St) is out for the post season with a torn PCL. He is replaced by our 1b Tyler Chadwick. He played solid there against Florida. At 1st is Kevin Woodall Jr. he is almost as big as Luken Baker lol 6’5″ 240lbs. When Chadwick was playing 1b, Gilley would substitute Woodall in later in games and I am still not sure why other than he is obviously a larger target than the 5’9″ Chadwick. In the Florida game Woodall made two great scoops on throws from Chadwick. Catcher has been a bone of contention for me all year (I’m a season ticket holder). David Parrett had 12hrs last year I think and has not come close to that this year and Matt Beaird has not impressed me at all, BUT in the conference tourney, in Raleigh, and at Alex Box I have to give Beaird credit he was a solid backstop and had a few hits. With the rest of our lineup being pretty potent, we don’t need much from our backstop. Pitching: I will try to wrap this up as I think I may be rambling. Arm angles and speed. Andrew Beckwith will throw from just about any arm angle imaginable and has very good control.but will not blow up any radar guns. Alex Cunningham on the other hand can touch 94 and has shown really good off speed stuff his last few outings. We have a Freshman phenom named Jason Bilous. He turned down $600k from the Dodgers I believe it was to come to Conway. He has hit 98mph and has a ++ changeup. He is working on a 3rd pitch as well. Control is the name of his game.
Against Liberty in the Big South Conference title game his control was great and he was unhittable for a while. Zack Hopeck is another guy who could come in from the pen but you will probably not see him. He has pitched against UNC, UNCw, and the Lamecocks and he will not blow you away with speed but he has great control. In our bullpen we have Freshman All American Austin Kitchen – our only LHP. He has struggled lately after doing well to start the season. He was one of the few guys who wasn’t afraid to throw inside and it worked for him. He can hit 91 and has a 12-6 curve. Our closer and All American is Mike Morrison. He can hit 90-91 but his 2 off speed pitches and the fact that he throws them all from the same slot and arm speed is what separates him. Here are a few things that worry me and some other CCU fans: 1) Not Luken Baker – we have played Will Craig from Wake Forest and Peter Alonzo from Florida. Our pitchers have been able to work around them and not give them too much to hit. They went a combined 3 for 10 with 3R, 2Ks, and 0 RBI. 2) Your entire lineup. I believe you have 6 guys batting over .300 and 6 with 10+ stolen bases. Balance and no breaks in the lineup scares me. 3) Your starting pitcher. I believe he is 6’9″ and throws 89-92. Nothing great with the velo but with him being 6’9″ I assume he isn’t releasing the ball around 58ft from the dish but more like 55ft and that 88-92 is going to look like 92-95.
Towards Optimized Photon-Pair Sources for Two-Photon Transitions Two-photon absorption (TPA) is a nonlinear optical process with wide applications in fluorescence microscopy and spectroscopy of organic molecules and compounds, as it enables atoms or molecules to transition into states that may otherwise be inaccessible by a single photon, either because of selection rules or energy conservation. Despite its common use, TPA is a rather inefficient process, often requiring very large photon numbers to produce significant results. It has been demonstrated and analytically described, that using frequency entangled pairs of photons can in some cases enhance the efficiency of TPA. For this to occur, the joint spectral amplitude (JSA) of the photon-pair state, which describes the spectral entanglement between the pair, must be engineered according to the spectral properties of the TPA response-function of the specific system in question. In this work, we consider different cases for the spectral functionality of TPA and propose that the control of the group indices of the modes involved in the pair-generation process is a key property in the design of photon-pair sources for optimized TPA.
Nick Cave will be hosting this year’s Meltdown Festival, to be held over two weeks at London’s Royal Festival Hall starting on June 24. The singer, author and actor will choose the bill and work with organisers to shape Meltdown 99, the seventh to be held at the South Bank venue. Last year’s event was hosted by John Peel. Organiser David Sefton said: “We asked Nick to do it because he’s got such a diverse range of talents and he’s done so many different things.” Cave and Meltdown organisers are now working on securing the artists they have on their shortlist. Tickets will be on sale in around six weeks. Meanwhile, Cave plays a solo performance at the Royal Festival Hall with The Dirty Three and Jock Scott on March 30. Tickets are available from NME’s 24-hour ticketline on 0870 1212 500. Click here to go to our online gig guide.
<reponame>aidanhs/criner use crate::persistence::{new_value_query_recent_first, value_iter, CrateVersionTable}; use crate::{ engine::work, error::Result, model::CrateVersion, persistence::{Db, Keyed, TableAccess}, }; use futures_util::FutureExt; use std::{path::PathBuf, time::SystemTime}; pub async fn process( db: Db, mut progress: prodash::tree::Item, io_bound_processors: u32, cpu_bound_processors: u32, mut processing_progress: prodash::tree::Item, assets_dir: PathBuf, startup_time: SystemTime, ) -> Result<()> { processing_progress.set_name("Downloads and Extractors"); let tx_cpu = { let (tx_cpu, rx) = async_channel::bounded(1); for idx in 0..cpu_bound_processors { let max_retries_on_timeout = 0; let db = db.clone(); let assets_dir = assets_dir.clone(); let progress = processing_progress.add_child(format!("{}:CPU IDLE", idx + 1)); let rx = rx.clone(); crate::spawn(blocking::unblock(move || -> Result<_> { let agent = work::cpubound::Agent::new(assets_dir, &db)?; #[allow(clippy::unit_arg)] // don't know where the unit is supposed to be Ok(futures_lite::future::block_on( work::generic::processor(db, progress, rx, agent, max_retries_on_timeout).map(|r| { if let Err(e) = r { log::warn!("CPU bound processor failed: {}", e); } }), )) })) .detach(); } tx_cpu }; let tx_io = { let (tx_io, rx) = async_channel::bounded(1); for idx in 0..io_bound_processors { let max_retries_on_timeout = 40; crate::spawn( work::generic::processor( db.clone(), processing_progress.add_child(format!("{}: ↓ IDLE", idx + 1)), rx.clone(), work::iobound::Agent::new(&db, tx_cpu.clone(), |crate_name_and_version, task, _| { crate_name_and_version.map(|(crate_name, crate_version)| work::cpubound::ExtractRequest { download_task: task.clone(), crate_name, crate_version, }) })?, max_retries_on_timeout, ) .map(|r| { if let Err(e) = r { log::warn!("iobound processor failed: {}", e); } }), ) .detach(); } tx_io }; blocking::unblock(move || { let versions = db.open_crate_versions()?; let num_versions = 
versions.count(); progress.init(Some(num_versions as usize), Some("crate versions".into())); let auto_checkpoint_every = 10000; let checkpoint_connection = db.open_connection_with_busy_wait()?; let mut fetched_versions = 0; let mut versions = Vec::with_capacity(auto_checkpoint_every); let mut last_elapsed_for_checkpointing = None; loop { let abort_loop = { progress.blocked("fetching chunk of version to schedule", None); let connection = db.open_connection_no_async_with_busy_wait()?; let mut statement = new_value_query_recent_first( CrateVersionTable::table_name(), &connection, fetched_versions, auto_checkpoint_every, )?; let iter = value_iter::<CrateVersion>(&mut statement)?; versions.clear(); versions.extend(iter); fetched_versions += versions.len(); versions.len() != auto_checkpoint_every }; let tasks = db.open_tasks()?; for (vid, version) in versions.drain(..).enumerate() { let version = version?; progress.set(vid + fetched_versions + 1); progress.halted("wait for task consumers", None); futures_lite::future::block_on(work::schedule::tasks( &assets_dir, &tasks, &version, progress.add_child(format!("schedule {}", version.key())), work::schedule::Scheduling::AtLeastOne, &tx_io, &tx_cpu, startup_time, ))?; } // We have too many writers which cause the WAL to get so large that all reads are slowing to a crawl // Standard SQLITE autocheckpoints are passive, which are not effective in our case as they never // kick in with too many writers. There is no way to change the autocheckpoint mode to something more suitable… :/ progress.blocked( "checkpointing database", last_elapsed_for_checkpointing.map(|d| SystemTime::now() + d), ); let start = SystemTime::now(); checkpoint_connection .lock() .execute_batch("PRAGMA wal_checkpoint(TRUNCATE)")?; last_elapsed_for_checkpointing = Some(SystemTime::now().duration_since(start)?); if abort_loop { break; } } Ok(()) }) .await }
SMEs resilience from continuous improvement lenses Purpose The business environment is increasingly volatile, complex, uncertain and ambiguous. Today, COVID-19 represents a super-disruption situation. This paper aims to explore small and medium-sized enterprises (SMEs) resilience from continuous improvement lenses. It explores the role of continuous improvement in building organizational resilience across SMEs. Design/methodology/approach A Delphi methodology has been adopted to capture evidence and opinions from 38 experts from several Jordan-based SMEs through three-online rounds. Findings The study finds that continuous improvement enhances SMEs resilience in the short term and long term. It can translate the concept of resilience into tangible working constructs for SMEs in visualizing and making decisions about their risks, adapting, absorbing changes and prevailing over time. The role of continuous improvement in building organizational resilience is fourfold; continuous improvement is a cyclical process; it has a vital cultural aspect and can be considered a business philosophy. It also emphasizes a holistic change approach based on small but constant changes. However, SMEs leaders must consider several issues for effective continuous improvement, including a continuous improvement culture and a results-focused approach. Originality/value Organizational resilience has been studied across various contexts; however, there are still unanswered questions for SMEs resilience. This study contributes to theory and practice by examining the role of continuous improvement in SMEs resilience.
A Systematic Review of Teacher-Delivered Behavior-Specific Praise on K12 Student Performance Behavior-specific praise (BSP) is a core component of many positive behavioral interventions and supports at each level of prevention, often used to increase student academic outcomes and/or reduce inappropriate behavior. We conducted a systematic literature review to explore this low-intensity, teacher-delivered strategy, applying Council for Exceptional Children (CEC) quality indicators and standards to determine whether BSP can be considered an evidence-based practice (EBP). Included articles (N = 6) investigated BSP delivered by a classroom teacher in K12 traditional school-based settings with academic and/or behavioral student outcome measures. Findings indicated using BSP increased student time on task, decreased inappropriate behaviors, and reduced student tardiness. All studies met our 80% weighted coding criterion. We concluded BSP can be categorized as a potentially EBP based on CEC guidelines. Limitations and directions for future inquiry are presented.
/** * Defines configurations for input */ public final class InputPolicy { /** * @return list of vod ids to download */ public Set<Integer> getVodIds() { return vodIds; } // CONSTRUCTORS public static InputPolicy from(OptionsProvider optionsProvider, LogPolicy logPolicy) { return from(VodParser.from(optionsProvider, logPolicy)); } public static InputPolicy from(VodParser vodParser) { Null.check(vodParser).ifAny("VoD parser cannot be null"); return new InputPolicy(vodParser.getVodIds()); } public InputPolicy(Set<Integer> vodIds) { Null.checkCollection(vodIds).ifAny("Vod id list cannot be null"); this.vodIds = ImmutableSet.copyOf(vodIds); } // PRIVATE private final Set<Integer> vodIds; }
def has_common_char(s1, s2):
    """Return True if the two strings share at least one character.

    The original nested-loop version indexed ``s2`` with positions taken
    from ``range(len(s1))``, which raised IndexError whenever ``s2`` was
    shorter than ``s1``.  A set-disjointness test avoids indexing entirely
    and is O(len(s1) + len(s2)).
    """
    return not set(s1).isdisjoint(s2)


def main():
    """Read n test cases (two strings per case) and print YES/NO for each."""
    n = int(input())
    for _ in range(n):
        s1 = input()
        s2 = input()
        print("YES" if has_common_char(s1, s2) else "NO")


if __name__ == "__main__":
    main()
For several months, it has been plain that Chancellor Alistair Darling struggles to understand how the British economy works. This failure was embarrassingly obvious yesterday. Poorly delivered and lacking in content, his Budget was by a very considerable distance the worst I have ever heard from either a Labour or a Conservative Chancellor in nearly 25 years as a reporter. He made a long series of statements about the performance of the British economy that were either misleading or completely untrue. It was hard to tell whether he was deliberately deceiving the voters, or whether he genuinely believed what he was saying. For instance, he said that one of the Government's achievements was to have "maintained confidence in the banking system". It is nothing short of astonishing for a man who presided over the first collapse in a High Street bank since Victorian times to have dared to make this statement. Darling's assertion about "maintaining confidence" makes Mohamed Al Fayed's allegation that the Princess of Wales was murdered as part of a plot led by the Duke of Edinburgh and the British Secret Service look uncontroversial and soundly based. Even leaving aside the Northern Rock collapse, the British banking system is tottering, with the Bank of England stepping in on a regular basis to make emergency injections of liquidity into Britain's weakened financial institutions. Another piece of fantasy involved the Chancellor's claim that New Labour has provided a stable tax regime for businesses. This remark must have perplexed Britain's hard-pressed business community. As every company executive knows to their bitter cost, Labour has endlessly tinkered with the tax system - changing corporate tax rates no less than seven times in the last eleven years. This confusion has been exacerbated by Darling's own endless dithering about reforms to the capital gains regime over the last six months. 
Although the Chancellor looked the model of boring and sober respectability as he delivered his lacklustre Budget, appearances can sometimes be misleading. This will go down in history as the Budget of a serial fantasist with scant connection with reality. The biggest falsehood of all was Darling's assertion, repeated many times, that he was delivering a "responsible Budget". In truth, yesterday's statement was the most reckless Budget since the notoriously loose financial package delivered by the Conservative Chancellor Anthony Barber 45 years ago which led to a credit boom and double-digit inflation. A quick study of the facts and figures contained in the Treasury Red Book that accompanied yesterday's budget shows that Britain's borrowing levels are set to surge to terrifying levels over the next few years. Our national overdraft is now bigger than any European nation and only Japan's, among major international economies, is worse. This year, public debt will grow by £36billion - that's £1,400 for every household in Britain. Darling seems to have been deliberately disingenuous when he stated that he has kept national borrowing within Gordon Brown's famous "sustainable investment rule", which insists public debt should never go over 40 per cent of gross domestic product. He was only able to make this claim because he has ignored an explicit edict from the National Statistical Office which insists that the liabilities from Northern Rock should be added to the national balance sheet. Once they are added, national borrowing comes to 43 per cent. Such financial legerdemain suggests that Darling thinks he is exempt from the normal rules of probity and honesty. These very high levels of debt are especially disturbing because they come at the worst possible time. Prudent governments set aside money and make savings during periods of growth of the kind that Britain has enjoyed over the past decade.
This means that when bad times come - as they inevitably will - they have given themselves the flexibility to try to boost the economy with tax-cutting measures. The United States, for example, now feels strong enough to introduce one per cent tax cuts and stimulate its way out of recession. The tragedy facing Alistair Darling is that New Labour has failed to take the necessary precautions. As Tory leader David Cameron pointed out in his brilliant Commons reply: "In the years of plenty, they put nothing aside." This lack of prudence and forward planning has been disastrous for the British economy, with consequences that will be felt by every voter. Indeed, soaring borrowing already meant that Darling was yesterday forced to unveil tax increases at the worst possible moment in the economic cycle. It is no wonder that he was obliged to resort to several sleights of hand in order to get through yesterday's endurance test. Telling the truth would have involved the humiliating confession that New Labour has managed the British economy with imprudence over the past few years. Of course, Darling deserves some sympathy. The real architect of our wretched financial predicament is Gordon Brown. But Darling could hardly blame his predecessor as Chancellor publicly - even though, behind the scenes, reliable sources speak of some angry encounters. So, instead, he was forced into the politics of denial, telling his audience: "Britain is better placed than any other economy to face the slowdown in the world economy." But that is simply not the case. Most other countries - good examples being Australia and Sweden, which has a two per cent budget surplus - have managed their finances far better during the healthy times and will be able to cope much more easily now. Only Hungary, Pakistan and Egypt have worse deficits than Britain has today. The trouble is that the whole country will have to pay a terrible price for Alistair Darling's act of denial. 
What is more, because he refuses to acknowledge the true scale of our economic problems he cannot set about finding solutions. A capable Chancellor of the Exchequer - Nigel Lawson or even Gordon Brown at his best - would have set out the size of the problem. He would have provided an overview of how the credit squeeze in the United States is affecting British markets and outlined the measures he was taking to redress the problem. He would have shown how worldwide commodity prices, and especially oil, are affecting domestic demand. He would have provided an analysis of how the massive liabilities incurred by the Government over Northern Rock are to be repaid. He would have shown how Britain's chronic fiscal imbalance is to be remedied. Finally, he would have set out clear-headed plans to curb the bloated public sector and release resources for the dynamic part of the British economy. Poor Alistair Darling was incapable of any of this. By playing the politics of denial, he has put himself at the mercy of events. The tragedy is that all of us will ultimately pay a price for the Chancellor's failure of courage and vision.
What Do the National Calls for Reform Mean for the Gifted and Talented? An Interview with James Gallagher Any effort to make educators strong advocates for their programs must be built on their confidence in the effectiveness and merit of the educational opportunities they provide. In order to gain a broad, expert perspective on how programs for gifted and talented young people are being perceived by the public, particularly in the wake of major national calls for reform in our schools, JEG interviewed Dr. James Gallagher following the annual meeting of the Council for Exceptional Children in Washington, D.C. this April. Dr. Gallagher is known internationally for his long-term educational efforts on behalf of handicapped as well as gifted/ talented students. He is Director of the Frank Porter Graham Child Development Center, and Kenan Professor of Education at the University of North Carolina, Chapel Hill. In addition, Dr. Gallagher has been a previous Director of the Bureau for the Education of the Handicapped in Washington, presently serves as TAG'S representative to the CEC Board of Governors, and recently co-authored an important report The Status of Gifted/ Talented Education in the United States.
Optimization of Gaussian Random Fields Many engineering systems are subject to spatially distributed uncertainty, i.e. uncertainty that can be modeled as a random field. Altering the mean or covariance of this uncertainty will in general change the statistical distribution of the system outputs. We present an approach for computing the sensitivity of the statistics of system outputs with respect to the parameters describing the mean and covariance of the distributed uncertainty. This sensitivity information is then incorporated into a gradient-based optimizer to optimize the structure of the distributed uncertainty to achieve desired output statistics. This framework is applied to perform variance optimization for a model problem and to optimize the manufacturing tolerances of a gas turbine compressor blade. 1. Introduction and motivation. An engineering system maps a set of inputs to a set of outputs, which quantify the performance of the system. In a deterministic design setting, the inputs are assumed to take a single (nominal) value, and the resulting outputs are deterministic functions of the nominal input values. In many engineering systems, the inputs are subject to some uncertainty due to natural variations in the system's environment or due to a lack of knowledge. In this case, the inputs can be modeled as random variables, and the system outputs are also, in general, random variables. The system performance is commonly quantified in terms of the statistics of the outputs, e.g. their mean or variance. The statistical distribution of the system outputs can be changed by either changing the distribution of the input uncertainty, or by changing the design of the system, i.e. how the inputs are mapped to the outputs. Design under uncertainty, also referred to as robust design, is often applied to optimize systems with random outputs. 
Broadly speaking, robust design methodologies construct designs whose performance remains relatively unchanged when the inputs are perturbed from their nominal value as a result of uncertainty. Examples include topology optimization of structures subject to random field uncertainties, design of gas turbine compressor blades subject to manufacturing variations, and optimization of airfoils subject to geometric uncertainty. In these works, the system design is optimized to minimize the impact of variability on the output statistics. In most applications of robust optimization, the statistical distribution of the input variability is assumed to be constant. In some applications, however, the distribution of the input uncertainty can be controlled. A concrete example is a gas turbine compressor blade subject to geometric variability introduced by the manufacturing process. In this context, the system inputs include the geometry of the compressor blade, which is assumed to be random as a result of random perturbations introduced by the manufacturing process. As will be described in the next section, the randomness in the blade geometry is an example of spatially distributed uncertainty, and can therefore be modeled as a random field. The outputs are chosen to describe the aerothermal performance of the compressor blade, e.g. the total pressure loss coefficient and flow turning. The mean performance of manufactured compressor blades has been shown to degrade as the level of variability (quantified by its standard deviation) increases. The level of variability can be reduced by specifying stricter manufacturing tolerances. However, specifying stricter manufacturing tolerances incurs higher manufacturing costs. Therefore, the cost associated with reducing variability competes with the benefits of improving performance, implying that there may be some optimal level of variability that balances these competing costs. 
This paper presents a method for optimizing the statistical distribution of random fields that describe the variability in a system's inputs. An efficient approach for computing the sensitivity of system outputs with respect to the parameters defining the distribution of the random field is presented. This sensitivity information is then used by a gradient-based optimizer to optimize these parameters. We apply this framework to perform variance optimization for a model problem as well as to a compressor blade tolerance optimization problem. 2. Gaussian random fields. Random fields provide a convenient method for modeling spatially distributed uncertainty. Random fields have previously been used to model spatially distributed uncertainty in a wide variety of systems, including natural variations in ground permeability, random deviations in material properties for structural optimization problems, and geometric variability in airfoils. Given a probability space $(\Omega, \mathcal{F}, P)$ and a metric space $X$, a random field is a measurable mapping $e : \Omega \to \mathbb{R}^X$. In this work, we consider spatially distributed uncertainty in the form of a Gaussian random field $e(x, \omega)$. The defining characteristic of Gaussian random fields is that for any $x_1, \ldots, x_n$, the vector $(e(x_1, \omega), \ldots, e(x_n, \omega))$ is distributed as a multivariate Gaussian. Gaussian random fields are uniquely defined by their mean $\mu(x)$ and covariance function $C(x_1, x_2)$: $\mu(x) = \mathbb{E}[e(x, \omega)]$ and $C(x_1, x_2) = \mathbb{E}[(e(x_1, \omega) - \mu(x_1))(e(x_2, \omega) - \mu(x_2))]$, where the expectation is taken over $\Omega$. The covariance function describes the smoothness and correlation length of the random field. Figure 1 shows realizations of random fields with different covariance functions. The realizations in the top left, produced with the squared exponential covariance function, are infinitely differentiable, and thus appear very smooth. Conversely, the realizations on the top right, produced with the exponential covariance function, are nowhere differentiable, and thus appear very jagged. 
The effects of changing the correlation length for the squared exponential kernel are shown in the bottom figures. 2.1. The Karhunen-Loève Expansion. The Karhunen-Loève (K-L) expansion, also referred to as the proper orthogonal decomposition (POD), can be used to represent a random field as a spectral decomposition of its covariance function. The random field $e$ is assumed to be continuous in the mean square sense. Then, the covariance function $C$ is continuous, and we can therefore define the covariance kernel $K$, which is a symmetric positive semi-definite operator equipped with the inner product $\langle v, w \rangle$, for $v, w \in L^2(X)$. By Mercer's theorem, it follows that $C$ has the spectral decomposition $C(x_1, x_2) = \sum_{i=1}^{\infty} \lambda_i \phi_i(x_1) \phi_i(x_2)$, where each pair of eigenvalues $\lambda_i$ and eigenfunctions $\phi_i(x)$ is computed from the following Fredholm equation: $\int_X C(x_1, x_2) \phi_i(x_2) \, dx_2 = \lambda_i \phi_i(x_1)$. Moreover, the eigenfunctions can be chosen orthonormal such that $\langle \phi_i, \phi_j \rangle = \delta_{ij}$, and the eigenvalues are real, non-negative, and summable. By the Karhunen-Loève theorem, the decomposition of the random field is given by $e(x, \omega) = \mu(x) + \sum_{i=1}^{\infty} \sqrt{\lambda_i} \, \xi_i(\omega) \phi_i(x)$, where the eigenvalues are arranged in descending order such that $\lambda_1 \ge \lambda_2 \ge \cdots \to 0$. The distribution of the random variables $\xi_i(\omega)$ can be determined by taking the inner product of the random field with each of the eigenfunctions. The random variables $\xi_i(\omega)$ are mutually uncorrelated with zero mean and unit variance. For a Gaussian random field, the $\xi_i(\omega)$ are independent, identically distributed (i.i.d.) standard normal random variables. To construct the K-L expansion numerically, the Nyström method is used. The domain $X$ is discretized, and quadrature is used to approximate Equation (2.7). This results in a discrete eigenproblem of the form $C \phi_i = \lambda_i \phi_i$, where $C$ is the discretized covariance matrix. Solving this eigenproblem gives the eigenvalues and eigenvectors evaluated on the discretized domain. 
The K-L expansion (2.9) is truncated at a finite number of terms, resulting in an approximate spectral expansion of the random field: $e(x, \omega) \approx \mu(x) + \sum_{i=1}^{N_{KL}} \sqrt{\lambda_i} \, \xi_i(\omega) \phi_i(x)$. The truncated expansion minimizes the mean square error, and the decay of the eigenvalues determines the rate of convergence. The level of truncation $N_{KL}$ is often set equal to the smallest $k$ such that the partial scatter $S_k$ exceeds some threshold, where the partial scatter is defined as $S_k = \sum_{i=1}^{k} \lambda_i \big/ \sum_{i=1}^{\infty} \lambda_i$. 3. Optimizing the mean and covariance. Consider a system whose performance is subject to spatially distributed uncertainty in the form of a Gaussian random field $e(x, \omega)$. Each output of the system is a functional of this random field, i.e. $F(\omega) = F(e(x, \omega))$, and is itself a random variable. $F$ can either be a direct functional of the random field, or a functional of the solution of a system of equations subject to random field uncertainty, e.g. the Navier-Stokes equations on a domain with a boundary that is described by $e(x, \omega)$. We are interested in the statistics $s_F$ of this functional, e.g. its mean or variance. In the case of multiple system output statistics, we generalize to the vector of output statistics $s_F$. We aim to optimize the system's statistical response $s_F$ by controlling the mean and covariance of the random field $e(x, \omega)$. The design variables are then the mean of the random field $\mu(x)$, parameterized by the vector $p_m$, and the covariance of the random field $C(x_1, x_2)$, parameterized by the vector $p_c$. The design vector $p = \{p_m, p_c\}$ fully defines the Gaussian random field. We assume that $\mu(x; p_m)$ and $C(x_1, x_2; p_c)$ depend smoothly on $p_m$ and $p_c$, respectively. Changing the mean and covariance of the random field will in general change the system output statistics, so that $s_F = s_F(p)$. 
Figure 2 illustrates the propagation of the random field to the output statistic To optimize the statistical response of the system, we formulate the following optimization problem: where the objective and constraint functions f, g, and h may depend on both the design parameters p and the system output statistics s F (p), and P is the design space for the mean and covariance parameters. Note that, in general, the objective and constraint functions are nonlinear with respect to p. 4. Sample average approximation. To solve (3.1), we employ a gradientbased approach that incorporates sensitivity information to accelerate convergence to an optimal solution. Specifically, the sample average approximation (SAA) method, also referred to as sample path optimization, is used to optimize the mean and covariance of the random field. We limit our attention to the special case where each objective and constraint functions are equal to the mean of an output functional, since this special case encompasses the problems of interest in this work. In the SAA method, the objective functions and constraints are approximated using the Monte Carlo method. For example, the mean of the functional F (e) is estimated as The process for propagating distributed uncertainty to the quantities of interest is summarized below: 1. Generate a N N KL matrix of independent Gaussian random variables. 2. For each Monte Carlo sample, construct a realization of the random field e n (x) using the K-L expansion (2.12). 3. Evaluate the functional of interest F n = F (e n ) for each realization. 4. Estimate the moments of F according to Equation (4.1). The convergence rate of the Monte Carlo estimate (4.1) is O(N −1/2 ), and therefore a large number of Monte Carlo samples are typically required. However, the Monte Carlo samples can be evaluated in parallel, greatly reducing the time required to evaluate Equation (4.1). 
The SAA method transforms the stochastic optimization problem (3.1) into a deterministic optimization problem. This is achieved by fixing the set of realizations { n } N n=1 of the random input vector used to compute the Monte Carlo estimates of the objective and constraint functions. The SAA method therefore solves the following modified optimization problem, where the objective and constraint functions have been replaced by their Monte Carlo estimates: The subscript N has been added to emphasize the number of samples used to construct the estimators. The deterministic optimization problem that results from fixing the samples can be solved iteratively to update the solution, using the same set of realizations { n } N n=1 at each iteration. The solution of the deterministic optimization problem, denotedp * N, is an estimator of the true solution p *. In the unconstrained case,f N (p * N ) → f (p * ) andp * N → p * as N → ∞ with probability one if p * is a unique minimizer of f and the family {|F (, p)|, p ∈ P} is dominated by a measurable function, i.e. if there exists a measurable function G() such that |F (, p)| ≤ G() for all points ∈. Moreover, if the families {||∇F (, p)||, p ∈ P} and {||∇ 2 F (, p)||, p ∈ P} are dominated by measurable functions, then, assuming the Hessian matrix B = E is nonsingular, → represents convergence in distribution and Thus, the SAA approximate solution and approximate objective function converge like N −1/2. Since the true solution p * is unknown, the quantities B, and 2 are replaced by consistent estimates computed from the same realizations { n } N n=1 used to solve the problem. It is also possible to assess the SAA solution quality by constructing a confidence bound on the optimality gap f (p * N ) − f (p * ). The reduction of the stochastic optimization problem into a deterministic optimization problem allows for the use of one of many algorithms designed for the efficient solution of deterministic optimization problems. 
Thus, the SAA method is well-suited to solving constrained stochastic optimization problems. A convergence rate of N −1/2 for the constrained problem can also be observed under certain conditions. Numerous methods have been devised for solving deterministic optimization problems with both nonlinear objectives and nonlinear constraints. One such method, the sequential quadratic programming (SQP) method, is reviewed next. 4.1. Sequential quadratic programming. An efficient approach to solving (4.2) is the sequential quadratic programming method. Given an approximate solutionp k, the SQP solves a quadratic programming subproblem to obtain an improved approximate solutionp k+1. This process is repeated to construct a sequence of approximations that converge to a solutionp *. The quadratic subproblems are formed by first constructing the Lagrangian function from the objective and constraint functions. A quadratic objective is constructed from the second-order Taylor series expansion of the Lagrangian, and the constraints are replaced with their linearizations. The solution of the quadratic subproblem produces a search direction, and a linesearch can be applied to update the approximate solution. To construct the second-order Taylor series of the Lagrangian, the Hessian is estimated using a quasi-Newton update formula, such as the Broyden-Fletcher-Goldfarb-Shanno (BFGS) formula. Local convergence of the SQP algorithm requires that the initial approximate solution is close to a local optimum and that the approximate Hessian is close to the true Hessian. Global convergence requires sufficient decrease in a merit function that measures the progress towards an optimum. More details on local and global convergence of SQP methods can be found in. 5. Sensitivity analysis of Gaussian random fields. In this section, we perform sensitivity analysis of a system's output statistics with respect to the mean and covariance of Gaussian random field input uncertainty. 
This sensitivity information is used to optimize the mean and covariance functions via the SAA method described in the previous section. Pathwise sensitivities. To compute the sensitivity of an output statistic, e.g. $\nabla_p \mathbb{E}[F]$, we use the pathwise sensitivity method. The pathwise sensitivity method relies upon interchanging the differentiation and expectation operators. For example, to compute an unbiased estimator of the gradient of $f = \mathbb{E}[F]$ with respect to a parameter $p$, we simply interchange differentiation and integration: $\partial f / \partial p = \mathbb{E}[\partial F / \partial p]$. Sufficient conditions that allow for this interchange will be discussed subsequently. The pathwise sensitivity method can be applied directly to the Monte Carlo estimate of $\mathbb{E}[F]$. Replacing the expectation with its Monte Carlo estimate, and exchanging summation and differentiation, gives $\partial \hat{f}_N / \partial p = \frac{1}{N} \sum_{n=1}^{N} \partial F_n / \partial p$. In the context of the SAA method, the derivatives $\partial F_n / \partial p$ represent the sensitivity of the random functional $F(\omega, p)$ for a particular realization of the random field $e_n \equiv e(x, \omega_n)$ where all random inputs are held fixed. To compute the sensitivity $\partial F_n / \partial p$, we first apply the chain rule to rewrite this sensitivity: $\partial F_n / \partial p = (\partial F_n / \partial e_n)(\partial e_n / \partial p)$. If the functional $F$ depends explicitly on the random field $e$, the derivative $\partial F_n / \partial e_n$ can be computed directly. As mentioned previously, $F$ may alternatively be a functional of the solution of some system of equations depending on $e$. In that case, the derivative $\partial F_n / \partial e_n$ can be computed efficiently using the adjoint method. We now turn our attention to computing the sample path sensitivity $\partial e_n / \partial p$. Sample path sensitivities. We consider computing the sensitivity of the sample path $e_n \equiv e(x, \omega_n; p_m, p_c)$ with respect to the parameters which control the mean and covariance of the random field, i.e. the $p_m$ and $p_c$ introduced previously. The sensitivity of the sample path with respect to any parameter $p_m$ controlling the mean can be analytically derived from the K-L expansion given by Equation (2.12). 
Since the eigenvalues and eigenvectors in the K-L expansion are independent of $p_m$, only the first term in the K-L expansion depends on $p_m$. Thus, we have $\partial e_n / \partial p_m = \partial \mu / \partial p_m$. Computing the sensitivity of the sample path with respect to a parameter $p_c$ controlling the covariance is more involved. The pathwise sensitivity method has typically been applied to problems in computational finance and chemical kinetics where the sample paths of the random process can be differentiated analytically with respect to the parameters of interest. However, the sample path sensitivity of a random field can not, in general, be differentiated analytically with respect to a parameter controlling the covariance matrix. For a Gaussian random field, we can use its K-L expansion to compute these sensitivities using eigenvalue/eigenvector perturbation theory. We focus on computing the sensitivities of the discretized random field, since numerical computation of the pathwise sensitivity estimate is the ultimate goal. We first consider the general case of computing the sensitivity of the sample path with respect to a covariance parameter $p_c$, and then the special case where the parameter of interest controls the variance of a random field with fixed correlation function. General case. Since the covariance matrix is a function of $p_c$, its eigenvalues and eigenvectors are also functions of $p_c$. Applying the chain rule to the Karhunen-Loève expansion, we have $\frac{\partial e_n}{\partial p_c} = \sum_{i=1}^{N_{KL}} \left( \frac{\xi_i(\omega_n)}{2\sqrt{\lambda_i}} \frac{\partial \lambda_i}{\partial p_c} \phi_i + \sqrt{\lambda_i} \, \xi_i(\omega_n) \frac{\partial \phi_i}{\partial p_c} \right)$. Note that since the pathwise sensitivity approach is used, the random variables $\xi_i(\omega_n)$ remain fixed. Equation (5.5) is only valid if the eigenvalues and eigenvectors in the K-L expansion are differentiable functions of $p_c$. It can be shown, via the implicit function theorem, that if the eigenvalues of $C$ are simple (i.e., have algebraic multiplicity one), then the eigenvalues and eigenvectors of $C$ are infinitely differentiable with respect to $p_c$. 
If the eigenvalues remain simple as $p_c$ is varied over some range of values, then the eigenvalues and eigenvectors are differentiable over that range of $p_c$. For an arbitrarily chosen covariance matrix, varying a parameter $p_c$ controlling the covariance function is unlikely to result in duplicate eigenvalues. To see this, first note that the difference between the dimension of the space of $n \times n$ symmetric positive definite matrices and the dimension of the subspace of $n \times n$ symmetric positive definite matrices with repeated eigenvalues is at least two, which can be proved using a simple counting argument. A curve in $N$ dimensional space is unlikely to pass through an $N - 2$ dimensional subspace, e.g. an arbitrary curve in the plane ($N = 2$) is unlikely to pass through a given point in that plane. This gives rise to the "avoidance of crossing" phenomena: as $p_c$ is varied, the eigenvalues of a symmetric matrix are extremely unlikely to cross, and thus are likely to remain simple. Thus, the eigenvalues and eigenvectors are likely to remain differentiable functions of $p_c$ as $p_c$ is varied. Of course, it is easy to design cases where the eigenvalues cross. For example, consider the matrix $\mathrm{diag}(p_c, 1)$ [the matrix was lost in extraction; a diagonal matrix with entries $p_c$ and $1$ matches the behavior described — verify against the original] over the range $p_c \in (0, \infty)$. The eigenvalues of this matrix are plotted in Figure 3, which clearly shows the two eigenvalues crossing at $p_c = 1$. At the point of crossing, the eigenvalues are not differentiable with respect to $p_c$, which can be visualized by the "kinks" in the two curves at $p_c = 1$. However, such cases are extremely unlikely to occur for arbitrary covariance matrices, where the elements are not deliberately chosen to produce crossing eigenvalues. When the eigenvalues are simple, the derivatives of the eigenvalues and eigenvectors can be computed using established results from eigenvalue perturbation theory: $\frac{\partial \lambda_i}{\partial p_c} = \phi_i^T \frac{\partial C}{\partial p_c} \phi_i$ and $\frac{\partial \phi_i}{\partial p_c} = -(C - \lambda_i I)^{+} \frac{\partial C}{\partial p_c} \phi_i$, where $(C - \lambda_i I)^{+}$ denotes the Moore-Penrose pseudoinverse of the matrix $(C - \lambda_i I)$. 
Since the explicit dependence of the entries of the covariance matrix $C$ on $p_c$ is assumed to be known, the sensitivities of the eigenvalues and discretized eigenvectors in the K-L expansion can be computed in closed form. One practical issue that arises when using the pathwise sensitivity method results from the sign ambiguity of the eigenvectors. Specifically, although the eigenvector $\phi_i(p_c)$ is differentiable with respect to $p_c$ (and therefore continuous), perturbing $p_c$ by some small $\epsilon$ may result in $\phi_i(p_c + \epsilon)$ being very different from $\phi_i(p_c)$ as a result of sign ambiguity. This issue is resolved by choosing the sign that results in the "closer" eigenvector: if $\|\phi_i(p_c + \epsilon) + \phi_i(p_c)\|_2 < \|\phi_i(p_c + \epsilon) - \phi_i(p_c)\|_2$, then the sign of $\phi_i(p_c + \epsilon)$ is flipped. Special case: sensitivity with respect to the variance. Computing the sample path sensitivities can be simplified if the parameter $p_c$ only scales the variance of the random field, but does not change its correlation function. Consider a random field $\eta(x, \omega)$ with unit variance, i.e. $\mathbb{E}[\eta(x, \omega)^2] = 1$ everywhere. Scaling this random field by the function $\sigma(x)$ produces the random field $e(x, \omega) = \sigma(x) \eta(x, \omega)$ with non-stationary variance $\sigma^2(x)$. The covariance function of the process $\eta(x, \omega)$, denoted $\rho(x_1, x_2)$, satisfies the property $x_1 = x_2 \implies \rho(x_1, x_2) = 1$. The corresponding covariance function of the scaled process $e(x, \omega)$ is given by $C(x_1, x_2) = \sigma(x_1) \sigma(x_2) \rho(x_1, x_2)$. Suppose the function $\sigma(x)$ depends smoothly on the parameters $p_c$. Rather than simulating the random field $e(x, \omega)$ with non-stationary variance, we instead simulate the unit variance field $\eta(x, \omega)$ and set $e_n(x) = \sigma(x) \eta_n(x)$. Then, the sample path sensitivity with respect to $p_c$ can be computed as (5.9) $\frac{\partial e_n}{\partial p_c} = \frac{\partial e_n}{\partial \sigma} \frac{\partial \sigma}{\partial p_c} = \eta_n \frac{\partial \sigma}{\partial p_c}$. This greatly simplifies the sensitivity calculation since the K-L expansion only needs to be computed once. This eliminates the issues caused by the sign ambiguity of the eigenvectors since the same set of eigenvectors are used throughout the optimization. 
The computational cost of performing optimization with this approach is also lower since it does not require the sensitivity of the K-L expansion to be computed at each optimization step. However, this difference in computational cost may be small compared to the cost of computing the objective and constraint function estimates, which typically require many Monte Carlo simulations to be performed. If each Monte Carlo sample is computationally expensive, e.g. requires solving a system of partial differential equations, then the relative savings will be very small. Figure 4 illustrates scaling a random field with stationary variance to produce realizations of a random field with a spatially varying variance. The original random field, shown at the top, is a zero-mean Gaussian random field with a squared exponential covariance function. The scaled random field, shown on the bottom, is also a zero-mean Gaussian random field. However, the increase in the standard deviation near $x = 0$ produces realizations with more variability in this region than the original random field with stationary variance. Interchanging differentiation and expectation. As mentioned previously, applying the pathwise sensitivity method requires that the interchange of differentiation and integration is justified. We now address which conditions on $F$ and $p$ ensure that this interchange is justified. The first requirement is that the random vector $\xi$ must be independent of the parameters $p$. Since we use the K-L expansion to simulate the random field, this is true by construction: changing the parameters $p$ only changes the eigenvalues and eigenvectors in the K-L expansion, thus the random vector $\xi$ is independent of the parameters $p$. The second requirement is on the regularity of the function $F(\xi, p)$ (for simplicity, we consider only one parameter $p$). 
Interchanging differentiation and integration requires that the following interchange of limit and integration is justified: A necessary and sufficient condition for this interchange to be valid is that the dif- where 1{|Q h | > c} is the indicator function. This condition is not readily verified for practical problems, since the analytical distribution of F is typically unknown. We instead provide a set of sufficient conditions that are more straightforward to verify in practice, following reference. Recall that F is a functional of the random field e(, x; p), and denote by D F ⊂ R || the set of points in where F is differentiable with respect to e. The following are sufficient conditions for the interchange of the limit and expectation in (5.10). (A3) F is Lipschitz continuous, i.e. there exists a constant k F < ∞ such that for all u(x), v(x), (A4) For every x ∈ X, there exists a random variable k e such that for all p 1, p 2 ∈ P, and E < ∞. Conditions (A3) and (A4) imply that F is Lipschitz continuous in p with probability one. Taking F = k F sup x k e, (5.14) We can then bound the difference quotient: and apply the dominated convergence theorem to interchange the expectation and limit in (5.10). Thus, conditions (A1)-(A4) are sufficient conditions for the pathwise sensitivity estimate to be unbiased. Conditions (A3) and (A4) together determine if F is almost surely Lipschitz continuous, and thus determine what type of input parameters and output quantities of interest can be treated with the pathwise sensitivity method. The previous section gave conditions for the differentiability of the sample paths, i.e. that the covariance function depends smoothly on p and have simple eigenvalues. Output functionals that may change discontinuously when smooth perturbations are made to the random field are not Lipschitz continuous almost surely. Thus, condition (A3) excludes failure probabilities, e.g. 
P(F ≥ c) = E, since the indicator function 1{F ≥ c} is discontinuous when F = c. This difficulty can be remedied to some degree using a smoothed version of the indicator function, but this introduces additional error to the sensitivity. Conditions (A2) and (A3) do permit functions that fail to be differentiable at certain points, as long as the points at which differentiability fails occur with probability zero, and F is continuous at these points. 6. Application: variance optimization. To demonstrate the proposed optimization framework, we consider an optimization problem with the design variables being the variance of a random field. The random field e(x, ) is defined on the domain X = and has a squared exponential correlation function: with correlation length L = 0.1. The standard deviation (x) of the random field is a spatially dependent function. We seek to minimize the sum of two competing cost functions that depend on (x) as a (spatially varying) parameter. The first cost function penalizes variability: where w(x) is a non-negative weighting function. The weighting function specifies which regions are most sensitive to increased variability. Regions where w(x) is large correspond to regions where variability has the largest impact on the system. The second cost function is inversely proportional to the variability: We seek to determine the standard deviation field * (x) that minimizes the sum of the two cost functions: This model problem is analogous to a tolerance optimization problem. Reducing tolerances (thereby increasing the variance 2 (x)) can improve the performance of the system. This behavior is reflected in the cost function f 1. Moreover, certain regions of the domain are more sensitive to variability than others, as expressed by the weight function w(x). On the other hand, it is costly to reduce tolerances, and the cost of reducing tolerances increases monotonically, as reflected in the form of f 2. 
The optimal solution to (6.4) can be derived analytically using the calculus of variations. The expectation and spatial integration can be interchanged in Equation (6.2) to give (6.5) The first variation of f can then be computed directly: Enforcing stationarity by setting f = 0, the optimal standard deviation field is found to be Note that this optimal is unique since both f 1 and f 2 are strictly convex functionals. As an example, we choose the weight function to be w(x) = 2 + sin(2x). The standard deviation field is discretized with N = 20 cubic B-spline basis functions B i : To demonstrate our method, the Monte Carlo method is used to compute an unbiased estimate of f 1, rather than computing it directly from Equation (6.5): For each Monte Carlo sample, the integral is evaluated using composite Gaussian quadrature with 20 intervals and a third order rule on each interval. The same quadrature rule is used to compute f 2. The SAA equivalent of (6.4) results from replacing the objective function f 1 with its unbiased estimate: This optimization problem is solved using the SQP algorithm with a BFGS update to approximate the Hessian as implemented in the NLopt package. The pathwise estimate of the sensitivity ∂f 1 /∂(x), which is an unbiased estimate of the true gradient, is computed as The sample path sensitivity ∂e n /∂ can be computed using either approach described previously, i.e. by computing the sensitivity of the K-L expansion or by computing sensitivities for a unit-variance random field scaled by (x). We use both approaches to compare their effectiveness. Figures 5 and 6 show optimal solutions obtained using each approach. The shaded blue 95% confidence region is computed by estimating the Hessian matrix B and covariance using the Monte Carlo samples used to compute the optimal solution: The standard error of the optimal solution is then N = 1/2. 
The plots show that the true optimal solution is largely within the 95% confidence region for each approximate solution. Qualitatively, for a given number of Monte Carlo samples, the solutions obtained using either sensitivity approach are very similar. To further illustrate the convergence of the SAA optimal solution to the true optimal solution, we conduct $M = 10^4$ independent optimization runs for different values of N. This allows us to examine the distribution of the approximate optimal solution. Since the computational cost of using a scaled unit-variance random field is lower, we use this method to perform each optimization. Figure 7 shows histograms of the error of the SAA optimal solution evaluated at the center of the domain, i.e. $\sigma^*_N(0.5) - \sigma^*(0.5)$, for various values of N. As expected, the histograms closely resemble Gaussian distributions with standard deviation proportional to $N^{-1/2}$. Figure 8 illustrates the convergence of the entire optimal solution and optimal value as N is increased. The standard deviation of the optimal solution error $\sigma^*_N(x) - \sigma^*(x)$ is plotted on the left, and the standard deviation of the optimal value error $f(p^*_N) - f(p^*)$ is plotted on the right. We note that both converge like $N^{-1/2}$: increasing the number of Monte Carlo samples by a factor of 100 gains a one decimal improvement in solution accuracy. 7. Application: compressor blade tolerance optimization. We now consider an application with engineering relevance: manufacturing tolerance optimization. Specifically, we consider a two-dimensional gas turbine compressor blade that is subject to geometric variability, and determine tolerances for this variability that provide the greatest performance benefit. 7.1. Manufacturing error and tolerance models. 
Previous studies of geometric variability in compressor blades has indicated that the discrepancy between manufactured blade geometries and the design intent geometry can be accurately modeled as a Gaussian random field. In this context, the random field e(x, ) represents the error between the manufactured surface and the nominal surface in the normal direction at the point x on the nominal blade surface. The mean of the The gradient information used to obtain * is computed using a scaled unit-variance random field. manufacturing error is assumed to be zero everywhere, i.e.(x) = 0. Manufacturing deviations tend to negatively impact the mean performance of compressor blades. We quantify the performance in terms of the total pressure loss coefficient, denoted by, which measure the thermodynamic losses generated by a compressor. The mean total pressure loss coefficient tends to increase as the level of variability, i.e. the variance of the random field e(x, ), is increased. It is possible to reduce this detrimental impact by specifying stricter manufacturing tolerances, thereby reducing the variance of the surface variations. To represent the standard deviation field (x) over the surface of the blade, we use the same cubic B-spline basis introduced previously. The knot placement is chosen to enrich the basis near the leading edge, since previous studies of the impact of geometric variability on compressor performance have shown that most of the increase in loss results from imperfections near the leading edge. A total of N = 31 basis functions were used to parameterize the standard deviation. equation formulation describing the evolution of the integral momentum and kinetic energy shape parameter. In the inviscid regions of the flow field, the steady state Euler equations are discretized over a streamline conforming grid. Transition models are included to predict the onset of turbulent flow in the boundary layer. A convenient feature of MISES is its solution speed. 
A typical flow solution requires 10-20 Newton-Raphson iterations to converge, which can be performed in a few seconds. Moreover, MISES offers the option to reconverge a flow solution after perturbing the airfoil geometry. Since the perturbations in the geometry introduced by manufacturing variability are small, the flow field corresponding to blades with manufacturing variability can be reconverged very quickly from the flow field computed for the nominal geometry. This offsets some of the computational cost associated with using the standard Monte Carlo method to propagate uncertainty. 7.3. Optimization statement. We seek to optimize the manufacturing tolerances to reduce the detrimental impact of manufacturing variations. To do this, we first define the variability metric V, which measures the total level of manufacturing variations over the entire blade surface: Here ∈ R N parameterizes the standard deviation (x). Specifying stricter tolerances (decreasing V ) incurs higher manufacturing costs. To constrain this cost, we constrain the variability metric to a specific value V b, representing the strictest tolerances deemed acceptable by the manufacturer. The standard deviation of the manufacturing variability is constrained from above to ensure the optimizer does not trade increases in variability in regions of low sensitivity for excessive decreases in variability in regions of high sensitivity. The resulting optimization problem for the optimal tolerances is given below. To solve (7.2) numerically, the SAA method is used and all objective and constraint functions are replaced by their Monte Carlo estimates. The resulting nonlinear optimization problem is solved using SQP. The gradient of the objective and constraints is computed using the pathwise approach described previously. The shape sensitivities are evaluated using second-order accurate finite differences. Numerical results. 
We apply the proposed method to optimize the tolerances of a two-dimensional fan exit stator cascade. In the absence of geometric variability, the loss coefficient is = 2.22 10 −2. Manufacturing variations are prescribed in the form of a Gaussian random field with standard deviation 8.0 10 −4 (non-dimensionalized by the blade chord). The covariance function of the random field is the same squared exponential function described earlier, with a correlation length L that is reduced near the leading edge of the blade to reflect the manufacturing variations observed in measured blades. The mean loss coefficient of the blade in the presence of manufacturing variability is E = 2.29 10 −2, roughly 3% higher than the loss of the design intent geometry. A total of 75 SQP iterations were required to obtain the optimal solution. Each SQP iteration requires evaluating one evaluation of the gradient of the objective and constraint functions, as well as a number of evaluations of the objective and constraint functions to perform a linesearch. This resulted in 120 Monte Carlo simulations, each comprised of N = 500 flow solutions. Thus, the parallelizability of the Monte Carlo method and the speed of the MISES code had great benefit. The total allowable variability V b was constrained to be 98% of the baseline level of variability. The optimized tolerances are shown in Figure 9. We only show the standard deviation near the leading edge of the blade, since the optimal value over the rest of the blade was equal to the baseline value of 8.0 10 −4. We observe that the greatest reduction in variability is specified on the upper surface of the blade. The optimized tolerances reduce the mean loss coefficient to E = 2.23 10 −2, roughly 0.5% higher than the loss of the design intent geometry. For a very small decrease in the level of manufacturing variability, a significant increase in the mean performance is realized, demonstrating the efficacy of the proposed approach. 8. 
Summary and conclusions. Considerable research has been conducted in the area of design under uncertainty, bringing together the fields of uncertainty quantification and optimization. Optimization of the uncertainty itself has received considerably less attention. This paper has presented an approach for optimizing the mean and covariance of Gaussian random fields to achieve a desired statistical performance. The novel sensitivity analysis presented here allows for gradient-based algorithms to be leveraged when performing these optimizations. The approach presented in this paper can be applied when the mean and covariance functions depend explicitly on some set of parameters. We have presented the example of tolerance optimization, where the level of variability is a design variable. Another example arises from optimizing measurement locations in a Gaussian random field, where, conditioned on the measurements, the covariance depends explicitly on the measurement location. Future improvements to the proposed optimization framework would incorporate adjoint sensitivity information when considering PDE-constrained problems. This would reduce the computational cost of estimating gradients when the number of design parameters is large with respect to the number of objectives and constraints, which is common in engineering optimization.
/**
 * Close the connection, previously established by {@link #openDevice()}.
 *
 * <p>Tears down in dependency order: streams first, then the connection
 * itself, and finally clears the reference so a closed connection cannot
 * be reused by mistake. Precondition: {@link #openDevice()} must have run
 * (asserted via {@code device_ != null}).
 */
private void closeDevice() {
    assert device_ != null; // openDevice() must have been called first
    closeStreams();         // release the streams before closing their connection
    connection_.close();
    connection_ = null;     // mark as closed; prevents accidental reuse
}
Lenny Teytelman Harvard University is the most popular higher education institution on Facebook. It has 3.3 million "likes." This isn't surprising, it's one of the most famous universities worldwide and it's widely regarded as one of the best, perhaps the best. But according to Lenny Teytelman, founder of ZappyLab, a company that makes apps for research scientists, about 3 million of those likes are probably fakes, or likes generated by fake Facebook users who are trying to make their Facebook accounts look legit by populating them with bogus interests. A photo from Harvard's Facebook page. Harvard / Facebook Facebook said there were a number of different ways that a page can end up with millions of likes, and that did not mean they were fake. Harvard said it didn't pay for its likes: "Global interest in Harvard is validated by engagement across all our platforms. Social media is among the many tools we use to connect with the Harvard community and with many others interested in the teaching, learning and research at Harvard. The University did not pay for any of the 3.3 million likes on its official Facebook page." Of course, Harvard is an international name and it is not unexpected that Harvard would attract likes globally. It's not improbable that all those likes could be real. Notably, Teytelman doesn't offer proof that the likes are fake — so let's take this with a pinch of salt. Teytelman's claim is based on the fact many of the people who say they like Harvard live nowhere near Harvard. In fact, the most engaged Harvard fans on Facebook are from Dhaka, the capital of Bangladesh. Dhaka is the notorious home of many click farms who sell fake likes for money. So Teytelman decided to compare Harvard's Facebook fan base with some other institutions, and found this discrepancy (see chart above): Most universities have a few hundred thousand likes from people who live near their campuses — exactly what you'd expect. 
But the institutions that are most popular have millions of fans that live nowhere near the campus. Teytelman says he thinks Harvard's likes are probably fake: ... The most stunning example here is Harvard with 3.3 million "likes". Probably about three million of these are fakes. I just hope they did not pay for this, ... If the cost per "like" is similar to ours ($50-$100 per thousand), Harvard might have paid Facebook between $150,000-$300,000 for fake likes. Of course, there is a tautology here: In order to attract millions of fans, by definition a huge portion of them will come from far away, not nearby. That doesn't mean they are fake. Nonetheless, it raises a problem that many social media marketers have when they're trying to attract a large, high-quality audience of followers on Facebook: It's not easy to figure out which likes are legit and which are not. Facebook is fighting a running battle against fake accounts on its network. (Facebook click farms control fake accounts in order to sell bogus likes to unsuspecting companies who are advertising on Facebook in the hopes of increasing their popularity.) Only around 1% of all Facebook accounts are "abusive," or fake, Facebook said in its last annual report. But Facebook has more than 1 billion members, so 1% equals about 10 million fake accounts, liking everything they see. Low-quality likes from far away places are a problem on Facebook that primarily affects unsophisticated advertisers: If you run ads on Facebook indiscriminately, targeting anyone in any country, then you're likely to attract clicks from some pretty irrelevant accounts all over the globe. That may be what happened to Harvard — it doesn't necessarily have fake likes, just irrelevant ones. 
The reason Teytelman suspects Harvard's likes are fake is the Dhaka factor: "If the ads are not targeted and running all over the world, why would they skew to Dhaka, Bangladesh for 4/5 universities that clearly advertised (Oxford, Cambridge, Harvard, Univ. of the People)?" he tells Business Insider. In a statement to Business Insider, Facebook said it was not clear that anything was wrong with Harvard's page: There are many ways people find Pages to like on Facebook - from someone navigating directly to the Page to like it, to people seeing advertising campaigns, or from Pages appearing in Pages You May Like. Some Pages, including universities with an international reputation, often receive a large number of likes from people around the world and have fans that are dispersed geographically and demographically. These pages are often also featured in Pages You May Like and receive likes from people who aspire to attend or visit the university. The other problem with running ad campaigns that have indiscriminate geographic targeting is that your ads are likely to show up in the news feed of a fake-like click farm. That click farm may be generating fake likes for another unsuspecting client. In order to make its army of fake accounts look real (so they don't get banned from Facebook), it will have those accounts click on things that real people might like — and clicking on other people's ads is one way to do this quickly. There's a great video explaining this weird phenomenon here. Harvard did not advertise for likes, the institution tells us. But it did make a push to become the biggest university on Facebook back in 2011. Its president celebrated reaching the first 1 million likes with a video: "We are so pleased to be the first university to hit one million Facebook fans," said Harvard's President Drew Faust in a thank you video (embedded below). 
"We're also so pleased that networks like this -- some grown here at Harvard -- are uniting millions more people all around the globe." For comparison, Yale at the time had only 49,000 likes. Since then, Yale's page has become popular, too. That college now has 877,000 likes and its most engaged users are from Addis Ababa in Ethiopia — not well-known as Eli territory. Harvard will probably survive its fake like problem. But the issue isn't trivial for companies that do a lot of marketing through Facebook: They can end up with hundreds of thousands of followers who are fake, or irrelevant. At that point it becomes impossible for the company to figure out how interested its real fans are in their posts — real fan engagement gets drowned out by noise.
import os

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options

# Shared browser session used by every function in this module.
chrome_options = Options()
chrome_options.add_argument("--window-size=300,400")
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.get("https://myportal.fhda.edu/cp/home/displaylogin")


def scrape_cookies():
    """Log in to MyPortal and return the session cookies.

    Credentials are read from the MP_USER and MP_PASS environment
    variables (KeyError if either is missing).

    Returns:
        dict: cookie name -> cookie value for the session. Cookies are
        returned even if a page-load wait times out (preserving the
        original best-effort behaviour), but the failure is now reported
        instead of being silently swallowed -- the original ``return``
        inside ``finally`` discarded any exception raised in the ``try``
        block, including WebDriverWait timeouts.
    """
    # Fill in the login form fields directly via JavaScript.
    driver.execute_script(
        f"document.getElementById('user').value='{os.environ['MP_USER']}'"
    )
    driver.execute_script(
        f"document.getElementById('pass').value='{os.environ['MP_PASS']}'"
    )
    try:
        driver.execute_script("doLogin()")
        WebDriverWait(driver, 3).until(
            EC.title_is("MyPortal / Foothill-De Anza College District")
        )
        # Visit the course-search hand-off URL so its session cookies are set too.
        driver.get(
            "https://myportal.fhda.edu/render.UserLayoutRootNode.uP?uP_tparam=utf&utf=%2fcp%2fip%2flogin%3fsys%3dsctssb%26url%3dhttps%3A%2F%2Fbanssb.fhda.edu%2FPROD%2Fbwskfcls.p_sel_crse_search")
        WebDriverWait(driver, 3).until(
            EC.title_is("MyPortal / Foothill-De Anza College District")
        )
    except Exception as exc:  # best effort: still return whatever cookies exist
        print(f"warning: login flow did not complete cleanly: {exc}")
    return get_cookies(driver.get_cookies())


def get_cookies(cookies_list):
    """Flatten selenium's list-of-dicts cookie format into a name -> value dict.

    Args:
        cookies_list: list of dicts as returned by ``driver.get_cookies()``;
            only the 'name' and 'value' keys are used.

    Returns:
        dict mapping cookie name to cookie value.
    """
    return {cookie['name']: cookie['value'] for cookie in cookies_list}


def kill_driver():
    """Shut down the shared browser session."""
    driver.quit()


if __name__ == '__main__':
    scrape_cookies()
import { observable, computed, action } from 'mobx';
import { SemanticCOLORS } from 'semantic-ui-react';
import { TestItem, Impl, toUrlName } from './test_item_model';
import { Test } from './test_model';
import { Snapshot } from './snapshot_model';
import { Story } from './story_model';

/**
 * A node in the test tree: holds its own tests plus child groups, and the
 * per-phase setup/teardown hooks. Child groups are kept sorted by name.
 * `passingTests`/`failingTests` are mobx-observable counters refreshed via
 * `updateCounts()`.
 */
export class TestGroup extends TestItem {
  name: string;
  urlName: string;   // URL-safe form of `name` (see toUrlName)
  pathName: string;  // URL-safe form with dashes stripped; used in fileName
  parent: TestGroup; // null for the root group
  groups: TestGroup[];
  tests: Test[];
  beforeAll: Impl;
  before: Impl;
  beforeEach: Impl;
  afterAll: Impl;
  after: Impl;
  afterEach: Impl;
  @observable version: number = 0;
  @observable passingTests = 0;
  @observable failingTests = 0;

  constructor(parent: TestGroup, name: string) {
    super(name, parent);
    this.tests = [];
    this.groups = [];
    this.urlName = toUrlName(name);
    this.pathName = toUrlName(name, false).replace(/-/g, '');
    if (parent) {
      // Register with the parent and keep its children ordered by name.
      parent.groups.push(this);
      parent.groups.sort((a, b) => a.name < b.name ? -1 : 1);
    }
  }

  /** Total duration of every test in this subtree. */
  @computed get duration() {
    let tests = this.allTests;
    return tests.reduce((prev, next) => prev + next.duration, 0);
  }

  /** All descendant groups that directly own at least one test, sorted by path. */
  get nestedGroupsWithTests() {
    const groups = this.findGroups(g => g.tests.length > 0);
    groups.sort((a, b) => a.path < b.path ? -1 : 1);
    return groups;
  }

  /** Underscore-joined pathName chain, skipping the root group. */
  get fileName(): string {
    return (this.parent == null || this.parent.parent == null ? '' : (this.parent.fileName + '_')) + this.pathName;
  }

  /** Human-readable "a > b > c" path, skipping the root group. */
  get path(): string {
    if (this.parent == null) { return ''; }
    return (this.parent == null || this.parent.parent == null ? '' : (this.parent.path + ' > ')) + this.name;
  }

  get isRoot() { return this.parent == null; }

  // NOTE(review): always green/check regardless of test results — possibly
  // a placeholder; `color` below is the result-aware variant.
  get icon(): { name: string, color: SemanticCOLORS } {
    return { name: 'check', color: 'green' }
  }

  /** green = all passing, red = all failing, orange = mixed (whole subtree). */
  get color() {
    const passing = this.countTests(true);
    const failing = this.countTests(false);
    if (failing == 0) { return 'green'; } else if (passing == 0) { return 'red'; } else return 'orange';
  }

  /** Map of snapshot originalName -> current content for this group's own tests. */
  get snapshots() {
    let snapshots: {[index: string]: string} = {};
    this.tests.forEach(t => t.snapshots.forEach(s => snapshots[s.originalName] = s.current ));
    return snapshots;
  }

  /** Flat list of snapshot objects from this group's own tests (not descendants). */
  get allSnapshots() {
    let snapshots: Snapshot[] = [];
    this.tests.forEach(t => t.snapshots.forEach(s => snapshots.push(s)));
    return snapshots;
  }

  /** Every test in this subtree (own tests plus all descendants'). */
  get allTests() {
    let tests: Test[] = [...this.tests];
    this.groups.forEach(g => tests = tests.concat(g.allTests));
    return tests;
  }

  /** Breadth-first search for the first group (including this one) matching `test`. */
  findGroup(test: (group: TestGroup) => boolean): TestGroup {
    const queue: TestGroup[] = [this];
    while (queue.length > 0) {
      let current = queue.shift();
      if (test(current)) { return current; }
      for (let group of current.groups) { queue.push(group); }
    }
    return null;
  }

  /** Breadth-first search for every group (including this one) matching `test`. */
  findGroups(test: (group: TestGroup) => boolean): TestGroup[] {
    const result: TestGroup[] = [];
    const queue: TestGroup[] = [this];
    while (queue.length > 0) {
      let current = queue.shift();
      if (test(current)) { result.push(current); }
      for (let group of current.groups) { queue.push(group); }
    }
    return result;
  }

  /**
   * Decides where in the update tree the new group will be put.
   * If it is the top level group it depends whether it is a completely new group
   * or one that is being updated.
   *
   * @param {string} name
   * @param {Luis.State} state
   * @returns the existing child of state.currentGroup with this name, or a new TestGroup
   * @memberof TestGroup
   */
  getGroup(name: string, state: Luis.State) {
    let possibleRoot = state.currentGroup.groups.find(g => g.name === name);
    return possibleRoot ? possibleRoot : new TestGroup(state.currentGroup, name);
  }

  /** Like getGroup, but for Story nodes; delegates creation to state.createStory. */
  getStory(name: string, props: StoryConfig, state: Luis.State): Story {
    let possibleRoot = state.currentGroup.groups.find(g => g.name === name) as Story;
    return possibleRoot ? possibleRoot : state.createStory(name, props);
  }

  /**
   * Count tests in the whole subtree.
   * @param passing true counts tests with no error; false counts tests with an error.
   */
  countTests(passing: boolean) {
    let count = 0;
    const queue: TestGroup[] = [this];
    while (queue.length > 0) {
      let current = queue.shift();
      for (let test of current.tests) {
        if (passing && test.error == null) { count ++; }
        else if (!passing && test.error != null) { count ++; }
      }
      for (let group of current.groups) { queue.push(group); }
    }
    return count;
  }

  /** Look up one of this group's OWN tests (not descendants) by url name. */
  findTestByUrlName(urlName: string) {
    return this.tests.find(t => t.urlName == urlName);
  }

  /** Refresh the observable pass/fail counters for this subtree. */
  @action updateCounts() {
    this.passingTests = this.countTests(true);
    this.failingTests = this.countTests(false);
  }
}
A novel volume integral equation for solving the Electroencephalography forward problem In this paper, a novel volume integral equation for solving the Electroencephalography forward problem is presented. Unlike other integral equation methods commonly used for the same purpose, the new formulation can handle inhomogeneous and fully anisotropic realistic head models. The new equation is obtained by a suitable use of Green's identities together with an appropriate handling of all boundary conditions for the EEG problem. The new equation is discretized with a consistent choice of volume and boundary elements. Numerical results show the validity and convergence of the approach, together with its applicability to real-case models obtained from MRI data.
import subprocess
from argparse import ArgumentParser

#import("test_summary")

# Run `dotnet test` for each .csproj file supplied on the command line.
parser = ArgumentParser()
parser.add_argument(
    "-f", "--files",
    nargs="+",
    dest="files",
    required=True,  # previously a missing -f crashed with TypeError when iterating None
    help="Define a .csproj file list.",
)
args = parser.parse_args()

for f in args.files:
    # Argument-list form: the old single string ('dotnet test "file"') is only
    # interpreted as a command line with shell=True and fails outright on
    # POSIX; a list is portable and makes shell-quoting/injection issues
    # impossible. (The previous per-file `Popen('dotnet')` warm-up, whose
    # output was captured and discarded, has been removed as dead work.)
    subprocess.call(["dotnet", "test", f])
Ultrasound flow mapping in a model of a secondary hydraulic zinc-air battery The investigation of complex multiphase flows is relevant for a wide range of industrial processes. For example, the performance of secondary hydraulic zinc-air batteries, here referred to as zinc-air flow batteries (ZAB), where the energy is stored in microscopic zinc particles suspended in an aqueous electrolytic solution (i.e. suspension electrode), strongly depends on the suspension flow in the electrochemical cell. To improve the design of the fluidic cell structures, detailed knowledge of the local flow conditions is required. Flow measurements can be performed inside the opaque fluid using ultrasound technology, if the acoustical properties, i.e. attenuation and speed of sound are known. These properties strongly depend on the characteristics of the multiphase fluid, e.g. the density, compressibility, thermal conductivity and the concentration of the individual phases. Hence, a characterization of the multiphase fluid to parameterize the measurement system is needed. We present results from experimental characterization measurements of a suspension electrode for zinc-air flow batteries, applying a spectroscopic method to parametrize the measurement system for the flow mapping. Finally we present flow mapping results from a fluidic ZAB model and compare them to numerical results from multiphase CFD-DEM simulations.
/*
 * Types of defects: assignment from large to small size data type - data lost problem
 * Complexity: int float Variable
 *
 * NOTE(review): this is a deliberate defect fixture (presumably for
 * static-analysis tool testing, given the header above) -- the float-to-int
 * narrowing below is the pattern under test and must NOT be "fixed".
 */
void data_lost_004 ()
{
	int ret;
	float a = 2.14748365e+09F; /* ~2^31 - 1: at the edge of what int can hold */
	ret = a;                   /* DEFECT: float -> int conversion loses precision/data */
	sink = ret;                /* sink: global defined elsewhere; keeps the store observable */
}
package application.siamakabbasi.imagemixer;

import android.content.Intent;
import android.graphics.Bitmap;
import android.graphics.Matrix;
import android.graphics.drawable.BitmapDrawable;
import android.graphics.drawable.Drawable;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.SeekBar;
import android.widget.TextView;

import application.siamakabbasi.imagemixer.commonclasses.BitmapOperations;
import application.siamakabbasi.imagemixer.commonclasses.DataManager;

/**
 * Activity that stacks two previously chosen images (from {@link DataManager})
 * on top of each other and lets the user adjust the top image's transparency
 * with a seek bar. Pressing OK composites the two bitmaps via
 * {@link BitmapOperations#LayerFilter} and hands the result to FilterActivity.
 */
public class ManipulationActivity extends AppCompatActivity implements View.OnClickListener {

    private ImageView imgtop;            // upper image whose alpha is adjustable
    private ImageView imgbottom;         // lower (background) image
    private SeekBar skbsettransparenty;  // controls `alpha` (0..max, treated as 0..255)
    private TextView text;               // shows the current alpha value
    private View content;                // canvas container; its size bounds the composite
    private int alpha;                   // current top-image alpha, 0..255
    private String LOG_TAG = "ManipulationActivity";

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_manipulation);
        //Set up member variables
        initAlphaBar();
        Button btn = (Button) findViewById(R.id.btnOk);
        btn.setOnClickListener(this);
        content = findViewById(R.id.relcanvas);
        text = (TextView) findViewById(R.id.progresstext);
        DataManager dm = DataManager.getInstance();
        imgtop = (ImageView) findViewById(R.id.imgtop);
        imgbottom = (ImageView) findViewById(R.id.imgbottom);
        // The two source bitmaps were stored in DataManager by an earlier activity.
        imgtop.setImageBitmap(dm.getFirstImageBmap());
        imgbottom.setImageBitmap(dm.getSecondImageBmap());
    }

    //Controls for the alpha bar
    /**
     * Wire up the transparency seek bar: each change updates the `alpha`
     * field, the numeric label, and the top ImageView's alpha (scaled to 0..1).
     * Starts fully opaque (alpha = 255).
     */
    public void initAlphaBar() {
        try {
            skbsettransparenty = (SeekBar) findViewById(R.id.skbSetTransparenty);
            alpha = 255;
            skbsettransparenty.setOnSeekBarChangeListener(new SeekBar.OnSeekBarChangeListener() {
                @Override
                public void onStopTrackingTouch(SeekBar arg0) {
                }

                @Override
                public void onStartTrackingTouch(SeekBar arg0) {
                }

                @Override
                public void onProgressChanged(SeekBar arg0, int progress, boolean arg2) {
                    if (progress >= 0 && progress <= skbsettransparenty.getMax()) {
                        alpha = progress;
                        text.setText("" + alpha);
                        // ImageView.setAlpha takes 0..1, `alpha` is kept as 0..255.
                        imgtop.setAlpha(((float) alpha) / 255);
                    }
                }
            });
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * OK button handler: composites the bottom and top bitmaps (at their
     * on-screen positions, with the chosen alpha), stores the result in
     * DataManager, and launches FilterActivity.
     */
    @Override
    public void onClick(View view) {
        DataManager dm = DataManager.getInstance();
        Bitmap bottomBmap = ((BitmapDrawable) imgbottom.getDrawable()).getBitmap();
        Bitmap topBmap = ((BitmapDrawable) imgtop.getDrawable()).getBitmap();
        //positions of bitmaps inside imageviews
        int[] locations0 = getBitmapPositionInsideImageView(imgbottom);// Need check if these functions work for every position
        int bottomPosX = locations0[0];
        int bottomPosY = locations0[1];
        int[] locations1 = getBitmapPositionInsideImageView(imgtop); // Need check if these functions work for every position
        int topPosX = locations1[0];
        int topPosY = locations1[1];
        //positions of imageviews
        int[] locations2 = new int[2];
        imgbottom.getLocationOnScreen(locations2);
        int x2 = locations2[0];
        int y2 = locations2[1];
        int[] locations3 = new int[2];
        imgtop.getLocationOnScreen(locations3);
        int x3 = locations3[0];
        int y3 = locations3[1];
        //Debug information
        Log.i(LOG_TAG, "inside imgview#\n");
        Log.i(LOG_TAG, "bottom:\ty.pos=" + bottomPosY + " x.pos=" + bottomPosX);
        Log.i(LOG_TAG, "top:\t\ty.pos=" + topPosY + " x.pos=" + topPosX);
        Log.i(LOG_TAG, "onscreen#\n");
        Log.i(LOG_TAG, "bottom:\ty.pos=" + y2 + " x.pos=" + x2);
        Log.i(LOG_TAG, "top:\t\ty.pos=" + y3 + " x.pos=" + x3);
        Log.i(LOG_TAG, "bottom\ty.size=" + bottomBmap.getHeight() + " x.size=" + bottomBmap.getWidth() + " dpi=" + bottomBmap.getDensity());
        Log.i(LOG_TAG, "top:\t\ty.size=" + topBmap.getHeight() + " x.size=" + topBmap.getWidth() + " dpi=" + topBmap.getDensity());
        Log.i(LOG_TAG, "canvas:\ty.size=" + content.getHeight() + " x.size=" + content.getWidth());
        //Create a new Bitmap and set it to OriginalImage for the next Activity
        BitmapOperations bm = BitmapOperations.getInstance();
        Bitmap result = bm.LayerFilter(bottomBmap, bottomPosX, bottomPosY, topBmap, topPosX, topPosY,
                content.getHeight(), content.getWidth(), alpha);
        dm.setOriginalImageBmap(result);
        //Start next activity
        Intent intent = new Intent(this, FilterActivity.class);
        startActivity(intent);/**/
    }

    /**
     * Compute where the drawable's bitmap actually sits inside an ImageView.
     *
     * @param imageView the view to inspect; may be null or empty
     * @return {left, top, width, height} of the drawn bitmap, in view
     *         coordinates (all zeros if the view or its drawable is null).
     *         Assumes the image is centered in the view.
     */
    public static int[] getBitmapPositionInsideImageView(ImageView imageView) {
        int[] ret = new int[4];
        if (imageView == null || imageView.getDrawable() == null)
            return ret;

        // Get image dimensions
        // Get image matrix values and place them in an array
        float[] f = new float[9];
        imageView.getImageMatrix().getValues(f);

        // Extract the scale values using the constants (if aspect ratio maintained, scaleX == scaleY)
        final float scaleX = f[Matrix.MSCALE_X];
        final float scaleY = f[Matrix.MSCALE_Y];

        // Get the drawable (could also get the bitmap behind the drawable and getWidth/getHeight)
        final Drawable d = imageView.getDrawable();
        final int origW = d.getIntrinsicWidth();
        final int origH = d.getIntrinsicHeight();

        // Calculate the actual dimensions
        final int actW = Math.round(origW * scaleX);
        final int actH = Math.round(origH * scaleY);

        ret[2] = actW;
        ret[3] = actH;

        // Get image position
        // We assume that the image is centered into ImageView
        int imgViewW = imageView.getWidth();
        int imgViewH = imageView.getHeight();

        int top = (int) (imgViewH - actH) / 2;
        int left = (int) (imgViewW - actW) / 2;

        ret[0] = left;
        ret[1] = top;

        return ret;
    }
}
<filename>Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/db/v1/instances/fixtures.go<gh_stars>10-100
// Test fixtures for the Rackspace Cloud Databases (db/v1) instances API:
// canned JSON request/response payloads and the Go values they are expected
// to marshal to / unmarshal from.
package instances

import (
	"fmt"
	"time"

	"github.com/apcera/libretto/Godeps/_workspace/src/github.com/rackspace/gophercloud"
	"github.com/apcera/libretto/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/db/v1/datastores"
	"github.com/apcera/libretto/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/db/v1/flavors"
	// NOTE: aliased as "os" (shadows the stdlib os package, which is unused here).
	os "github.com/apcera/libretto/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/db/v1/instances"
)

// Shared timestamp embedded in every fixture so JSON and Go values agree.
var (
	timestamp  = "2015-11-12T14:22:42Z"
	timeVal, _ = time.Parse(time.RFC3339, timestamp)
)

// instance is the canonical single-instance JSON body; reused by the
// createResp/getResp/listInstancesResp fixtures below.
var instance = `
{
  "created": "` + timestamp + `",
  "datastore": {
    "type": "mysql",
    "version": "5.6"
  },
  "flavor": {
    "id": "1",
    "links": [
      {
        "href": "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1",
        "rel": "self"
      },
      {
        "href": "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1",
        "rel": "bookmark"
      }
    ]
  },
  "links": [
    {
      "href": "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1",
      "rel": "self"
    }
  ],
  "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.rackspaceclouddb.com",
  "id": "{instanceID}",
  "name": "json_rack_instance",
  "status": "BUILD",
  "updated": "` + timestamp + `",
  "volume": {
    "size": 2
  }
}
`

// createReq is the expected request body for creating an instance
// (databases, users, volume, and a restore point).
var createReq = `
{
  "instance": {
    "databases": [
      {
        "character_set": "utf8",
        "collate": "utf8_general_ci",
        "name": "sampledb"
      },
      {
        "name": "nextround"
      }
    ],
    "flavorRef": "1",
    "name": "json_rack_instance",
    "users": [
      {
        "databases": [
          {
            "name": "sampledb"
          }
        ],
        "name": "demouser",
        "password": "<PASSWORD>"
      }
    ],
    "volume": {
      "size": 2
    },
    "restorePoint": {
      "backupRef": "1234567890"
    }
  }
}
`

// createReplicaReq is the expected request body for creating a read replica.
var createReplicaReq = `
{
  "instance": {
    "volume": {
      "size": 1
    },
    "flavorRef": "9",
    "name": "t2s1_ALT_GUEST",
    "replica_of": "6bdca2fc-418e-40bd-a595-62abda61862d"
  }
}
`

// createReplicaResp is the server response for the replica-create request above.
var createReplicaResp = `
{
  "instance": {
    "status": "BUILD",
    "updated": "` + timestamp + `",
    "name": "t2s1_ALT_GUEST",
    "links": [
      {
        "href": "https://ord.databases.api.rackspacecloud.com/v1.0/5919009/instances/8367c312-7c40-4a66-aab1-5767478914fc",
        "rel": "self"
      },
      {
        "href": "https://ord.databases.api.rackspacecloud.com/instances/8367c312-7c40-4a66-aab1-5767478914fc",
        "rel": "bookmark"
      }
    ],
    "created": "` + timestamp + `",
    "id": "8367c312-7c40-4a66-aab1-5767478914fc",
    "volume": {
      "size": 1
    },
    "flavor": {
      "id": "9"
    },
    "datastore": {
      "version": "5.6",
      "type": "mysql"
    },
    "replica_of": {
      "id": "6bdca2fc-418e-40bd-a595-62abda61862d"
    }
  }
}
`

// listReplicasResp is a list response containing a single replica instance.
var listReplicasResp = `
{
  "instances": [
    {
      "status": "ACTIVE",
      "name": "t1s1_ALT_GUEST",
      "links": [
        {
          "href": "https://ord.databases.api.rackspacecloud.com/v1.0/1234/instances/3c691f06-bf9a-4618-b7ec-2817ce0cf254",
          "rel": "self"
        },
        {
          "href": "https://ord.databases.api.rackspacecloud.com/instances/3c691f06-bf9a-4618-b7ec-2817ce0cf254",
          "rel": "bookmark"
        }
      ],
      "ip": [
        "10.0.0.3"
      ],
      "id": "3c691f06-bf9a-4618-b7ec-2817ce0cf254",
      "volume": {
        "size": 1
      },
      "flavor": {
        "id": "9"
      },
      "datastore": {
        "version": "5.6",
        "type": "mysql"
      },
      "replica_of": {
        "id": "8b499b45-52d6-402d-b398-f9d8f279c69a"
      }
    }
  ]
}
`

// getReplicaResp is a get response for a primary that has one replica attached.
var getReplicaResp = `
{
  "instance": {
    "status": "ACTIVE",
    "updated": "` + timestamp + `",
    "name": "t1_ALT_GUEST",
    "created": "` + timestamp + `",
    "ip": [
      "10.0.0.2"
    ],
    "replicas": [
      {
        "id": "3c691f06-bf9a-4618-b7ec-2817ce0cf254"
      }
    ],
    "id": "8b499b45-52d6-402d-b398-f9d8f279c69a",
    "volume": {
      "used": 0.54,
      "size": 1
    },
    "flavor": {
      "id": "9"
    },
    "datastore": {
      "version": "5.6",
      "type": "mysql"
    }
  }
}
`

// detachReq is the request body that detaches a replica from its primary
// (both legacy "slave_of" and current "replica_of" fields are cleared).
var detachReq = `
{
  "instance": {
    "replica_of": "",
    "slave_of": ""
  }
}
`

// getConfigResp is the response listing an instance's default MySQL configuration.
var getConfigResp = `
{
  "instance": {
    "configuration": {
      "basedir": "/usr",
      "connect_timeout": "15",
      "datadir": "/var/lib/mysql",
      "default_storage_engine": "innodb",
      "innodb_buffer_pool_instances": "1",
      "innodb_buffer_pool_size": "175M",
      "innodb_checksum_algorithm": "crc32",
      "innodb_data_file_path": "ibdata1:10M:autoextend",
      "innodb_file_per_table": "1",
      "innodb_io_capacity": "200",
      "innodb_log_file_size": "256M",
      "innodb_log_files_in_group": "2",
      "innodb_open_files": "8192",
      "innodb_thread_concurrency": "0",
      "join_buffer_size": "1M",
      "key_buffer_size": "50M",
      "local-infile": "0",
      "log-error": "/var/log/mysql/mysqld.log",
      "max_allowed_packet": "16M",
      "max_connect_errors": "10000",
      "max_connections": "40",
      "max_heap_table_size": "16M",
      "myisam-recover": "BACKUP",
      "open_files_limit": "8192",
      "performance_schema": "off",
      "pid_file": "/var/run/mysqld/mysqld.pid",
      "port": "3306",
      "query_cache_limit": "1M",
      "query_cache_size": "8M",
      "query_cache_type": "1",
      "read_buffer_size": "256K",
      "read_rnd_buffer_size": "1M",
      "server_id": "1",
      "skip-external-locking": "1",
      "skip_name_resolve": "1",
      "sort_buffer_size": "256K",
      "table_open_cache": "4096",
      "thread_stack": "192K",
      "tmp_table_size": "16M",
      "tmpdir": "/var/tmp",
      "user": "mysql",
      "wait_timeout": "3600"
    }
  }
}
`

// associateReq associates a configuration group with an instance.
var associateReq = `{"instance": {"configuration": "{configGroupID}"}}`

// listBackupsResp is the response listing an instance's backups.
var listBackupsResp = `
{
  "backups": [
    {
      "status": "COMPLETED",
      "updated": "` + timestamp + `",
      "description": "Backup from Restored Instance",
      "datastore": {
        "version": "5.1",
        "type": "MySQL",
        "version_id": "20000000-0000-0000-0000-000000000002"
      },
      "id": "87972694-4be2-40f5-83f8-501656e0032a",
      "size": 0.141026,
      "name": "restored_backup",
      "created": "` + timestamp + `",
      "instance_id": "29af2cd9-0674-48ab-b87a-b160f00208e6",
      "parent_id": null,
      "locationRef": "http://localhost/path/to/backup"
    }
  ]
}
`

// Responses that simply wrap the shared `instance` JSON body.
var (
	createResp        = fmt.Sprintf(`{"instance":%s}`, instance)
	getResp           = fmt.Sprintf(`{"instance":%s}`, instance)
	associateResp     = fmt.Sprintf(`{"instance":%s}`, instance)
	listInstancesResp = fmt.Sprintf(`{"instances":[%s]}`, instance)
)

// instanceID is the placeholder ID embedded in the `instance` JSON above.
var instanceID = "{instanceID}"

// expectedInstance is the Go value the `instance` JSON should unmarshal to.
var expectedInstance = &Instance{
	Created:   timeVal,
	Updated:   timeVal,
	Datastore: datastores.DatastorePartial{Type: "mysql", Version: "5.6"},
	Flavor: flavors.Flavor{
		ID: "1",
		Links: []gophercloud.Link{
			gophercloud.Link{Href: "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1", Rel: "self"},
			gophercloud.Link{Href: "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1", Rel: "bookmark"},
		},
	},
	Hostname: "e09ad9a3f73309469cf1f43d11e79549caf9acf2.rackspaceclouddb.com",
	ID:       instanceID,
	Links: []gophercloud.Link{
		gophercloud.Link{Href: "https://ord.databases.api.rackspacecloud.com/v1.0/1234/flavors/1", Rel: "self"},
	},
	Name:   "json_rack_instance",
	Status: "BUILD",
	Volume: os.Volume{Size: 2},
}

// expectedReplica is the Go value `createReplicaResp` should unmarshal to.
var expectedReplica = &Instance{
	Status:  "BUILD",
	Updated: timeVal,
	Name:    "t2s1_ALT_GUEST",
	Links: []gophercloud.Link{
		gophercloud.Link{Rel: "self", Href: "https://ord.databases.api.rackspacecloud.com/v1.0/5919009/instances/8367c312-7c40-4a66-aab1-5767478914fc"},
		gophercloud.Link{Rel: "bookmark", Href: "https://ord.databases.api.rackspacecloud.com/instances/8367c312-7c40-4a66-aab1-5767478914fc"},
	},
	Created:   timeVal,
	ID:        "8367c312-7c40-4a66-aab1-5767478914fc",
	Volume:    os.Volume{Size: 1},
	Flavor:    flavors.Flavor{ID: "9"},
	Datastore: datastores.DatastorePartial{Version: "5.6", Type: "mysql"},
	ReplicaOf: &Instance{
		ID: "6bdca2fc-418e-40bd-a595-62abda61862d",
	},
}
Generalized Partition Mechanism: Framework for Combining Multiple Strategy-Proof Mechanisms This paper presents a framework for combining multiple strategy-proof resource allocation mechanisms, in which participants are divided into several groups (partitions) and each mechanism is applied to one partition. The idea of dividing participants into several groups is introduced to achieve budget balance in a redistribution mechanism, i.e., the payment (money) collected in one partition is distributed in another partition. Furthermore, this idea has been used to adjust parameters of a mechanism (e.g., the reservation price in an auction) based on the information of participants in one partition in order to improve the mechanism's efficiency or revenue. This paper presents a unified framework called a generalized partition mechanism, in which information, money, and unsold goods can be transferred among partitions. This framework is very general and thus can be applied to various settings, including cases where a redistribution mechanism must adjust parameters to obtain a better social surplus. We provide a sufficient condition on the flow of information, money, and goods among partitions so that the generalized partition mechanism is strategy-proof, assuming that each mechanism applied to the partition is strategy-proof. We can use this sufficient condition as a guideline for combining multiple mechanisms. To show the applicability of this guideline, we develop new redistribution mechanisms based on this guideline, in which the utility of a participant can be non-quasi-linear.
Analysis of improved nutritional composition of bee pollen (Brassica campestris L.) after different fermentation treatments Microbial fermentation as an excellent food processing technology has been used for improving the flavour and nutritional feature of food material. In this study, we compared the differences in the nutrient profile of bee pollen (rape bee pollen, Brassica campestris L., BP) fermented with different species of microbes, such as lactic acid bacteria (LAB), yeast, and the two mixed. Based on the nutritional components and multivariate statistical analysis, yeast fermentation has more advantages for BP than fermentation with LAB or mixed microbes. The yeast-fermented BP has an 83.5% reduction in fructose and 87.4% reduction in glucose, while phenolic compounds, oligopeptides and fatty acids were increased by 9.3%, 68.8% and 18.2%, respectively, compared with the BP without fermentation. The contents of riboflavin, nicotinic acid, nicotinamide and free amino acids in the pollen fermented by yeast were 2.4, 39.6, 4.6 and 4.8 times higher than those in raw BP, respectively. Additionally, in the fermentation process, wall-breaking pollen showed more advantages in the transformation of nutrients: free amino acids increased by at least 11%, more low-molecular-weight peptides were produced, and nicotinic acid and nicotinamide increased by at least 7%, compared with fermented BP. Taken together, these results showed that BP with yeast fermentation has great potential in obtaining highly available nutritional products.
Suits‘ main power couple might be preparing their closing arguments. Stars Patrick J. Adams and Meghan Markle are expected to leave USA Network’s long-running legal drama when their contracts are up at the end of season 7, both Deadline and The Hollywood Reporter report. If the series is renewed for an eighth season — which the trades say is likely — it will be without Adams and Markle, who play on-screen lawyer couple Mike Ross and Rachel Zane. Reps for Adams and the network declined to comment on the report; however, a source tells EW that discussions are still ongoing. Both Adams and Markle have been with the series since it premiered in 2011. Over the past six years, Adams has not only received a SAG Award nomination for portraying the fake lawyer-turned-real lawyer, but he has also directed four of the show’s episodes, including the 100th one, which aired in September. According to THR, there’s a chance that, should he exit, Adams could return in a limited capacity, either as a guest or recurring star, or director. If Adams and Markle end up exiting, that’ll leave original series regulars Gabriel Macht, Rick Hoffman, and Sarah Rafferty to hold down the fort. Adams and Markle aren’t the first regulars to leave Suits. In 2016, original cast member Gina Torres left the series; however, she returned as a guest star multiple times in season 6 and is expected to headline her own spin-off series. The spin-off will be launched by Suits’ season 7 finale, which is also functioning as a backdoor pilot. Suits is expected to return in 2018.
Crowd Behavior Observation for Military Simulation Observations are frequently used to examine naturally occurring behavior. The researchers used this tool to determine what types of behaviors crowds exhibited and the degree of aggression expressed by the crowds. Footage of the 1999 WTO protest in Seattle was examined and coded for 55 behaviors. Of the observed behaviors, standing on an elevated platform, chanting, yelling and shouting, and raising flags were the most frequent. Overall, most of the action was moderately aggressive. Highly aggressive behaviors such as fighting and attacking police officers made up only 1% of the observed behaviors. A similar observation was conducted of an anti-war protest in New York. This observation found comparable results of prevalent nonviolent behaviors. The findings from these studies, along with those obtained from interviews of returning military officers, will be used to develop a model for simulation of crowd behaviors.
<filename>Classes/support/SAScrollMedia.h<gh_stars>10-100
// NOTE(review): the <filename>/<gh_stars> tokens above are extraction
// residue from the dataset, not part of the original header.
//
//  SAMediaObject.h
//  SAScrollTableViewCell
//
//  Created by globalreach-dev on 03/05/2014.
//  Modified by Passerbied on 11/05/2014.
//  Copyright (c) 2014 SA. All rights reserved.
//

#import <Foundation/Foundation.h>

/**
 *  list of supported media
 */
typedef NS_ENUM(NSUInteger, SAScrollMediaType) {
    /**
     *  UIImage object
     */
    SAScrollMediaTypeImageObject,
    /**
     *  name of image within main bundle i.e sample1.png
     */
    SAScrollMediaTypeImageName,
    /**
     *  NSData representation of image
     */
    SAScrollMediaTypeRawImage,
    /**
     *  NSURL link of video
     */
    SAScrollMediaTypeVideoAsset,
    /**
     *  NSURL link of image on the web i.e http://lorempixel.com/250/250/
     */
    SAScrollMediaTypeImageURL,
    /**
     *  for subclassing
     */
    SAScrollMediaTypeOther
};

/**
 *  Value object describing a single media item (image or video) together
 *  with how its `object` payload should be interpreted.
 */
@interface SAScrollMedia : NSObject

/// Discriminator telling consumers how to interpret `object`.
@property (nonatomic) SAScrollMediaType type;

/// NOTE(review): purpose not visible in this header — presumably the class
/// used when `type` is SAScrollMediaTypeOther (subclassing); confirm
/// against SAScrollTableViewCell usage.
@property (nonatomic) Class mediaClass;

/// Optional human-readable title for the media item.
@property (nonatomic, strong) NSString *title;

/// The payload itself (e.g. NSData, NSURL, UIImage, or an image name,
/// depending on `type`).
@property (nonatomic, strong) id object;

/**
 * factory method without a title
 *
 * @param type SAMediaType
 * @param object object such as NSData, NSURL etc...
 *
 * @return self
 */
+ (id)mediaWithType:(SAScrollMediaType)type object:(id)object;

/**
 * factory method with title
 *
 * @param type SAMediaType
 * @param title title of image
 * @param object object such as NSData, NSURL etc...
 *
 * @return self
 */
+ (id)mediaWithType:(SAScrollMediaType)type title:(NSString *)title object:(id)object;

@end
<gh_stars>10-100
// NOTE(review): the <gh_stars> token above is extraction residue from the
// dataset, not part of the original header.
//**************************************************************************************************
//
//     OSSIM Open Source Geospatial Data Processing Library
//     See top level LICENSE.txt file for license information
//
//**************************************************************************************************
#ifndef ossimAtpTool_HEADER
#define ossimAtpTool_HEADER 1

#include <ossim/plugin/ossimPluginConstants.h>
#include <ossim/imaging/ossimImageHandler.h>
#include <ossim/base/ossimRefPtr.h>
#include <ossim/reg/PhotoBlock.h>
#include <ossim/util/ossimTool.h>
#include <memory>

namespace ATP
{
/**
 * OSSIM tool plugin for automatic tie-point (ATP) generation. The tool is
 * driven either from the command line (setUsage/initialize/execute) or via
 * JSON (loadJSON/saveJSON); the requested operation is recorded in m_method.
 */
class OSSIM_DLL ossimAtpTool : public ossimTool
{
public:
   // Operations this tool can be asked to perform (selected via JSON or
   // command-line arguments; dispatched in execute()).
   enum Method {
      METHOD_UNASSIGNED=0,
      GET_ALGO_LIST,
      GET_PARAMS,
      GENERATE
   };

   // Human-readable tool description (defined in the .cpp).
   static const char* DESCRIPTION;

   ossimAtpTool();
   virtual ~ossimAtpTool();

   virtual void setUsage(ossimArgumentParser& ap);
   virtual bool initialize(ossimArgumentParser& ap);
   virtual bool execute();
   virtual ossimString getClassName() const { return "ossimAtpTool"; }
   virtual void getKwlTemplate(ossimKeywordlist& kwl);
   virtual void loadJSON(const Json::Value& json);
   // Returns the JSON response assembled by the most recent operation.
   virtual void saveJSON(Json::Value& json) const { json = m_responseJSON; }

private:
   // Handlers for the corresponding Method values.
   void getAlgorithms();
   void getParameters();
   void generate();

   /**
    * When the ATP generator works with image pairs (crosscorr and descriptor), This method is
    * used to loop over all image pairs and assemble the final tiepoint list for all
    */
   void doPairwiseMatching();

   std::ostream* m_outputStream;   // destination for tool output (non-owning)
   bool m_verbose;
   bool m_featureBased;            // NOTE(review): exact semantics not visible in this header
   unsigned int m_algorithm;       // selected ATP algorithm identifier
   Method m_method;                // operation to run; METHOD_UNASSIGNED until configured
   std::string m_configuration;
   Json::Value m_responseJSON;     // response returned by saveJSON()
   std::shared_ptr<ossim::PhotoBlock> m_photoBlock;
};
}
#endif /* #ifndef ossimAtpTool_HEADER */
1. Field of the Invention The present invention relates to a storage apparatus employed in a computer system. More particularly, the present invention is concerned with a storage apparatus using a nonvolatile semiconductor memory such as a flash memory. 2. Description of the Related Art Generally, storage apparatuses include a randomly accessible nonvolatile storage medium. The randomly accessible nonvolatile storage medium is for example, a magnetic disk (which may be called a hard disk), an optical disk, or the like. As described in, for example, a patent document 1, numerous hard disks are integrated. In recent years, a storage apparatus using as a storage medium a nonvolatile semiconductor memory such as a flash memory in place of the conventionally employed hard disk has attracted attention. The flash memory has the merits that it can operate at a higher speed than the hard disk can and that the power consumption is limited. A patent document 2 has disclosed a technology for substituting a flash memory disk, which includes multiple flash memories and can be accessed by an accessing means for the conventional hard disk such as an accessing means conformable to the small computer system interface (SCSI) standard, for the hard disk of a storage apparatus. Incidentally, the patent document 1 refers to JP-A-2004-5370 and the patent document 2 refers to U.S. Pat. No. 6,529,416.
SELECTION AND VALIDATION OF CALCULATED PARAMETERS OF LARGE-SIZED TIRES FOR EVALUATING THE MOVEMENT DYNAMICS OF AGRICULTURAL MACHINES USING NUMERICAL SIMULATION The article provides an overview of tire model parameters used in the calculation of wheeled-vehicle dynamics when driving on roads or agricultural backgrounds. A list of parameters that are not essential for simulating the movement of machines at low speeds, mainly along a straight-line path, is determined. A method for determining the necessary parameters is presented, and an example of their calculation is given.
Study: Non-fearful social withdrawal linked positively to creativity UB research suggests that not all forms of social withdrawal are unhealthy “Over the years, unsociability has been characterized as a relatively benign form of social withdrawal. But, with the new findings linking it to creativity, we think unsociability may be better characterized as a potentially beneficial form of social withdrawal.” BUFFALO, N.Y. – Everyone needs an occasional break from the social ramble, though spending too much time alone can be unhealthy and there is growing evidence that the psychosocial effects of too much solitude can last a lifetime. But newly published research by a University at Buffalo psychologist suggests that not all forms of social withdrawal are detrimental. In fact, the research findings published in the journal Personality and Individual Differences suggest that one form of social withdrawal, referred to as unsociability, is not only unrelated to negative outcomes, but linked positively to creativity. “Motivation matters,” says Julie Bowker, an associate professor in UB’s Department of Psychology and lead author of the study, which is the first study of social withdrawal to include a positive outcome. “We have to understand why someone is withdrawing to understand the associated risks and benefits,” she says. Bowker’s study results are reminiscent of realities that surface in literature, from Thoreau’s retreat to Walden to Thomas Merton’s work as a cloistered monk, but for all the conversation and examples about the benefits of withdrawing to nature or reconnecting to the self, the pursuit has remained something that hasn’t been well investigated in the psychological literature, according to Bowker. Until now. “When people think about the costs associated with social withdrawal, often times they adopt a developmental perspective,” she says. 
“During childhood and adolescence, the idea is that if you’re removing yourself too much from your peers, then you’re missing out on positive interactions like receiving social support, developing social skills and other benefits of interacting with your peers. “This may be why there has been such an emphasis on the negative effects of avoiding and withdrawing from peers.” But, in recent years, Bowker says there is growing recognition for the different reasons why youth withdraw from and avoid peers, and that the risk associated with withdrawal depends on the underlying reason or motivation. Some people withdraw out of fear or anxiety. This type of social withdrawal is associated with shyness. Others appear to withdraw because they dislike social interaction. They are considered socially avoidant. But some people withdraw due to non-fearful preferences for solitude. These individuals enjoy spending time alone, reading or working on their computers. They are unsociable. Unlike shyness and avoidance, research consistently shows that unsociability is unrelated to negative outcomes. But, Bowker’s study is the first to link it to a positive outcome, creativity. “Although unsociable youth spend more time alone than with others, we know that they spend some time with peers. They are not antisocial. They don’t initiate interaction, but also don’t appear to turn down social invitations from peers. Therefore, they may get just enough peer interaction so that when they are alone, they are able to enjoy that solitude. They’re able to think creatively and develop new ideas – like an artist in a studio or the academic in his or her office,” says Bowker. In the study, shyness and avoidance were related negatively to creativity. 
Bowker thinks that “shy and avoidant individuals may be unable to use their solitude time happily and productively, maybe because they are distracted by their negative cognitions and fears” For the study, 295 participants reported on their different motivations for social withdrawal. Other self-report measures assessed creativity, anxiety sensitivity, depressive symptoms, aggression, and the behavioral approach system (BAS), which regulates approach behaviors and desires, and the behavioral inhibition system (BIS), which regulates avoidant behaviors and desires. Bowker says there is some overlap in the types of social withdrawal. Someone might be high in shyness, but also have some tendency toward unsociability. But, the results from her study show that when the research controls for all the subtypes, the three types of social withdrawal are related differently to outcomes. Not only was unsociability related positively to creativity, but the study findings also showed other unique associations, such as a positive link between shyness and anxiety sensitivity. “Over the years, unsociability has been characterized as a relatively benign form of social withdrawal. But, with the new findings linking it to creativity, we think unsociability may be better characterized as a potentially beneficial form of social withdrawal.”
On the biology of Pyrausta purpuralis and its comparison with Pyrausta ostrinalis (Hübner, 1796) and Pyrausta aurata (Lepidoptera: Pyralidae) An account is given of finding the larva of Pyrausta purpuralis in the British Isles feeding on Prunella vulgaris L. British literature gives Mentha arvensis L. and Thymus spp. as the foodplants apart from a citation of Prunella vulgaris in 1904 that seems to have been overlooked, except for being included as one of several foodplants in mainland European publications in 2012 and 2013. We discuss whether Prunella vulgaris may be the main, if not only, foodplant of Pyrausta purpuralis, at least in the British Isles. Descriptions and illustrations are provided of the larvae of Pyrausta purpuralis and the similar species P. ostrinalis (Hübner, 1796) and P. aurata, whose foodplants are also considered.
import { ApiModelProperty, ApiModelPropertyOptional } from '@nestjs/swagger';
import { IsEmail, IsNotEmpty, IsInt, IsEnum } from 'class-validator';
// NOTE(review): IsEnum is imported but never applied below; `role` is
// validated with @IsInt only. Confirm whether @IsEnum(Role) was intended.
import { Role } from '../user.model';
import { ClubDto } from '../../club/dto/club.dto';

/**
 * Data-transfer object describing a user. Used both as API input
 * (creation/registration) and output (responses). Swagger metadata comes
 * from the @ApiModelProperty decorators; request validation from the
 * class-validator decorators.
 */
export class UserDto {
  /** Primary key of the user row. */
  @ApiModelProperty({ description: 'The Users primary key' })
  id: number;

  /** Full name; must be non-empty on input. */
  @ApiModelProperty({ description: 'The Users full name', example: 'IAmAUser' })
  @IsNotEmpty()
  name: string;

  /**
   * Unencrypted password on input; per the decorator description, never
   * included in any response.
   */
  @ApiModelPropertyOptional({
    description: 'When used as input, this is the Users unencrypted password. Output wont yield this in any response.',
    example: '<PASSWORD>'
  })
  password?: string; // Should only be present if this is a user creation

  /** Email address; validated for well-formedness when present. */
  @ApiModelProperty({ description: 'The Users email address', example: '<EMAIL>' })
  @IsEmail()
  email?: string;

  /**
   * Authorization level (see Role enum). Constraints on allowed values are
   * described in the Swagger metadata below.
   */
  @ApiModelProperty({
    description: `An integer categorizing this users authorization level in the system. If this is a new registration, the role cannot be higher than 'Club'. If this user is created by another, the role cannot be higher than the currently logged on users role.`,
    enum: Role,
    example: Role.Club,
    default: Role.User
  })
  @IsInt()
  role: Role;

  /** Foreign key to the club this user belongs to. */
  @ApiModelProperty({ description: 'The id of an existing club this user belongs to' })
  clubId: number;

  /** Optional expanded club object for `clubId`. */
  @ApiModelProperty({ description: 'A reference to the club this user belongs to' })
  club?: ClubDto;
}
Royal Oak is about to unleash new regulations on dog owners. The new rules, which go into effect Thursday, require owners of “dangerous dogs” to carry $1 million in liability insurance, post signs, complete an obedience class with the dog, and keep the dog in a locked, fenced-in area. Owners must also comply with seven pages’ worth of other requirements to keep their pets in the city. Officials say a dog is deemed dangerous if it bites or attacks a person, or causes serious injury to another domestic animal. Exceptions include dogs protecting an owner or a homeowner’s property. City leaders say they created the ordinance after receiving 32 reports of dog bites and attacks during 2012 in Royal Oak. Royal Oak resident John Scott said the ordinance is a good move for the city, putting the responsibility on the owners instead of the dogs. “If you’re a dog owner, you know that dogs are protective of their territory. There’s an old saying that there’s no bad dogs, just bad owners,” he said. Lori Wosnicki, who has a Bernese Mountain Dog, said she understands the reason for the new ordinance, but still thinks that it goes too far. “Look at this dog, who goes to schools and has kids lay all over him. I have a really hard time with [the ordinance] because how do you decide what’s dangerous,” she said. Violation of the dog ordinance is a misdemeanor offense, punishable by a fine up to $500 and 90 days in jail. RELATED: Michigan Ranks 6th In Nation For Dog Bite Claims
Core 4 Under-24: Why did the Oilers leap the Maple Leafs for top spot? The future continues to look bright for Canada's seven NHL teams, with the Oilers and Maple Leafs still holding the top two spots in TSN Hockey's Core 4 U-24 ranking. Both teams have players with identical grades (three A’s and one B), but Connor McDavid has the clear edge over Auston Matthews and Edmonton improved this season with Evan Bouchard replacing Jesse Puljujarvi in their list. When balancing the Leafs' books, the attention has been on contracts for William Nylander and Auston Matthews, but the reality is that it's Mitch Marner who could tear apart the team budget as he could be knocking on the door of a 100-point season in the final year of his entry-level deal. Patrik Laine headlines the Jets' list and head coach Paul Maurice has been impressed by Josh Morrissey, who took the biggest year-over-year leap last year he’d seen in his two decades behind an NHL bench. With a superstar like Elias Pettersson, just about anything seems possible now in Van City. Thomas Chabot has allowed Ottawa to forget Erik Karlsson, while Drake Batherson - who plays his first NHL game on Thursday - has shown great potential. And Montreal has bumped up its Core 4 stocks, swapping out Alex Galchenyuk for Max Domi and acquiring blue chip forward Nick Suzuki. Check out our deeper dive of the Canadian teams' Core 4 below. Methodology: We went through every organization, from top to bottom, isolating the top players 23-and-under who are now Core 4 players or are authentic candidates to become Core 4 players. In the case of closely graded players, teams with players currently in the NHL earned the edge over others with prospects in the minors or junior. Scoop: The Oilers are at a crossroads now with their organizational depth. Puljujarvi and Yamamoto have not been able to step in and consistently produce, necessitating their demotion to Bakersfield. 
The bottom half of their board, with Maksimov and then McLeod, is a couple of years away. Not having Puljujarvi and Yamamoto to rely on in the here and now creates a significant hole for a team that struggles more often than not because of a lack of scoring depth. They need Puljujarvi and Yamamoto, two players who could cure a lot of ills on a weak right side of the offence, to return as impact performers. 1. Will Jesse Puljujarvi find his way or is he Nail Yakupov redux? Puljujarvi, still 20, has shown to have a much more versatile skillset than Yakupov. He is back in the AHL now, unable to gain Todd McLellan’s trust to play in the top six. The miss for the Oilers was in not grooming Puljujarvi in one of those spots over the second half of a lost season last year. He will now have to dig in and work his way back from Bakersfield. 2. What is the ceiling for Evan Bouchard? The No. 10 overall pick made a great first introduction with the Oilers. He would check so many of the boxes Edmonton desperately needs: a top-performing, right-shooting blueliner who can produce points, run the power play, and log minutes. 3. Is Olivier Rodrigue Edmonton’s goalie of the future? Rodrigue, 18, is now the Oilers’ top goaltending prospect alongside Stuart Skinner after his second-round selection. Time will tell if he is the answer, but it would behoove the Oilers to begin mapping out a long-range vision for the crease. Scoop: The megawatt star power is the marquee attraction that will keep Matthews and the Maple Leafs near the top of the Core 4 list through 2020. That much is obvious. The most interesting development over the last year, or even in the last six weeks, is the emergence of Kapanen, who has filled the void of William Nylander so capably. 
Kapanen was a classic tweener - too good for the AHL, but not enough of a consistent producer for the NHL - but he has played the part of an authentic top-six forward well enough to at least bump Dermott out of the Core 4 in one of the toughest calls of the exercise. 1. Could Travis Dermott be a Top 3 defenceman? Dermott didn’t make the cut among the Leafs’ Core Four, but Dermott’s underlying numbers through 50 NHL games are impressive. He has shown himself to be more than capable to handle third pair responsibilities, but the Leafs are hoping for more, and he’s tracking ahead of whether other potential top-pairing players were at his age. 2. How soon will Rasmus Sandin and Timothy Liljegren fill out the blueline? With a real salary cap crunch on the way, and the potential to lose Jake Gardiner to free agency, the Leafs will need inexpensive blueline contributors in an expedient fashion. 3. Could Sean Durzi be a diamond in the rough? A second-round pick last June, Durzi was already passed over once in the NHL Draft. He was worth a shot as a dynamic offensive-minded defenceman shaped in a mold similar to Minnesota’s Jared Spurgeon. They’re both right-shooting and have similar point production in major junior. Scoop: The Canucks are one of only four teams, along Winnipeg, Florida and Detroit, to have all four Core 4 pieces earn ‘A’ grades. Headlined by wunderkind Pettersson, there is also an argument to be made with Winnipeg whether they’re the most potent offensively. Pettersson has become appointment television – it’s almost news now when he doesn’t score as he continues to take the NHL by storm with a start that rivals Teemu Selanne’s in 1992-93. Pettersson, Boeser and Horvat are turning the page nicely from the Sedins, and it won’t be long before Hughes is making a much-needed dent on their blueline. 1. Will Olli Juolevi be the Canucks’ Karl Alzner? Forget his tenure with the Canadiens. We’re talking about the Alzner the Capitals got out of the fellow No. 
5 overall pick. The Canucks can’t unring the bell with Juolevi and he probably won’t live up to the billing of his selection, but Alzner gave the Caps a steady (though not flashy) 20:12 for 594 games. 2. Is Jonathan Dahlen a lower case Filip Forsberg? It will be hard to top the ill-advised Martin Erat for Filip Forsberg situation, but Dahlen has a chance. He finished out last season with 44 points in 45 games in Sweden as a 19-year-old. The Canucks acquired him for Alex Burrows, who has already been bought out in Ottawa. 3. Will Thatcher Demko take the net? There’s been a hole in goal since Roberto Luongo was traded in 2014. Recent concussion issues aside, which have sidelined him this season, Demko is one of the best North American-born goaltending prospects out there. Scoop: Last year, Laine, Connor and Ehlers combined for a staggering 104 goals, the most among any NHL Core 4 entry –including Toronto (83). Throw in Morrissey, who has blossomed into a No. 2 defenceman, and that is a solid group. Something seems off with Ehlers, who started this season on the fourth line after a quiet playoff run, but his 29-goal campaign last year still makes him a well-qualified ‘A’ in our book. 1. Could Jack Roslovic blossom into Mathieu Perreault Part Deux? Roslovic has been stuck in a fourth-line role with a deep Jets team. His best bet at the moment would be to become to Paul Maurice what Mathieu Perreault has been – a player who can rove up and down the lineup seamlessly and produce consistently. Perreault also had similar AHL numbers and a slow start to his NHL career. 2. Can Sami Niku’s offence translate to the NHL level? Niku, 22, is incredibly fun to watch with the swagger he brings to the blueline. The Jets haven’t been able to find a spot for him yet, but after being named the AHL’s top defenceman last season, some are wondering how seamless the transition in production will be for the flashy Finn. 3. What do the Jets have in Dylan Samberg? 
With both Tyler Myers and Jacob Trouba in the final year of deals, it’s not too early to look to the future. Winnipeg is one of the few teams heavy on the right side. On the left, it’s Niku and Samberg. He is trending toward a Brian Dumoulin-type addition. Little flash or pizzazz in his game, but Dumoulin has carved out a nice role for himself as a dependable depth player. Scoop: It’s been a calendar year to forget for the overall Senators organization, but not their prospect pool. Chabot has made Sens fans say, “Erik who?” to a two-time Norris Trophy winner. GM Pierre Dorion felt it was better to have a Tkachuk in hand than a Jack Hughes in the bush. And perhaps most surprisingly, Lajoie and Batherson came from seemingly out of nowhere to become can’t-miss players. Lajoie has gone from ECHL to NHL in one year; Batherson wasn’t on the radar of Team Canada at all for the World Junior championship, now he’s among the AHL’s leading scorers and has been called up to the NHL. 1. Will Alex Formenton be able to score in the NHL? There’s no question Formenton skates like the prototypical NHLer for the future, but his point production in junior hockey has been underwhelming. A nine-game audition did little to reinforce the notion he’ll score consistently at the next level. 2. What’s the ETA for JBD? That’s what everyone wants to know watching Jacob Bernard-Docker’s college career begin at North Dakota. Because the Sens’ second pick of the first round behind Brady Tkachuk has Hockey IQ through the roof, he makes the game look so easy. ETA: 2021-22 after three seasons in NoDak. 3. What do the Sens have in Logan Brown? In one year, Brown has dropped from No. 2 in the Sens’ Core 4 to outside the Top 10. Brown is anything but a certainty to become a full-time NHL player. Remember: Ottawa traded up to get him from No. 12 in the first round in 2016. Scoop: The Dougie Hamilton trade had a seismic impact on Calgary’s Core 4 and, in turn, the future outlook for the franchise. 
It’s interesting to note that neither Lindholm nor Hanifin was part of Carolina’s Core 4 last year. Departed prospect Adam Fox, starring at Harvard, also didn’t crack the Canes’ list this year. Nonetheless, the Flames seem well-positioned in the here and now, with the one real glaring hole in the pipeline being at centre. Lindholm’s versatility makes that less of a concern moving forward. 1. Is Tyler Parsons the goalie of the future? It’s no secret the Flames are looking for a long-term successor to 36-year-old Mike Smith. David Rittich, 26, has stepped up this season, but many in the Calgary organization ultimately envision Parsons ahead of Rittich and Jon Gillies on the depth chart. 2. Will Juuso Valimaki ascend like Mark Giordano? At the same age, the undrafted Giordano probably would have also been graded a ‘B’ in the Core 4 if we did the exercise in 2003. Valimaki has all the makings of a potential top-pair left-shooting defenceman if he follows his captain’s lead. 3. Is Dillon Dubé the Flames’ latest engine player? Make no mistake, Matthew Tkachuk is the heartbeat of the Flames, but Dubé is showing the potential to join him as an engine player. The captain of Team Canada’s 2018 World Junior team, Dube can crank it up and drag others along with him. Scoop: Eighteen months ago, five of the Canadiens’ top six players Under 24 weren’t in the organization. There’s no question Kotkaniemi is the game-changing addition. The No. 3 overall pick has drawn comparisons to Anze Kopitar and he’s already provided a shot in the arm to the Habs as the youngest player in the entire league. In Kotkaniemi, GM Marc Bergevin seems to have found his white whale at centre and the entire dynamic is looking up with the play of Domi, and potentially Suzuki and Poehling, down the middle. 1. Will Ryan Poehling sign? Poehling, 19, would be a key centre depth addition. But he is already into his junior season at St. Cloud State. 
That means it’s either sign him after this season, or risk losing him as a free agent after his senior season when he holds the cards to call his own shot. 2. What do the Habs have in Victor Mete? It’s legitimate to wonder whether Mete, who fell out of the Habs’ Core 4, is a bottom-pair blueliner or if he can play a top-four role. Mete looked like a real find as a 19-year-old; now he’s trying to find himself in the NHL as a 20-year-old. 3. Will Nick Suzuki be Montreal’s Swiss Army knife? Suzuki was the key piece in the Max Pacioretty trade. He has previously drawn comparisons to Anaheim’s Rickard Rakell. Suzuki probably won’t be a 30-goal scorer like Pacioretty, but he can play in almost any situation and contribute in a significant way.
// This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild #include "valid_fail_eq_str.h" #include "kaitai/exceptions.h" valid_fail_eq_str_t::valid_fail_eq_str_t(kaitai::kstream* p__io, kaitai::kstruct* p__parent, valid_fail_eq_str_t* p__root) : kaitai::kstruct(p__io) { m__parent = p__parent; m__root = this; try { _read(); } catch(...) { _clean_up(); throw; } } void valid_fail_eq_str_t::_read() { m_foo = kaitai::kstream::bytes_to_str(m__io->read_bytes(4), std::string("ASCII")); if (!(foo() == (std::string("BACK")))) { throw kaitai::validation_not_equal_error<std::string>(std::string("BACK"), foo(), _io(), std::string("/seq/0")); } } valid_fail_eq_str_t::~valid_fail_eq_str_t() { _clean_up(); } void valid_fail_eq_str_t::_clean_up() { }
package org.hl7.fhir.r4.model; /* Copyright (c) 2011+, HL7, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of HL7 nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

import java.util.List;

import org.apache.commons.lang3.NotImplementedException;
import org.hl7.fhir.exceptions.FHIRException;
import org.hl7.fhir.utilities.Utilities;

/**
 * Static helpers for matching and merging FHIR R4 datatype instances
 * (strings, coded values, identifiers, contact points).
 *
 * See http://www.healthintersections.com.au/?p=1941
 *
 * @author Grahame
 *
 */
public class Comparison {

  /**
   * Placeholder for future profile-driven matching rules. No profile behavior
   * is implemented yet: every method below throws NotImplementedException
   * when a non-null profile is supplied, so callers must pass null.
   */
  public class MatchProfile {

  }

  /**
   * String match: null or empty strings never match anything; otherwise the
   * two values are compared after Utilities.normalize (whitespace/case
   * normalization — see that utility for the exact rules).
   */
  public static boolean matches(String c1, String c2, MatchProfile profile) {
    if (Utilities.noString(c1) || Utilities.noString(c2))
      return false;
    c1 = Utilities.normalize(c1);
    c2 = Utilities.normalize(c2);
    return c1.equals(c2);
  }

  /**
   * Enumeration match: null wrappers never match; otherwise the wrapped enum
   * values are compared with equals.
   * NOTE(review): if getValue() can return null here this will NPE — confirm
   * against the Enumeration contract.
   */
  public static <T extends Enum<?>> boolean matches(Enumeration<T> e1, Enumeration<T> e2, MatchProfile profile) {
    if (e1 == null || e2 == null)
      return false;
    return e1.getValue().equals(e2.getValue());
  }

  /**
   * CodeableConcept match. If neither side has codings, falls back to a text
   * comparison; otherwise requires every coding on each side to be present in
   * the other (symmetric containment).
   *
   * @throws FHIRException declared for future profile support (profile
   *           handling itself currently throws NotImplementedException)
   */
  public static boolean matches(CodeableConcept c1, CodeableConcept c2, MatchProfile profile) throws FHIRException {
    if (profile != null)
      throw new NotImplementedException("Not Implemented Yet");

    if (c1.getCoding().isEmpty() && c2.getCoding().isEmpty()) {
      return matches(c1.getText(), c2.getText(), null);
    } else {
      // in the absence of specific guidance, we just require that all codes match
      boolean ok = true;
      for (Coding c : c1.getCoding()) {
        ok = ok && inList(c2.getCoding(), c, null);
      }
      for (Coding c : c2.getCoding()) {
        ok = ok && inList(c1.getCoding(), c, null);
      }
      return ok;
    }
  }

  /**
   * Copies src's text element onto dst when dst has none.
   * NOTE(review): HAPI-style getXElement() accessors typically auto-create
   * and never return null, which would make this check a no-op — confirm
   * whether hasTextElement() was intended.
   */
  public static void merge(CodeableConcept dst, CodeableConcept src) {
    if (dst.getTextElement() == null && src.getTextElement() != null)
      dst.setTextElement(src.getTextElement());
  }

  /** Returns true if any coding in the list matches c (per the Coding rules). */
  public static boolean inList(List<Coding> list, Coding c, MatchProfile profile) {
    for (Coding item : list) {
      if (matches(item, c, profile))
        return true;
    }
    return false;
  }

  /**
   * Coding match: system and code must both match (normalized string
   * comparison); the coding's version and display are deliberately ignored.
   */
  public static boolean matches(Coding c1, Coding c2, MatchProfile profile) {
    if (profile != null)
      throw new NotImplementedException("Not Implemented Yet");

    // in the absence of a profile, we ignore version
    return matches(c1.getSystem(), c2.getSystem(), null) && matches(c1.getCode(), c2.getCode(), null);
  }

  /**
   * Identifier match: system and value must both match; use, type, period and
   * assigner are ignored.
   */
  public static boolean matches(Identifier i1, Identifier i2, MatchProfile profile) {
    if (profile != null)
      throw new NotImplementedException("Not Implemented Yet");

    // in the absence of a profile, we ignore version
    return matches(i1.getSystem(), i2.getSystem(), null) && matches(i1.getValue(), i2.getValue(), null);
  }

  /**
   * Fills in use, type, period and assigner on dst from src, but only for
   * fields dst does not already have.
   * NOTE(review): as with merge(CodeableConcept), the == null checks on
   * element accessors may never fire if the accessors auto-create — verify.
   */
  public static void merge(Identifier dst, Identifier src) {
    if (dst.getUseElement() == null && src.getUseElement() != null)
      dst.setUseElement(src.getUseElement());
    if (dst.getType() == null && src.getType() != null)
      dst.setType(src.getType());
    if (dst.getPeriod() == null && src.getPeriod() != null)
      dst.setPeriod(src.getPeriod());
    if (dst.getAssigner() == null && src.getAssigner() != null)
      dst.setAssigner(src.getAssigner());
  }

  /**
   * ContactPoint match: system and value must both match.
   * NOTE(review): this overload takes Object rather than MatchProfile,
   * unlike every other matches() — likely an oversight, but widening the
   * parameter is caller-visible so it is only flagged here.
   */
  public static boolean matches(ContactPoint c1, ContactPoint c2, Object profile) {
    if (profile != null)
      throw new NotImplementedException("Not Implemented Yet");

    // in the absence of a profile, we insist on system
    return matches(c1.getSystemElement(), c2.getSystemElement(), null) && matches(c1.getValue(), c2.getValue(), null);
  }

  /**
   * Fills in use and period on dst from src, only where dst has no value.
   * NOTE(review): same auto-created-element caveat as the other merges.
   */
  public static void merge(ContactPoint dst, ContactPoint src) {
    if (dst.getUseElement() == null && src.getUseElement() != null)
      dst.setUseElement(src.getUseElement());
    if (dst.getPeriod() == null && src.getPeriod() != null)
      dst.setPeriod(src.getPeriod());
  }

}
Predictors of unrealistic optimism: a study of Norwegian risk takers In areas of voluntary risk behaviour, as with other kinds of risk, people tend to be overly optimistic regarding not being injured. A study of risk perception and causal explanations of injury assessments was conducted on 199 respondents from three different subgroups in Norway; skydivers (n=88), fire fighters (n=73) and soldiers (n=38). Unrealistic optimism was studied by means of four demographic variables: the background of the subject (subsample), gender, age, and education. In addition, three predictors of unrealistic optimism were taken into account—safety attitudes, control, and anxiety. These predictors were included in an Analysis of Linear Structural Relationship (LISREL) analysis. The results showed that optimism differed between the subgroups, and that different factors influenced risk perception depending on the group and depending on whether the assessment was of oneself or of others. These findings offer additional information that will help explain the inconsistent findings in the current literature of unrealistic optimism. Of the predictors investigated, safety attitudes were found to be the most important, which may be because respondents preoccupied with safety are more aware of potential dangers and thereby less optimistic.
Palladium-catalyzed asymmetric hydrophosphorylation of alkynes: facile access to P-stereogenic phosphinates Despite the importance of P-chiral organophosphorus compounds in asymmetric catalysis, transition metal-catalyzed methods for accessing P-chiral phosphine derivatives are still limited. Herein, a catalytic enantioselective method for the synthesis of P-stereogenic alkenylphosphinates is developed through asymmetric hydrophosphorylation of alkynes. This process is demonstrated for a wide range of racemic phosphinates and leads to diverse P-stereogenic alkenylphosphinates directly. Introduction P-Chiral organophosphorus compounds are broadly utilized as synthetic building blocks of bioactive molecules 1 and have served as an important class of chiral ligands that have significantly contributed to metal-catalyzed 2 and organocatalytic 3 transformations. P-stereogenic phosphinates are important molecules in medicinal and synthetic chemistry. For example, arylphosphinosugars have received continuous attention and demonstrated powerful activities on human cancer cell line panels. 4 However, P-chiral organophosphorus compounds are less studied due to their synthetic challenges, compared with chiral phosphine ligands where planar or point chirality is presented in the carbon framework. Despite the importance of P-stereogenic phosphinates, general and efficient methods for their preparation are rather rare. Traditionally, enantioenriched P-chiral phosphorus compounds are achieved through the use of chiral reagents or auxiliary-assisted transformations, using menthol or chiral amino alcohol, for example. 5 Recently, a variety of examples involving metal-catalyzed asymmetric processes through desymmetrization of prochiral phosphorus compounds have emerged. 
6-11 Dialkynylphosphine oxides are the typical examples for constructing P-stereogenic phosphine oxides, 6 and the first desymmetrization of dialkynylphosphine oxides was reported by using Rh(I)-catalyzed cycloaddition. 6a Desymmetrization of divinylphosphine oxides 7 and phospholene oxides 8 was also well-developed to construct P-stereogenic centers. Several elegant examples of inter- or intramolecular Pd-catalyzed enantioselective C-H arylation of phosphinamides, phosphonates and phosphine oxides were disclosed independently by Duan, Tang, Ma, Xu and Han. 9 Soon after, Cramer reported Rh-catalyzed desymmetric alkynylation of phosphinamides with alkynes 10a,b and Ir-catalyzed arylation and amination of phosphine oxides. 10c,d Very recently, Zhang presented an asymmetric P-C cross-coupling for the efficient synthesis of P-stereogenic phosphine oxides catalyzed by Pd and their Xiaophos. 11 Nevertheless, there have been only two desymmetrization examples reported for the enantioselective synthesis of P-stereogenic phosphinates. In 2009, Hoveyda and Gouverneur reported a molybdenum-catalyzed asymmetric ring-closing metathesis to obtain P-stereogenic phosphinates (Scheme 1, eqn (2a)). 12 In 2019, Trost showed the desymmetrization of phosphinic acids by stereoselectively alkylating one of the enantiotopic oxygens through Pd-catalyzed asymmetric allylic alkylation to give P-stereogenic phosphinates with diversified substituents (Scheme 1, eqn (2b)). 13 Therefore, it is desirable to develop other new methods for the synthesis of multifunctional P-stereogenic phosphinates. Pd-catalyzed addition of an H-P(O)R 1 R 2 to alkynes is one of the most straightforward and atom-efficient approaches for the construction of a C-P bond. 14 The first Pd-catalyzed addition of (RO) 2 P(O)H to alkynes was reported by Tanaka and Han to give the corresponding alkenylphosphonates.
14a Later, they developed a similar oxidative addition using (R P )-menthyl-phenylphosphinate to give enantiomerically pure P-chiral alkenylphosphinates with retention of conguration at phosphorus. 14b Though Han and co-workers reported a comprehensive study on the generality, scope, limitations, and mechanism of the palladium-catalyzed hydrophosphorylation of alkynes recently, 14c the catalytic enantioselective hydrophosphorylation of alkynes with phosphinates is still not reported, given more than 22 years have passed since the rst Pd-catalyzed hydrophosphorylation was reported. In 2006, Gaumont reported the Pd-catalyzed asymmetric hydrophosphination of alkynes with phosphine-boranes, only 70% conversion and 42% enantiomeric excess were obtained. 15 In 2018, Dong reported the hydrophosphinylation of 1,3-dienes to afford chiral allylic phosphine oxides with high enantio-and regiocontrol. 16 Herein, we disclose the rst catalytic enantioselective hydrophosphorylation reaction of alkynes with phosphinates, which provides a highly efficient approach to prepare chiral alkenylphosphinates with P-chirality. Results and discussion To begin the investigation, phenylacetylene 1a and ethyl phenylphosphinate 2a were chosen as the model substrates. Various types of ligands were initially evaluated, and most bidentate bisphosphine ligands with P chirality worked well in this transformation. When Duanphos L1 was used as the ligand, the reaction proceeded smoothly to afford alkenylphosphinate 3aa in 70% yield with 70% ee (. Unlike phosphinic acid and secondary phosphine oxide, 17 phosphinate 2a was not able to be easily racemized by base or transition metals. Thus, it is difficult for phosphinate to realize the dynamic kinetic resolution. Then, a kinetic resolution process was desired (for details, see the ESI ). However, when 1 equiv. 
of phosphinate was used, the product 3aa was obtained in 50% yield with 55% ee and the (R)-2a was recovered in 40% yield with 61% ee at 60 C (the S factor is only 6) ( Table 1, entry 10). Optimization of the ratio of 1a/2a was performed to enhance enantioselectivity of 3aa. When 4 equiv. 2a was used, the best yield and ee were obtained (Table 1, entry 4 vs. entries 10-13). When the amount of ethyl phenylphosphinate 2a was increased to 6 equiv., the yield was reduced which might due to the coordinative saturation of the palladium center by the excess amount of 2a, and hence resulted in catalyst deactivation (Table 1, entries 12 and 13). Omitting Ph 2 P(O)OH resulted in a reduced yield, but a little enhanced product enanatioselectivity (Table 1, entry 14 vs. 4). Thus, the optimal reaction conditions were toluene at 60 C with 1 mol% Pd 2 (dba) 3, 2 mol% (R,R)-QuinoxP*, and 4 mol% phosphinic acid. With these optimized conditions in hand, the reaction scope was next examined (Table 2). It was found that a large range of a Reaction conditions: 1 mol% Pd 2 (dba) 3, 2 mol% ligand, and 4 mol% Ph 2 P(O)OH in 1 mL toluene were stirred for 10 min in an argon atmosphere. 0.25 mmol alkynes and 1.0 mmol ethyl phenylphosphinate were added, and the mixture was stirred at the indicated temperature. Isolated yields. Determined by HPLC analysis. b 2 mol% Pd(dba) 2 was used instead of 1 mol% Pd 2 (dba) 3. c 1 equiv. ethyl phenylphosphinate was used. d 3 equiv. ethyl phenylphosphinate was used. e 6 equiv. ethyl phenylphosphinate was used. f 2 mL toluene was used. g Without Ph 2 P(O)OH. alkynes was applicable in this reaction system. The aryl alkynes substituted with electron-donating groups (MeO, Me, Et, n-pent, and t-Bu) (3ba-3ga, 3la, and 3ma) or electron-withdrawing groups (Cl, F, Br, and CF 3 ) (3ha-3ka, and 3na) at the para-or meta-positions were all well-tolerated and gave satisfactory results. 
However, substrates with sterically demanding alkynes gave diminished enantioselectivity (3da, 91% yield, 30% ee). The arene rings having the -CN or -NO 2 groups did not give the desired products. Substrates with thiophene or pyridine moieties worked well under these reaction conditions, providing the desired products 3oa and 3pa with moderate yield and good ee. Internal alkynes, diphenylacetylene, also gave moderate yield and ee (3qa, 69% yield, 61% ee). It was also demonstrated that aliphatic alkynes were functional and gave slightly decreased yields and good ee (3ra-3ta). Phosphinate reacted with a terminal alkyne prior to a disubstituted alkyne to give the enyne 3ta. As shown in Table 2, it was found that this transformation tolerated a variety of alkynes, including heteroaromatic alkynes, aliphatic alkynes and internal alkynes. Encouraged by the results obtained from alkynes, we further attempted to expand this catalytic system with various Hphosphinates to obtain alkenylphosphonates (Table 3). Substrates with methyl ester or propyl ester were also subjected to hydrophosphorylation and the corresponding alkenylphosphonates (3ab and 3ac) were formed in moderate yields and enantioselectivities. A substrate with isopropyl ester gave decreased yield (3ad), only 32% yield, and slightly decreased enantioselectivity, 73% ee. The phenylphosphinate with Me on the arene ring only gave the product 3ae in 47% yield and 37% ee at a higher temperature. Compound 3 with the t-Bu group was obtained in decreased yield compared to 3fa. When secondary phosphine oxides were tested under similar reaction conditions, the hydrophosphorylation product 3af was formed with 28% yield in 54% ee. To evaluate the synthetic potential of the current catalytic system, a gram-scale reaction between phenylacetylene and ethyl phenylphosphinate was performed, and the product 3aa was furnished in 68% yield and 82% ee (Scheme 2a). 
Further synthetic transformations of the hydrophosphorylation products were also illustrated. Compounds 4 and 5 were prepared through a Suzuki-Miyaura coupling without loss of enantiopurity (Scheme 2b). The absolute conguration of the phosphinate product 3ja was conrmed as R-conguration by X-ray crystallography of its derivative 5. 18 The Heck-type reaction of aryl diazonium salts with alkenylphosphinate 3aa led to cisstilbenes 3qa with excellent stereoselectivity without loss of chirality (Scheme 2c, 99% yield and 82% ee) which could make up for the moderate yield and ee of 3qa obtained by direct hydrophosphorylation of diphenylacetylene (Table 2). To construct 1,2-biphosphine derivative, the addition of HPPh 2 to product 3aa was achieved by copper(I)-catalyzed conjugate hydrophosphination to give biphosphine derivative 6 in 92% yield and 1.5 : 1 dr without loss of enantiopurity (Scheme 2d, 79% ee for both diastereomers). 19 A deuterium-labelling experiment has been conducted by using D-P(O)(OMe)Ph as the starting material, 20 giving the Table 2 Substrate scope of alkynes a a Conditions: 1 mol% Pd 2 (dba) 3, 2 mol% ligand L4, and 4 mol% Ph 2 P(O) OH in 1 mL toluene were stirred for 10 min in an argon atmosphere. 0.25 mmol alkynes 1b-1t and 1.0 mmol 2a were added, and the mixture was stirred at 60 C for 20 h. Isolated yields. Determined by HPLC analysis. b 80 C was used instead. Table 3 Substrate scope of phenylphosphinates and phenyphosphine oxide a a Conditions: 1 mol% Pd 2 (dba) 3, 2 mol% ligand L4, and 4 mol% Ph 2 P(O) OH in 1 mL toluene were stirred for 10 min in an argon atmosphere. 0.25 mmol alkynes 1a and 1f and 1.0 mmol phenylphosphinate 2a-2d or phenyphosphine oxide 2e were added, and the mixture was stirred at 60 C for 20 h. Isolated yields. Determined by HPLC analysis. b 80 C was used instead. product 3ab (42%), D 1 -3ab (50%) and a small amount of D 2 -3ab (6%) in which two deuteriums were incorporated at the terminal alkene carbon atoms (Scheme 3). 
This result suggested that the oxidative addition, hydropalladation, and ligand exchange are reversible. On the basis of the deuterium labeling experiment and previous report, 14 we proposed a mechanistic pathway for this catalysis. Chiral palladium complex A is formed from Pd 2 (dba) 3 and (R,R)-QuinoxP*. It is proposed that oxidative addition of the O-H bond of Ph 2 P(O)OH to palladium triggers the reaction to produce the internal palladium intermediate B. The hydropalladation of alkynes takes place first to give an internal alkenylpalladium C by Markovnikov addition. Subsequent ligand exchange of this complex C with phosphinate 2a gives the internal phosphorylpalladium intermediate D. A reduced yield was observed in the absence of Ph 2 P(O)OH. Thus, an alternative pathway is also possible in which the intermediate E is generated directly by the oxidative addition of the P-H bond of 2a to palladium. Then hydropalladation of alkynes takes place to give the same intermediate D. Finally, reductive elimination gives the desired alkenylphosphinate product 3aa and regenerates the active chiral palladium complex A. Conclusions In summary, we have developed an efficient method to synthesize alkenylphosphinates with P-chirality through the first Pd-catalyzed enantioselective hydrophosphorylation of alkynes, showing that this hydrophosphorylation reaction is a powerful and practical approach for the preparation of these valuable P-stereogenic organophosphorus compounds. Studies on further application of these chiral organophosphorus compounds are underway. Conflicts of interest There are no conflicts to declare.
# coding=utf-8
import unittest

from paypal import countries


class TestCountries(unittest.TestCase):
    """Tests for the ISO country-code helpers in ``paypal.countries``."""

    def test_is_valid_country_abbrev(self):
        """Validation accepts either case unless case_sensitive=True."""
        # Use assertTrue/assertFalse rather than assertEqual(True/False, ...)
        # for clearer intent and better failure messages.
        self.assertTrue(countries.is_valid_country_abbrev('US'))
        self.assertTrue(countries.is_valid_country_abbrev('us'))
        # Lower-case input must be rejected in case-sensitive mode.
        self.assertFalse(countries.is_valid_country_abbrev('us',
                                                           case_sensitive=True))

    def test_get_name_from_abbrev(self):
        """Lookups return the full country name; case-sensitive misses raise
        KeyError."""
        us_fullval = 'United States of America'
        self.assertEqual(us_fullval, countries.get_name_from_abbrev('US'))
        self.assertEqual(us_fullval, countries.get_name_from_abbrev('us'))
        # Context-manager form pins the failing call site precisely.
        with self.assertRaises(KeyError):
            countries.get_name_from_abbrev('us', case_sensitive=True)
Douglas E. Moore Early life and education Douglas Elaine Moore was born in 1928 in Hickory, North Carolina. At an early age, he decided to follow in the footsteps of his grandfather and enter the Methodist ministry. Shortly after earning a Bachelor of Arts from North Carolina College in 1949, Moore enrolled at Boston University as a divinity student in 1951. His political leanings were evident early on, as he joined a radical leftist group on campus and participated in protests of social ills. Moore also temporarily joined a student group called the Dialectical Society, which met every week for dinner and a discussion. However, he found the talks largely dissatisfying, viewing them as far too passive and abstract. In addition, he was not too fond of the leader of the Dialectical Society, the then-unknown Martin Luther King, Jr. Referring to him as “just another Baptist preacher”, Moore invited King to join his student group. However, King declined to do so, likely put off by its radicalness and activist agenda. Moore soon parted ways with the Dialectical Society. He earned his Bachelor of Sacred Theology in 1953 and his Master of Sacred Theology in 1958. Move to Durham After graduating, Moore moved back to the American South. He served as the minister for two small-town Methodist churches before becoming the pastor of Durham's Asbury Temple Methodist Church in 1956. Soon after arriving in the city, Moore began to look for ways to challenge the its power structure. Despite the fact that Durham was known for having better-than-average race relations for the region, Moore quickly concluded that it was the “same as any other place: They [the whites] wouldn’t give up nothing”. He made several attempts to desegregate the city's public facilities. After his family was denied admission to the then all-white Long Meadow Park swimming pool in 1957, Moore appealed to Durham recreation officers, to no avail. 
Other efforts included petitions to the city council to end segregation at the Carolina Theatre and the Durham Public Library. While these also resulted in little to no changes, Moore would make headlines later that year via what came to be known as the Royal Ice Cream Sit-in. Royal Ice Cream Sit-in On June 23, 1957, the 28-year-old Moore led three African-American men and three African-American women into the segregated Royal Ice Cream Parlor. They all sat down in the white section and asked to be served. Moore later told a reporter, “We just decided we wanted to cool off, to get some ice cream or milk shakes.” The truth, however, was much more far-reaching than that. Moore later said that the parlor was chosen in advance because of its location in a predominantly-African-American neighborhood. He also indicated that he intended the sit-in to serve as a barometer – a way to see how much progress African-American protestors could make, as well as what they needed to achieve more in the future. In the end, after being asked to leave by the owner of the parlor and refusing to do so, all of the protestors, including Moore, were arrested. They were all convicted of trespassing and fined $10 plus court costs. The sit-in soon turned into a protracted court battle: seeking an ally in his fight for the desegregation of public facilities, Moore hired Floyd McKissick, a prominent African-American attorney, to sue Royal Ice Cream. At the same time, he and the other protestors appealed their convictions. The case eventually made its way to the North Carolina Supreme Court, but the defendants ultimately lost. Criticism and controversy The Royal Ice Cream Sit-in produced much controversy from the start. Moore failed to communicate to the sit-in participants all of the possible consequences of their actions: Virginia Williams and Mary Clyburn, two of the protestors, claimed in later interviews that they had not expected to be arrested. 
Nevertheless, the sit-in was carried out anyway, and there was immediate backlash from African-American groups in Durham. The Durham Committee on Negro Affairs and the Durham Ministerial Alliance heavily criticized Moore, calling his efforts “radical”. Indeed, Moore's call for immediate change directly opposed the practices of the African-American community in Durham. Previously, it had relied on backroom talks with the white elite to bring about concessions in a deliberate manner. Moore's actions came as a surprise to many and threatened to upset the delicate balance that existed in Durham, resulting in a backlash against the protestors from the city's African-American community. The vitriol shocked the sit-in participants, as they had only expected hostile reactions from Durham's white citizens. Mary Clyburn later recalled, “I didn’t hear nobody being happy about what we’ve done”. Durham movement Despite the initial backlash to the sit-in, Moore ultimately helped to bring about much change to Durham. He soon found himself some powerful allies in the city's community, including McKissick and outspoken African-American newspaperman and Carolina Times editor Louis Austin, who just one week prior to the sit-in had run an editorial denouncing Durham's elite African-American institutions. With support from these new allies, Moore was able to drum up support for a Durham-wide movement. The Durham Committee on Negro Affairs’ Economic Committee, headed by McKissick, debated whether or not to boycott Royal Ice Cream Parlor. As Moore himself later revealed, there was doubt as to whether or not this would be a good idea, due to the fact that the parlor's owner, Louis Coletta, was a Greek American and a minority himself. Nevertheless, the mere presence of such a discussion symbolized the growing activist movement in Durham, which was fueled primarily by the city's young African-Americans. 
Challenging the conservatism of the African-American elite, the Durham youth embraced Moore's activist agenda. For instance, a group of young girls held regular pickets outside of the parlor under the direction of McKissick, despite being members of the Durham NAACP, which had refused to publicly support Moore. The Durham movement eventually began to pick up steam, leading to a rapid series of reforms in the coming years. In 1960, the city became just the seventh one in North Carolina to desegregate its lunch counter service. After several years of legal action, the Royal Ice Cream Parlor finally desegregated along with the rest of the city's public facilities in 1963. The pace of the Durham movement surprised even Moore himself. In 1960, four African-American students held their own sit-in at the Woolworth's Department Store in neighboring Greensboro, North Carolina. At the time, Moore and McKissick had been organizing a nationwide sit-in that would begin in Durham. Originally, they felt that the Greensboro students had acted too soon. When McKissick heard about the sit-in, he exclaimed, “Oh my God, these kids have jumped the gun!” Nevertheless, Moore and his allies soon realized that the time had come to take action, realizing that the student activists in Durham wanted to emulate their Greensboro counterparts. Exactly one week after the Greensboro sit-in, Moore and McKissick led one of their own. They led dozens of college students into the heart of Durham, where they sat down at the lunch counters of Woolworth's. When the manager closed the counter, the students moved on the counters at S.H. Kress and Walgreens, which were also shut down. However, the message had been sent, and by the end of the week, students in Charlotte, Raleigh, Winston-Salem, and Fayetteville had joined in the sit-in movement, frightening white store owners by dressing well and staying dignified. The Durham movement had finally begun to spread beyond the city limits. 
Work with King and regional civil rights activists The inspiration behind the Durham movement and the ones it inspired came from an unlikely source. Back in 1955, Moore heard the news that his former classmate at Boston University, Martin Luther King, Jr., was leading a bus boycott in Montgomery, Alabama. Surprised at the change in the once-timid King, Moore decided to write him a letter. In it, he detailed his own experiences with the desegregation of buses in North Carolina and Virginia, noting that by relying “completely upon the force of love and Christian witness”, he was able to achieve his goals. Moore went on to suggest “a regional group which uses the power of nonviolence”, hinting that such a group, were it well-disciplined, could “break the backbone of segregated travel in North Carolina in less than a year”. King, however, continued to display reluctance to partake in Moore's radical agenda. In the end, Moore received only a polite thank-you note from King's secretary. However, he continued to let his faith play a role in his actions. Moore led a group of young Durham activists called “ACT”, which met at church every Sunday to talk about how to test the limits of the South's Jim Crow laws. When the student activism movement began to take flight in Durham, it was backed by the city's African-American churches, especially the female members of the congregations. Moore also became a board member of King's Southern Christian Leadership Conference. With his new-found power, he was able to find new ways to get his message of a nonviolent regional group across the South. For instance, Moore supported the efforts of McKissick to spread the gospel of direct action to African-American students in North Carolina. In addition, the organizers of the SCLC sent out a call for clergyman to organize their congregations for a widespread protest in 1957. The growing movement in the South soon became impossible for King to ignore. 
A week after the Durham sit-ins, he received an invitation from Moore to come to the city, which he accepted. The two visited the lunch counters that had been open just a few days earlier and spoke at White Rock Baptist Church. King gave the sit-in movement his blessing, saying that the student activists had made the sit-in action itself “a creative protest that is destined to be one of the glowing epics of our time”. With the support of King, the movement continued to grow. Moore enlisted the help of other regional leaders such as James M. Lawson, Jr., another Methodist minister. Lawson, like Moore, taught college students at his church in Nashville how to resist violence and employ the power of love to fight against segregation. Right after the Greensboro sit-ins, Moore urged Lawson to take action and organize a sit-in at his local Woolworth's, which he did. Activists around the South were soon making similar moves, thanks to networks set up by Moore and his allies, whose work also helped to popularize what became known as “local movement centers”. These centers can be conceptualized as “micro-social structures” that facilitated the collective actions of African-American activists, especially students, across North Carolina and the rest of the South. As a result, during the spring of 1960, sit-ins spread through these networks and centers to every Southern state except Mississippi. Moore's tireless efforts had paid off, and the era of civil rights in America had begun in full force. Later life in Washington, D.C. At the height of the Durham movement he had fostered for so long, Moore suddenly left the city. Along with Lawson, he was forced out of King's SCLC after several of the organization's members began to regard him as too radical and a threat. Disillusioned, Moore resigned as pastor of Asbury Temple Methodist Church and moved to Central Africa. 
Spending several years as a missionary in the Belgian Congo, he gradually underwent a change in his political views, adopting an even more radical, anti-colonial stance. When he returned to the United States, Moore settled down in Washington, D.C. He demonstrated the change in his political ideology by becoming the leader of the D.C. Black United Front, a black nationalist organization. Moore also ran for – and won – a position on the Council of the District of Columbia in 1974. His uncompromising attitude won him many friends and enemies alike. During his term, Moore ran into trouble with the law. Exhibiting behavior that contradicted his peaceful teachings of the past, Moore was convicted in 1976 of assaulting a white tow truck driver and put under probation. In 1981, he violated one of its conditions and served six months in jail after refusing to take a court-ordered psychiatric exam. Soon after his run-in with the law, Moore concluded that the key to African-American success in America was economics, not politics. He began a career as a “corporate gadfly” and constantly badgered stockholders with questions about the racial biases present in their hiring practices. Later, Moore decided to enter the business world himself. He now owns an energy company that regularly receives multimillion-dollar contracts from the Potomac Electric Power Company and Washington Gas Light Company. Moore served as the pastor of Elijah Methodist Church in Poolesville, Maryland. In 2002, he made a brief return to the political scene by running against Anthony A. Williams for mayor of Washington, D.C. However, he failed to drum up the widespread support necessary to mount a serious challenge in the race, and did not win. Reverend Douglas E. Moore died August 22 after brief hospitalization in Clinton, Md. He was 91. The cause of death was Alzheimer’s disease and pneumonia. Legacy Douglas Moore's legacy is one of an influential civil rights leader in North Carolina and Washington, D.C. 
Rarely budging from his agenda of activism, which was often perceived as radical, Moore received plenty of criticism from whites and African-Americans alike, especially after the short-term failure that came to be known as the Royal Ice Cream Sit-in. However, his persistence enabled him to help bring about the desegregation of the city of Durham, a cause to which he devoted many years of his life. Moore achieved many successes in his fight against segregation by allying with other prominent civil rights activists and inspiring a new generation of young, African-American student protestors. His efforts as a champion of the sit-in movement helped to popularize its use throughout North Carolina and the South. The sit-in movements had ideological roots in nonviolence and Christian ideology, ideas that Moore also circulated throughout the region. Although he rejected some of the causes he had once espoused so fervently later on in his life, by combining his faith with strong leadership, he was able to help bring about much progress in the area of civil rights during a turbulent time in the history of the United States.
Discussion in 'Strength & Conditioning Discussion' started by Affliction™, May 3, 2008. Is there a specific like weight/cardio regimen? Or does it mean a broader term as in just intense training? If there is a certain workout set up you do, can someone post it? Because I can't find one anywhere. HIIT stands for High Intensity Interval Training. It is a general term for a training method. You can apply it to lots of different kinds of things. Running, biking, etc.. you can do it with most exercises. Think running. HIIT running would be sprinting 100m or so, and then resting for 20-30 seconds...then going again. If you only sprint 100m in 20 seconds you're doing it wrong. The rest period is 20 seconds, not the actual sprint. As far as weightlifting is concerned, I'm kinda using HIIT with complexes: clean + front squat + push press. I do like 5 or 10 reps as fast as I can, then a 1-minute rest, start again... I'm not sure whether a 1-minute rest period actually qualifies as HIIT though.
<filename>types/mattermost.go package types import ( "net/url" ) type SlashCommandForm struct { ChannelID string ChannelName string Command string ResponseURL string TeamDomain string TeamID string Text string Token string UserID string Username string } func NewSlashCommandForm(form url.Values) *SlashCommandForm { return &SlashCommandForm{ ChannelID: form.Get("channel_id"), ChannelName: form.Get("channel_name"), Command: form.Get("command"), ResponseURL: form.Get("response_url"), TeamDomain: form.Get("team_domain"), TeamID: form.Get("team_id"), Text: form.Get("text"), Token: form.Get("token"), UserID: form.Get("user_id"), Username: form.Get("username"), } }
Effect of Ribosomal Wash Factors on Inhibition by Chloramphenicol In vitro protein-synthesizing systems from Escherichia coli can be categorized as either chloramphenicol-sensitive or chloramphenicol-insensitive. The chloramphenicol-sensitive systems used in this study required the presence of factors removed from ribosomes with 1.0 m NH4Cl when chromatographically purified ribosomes were used for amino acid incorporation. These ribosomal wash factors inhibited but did not eliminate amino acid incorporation in chloramphenicol-insensitive systems. For both systems, addition of increasing amounts of the ribosomal wash factors increased the sensitivity to chloramphenicol inhibition.
YEREVAN. – President of Armenia Armen Sarkissian signed a decree to appoint Armen Papikyan as the head of Armenia's mission to OSCE. Papikyan is also appointed the head of Armenia’s permanent representation to the UN and other international organizations in Vienna.
/*
 * Copyright 2020, OpenTelemetry Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.opentelemetry.exporters.otlp;

import static com.google.common.truth.Truth.assertThat;

import io.opentelemetry.common.AttributeValue;
import io.opentelemetry.proto.common.v1.AttributeKeyValue;
import io.opentelemetry.proto.common.v1.AttributeKeyValue.ValueType;
import io.opentelemetry.proto.common.v1.InstrumentationLibrary;
import io.opentelemetry.sdk.common.InstrumentationLibraryInfo;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;

/** Unit tests for {@link CommonAdapter}. */
@RunWith(JUnit4.class)
public class CommonAdapterTest {

  /** A boolean SDK attribute maps to a proto KV with BOOL type and the bool value set. */
  @Test
  public void toProtoAttribute_Bool() {
    assertThat(CommonAdapter.toProtoAttribute("key", AttributeValue.booleanAttributeValue(true)))
        .isEqualTo(
            AttributeKeyValue.newBuilder()
                .setKey("key")
                .setBoolValue(true)
                .setType(ValueType.BOOL)
                .build());
  }

  /** A string SDK attribute maps to a proto KV with STRING type and the string value set. */
  @Test
  public void toProtoAttribute_String() {
    assertThat(CommonAdapter.toProtoAttribute("key", AttributeValue.stringAttributeValue("string")))
        .isEqualTo(
            AttributeKeyValue.newBuilder()
                .setKey("key")
                .setStringValue("string")
                .setType(ValueType.STRING)
                .build());
  }

  /** A long SDK attribute maps to a proto KV with INT type and the int value set. */
  @Test
  public void toProtoAttribute_Int() {
    assertThat(CommonAdapter.toProtoAttribute("key", AttributeValue.longAttributeValue(100)))
        .isEqualTo(
            AttributeKeyValue.newBuilder()
                .setKey("key")
                .setIntValue(100)
                .setType(ValueType.INT)
                .build());
  }

  /** A double SDK attribute maps to a proto KV with DOUBLE type and the double value set. */
  @Test
  public void toProtoAttribute_Double() {
    assertThat(CommonAdapter.toProtoAttribute("key", AttributeValue.doubleAttributeValue(100.3)))
        .isEqualTo(
            AttributeKeyValue.newBuilder()
                .setKey("key")
                .setDoubleValue(100.3)
                .setType(ValueType.DOUBLE)
                .build());
  }

  /** Name and version from the SDK info are carried over verbatim into the proto. */
  @Test
  public void toProtoInstrumentationLibrary() {
    InstrumentationLibrary instrumentationLibrary =
        CommonAdapter.toProtoInstrumentationLibrary(
            InstrumentationLibraryInfo.create("name", "version"));
    assertThat(instrumentationLibrary.getName()).isEqualTo("name");
    assertThat(instrumentationLibrary.getVersion()).isEqualTo("version");
  }

  /** A null version becomes an empty string in the proto (proto3 strings are never null). */
  @Test
  public void toProtoInstrumentationLibrary_NoVersion() {
    InstrumentationLibrary instrumentationLibrary =
        CommonAdapter.toProtoInstrumentationLibrary(InstrumentationLibraryInfo.create("name", null));
    assertThat(instrumentationLibrary.getName()).isEqualTo("name");
    assertThat(instrumentationLibrary.getVersion()).isEmpty();
  }
}
Over 90,000 people per year are developing life threatening infections caused by the drug resistant staph germ MRSA. The germ is common in hospitals, and is spreading to schools, prisons and locker rooms.10% of those infected develop flesh eating disease. 21 schools in Bedford, Virginia have been shut down following the death of 17 year old student Ashton Bonds, who contracted the deadly disease. Hospitalized for over 2 weeks, all antibiotic treatments failed to halt the germs spread throughout the teens body, finally spreading to his kidneys, liver, lungs and the muscles around his heart. Many of the infections are being spread in gyms and locker rooms, where athletes - perhaps suffering from cuts or abrasions, share sports equipment or towels . Bonds played football last year but was not playing this season. His death prompted the school closings. Wednesday will involve intensive cleaning of all surfaces and equipment in the school buildings, bathrooms, gymnasiums and locker rooms. The staph germ can be carried by perfectly healthy individuals, housing itself on the skins surface or inside the nose. It is associated with sometimes-horrific skin infections, but it also causes blood infections, pneumonia and other illnesses. A study of the "Superbug" conducted by the Center For Disease Control revealed that the number of incidences reported correlate to 32 out of every 100,000 people contracting the disease. "That's an "astounding" figure, said an editorial in Wednesday's Journal of the American Medical Association, which published the study." Researches found that approximately 1/4 of cases involved persons that were hospitalized. Those considered hospitalized include patients in long term care facilities, persons receiving dialysis, or recovering from recent surgeries. Open wounds and exposure to medical equipment are the most common means of contracting the staph infection. 
There were 988 reported deaths among infected people in the study, for a rate of 6.3 per 100,000. That would translate to 18,650 deaths annually, although the researchers don't know if MRSA was the cause in all cases. "If these deaths all were related to staph infections, the total would exceed other better-known causes of death including AIDS which killed an estimated 17,011 Americans in 2005 - said Dr. Elizabeth Bancroft of the Los Angeles County Health Department, the editorial author." Hygiene, combined with doctors not over prescribing antibiotics are the first lines of defense. Hospitals are taking extra precautions when admitting anyone with an open wound that appears infected, isolating them from the general population until tests for MRSA have been completed. Frequent hand washing, as well as not sharing athletic equipment is also recommended.
On the Measurement of the International Propagation of Shocks In this paper I offer an alternative identification assumption that allows one to test for changing patterns regarding the international propagation of shocks when endogenous variables, omitted variables, and heteroskedasticity are present in the data. Using this methodology, I demonstrate that the propagation mechanisms of 36 stock markets remained relatively stable throughout the last three major international crises which have been associated with 'contagion' (i.e., Mexico 1994, Hong Kong 1997, and Russia 1998). These findings cast considerable doubt upon theories that suggest that the propagation of shocks is crisis contingent, and driven by endogenous liquidity issues, multiple equilibria, and political contagion. Rather, these findings would seem to support theories that identify such matters as trade, learning, and aggregate shocks as the primary transmission mechanisms in this process.
Classical but effective techniques for estimating cardiovascular dynamics This article first describes the effectiveness and the limitations of classical approaches based on linear models for the cardiovascular system. The authors then introduce their own work on less-invasive and real-time estimation of cardiac output flow and pump function in the clinical setting. It is shown that system identification techniques using linear models can yield unexpectedly accurate results useful for patient care.
<filename>meetjs-server/src/controllers/session.controllers.go package controllers import ( "meetjs-server/src/interfaces" "meetjs-server/src/utils" "net/http" "github.com/gin-gonic/gin" "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" ) // CreateSession - Creates user session func CreateSession(ctx *gin.Context) { db := ctx.MustGet("db").(*mongo.Client) collection := db.Database("MeetJS").Collection("sessions") var session interfaces.Session if err := ctx.ShouldBindJSON(&session); err != nil { ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } session.Password = utils.HashPassword(session.Password) result, _ := collection.InsertOne(ctx, session) insertedID := result.InsertedID.(primitive.ObjectID).Hex() url := CreateSocket(session, ctx, insertedID) ctx.JSON(http.StatusOK, gin.H{"socket": url}) }
Thirty-five days before his new album launch, Uganda’s king of baritone, Jimmy Katumba, 51, passed away. Katumba died on Sunday at Mengo hospital of peptic ulcers. Close sources said he was also diagnosed with kidney and liver problems. Katumba was to launch a new album, comprising redone versions of his old songs, on September 16. He had been battling with peptic ulcers for 30 years and was taken to hospital for immediate surgery on July 13. According to Aisha Ahmad, Katumba’s friend, “he was strong and doing well. He was supposed to be discharged on August 7, 2006 but the doctors decided to carry out a thorough examination after he developed swollen feet. Katumba started his music career as an eight-year-old singer in Mukono church of Uganda where his father, the Rev. Blaiso Katumba, was a preacher. In 1977, he formed the Light Bearers, which he renamed Jimmy Katumba and the Ebonies. Popularly known for his baritone voice, Katumba became celebrated for songs like Twalina Omukwano, Drums of Africa and Fa Kukyolina. He left for the UK in 1990. He went to the United States in 1992 and returned in 1995. How much time do women spend in front of a mirror?
from unittest import TestCase

from online_cognacy_ident.align import needleman_wunsch


class AlignTestCase(TestCase):
    """Tests for the Needleman-Wunsch pairwise alignment helper."""

    def test_needleman_wunsch(self):
        # The helper returns a (score, alignment) pair; check each part.
        score, alignment = needleman_wunsch("AAAAABBBB", "AACAABBCB")

        self.assertEqual(score, 5.0)
        self.assertEqual(
            alignment,
            (('A', 'A'), ('A', 'A'), ('A', 'C'), ('A', 'A'), ('A', 'A'),
             ('B', 'B'), ('B', 'B'), ('B', 'C'), ('B', 'B')),
        )
A Highly Efficient Activated Carbon by Chemical Activation Method for Adsorption of Paraquat (Toxin) The optimum condition for preparing a highly efficient activated carbon has been investigated in this work. The effects of different activation temperatures on the pore structure and surface morphology of highly efficient activated carbon (AC) derived from waste palm shell by chemical activation method using phosphoric acid as activating agent were studied. For activation, different activation temperatures in the range of 550 °C-650 °C were carried out. Activated carbon with well developed pore size were produced at activation temperature of 600 °C for 2 hours. At this temperature the Brunauer, Emmett and Teller (BET) surface areas are 1287 m2g-1, the total pore volume for adsorption and desorption are 0.742 cm3 g-1. Scanning Electron Microscope also confirmed the porosity of the highly efficient activated carbon. Finally it was tested in vitro to determine its adsorbing capacity for paraquat as a toxin. For optimum adsorption ability of activated carbon for paraquat, 0.9% NaCl solution is the most suitable solvent. The paraquat preferentially adsorbed onto the activated carbon in NaCl solution. The adsorption ability of the activated carbon (the amount adsorbed) for paraquat observed to be 99.9 mg g-1.
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.

===================================================================*/

#ifndef MITKIMAGECROPPEREVENTINTERFACE_H
#define MITKIMAGECROPPEREVENTINTERFACE_H

#include "itkObject.h"
#include "mitkOperation.h"
#include "mitkOperationActor.h"

// Forward declaration; the full Qt view class is only needed in the .cpp.
class QmitkImageCropper;

namespace mitk
{
  /**
   * \brief Adapter that receives undo/redo Operations and forwards them to a
   *        QmitkImageCropper instance.
   *
   * Implements the OperationActor interface so the MITK undo machinery can
   * address the cropper without depending on the Qt view type directly.
   */
  class ImageCropperEventInterface : public itk::Object, public OperationActor
  {

  public:

    ImageCropperEventInterface();
    ~ImageCropperEventInterface();

    // Registers the cropper that will execute forwarded operations.
    // NOTE(review): stored as a raw pointer — assumed non-owning, and the
    // cropper must outlive this interface; confirm against the caller.
    void SetImageCropper( QmitkImageCropper* imageCropper )
    {
      m_ImageCropper = imageCropper;
    }

    // OperationActor interface: delegates `op` to the registered cropper
    // (implementation lives in the .cpp).
    virtual void ExecuteOperation(mitk::Operation* op) override;

  private:

    QmitkImageCropper* m_ImageCropper;  // non-owning target of forwarded ops

  };

}

#endif // MITKIMAGECROPPEREVENTINTERFACE_H
// Cap places a hard limit on the number of items, returning all transactions
// exceeding that limit.
//
// The map and the index are kept consistent: the index is fully sorted,
// the largest entries (presumably the highest nonces — confirm against the
// txSortedMap declaration) are dropped from both structures, and the heap
// invariant is restored afterwards.
func (m *txSortedMap) Cap(threshold int) types.Transactions {
	// Fast path: already within the limit, nothing to evict.
	if len(m.items) <= threshold {
		return nil
	}
	var drops types.Transactions

	// Sort the index ascending so the entries to evict sit at the tail.
	sort.Sort(*m.index)
	for size := len(m.items); size > threshold; size-- {
		drops = append(drops, m.items[(*m.index)[size-1]])
		delete(m.items, (*m.index)[size-1])
	}
	// Truncate the index to the survivors and re-establish the heap
	// invariant that the sort above destroyed.
	*m.index = (*m.index)[:threshold]
	heap.Init(m.index)

	// Shrink the flattened cache in lockstep so it never references
	// dropped transactions.
	if m.cache != nil {
		m.cache = m.cache[:len(m.cache)-len(drops)]
	}
	return drops
}
<filename>bootstrap-business/src/test/java/org/ligoj/bootstrap/core/curl/AuthCurlProcessorTest.java /* * Licensed under MIT (https://github.com/ligoj/ligoj/blob/master/LICENSE) */ package org.ligoj.bootstrap.core.curl; import org.apache.http.auth.AUTH; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; /** * Test class of {@link AuthCurlProcessor} */ class AuthCurlProcessorTest { /** * Process with provided and not empty credentials. */ @Test void process() { var request = new CurlRequest("", "", ""); try (final CurlProcessor processor = new AuthCurlProcessor("junit", "passwd") { @Override protected boolean call(final CurlRequest request, final String url) { return true; } }) { processor.process(request); Assertions.assertEquals("Basic anVuaXQ6cGFzc3dk", request.getHeaders().get(AUTH.WWW_AUTH_RESP)); request = new CurlRequest("", "", ""); processor.process(request); Assertions.assertEquals("Basic anVuaXQ6cGFzc3dk", request.getHeaders().get(AUTH.WWW_AUTH_RESP)); } } /** * Process without provided user. */ @Test void processNoUser() { final var request = new CurlRequest("", "", ""); try (final CurlProcessor processor = new AuthCurlProcessor("", "any") { @Override protected boolean call(final CurlRequest request, final String url) { return true; } }) { processor.process(request); Assertions.assertFalse(request.getHeaders().containsKey(AUTH.WWW_AUTH_RESP)); } } }
Exploring the Factors Affecting Sustainable Human Resource Productivity in Railway Lines This study aimed to identify the critical factors and items affecting the productivity of sustainable human resources in a Railway Operation Company based on the perceptions of employees and managers in the Human Resources Department. The research was applied in terms of its objectives, and a descriptive survey was employed as the method. The statistical population of the current study consisted of all employees and managers of the Human Resources Department of the company. Random sampling was employed to collect data and the sample size was 191 people according to Morgan's Table. Methods including the correlation coefficient, multivariate regression, and factor analysis were employed for data analysis. The findings highlight the main factors and items affecting labour productivity in the Urban and Suburban Railway Operation Company as perceived by the Human Resources Department, which were mainly related to human resources management and could be attributed to motivation and requirements for their effective contribution to the improvement of public welfare. Organizational Attitude and Culture, Leadership Style, and Bonus and Ergonomics were extracted as factors affecting productivity or as independent variables. This study is the first study that has aimed to discuss the perceptions of the Human Resources Department active in a company. As such, the study highlights the standpoint of the main decision makers in the Urban and Suburban Railway Operation Company with regard to labour productivity in the urban and suburban sector.
<reponame>opticpower/tracker import styled from 'styled-components'; import { Row, Col, Spacer, Select } from '@geist-ui/react'; import { Sun, Moon } from '@geist-ui/react-icons'; import { useSelector, useDispatch } from 'react-redux'; import { setTheme } from '../redux/actions/settings.actions'; import { getTheme } from '../redux/selectors/settings.selectors'; const darkLogo = '/images/opticDarkLogo.png'; const lightLogo = '/images/opticLightLogo.png'; const NavBar = styled(Row)` align-items: center !important; height: 50px; border-bottom: ${props => `0.5px solid ${props.theme.palette.border}`}; padding: 0px 15px; `; const Logo = styled.img` height: 40px; `; const SelectContainer = styled(Col)` text-align: right; `; const SelectOption = styled(Select.Option)` & > span { display: flex; align-items: center; } `; const Nav = (): JSX.Element => { const theme = useSelector(getTheme); const dispatch = useDispatch(); return ( <NavBar> <Col> <Logo src={theme === 'light' ? darkLogo : lightLogo} alt="OpticPower" /> </Col> <SelectContainer> <Select value={theme} onChange={theme => { if (Array.isArray(theme)) { return dispatch(setTheme(theme[0])); } return dispatch(setTheme(theme)); }} > <SelectOption value="dark"> <Moon size={16} /> <Spacer inline x={0.35} /> Dark Mode </SelectOption> <SelectOption value="light"> <Sun size={16} /> <Spacer inline x={0.35} /> Light Mode </SelectOption> </Select> </SelectContainer> </NavBar> ); }; export default Nav;
package org.telegram.ui.Components;

import android.animation.Animator;
import android.animation.AnimatorListenerAdapter;
import android.animation.ObjectAnimator;
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.text.Layout;
import android.text.StaticLayout;
import android.text.TextPaint;
import android.text.TextUtils;
import android.util.TypedValue;

import androidx.annotation.Keep;
import androidx.dynamicanimation.animation.FloatPropertyCompat;
import androidx.dynamicanimation.animation.SpringAnimation;
import androidx.dynamicanimation.animation.SpringForce;

import org.telegram.messenger.AndroidUtilities;

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

/**
 * Phone-number input that can animate its hint characters (per-character
 * spring fade) and, optionally, the digits themselves (per-character
 * slide/fade — currently disabled via USE_NUMBERS_ANIMATION).
 */
public class AnimatedPhoneNumberEditText extends HintEditText {
    // Spring values are scaled by this factor because SpringAnimation works
    // best on larger ranges; real alpha values live in [0, 1].
    private final static float SPRING_MULTIPLIER = 100f;
    // Compile-time switch for the per-digit animation path in onDraw/setNewText.
    private final static boolean USE_NUMBERS_ANIMATION = false;

    // One StaticLayout per visible character of the current / previous text.
    private ArrayList<StaticLayout> letters = new ArrayList<>();
    private ArrayList<StaticLayout> oldLetters = new ArrayList<>();
    private TextPaint textPaint = new TextPaint(Paint.ANTI_ALIAS_FLAG);
    private ObjectAnimator animator;
    // Transition progress for the digit animation: -1 (old text) -> 0 (new text).
    private float progress;
    private String oldText = "";

    // Per-character hint fade state, driven by one SpringAnimation each.
    private HintFadeProperty hintFadeProperty = new HintFadeProperty();
    private List<Float> hintAnimationValues = new ArrayList<>();
    private List<SpringAnimation> hintAnimations = new ArrayList<>();

    // null until the first setHintText call; then tracks hint visibility.
    private Boolean wasHintVisible;
    private String wasHint;
    private Runnable hintAnimationCallback;

    public AnimatedPhoneNumberEditText(Context context) {
        super(context);
    }

    /**
     * Sets the hint, fading characters in/out when visibility flips and the
     * field itself is empty. A hide animation keeps showing the previous hint
     * until the springs finish, then commits the new (empty) value.
     */
    @Override
    public void setHintText(String value) {
        boolean show = !TextUtils.isEmpty(value);
        boolean runAnimation = false;
        if (wasHintVisible == null || wasHintVisible != show) {
            // Visibility flipped: drop any in-flight per-character springs.
            hintAnimationValues.clear();
            for (SpringAnimation a : hintAnimations) {
                a.cancel();
            }
            hintAnimations.clear();
            wasHintVisible = show;
            // Only animate when there is no typed text covering the hint.
            runAnimation = TextUtils.isEmpty(getText());
        }
        // When hiding, animate over the outgoing hint's characters.
        String str = show ? value : wasHint;
        if (str == null) str = "";
        wasHint = value;
        if (show || !runAnimation) {
            super.setHintText(value);
        }
        if (runAnimation) {
            runHintAnimation(str.length(), show, () -> {
                hintAnimationValues.clear();
                for (SpringAnimation a : hintAnimations) {
                    a.cancel();
                }
                if (!show) {
                    // Commit the new hint only after the fade-out completed.
                    super.setHintText(value);
                }
            });
        }
    }

    @Override
    public String getHintText() {
        return wasHint;
    }

    /**
     * Starts one staggered spring per hint character (5 ms apart) fading
     * alpha 0->1 (show) or 1->0 (hide), then posts `callback` after the last
     * spring had time to settle.
     */
    private void runHintAnimation(int length, boolean show, Runnable callback) {
        if (hintAnimationCallback != null) {
            removeCallbacks(hintAnimationCallback);
        }
        for (int i = 0; i < length; i++) {
            float startValue = show ? 0 : 1, finalValue = show ? 1 : 0;
            SpringAnimation springAnimation = new SpringAnimation(i, hintFadeProperty)
                    .setSpring(new SpringForce(finalValue * SPRING_MULTIPLIER)
                            .setStiffness(500)
                            .setDampingRatio(SpringForce.DAMPING_RATIO_NO_BOUNCY)
                            .setFinalPosition(finalValue * SPRING_MULTIPLIER))
                    .setStartValue(startValue * SPRING_MULTIPLIER);
            hintAnimations.add(springAnimation);
            hintAnimationValues.add(startValue);
            postDelayed(springAnimation::start, i * 5L);
        }
        postDelayed(hintAnimationCallback = callback, length * 5L + 150L);
    }

    // Keep the private textPaint in sync with the widget's own paint so the
    // per-character layouts render identically to normal text.
    @Override
    public void setTextSize(int unit, float size) {
        super.setTextSize(unit, size);
        textPaint.setTextSize(TypedValue.applyDimension(unit, size, getResources().getDisplayMetrics()));
    }

    @Override
    public void setTextColor(int color) {
        super.setTextColor(color);
        textPaint.setColor(color);
    }

    @Override
    protected void onTextChanged(CharSequence text, int start, int lengthBefore, int lengthAfter) {
        super.onTextChanged(text, start, lengthBefore, lengthAfter);
        if (USE_NUMBERS_ANIMATION && !isTextWatchersSuppressed()) {
            setNewText(text.toString().trim());
        }
    }

    /**
     * When the digit animation is enabled, hides the widget's own text
     * (drawn transparent) and draws the per-character layouts instead,
     * cross-fading/sliding old characters out while progress goes -1 -> 0.
     */
    @Override
    protected void onDraw(Canvas canvas) {
        if (USE_NUMBERS_ANIMATION) {
            int color = getCurrentTextColor();
            setTextColor(Color.TRANSPARENT);
            super.onDraw(canvas);
            setTextColor(color);

            if (letters.isEmpty() && oldLetters.isEmpty()) {
                return;
            }
            float height = letters.isEmpty() ? oldLetters.get(0).getHeight() : letters.get(0).getHeight();
            float x = 0;
            float oldDx = 0;
            canvas.save();
            canvas.translate(getPaddingLeft() + x, (getMeasuredHeight() - height) / 2);
            int count = Math.max(letters.size(), oldLetters.size());
            for (int a = 0; a < count; a++) {
                canvas.save();
                StaticLayout old = a < oldLetters.size() ? oldLetters.get(a) : null;
                StaticLayout layout = a < letters.size() ? letters.get(a) : null;
                if (progress < 0) {
                    // Mid-transition: old char slides down/fades, new char
                    // slides in from above with complementary alpha.
                    if (old != null) {
                        textPaint.setAlpha((int) (255 * -progress));
                        canvas.save();
                        canvas.translate(oldDx, (1f + progress) * height);
                        old.draw(canvas);
                        canvas.restore();
                    }
                    if (layout != null) {
                        if (a == count - 1 || old != null) {
                            textPaint.setAlpha((int) (255 * (1f + progress)));
                            canvas.translate(0, -progress * height);
                        } else {
                            textPaint.setAlpha(255);
                        }
                    }
                } else if (layout != null) {
                    textPaint.setAlpha(255);
                }
                if (layout != null) {
                    layout.draw(canvas);
                }
                canvas.restore();
                // NOTE(review): due to precedence, dp(1) is added to the old
                // layout's width only, not to both branches — looks
                // intentional for spacing but worth confirming.
                canvas.translate(layout != null ? layout.getLineWidth(0) : old.getLineWidth(0) + AndroidUtilities.dp(1), 0);
                if (layout != null && old != null) {
                    oldDx += old.getLineWidth(0) - layout.getLineWidth(0);
                }
            }
            canvas.restore();
        } else super.onDraw(canvas);
    }

    /**
     * Diffs `text` against the previous value character by character,
     * reusing unchanged layouts and animating progress from -1 to 0 for the
     * characters that changed.
     */
    public void setNewText(String text) {
        if (oldLetters == null || letters == null || Objects.equals(oldText, text)) return;

        if (animator != null) {
            animator.cancel();
            animator = null;
        }

        oldLetters.clear();
        oldLetters.addAll(letters);
        letters.clear();

        // First non-empty text replaces everything (no per-char diffing).
        boolean replace = TextUtils.isEmpty(oldText) && !TextUtils.isEmpty(text);
        progress = 0;
        for (int a = 0; a < text.length(); a++) {
            String ch = text.substring(a, a + 1);
            String oldCh = !oldLetters.isEmpty() && a < oldText.length() ? oldText.substring(a, a + 1) : null;
            if (!replace && oldCh != null && oldCh.equals(ch)) {
                // Unchanged character: reuse the existing layout and mark the
                // old slot consumed so it is not drawn twice.
                letters.add(oldLetters.get(a));
                oldLetters.set(a, null);
            } else {
                if (replace && oldCh == null) {
                    // Pad the old list with an empty layout to keep indices aligned.
                    oldLetters.add(new StaticLayout("", textPaint, 0, Layout.Alignment.ALIGN_NORMAL, 1.0f, 0.0f, false));
                }
                StaticLayout layout = new StaticLayout(ch, textPaint, (int) Math.ceil(textPaint.measureText(ch)), Layout.Alignment.ALIGN_NORMAL, 1.0f, 0.0f, false);
                letters.add(layout);
            }
        }
        if (!oldLetters.isEmpty()) {
            // Drive progress -1 -> 0; onDraw interpolates the cross-fade.
            animator = ObjectAnimator.ofFloat(this, "progress", -1, 0);
            animator.setDuration(150);
            animator.addListener(new AnimatorListenerAdapter() {
                @Override
                public void onAnimationEnd(Animator animation) {
                    animator = null;
                    oldLetters.clear();
                }
            });
            animator.start();
        }
        oldText = text;
        invalidate();
    }

    // Applies the current spring value as alpha just before HintEditText
    // draws each hint character.
    @Override
    protected void onPreDrawHintCharacter(int index, Canvas canvas, float pivotX, float pivotY) {
        if (index < hintAnimationValues.size()) {
            hintPaint.setAlpha((int) (hintAnimationValues.get(index) * 0xFF));
        }
    }

    // Reflection target for ObjectAnimator.ofFloat(this, "progress", ...);
    // @Keep prevents it from being stripped.
    @Keep
    public void setProgress(float value) {
        if (progress == value) {
            return;
        }
        progress = value;
        invalidate();
    }

    @Keep
    public float getProgress() {
        return progress;
    }

    /**
     * Bridges SpringAnimation (which animates a single float) to one slot of
     * hintAnimationValues; the Integer "object" is the character index.
     */
    private final class HintFadeProperty extends FloatPropertyCompat<Integer> {
        public HintFadeProperty() {
            super("hint_fade");
        }

        @Override
        public float getValue(Integer object) {
            return object < hintAnimationValues.size() ? hintAnimationValues.get(object) * SPRING_MULTIPLIER : 0;
        }

        @Override
        public void setValue(Integer object, float value) {
            if (object < hintAnimationValues.size()) {
                hintAnimationValues.set((int) object, value / SPRING_MULTIPLIER);
                invalidate();
            }
        }
    }
}
package exchange

import (
	"github.com/prebid/prebid-server/adapters"
	"github.com/prebid/prebid-server/adapters/appnexus"
	"github.com/prebid/prebid-server/adapters/conversant"
	"github.com/prebid/prebid-server/adapters/facebook"
	"github.com/prebid/prebid-server/adapters/index"
	"github.com/prebid/prebid-server/adapters/lifestreet"
	"github.com/prebid/prebid-server/adapters/pubmatic"
	"github.com/prebid/prebid-server/adapters/pulsepoint"
	"github.com/prebid/prebid-server/adapters/rubicon"
	"github.com/prebid/prebid-server/config"
	"github.com/prebid/prebid-server/openrtb_ext"

	"net/http"
)

// The newAdapterMap function is segregated to its own file to make it a simple and clean location for each Adapter
// to register itself. No wading through Exchange code to find it.
//
// Modern bidders are wrapped with adaptBidder; legacy ones go through
// adaptLegacyAdapter until the per-adapter upgrade TODOs below are done.
// Endpoint/credential values come from the cfg.Adapters map keyed by the
// lowercase bidder name.
func newAdapterMap(client *http.Client, cfg *config.Configuration) map[openrtb_ext.BidderName]adaptedBidder {
	return map[openrtb_ext.BidderName]adaptedBidder{
		openrtb_ext.BidderAppnexus: adaptBidder(appnexus.NewAppNexusBidder(client, cfg.ExternalURL), client),
		// TODO #267: Upgrade the Conversant adapter
		openrtb_ext.BidderConversant: adaptLegacyAdapter(conversant.NewConversantAdapter(adapters.DefaultHTTPAdapterConfig, cfg.Adapters["conversant"].Endpoint, cfg.Adapters["conversant"].UserSyncURL, cfg.ExternalURL)),
		// TODO #211: Upgrade the Facebook adapter
		openrtb_ext.BidderFacebook: adaptLegacyAdapter(facebook.NewFacebookAdapter(adapters.DefaultHTTPAdapterConfig, cfg.Adapters["facebook"].PlatformID, cfg.Adapters["facebook"].UserSyncURL)),
		// TODO #212: Upgrade the Index adapter
		openrtb_ext.BidderIndex: adaptLegacyAdapter(index.NewIndexAdapter(adapters.DefaultHTTPAdapterConfig, cfg.Adapters["indexexchange"].Endpoint, cfg.Adapters["indexexchange"].UserSyncURL)),
		// TODO #213: Upgrade the Lifestreet adapter
		openrtb_ext.BidderLifestreet: adaptLegacyAdapter(lifestreet.NewLifestreetAdapter(adapters.DefaultHTTPAdapterConfig, cfg.ExternalURL)),
		// TODO #214: Upgrade the Pubmatic adapter
		openrtb_ext.BidderPubmatic: adaptLegacyAdapter(pubmatic.NewPubmaticAdapter(adapters.DefaultHTTPAdapterConfig, cfg.Adapters["pubmatic"].Endpoint, cfg.ExternalURL)),
		// TODO #215: Upgrade the Pulsepoint adapter
		openrtb_ext.BidderPulsepoint: adaptLegacyAdapter(pulsepoint.NewPulsePointAdapter(adapters.DefaultHTTPAdapterConfig, cfg.Adapters["pulsepoint"].Endpoint, cfg.ExternalURL)),
		openrtb_ext.BidderRubicon: adaptBidder(rubicon.NewRubiconBidder(client, cfg.Adapters["rubicon"].Endpoint, cfg.Adapters["rubicon"].XAPI.Username, cfg.Adapters["rubicon"].XAPI.Password, cfg.Adapters["rubicon"].XAPI.Tracker, cfg.Adapters["rubicon"].UserSyncURL), client),
	}
}
<reponame>JeffreyAsuncion/CSPT15_ComputerScienceFundamentals """ Objective 13 - Create user-defined classes and interact with instances of those classes """ """ Below, you'll find a class definition for animals. Create two new animals `cat` and `dog`. Set `cat` to have a name of "Purrfect", kind of "cat", and color of "brown". Set `dog` to have a name of "Fido", kind of "dog", and color of "black". """ class Animal: name = "" kind = "" color = "" def description(self): return "%s is a %s %s." % (self.name, self.color, self.kind) # Create instances of Animal here and modify the instance attributes # as described above. # YOUR CODE HERE cat = Animal() cat.name = "Purrfect" cat.kind = "cat" cat.color = "brown" dog = Animal() dog.name = "Fido" dog.kind = "dog" dog.color = "brown" # Should print Purrfect is a brown cat. print(cat.description()) # Should print Fido is a black dog. print(dog.description())
Management considerations in Addisons disease complicating pregnancy Addisons disease is a rare endocrine disorder. We present the management of a pregnant lady with Addisons disease. She was managed jointly with the endocrinologists and the titrated dose of hydrocortisone and fludrocortisone dosage was regulated. Her antenatal care was uneventful and she had emergency caesarean section at 39 weeks in view of abnormal CTG. Her labour was appropriately covered with rescue steroids. However she went into hypotension immediately after delivery. She was resuscitated and was managed further in ICU where high dose steroids were given. She made an uneventful recovery. Key Messages: 1. Management of pregnancy with Addisons disease is challenging but are often achieved with appropriate multidisciplinary management; 2. Patients with Addisons disease tolerate pregnancy well if the replacement steroids are adjusted and monitored closely; 3. Labour and delivery need to be managed cautiously with adequate steroid replacement to ensure successful outcomes; 4. Differentials need to be considered when adequate steroid cover has been provided and still there is a picture of Addisonian crisis. © This is an open access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/) which permits unrestricted use, distribution, and reproduction in any medium, provided the original author and source are credited. Introduction Addison's disease (Primary adrenal insufficiency) is a rare endocrine disorder of adrenal cortex where there is insufficient production of glucocorticoids and mineralocorticoids. Addisonian crisis is a life threatening event and may accompany stressful conditions. Pregnant women with Addison's disease need to be managed with care as crisis can occur in situations like labour, puerperium, hyperemesis gravidarum. 
Here we discuss the management issues around pregnancy and delivery by presenting the case of a pregnant lady with Addison's disease who also developed severe hypotension post-delivery. Case History A 25-year-old primigravida, a known case of Addison's disease, booked for her antenatal care at our hospital at 12 weeks of gestation. She was diagnosed with Addison's disease three years back during her evaluation for excessive tiredness and hyperpigmentation. Her serum cortisol levels at 30 and 60 minutes post 250 mcg IV ACTH were 4.7 and 4.9 mcg/dl (normal: ≥18 mcg/dl) respectively, with normal electrolytes, thyroid functions, and adrenals on imaging. She was commenced on hydrocortisone 20 mg/day and fludrocortisone 100 mcg/day replacement and was stable. Her pregnancy was jointly managed with the endocrinologist. She was continued on her steroid replacement dose, wherein hydrocortisone was gradually increased to 40 mg/day by the end of the third trimester, and she was monitored closely for signs and symptoms of under- and over-replacement of steroids. She had an uneventful pregnancy with normal growth scans. She went into spontaneous labour at 39 weeks' gestation. She received adequate stress-cover steroids (Inj. hydrocortisone 100 mg 8th hourly for cover during labour). She needed emergency LSCS for CTG abnormalities. She had epidural analgesia but needed spinal anaesthesia as the epidural top-up did not work. However, peroperatively she developed profound hypotension (unrecordable BP, bradycardia) and was resuscitated with IV fluids, vasopressors and steroids, and ventilated. Postoperatively in the ICU she developed neurological deficits (disorientation, slurring of speech, ataxia) on Day 1. MRI revealed an undiagnosed Arnold–Chiari malformation with cerebellar tonsillar herniation (CTH). She was managed conservatively and the neurological deficits were completely reversed by Day 3.
Discussion The majority of women with Addison's disease have normal pregnancy outcomes, and overall risks are generally considered to be low as per the population based study from Sweden. Successful outcome in pregnant women with Addison's disease are often achieved by a multidisciplinary model of care with careful monitoring of foetal growth. 1 Pregnancy and labour in these women need to be managed with adequate steroid coverage and close monitoring. During normal pregnancy, circulating cortisol concentration is increased 2-to 3-fold, with a continuous increase from the first trimester onward due to increases in CBG levels. Hence there is a need to increase hydrocortisone dose by 20-40% from the 24th week onward to reflect the physiological cortisol levels. Hydrocortisone is generally preferred over other steroids and Dexamethaone is usually contraindicated because it crosses the placenta. Close monitoring for clinical symptoms and signs like normal weight gain, fatigue, postural hypotension or hypertension, hyperglycemia has to be made. These indicate the adequacy of the glucocorticoid dosage. Mineralocorticoid requirements during pregnancy are harder to assess but serum electrolyte levels help in the assessment. Though there may be a need to increase the fludrocortisone dose this is covered by the increase in hydrocortisone in most cases. 2 Adrenal crisis can happen due to insufficient glucocorticoid dosage or due to stressful situations. Addisonian crisis which is a life threatening event can occur in pregnancy in situations like labour, puerperium, hyperemesis gravidarum. Adequate stress cover steroids need to be provided to prevent crisis. 3 A bolus injection of 100 mg hydrocortisone IV followed by continuous infusion of 200 mg hydrocortisone over 24 hours has been recommended. This case emphasises the necessity for considering differential diagnosis for Addisonian crisis. 
Our patient received appropriate steroid replacement in pregnancy as well as adequate steroid cover during labour. Peroperatively she developed profound hypotension and bradycardia and this could be due to addisonian crisis but possibility of high spinal hypotension need to be considered, especially in the background of adequate maintenance and stress glucocorticoid coverage. Her neurological symptoms were possibly due to the aggravation of the tonsillar herniation due to cerebrospinal fluid leak after the spinal. Addison's can be due to a variety of causes though autoimmune adrenalitis and Tuberculosis in developing countries account for the majority of adult cases. 3 A rare entity, Triple A (Allgrove) syndrome has been described where there is association of Addison's disease with Arnold chiari malformation. Achalasia and alacrimia are also associated and it is due to autosomal recessive inheritance of triple A syndrome gene, designated AAAS located on chromosome 12q13. 4 Source of Funding None. Conflict of Interest None.
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Plots relating to the GPS."""

import types

from makani.analysis.plot.python import mplot
from matplotlib.pyplot import cm
from matplotlib.pyplot import plot
import numpy

MFig = mplot.PlotGroup.MFig  # pylint: disable=invalid-name

# Name of the satellite system based on bits 19-16 on the channel tracking
# status message, according to
# https://www.novatel.com/assets/Documents/Manuals/om-20000129.pdf#page=590
_SATELLITE_SYSTEM = {
    0: 'GPS',
    1: 'GLONASS',
    2: 'SBAS',
    3: 'Galileo',
    4: 'BeiDou',
    5: 'QZSS',
    6: 'Reserved',
    7: 'Other'
}

# Signal type on bits 25-21 on the channel tracking status message, according to
# https://www.novatel.com/assets/Documents/Manuals/om-20000129.pdf#page=590
_SIGNAL_TYPE = {
    'GPS': {
        0: 'L1 C/A',
        5: 'L2 P',
        9: 'L2 P codeless',
        14: 'L5 Q',
        17: 'L2 C'
    },
    'GLONASS': {
        0: 'L1 C/A',
        1: 'L2 C/A',
        5: 'L2 P'
    },
    'Galileo': {
        2: 'E1C',
        12: 'E5a Q',
        17: 'E5b Q',
        20: 'AltBOC Q'
    },
    'QZSS': {
        0: 'L1 C/A',
        14: 'L5Q',
        17: 'L2C'
    },
    'SBAS': {
        0: 'L1 C/A',
        6: 'L5I'
    },
    'BeiDou': {
        0: 'B1 with D1 data',
        1: 'B2 with D1 data',
        4: 'B1 with D2 data',
        5: 'B2 with D2 data'
    },
    'Other': {
        19: 'L-Band'
    }
}


class Plots(mplot.PlotGroup):
  """Plots of the GPS."""

  def __init__(self, *args, **kwargs):
    super(Plots, self).__init__(*args, **kwargs)
    # Create a color map and index the satellite names so that all plots share
    # the same color for each satellite. The special 'timestamp' key holds the
    # x-axis data rather than satellite names, so it is excluded here (it was
    # previously folded into the satellite set, inflating the color map).
    self._all_satellites = set()
    for node in args[0]['cn0_by_sat']:
      if node != 'timestamp':
        self._all_satellites.update(args[0]['cn0_by_sat'][node])
    self._color_map = cm.get_cmap(name='nipy_spectral',
                                  lut=len(self._all_satellites))
    self._all_satellites_list = list(self._all_satellites)
    self._all_satellites_index_map = {
        name: i for i, name in enumerate(self._all_satellites_list)}

    # If the timestamp array is provided, use it for the x-axis, otherwise, use
    # the index.
    timestamp = args[0]['cn0_by_sat'].get('timestamp')
    xlabel = 'Time [UTC]' if timestamp is not None else 'Time [samples]'

    # Materialize the node list with a comprehension: Python 3 dict views do
    # not support .remove(), and we must not mutate the dict we iterate.
    nodes = [node for node in args[0]['cn0_by_sat'] if node != 'timestamp']

    # Dynamically create the PlotCarrierToNoiseDensityRatio functions for the
    # given nodes provided in the `cn0_by_sat` map. The function name is set to
    # 'PlotCarrierToNoiseDensityRatio<node name>'. For example, if the nodes
    # provided are 'FcA' and 'GpsBaseStation', then this class will contain two
    # plotting methods: PlotCarrierToNoiseDensityRatioFcA() and
    # PlotCarrierToNoiseDensityRatioGpsBaseStation().
    for node in nodes:
      setattr(
          self,
          # Set the name of the method.
          'PlotCarrierToNoiseDensityRatio' + node,
          # types.MethodType binds the function to self as a callable method.
          # The two-argument form works on both Python 2 and Python 3.
          types.MethodType(
              # MFig(args)(function) applies the decorator to the function.
              MFig(
                  title='Carrier to noise density ratio ' + node,
                  ylabel='C/No [dB-Hz]',
                  xlabel=xlabel
              )(self._get_plot_carrier_to_noise_density_ratio_function(
                  node, timestamp=timestamp)),
              self))

  @MFig(title='Idle time', ylabel='Idle time [percent]', xlabel='Time [s]')
  def PlotIdleTime(self, plot_data):
    """Plots per-node idle time together with a 15% danger threshold."""
    for node in plot_data['idle_time_by_node']:
      plot(plot_data['idle_time_by_node'][node]['timestamp'],
           plot_data['idle_time_by_node'][node]['idle_time'],
           label=node)
    # next(iter(...)) works on both Python 2 and 3; dict.keys()[0] does not.
    first_node = next(iter(plot_data['idle_time_by_node']))
    # Draw the threshold as a red dashed line. The previous call combined the
    # 'k--' (black dashed) format string with color='r', which conflict; the
    # explicit keyword arguments make the red-dashed intent unambiguous.
    plot(plot_data['idle_time_by_node'][first_node]['timestamp'],
         15.0 * numpy.ones(
             plot_data['idle_time_by_node'][first_node]['idle_time'].shape),
         linestyle='--', color='r', label='danger zone')

  def _get_plot_carrier_to_noise_density_ratio_function(
      self, node, timestamp=None):
    """Returns the PlotCarrierToNoiseDensityRatio for a given node.

    Args:
      node: String with the name of the node, which must be a valid key in the
          cn0_by_sat map.
      timestamp: A numpy.array of numpy.object with the datetime.datetime used
          for the x-axis. If none, the sample index is used for the x-axis.
    """

    def plot_carrier_to_noise_density_ratio(self, plot_data):
      for sat_name in sorted(plot_data['cn0_by_sat'][node].keys()):
        if timestamp is None:
          plot(
              plot_data['cn0_by_sat'][node][sat_name],
              label=sat_name,
              color=self._color_map(self._all_satellites_index_map[sat_name]))
        else:
          plot(
              plot_data['cn0_by_sat']['timestamp'],
              plot_data['cn0_by_sat'][node][sat_name],
              label=sat_name,
              color=self._color_map(self._all_satellites_index_map[sat_name]))

    return plot_carrier_to_noise_density_ratio


def get_cn0_by_satellite(novatel_obs, satellite_names=None):
  """Returns a map of C/No by satellite name.

  This function generates a map for each satellite present in the data with its
  respective carrier to noise density ratio (C/No = 10[log_10(S/N_0)] (db-Hz)).
  If a data entry does not contain a measurement for a satellite that was
  previously detected, a `numpy.nan` value is assigned.

  Args:
    novatel_obs: Log of NovAtelObservations data.
    satellite_names: A set where the satellites names found are added to.

  Returns:
    A map of satellite names to C/No data.
  """
  prn = novatel_obs['range']['prn']
  num_obs = novatel_obs['range']['num_obs']
  cn0 = novatel_obs['range']['cn0']
  status_bits = novatel_obs['range']['status_bits']

  cn0_by_satellite = {}
  for i, cn0_value in enumerate(cn0):
    for obs_idx in range(num_obs[i]):
      satellite_name = _get_satellite_name(prn[i, obs_idx],
                                           status_bits[i, obs_idx])
      if satellite_name is not None:
        if satellite_names is not None:
          satellite_names.add(satellite_name)
        if satellite_name not in cn0_by_satellite:
          # TODO: Consider adding a parameter to use 0 instead of
          # numpy.nan in case we want to see when exactly the cn0 drops.
          cn0_by_satellite[satellite_name] = numpy.full_like(cn0[:, 0],
                                                             numpy.nan)
        if cn0_value[obs_idx] > 0:
          cn0_by_satellite[satellite_name].put(i, cn0_value[obs_idx])
  return cn0_by_satellite


def _get_satellite_name(prn, status_bits):
  """Build a satellite name with the system name, PRN/slot, and signal type.

  Args:
    prn: The satellite PRN number of range measurement.
    status_bits: The channel tracking status of the NovAtelObservations range
        message.

  Returns:
    A string with the full satellite name, or None if the values in the status
    bits are not supported.
  """
  if prn == 0 or status_bits == 0:
    return None

  satellite_system_code = (status_bits >> 16) & 0x7  # Extract bits 19-16.
  if satellite_system_code not in _SATELLITE_SYSTEM:
    print('Unsupported satellite system: {0:b}'.format(satellite_system_code))
    satellite_system_name = 'UNKNOWN_SATELLITE'
  else:
    satellite_system_name = _SATELLITE_SYSTEM[satellite_system_code]

  signal_type_code = (status_bits >> 21) & 0x1F  # Extract bits 25-21.
  # Use .get() so systems without a signal table ('Reserved',
  # 'UNKNOWN_SATELLITE') fall through to the unknown label instead of raising
  # KeyError as the previous direct indexing did.
  signal_types = _SIGNAL_TYPE.get(satellite_system_name, {})
  if signal_type_code not in signal_types:
    print('Unsupported signal type: {0:b}'.format(signal_type_code))
    signal_type_name = 'UNKNOWN_SIGNAL_TYPE'
  else:
    signal_type_name = signal_types[signal_type_code]

  return '{0} #{1}: {2}'.format(satellite_system_name, prn, signal_type_name)
The manner in which the Catholic church handles the topic of AIDS is quite frankly bizarre, and illustrates the degree of their utter moral bankruptcy. It is highly ironic, not only because they claim to hold the high moral ground, but also because they claim to have an exclusive direct line to God. This of course is a complete delusion, but I need not ponder on that, it is blindly obvious to any critical thinker. The latest news is that a Vatican cardinal, Cardinal Tarcisio Berton, opened an international conference on AIDS with a speech that strongly defended the church’s two-pronged strategy against the disease – education of consciences and mobilization of Catholic health resources for patients. What he fails to account for is that they strongly oppose the only known means of preventing AIDS, condoms. This, of course, is no surprise since it is the same Bertone that resisted the very idea that a bishop should be obligated to contact police to denounce a priest who had admitted paedophilia, and also publicly blamed the child sex scandal on homosexual infiltration of the clergy. When it comes to titles that accurately describe him, I much prefer “Clueless Immoral Gobshite” instead of the made up and utterly meaningless term “Cardinal”. In the days leading up to the conference a Vatican newspaper ran two articles saying condom campaigns were unsuccessful in stopping the AIDS epidemic; they wrote that campaigns promoting condoms provide a false sense of security, and reinforced that use of condoms within marriage “deforms” the act of procreation – ah yes … lying for the sake of propagating religious dogma, but then that is catholic ethics for you in full flight. The problem here is that these folks claim they wish to help, yet their delusional beliefs completely cripple them. 
If they truly wished to get serious about preventing new HIV infections then they would need to focus on promoting wider access to condoms and spreading information about how best to use them, but no, religious dogma is more important than lives. In fact, lying about the effectiveness of condoms is also apparently OK. One raw fact we should all remember here – this is a belief-driven policy that has a body count associated with it, and as long as the delusional beliefs are permitted to take precedence over basic human decency, many, many more will continue to die. Share this: Facebook Twitter Reddit Tumblr Pinterest LinkedIn Pocket Skype WhatsApp Email Print Like this: Like Loading...
IMPROVEMENT OF MULTIFUNCTIONAL AUTOMOTIVE TEXTILE In order to improve multifunctional automotive textile, required functionalities that have been expected by the automotive industry were enhanced in this study. For this aim, flame retardant and water-oil repellent effects were achieved on 100 % polyester based automotive fabrics via pad-dry-cure system with performed finishing recipes. After functionalization of the fabrics, polyurethane based foams were laminated to the treated automotive textiles. In order to be able to compare the results of tests and to determine the effects of used chemicals on the functionalities of fabrics, chemical amounts of recipe were optimized and the results were discussed. Flame retardancy, water repellency and oil repellency tests were performed before and after abrasion tests. Color spectrums of treated fabrics were tested after chemical finishing applications. Morphological analyses of samples were tested by SEM and chemical structures of the fabrics were analyzed by FTIR-ATR. According to performance test results, multifunctional automotive textile including flame retardant and water-oil repellent effect was achieved successfully. During the burning test, it was determined that laminated textile structure was self-extinguished whereas 3M water repellency grades were at the highest value before abrasion tests.
<reponame>laiudm/Webassemby-mini-c //--------------- // mini-c, by <NAME> (c) 2015; modified to create wasm binary code instead by <NAME> 2017. // MIT license //--------------- // functions to r/w 8 bits when only 32 bit memory access int getChar (const char* buffer) {return buffer[0] & 255;} void writeChar(char* buffer, char ch) { buffer[0] = (buffer[0] & (~255)) | (ch & 0xff) ; } int puts(const char* s); // fwd decl. //#include <stdbool.h> // stdbool.h equivalent #define bool int int false = 0; int true = 1; //#include <ctype.h> // ctype.h equivalent bool isalpha (char ch) {return ( ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z'); } bool isdigit (char ch) {return ch >= '0' && ch <= '9';} bool isalnum (char ch) {return isalpha(ch) || isdigit(ch);} bool ishex (char ch) {return isdigit(ch) || (ch >='a' && ch <= 'f');} // just lower case for the moment int hexValue(char ch) {return isdigit(ch) ? ch - '0' : ch - 'a' + 10;} //#include <stdlib.h> // stdlib functions... void* malloc(int size); int atoi(char* buffer) { // as used here, buffer will only contain numbers... int result = 0; if ( getChar(buffer) == '-') return -1*atoi(buffer+1); while (getChar(buffer)) result = result*10 + (getChar(buffer++) - '0'); return result; } void* calloc(int num, int size) { int sizeInBytes = num*size; void* memory = malloc(sizeInBytes); int sizeInWords = (sizeInBytes >> 2); int* ptr = memory; while (sizeInWords >= 0) { ptr[sizeInWords] = 0; sizeInWords--; } return memory; } void free(void* memory) {} // very simplistic memory allocator; no free space management... 
//#include <string.h> // character/string functions

// Length of a NUL-terminated string (read via getChar for 32-bit-only memory).
int strlen(char* buffer) {
    int count = 0;
    while (getChar(buffer++)) count++;
    return count;
}

// Compare two strings. Callers only need the ==/!= result, so any mismatch
// returns -1 rather than a proper lexicographic ordering.
int strcmp(char* s1, char* s2) { // only need ==, != result
    while ( getChar(s1) == getChar(s2) ) {
        if (getChar(s1) == 0) {
            return 0;
        }
        s1++;
        s2++;
    }
    return -1;
}

// Copy a string (including its terminator) into freshly malloc'd storage.
char* strdup(char* s1) {
    int length = strlen(s1) + 1;
    char* s2 = malloc(length);
    char* retValue = s2;
    while (length--) {
        writeChar(s2++, getChar(s1++));
    }
    return retValue;
}

#include <stdio.h> //Still needed as it defines the struct FILE & linking fails otherwise. But this is fine
FILE* fopen (const char* filename, const char* mode);
int fclose (FILE* stream);
int fgetc (FILE* stream); // char file input
int ungetc (int c, FILE* stream);
int feof (FILE* stream);
int fputc (int c, FILE* stream); // binary file output...
int putchar(int c); // write a character to stdout

// Write the ASCII decimal representation of i to stdout; negatives are
// handled by printing '-' and recursing on the absolute value.
int puti(int i) { // write ascii equiv. of int to stdout
    int div = i / 10;
    int rem = i % 10;
    if (i < 0) {
        putchar('-');
        puti(-i);
    } else {
        if (div) puti(div);
        putchar('0' + rem);
    }
    return 1;
}

int puts(const char* s);

// Minimal printf replacement: supports at most one '%s' directive, which is
// substituted with s1 (recursively printed with no further formatting).
int printfsmall(const char* s, char* s1) {
    char ch;
    do {
        ch = getChar(s++);
        if (ch == '%') {
            getChar(s++); // just assume it's a '%s' & skip over it
            printfsmall(s1, ""); // the second string better not include any formatting
        } else if (ch) putchar(ch);
    } while (ch);
    return 1;
}

int puts(const char* s) { return printfsmall(s, ""); }

int ptr_size = 4;  // size of a pointer in bytes (wasm32)
int word_size = 4; // size of a machine word in bytes
FILE* output;

//=== Bytecode generation ===
char* bytecode; // remember the starting location
int bytecodeLength;
char* headerCode; // remember the starting location
int headerLength;
char* streamPointer; // generic stream pointer
char* sp; // current bytecode pointer

// Allocate an emit stream of maxlen bytes and reset the write cursor.
char* createEmitStream(int maxlen) {
    streamPointer = malloc(maxlen);
    sp = streamPointer;
    return streamPointer;
}

// Number of bytes emitted into the current stream so far.
int stream_length() { return sp - streamPointer; }

// Append one byte to the stream; returns its address for later fixup.
char* emitByte(char op) {
    char* retval = sp;
    writeChar(sp++, op);
    return retval;
}

// Append an opcode followed by a one-byte immediate.
char* emit2Byte(char op, char p) {
    char* retval = emitByte(op);
    emitByte(p);
    return retval;
}

// wasm i32.load (0x28) with alignment flag 2 and the given offset immediate.
char* emiti32load(int offset) {
    char* retval = emit2Byte(0x28, 0x02);
    emitByte(offset);
    return retval;
}

// wasm i32.store (0x36) with alignment flag 2 and the given offset immediate.
char* emiti32store(int offset) {
    char* retval = emit2Byte(0x36, 0x02);
    emitByte(offset);
    return retval;
}

int blockDepth = 0; // current nesting depth of wasm block/loop constructs

// Open a wasm 'block' (0x02); returns its depth for later branch targeting.
int emitBlock(int block_type) {
    emit2Byte(0x02, block_type);
    blockDepth++;
    return blockDepth;
}

// Open a wasm 'loop' (0x03); returns its depth for later branch targeting.
int emitLoop(int block_type) {
    emit2Byte(0x03, block_type);
    blockDepth++;
    return blockDepth;
}

// Close the innermost block/loop with 'end' (0x0b).
void emitEnd() {
    emitByte(0x0b); // end
    blockDepth--;
}

// Emit a branch instruction targeting the block opened at depth blockNo.
void emitBranch(int code, int blockNo) {
    emit2Byte(code, blockDepth - blockNo); // number of blocks to skip
}

// Emit i32.const with the value encoded as a signed LEB128 varint.
char* emitConst(int value) { // value is 32 bit signed...
    //printf("emitConst: %04x\n", (unsigned int) value);
    // from https://en.wikipedia.org/wiki/LEB128#Signed_LEB128
    char* retval = emitByte(0x41); //i32.const
    int more = 1;
    //int negative = (value < 0);
    int byte;
    while(more) {
        byte = value & 0x7f; // lowest 7 bits
        value = value >> 7;
        // following is unnecessary if >> is arithmetic shift
        //if (negative) value = value | (- (1 << (32-7))); // sign extend
        if (((value==0) && !(byte & 0x40)) || ((value==-1) && (byte & 0x40))) more = 0;
        else byte = byte | 0x80;
        emitByte(byte);
        //printf("emit: %02x ", (unsigned char) byte);
    }
    //printf("\n");
    return retval;
}

// Reserve 5 bytes for a section/body length that is patched in later.
char* emitLength() { // called to make space for the length; value is subsequently overwritten
    char* retval = emitByte(0x55); // preventative: make it obvious that it's not been overwritten
    emitByte(0x57);
    emitByte(0x58);
    emitByte(0x59);
    emitByte(0x5a);
    return retval;
}

// Encode 'no' at p as a fixed-width 5-byte LEB128 (continuation bits padded).
void emitNumberFixup(char* p, int no) { // turn the number into a 5 char encoded length
    int i = 0;
    while (i<5) {
        char c = no & 0x7f;
        if (i !=4) c = c | 0x80; // set the top bit of all but the last byte
        writeChar(p++, c);
        no = no >> 7;
        i++;
    }
}

// Patch a previously reserved length field with the bytes emitted since it.
void emitLengthFixup(char* p) {
    int byteLength = sp - p - 5; // no. of bytes between the pointers, offset by the length of the length-field
    emitNumberFixup(p, byteLength);
}

// Emit a string of hex digit pairs as raw bytes (stops on odd/short input).
void putstring(char *c) {
    while ((getChar(c) != 0) && (getChar(c+1) != 0)) {
        int upper = hexValue(getChar(c++));
        int lower = hexValue(getChar(c++));
        emitByte(upper*16 + lower);
    }
}

// Emit a number in the fixed 5-byte LEB128 form used for lengths.
void emit5ByteNumber(int length) {
    char* p = emitLength();
    emitNumberFixup(p, length);
}

//==== Global Memory Generation ====
char* memory; // remember the starting location
char* mp;     // current write position in the data-segment image
int memoryLength;
void emit4ByteMemory(int n); // fwd decl.

// Allocate the wasm data-segment image and reserve the two fixed slots.
char* createMemory(int maxlen) {
    memory = malloc(maxlen);
    mp = memory;
    emit4ByteMemory(0); // Scratch location - used during fn prologue
    emit4ByteMemory(1024*64*4-16); // Call stack pointer - start high in memory
    return memory;
}

// Current offset into the data segment (the next global's address).
int getGlobalAddress() { return mp - memory; }

void emit1ByteMemory(int n) { writeChar(mp++, n); }

// Write a 32-bit value into the data segment, little endian.
void emit4ByteMemory(int n) {
    if (n > 0) {
        //printf("init %i to %i\n", getGlobalAddress(), n);
    }
    emit1ByteMemory(n >> 0); // little endian
    emit1ByteMemory(n >> 8);
    emit1ByteMemory(n >> 16);
    emit1ByteMemory(n >> 24);
}

void emitStringToMemory(char* s) { // call multiple times to concat strings. MUST call emitStringEndToMemory() to properly terminate.
    while (getChar(s)) {
        emit1ByteMemory(getChar(s++));
    }
}

// Terminate the current string and pad the data segment to a 4-byte boundary.
void emitStringEndToMemory() {
    emit1ByteMemory(0); // terminate the string
    while (getGlobalAddress() & 3 ) { // round to a 4 byte address
        emit1ByteMemory(0);
    }
}

void dumpSymTable(); // fwd decl.

//==== Lexer ====
char* inputname;
FILE* input;
int curln;      // current line number (for error messages)
char curch;     // current lookahead character
char* buffer;   // text of the current token
int buflength;
int token;      // kind of the current token (see token_* below)

//No enums :(
int token_other = 0;
int token_ident = 1;
int token_int = 2;
int token_str = 3;
int intResult; // lexer does ascii int/hex conversion too.
// Advance to the next input character, tracking the current line number.
char next_char () {
    if (curch == 10) // '\n'
        curln++;
    curch = fgetc(input);
    //if (curch == 64) { // ampersand - use for debug
    //    dumpSymTable();
    //    curch = fgetc(input);
    //}
    //puts("next_char: "); puti(curch); puts("\n");
    return curch;
}

// Push curch back onto the stream and restore the previous character.
// Always returns false so it can sit inside boolean lookahead expressions.
bool prev_char (char before) {
    ungetc(curch, input);
    curch = before;
    return false;
}

// Append curch to the token buffer and advance the input.
void eat_char () {
    writeChar(buffer + buflength, curch);
    next_char();
    buflength++;
}

// Consume one (possibly backslash-escaped) character of a char/string literal.
void decode_char() {
    if (curch == '\\') {
        next_char();
        curch = curch == 'r' ? 13 : curch == 'n' ? 10 : curch == 't' ? 9 : curch; // anything else, incl. '\\', \', \" just use 2nd char.
    }
    eat_char();
}

// Scan the next token into buffer/token (and intResult for numeric tokens).
void next () {
    //Skip whitespace
    while (curch == ' ' || curch == '\r' || curch == '\n' || curch == '\t') // 32 13 10 9
        next_char();

    //Treat preprocessor lines as line comments
    if ( curch == '#' || (curch == '/' && (next_char() == '/' || prev_char('/')))) {
        while (curch != '\n' && !feof(input))
            next_char();
        //Restart the function (to skip subsequent whitespace, comments and pp)
        next();
        return;
    }

    buflength = 0;
    token = token_other;

    //Identifier or keyword
    if (isalpha(curch)) {
        token = token_ident;
        while ((isalnum(curch) || curch == '_') && !feof(input))
            eat_char();

    //Integer literal
    } else if (isdigit(curch)) { // do ascii to int conversion here - incl. hex format.
        token = token_int;
        intResult = 0;
        eat_char();
        if (getChar(buffer) == '0' && curch == 'x') { // it's a hex number.
            eat_char();
            while (ishex(curch) && !feof(input)) {
                intResult = intResult*16 + hexValue(curch);
                eat_char();
            }
        } else {
            intResult = getChar(buffer) - '0';
            while (isdigit(curch) && !feof(input)) {
                intResult = intResult * 10 + curch - '0';
                eat_char();
            }
        }

    // character literal
    } else if (curch == 39) {
        token = token_int;
        next_char(); // skip the quote
        decode_char();
        intResult = getChar(buffer);
        next_char(); // assume closing quote too. Should check....
        if (curch != 39) buflength = 0; // reset the buffer

    //String literal
    } else if (curch == '"') {
        token = token_str;
        next_char();
        while (curch != '"' && !feof(input)) {
            decode_char();
        }
        next_char();

    //Two char operators
    } else if ( curch == '+' || curch == '-' || curch == '|' || curch == '&' || curch == '=' || curch == '!' || curch == '>' || curch == '<') {
        eat_char();
        if ((curch == getChar(buffer) && curch != '!') || curch == '=')
            eat_char();
    } else
        eat_char();

    writeChar(buffer + buflength, 0); // null terminate the token
    //printf("DEBUG: next, token = %i, buflength = %i, buffer = %s\n", token, buflength, buffer);
    //printfsmall("DEBUG: buffer = %s, intResult = ", buffer); puti(intResult); puts(" token = "); puti(token); puts("\n");
}

// Open the input file and prime curch/token so parsing can begin.
void lex_init (char* filename, int maxlen) {
    inputname = filename;
    input = fopen(filename, "r");
    //Get the lexer into a usable state for the parser
    curln = 1;
    buffer = malloc(maxlen);
    next_char();
    next();
}

//==== Parser helper functions ====

int errors; // running count of reported errors

// Print the standard "<file>:<line>: error: " prefix.
void errorLine() {
    //printf("%s:%d: error: ", inputname, curln);
    puts(inputname); puts(":"); puti(curln); puts(": error: ");
}

// Report an error; format may contain one %s which receives the current token.
void error (char* format) {
    errorLine();
    //Accepting an untrusted format string? Naughty!
    //printf(format, buffer);
    printfsmall(format, buffer);
    errors++;
}

void require (bool condition, char* format) {
    if (!condition) error(format);
}

// True when the current token's text equals look. String tokens never match:
// e.g. the punctuation ")" and the string literal ")" look identical in buffer[].
bool see (char* look) {
    return (token!=token_str) && !strcmp(buffer, look);
}

bool waiting_for (char* look) { return !see(look) && !feof(input); }

// Consume the expected token, reporting an error if it is absent.
void match (char* look) {
    if (!see(look)) {
        errorLine();
        //printf("expected '%s', found '%s'\n", look, buffer);
        printfsmall("expected '%s'", look); printfsmall(", found '%s'\n", buffer);
        errors++;
    }
    next();
}

bool try_match (char* look) {
    if (see(look)) {
        next();
        return true;
    }
    return false;
}

//==== Symbol table ====
char** globals;
int global_no;
int* globalAddr; // the global variable's address in global memory. Only valid if this symbol table entry is a global var.
int* is_fn; // 0 = it's not a function. -1 = function declaration seen (initially, later negative index into the exports section). >0 = fn body exists, index into Code section (offset by 1)
int* fn_sig; // index into the Type section for the correct fn signature
char** locals;
int local_no;
int param_no;
int* offsets;
int bodyCount; // counts function bodies; offset by 1 for the is_fn[] encoding
int functionOffset; // imported functions offset the number for functions with a body. -1 indicates it's not been updated yet. It's a crosscheck to ensure things are done in the right order

// Allocate and reset all symbol-table storage for up to max entries.
void sym_init (int max) {
    globals = malloc(ptr_size*max);
    global_no = 0;
    globalAddr = calloc(max, ptr_size); // zero initialise
    is_fn = calloc(max, ptr_size); // zero initialise
    fn_sig = calloc(max, ptr_size); // zero initialise
    locals = malloc(ptr_size*max);
    local_no = 0; // none of these 0 initialisations are really needed...
    param_no = 0;
    bodyCount = 1; // offset the count by one for is_fn[] encoding
    offsets = calloc(max, word_size);
    functionOffset = -1;
}

// Linear search; returns the index of look in table, or -1 if absent.
int sym_lookup (char** table, int table_size, char* look) {
    int i = 0;
    while (i < table_size)
        if (!strcmp(table[i++], look))
            return i-1;
    return -1;
}

// Record a new global symbol at the current global-memory address.
void new_global (char* ident) {
    int varAddr = getGlobalAddress();
    //printf("Global: %s = %i\n", ident, varAddr);
    globals[global_no] = ident;
    globalAddr[global_no++] = varAddr;
}

// Record a function symbol; supersedes any earlier forward declaration by
// "deleting" the old entry and linking it to the new one.
void new_fn (char* ident, bool returnsValue, int noParams, bool hasBody) {
    //printf("new function %s, global_no = %i, bodycount = %i, signature = %i, hasBody = %i\n", ident, global_no, bodyCount, returnsValue + 2*noParams, hasBody);
    int existing = sym_lookup(globals, global_no, ident);
    if (existing > 0) { // matches an existing entry - delete that entry by "zeroing" impt fields
        //puts("matches existing entry & deleting it. index = "); puti(existing); puts("\n");
        free(globals[existing]);
        globals[existing] = "<del>"; // these are the 2x fields that need to be reset to fully delete the entry. NB - don't set to 0; other functions assume a null terminated string!
        is_fn[existing] = 0;
        globalAddr[existing] = global_no; // set up a link to the newly created, correct entry - needed when doing call fixups.
    }
    // add it
    is_fn[global_no] = hasBody ? bodyCount++ : -1;
    fn_sig[global_no] = returnsValue + (2*noParams); // calculate index into Type Section's function signatures
    new_global(ident);
}

// Record a new local variable; returns its slot index.
int new_local (char* ident) {
    locals[local_no] = ident;
    offsets[local_no] = local_no;
    return local_no++;
}

// Record a function parameter (parameters are just leading locals).
void new_param (char* ident) {
    new_local(ident);
    param_no++;
}

// Debug helper: print every global symbol name.
void dumpSymTable() {
    int i = 0;
    puts("Globals dump\n");
    int j = global_no;
    puti(global_no);
    while (i < global_no) {
        puts(globals[i++]);
        puts(" ");
    }
    puts("\n");
}

//Enter the scope of a new function
void new_scope () {
    local_no = 0;
    param_no = 0;
}

//==== Codegen call fixups ====
// At the time we generate a Call instruction we don't yet know the function's index
// Therefore record the location of all the call instructions in this table, and go back to fix them all when we know
int* fixupSymTableEntry;
char** fixupBytecodeAddr;
int fixupIndex;

// Allocate the fixup tables for up to max recorded call sites.
void init_codegen(int max) {
    fixupSymTableEntry = calloc(max, ptr_size);
    fixupBytecodeAddr = calloc(max, ptr_size);
    fixupIndex = 0; // really unnecessary
}

// Emit a call instruction with a placeholder index and record it for fixup.
void emitCall(int symtableIndex) {
    emitByte(0x10); //call instruction
    fixupSymTableEntry[fixupIndex] = symtableIndex;
    //puts("enter fixup no "); puti(fixupIndex); puts(", index = "); puti(symtableIndex); puts("\n");
    fixupBytecodeAddr[fixupIndex] = emitByte(0x55); // arbitrary no. acting as a sentinel - check on overwriting. Just 1x varuint byte so limited to 127 fns max.
    fixupIndex++;
}

// Patch every recorded call site with its final wasm function index.
void doCallFixups() { // must be called after generateImportSection() as the is_fn values for external functions need to have been updated.
    if (functionOffset < 0) {
        puts("functionOffset is < 0; interlock has failed\n");
    }
    int i = 0;
    while (i<fixupIndex) {
        int symtableIndex = fixupSymTableEntry[i];
        if ( !strcmp("<del>", globals[symtableIndex]) ) {
            // Entry was superseded by a later definition; follow the link.
            //puts("Re-routing call to fwd-declared fn. From "); puti(symtableIndex); puts(" to "); puti(globalAddr[symtableIndex]); puts("\n");
            symtableIndex = globalAddr[symtableIndex];
        }
        int callOffset = is_fn[symtableIndex];
        if (callOffset < 0) {
            callOffset = - callOffset - 1; // it's calling an imported function; invert the number and allow for the offset
        } else {
            callOffset = callOffset - 1 + functionOffset; // calling a local function; offset the index by the number of imported functions
        }
        char* bytecodeAddr = fixupBytecodeAddr[i];
        //printf("dofixups %i: symtableIndex = %i, calloffset = %i, bytecodeAddr = %i\n", i, symtableIndex, callOffset, bytecodeAddr-bytecode);
        if ( getChar(bytecodeAddr) != 0x55) {
            //printf("Sentinel is invalid - found %i at %i\n", bytecodeAddr[0], bytecodeAddr-bytecode);
            puts("Sentinel is invalid - found "); puti(bytecodeAddr[0]); puts(" at "); puti(bytecodeAddr-bytecode); puts("\n");
        }
        //puts("doCallFixups no. "); puti(i); puts(", addr = "); puti(bytecodeAddr); puts(", offset = "); puti(callOffset); puts("\n");
        writeChar(bytecodeAddr, callOffset); // this has to be a byte write, otherwise it will obliterate adjacent bytecode
        i++;
    }
}

//==== One-pass parser and code generator ====

bool lvalue; // true when the last operand parsed was an lvalue

// Assert that the last operand was an lvalue, then consume the flag.
void needs_lvalue (char* msg) {
    if (!lvalue) error(msg);
    lvalue = false;
}

int expr (int level, int stackDepth); // fwd decl.

//The code generator for expressions works by placing the results on the stack
//Regarding lvalues and assignment:
//An expression which can return an lvalue looks ahead for an
//assignment operator. If it finds one, then it pushes the
//address of its result. Otherwise, it dereferences it.
//The global lvalue flag tracks whether the last operand was an
//lvalue; assignment operators check and reset it.
// Parse one primary expression (literal, identifier, call, string, or
// parenthesised expr) and emit code that leaves its value — or, for an
// lvalue about to be assigned, its address — on the wasm stack.
// Returns the updated expression stack depth.
int factor (int stackDepth) {
    int global; // compiler limitation that local vars have to be declared 1st, not inside {}
    int local;
    int str;
    int arg_no;
    int stackDelta = 1; // the stack grows by 1 for most factors; the exception is a function call with a void return.

    lvalue = false;

    if (see("true") || see("false")) {
        emitConst( see("true") ? 1 : 0);
        next();
    } else if (token == token_ident) {
        global = sym_lookup(globals, global_no, buffer);
        local = sym_lookup(locals, local_no, buffer);
        // printf("symbol %s: global= %i, local= %i\n", buffer, global, local);
        require(global >= 0 || local >= 0, "no symbol '%s' declared\n");
        next();

        // Look ahead: an assignment/inc/dec operator means we need the address.
        if (see("=") || see("++") || see("--")) lvalue = true;

        if (global >= 0) {
            if (!is_fn[global]) { // ok, it's an ordinary variable
                emitConst(globalAddr[global]);
                if (!lvalue) {
                    emiti32load(0x00); // i32.load offset = 0;
                }
            } else { // it's a function call - parse the parameters
                match("(");
                arg_no = 0;
                if (waiting_for(")")) {
                    do {
                        expr(0, 0); //start stack counting again for the para. Could add a check that the returned stack is 1
                        arg_no++;
                    } while (try_match(","));
                }
                match(")");
                emitCall(global);
                if ( !(fn_sig[global] & 1) ) { // bottom bit of signature determines the return value
                    stackDelta = 0;            // void call leaves nothing on the stack
                }
            }
        } else if (local >= 0) { // all locals are in the frame
            emit2Byte(0x20, 2);              // get_local - 'cached' fp
            emitConst(4*(offsets[local]+1)); // position 0 is the old-fp
            emitByte(0x6a);                  // i32.add - now have the address of the local
            if (!lvalue) {
                emiti32load(0); // TODO Hmm, could optimise the read case by using the offset in load. Only use the calc. for lvalue.
            }
        }
    } else if (token == token_int) {
        emitConst(intResult);
        next();
    } else if (token == token_str) {
        // Lay the string down in static memory; its address is the value.
        str = getGlobalAddress();
        while (token == token_str) { //Consecutive string literals are concatenated.
            emitStringToMemory(buffer);
            next();
        }
        emitStringEndToMemory();
        emitConst(str); // put the address of the string on the stack
    } else if (try_match("(")) {
        expr(0, 0); // should really verify that the returned stack depth is 1.
        match(")");
    } else
        error("expected an expression, found '%s'\n");

    return stackDepth + stackDelta;
}

// Parse a postfix chain: a factor followed by any number of [index] suffixes.
// Emits address arithmetic (index * word-size + base) and a load unless the
// element is about to be assigned.
int object (int stackDepth) {
    int arg_no; // NOTE(review): unused here — leftover from when call parsing lived in this function
    stackDepth = factor(stackDepth);
    while (true) {
        // function parameter processing used to be done here, but is now moved to factor() because of wasm restriction - can't put fn addr on stack
        if (try_match("[")) { // array indexing
            // the base address is on the stack
            stackDepth = expr(0, stackDepth);
            match("]");
            if (see("=") || see("++") || see("--")) lvalue = true;
            emitConst(4);   // word-size
            emitByte(0x6c); // i32.mul -- multiply the index by the word-size
            emitByte(0x6a); // i32.add -- and add in the base address to get the final address
            if (!lvalue) { // doesn't properly deal with return a[b] ???? The code looks like it should????
                emiti32load(0x00); // i32.load offset = 0;
            }
            stackDepth--; // 2 stack positions are replaced by 1
        } else
            return stackDepth;
    }
    return 0; // keep compiler happy when true is defined as a variable, not a constant...
}

// Parse prefix operators (! ~ -) and postfix ++/--.  Scratch locals L0/L1
// are used to rewrite the operations wasm can't express directly.
int unary (int stackDepth) {
    if (try_match("!")) {
        //Recurse to allow chains of unary operations, LIFO order
        unary(stackDepth);
        emitByte(0x45); // i32.eqz - inverts the boolean
    } else if (try_match("~")) {
        //Recurse to allow chains of unary operations, LIFO order
        unary(stackDepth);
        emitConst(-1);
        emitByte(0x73); // i32.xor with -1 to invert all bits.
    } else if (try_match("-")) {
        unary(stackDepth);
        // there's no i32.neg instruction! Compute 0 - x via scratch local L0.
        emit2Byte(0x21, 0); // set_local
        emitConst(0x00);
        emit2Byte(0x20, 0); // get_local
        emitByte(0x6b);     // i32.sub
    } else {
        //This function call compiles itself
        stackDepth = object(stackDepth);

        if (see("++") || see("--")) {
            // Postfix inc/dec: load, adjust, store back, then restore the
            // ORIGINAL value to the stack.  Uses scratch locals L0 (addr), L1 (old value).
            needs_lvalue("assignment operator '%s' requires a modifiable object\n");
            emit2Byte(0x22, 0);  // tee_local L0
            emit2Byte(0x20, 0);  // get_local L0
            emiti32load(0x00);   // i32.load offset = 0;
            emit2Byte(0x22, 1);  // tee_local L1
            emitConst(0x01);
            emitByte(see("++")? 0x6a : 0x6b); // i32.add : i32.sub
            emiti32store(0x00);  // update the pointer
            emit2Byte(0x20, 1);  // get_local L1 -- finally put the original value back on the stack
            next();
            // net result of all this is the stack remains at the same depth
        }
    }
    return stackDepth;
}

void ternary ();

// Precedence-climbing expression parser/code generator.
// Levels: 0 assignment, 1 ?:, 2 ||/&&, 3 comparisons, 4 additive/bitwise/shift,
// 5 multiplicative, 6 unary.  Returns the resulting stack depth.
int expr (int level, int stackDepth) {
    char instr;
    char instrNot;   // NOTE(review): unused
    int shortcircuit;

    if (level == 6) { stackDepth = unary(stackDepth); return stackDepth; }

    stackDepth = expr(level+1, stackDepth);

    while ( level == 5 ? see("*") || see("/") || see("%") :
            level == 4 ? see("+") || see("-") || see("&") || see("|") || see("<<") || see(">>") :
            level == 3 ? see("==") || see("!=") || see("<") || see(">") || see(">=") || see("<=") : false) {
        // Map the operator token directly to its i32 wasm opcode.
        instr = see("+") ? 0x6a : see("-") ? 0x6b : see("*") ? 0x6c : see("/") ? 0x6d :
                see("%") ? 0x6f : see("&") ? 0x71 : see("|") ? 0x72 : see("<<") ? 0x74 :
                see(">>") ? 0x75 : see("==") ? 0x46 : see("!=") ? 0x47 : see("<") ? 0x48 :
                see(">") ? 0x4a :see(">=") ? 0x4e : 0x4c; // last one is <=
        next();
        stackDepth = expr(level+1, stackDepth);
        emitByte(instr);
        stackDepth--; // 2x operand are replaced by one
    }

    if (level == 2) while (see("||") || see("&&")) {
        // Short-circuit via a block that returns i32: stash the left value in
        // L0, branch out of the block (keeping L0) if it already decides the
        // result, otherwise drop it and evaluate the right-hand side.
        // would like to just conditionally jump forward, but this isn't easy in wasm.
        emit2Byte(0x21, 0);  // set_local - we need to both test and optionally restore this value on the stack
        shortcircuit = emitBlock(0x7f); // the updated condition result is returned on TOS
        emit2Byte(0x20, 0);  // get_local - this will be TOS at the end of the block if we don't need to calc. the next expr.
        emit2Byte(0x20, 0);  // get_local - Get the value to test it
        if (see("&&") ) emitByte(0x45); // optionally invert the result
        emitBranch(0x0d, shortcircuit);
        emitByte(0x1a);      // drop the condition result
        next();
        expr(level+1, 0);    // should check that it returns stackDepth == 1
        emitEnd();
        // end result is that the stackDepth is at the same at exit as at entry, irrespective of the 2x paths
    }

    if (level == 1 && try_match("?")) ternary();

    if (level == 0 && try_match("=")) {
        //lvalue is already on the stack
        needs_lvalue("assignment requires a modifiable object\n");
        stackDepth = expr(level+1, stackDepth);
        emiti32store(0x00); // store, 0 offset
        stackDepth = stackDepth - 2; // 2x values removed from the stack
    }

    return stackDepth;
}

void line ();

// Code-generate `cond ? a : b`.  The condition result is already on the stack.
// need to save into a local and then create the nested blocks for the <then>, <else> parts
// this code closely matches if_branch() below
void ternary () {
    emit2Byte(0x21, 0);                   // set_local
    int ifBlock = emitBlock(0x7f);        // the block exits with a i32
    int elseBlock = emitBlock(0x40);      // Block encloses 'then' with 0 results
    emitBlock(0x40);                      // Block encloses 'conditional' with 0 results
    emit2Byte(0x20, 0);                   // get_local
    emitByte(0x45);                       // i32.eqz
    emitBranch(0x0d, elseBlock);          // br_if
    emitEnd();                            // end of condition eval.
    expr(1, 0);                           // emit 'then' code
    emitBranch(0x0c, ifBlock);            // br
    emitEnd();                            // end of 'then'code
    match(":");
    expr(1, 0);                           // emit 'else' code
    emitEnd();
}

// Parse and code-generate an if/else statement using nested wasm blocks.
void if_branch () {
    // create code all if stmts as follows:
    // Block -- wraps entire if stmt, contains the 'else' code (if any)
    //   Block -- contains the 'then' code
    //     Block -- contains the condition evaluation
    //     ...code to evaluate the condition, with br_if's as required
    //     ...flow thru. or br_if 0x00 to execute 'then' clause
    //     end
    //   ...'then' clause code
    //   br 0x01 -- exit the outer if stmt block
    //   end
    // ...'else' clause code
    // end
    int ifBlock = emitBlock(0x40);   // Block encloses 'if' with 0 results
    int elseBlock = emitBlock(0x40); // Block encloses 'then' with 0 results
    emitBlock(0x40);                 // Block encloses 'conditional' with 0 results
    match("if");
    match("(");
    expr(0, 0);
    match(")");
    emitByte(0x45);                  // i32.eqz
    emitBranch(0x0d, elseBlock);     // br_if
    emitEnd();                       // end of condition eval.
    line();                          // emit 'then' code
    emitBranch(0x0c, ifBlock);       // br
    emitEnd();                       // end of 'then'code
    if (try_match("else"))
        line();                      // emit 'else' code
    emitEnd();
}

// Parse and code-generate while / do-while loops.
// (Continues past this point in the file.)
void while_loop () {
    // Block -- wraps entire while/do stmt
    //   Loop -- contains the conditional/body for while loop or body/conditional for do loop. while loop illustrated below
    //     Block
    //     ...code to evaluate the conditional, with br_if's as required
    //     ...flow thru. for "true" condition or br_if 0x01 to exit the while
    //     End
    //   ... body code
    //   br to loop
    //   End
    // End
    int whileBlock = emitBlock(0x40);
    int loopBlock = emitLoop (0x40);

    bool do_while = try_match("do");
    if (do_while)
        line();                      // do-while: body comes first

    emitBlock(0x40);                 // block enclosing the conditional
    match("while");
    match("(");
    expr(0, 0);
    match(")");
    emitByte(0x45);                  // i32.eqz
    emitBranch(0x0d, whileBlock);    // br_if
    emitEnd();                       // end of condition eval.
// ---- continuation of while_loop(): body + back-branch ----------------------
    if (do_while) match(";"); else line();

    emitBranch(0x0c, loopBlock); // br back to beginning of loop
    emitEnd();                   // end of loopBlock
    emitEnd();                   // end of whileBlock
}

void decl (int kind);

//See decl() implementation — the three declaration contexts:
int decl_module = 1;
int decl_local = 2;
int decl_param = 3;

int return_to;   // block index that `return` statements branch to (set per function)

// Parse and code-generate one statement.
void line () {
    bool ret;
    int stackDepth = 0;

    if (see("if")) if_branch();
    else if (see("while") || see("do")) while_loop();
    else if (see("int") || see("char") || see("bool") || see("void")) decl(decl_local);
    else if (try_match("{")) {
        while (waiting_for("}"))
            line();
        match("}");
    } else {
        ret = try_match("return");
        if (waiting_for(";")) stackDepth = expr(0, 0); // set starting stack-depth to 0
        if (ret) {
            emitBranch(0x0c, return_to); // jump to the function's exit block
        } else {
            // non-zero stackDepth without a return. Need to pop it. Caused by i++ or fn-call with discarded ret-value
            if (stackDepth > 0) {
                emitByte(0x1a); // drop. Pop the unneeded value off the stack
                // printf("Stack non-zero, pop required %i\n", stackDepth);
            }
        }
        match(";");
    }
}

// Code-generate a function body: locals declaration, software frame
// prologue/epilogue (frame pointer lives at linear-memory address 4),
// parameter spill, then the body via line().
void function (char* ident, bool returnsValue) {
    //Header
    char* length = emitLength(); // the length will be fixed up below
    emitByte(1);    // no. of local entries - just one as all locals are type i32
    emitByte(3);    // no. of locals of type i32. We only ever use 3x locals
    emitByte(0x7f); // i32 type
    return_to = emitBlock(returnsValue ? 0x7f : 0x40); // either 0x7f (i32 return) or 0x40 (void return)

    //Prologue: - update the frame
    int fp =4;      // linear-memory address where the frame pointer is kept
    int temp = 0;   // linear-memory scratch slot
    emitConst(temp); // save old value in temp location
    emitConst(fp);   // get the fp
    emiti32load(0);
    emiti32store(0);
    emitConst(fp);   // update the fp
    emitConst(fp);
    emiti32load(0);
    emitByte(0x41);  // i32.const
    char* frameSize = emitLength(); // fix up the actual size later
    emitByte(0x6b);  // i32.sub
    emiti32store(0);
    emitConst(fp);   // save old fp in 1st frame location
    emiti32load(0);
    emitConst(temp);
    emiti32load(0);
    emiti32store(0);

    int i = 0;
    while (i < param_no) {
        emitConst(fp);            // save parameters in the frame
        emiti32load(0);           // get the fp
        emit2Byte(0x20, i);       // get_local
        emiti32store(4 + (4*i));  // and save it, using the offset feature at last
        i++;
    }

    emitConst(fp); // 'cache' the bp value to speed up para, local var access
    emiti32load(0);
    emit2Byte(0x21, 2); // set_local

    line();

    emitEnd(); // where any return statements jump to

    //Epilogue:
    emitConst(fp);      // update the fp
    emit2Byte(0x20, 2); // get the old value of fp using the cached fp
    emiti32load(0);     // offset 0, where old fp is saved
    emiti32store(0);    // fp is now updated; easy.
    emitByte(0x0b);     // function terminator

    // Now calculate & fix the frame size. The frame comprises: <old fp> *<paras> *<locals>
    // local_no already = <no-paras> + <no-locals> -- bad naming....
    emitNumberFixup(frameSize, 4*(local_no+1));
    emitLengthFixup(length);
}

// Parse one declaration (module-level, local, or parameter — see `kind`).
// Handles variables, function declarations/definitions, and initialisers.
void decl (int kind) {
    //A C declaration comes in three forms:
    // - Local decls, which end in a semicolon and can have an initializer.
    // - Parameter decls, which do not and cannot.
    // - Module decls, which end in a semicolon unless there is a function body.

    bool fn = false;
    bool returnsValue = false;
    bool fn_impl = false;
    int local;

    if (see("const")) // the stdio function declarations need const type definitions to avoid cl compiler warnings
        next();

    returnsValue = !see("void");
    next(); // skip over the type declaration - int, void, etc.
    while (try_match("*"))
        returnsValue = true; // void* does return a value

    //Owned (freed) by the symbol table
    char* ident = strdup(buffer);
    next();

    //Functions
    if (try_match("(")) { // it's a function
        if (kind == decl_module)
            new_scope();

        //Params
        if (waiting_for(")")) do {
            decl(decl_param);
        } while (try_match(","));
        match(")");

        new_fn(ident, returnsValue, param_no, see("{")); // this captures all we need - returnType, no-params, body
        fn = true;

        //Body
        if (see("{")) {
            require(kind == decl_module, "a function implementation is illegal here\n");
            fn_impl = true;
            function(ident, returnsValue);
        }

    //Add it to the symbol table
    } else {
        if (kind == decl_local) {
            local = new_local(ident);
        } else {
            if (kind == decl_module) new_global(ident); else new_param(ident);
            // kind == decl_module ? new_global(ident) : new_param(ident);
        }
    }

    //Initialization
    if (see("="))
        require(!fn && kind != decl_param,
                fn ? "cannot initialize a function\n" : "cannot initialize a parameter\n");

    if (kind == decl_module) {
        if (try_match("=")) {
            if (token == token_int) {
                emit4ByteMemory(intResult);
            } else
                error("expected a constant expression, found '%s'\n");
            next();
        //Static data defaults to zero if no initializer
        } else if (!fn) {
            emit4ByteMemory(0);
        }
    // it's a variable declaration within a function...
    } else if (try_match("=")) {
        // replicate code from factor() for loading the local's address. This could be simplified slightly by using the i32.store's offset field
        emit2Byte(0x20, 2);              // get_local - 'cached' fp
        emitConst(4*(offsets[local]+1)); // position 0 is the old-fp
        emitByte(0x6a);                  // i32.add - now have the address of the local
        expr(0, 0);
        emiti32store(0);                 // and save the result
    }

    if (!fn_impl && kind != decl_param && !feof(input))
        match(";");
}

//==== wasm binary file format generation ====

// Emit one func-type entry: 0x60, param count, i32 per param, result count (+type).
void generateFunctionType(int no_paras, int returnType) {
    emitByte(0x60); // form
    emitByte(no_paras);
    int i = 0;
    while( i++<no_paras ) { emitByte(0x7f); }
    emitByte(returnType);       // result count: 0 or 1
    if (returnType) { emitByte(0x7f); }
}

// Emit the Type section: every signature up to maxParams params, with and
// without an i32 result, so that index = returnsResult + 2*no-paras.
void generateTypeSection(int maxParams) {
    // Type Section declares all function signatures that will be used in the module.
    // For simplicity we're going to just create a list of signatures up to 5 paramaters with/without a return value
    // entry    meaning
    //   0      void result, no paras
    //   1      int result, no paras
    //   2      void result, 1 para
    //   3      int result. 1 para
    //   4      void result, 2 para
    //   5      int result, 2 para
    // So index = returnsResult + 2 x no-paras
    // format is: <id> <payload-len><count of entries> <func-type>*
    // where <func-type> is: <form><para-count><para-type>*<return-count><return-type>
    // For us, form = 0x60, para-type, return-type are always int32 = 0x7f (if present)
    emitByte(0x01); // Section Type
    char* length = emitLength();
    emitByte(2 + (2*maxParams)); // calculate the number of entries
    int i = 0;
    while( i<=maxParams ) {
        generateFunctionType(i, 0);
        generateFunctionType(i, 1);
        i++;
    }
    emitLengthFixup(length);
}

// Emit the Function section: one signature index per function that has a body.
// Cross-checks the emitted count against bodyCount.
void generateFunctionSection() {
    emitByte(0x03); // Function Section
    char* length = emitLength();
    emitByte(bodyCount-1); // remember bodyCount is deliberately off by one
    int i = 0;
    int xCheck = 0;
    while (i < global_no) { // functions are put in the global table
        if (is_fn[i]>0) {
            emitByte(fn_sig[i]);
            xCheck++;
        }
        i++;
    }
    if (bodyCount-1 != xCheck)
        //printf("function cross-checks don't match: %i vs %i\n", bodyCount-1, xCheck);
        puts("function cross-checks don't match\n");
    emitLengthFixup(length);
}

// Emit a length-prefixed (single byte) name string.
void emitString(char* s) {
    emitByte(strlen(s));
    while (getChar(s) != 0) {
        emitByte(getChar(s++));
    }
}

// Emit one import entry: module "env", the function name, kind, signature.
void generateImportEntry(char* name, int signature) {
    emitString("env");
    emitString(name);
    emitByte(0x00); // external kind = function
    emitByte(signature);
}

// Emit the Import section for every declared-but-bodyless function.
// Work even when there are 0 entries
// this call also updates the is_fn[] indicies for the symbol table entries for imported functions.
void generateImportSection() {
    emitByte(0x02); // Imports Section
    char* length = emitLength();
    char* noEntries = emitLength();
    int entries = 0;
    int i = 0;
    while (i < global_no) {
        if (is_fn[i] < 0) { // it's a fn, with no body defined
            is_fn[i] = -(entries+1); // update the function index; it's -ve, and offset by one...
            generateImportEntry(globals[i], fn_sig[i]);
            entries++;
        }
        i++;
    }
    functionOffset = entries; // releases the doCallFixups() interlock
    emitNumberFixup(noEntries, entries);
    emitLengthFixup(length);
}

// Emit one export entry: name, kind, index.
void generateExportEntry(char* name, int kind, int index) {
    emitString(name);
    emitByte(kind);
    emitByte(index);
}

// Emit the Export section: linear memory plus every function with a body.
void generateExportsSection() {
    emitByte(0x07); // Exports Section
    char* length = emitLength();
    char* noEntries = emitLength();
    generateExportEntry("memory", 0x02, 0x00);
    int entries = 1;
    int i = 0;
    while (i < global_no) {
        if (is_fn[i] > 0) { // it's a fn, with a body defined
            generateExportEntry(globals[i], 0x00, is_fn[i]-1+functionOffset); // smell - this calc is done twice
            entries++;
        }
        i++;
    }
    emitNumberFixup(noEntries, entries);
    emitLengthFixup(length);
}

// Emit the Data section: a single segment at offset 0 containing the whole
// static-data image built up in `memory`.
void generateDataSection() {
    emitByte(11); // Data Section
    char* length = emitLength();
    emitByte(1);    // just 1 data segment
    emitByte(0);    // the linear memory index, = 0
    emitConst(0);   // offset of 0
    emitByte(0x0b); // terminate with End
    int payloadLength = getGlobalAddress();
    emit5ByteNumber(payloadLength); // size of data in bytes
    char* p = memory;
    while (payloadLength) {
        emitByte(getChar(p++));
        payloadLength--;
    }
    emitLengthFixup(length);
}

// binary file output
// Write `length` bytes from an emit stream to the output file.
// NOTE(review): the `header` argument is unused (debug label only).
void writeStream(char* ptr, int length, char* header) {
    int count = 0;
    while (count++<length) { // output the bytes that's been generated...
        fputc(getChar(ptr++), output); // write it out to the file
    }
}

// Top-level driver: parse the whole input into bytecode, then assemble and
// write every wasm module section around it.
void program () {
    errors = 0;

    bytecode = createEmitStream(100000); // start with this - should be big enough
    char* count_of_bodies = emitByte(0); // come back & fix this value after having generated code for all the functions. Assume <127 max.

    while (!feof(input))
        decl(decl_module);

    int byteCodeLength = stream_length();
    writeChar(count_of_bodies, bodyCount-1); // Overwrite the earlier no. remember bodyCount is off by 1

    headerCode = createEmitStream(50000); // create another stream for the header info.

    // Generate the header
    putstring("0061736d"); // magic
    putstring("01000000"); // version

    // Generate Section: type
    generateTypeSection(4); // NOTE(review): comment said "up to just 2x parameters ... 5x entries" but the argument is 4 (= 10 entries)

    //Generate Section: Imports
    generateImportSection();
    doCallFixups(); // update the bytecode with correct call offset. Must be called after generateImportSection()

    // Generate Section: Function
    generateFunctionSection();

    // Generate Section: Table
    putstring("048480808000"); // type, length
    putstring("01700000");     // count, anyfunc-type, resizable limits

    // Generate Section: Memory
    putstring("058380808000"); // type, length
    putstring("010010");       // count=1, memory_type = resizable limits. Flags=0, so only initial length present = 1 = 64kbytes; 10 = 16 * 64K

    // Generate Section: Global
    putstring("068180808000"); // type, length
    putstring("00");           // count = 0

    // Generate Section: Export
    generateExportsSection();

    // Generate Section: Code
    emitByte(0x0a);                  // Section type = code
    emit5ByteNumber(byteCodeLength); // the length of the entire code section

    // Write out the wasm file:
    writeStream(headerCode, stream_length(), "Header");
    writeStream(bytecode, byteCodeLength, "Bytecode");

    headerCode = createEmitStream(50000); // create a new stream just for the final Data section
    generateDataSection();
    writeStream(headerCode, stream_length(), "Data");
}

// Entry point: compile <input file, default "cc.c"> to "program.wasm".
// Exit status is non-zero if any parse/codegen errors occurred.
int main (int argc, char** argv) {
    //--argc; ++argv;
    //if (argc > 0 && **argv == '-' && (*argv)[1] == 's') { src = 1; --argc; ++argv; }
    //if (argc > 0 && **argv == '-' && (*argv)[1] == 'd') { debug = 1; --argc; ++argv; }
    //if (argc < 1) { printf("usage: c4 [-s] [-d] file ...\n"); return -1; }
    char* ipname;

    if (argc == 2) {
        ipname = argv[1];
        //puts("Usage: cc <Input file>\n");
        //return 1;
    } else
        ipname = "cc.c";

    //output = fopen(argv[1], "w");
    output = fopen("program.wasm", "wb"); // keep it simple to begin with. Must open with 'b' for binary, otherwise it adds \r to each \n

    lex_init(ipname, 256);
    sym_init(800);
    init_codegen(3000);  // one entry for every call - should be enough
    createMemory(20000); // should be enough

    program();

    return errors != 0;
}
The Squid Axon Na+/Ca2+ Exchanger Shows Ping Pong Kinetics only when the Cai-regulatory Site is Saturated In a previous work we demonstrated that, in dialyzed squid axons, an impairment of the Ca<sup>2+</sup><sub>i</sub>-regulatory site affected the apparent affinities for external Na<sup>+</sup> and Ca<sup>2+</sup> in a way opposite to that predicted by the existing (ping-pong) models for the exchangers. In the present work, we used model simulations and actual experiments where the Ca<sup>2+</sup><sub>i</sub>-regulatory site remained always saturated while [Ca<sup>2+</sup>]<sub>i</sub> was either limiting or near saturating for the internal Ca<sup>2+</sup> transport sites. Under these conditions, both the theoretical and experimental transport activation curves for external Na<sup>+</sup> and Ca<sup>2+</sup> were those expected from the current kinetic schemes. These observations have two important implications: On the one hand, they confirm the ping-pong translocation schemes for Na<sup>+</sup>/Ca<sup>2+</sup> exchange. On the other, they call for caution in interpreting kinetic data in membrane transport systems possessing intracellular ionic and/or metabolic regulation.
Housing starts fell 8.7% in the month of February, according to the latest report from the U.S. Dept. of Housing and Urban Development and the U.S. Dept. of Commerce. The one bright spot seems to be an increase in multifamily permits, where the overwhelming majority of which will feed the rental market. Housing starts significantly heated up in January, according to the latest report from the U.S. Dept. of Housing and Urban Development and the U.S. Dept. of Commerce. First American Chief Economist Mark Fleming said the 18.6% monthly increase in housing starts reflects rising consumer sentiment and builder confidence. Housing starts for the month of December took a tumble, according to the latest report from the HUD and the Dept. of Commerce. Starts fell 11.2% in December to a seasonally adjusted annual rate of 1.08 million units, according to the report, which was delayed due to the more than month-long partial government shutdown. How will America's lumber fight with Canada impact homebuilders? The lumber war between the U.S. and Canada goes way beyond the new tariff on lumber that U.S. Commerce Secretary Wilbur Ross announced on Monday. And what is the main industry using lumber from Canada? Homebuilders. Here’s how it looks broken down to dollars and cents. What will the financial news headlines read on January 29? Dollars to doughnuts, I’d expect more than a few screaming headlines about economic growth—next Friday is when the U.S. Department of Commerce rolls out its advance estimate for GDP in the fourth quarter of 2009. But all will likely not be as it seems, if so. US real gross domestic product (GDP) slid at an annual rate of 6.1% in Q109 as unemployment rose, slashing the output of goods and services. Downsizing and job loss either contributing to or resulting from the GDP contraction may also heap pressure on now-unemployed homeowners. 
The quarter's contraction comes after GDP shrank 6.3% in Q408, the US Department of Commerce said Wednesday in its advance estimates. Real gross domestic product -- or GDP -- contracted at a seasonally-adjusted annual rate of 6.3 percent in the fourth quarter 2008 from the previous quarter, according to revised estimates released Thursday by the Bureau of Economic Analysis within the U.S. Department of Commerce. This rate was revised down from a previous estimate of a 6.2 percent decline. After dropping over 10 percent in January, new single-family home sales rebounded 4.7 percent in February to a seasonally adjusted annual rate of 337,000, according to data released Wednesday by the U.S. Department of Commerce. The upward movement in February's sales was the first nationwide increase since last July, and well above the 323,000 pace expected by economists surveyed by MarketWatch. Nonetheless, February's sales were still down 41.1 percent compared to the same time last year. New single-family home sales in January came in at an adjusted annual rate of 309,000, down 10.2 percent from a month earlier and 48.2 percent from the rate seen in the same month a year earlier, according to data released Thursday by the U.S. Department of Commerce. The South experienced the highest number of new home sales in the month, with 172,000 units sold, while the Northeast reported the smallest number of new home sales, with 27,000 units sold.
More Evidence of the Plateau Effect The purpose of this study was to test the existence of the plateau effect at the social level. The authors tried to confirm the preliminary conclusion that people may not be willing to trade off any longevity to improve the health state of a large number of people if the health states are mild enough. They tested this assumption using the person-tradeoff technique. They also used a parametric approach and a nonparametric approach to study the relationship between individual and social values. Results show the existence of the plateau effect in the context of resource allocation. Furthermore, with the nonparametric approach, a plateau effect in the middle part of the scale was also observed, suggesting that social preference may not be directly predicted from individual utilities. The authors caution against the possible framing effects that may be present in these kinds of questions. Key words: person tradeoffs; social preferences; utility assessments; plateau effect. (Med Decis Making 1998;18:287-294)
Optimal SAT-based Minimum Adder Synthesis of Linear Transformations Linear transformation with known fixed coefficients is commonly used in VLSI circuits, e.g., discrete cosine transformation (DCT) used in image/video compression, FIR/IIR filters used in DSP, and generic matrix multiplication (GEMM) used in artificial neural networks. In all these applications, because one of the multiplicands is constant, it is possible to apply optimizations that exploit the relationships between coefficients to reduce the hardware complexity. Additionally, in hardware implementations, the coefficients are usually carefully chosen to remove the need for a generic multiplier, and instead, multiplications are implemented using only adders and shifters. Unfortunately, finding the optimal minimum adder implementations for those is NP-hard, even in the simplest GF case. In this paper, we extend an existing boolean satisfiability (SAT) based formalization of this problem from GF to the generic case, and propose a successive approximate framework to make the formulation feasible with current SAT solvers. This approach could not only find optimal solutions but could also formally prove the optimality of the result. When applying this to synthesis of lightweight linear transformations (e.g., approximate DCT), it could find optimal multi-layer implementations that save up to 3.4% area and 3% power compared to state-of-the-art ASIC synthesizers and up to 22% area saving and 6% delay improvements compared to FPGA synthesizers.
Reinforcement Learning Based Neural Architecture Search for Flaw Detection in Intelligent Ultrasonic Imaging NDE System Ultrasonic flaw detection has been extensively used for NDE applications because it has high inspection resolution and accuracy. Conventional ultrasonic flaw detection is more vulnerable to human errors and time-consuming as the workload increases. The artificial intelligence (AI), such as machine learning (ML) methods, automates the evaluation process and is more reliable and practical. However, modeling the ML algorithms, such as the neural networks (NN) requires substantial computational resources for training and significant effort in obtaining efficient NN architecture. In this study, we introduce a reinforcement learning (RL) based neural architecture search (NAS) framework to automatically model the optimal NN design. By using this framework, a NAS-based NN: Ultrasonic Flaws Detection NAS Neural Network: UFDNASNet, is proposed for flaws detection with high accuracy and data-efficiency. The ultrasonic datasets are processed by the NAS framework using the recurrent neural network (RNN) controller to search for the best convolutional operations. The flaw detection performance is analyzed and compared between the introduced UFDNASNet and several hand-designed deep Convolutional Neural Networks (deep-CNN) based on detection accuracy and inference data-efficiency. To evaluate the performance for defects detection, the NNs are trained with the transfer learning (TL) using the USimgAIST dataset of B-scan images representing without-defect and with-defects cases. The B-scan images were collected by using the pulsed laser ultrasonic scanning system from 17 stainless steel specimen plates with various types of flaws and some plates without any damage. Our purpose is to realize an intelligent system to detect flaws with high accuracy for data-efficient ultrasonic NDE applications.
Pancreatic protein secretion and gastrointestinal hormone release in response to parenteral amino acids and lipid in dogs. Parenteral nutrition has been advocated for and used in clinical situations in which provision of calories without stimulation of pancreatic secretion is desired. A recent report, however, provided evidence for substantial stimulation of pancreatic secretion after parenteral administration of amino acids and fat. We have studied the effect of intravenous administration of crystalline amino acids and lipid on pancreatic protein secretion and release of gastrointestinal hormones in five dogs with chronic pancreatic fistulas. The amino acids were given as a 4.25% solution in 5% glucose at 2 gm/hr. Parenteral fat was administered as Intralipid 10% at 3.5 ml/kg/hr. Plasma concentrations of cholecystokinin (CCK) and pancreatic polypeptide (PP) and serum concentrations of gastrin, measured by radioimmunoassay, were determined before, and at intervals during, infusion of amino acids and fat. Pancreatic juice was collected simultaneously with blood sampling, and volume and protein output were measured. Basal concentrations of CCK, PP, and gastrin were not affected by intravenous infusion of amino acids. Pancreatic protein secretion and volume were also unaffected by parenteral amino acids. Parenteral infusion of fat resulted in a significant inhibition of integrated gastrin release but had no effect on plasma concentrations or integrated release of CCK or PP. Neither the volume nor protein output of pancreatic secretion was affected by intravenous fat administration. In summary, no stimulation of pancreatic secretion or release of CCK, PP, or gastrin occurred as a result of parenteral amino acid or fat administration. There is, therefore, no contraindication to the use of parenteral nutrition in situations in which it is desirable to keep the pancreas at rest.
/* Save the boundary string into the PAF state.
 *
 * Consumes one byte of a MIME header, case-insensitively matching the
 * literal "boundary=" (via the external boundary_str template) and, once
 * matched, accumulating the boundary value into data_info->boundary.
 *
 * data_info: per-flow MIME PAF state (search cursor, boundary buffer/len).
 * val:       next input byte.
 *
 * Returns true when a complete, NUL-terminated boundary has been stored,
 * false while matching/accumulation is still in progress.
 */
static inline bool store_boundary(MimeDataPafInfo *data_info, uint8_t val)
{
    if (!data_info->boundary_search)
    {
        /* Not currently matching: a '.' or whitespace (token boundary)
         * restarts the search for "boundary=". */
        if ((val == '.') || isspace(val))
            data_info->boundary_search = (char *)&boundary_str[0];
        return false;
    }

    if (*(data_info->boundary_search) == '=')
    {
        /* "boundary" matched; whitespace may precede the '='. */
        if (val == '=')
            data_info->boundary_search++;
        else if (!isspace(val))
            data_info->boundary_search = NULL;
    }
    else if (*(data_info->boundary_search) == '\0')
    {
        /* "boundary=" fully matched: accumulate the value itself. */
        if (isspace(val) || (val == '"'))
        {
            if (!data_info->boundary_len)
                return false;   /* skip leading quote/whitespace */

            data_info->boundary[data_info->boundary_len] = '\0';
            return true;
        }

        /* Reserve one byte for the NUL terminator. The original bound
         * (boundary_len < sizeof(boundary)) let boundary_len reach
         * sizeof(boundary), so the terminating store above could write
         * one byte past the end of the buffer. */
        if (data_info->boundary_len < sizeof(data_info->boundary) - 1)
        {
            data_info->boundary[data_info->boundary_len++] = val;
        }
        else
        {
            /* Buffer full: terminate the truncated boundary and report
             * it as complete. */
            data_info->boundary[data_info->boundary_len] = '\0';
            return true;
        }
    }
    else if ((val == *(data_info->boundary_search)) ||
             (val == *(data_info->boundary_search) - 'a' + 'A'))
    {
        /* Case-insensitive match of the next template character
         * (boundary_str is lowercase). */
        data_info->boundary_search++;
    }
    else
    {
        /* Mismatch: restart at a token boundary, otherwise abandon. */
        if ((val == '.') || isspace(val))
            data_info->boundary_search = (char *)&boundary_str[0];
        else
            data_info->boundary_search = NULL;
    }

    return false;
}
PARADOXICAL REACTIONS DURING ANTITUBERCULOSIS THERAPY A SINGLE-CENTER PROSPECTIVE ANALYSIS Background: Paradoxical reactions during anti-TB treatment represent a real challenge to pneumo-phthisiologists and require high index of suspicion. It has been suggested that this reaction during appropriate treatment is common and severe in HIVnegative individuals. Our objective was to determine the frequency of paradoxical reactions and their associated features. Method : A prospective study was undertaken in a population of HIV-TB+ patients to determine the frequency of paradoxical reactions and their associated features. Results: Paradoxical reactions occurred in 1.5% of all our hospitals TB patients. Conclusion : Paradoxical reactions during anti-TB treatment is common in HIV-uninfected individuals and must be considered after careful exclusion of medication non-adherence, other infections, development of resistance, and other similar conditions. INTRODUCTION Paradoxical reactions during anti-TB treatment represent a real diagnostic and therapeutic challenge to pneumophthisiologists, and were defined as the worsening of preexisting tuberculous lesions on the basis of clinical or radiological findings or the development of new TB lesions in patients who had received anti-TB treatment for at least 10 days and whose conditions were reported to be improving. This phenomenon received renewed interest because of the immune reconstitution inflammatory syndrome that can occur in HIV-infected patients with TB when they start receiving HAART. The objective of our work is to report our experience through a prospective study which was 2 years long and concerned HIV-uninfected patients. PATIENTS AND METHODS This is a prospective study conducted in our pneumophtisiology department during a period from January 2014 to December 2015. Included in this study were patients not infected with HIV and patients whose paradoxical reaction diagnosis during anti-TB treatment was made. 
In all of our patients, TB diagnosis was made after histological and/or bacteriological confirmation. All of our patients showed initial clinical and radiological improvement after antibacillary treatment administration, followed by clinical and/or radiological aggravation. A detailed medical history and a comprehensive assessment including a new intradermal reaction were performed in all our patients in order to eliminate noncompliance to treatment or differential diagnoses such as; secondary infection with nosocomial or mundane germs, inadequate or insufficient treatment, resistance, or side effects of antibacillaries. Basic lab tests included complete blood count, basic metabolic panel, liver function tests, coagulation profile, thyroid-stimulating hormone, and urinalysis. For statistical analysis, a chi-squared test was used for categorical variables. A P-value of <0.05 was considered significant. RESULTS Our study collected 30 cases, 11 of which were men. The sex ratio was 1/3 and the average age was 37.3 years. Extreme ages ranged from 10 to 60 years. One patient had a history of viral hepatitis B and another was diabetic. 3 patients were already treated for TB and 3 were smokers. The initial site of tuberculosis was in lymph node in 9 cases, pleural in 7 cases, pulmonary in 7 cases including one military TB and pleuropulmonary TB in 4 cases, peritoneal in 3 cases, pleuropericardial TB in 2 cases, genital and multifocal respectively in a single case respectively (table 1). The median time to the occurrence of paradoxical reaction was 4.08 months. Three of our cases were diagnosed after antibacillary treatment was stopped and 10 cases in the first two months of treatment. The symptoms were pleurisy in 13 cases, the appearance of new adenopathies in 7 cases with fistula in 3 cases, parenchymal involvement in 3 cases, a pleuropericarditis in 2 cases, hydropneumothorax in 2 cases, tuberculomas in 2 cases, cold abscess in a single case and ascites in three cases. 
The clinical symptoms and signs of paradoxical deterioration manifested in the initial site of infection in 22 of the 30 (73%) episodes, of which 20 were in the respiratory system. A paradoxical reaction occurred in an anatomical site other than that of the initial presentation in 8 (27%) episodes, 2 of which were initially manifest in the central nervous system. Four episodes of paradoxical reaction (13.3%) led to the prescription of Prednisolone in various doses (average dose, 60 mg/day; range 20-60 mg/day) and for various durations (median duration, 46 days; range 21-90 days). These patients had pericardial and cerebral lesions. TB treatment was extended for 3 to 6 months for 6 patients. Pleurisies were evacuated. The evolution of paradoxical reaction was favorable in all patients. Since the advent of the first TB treatment, clinicians have faced worsening epithelioid granulomas and especially giant cell granulomas with clinical TB symptoms in patients successfully treated with anti-TB treatment with negativity of cultures confirming microbiological cure of infection. The mechanism of paradoxical reaction is not well understood, but it has been attributed to host immunologic reactions, with possible mechanisms including delayed hypersensitivity response, decrease in immune suppression and response to mycobacterial antigens (Campbell 1977). Involvement of lymph nodes during paradoxical deterioration (30%) in our series was significantly less common than that in HIV-positive patients (41%). The paradoxical reaction does not correspond to a relapse of the opportunistic infection but to restoration of a pathological immune response. Reported in 30 to 35% of HIV-infected patients through immune restoration syndrome during antiretroviral treatment, these paradoxical reactions have also been described in HIV-uninfected patients.
This diagnosis should not be made at first hand, and should primarily eliminate TB treatment inefficiency, for poor adherence, absorption problem, potentially due to drug interactions, or because of resistant mycobacterium strain. Another inter-current opportunistic infection should also be eliminated. Despite many hopes, today we do not have immunoassays that are used routinely to assist this diagnosis of exclusion. The paradoxical reaction manifested clinically by a worsening of preexisting symptoms in 74.6% of cases, or the occurrence of new clinical signs in 25.4% of cases. Some authors set the occurrence time to an average of four weeks. The slow action of anti-TB drugs could explain the latency. The time of occurrence of other lesions or worsening of pre-existing symptoms may correspond to the time required for the action of anti-TB drugs. The pathogenesis of paradoxical reactions is not fully elucidated. The hypothesis of a restoration of the specific immune response was proposed. It is considered that the pathophysiology of paradoxical reactions is quite similar to the pathophysiology described in HIV-infected patients. Immunity is initially altered by TB itself due to its immunosuppressive effect. However, it is gradually corrected through TB treatment. The immunosuppressive effects of M. tuberculosis in tuberculosis disease were found at several levels: apoptosis, dysfunction of antigenpresenting cells, T cell lymphopenia and reduced production of IFN by T cells. The main risk factors of paradoxical reactions are extrapulmonary or disseminate tuberculosis sites, initial lymphopenia and increased lymphocytes. The paradoxical reaction is encountered in 6 to 30% of patients undergoing treatment for all forms of tuberculosis. Extrapulmonary tuberculosis is found in 80% of cases of paradoxical reaction. Tuberculosis meningitis and miliary tuberculosis come at the top of the list. 
The central nervous system, pleura and lungs are the most commonly affected sites. The frequency of lymph node site is variable according to the authors. Tuberculous lymphadenitis is complicated by paradoxical reaction in 4 to 23% of cases. with an increase in the size of lymph nodes (12%), a fluctuation (11%), erythema and spontaneous discharge (7 %). New lymphadenopathy may occur in 27 to 36% of cases. The site is cervical in 68% of cases. Sample taken at the time of paradoxical reaction usually shows no results. PCR can remain positive for a long period. The finding of concomitant increase in ALC and conversion of the tuberculin skin test in our patients during paradoxical deterioration concurred with the observation in a previous reports who demonstrated a conversion of the tuberculin skin test in five HIV-negative patients during paradoxical deterioration (3,. In rare cases, M. tuberculosis could be isolated and its sensitivity to the used anti-TB drugs is not changed. The changing of anti-TB molecules is not necessary in a paradoxical reaction. Nevertheless, some authors have extended the quadruple tuberculosis therapy during three months then relaying by triple therapy instead of a double therapy for the rest of treatment. The use of corticosteroids is recommended in meningitis, cerebral tuberculoma, pericarditis or hypoxic miliary. Corticosteroid therapy has been widely used (40% of cases) in all forms to limit the exaggerated inflammatory reaction of the body. Faced with a tuberculous lymphadenitis in an English study, corticosteroid therapy is recommended if there was a risk of compression of adjacent organs. In neuromeningeal forms, corticosteroid therapy remains subject to discussion; however, it appears in some recommendations of experts. If a mortality reduction was reported in a series including a small number of patients, the advantage in terms of morbidity seems more modest. 
In pericarditis, the advantage of corticosteroid therapy on mortality reduction and prevention of occurrence of constrictive pericarditis is controversial. Important abscesses should probably be drained but it is not known how often it should be done. The duration of treatment is an average of two to four weeks. The prognosis is favorable in most cases. Most ganglion forms show spontaneous resolution in about 2.5 months and 7 to 11% had residual lymphadenopathy at the end of treatment. Concerning the damage to the central nervous system, deaths were reported especially in patients coinfected with HIV with 13 to 30% of mortality, despite treatment with corticosteroids, and in pregnant women with 38% of mortality. The use of corticosteroids in the adjunctive management of paradoxical reactions is common. Some case reports have described rapid recovery after initiation of corticosteroid therapy (1,. The advantage of corticosteroid therapy in reducing edema around enlarging intracranial tuberculomas is apparent, but the advantage associated with the use of such therapy for lymph node TB is less clear. CONCLUSION Paradoxical reaction during anti-TB treatment can take many aspects. Paradoxical reaction requires the implementation of a comprehensive test before making the diagnosis: a diagnosis of exclusion. There is no consensus on the therapeutic management of this possibility but some authors suggest an extension of TB treatment and/or corticosteroids. Although evolution is usually spontaneously favorable, complications are possible. Further studies are also needed to better understand the pathogenesis and risk factors in immunocompetent patients, which would help identify patients at risk of developing paradoxical reaction during TB treatment and better control its clinical manifestation. We believe that the role of corticosteroid therapy can only be defined by a randomized placebo-controlled trial. 
AUTHORS' CONTRIBUTIONS The participation of each author corresponds to the criteria of authorship and contributorship emphasized in the Recommendations for the Conduct, Reporting, Editing, and Publication of Scholarly work in Medical Journals of the International Committee of Medical Journal Editors. Indeed, all the authors have actively participated in the redaction, the revision of the manuscript and provided approval for this final revised version. ACKNOWLEDGEMENT Declared none.
A gas flare on an oil production platform in the Soroush oil fields is seen alongside an Iranian flag in the Gulf, July 25, 2005. Photo: Reuters / Raheb Homavandi / File. The United States granted Iraq a 90-day waiver exempting it from sanctions to buy energy from Iran, a State Department official said on Wednesday, the latest extension allowing Baghdad to keep purchasing electricity from its neighbor. The official said the waiver was granted on Tuesday. The last waiver for Iraq to be exempt from US energy sanctions on Iran was granted on Dec. 21. The Trump administration reimposed sanctions on Iran’s energy exports in November, citing its nuclear program and meddling in the Middle East, but has granted waivers to several buyers to meet consumer energy needs. “While this waiver is intended to help Iraq mitigate energy shortages, we continue to discuss our Iran-related sanctions with our partners in Iraq,” the State Department official said on condition of anonymity. Iraq relies heavily on Iranian gas to feed several power stations, importing roughly 1.5 billion standard cubic feet per day via pipelines in the south and east. Washington has said it wants to roll back Iranian influence in the Middle East, including in Iraq, where Iran holds broad sway over politics and trade. Although Iraq has one of the world’s largest natural gas reserves, it has moved slowly to develop them and relied on Iran to supply it with gas and electricity. Iran last year stopped supplying electricity to Iraq due to unpaid bills. The power shortages in Iraq sparked protests in Basra and other cities as people blamed government corruption. Saudi Arabia has also offered to sell electricity to Baghdad at a discount, part of an effort by the kingdom to curb the influence of its rival Iran in Iraq. “We are also continuing to work with Iraq to end its dependence on Iranian natural gas and electricity and increase its energy independence,” the State Department official said.
Mild Hypothermia Promotes Ischemic Tolerance and Survival of Neural Stem Cell Grafts by Enhancing Global SUMOylation Cerebral infarct penumbra due to hypoxia and toxin accumulation is not conducive to the transplantation of neural stem cells (NSCs), although mild hypothermia can improve the local microenvironment of the ischemic penumbra and exert neuroprotective effects. However, insufficient understanding of the molecular mechanism by which mild hypothermia protects the brain limits widespread clinical application. This study evaluated the molecular mechanism of mild hypothermia-induced brain protection from the perspective of global protein small ubiquitin-like modifier (SUMO) modification, with the aim of improving NSC transplant survival rates in the penumbra to enhance neurological function. NSCs from neonatal rats were extracted to detect the effects of hypoxia and mild hypothermia on SUMOylation modification levels, cell stemness, and hypoxia-induced injury. Overexpression and knockdown of UBC9 in NSCs were used to evaluate their ability to maintain stemness and withstand hypoxic injury. Finally, a rat middle cerebral artery occlusion (MCAO) model was used to verify the effect of mild hypothermia treatment and UBC9 overexpression on neural function of NSCs following penumbra transplantation in rats. Results showed that hypoxia and mild hypothermia promoted both the SUMOylation modification and maintenance of NSC stemness. Overexpression of UBC9 enhanced the abilities of NSCs to maintain stemness and resist hypoxic injury, while UBC9 knockdown had the opposite effect. Following transplantation into the ischemic penumbra of MCAO model rats, mild hypothermia and Ubc9-overexpressing NSCs significantly reduced cerebral infarct areas and improved neurological function. 
In conclusion, this study demonstrated that global protein SUMOylation is an important molecular mechanism for NSCs to tolerate hypoxia, and mild hypothermia can further increase the degree of global SUMOylation to enhance the hypoxia tolerance of NSCs, which increases their survival during transplantation in situ and ability to perform nerve repair in the penumbra of cerebral infarction. Introduction China ranks first in the world for the number of people experiencing stroke and, with the advent of an aging population, this trend is increasing annually. According to reports, the prevalence of ischemic stroke in China was 1981 per 100,000 in 2017, with a mortality rate of 149 per 100,000, thus imposing a heavy burden on families and society. Current treatment measures for cerebral infarction involve basic support and monitoring, dehydration to reduce intracranial pressure, anticoagulation, scavenging of free radicals, and nourishing nerves in an attempt to prevent complications and reduce mortality ; however, the efficacy of all these methods remains uncertain. Therefore, in clinical practice, the implementation of an effective treatment plan is particularly important for improving the survival of patients and their quality of life. In recent years, NSCs have yielded high hopes for the treatment of stroke, especially ischemic cerebrovascular disease. Theoretically, NSCs transplanted into the penumbra at the edge of cerebral infarction will proliferate for a few generations and then differentiate to supplement neurons and glial cells, thereby repairing damage and improving nerve function. However, in fact, the penumbra microenvironment exhibits severe hypoxia and accumulation of large amounts of toxic substances that are extremely unfavorable for the local survival of transplanted NSCs, which severely limits their application. Therefore, improving the survival of transplanted NSCs in the penumbra is key for the treatment of ischemic cerebrovascular disease. 
Nowadays, the application of mild hypothermia for brain protection has attracted increasing attention and gradually been implemented in clinical practice. A large number of international trials have confirmed the effectiveness and practicability of mild hypothermia in clinical applications, which can reduce the mortality rate and effectively improve the quality-of-life of patients with ischemic cerebrovascular disease. However, as most reports only describe the clinical efficacy and methods of mild hypothermia treatment, the exact mechanism of its action has not been clarified. This restricts its wide acceptance by doctors and, to a certain extent, widespread promotion in clinical practice. Therefore, it is necessary to have a deeper and comprehensive understanding of the molecular mechanism by which mild hypothermia protects the brain to help doctors provide more precise treatment plans for patients with cerebral ischemia. Small ubiquitin-like modifier-(SUMO-) mediated SUMOylation, a form of posttranslational modification of proteins, is used by cells to respond to external stress and adapt to changes in the internal environment. SUMO modification of proteins requires the cascade reaction of SUMO activating enzyme (E1), conjugating enzyme (E2 and UBC9), and ligase enzyme (E3). Neurons can reportedly antagonize the adverse microenvironment of hypoxia by increasing global SUMOylation of a large number of proteins, such as hypoxia-inducible factor 1 (HIF-1), and mild hypothermia can further increase global SUMOylation in neurons. Indeed, this enriches the molecular mechanism underlying mild hypothermia-induced brain protection to a certain extent. At present, no reports describe the effects of hypoxia and mild hypothermia on protein SUMOylation in neural stem cells (NSCs). 
Moreover, it is unknown whether transplantation of NSCs overexpressing SUMO into the edge of a cerebral infarction, with or without mild hypothermia, can increase the survival rate of NSC grafts and improve prognosis. Therefore, this study investigated the effects of hypoxia and mild hypothermia on global SUMOylation of NSCs, as well as their proliferation, differentiation, and hypoxia tolerance. We also transplanted NSCs overexpressing UBC9 into the cerebral ischemic penumbra of a rat middle cerebral artery occlusion (MCAO) model to evaluate their survival in vivo, as well as effects on the neurological functions of rats. In summary, the results show that mild hypothermia can promote the ischemic tolerance and survival of NSC grafts by enhancing global SUMOylation and improve the neurological function of rats. These conclusions identify a molecular mechanism supporting the brain protection elicited by mild hypothermia and provide a guide for increasing the survival of NSC grafts to improve the prognosis of patients with cerebral infarction. Materials and Methods The research conforms to NIH Guide for the Care and Use of Laboratory Animals (8th Edition, Institute for Laboratory Animal Research, Division on Earth and Life Studies, National Research Council of the National Academies Press). Experimental Rats. Total number of 60 12-week-old male and 5 new-born (within 1 day) female Sprague-Dawley rats were purchased from SPF Biotechnology Co., Ltd. (Beijing, China). These rats were housed in the Animal Experimental Center of the Fifth Central Hospital of Tianjin (Tianjin, China) with 50% ± 5% humidity and 20-25°C ambient. 2.2. NSC Culture and Treatment. Rat NSCs were isolated and extracted from the hippocampus of new-born rats (within 1 day) under sterile conditions, digested with 0.05% trypsin for 15 minutes, and carefully pipetted with a dropper to form a single cell suspension at 1000 r/min. Trypsin was removed after centrifugation for 5 min. 
Neural stem cells were cultured in rat neural stem cell culture medium (Cyagen Biosciences, Suzhou, China). After 5-7 days of culture, the neurospheres were dissociated into single cell suspensions by mechanical separation for subculture, and the cells were seeded at a density of 2 10 5 cells/. Cells of neurospheres were confirmed to be NSCs and propagated for 2 passages to obtain enough NSCs for experiments. Cultures maintained at 37°C and 5% CO 2 in an incubator were recorded as the control group (Con). Hypoxia was performed by placing NSCs in 1% O 2, 94% N 2, 5% CO 2, balanced nitrogen, and 95% humidity for 12 h (designated as H 12 h). For the mild hypothermia group, incubators were set at 33°C (designated as 33°C). All the experiments were carried out after a subsequent 48 h of culture under normal conditions. Establishment of Rat MCAO Models. A total number of 60 adult male Sprague-Dawley rats were randomly divided into 5 groups. During the operation, a small-animal ventilator (Shanghai Yuyan Instruments Co., Ltd., Shanghai, China) was used to maintain animals' respiration, and body temperature was monitored by a rectal temperature control. Making a 1 cm longitudinal incision between rat sternum and mandible, then left common carotid artery was isolated, and we found out the external carotid and internal carotid arteries under a stereo microscope (Olympus Corporation, Tokyo, Japan). Next, we ligated the distal heart end of the external carotid artery and the proximal heart end of the carotid artery, and a modified nylon thread (with 0.23 mm head diameter and 0.18 mm trunk diameter) was inserted from the carotid artery to the middle cerebral artery (~12.0 mm deep) and fixed with surgical line. Mild Hypothermic Treatment. Rats which underwent surgery without thread insertion were defined as the "sham operation" group. NSCs or UBC9 NSCs were transplanted to the ischemic penumbra after the establishment of MCAO models as reported in literatures. 
To make mild hypothermia, MCAO rats injected with NSCs were placed on an insulation blanket (Shanghai Yuyan Instruments), and a rectal temperature monitor was to keep body temperature at 32 to 34°C for 12 h. Rats were then removed from blanket and gradually recovered to normal body temperature. After removal, some models' brains were stripped and sliced then incubated in 1% 2,3,5-triphenyl-2H-tetrazolium chloride (TTC, Sigma-Aldrich, St. Louis, MO) for 20 min at 37°C to distinguish the infarct area (white) and the uninfarct area (pink and red). Sections were fixed with 4% paraformaldehyde for 2 h to distinguish stained from unstained areas. The infarcted and uninfarcted areas were analyzed by ImageJ software (National Institutes of Health, Bethesda, MD), and percentages of infarct were calculated as infarct area/area of the whole brain slice 100%. Brain tissues under the same conditions should be collected for paraffin section and immunofluorescence staining assay as described in Section 2.6. 2.9. Neural Function Analysis. Animals recovered from MCAO for 1, 4, 7, 14, 21, or 28 days before assess neurological functions by modified Neurological Severity Scores (mNSS). The mNSS consisted of balance, movement, sensory function, and reflex tests with a score ranging from 0 to 18 (normal score: 0; maximum defect score: 18). In addition, rotarod testing was performed to evaluate of rat neurological deficits (Zhishuduobao Biotechnology Co., Ltd, Beijing, China). Rats were pretrained for 3 times before MCAO, and the experiment was performed for 7 days after MCAO. The cylinder accelerated from 10 to 40 rpm within 5 min; before rats fell from the rod, the latency to fall was recorded. Mean latency for each rat was calculated from three trials with a 30 min interval between 2 trials. 3 Oxidative Medicine and Cellular Longevity 2.10. Rat Behavior Tests. 7 days after MCAO, rats should be tested for behaviors. 
Spontaneous activity was monitored within 30 min for distance traveled and time spent in corners. Activity was assessed as distance traveled (locomotion), vertical activity (rearing), thigmotaxis, and time spent in corners using behavioral tester (Zhishuduobao Biotechnology Co., Ltd, Beijing, China) equipment. On the 10th day after MCAO, rats were tested for memory and learning abilities in the Morris water maze (Zhishuduobao Biotechnology Co., Ltd, Beijing, China) according to reference. Rats underwent the visible platform experiment for the first 2 days, the nonvisible platform experiment for the following 3 days, and the probe trial for the last day. Escape latency and swimming paths were measured during the first 5 days. In the probe trial, percentages of time spent in quadrant IV and numbers of platform crossing were recorded. 2.11. Data Statistics and Analysis. Each experiment was performed at least for 3 times. Data are showed as mean ± standard deviation (SD) and were analyzed using GraphPad Oxidative Medicine and Cellular Longevity Prism 6 software (San Diego, CA). A P value <0.05 was considered significant difference. One-way ANOVA or the unpaired Student's t test was used to evaluate the significance of differences among treatment groups, as appropriate. Results 3.1. Hypoxia Increased Injury, Inhibited Differentiation, Increased the Stemness Maintenance Potential, and Reduced the Metabolic Capacity of NSCs. After 12 h of hypoxia stimulation, the content of LDH released by NSCs increased significantly, suggesting cell damage (Figure 1(a)). The results showed that the percentage of apoptotic cells reached 30% after hypoxia (Figure 1(b)). Immunofluorescence detection showed that a certain proportion of NSCs spontaneously differentiated under normal conditions. NSE and GFAP were expressed. Nestin, a marker of NSCs, was highly expressed, and cells showed extensional growth morphology similar to nerve fiber structures. 
After hypoxia, NSCs exhibited obvious spherical growth, NSE and GFAP expression decreased, and nestin expression significantly increased (Figure 1(c)). Moreover, expression of stem cell markers Oct4 and SOX2 increased significantly (Figure 1(d)). Additionally, we calculated ECAR levels and found that hypoxia could significantly increase the anaerobic hydrolysis level of NSCs (Figure 1(e)). (Figure 2(c)). Expression of Oct4 and SOX2 was also increased by mild hypothermia compared with cells at 37°C exposed to hypoxia (Figure 2(d)). Mild hypothermia could reduce the metabolic level of cells and inhibit hypoxiainduced increases of anaerobic fermentation (Figure 2(e)). Hypoxia Increased Whole-Protein SUMOylation in NSCs and Mild Hypothermia further Strengthened SUMOylation Modification. Western blot was used to detect whole-cell levels of the SUMOylation modification under the conditions of hypoxia, mild hypothermia, and their superposition. The results showed that hypoxia and hypothermia could significantly promote the binding of SUMO1 and SUMO2/3 to target proteins and had a superposition effect; however, it had little effect on free SUMOs. Further detection of conju-gating enzyme E2 (UBC9) showed that hypoxia could promote the expression of this protein (Figures 3(a)-3(c)). Overexpression of UBC9 Could Increase the Stemness and Hypoxia Tolerance of NSCs. We transfected NSCs with a plasmid carrying the UBC9 gene sequence and screened clones with high expression of UBC9. Protein detection showed that SUMO1 and SUMO2/3 conjugates in UBC9overexpressing NSCs were significantly increased, as were the contents of stemness maintenance molecules Oct4 and SOX2 (Figures 4(a)-4(c)). Immunofluorescence detection showed that UBC9 overexpression could significantly increase the expression of the NSC marker nestin and promote spherical growth of cells. 
Under hypoxia, expression of differentiation markers in NSCs was further reduced, nestin expression was increased, and the cell ball became smaller and round (Figure 4(d) H 12h=33C * * * * * * * * * * * * * * * * * (c) Figure 3: Effects of hypoxia (H 12 h) and mild hypothermia (33°C) on whole-protein SUMO modification in NSCs. (a) and (b) Expression of SUMO1 and SUMO2/3 conjugates, free SUMO1 and SUMO2/3, and UBC9 in NSCs after hypoxia and/or moderate hypothermic treatment, as assessed by western blotting. (c) Quantitative data were normalized to GAPDH and are expressed as mean ± SD (n = 3). * P < 0:05, * * P < 0:01, and * * * P < 0:001 vs. control. 6 Oxidative Medicine and Cellular Longevity damage after hypoxia, but there was no significant damage to cells under normoxia (Figure 4(e)). Hypoxia activated the cleaved caspase-3 apoptosis signal in NSCs and pro-moted apoptosis, but UBC9 overexpression could partially reverse the proapoptotic damage induced by hypoxia (Figure 4(f)). 7 Oxidative Medicine and Cellular Longevity 3.5. siUBC9 Reduced the Stemness and Hypoxia Tolerance of NSCs. We used small interfering RNA sequences to knockdown UBC9 expression. Protein detection showed that low UBC9 expression inhibited the modification of target proteins by SUMO1 and SUMO2/3 in NSCs, and expression of the stemness maintenance molecules Oct4 and SOX2 was significantly decreased (Figures 5(a)-5(c)). Immunofluorescence detection showed that low UBC9 expression could significantly inhibit nestin expression and promote cell differentiation. However, under hypoxic conditions, expression Oxidative Medicine and Cellular Longevity increased and promoted the expression of its precursor form ( Figure 5(f)). Thus, low UBC9 expression significantly aggravated the hypoxic injury of NSCs. Mild Hypothermia Increased the Survival of NSCs Transplanted into the Cerebral Ischemic Penumbra of Mice and Improved Neuromotor Function. 
MCAO could generate infarct areas reaching 50% in rats, and the infarct area could be reduced to 30% after transplantation of NSCs into the penumbra, while mild hypothermia could further reduce the infarct area. Immunofluorescence staining showed that NSC transplantation could significantly increase the density of NSCs in the penumbra, and mild hypothermia could further increase the survival of NSCs (Figures 6(a) and 6(b)). Compared with the MCAO group, the motor and coordination ability of rats transplanted with moderately hypothermic NSCs improved after 7 days (Figure 6(c)), and the neural function of rats significantly improved after 21 days (Table 1). Notably, postoperative MCAO rats showed anxiety, which was significantly relieved after NSC transplantation and mild hypothermia (Figures 6(d) and 6(e)). Morris water maze testing showed that the learning and memory function of rats transplanted with NSCs into the penumbra and treated with mild hypothermia improved to varying degrees compared with after MCAO (Figures 6(f) days following surgery in each group. Data are expressed as mean ± SD (n = 7). # P < 0:05 vs. MCAO and & P < 0:05 vs. MCAO+NSC. Transplanted NSCs Overexpressing UBC9 in the Cerebral Ischemic Penumbra of Rats Exhibited Higher Survival Rates and Enhanced Neuromotor Function. Compared with simple transplantation of NSCs, UBC9-overexpressing NSCs could reduce the cerebral infarction area of rats from 30% to 12%. Moreover, overexpression of UBC9 could promote the survival of NSCs, as well as their ability to adapt to hypoxic injury of brain (Figures 7(a) and 7(b)). Compared with the NSC-transplanted group, MCAO rats transplanted with NSCs overexpressing UBC9 exhibited significantly improved motor ability and neurological functions (Table 1, Figure 7(c)), reduced anxiety levels, and enhanced learning functions to varying degrees (Figures 7(d)-7(h)). Discussion In this study, we first examined the effect of hypoxia on NSCs.
The results show that although hypoxia damaged NSCs, it increased their potential to maintain stemness, inhibited their neuronal differentiation, and reduced their metabolism. Further studies showed that mild hypothermia antagonized hypoxia-induced damage to NSCs, further inhibiting their differentiation and reducing cell metabolism. To evaluate whether the protective effect of mild hypothermia on NSCs was related to the SUMO modification of proteins, we examined the effect of mild hypothermia on the expression of SUMOs in NSCs. The results show that hypoxia increased global SUMO modifications of NSCs, both SUMO1 and SUMO2/3, and mild hypothermia further strengthened these trends. These results preliminarily validated our hypothesis that hypoxia can increase the hypoxia tolerance of NSCs by increasing global SUMOylation, and mild hypothermia can enhance neuroprotective effects by reinforcing this trend. To evaluate whether global SUMOylation is indispensable to improving the hypoxia tolerance of NSCs, we next overexpressed and silenced the UBC9 gene (the only E2binding enzyme in the SUMO modification reaction) in NSCs and evaluated their hypoxia tolerance and stemness maintenance potential. The results show that Ubc9 overexpression increased both the stemness potential of NSCs and their tolerance to hypoxia; in contrast, silencing UBC9 reduced the stemness of NSCs and decreased their tolerance to hypoxia. These results indicate that global SUMO modification of target proteins is essential to improve the tolerance of NSCs to hypoxia. However, we cannot specify exactly which proteins were SUMO-modified during this event. According to previous studies, we speculate that hypoxia greatly increases SUMO modification levels of numerous target proteins, including HIF-1, Oct4, and SOX2. 
Theoretically, SUMO-modified HIF-1, Oct4, and SOX2 cannot be degraded by ubiquitin hydrolase, thus allowing these proteins to persist and stably exist in the nucleus and cytoplasm of NSCs, whereby they increase hypoxia tolerance and stemness potential. Finally, we established an MCAO model in rats to investigate the effects of mild hypothermia on survival of NSCs transplanted in the cerebral ischemic penumbra and subsequent improvements of neurological function. The results show that mild hypothermia increased the survival of NSCs transplanted in the cerebral ischemic penumbra of rats and improved their neurological functions, including motor and learning abilities. Finally, we transplanted NSCs overexpressing UBC9 into the cerebral ischemic penumbra of rats and found that these cells exhibited higher survival rates and better improved the neurological function of experimental animals, including motor and learning functions. These results verify our hypothesis that increasing global SUMOylation of many target proteins, e.g., by overexpressing UBC9, can help NSCs obtain stronger hypoxia tolerance. Moreover, transplantation of NSCs with stronger hypoxia tolerance into the cerebral ischemic penumbra and subsequent mild hypothermia treatment increased the survival of NSCs in the penumbra and enhanced the neurological function of animals. Although this study has enriched new ideas for the future treatment of ischemic cerebrovascular disease with mild hypothermia therapy combined with NSCs transplantation, there are still many difficult problems to be solved before it is actually applied to the clinic. First, due to the existence of the blood-brain barrier, how to inoculate NSCs into the brain of patients with cerebral ischemia will be a difficult problem for clinicians to face. Second, how to control the time window of hypothermia cooling and rewarming to produce the best therapeutic effect on patients is far from being interpreted. 
Third, the potential tumorigenic effects brought about by the proliferative characteristics of NSCs are serious side effects that we can never ignore. Last but not least, SUMOylation can simultaneously intervene in the posttranslational modification of thousands of proteins. How to achieve advantages and avoid disadvantages at the molecular level and protein function will be a problem that researchers attach great importance to. In conclusion, there is still a long way to go before the clinical application of mild hypothermia combined with NSCs transplantation in the treatment of ischemic cerebrovascular disease. Conclusions Global protein SUMO modification is an important molecular mechanism for NSC tolerance of hypoxia, and mild hypothermia can further increase the degree of global protein SUMO modification in NSCs—a newly discovered molecular mechanism by which mild hypothermia protects the brain. Indeed, mild hypothermia can improve the hypoxia tolerance of NSCs and increase their survival following transplantation in situ, thus yielding better nerve repair effects. Data Availability All the data used to support the findings of this study are available from the corresponding authors upon request. 11 Oxidative Medicine and Cellular Longevity
<filename>src/branches/ability/keyword/banding.ts
// Parser for the "banding" keyword ability. Recognizes both the plain
// "banding" wording and the "bands with other <scope>" variant, and
// normalizes both into a single BandingKeywordAbility value.
import { Pattern, r } from "@/r";
import { BandingKeywordAbility } from "@/types/ability";
import { EverythingScope } from "@/types/scope";
import { parseScope } from "@/branches/scope/_";

export const parseBandingKeywordAbility: Pattern<BandingKeywordAbility> = (
  // defer() postpones building the pattern until first use, so the
  // reference to parseScope is safe even if module loading is circular.
  r.defer(() =>
    r.anyOf(
      // "bands with other <scope>": keep the explicitly parsed scope.
      r`bands with other ${parseScope}`.as(([scope]) => scope),
      // Plain "banding": there is no scope text to parse, so a nested
      // single-capture pattern synthesizes the "everything" scope.
      // TODO: fix hack
      // tslint:disable-next-line:max-line-length
      r`${r`banding`.as(_ => <EverythingScope>{ type: "everything" })}`.as(([scope]) => scope)
    // Either branch yields a scope; wrap it in the keyword-ability shape.
    ).as(scope => <BandingKeywordAbility>{ type: "keyword", keyword: "banding", scope }))
);
// New initializes and returns an Queue, default min heap. func New(opts ...Option) *Queue { q := &Queue{ &comparator.Container{ Items: make([]interface{}, 0), }, } for _, opt := range opts { opt(q) } heap.Init(q.data) return q }
def city_timezone(city: str) -> str:
    """Resolve the IANA timezone name for a city.

    Geocodes ``city`` with Nominatim, then maps the resulting
    latitude/longitude to a timezone name via ``tzwhere``.

    Args:
        city: Free-form city name (e.g. ``"Amsterdam"``).

    Returns:
        The IANA timezone name (e.g. ``"Europe/Amsterdam"``). Note that
        ``tzNameAt`` can return ``None`` when no timezone polygon covers
        the coordinates (e.g. open ocean).

    Raises:
        ValueError: If the city cannot be geocoded.
    """
    geolocator = Nominatim(user_agent="circuit_maintenance")
    location = geolocator.geocode(city)
    if location is None:
        # geocode() returns None for unresolvable place names; fail fast
        # with a clear message instead of the AttributeError the lookup
        # below would otherwise raise.
        raise ValueError(f"Could not geocode city: {city!r}")
    # NOTE(review): tzwhere.tzwhere() reloads its polygon data on every
    # call, which is slow; consider caching the instance if this function
    # is called repeatedly.
    timezone = tzwhere.tzwhere()
    return timezone.tzNameAt(location.latitude, location.longitude)
/// Receives message from game, nonblocking: None if not available pub fn recv(&mut self) -> Option<ToPlayer> { match self.rx.try_recv() { Ok(msg) => Some(msg), Err(TryRecvError::Empty) => None, Err(TryRecvError::Disconnected) => panic!("Disconnected"), } }
T.S. Eliot, in his poem "The Rock," asks rhetorically: "Where is the wisdom we have lost in knowledge? Where is the knowledge we have lost in information?" He could not have imagined how we have increased the world’s database of information. According to some reports, the store of facts and data has been doubling almost every year since the turn of the 20th century. Today, given the proliferation of the Internet, the computerization of news archives, and libraries available on the World Wide Web, literally thousands of references are available at the click of a mouse. The challenge today is not acquiring information, it is determining which information is relevant. Addressing an education conference in late 2006, Dana Gioia , then the chairman of the National Endowment for the Arts, said that we need "a system that grounds all students in pleasure, beauty, and wonder.” He added: “If we are going to compete productively with the rest of the world, it’s going to be in terms of creativity and innovation." Knowing what education should be doing in an age in which people are likely to have more than 10 jobs by age 42, according to the U.S. Department of Labor, has greatly complicated matters. Further exacerbating the situation is the projection that the top 10 jobs that will be in demand for today’s students don’t yet exist, and will be using technologies that haven’t been invented to solve problems we don’t even know are problems yet. Clearly, we are headed into a new and uncertain future. In 2007, the federal America COMPETES Act was signed into law by President George W. Bush, authorizing $151.2 million to help college students earn bachelor’s degrees in math or science with concurrent teacher certification, $125 million to help teachers earn master’s degrees in these areas, and additional funding to create more educational programs at the K-12 level to align math and science curricula in students' preparation for college. 
In truth, we need a huge infusion of capital and a change in attitude not only for mathematics and science, but also for art and music. Importantly, we need to define a well-rounded education and make the case for its importance in a global, innovation-based economy. The evidence for such an effort is slowly mounting. Robert Root-Bernstein, a professor of physiology and a winner of the MacArthur prize, completed a study of 150 eminent scientists from Pasteur to Einstein. His findings were startling to those educators lobbying for more emphasis on the sciences, for he discovered that nearly all of the great inventors and scientists were also musicians, artists, writers, or poets. Galileo, for example, was a poet and literary critic. Einstein was a passionate student of the violin. And Samuel Morse, the inventor of the telegraph and father of telecommunications, was a portrait painter. Perhaps as a consequence of the Harvard University scholar Howard Gardner's pioneering research on "multiple intelligences" and the idea that all children learn differently, various practical applications are evident throughout the country. Six years ago, the Los Angeles County board of supervisors adopted "Arts for All," a regional blueprint for arts education. The program's vision is for every public school student in the county to receive an effective K-12 education, of which the arts are an important component. High Tech High in San Diego is another remarkable example of art infusion, indeed infusion of all the various disciplines. HTH is a charter school network that is well funded by the Bill & Melinda Gates Foundation, the family of Gary Jacobs (formerly of Qualcomm), and many San Diego businesses. It consists of eight schools: five high schools, two middle schools, and one elementary school, with a total of 2,500 students and 300 employees. A hundred percent of graduates have been admitted to college, 80 percent to four-year institutions of higher learning. 
High Tech High is unusual in that its staff members create "personalized, project-based learning environments where all students are known well and challenged to meet high expectations." There is no math class or art. Rather, those disciplines—still taught, still relevant—are curriculum-infused, integrated if you will, into the study of larger questions such as these: How does the world work? Who lives here? Why do things matter? Each semester, the entire faculty and student body are assigned a topic they will work on together and that draws on all the disciplines, thereby forcing students to work collaboratively on real-world dilemmas. Larry Rosenstock, High Tech High’s chief executive officer, quotes Sir Ken Robinson, an international expert in the field of creativity and innovation, when he says, "Creativity is as important as literacy and should be given equal status." Maybe we really need to go back to basics and ask what the purpose of public education is, and what we consider an educated person to be. Perhaps we should change the vocabulary of the educational establishment, alter the lenses in the camera, and in the process awaken to the competitive demands of this new age. John M. Eger is the Lionel Van Deerlin endowed chair of communications and public policy at San Diego State University. He can be reached by e-mail at [email protected]. “More Than Movies,” July 16, 2008. “Learning Where They Teach,” July 18, 2007.
<gh_stars>0
package com.raccoonberus.chatbot.connector.telegram.model;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;

// NOTE(review): java.math.BigInteger appears unused in this class —
// confirm before removing the import.
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.List;

/**
 * JSON model for the Telegram Bot API {@code getUpdates} response
 * envelope: an {@code ok} flag plus a list of update items.
 *
 * Unknown JSON properties are ignored so newer API fields do not break
 * deserialization.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public class GetUpdatesResponse {

    /** API envelope success flag; defaults to {@code false}. */
    @JsonProperty
    private boolean ok = false;

    /** Received updates; initialized empty so callers never see null. */
    @JsonProperty
    private List<ResultItem> result = new ArrayList<>();

    public boolean isOk() {
        return ok;
    }

    /** Fluent setter: returns {@code this} to allow call chaining. */
    public GetUpdatesResponse setOk(boolean ok) {
        this.ok = ok;
        return this;
    }

    public List<ResultItem> getResult() {
        return result;
    }

    /** Fluent setter: returns {@code this} to allow call chaining. */
    public GetUpdatesResponse setResult(List<ResultItem> result) {
        this.result = result;
        return this;
    }
}
James Giddens, the trustee for liquidation of MF Global, said the "shortfall" may be larger than MF Global's management initially reported prior to the bankruptcy filing on October 31, according to a press release. "At present, however, the Trustee does not have access to other funds beyond the $1.6 billion on hand, and he is very close to exhausting the funds under his control," the release said. James W. Giddens, Trustee for the liquidation of MF Global Inc., today reported that his current plan to distribute 60% of what should have been segregated in US depositories for all former customers with US futures positions will total nearly all of the assets currently under his control. The Trustee to date has brought approximately $3.7 billion under his control, all of which comes from the former US depositories of the broker-dealer. Having already distributed $1.5 billion in collateral, and currently distributing $520 million in cash, leaves approximately $1.6 billion on hand. The previously announced next step, restoring 60% of what is in segregated customer accounts for US futures positions, would require approximately $1.3 to $1.6 billion to implement; that is, virtually all of the assets currently under the Trustee's control. This next step is subject to Bankruptcy Court approval, and will be done in close cooperation with the CFTC, SIPC, and the CME. The Trustee expects this transfer to occur in early December, once the current transfer is complete and books and records are reconciled to allow it to happen. Efforts to collect other funds from US depositories continue around the clock, and it is expected that the US funds available to the Trustee will increase in the coming weeks. At present, however, the Trustee does not have access to other funds beyond the $1.6 billion on hand, and he is very close to exhausting the funds under his control. 
Further complicating matters, assets located in foreign depositories for customers that traded in foreign futures are now under the control of foreign bankruptcy trustees, and while the Trustee will pursue them vigorously, it has been his experience that recovery of these foreign assets may take more time. The Trustee's counsel has also stated in open court that the Trustee has only relatively nominal proprietary - that is non-customer - assets in his immediate control. The amount of assets the Trustee controls is a separate issue from the apparent shortfall in what former MF Global Management should have segregated. At present, the Trustee believes that even if he recovers everything that is at US depositories, the apparent shortfall in what MF Global management should have segregated at US depositories may be as much as $1.2 billion or more. The Trustee wants to stress that these are preliminary numbers that may well change, and the Trustee will update in due course. The Trustee's investigative team, consisting of counsel experienced in broker-dealer liquidations and expert consultants and forensic accountants from both Deloitte and Ernst & Young, continues around the clock in close coordination with the Department of Justice, the CFTC, the SEC, SIPC, the CME, and others. The information in this statement does not apply to any other MF Global entity, including separate insolvency proceedings involving the parent company, MF Global Holdings Ltd.
<filename>test_comparable.cc
// Unit tests for the LACE comparison helpers declared in compare.h.
//
// Two small value types exercise the two macro entry points:
//  - nodeX  defines a three-way cmp() member; LACE_COMPARABLE derives
//    the six relational operators from it.
//  - nodeLT defines only operator<; LACE_COMPARABLE_LT derives the
//    remaining operators from that single comparison.
#include "compare.h"

#include <cassert>
#include <cstdlib>

// Wraps an int and exposes a three-way comparison via cmp().
struct nodeX {
  nodeX(int xx) : x(xx) { }
  // Three-way compare delegating to lace::compare on the wrapped int.
  lace::compare_t cmp(const nodeX &that) const { return lace::compare(x, that.x); }
private:
  int x;
};
// Derive ==, !=, <, <=, >, >= from nodeX::cmp.
LACE_COMPARABLE(nodeX, cmp);

// Wraps an int and exposes only operator<.
struct nodeLT {
  nodeLT(int xx) : x(xx) { }
  friend bool operator<(const nodeLT &foo, const nodeLT &bar) { return foo.x < bar.x; }
private:
  int x;
};
// Derive the remaining relational operators from operator< alone.
LACE_COMPARABLE_LT(nodeLT);

// Checks all six relational operators for T across the three orderings
// (less-than, equal, greater-than) using the values -1, 0, +1.
template <typename T>
void test() {
  // -1 vs +1: strictly less.
  assert( (T(-1) != T(+1)) );
  assert( (T(-1) < T(+1)) );
  assert( (T(-1) <= T(+1)) );
  assert(!(T(-1) == T(+1)) );
  assert(!(T(-1) >= T(+1)) );
  assert(!(T(-1) > T(+1)) );
  // 0 vs 0: equal.
  assert(!(T( 0) != T( 0)) );
  assert(!(T( 0) < T( 0)) );
  assert( (T( 0) <= T( 0)) );
  assert( (T( 0) == T( 0)) );
  assert( (T( 0) >= T( 0)) );
  assert(!(T( 0) > T( 0)) );
  // +1 vs -1: strictly greater.
  assert( (T(+1) != T(-1)) );
  assert(!(T(+1) < T(-1)) );
  assert(!(T(+1) <= T(-1)) );
  assert(!(T(+1) == T(-1)) );
  assert( (T(+1) >= T(-1)) );
  assert( (T(+1) > T(-1)) );
}

int main(int, char*[]) {
  // The derived operators must behave identically for a builtin type
  // and for both macro-generated types.
  test<int>();
  test<nodeX>();
  test<nodeLT>();

  // lace::compare on C strings: "a" < "b" (compares contents, as these
  // assertions would fail for arbitrary pointer comparison).
  assert( (lace::compare("a","b") != 0));
  assert( (lace::compare("a","b") < 0));
  assert( (lace::compare("a","b") <= 0));
  assert(!(lace::compare("a","b") == 0));
  assert(!(lace::compare("a","b") >= 0));
  assert(!(lace::compare("a","b") > 0));
  // Equal C strings compare as 0.
  assert(!(lace::compare("a","a") != 0));
  assert(!(lace::compare("a","a") < 0));
  assert( (lace::compare("a","a") <= 0));
  assert( (lace::compare("a","a") == 0));
  assert( (lace::compare("a","a") >= 0));
  assert(!(lace::compare("a","a") > 0));
  // Wide strings and std::string are also supported.
  assert(!(lace::compare(L"a", L"a") != 0));
  assert(!(lace::compare(std::string("a"),std::string("a")) != 0));
  return EXIT_SUCCESS;
}
<reponame>RotaractAlumniMora/TrainLK
// Root Angular/Ionic (ionic-angular v3 style) module for the TrainLK app.
// Wires up all pages, ionic-native plugins, and data providers.
import { BrowserModule } from '@angular/platform-browser';
// NOTE(review): Injectable appears unused in this file — confirm before
// removing it from the import.
import { ErrorHandler, NgModule, Injectable } from '@angular/core';
import { IonicApp, IonicErrorHandler, IonicModule } from 'ionic-angular';
import { IonicStorageModule } from '@ionic/storage';
import { HttpClientModule } from '@angular/common/http';

import { MyApp } from './app.component';
import { HomePage } from '../pages/home/home';
import { AlertsPage } from '../pages/alerts/alerts';
import { SettingsPage } from '../pages/settings/settings';
import { AboutUsPage } from '../pages/about-us/about-us';
import { SubmitPage } from '../pages/submit/submit';
import { TimetablePage } from '../pages/timetable/timetable';
import { ViewAdsPage } from '../pages/view-ads/view-ads';
import { ViewNewsPage } from '../pages/view-news/view-news';

import { StatusBar } from '@ionic-native/status-bar';
import { SplashScreen } from '@ionic-native/splash-screen';
import { NewsProvider } from '../providers/news/news';
import { UserProvider } from '../providers/user/user';
import { NewsItemsProvider } from '../providers/news-items/news-items';
import { AlertProvider } from '../providers/alert/alert';
import { TimetableProvider } from '../providers/timetable/timetable';
import { RoutesProvider } from '../providers/routes/routes';
import { TrainsProvider } from '../providers/trains/trains';
import { LocalNotifications } from '@ionic-native/local-notifications';

@NgModule({
  // Every page is listed both here and in entryComponents because Ionic 3
  // instantiates pages dynamically via its NavController rather than
  // through Angular templates.
  declarations: [
    MyApp,
    HomePage,
    AlertsPage,
    SubmitPage,
    TimetablePage,
    SettingsPage,
    ViewAdsPage,
    ViewNewsPage,
    AboutUsPage,
  ],
  imports: [
    BrowserModule,
    HttpClientModule,
    IonicModule.forRoot(MyApp),
    IonicStorageModule.forRoot(),
  ],
  bootstrap: [IonicApp],
  entryComponents: [
    MyApp,
    HomePage,
    AlertsPage,
    SubmitPage,
    TimetablePage,
    SettingsPage,
    ViewAdsPage,
    ViewNewsPage,
    AboutUsPage,
  ],
  providers: [
    StatusBar,
    SplashScreen,
    // Replace Angular's default error handler with Ionic's, which shows
    // errors in an overlay during development.
    {provide: ErrorHandler, useClass: IonicErrorHandler},
    NewsProvider,
    UserProvider,
    NewsItemsProvider,
    AlertProvider,
    TimetableProvider,
    RoutesProvider,
    TrainsProvider,
    LocalNotifications,
  ]
})
export class AppModule {}
Therapeutic Alliance in Web-Based Treatment for Eating Disorders: Secondary Analysis of a Randomized Controlled Trial Background In face-to-face therapy for eating disorders, therapeutic alliance (TA) is an important predictor of symptom reduction and treatment completion. To date, however, little is known about TA during web-based cognitive behavioral therapy (web-CBT) and its association with symptom reduction, treatment completion, and the perspectives of patients versus therapists. Objective This study aimed to investigate TA ratings measured at interim and after treatment, separately for patients and therapists; the degree of agreement between therapists and patients (treatment completers and noncompleters) for TA ratings; and associations between patient and therapist TA ratings and both eating disorder pathology and treatment completion. Methods A secondary analysis was performed on randomized controlled trial data of a web-CBT intervention for eating disorders. Participants were 170 females with bulimia nervosa (n=33), binge eating disorder (n=68), or eating disorder not otherwise specified (n=69); the mean age was 39.6 (SD 11.5) years. TA was operationalized using the Helping Alliance Questionnaire (HAQ). Paired t tests were conducted to assess the change in TA from interim to after treatment. Intraclass correlations were calculated to determine cross-informant agreement with regard to HAQ scores between patients and therapists. A total of 2 stepwise regressive procedures (at interim and after treatment) were used to examine which HAQ scores predicted eating disorder pathology and therapy completion. Results For treatment completers (128/170, 75.3%), the HAQ-total scores and HAQ-Helpfulness scores for both patients and therapists improved significantly from interim to post treatment. For noncompleters (42/170, 24.7%), all HAQ scores decreased significantly. For all HAQ scales, the agreement between patients and therapists was poor. 
However, the agreement was slightly better after treatment than at interim. Higher patient scores on the helpfulness subscale of the HAQ at interim and after treatment were associated with less eating disorder psychopathology. A positive association was found between the HAQ-total patient scores at interim and treatment completion. Finally, posttreatment HAQ-total patient scores and posttreatment HAQ-Helpfulness scores of therapists were positively associated with treatment completion. Conclusions Our study showed that TA in web-CBT is predictive of eating disorder pathology and treatment completion. Of particular importance is patients' confidence in their abilities as measured with the HAQ-Helpfulness subscale when predicting posttreatment eating disorder pathology and treatment completion. Background Eating disorders (EDs) are related to serious physical, psychological, and social consequences and are characterized by a chronic character and high treatment costs. However, many patients have EDs for years before receiving treatment. Access to face-to-face treatment of ED is often limited because of personal barriers, such as feelings of shame and fear of stigmatization, and intervention-related barriers, such as costs, geographic distance, and lack of availability. Web-based alternatives, which may encompass website- and mobile app-based treatment programs, might show promise. Web-based treatment was shown to be effective in reducing ED psychopathology, and it can improve access to ED treatment compared with face-to-face treatment. Web-based treatment provides the added advantages of approachability, relative anonymity, and widespread 24-hour access, which are considered important benefits for patients with ED. One particularly important facet of face-to-face treatment is the therapeutic alliance (TA) between therapists and patients.
Although there are various ways to define the concept of TA, all definitions have in common that TA can best be characterized by the degree of agreement between a therapist and a patient concerning the goals and tasks of the treatment and suggest the presence of an affective bond. TA was shown to be predictive of treatment completion and outcomes in the general population. The quality of TA was also shown to be predictive of treatment completion and outcomes in face-to-face ED treatment. However, the predictive value of TA for treatment outcomes in patients with ED varies between studies and between patient groups. More specifically, the predictive value of TA for treatment outcome is less obvious for patients with bulimia nervosa (BN) than for patients with anorexia nervosa. Overall, the predictive value of TA for treatment outcomes is associated with small to medium effect sizes. With regard to web-based treatment in the general population, multiple studies have demonstrated that the strength of the TA during treatment can be improved without face-to-face contact with a therapist. However, compared with face-to-face treatment, much less is known about important predictors in the development of TA in the context of web-based treatment. Studies focusing on TA in web-based treatment are often methodologically inferior to those focusing on face-to-face treatment. Few studies have been conducted on the role of TA with regard to treatment outcomes and adherence in web-based treatment for ED. It was found that TA was rated high over the course of ED treatment. It was also found that higher TA ratings were associated with better treatment outcomes. Furthermore, some evidence indicates that the extent of TA during web-based treatment of ED is positively associated with treatment adherence. Objectives Concerning the effects of TA on treatment effectiveness, it is also important to emphasize the degree of agreement between the therapist and patient perspectives. 
In the general population, it has been shown that convergent patient-therapist ratings over the course of treatment predict a better treatment outcome. It was also found that, for face-to-face treatment in the general population, therapist ratings of the TA are not as predictive of treatment outcomes as the TA ratings provided by patients. This study focused on web-based cognitive behavioral therapy (web-CBT) for female patients with ED and aimed to investigate TA ratings measured at interim and after treatment, separately for patients (treatment completers and noncompleters) and therapists; the degree of agreement between therapists and patients (treatment completers and noncompleters) for TA ratings; and associations between patient and therapist TA ratings and both ED pathology and therapy completion. We hypothesized that the TA would increase from interim to post treatment for both patients and therapists and that there would be stronger agreement between therapists and patients who completed treatment than between therapists and patients who did not complete treatment. Furthermore, we hypothesized that TA ratings provided by patients and therapists are predictors of eating disorder pathology, particularly after treatment. Finally, we hypothesized that TA ratings would be positively associated with treatment completion. Study Design A secondary analysis was conducted on the data from a randomized controlled trial (RCT) investigating a web-CBT intervention for EDs. The study design, procedures, and results of the RCT are described in detail elsewhere. Recruitment for the RCT was conducted from March 2011 to December 2013. Information on the study was disseminated through announcements on ED-related websites, forums, and newspaper advertisements. 
Ethics Approval All participants provided written informed consent, and the study was approved by the Medical Ethics Committee of the Medical Spectrum Twente (NL31717.044.010, P10-31) and registered in the Netherlands Trial Registry (NTR2415). An RCT compared a web-CBT intervention group to a waiting list control group. Participants were stratified by ED type (BN, binge eating disorder , or eating disorder not otherwise specified ). Participants in the intervention group started web-CBT immediately, whereas those in the control group had to wait 15 weeks after randomization. Both completers and noncompleters completed the questionnaire. For the current analysis, measurements at the interim (after the first part of treatment) and after treatment were used. As this study did not focus on the efficacy of the treatment but on the interim and posttreatment measurements of the TA, the data from the intervention phase of the study of both the initial intervention and control groups were merged. Participants The participants of this study were female patients with a Diagnostic and Statistical Manual of Mental Disorders, Fourth Edition (DSM-IV) diagnosis of BN, BED, or EDNOS who completed the first part of the web-based CBT and completed the interim questionnaire. In addition to the DSM-IV classification, the inclusion criteria for the RCT were age ≥18 years, access to the internet, fluent in Dutch, referral from a general practitioner, and to be within 85% of the target weight established by the table of height and weight limits of MINI-Plus. Exclusion criteria were as follows: suicidal ideation, receiving psychological or pharmaceutical treatment for any ED within the past 6 months, pregnancy, and expected absence of 4 weeks or longer during the treatment period of 15 weeks. Intervention The web-based treatment program, Etendebaas (English translation: "Look at your eating"), included a structured 15-week web-CBT that was designed within a secure web-based application. 
The treatment program consisted of 2 parts and included 16 treatment modules, with at least 21 scheduled asynchronous contact moments and 10 homework assignments. The first part aimed to analyze participants' ED attitudes and behaviors, whereas the second part focused on behavioral changes. All treatment modules were completed by the patients in a fixed order, and it was not possible to skip a module. CBT and motivational interviewing were the fundamental elements of the intervention, which included techniques such as psychoeducation, self-monitoring through daily diary entries, thought restructuring, problem-solving, and relapse prevention. In their personal files, patients could read and respond to the therapist's messages and complete homework assignments. The treatment protocol prescribed regular contact between patients and their therapists, with therapists responding to the patients' messages and assignments within 3 working days. A total of 17 therapists carried out web-based treatments, including 2 male therapists and 15 female therapists. Therapists had either a bachelor's degree in nursing or social work or a master's degree in psychology and received specific training for web-based treatment. A comprehensive manual was available, which included a detailed description of all treatment modules and safety protocols. The treatment also included web-based coaches and support from a multidisciplinary team (psychologists, psychotherapists, addiction medicine physicians, psychiatrists, and dieticians) who were available for consultation. The participating therapists did not have knowledge of the TA scores of the patients and did not receive any instructions regarding investing in improving the TA. However, within the regular web-based treatment protocol, the core task of a therapist is to build and maintain the TA. Therapeutic Alliance TA was measured using the Dutch patient and therapist version of the Helping Alliance Questionnaire (HAQ). 
The HAQ is a self-report questionnaire that measures the strength of a therapeutic patient-therapist alliance. The therapist's version was derived from the patient's version and was compatible. The Dutch version of the HAQ has 11 items scored on a 5-point Likert scale (1=totally disagree, 2=disagree, 3=neutral, 4=agree, and 5=totally agree). The HAQ contains two subscales: cooperation (5 items), reflecting the perception of the patient on the cooperation with a care provider or vice versa, and helpfulness (6 items), which reflects a patient's or therapist's confidence in their own capacity to improve the situation. The HAQ-total score was determined as the sum of the subscale scores. This study found the patient version of the HAQ to be internally reliable at the interim measurement: Cronbach α of .81 for the cooperation subscale, Cronbach α of .81 for the helpfulness subscale, and Cronbach α of .87 for the total HAQ score. The therapist version of the HAQ was also internally reliable: Cronbach α of .77 for the cooperation subscale, Cronbach α of .78 for the helpfulness subscale, and Cronbach α of .87 for the total HAQ score. Eating Disorder Psychopathology Changes in the clinical severity of ED psychopathology were measured using the total score of the Eating Disorder Examination Questionnaire (EDE-Q). The EDE-Q is a widely used validated self-report scale based on Eating Disorder Examination interviews. The instrument focuses on the previous 28 days to assess important behavioral and attitude aspects of ED and the severity of ED psychopathology. The EDE-Q consists of 36 items, with four subscales (restraint, eating concern, shape concern, and weight concern). Items are scored on a 7-point Likert scale (range 0=not one single day to 6=every day), with a higher score reflecting more psychopathology. Subscale scores were obtained by averaging the items for each subscale, whereas the total EDE-Q score was obtained by summing the subscale scores.
Previous research indicates that the EDE-Q demonstrates acceptable internal consistency (Cronbach α ranging from .77 to .84). Treatment Completion Participants were considered completers when they had attended all 16 treatment modules with at least 21 contact moments with their personal therapist, completed all 10 homework assignments, and completed the at-interim and posttreatment questionnaires. Participants who stopped the treatment program before the completion of all treatment modules and completed the at-interim and posttreatment questionnaires were considered noncompleters. Therefore, treatment completion was operationalized using a dichotomous measure (yes or no). Statistical Analysis All analyses were conducted using SPSS for Windows (version 21; IBM Corp). Continuous variables were summarized using the mean with the associated SD or as the median with the associated IQR for normally and nonnormally distributed data, respectively. Categorical variables were summarized as frequencies with corresponding percentages. Sum scores were computed for the HAQ-total score and separately for the cooperation and helpfulness subscales, both at interim and after treatment, and separately for patients and therapists. Differences in demographic characteristics between completers and noncompleters were analyzed using independent 2-tailed t tests for continuous normally distributed data and Wilcoxon rank-sum tests for continuous nonnormally distributed data. Differences in categorical variables were analyzed using chi-square or Fisher exact tests (as appropriate). Paired t tests were conducted to assess the change in TA ratings from the interim to the end of treatment for both therapists and patients. In the analyses, we stratified for completers and noncompleters because we expected an opposite pattern in TA ratings from interim to post treatment. Cohen d, defined as the difference between means divided by the pooled SD (d=(M1 − M2)/SDpooled), was calculated to determine the effect sizes for significant findings.
Cohen defines d scores of 0.2, 0.5, and 0.8 as small, medium, and large effects, respectively. An intraclass correlation coefficient (ICC) analysis was conducted to determine cross-informant agreement between patients (separately for completers and noncompleters) and therapists in TA ratings, both at interim and post treatment. To determine the strength of agreement, the guidelines drafted by Koo and Li were used (<0.50: poor, between 0.50 and 0.75: moderate, between 0.75 and 0.90: good, and >0.90: excellent). Next, we examined whether the HAQ scores for patients and therapists were related to ED pathology by creating two linear regression models: one at interim and one post treatment. To do this, TA ratings of therapists and patients (completers and noncompleters combined) were first analyzed separately in univariate linear regression models. We merged the data of completers and noncompleters. The choice to combine completers and noncompleters was based on the following arguments: this would increase statistical power; this would provide fairer insights, as TA ratings of completers were expected to be overly positive specifically because these patients completed the treatment, whereas TA ratings of noncompleters may have been more critical; and by including both groups, the range of ED pathology included in the analyses was broader, increasing the ecological validity of the results. Rating scores that were sufficiently related (P≤.15) in these univariate analyses were entered into a multiple linear regression model. Owing to multicollinearity between the TA subscales and total scale within the patient or therapist groups, we entered the total scale or subscales with the highest explained variance (R 2 ) into the multiple linear regression model. Nonsignificant variables were removed individually until the explained variance deteriorated significantly. 
To assess whether TA ratings were related to treatment completion, we constructed two logistic regression models: one at interim and one post treatment. These models were constructed identically to the construction of the previously described multiple regression models. Owing to multicollinearity between the TA subscales and the total scale for the patient or therapist groups at each time point, we entered the scale(s) that produced the best model fit (−2 log likelihood). Nonsignificant variables were removed one by one until the −2 log likelihood deteriorated significantly. Nagelkerke R 2 was used to estimate the pseudoproportion of the variance. Two-sided significance levels were set to 0.05 in all measurements. Figure 1 presents a flowchart of the inclusion process used in this study. A total of 214 participants were included in an earlier RCT. Of these, 128 (59.8%) completed the treatment. Of the participants who did not complete the web-based CBT (86/214, 40.1%), 15 never started treatment (nonstarters), and 29 stopped the treatment before the end of the first part of the treatment (early dropouts). The 44 participants did not complete the interim questionnaire, so no information about their experiences with the TA was available. Therefore, these were excluded from the analysis. Of the 170 participants who were included in this study, 42 stopped the program during the second part of the web-based CBT. These participants were considered late dropouts and filled out the interim and posttreatment questionnaires, including the TA. In this study, late dropouts were defined as noncompleters, although the overall number of noncompleters in the RCT was higher (n=106), as it also included 15 nonstarters and 29 early dropouts. Figure 1. Flowchart of the inclusion process of this study. *In the underlying RCT, power analysis was used to determine how many participants could be assigned to each subgroup. 
The number of patients included in the binge eating disorder (BED) and eating disorder not otherwise specified (EDNOS) subgroups reached the necessary number of patients that should be recruited within the subgroup based on the sample size calculation, and the necessary number of patients for the bulimia nervosa (BN) group was not reached. web-CBT: web-based cognitive behavioral therapy. Participants The participant characteristics are reported in Table 1. The sample included 170 women with BN (n=33), BED (n=68), or EDNOS (n=69), and a mean age of 39.6 (SD 11.5) years. Completers reported significantly higher BMI scores than noncompleters. When stratifying for BMI categories (underweight, normal weight, and overweight), no significant differences were found between these groups. TA Before and After Treatment In Table 2, TA ratings at interim and post treatment and the difference scores between the measurements are reported separately for completers and noncompleters and from both the patient and therapist perspectives. For completers, the HAQ-total and HAQ-Helpfulness scores improved significantly from interim to the end of treatment, with effect sizes ranging from small to medium. For noncompleters, all 3 types of HAQ scores significantly decreased, with medium to large effect sizes. Cross-Informant Agreement Between Patients and Therapists The ICCs that represent agreement between therapists and patients regarding the TA (represented by the HAQ-total score and HAQ subscale scores) are presented in Table 3. Agreement between therapists and patients increased as treatment progressed. However, in general, agreement between patients and therapists was poor for both noncompleters and completers both at interim and post treatment. Associations With Treatment Outcome For ED pathology measured with the EDE-Q, Table 4 shows the results of the univariate regression analyses at the interim. 
All patients' HAQ scores at interim were found to be univariately negatively associated with the extent of ED psychopathology, as was the therapists' HAQ-Helpfulness score at interim. The subscales showed the best explained variance; therefore, we entered these subscales and not the total HAQ scale in the initial multiple regression model. After entering the patients' subscale scores and therapists' scores for the helpfulness subscale and completing the stepwise regression procedure, only the HAQ-Helpfulness score at the interim of patients was significantly negatively associated with ED pathology. The final interim model with the HAQ-Helpfulness scores of patients as the sole predictor explained 9.8% (F 1 =17.03; P<.001) of the variance in eating disorder pathology. As presented in Table 5, posttreatment HAQ scores of all patients were found to be univariately negatively associated with posttreatment ED psychopathology, as well as therapists' HAQ-Helpfulness and HAQ-total scores after treatment. Owing to multicollinearity and the best model of fit for the subscales, all patients' subscale scores and the therapists' helpfulness subscale scores were entered in the initial multiple regression model. After completing the stepwise regression procedure, the HAQ-Helpfulness score of the patients remained the only predictor that was negatively associated with ED pathology after treatment. After treatment, the HAQ-Helpfulness score of patients explained 22.3% (F 1 =43.58; P<.001) of the variance in ED pathology. Table 4. Univariate regression models for the at-interim association between the strength of the therapeutic alliance (Helping Alliance Questionnaire scores) and eating disorder pathology (Eating Disorder Examination Questionnaire), separately per subscale and as total score, and separately for patients and therapists. Table 5. 
Univariate regression models for the posttreatment association between the strength of the therapeutic alliance (Helping Alliance Questionnaire scores) and eating disorder pathology (Eating Disorder Examination Questionnaire), separately per subscale and as a total score, and separately for patients and therapists. Table 6 shows the results of the univariate logistic regression analyses at the interim of the association between HAQ scores and treatment completion. All patients' HAQ scores at the interim measurement and the therapists' HAQ-Helpfulness and HAQ-total score at the interim were found to be univariately associated with treatment completion. We entered patients' HAQ-total scores and therapists' subscale scores into the initial multiple regression model because these resulted in the best model fit. In the final multivariate model, the HAQ-total score of patients at the interim measurement remained the only significant predictor of treatment completion, explaining 18.8% (−2 log likelihood=167.10; Nagelkerke R 2 =0.188) of the pseudovariance in treatment completion. Table 7 shows the results of the posttreatment univariate logistic regression analyses, focusing on HAQ scores and treatment completion. All the HAQ scores were found to be univariately positively associated with treatment completion. The HAQ-total scores of patients and the therapists' subscales were entered into the initial model because these resulted in the best model fit. In the final multiple regression model, both the HAQ-total scores of patients (odds ratio 0.30, 95% CI 1.18-1.55; P≤.001) and HAQ-Helpfulness scores of therapists (OR 0.13, 95% CI 0.97-1.34; P=.12) were positively associated with treatment completion, explaining 59% (−2 likelihood=80.24; R 2 =0.59) of the pseudovariance in treatment completion. Principal Findings First, in line with our expectations, it is possible to examine and detect changes in the TA of web-based CBT. 
Our study showed that the HAQ-total and HAQ-Helpfulness scores for completers significantly increased from interim to post treatment, whereas for noncompleters, all 3 HAQ scores significantly decreased. This shows that, in general, patients who completed treatment experienced a TA that grew stronger over time, whereas those who did not complete treatment experienced a weaker TA that decreased over time. These findings were observed for both patients and therapists and confirmed the results of previous studies. In addition, we found that although the ICCs of agreement between patients and therapists increased from interim to posttreatment measurement, the overall agreement about the degree of TA remained relatively poor. This might partly be because of differences in perceptions between patients and therapists regarding what the TA entails. Patients are more concerned with the helpfulness of the treatment and collaboration, whereas therapists are more concerned with the performance of the client and their own confidence as therapists. Patients' interim and posttreatment HAQ scores were positively associated with treatment outcomes. The final interim model with the HAQ-Helpfulness scores of patients as the sole predictor explained 9.8% (F 1 =17.03; P<.001) of the variance in eating disorder pathology. Post treatment, the HAQ-Helpfulness score of patients explained 22.3% (F 1 =43.58; P<.001) of the variance in eating disorder pathology. This corresponds with the results of other studies. Although the explained variance is not that high, meaning that there are more unknown factors influencing eating disorder pathology, it does show that patients who are confident in their own capacity to improve their situation are more likely to have better treatment outcomes. 
This is in line with a narrative review of web-based interventions that showed that in most studies, helpfulness-related factors were found to be positively associated with treatment outcomes in internet interventions. In only one of the studies described in the narrative review, a positive association between cooperation-related factors and treatment outcomes was found. Patients who opt for web-CBT may value cooperation with a therapist less important than patients who prefer face-to-face treatment and may prefer the relative anonymity of the internet. For web-CBT for ED, we found one study that also reported a positive association between TA and treatment outcomes. However, this study used a different measure to operationalize the construct of TA and did not focus on the perspective of therapists. Finally, treatment completion is an important predictor of treatment outcome. In a previous study by our laboratory, we found that completers had significantly better treatment outcomes than noncompleters. This highlights the importance of investigating predictors of web-CBT treatment completion. Incidentally, we found that patients with higher BMI completed treatment more often. This is in contrast to observations by Werz et al. On the basis of the current scientific knowledge, we have no reason to interpret our findings as clinically relevant. This study reported a positive association between TA ratings and treatment completion. More specifically, the univariate models indicated that all HAQ scales (helpfulness, cooperation, and the total score), as scored by both patients and therapists, were predictors of treatment completion both at interim and post treatment. However, the multivariate model indicated that only the patients' HAQ-total score and therapists' HAQ-Helpfulness score were positively associated with treatment completion. This might indicate that treatment noncompletion could be reduced by improving the TA. 
It should be noted that the results of this study are limited by a lack of consensus within the field of TA research concerning the definition and operationalization of TA. Across studies, a wide diversity of measures, such as the Working Alliance Inventory and Therapeutic Alliance Scale, are used to operationalize the TA and were designed for face-to-face treatments. This reduces the cross-comparability between studies. Establishing a consensus concerning the operationalization of the construct of TA in EDs and other psychotherapeutic treatments, specifically focusing on web-based treatment, would therefore be very welcome. It should also be noted that the HAQ does not provide norm scores regarding the quality of the TA, which makes it difficult to determine whether the TA is good. No clinically relevant differences in TA were determined. Owing to the rapid development of mobile-and internet-based technology, tools and apps that are integrated into mobile devices such as smartphones are increasingly being used. The data of this study were collected from 2011 to 2013 and have already shown the importance of investing in TA because it could contribute to less psychopathology and more treatment completion. With the increased options for interactivity, it is becoming increasingly interesting to study the impact of TA in web-based treatment. For future studies, we suggest including a more extensive population because it could lead to different results and insights. For example, male patients with ED are increasingly recognized and have unique concerns regarding disordered eating and body image. The same applies to patients with anorexia nervosa. In this population, high dropout rates have been reported, and the strength of the TA has been shown to be associated with changes in ED symptoms. 
It would also be interesting to include a face-to-face CBT condition, as this allows a comparative estimation of the effectiveness of TA on treatment outcome and treatment completion. Finally, monitoring TA from the patient's perspective and acting on relatively low and diminishing scores throughout the treatment process might be fruitful for clinical practice and contribute to better treatment results and completion. Conclusions The results of this study showed that the strength of the TA during web-CBT for ED increased for patients who completed the program and decreased for patients who did not from both the perspectives of patients and therapists. Our study also showed that TA is predictive of ED pathology and treatment completion. In particular, patients' confidence in their own abilities, measured using the HAQ-Helpfulness subscale, is important for predicting posttreatment ED pathology and treatment completion.
Finding of a putative Lake Baikal endemic, Lindavia minuta, in distant lakes near the Arctic pole in Yakutia (Russia) In the northern lakes of Yakutia, Vorota and Labynkyr, which are 2100 kilometres northeast of Lake Baikal, we found a putative Lake Baikal endemic, Lindavia minuta. Morphological analysis and non-metric multidimensional scaling showed that L. minuta populations from Yakutian and Transbaikalian lakes belong to the same (morpho) species. The possible dispersal history is discussed. The most probable hypothesis is that L. minuta was present in the Baikal basin during several Pleistocene glaciation cycles, and since Lake Baikal was previously connected to the River Lena, it is possible that there was a stable population of this species in the river, and that the river carried some cells to other suitable habitats. During glaciated periods, L. minuta could also have survived for longer periods in several ice dam lakes and possibly dispersed further north-east to the lakes in Yakutia.
Manual to Automated Testing Purpose - The purpose of this case study is to understand how two organizations view and utilise automated testing and how it relates to the literature. It outlines and discusses the key factors to be taken into consideration when setting up an automated testing, in addition to the risks involved. Design/methodology/approach Focus group discussions were executed to collect the data and the findings were compared with other literatures. Findings For cognition of automated testing, it is not only limited to its definition and benefits that may be brought into the organization, but also need to focus more on scope of application and preconditions. Aside from the key considerations such as people resistance, working process and training, some other concerns were also found from managerial perspective when adopting automated testing: Cost-benefit Return of Investment (ROI) is an effective method to analyse the investment, especially for the factors affecting the cost of investment; Management support. It includes balancing between business and technology, management involvement and coordinating the relations between the departments; Tool selection: Choosing the right automation tool is a very complicated process with a lot internal factors involved. Practical implication For an organization that doesnt have automated testing implemented yet, a pilot project can be the first option to understand its practicality and applicability based on individual organizational context. Originality/Value This case study can be used for an organization that interests in better introducing and implementing automated testing within the organization. Key Words Automated testing, Cost-benefit, Management support, Tool selection, practicality, applicability and ROI. Paper Type Case Study Research.
Mass spectrometry in environmental toxicology. In environmental toxicology, mass spectrometry can be applied to evaluate both exposure to chemicals as well as their effects in organisms. Various ultra-trace techniques are employed today to measure pollutants in different environmental compartments. Increasingly, effect-directed analysis is being applied to focus chemical monitoring on sites of ecotoxicological concern. Mass spectrometry is also very instrumental for studying the interactions of chemicals with organisms on the molecular and cellular level, providing new insights into mechanisms of toxicity. In the future, diverse mass spectrometry-based techniques are expected to become even more widely used in this field, contributing to the refinement of currently used environmental risk assessment strategies.
Fe b 20 05 Sum-free sets in abelian groups Let A be a subset of a finite abelian group G. We say that A is sum-free if there is no solution of the equation x + y = z, with x, y, z belonging to the set A. In this paper we shall characterise the largest possible sum-free subsets of G in case the order of G is only divisible by primes which are congruent to 1 modulo 3.
/** * Test some client functions with fake http connect * */ public class TestOozieClientWithFakeServlet { private int answer = 0; private boolean check = true; /** * Test method getJMSTopicName */ @Test public void testGetJMSTopicName() throws Exception { answer = 0; check = true; FakeOozieClient client = new FakeOozieClient("http://url"); String answer = client.getJMSTopicName("jobId"); assertEquals("topicName", answer); } /** * Test method getJMSConnectionInfo */ @Test public void testGetJMSConnectionInfo() throws Exception { answer = 1; check = true; FakeOozieClient client = new FakeOozieClient("http://url"); JMSConnectionInfo answer = client.getJMSConnectionInfo(); assertNotNull(answer); } /** * Test method getCoordActionInfo */ @Test public void testGetCoordActionInfo() throws Exception { answer = 1; check = true; FakeOozieClient client = new FakeOozieClient("http://url"); CoordinatorAction answer = client.getCoordActionInfo("actiomId"); assertNotNull(answer); } /** * Test method getBundleJobsInfo */ @Test public void testGetBundleJobsInfo() throws Exception { answer = 2; check = true; FakeOozieClient client = new FakeOozieClient("http://url"); List<BundleJob> answer = client.getBundleJobsInfo("", 0, 10); assertNotNull(answer); assertEquals(1, answer.size()); } /** * Test method getBulkInfo */ @Test public void testGetBulkInfo() throws Exception { answer = 3; check = true; FakeOozieClient client = new FakeOozieClient("http://url"); List<BulkResponse> answer = client.getBulkInfo("", 0, 10); assertNotNull(answer); assertEquals(2, answer.size()); assertEquals(Status.READY, answer.get(0).getAction().getStatus()); } /** * Test method FakeOozieClient */ @Test public void testBundleRerun() throws Exception { answer = 1; check = true; FakeOozieClient client = new FakeOozieClient("http://url"); Void answer = client.reRunBundle("jobId", "", "", true, true); assertNull(answer); } private class FakeOozieClient extends OozieClient { public FakeOozieClient(String oozieUrl) 
{ super(oozieUrl); } @Override protected HttpURLConnection createConnection(URL url, String method) throws IOException { HttpURLConnection result = mock(HttpURLConnection.class); when(result.getResponseCode()).thenReturn(200); when(result.getInputStream()).thenReturn(getIs()); return result; } @SuppressWarnings("unchecked") private InputStream getIs() { ByteArrayInputStream result = new ByteArrayInputStream("".getBytes(StandardCharsets.UTF_8)); if (check) { JSONArray array = new JSONArray(); array.add(2L); String s = JSONValue.toJSONString(array); result = new ByteArrayInputStream(s.getBytes(StandardCharsets.UTF_8)); check = false; return result; } if (answer == 0) { JSONObject json = new JSONObject(); json.put(JsonTags.JMS_TOPIC_NAME, "topicName"); result = new ByteArrayInputStream(json.toJSONString().getBytes(StandardCharsets.UTF_8)); } if (answer == 1) { JSONObject json = new JSONObject(); result = new ByteArrayInputStream(json.toJSONString().getBytes(StandardCharsets.UTF_8)); } if (answer == 2) { JSONObject json = new JSONObject(); List<WorkflowJobBean> jsonWorkflows = new ArrayList<WorkflowJobBean>(); jsonWorkflows.add(new WorkflowJobBean()); json.put(JsonTags.BUNDLE_JOBS, WorkflowJobBean.toJSONArray(jsonWorkflows, "GMT")); result = new ByteArrayInputStream(json.toJSONString().getBytes(StandardCharsets.UTF_8)); } if (answer == 3) { JSONObject json = new JSONObject(); List<BulkResponseImpl> jsonWorkflows = new ArrayList<BulkResponseImpl>(); BulkResponseImpl bulk = new BulkResponseImpl(); bulk.setBundle(new BundleJobBean()); bulk.setCoordinator(new CoordinatorJobBean()); CoordinatorActionBean action = new CoordinatorActionBean(); action.setStatus(Status.READY); bulk.setAction(action); jsonWorkflows.add(bulk); jsonWorkflows.add(bulk); json.put(JsonTags.BULK_RESPONSES, BulkResponseImpl.toJSONArray(jsonWorkflows, "GMT")); result = new ByteArrayInputStream(json.toJSONString().getBytes(StandardCharsets.UTF_8)); } return result; } } }
Advanced metastatic breast carcinoma in sickle cell disease BackgroundBreast cancer is the leading cancer in women leading to over 400,000 deaths per year worldwide. It begins in the breast tissue and can metastasize to other organs if early diagnosis and treatment is not instituted. Women with sickle cell disease are usually spared from breast cancer and other solid tumours due to the tumoricidal effect of sickled erythrocytes. Breast cancers are rare among these group of patients. Despite its rare occurrence, this paper was to emphasize the need for breast cancer screening among female sickle cell disease patients who have positive family history of breast cancer.Case descriptionOO was a 30-year old woman with sickle cell disease who presented to the hospital one and half years ago with a seven months history of right breast swelling and pains. She had lost her mother to breast cancer about 15 years ago. Mammography and histology of breast biopsy confirmed diagnosis of invasive ductal carcinoma of the right breast. Financial constraint was a major challenge in managing this patient as she was unable to buy her chemotherapy. She developed features suggestive of metastasis such as seizures and hepatomegaly. She was stabilized and discharged home but we lost her to follow up. She died at home.Conclusion Breast cancer is rare among females with sickle cell disease; any of them with a family history should be routinely screened for early diagnosis and treatment.
SDLP leader Colum Eastwood used his historic address to the Fianna Fail ard fheis to tell Sinn Fein to do their duty and restore power-sharing in Northern Ireland adding there was a "special place reserved in hell" for those calling for a border poll with no plan as to how a united Ireland would work. He said nationalists should not be seduced into thinking Assembly was a "convenience but not a necessity" and there could be no hope of reconciliation without the devolved institutions at Stormont. He said he and their new Fianna Fail partners would "fight" to ensure government returns. "There is no hope of delivering integrated economic and social progress across this island without an Assembly at Stormont," he said. "There is no way of fully protecting our people against the devastating consequences of Brexit without an Assembly in Stormont. "And for any real republican, there is no pathway towards a New Ireland without an Assembly in Stormont." How can anyone use the pretence of patriotism to avoid their responsibilities to protect the citizens of this country? Echoing the remarks of the European Council president on Brexit, the MLA, addressing Sinn Fein's near constant calls for a border poll said there was a "special place in hell" for those calling for such a vote without first plan on how a united Ireland would work. He also indicated Sinn Fein's opposition to Brexit was "empty rhetoric" without taking the actions they were capable of. "There is no point saying you oppose Brexit if you’re not prepared to turn up and vote against it," he told the conference delegates. "This country is in the middle of a national emergency. In Britain, Theresa May’s majority is disintegrating before our eyes. "How can anyone use the pretence of patriotism to avoid their responsibilities to protect the citizens of this country? "I say this to Sinn Fein – its still an empty formula, get in there and do your duty to this country or be forever defined by empty rhetoric." 
We recognise the deep divisions that exist across our community and we know that it will take real leadership to begin to bring our communities together. Mr Eastwood said those who view unionists as unbending to change or as a community that "doesn't believe in rights" needed to be "faced down". He said: "That cannot be the basis of our vision - this is not 1968 and we are not 2nd class citizens anymore. "I for one am tired of hearing that argument. "We diminish ourselves by its repetition and we diminish the progress which has been secured by previous generations. Extraordinary times call for more than ordinary measures. Mr Eastwood's address was the first since he and Fianna Fail leader Michael Martin announced a partnership between their two parties. While the SDLP membership endorsed the link-up, it has led to prominent representatives resigning their positions with the party in opposition to the move. "Extraordinary times call for more than ordinary measures," Mr Eastwood went on. "Brexit encompasses change on every level. The current political stew is bubbling with issues of identity, belonging and culture – it’s no longer just the economy, stupid. "Brexit has changed everything in this regard - and will continue to change everything. "We are coming together to shape the change before us." With the centenary of partition and subsequent creation of Northern Ireland approaching, Mr Eastwood said "meaningful reconciliation" between the nationalist and unionist communities must be a priority. He said it was important to reach out to unionists and say that while they were working toward a united Ireland "they also need to hear that while we want to shape the change ahead, we want to make Northern Ireland work right now." He continued: "A new and reconciled Ireland will only ever be built by fully recognising the changing island of today. "For our young people especially, the picture of their world is no longer reflected in the murals of our past. 
Old political certainties and old majorities are no more. "On an island of new minorities the only option is to build a broad coalition for change. That changed Ireland won’t be built upon the rubble of our history - it will instead be based on the values we invest in modern nationhood." The SDLP leader said the purpose of a "new Ireland can’t be guided by a blind obsession with historic wrongs - it should be about creating opportunity for our emerging generations". "We will only succeed in reaching that New Ireland if we first provide it with definition and detail. And no referendum should be called until that work is done. "There will be a special place reserved in hell for those who call for a border poll in Ireland with no plan or idea of how to actually deliver it."
/*
 * LeetCode: Implement Queue using Stacks.
 *
 * FIFO queue built from two LIFO stacks. New elements land in `inbox`;
 * whenever the front of the queue is needed and `outbox` is empty, the
 * whole inbox is poured over so the oldest element ends up on top of
 * `outbox`. Each element is moved at most once, so push/pop/peek are
 * amortized O(1). pop()/peek() return -1 if the queue is empty.
 */
class MyQueue {
private:
    stack<int> inbox;   // receives pushes (newest on top)
    stack<int> outbox;  // serves pops/peeks (oldest on top)

    // If outbox is drained, move every element from inbox into outbox,
    // reversing their order so the front of the queue sits on top.
    void shift() {
        if (!outbox.empty()) return;
        while (!inbox.empty()) {
            outbox.push(inbox.top());
            inbox.pop();
        }
    }

public:
    /** Initialize your data structure here. */
    MyQueue() {}

    /** Push element x to the back of queue. */
    void push(int x) {
        inbox.push(x);
    }

    /** Removes the element from in front of queue and returns that element. */
    int pop() {
        shift();
        if (outbox.empty()) return -1;  // defensive: empty queue
        int front = outbox.top();
        outbox.pop();
        return front;
    }

    /** Get the front element. */
    int peek() {
        shift();
        return outbox.empty() ? -1 : outbox.top();
    }

    /** Returns whether the queue is empty. */
    bool empty() {
        return inbox.empty() && outbox.empty();
    }
};

/**
 * Your MyQueue object will be instantiated and called as such:
 * MyQueue* obj = new MyQueue();
 * obj->push(x);
 * int param_2 = obj->pop();
 * int param_3 = obj->peek();
 * bool param_4 = obj->empty();
 */
The correlation between the Th17/Treg cell balance and bone health With the ageing of the world population, osteoporosis has become a problem affecting quality of life. According to the traditional view, the causes of osteoporosis mainly include endocrine disorders, metabolic disorders and mechanical factors. However, in recent years, the immune system and immune factors have been shown to play important roles in the occurrence and development of osteoporosis. Among these components, regulatory T (Treg) cells and T helper 17 (Th17) cells are crucial for maintaining bone homeostasis, especially osteoclast differentiation. Treg cells and Th17 cells originate from the same precursor cells, and their differentiation requires involvement of the TGF- regulated signalling pathway. Treg cells and Th17 cells have opposite functions. Treg cells inhibit the differentiation of osteoclasts in vivo and in vitro, while Th17 cells promote the differentiation of osteoclasts. Therefore, understanding the balance between Treg cells and Th17 cells is anticipated to provide a new idea for the development of novel treatments for osteoporosis. Introduction Osteoporosis is a systemic bone disease characterized by a decrease in the bone mineral content and destruction of the bone microstructure, which increases the fragility of bone and the incidence of fracture. According to the traditional view, the occurrence of osteoporosis is associated with endocrine disorders, metabolic disorders and mechanical factors, especially oestrogen deficiency. However, osteoporosis is also considered a chronic inflammatory bone disease. In recent years, research on the pathogenesis of osteoporosis has been extended to address the interaction between the skeletal system and the immune system. Many studies have demonstrated that immune disorders can cause many skeletal diseases. 
Since Arron and Choi proposed the concept of osteoimmunology in 2000, this cross-disciplinary field has attracted great interest and attention. In this review, we introduce the correlation between bone loss and Treg cells as well as Th17 cells. In addition, the impact of the balance between Treg cells and Th17 cells on osteoporosis is presented. Moreover, we summarize the relevant factors that affect the Th17/Treg cell balance, aiming to provide new ideas for the treatment of osteoporosis in the future. Immunological factors of osteoporosis Osteoporosis patients usually show an increase in bone turnover, which leads to an imbalance of bone resorption and bone formation. Bone development is a process of dynamic balance that is achieved by bone remodelling. Bone remodelling is a process during which bone function constantly adapts to changes in mechanical and physiological stress. It can allow the shaping and repair of bone morphology. Osteoblasts and osteoclasts play a major role in bone remodelling, and any imbalance between them causes various metabolic bone diseases. In recent years, many studies have confirmed that immune cells can interact with osteoblasts and osteoclasts to regulate bone formation and resorption and that macrophage colony-stimulating factor (M-CSF) and receptor activator of nuclear factor-kB ligand (RANKL) act as a bridge between the immune system and bone system. Osteoclasts, originating from haematopoietic stem cells, are multinucleated cells formed after the fusion of precursor cells of the monocytic lineage. Induction of osteoclast formation requires M-CSF and RANKL. In the process of bone resorption, RANKL activates the nuclear factor-kB receptor activator (RANK) receptor on the membrane surface of osteoclast precursor cells and osteoclasts, which leads to the formation and activation of osteoclasts, thus affecting bone remodelling. 
M-CSF promotes the proliferation and survival of osteoclast precursor cells mainly by activating extracellular signal regulated kinase (ERK) via growth factor receptor binding protein 2(Grb2) and protein kinase B (Akt) via phosphoinositide 3 kinase (PI3K). T cells account for approximately 5% of bone marrow cells in the bone marrow stroma and parenchyma. T cells can differentiate into CD4 + T cells and CD8 + T cells. Naive CD4 + T cells can differentiate into Th1, Th2, Th9, Th17, Th22, and Treg cells and follicular helper T (Tfh) cells. Th17 cells and Treg cells play important roles in maintaining bone homeostasis, especially in osteoclast differentiation ( Fig. 1). The relationship between Treg cells and bone loss In 1995, Sakaguchi et al. first discovered Treg cells in the study of autoimmune diseases in mice. Since then, Treg cells have become a hotspot of research on autoimmune diseases, tumours and other diseases. Treg cells mature in the thymus. Interleukin-2 (IL-2) plays an important role in the survival and development of Treg cells. Foxp3, a member of the forkhead box family of transcription factors, is currently recognized as a specific identification marker of Treg cells and is also an essential molecule for the development and functional expression of Treg cells. Treg cells are mainly divided into two categories: naturally occurring Treg cells (nTregs) and induced Treg cells (iTregs). nTregs exist naturally in the thymus, and iTregs are generated from naive T cells in peripheral lymphoid tissues under stimulation by selfantigens. M-CSF and RANKL, which induce the Fig. 1 CD4 + Treg cells affect the bone include cell contact-dependent mechanisms and inhibitory cytokine inhibition mechanisms. 
CD4 + Treg cells can promote the proliferation and differentiation of osteoblasts by secreting TGF- and activating intracellular effectors such as MAPK and Smadrelated proteins that induce mesenchymal stem cells to differentiate into osteoblasts and promote the proliferation and differentiation of these osteoblasts. CD8 + Treg cells can inhibit the maturation and activity of osteoclasts by suppressing the formation of their actin rings. Simultaneously, in the bone marrow, the unique property of osteoclasts to induce CD8 + Treg cells and the ability of CD8 + Treg cells to regulate osteoclast function established a bi-directional regulatory loop between the two types of cells. Th17 cells express high level of RANKL on its surface, which binds to RANK on the surface of osteoclast precursor cells, promoting the development of osteoclast precursor cells to osteoclasts to accelerate bone absorption. Th17 cells also can secrete IL-17 which directly enhances the expression of RANKL in osteoclastogenesis-supporting cells differentiation of osteoclasts are produced under the action of immune cells, bone marrow stromal cells, osteoblasts and fibroblasts. Treg cells have immunosuppressive functions. They can inhibit the production of osteoclasts by preventing the production of RANKL and M-CSF, leading to an increase in bone mass. Studies have shown that the main mechanisms through which Treg cells affect bone include cell contact-dependent mechanisms and inhibitory cytokine inhibition mechanisms. Recently, it has been pointed out that nTregs mainly inhibit the production of osteoclasts through a cell contact-dependent mechanism, while the inhibitory effect of iTregs occurs through an inhibitory cytokine-dependent mechanism. Cytotoxic T lymphocyte-associated antigen-4 (CTLA-4) is an important surface molecule involved in Treg cell-mediated cell contact-dependent inhibition of osteoclast generation. 
Treg cells expressing CTLA-4 bind to CD80/CD86 on the surface of osteoclast precursor cells and induce the activation of indoleamine-2,3-dioxygenase in osteoclast precursor cells. Activated indoleamine-2,3-dioxygenase can degrade tryptophan, promote the apoptosis of osteoclast precursor cells, and thus inhibit bone resorption. In addition to triggering immunosuppression through direct contact between cells, Treg cells can also secrete inhibitory cytokines that have indirect immunosuppressive activity. IL-10 is one of the inhibitory cytokines secreted by Treg cells and can inhibit the proliferation of T cells and the production of cytokines by T cells. IL-10 can inhibit the differentiation and maturation of osteoclasts by upregulating the secretion of osteoprotegerin (OPG) and downregulating the expression of RANKL and M-CSF. IL-35 is a newly discovered cytokine secreted by Treg cells that can reduce the expression of IL-17, thereby reducing the progression of collageninduced arthritis in mice. It has been demonstrated that after injection of mice with Treg cells that were amplified and purified in vitro with magnetic beads and coated with anti-CD3 and anti-CD28 antibodies, the expression of cytokines inhibiting osteoclast generation, such as granulocyte-macrophage colony-stimulating factor (GM-CSF), interferon- (IFN-), IL-5 and IL-10, increased significantly in the mice. In addition, evidence has shown that Treg cells also have certain effects on osteoblasts. Treg cells can promote the proliferation and differentiation of osteoblasts by secreting TGF- and activating intracellular effectors such as mitogen activated protein kinase (MAPK) and Smad-related proteins that induce mesenchymal stem cells to differentiate into osteoblasts and promote the proliferation and differentiation of these osteoblasts. In addition, on the surface of osteoblasts, there are specific receptors for each subtype of TGF-. 
Binding of TGF- to its receptor on the surface of osteoblasts can accelerate the generation of osteoblasts through the Smad protein. The Smad protein has been shown to be directly involved in TGF- signalling pathway-induced osteoblast formation. Wnt10b is an osteogenic Wnt ligand that can activate Wnt signalling in osteoblasts. Treg cells are involved in upregulation of Wnt10b by CD8 + T cells during intermittent PTH treatment and supplementation with the probiotic Lactobacillus rhamnosus GG. Recently, the CD8 counterpart of Treg cells has been discovered and is called Foxp3 + CD8 + Treg cells. These cells do not affect the survival of osteoclasts, but they can inhibit the maturation and activity of osteoclasts by suppressing the formation of their actin rings. Simultaneously, in the bone marrow, the unique property of osteoclasts to induce Foxp3 + CD8 + Treg cells and the ability of Foxp3 + CD8 + Treg cells to regulate osteoclast function establishes a bi-directional regulatory loop between these two types of cells. Interestingly, this regulatory loop does not require the presence of various proinflammatory cytokines. Unlike CD4 + Treg cells which are present in large numbers in peripheral blood and the lymphatic circulation (accounting for approximately 5-12% of all CD4 + T cells), CD8 + Treg cells are present in small numbers in peripheral blood and the lymphatic circulation, accounting for only 0.2-2% of total CD8 + T cells in various lymphoid organs. Thus, current studies on CD8 + Treg cells are not sufficient, and the role of CD8 + Treg cells in osteoporosis has not yet been fully illustrated. Therefore, further studies in this area are needed. However, it can be confirmed that a decrease in the number or function of CD4 + Treg cells and CD8 + Treg cells in the human body will cause an increase in bone loss and consequently lead to osteoporosis. 
The relationship between Th17 cells and bone loss Immature T cells can differentiate into Th17 cells under stimulation by TGF- and the inflammatory response. In addition, IL-6, IL-1 and IL-23 can affect the differentiation and development of Th17 cells. Retinoic acid-related orphan receptor-t (RORt) is an important transcription factor of Th17 cells that is responsible for pathological immune responses. Th17 cells not only can secrete IL-17, IL-21 and IL-22, but also can produce IFN-. Among these cytokines, IL-17 is the most important proinflammatory factor. The IL-17 family has six members: IL-17A-IL-17F. Th17 cells control bone mass in two ways. On the one hand, Th17 cells express high surface levels of RANKL, which binds to RANK on the surface of osteoclast precursor cells, promoting the differentiation of osteoclast precursor cells into osteoclasts to accelerate bone absorption. On the other hand, Th17 cell-secreted IL-17 directly enhances the expression of RANKL in osteoclastogenesissupporting cells such as osteoblasts and synovial fibroblasts. RANKL binds to RANK on the surface of osteoclast precursor cells and promotes the maturation of osteoclasts, leading to an increase in bone resorption. Moreover, IL-17 can also induce macrophages to produce a variety of inflammatory factors, such as TNF-, IL-1 and IL-6, to activate and intensify the local inflammatory response, which indirectly promotes the expression of RANKL in osteoclastogenesis-supporting cells, enhances the binding of RANKL to RANK on the surface of osteoclast precursor cells, and synergistically accelerates bone absorption by osteoclasts. A very important activity of IL-17 is that it triggers the production of high levels of RANKL by upregulating the production of RANK, which is crucial for the interaction between T lymphocytes and bone cells (Table 1). Therefore, in some studies, Th17 cells are called osteoclast subsets of T lymphocytes. 
Many clinical analyses have shown that the number of Th17 cells in the blood and surrounding tissues of osteoporosis patients is several fold higher than that in the osteoporosis-free population. Thus, the Th17 cell count can be used as an important marker for osteoporosis. Studies have demonstrated that after ovariectomy (OVX), the level of IL-17 is significantly increased in rats. An anti-IL-17 antibody antagonist was found to effectively prevent bone damage caused by oestrogen reduction, indicating that IL-17 is involved in bone resorption. The correlation between Treg cells and Th17 cells CD4 + T cells are the common precursor cells of Treg cells and Th17 cells. Differentiation of Treg cells and Th17 cells requires the involvement of the TGF--regulated signalling pathway. However, in different cytokine environments, the differentiation direction of CD4 + T cells can be changed. In the presence of IL-6, IL-23 and TGF-, IL-6 can inhibit the expression of Foxp3 by activating signal transducer and activator of transcription 3 (STAT3) and can upregulate IL-23 receptor expression to induce immature T cells to differentiate into Th17 cells. In contrast, in the absence of IL-6 and other pro-inflammatory factors, TGF- drives the differentiation of immature T cells into Treg cells. It has also been reported that in human T cells cultured in vitro, the absence of IL-6, IL-21 and TGF- can induce RORt production, upregulate IL-23 receptor expression, inhibit Foxp3 expression, and promote the differentiation of Th17 cells. In addition, Th17 cells can secrete IL-21 to further promote the generation of Th17 cells.. Retinoic acid is a key regulator of the TGF--dependent immune response. It can inhibit RORt and promote Treg cell differentiation under the inductive effects of Th17 cells. Recent studies have identified a new subset of Treg cells called CD39 + Foxp3 + Treg cells. 
These cells can inhibit the secretion of IL-17 by Th17 cells, thereby inhibiting autoimmune inflammation induced by IL-17. Interestingly, Th17 cells and Treg cells can also interconvert. For example, when the concentration of cytokines produced by exogenous Th17 cells increases, Treg cells are transformed into IL-17-secreting cells. Yang et al. found that in the presence of IL-6 and TGF- or IL-1 and IL-23, both nTregs and iTregs can be transformed into Th17 cells. Foxp3/ IL-17 double-positive T cells act as intermediate cells in the transformation of Th17 cells into Treg cells. Because Th17 cells and Treg cells are associated with each other, there is a balance between Th17 cells and Treg cells when they function in the human body. Considering the effects of Treg cells and Th17 cells on bone loss, we may conclude that Th17 cells can promote bone resorption while Treg cells can inhibit bone resorption. Therefore, by regulating the cross-talk between the Th17/Treg cell balance and bone cells, we may find new approaches for the treatment of osteoporosis. Factors affecting the balance between Th17 cells and Treg cells Signalling pathways The signalling pathways involved in the Th17/Treg cell balance include the Notch signalling pathway, T cell receptor (TCR) signalling pathway and costimulatory molecule signalling pathway. The Notch signalling pathway is a highly conserved intercellular communication cascade in multicellular organisms that can regulate the fate of various cells and differentiation processes in the human immune system. The Notch pathway includes four Notch receptors (Notch1, Notch2, Notch3 and Notch4) and five ligands (Jagged1, Jagged2, Delta-like1, Delta-like3 and Delta-like4). Li et al. showed that Notch1 mRNA expression was positively correlated with the Th17/Treg ratio. 
In the inflammatory response, when Notch1 signalling was enhanced, the expression of RORt was significantly increased but the expression of Foxp3 was significantly decreased, thereby regulating the differentiation of Th17 cells and Treg cells. Yin et al. found that blocking Notch signalling with DAPT (a -secretase inhibitor) significantly inhibited the differentiation of Th17 cells and reduced the number of Th17 lineage cells, leading to a reduction in IL-17 secretion, which suggests that inactivation of Notch signalling may reduce the production of IL-17. Notch signalling molecules can regulate the Th17/Treg cell balance by inducing the transformation of immature CD4 + T cells into Th17 cells and Treg cells: Jagged1 reduces the expression of IL-6 and TGF-induced RORt in CD4 + T cells, inhibiting the conversion of CD4 + T cells into Th17 cells. In addition, Jagged1 and 2, together with Delta-like1 and 4, can enhance the conversion of CD4 + T cells into Treg cells by regulating the TGF- signalling pathway and Foxp3. Although the mechanism of the Notch signalling pathway in osteoporosis is not yet completely understood, we can still see that regulating the Th17/Treg cell balance by reducing the differentiation and function of Th17 cells by inhibiting the activity of the Notch signalling pathway might be a potential therapeutic approach for the treatment of osteoporosis. The TCR signalling pathway also has some influence on the growth and development of Treg cells. When the key enzymes in TCR stimulation-induced signal cascade reactions, such as lymphocyte protein tyrosine kinase (LCK), chain related protein kinase (Zap70) and the adaptor used to activate T cells (LAT) contain mutations or deletions, the TCR signal is weakened, which leads to the development of defects and reductions in the activity of Treg cells, and simultaneously stimulates the production of IL-6 to drive the differentiation of CD4 + T cells into Th17 cells. 
However, when all components of the TCR signalling pathway are normal, inhibition of TCR signalling promotes the generation of Treg cells. For example, a CD3 mutant with a phosphorylation defect can weaken TCR signalling but promote Treg cell generation. Interestingly, TCR signalling mainly affects nTregs. Whether it has any effect on the differentiation of iTregs remains to be studied. Some scholars claim that the levels of IL-17 and Foxp3 do not increase when the Src family kinase LCK is mutated, indicating that the numbers of Th17 cells and Treg cells do not change, a possibility that is worthy of further study. Treg cells can further differentiate into effector Treg cells after activation of the TCR signalling pathway and exhibit an activated phenotype and full suppressor function. Interferon regulatory factor 4 (IRF4) plays a synergistic role in this process by driving the expression of the immunosuppressive cytokine IL-10. Bach2 is an important regulator of the maintenance of the stable state of downstream TCR signalling and the differentiation of Treg cells. It can limit the production of IL-10 and prevent the premature differentiation of Treg cells. Bach2 can inhibit the genomic-binding of IRF4, thus limiting the effector differentiation of Treg cells driven by TCR. Bach2 balances the transcriptional activity of IRF4 induced by TCR signalling to maintain homeostasis of nTregs and iTregs. In addition, casein kinase 2 (CK2), as an enzyme modifying the TCR signalling pathway, plays an important role in the regulation of the Th17/Treg cell balance. Recently, a study reported that CK2 can promote Th17 cell differentiation and inhibit Treg cell generation by inhibiting FoxO1. If FoxO1 is knocked out or chemically inhibited, the number of Th17 cells is significantly decreased while the number of Treg cells is increased. Activation of T cells requires the participation of a dual signal system. 
In addition to the first signal provided by TCR recognition of MHC-restricted antigenic peptide epitopes, the second signal provided by costimulatory molecules on antigen-presenting cells (APCs) is also needed to activate T cells. The costimulatory molecules CD80 and CD86 on the surface of APCs bind to CD28 on the surface of T cells. The cytoplasmic tail of CD28 has docking sites for signalling molecules, among which the YMNM motif at the membrane-proximal end binds to PI3K, and the PYAP motif at the distal end binds to growth factor receptor binding protein 2 (Grb2) and Lck. CD28 signal transduction is important to maintain the stability and function of Treg cells. Costimulatory signals are transmitted to developing thymocytes through the Lck binding motif in the cytoplasmic tail of CD28, thus inducing Foxp3 expression and upregulating the expression of glucocorticoid-induced tumour necrosis factor receptor (GITR) and CTLA-4 to initiate the differentiation of Treg cells. In CD28-deficient mice, the number of nTregs and iTregs is decreased. In addition, the CD28 costimulatory signal can also enhance the secretion of IFN- and IL-2 from activated CD4 + T cells. IL-2 can inhibit the expression of the -chain of the IL-6 receptor, and IFN- inhibits STAT3 and further blocks the activation of Th17 cells by IL-6. These events form a negative regulatory loop modulating the differentiation of Th17 cells. Metabolism Nutrient metabolism in the human body is also important for maintenance of the Th17/Treg cell balance. The energy demand of immature T cells is low. The ATP required for T cell activity is mainly produced by aerobic oxidation of glucose or by fatty acid oxidation. When T cells are activated, the glycolysis pathway becomes the main energy source due to active cell proliferation and growth. The mammalian target of rapamycin (mTOR) protein regulates the key factor in T cell differentiation and function. Under steady-state conditions, mTOR is inhibited. 
When immature T cells recognize antigens, mTOR is activated and promotes the differentiation of T cells into different cell subtypes. Cluxton et al. demonstrated that the differentiation of Th17 cells mainly depends on glycolysis and hypoxia inducible factor-1 (HIF-1) because when the glucose level in mice was reduced or the mTOR inhibitor rapamycin was used, the number of Th17 cells decreased but the number of Treg cells increased in these mice. Treg cells also depend on glycolysis to some extent, but they are less dependent on glycolysis than Th17 cells. The differentiation of Treg cells requires oxidative phosphorylation and can be inhibited by HIF-1. Cluxton also pointed out that Treg cells can exhibit enhanced glycolysis, mitochondrial respiration and fatty acid oxidation, but Th17 cells appear dependent on fatty acid synthesis. Diet Diet is closely correlated with human health. Excessive salt intake is not conducive to human health because a high-salt diet can cause a series of diseases, such as hypertension and diabetes. Hamid Y. Dar et al. pointed out that excessive salt intake can lead to increased bone loss because a high-salt diet increases the expression of pro-inflammatory factors such as IL-6, IL-17, RANKL and TNF- and decreases the expression of anti-inflammatory factors such as IL-10 and IFN-, which subsequently enhances the induction of Th17 cells and simultaneously decreases the number of Treg cells. Yang et al. showed that high-salt diet can drive thymic Treg cells to adopt a Th17-like phenotype and promote the production of induced Treg cells with a Th17-like phenotype in a serum/glucocorticoid-regulated kinase 1 (SGK1) dependent manner, while maintaining their inhibitory function. SGK1 is a salt receptor in T cells and is preferentially translated in activated Treg cells. High-salt-induced activation of SGK1 signalling can directly promote the expression of RORt in Foxp3 + Treg cells, thereby playing an upstream role in Th17 polarization. L. 
Wu et al. concluded that increased bone resorption after high-sodium diet intake not only may be a secondary cause of urinary calcium loss, but also may be due to a direct cell-mediated effect on osteoclasts. In their experiment, they found that higher concentrations of Na + can significantly increase the expression of some transcription factors for osteoclastogenesis, such as nuclear factor-activated T cells c1 (NFATc1) and spleen proviral integration oncogene (SPI1). Importantly, NFATc1 is considered to be the most potent transcription factor induced by RANKL. Interestingly, Agnes Schroder and colleagues found that a low-salt diet (LSD) increased bone density, reduced the number of osteoclasts, and increased the Na + content and nuclear factor of activated T cell 5 (NFAT5) levels in bone marrow compared with those in mice on a high-salt diet. Mechanistically, local Na + accumulation in the bone marrow of LSD-treated mice increased the expression of OPG and prevented RANKL-induced osteoclast formation in an NFAT5-dependent manner. In addition, MacGregor and Lin et al. demonstrated that a reduction in salt intake may have an important beneficial effect on bone density, thus preventing and treating osteoporosis. En-De Hu et al. showed that the Treg/Th17 cell ratio in mice fed a high-fibre diet and sodium butyrate was significantly higher than that in mice in the model control group. A high-fibre diet and sodium butyrate can reduce the mRNA expression of IL-17 and IL-6, and increase the expression of IL-10 and TGF-. A high-fibre diet can induce the production of short-chain fatty acids (SCFAs) such as butyrate and propionate. In a mouse model of inflammatory bowel disease, administration of SCFAs was found to increase the level of Treg cells in the intestine, especially in the colon, via certain G-protein-coupled receptors or via inhibition of histone deacetylases. 
Some scholars believe that SCFAs can act on the free fatty acid receptors GPR43, GPR41 and GPR109A to exert their effects on host immunity. GPR43 expression is essential for the expansion and inhibition of Treg cells in colitis induced by SCFAs. GPR109A is a receptor that responds to both niacin and butyrate. Activation of GPR109A by SCFAs can upregulate the expression of anti-inflammatory molecules in monocytes, increase the differentiation of Treg cells and enhance the production of IL-10. Vitamin A is a fat-soluble vitamin and retinoic acid is its biologically active form. Vitamin A is highly concentrated in the intestine and is the core mediator of Treg cell homeostasis in the intestine. In the presence of TGF-1, retinoic acid can induce the differentiation of Treg cells. Retinoic acid can not only enhance Treg cell differentiation but also prevent Th17 cell differentiation. Interestingly, SCFAs may also stimulate the production of retinoic acid by epithelial cells. Mice fed a diet lacking vitamin A or treated with retinoic acid receptor inhibitors show a reduction in the population of Treg cells. In addition, dietary amaranth can reduce the internal level of IL-17 while increasing the level of IL-10 and can reduce the Th17/Treg cell ratio to provide immunomodulatory effects through its abundant beneficial compounds. The intestinal microflora The intestinal microflora is not only involved in the regulation of various physiological functions in the human body but also related to many human diseases. Importantly, the intestinal microflora may be a key regulatory factor of bone metabolic homeostasis. The intestinal microflora mainly consists of five different phyla and several genera of the Eubacteria domain, including Actinobacteria (Bifidobacterium), Bacteroidetes (Bacteroides), Firmicutes (Lactobacillus), Proteobacteria (Escherichia), and Verrucomicrobia (Akkermansia). 
Bifidobacteria can promote monocytes to secrete large amounts of TGF-β to induce Treg cell differentiation. Interestingly, the human symbiotic species Bifidobacterium adolescentis can independently induce the production of Th17 cells in the intestines of mice. Sarah Onuora et al. found that Bifidobacterium adolescentis worsened autoimmune arthritis in a mouse model. The role of Bacteroides fragilis is largely dependent on polysaccharide A (PSA), an immunomodulator that maintains host immune homeostasis. PSA can promote the differentiation of CD4 + T cells into Treg cells. In addition, it can inhibit the differentiation of Th17 cells through Toll-like receptor signalling inherent in CD4 + T cells. Hamid Y. Dar et al. found that oral administration of Bacillus clausii in mice with postmenopausal osteoporosis reduced the levels of pro-inflammatory cytokines (IL-6, IL-17 and TNF-α) and increased the levels of anti-inflammatory cytokines (IL-10 and IFN-γ), thereby enhancing bone health. You Jin Jang and colleagues isolated novel strains of Lactobacillus fermentum (KBL374 and KBL375) from faeces. When they used these two strains to treat human peripheral blood mononuclear cells, they found that the levels of inflammatory cytokines such as IL-17A were decreased but those of anti-inflammatory cytokines such as IL-10 were increased. Administration of Lactobacillus fermentum KBL374 or KBL375 to mice increased the population of CD4 + CD25 + Foxp3 + Treg cells in mesenteric lymph nodes. Abdul Malik Tyagi and his team treated neonatal mice with Lactobacillus rhamnosus GG (LGG) and found that the trabecular bone volume in treated mice was increased. Mechanistically, butyrate produced by LGG in the intestine may induce the expansion of Treg cells. Treg cells promote the assembly of the NFAT1-SMAD3 transcription complex in CD8 + cells. NFAT1-SMAD3 drives the expression of Wnt10b, which consequently regulates bone anabolism. 
In addition to producing SCFAs and PSA, the intestinal microflora may also produce ligands of the aryl hydrocarbon receptor (AHR), polyamines (PAs) and poly-gamma-glutamic acid (γ-PGA). AHR regulates the differentiation of Treg cells and Th17 cells in a ligand-specific manner. For example, when activated by TCDD (2,3,7,8-tetrachlorodibenzo-p-dioxin), AHR can induce the generation of Treg cells and suppress experimental autoimmune encephalomyelitis (EAE) through a TGF-β1-dependent mechanism. In contrast, after FICZ (6-formylindolo[3,2-b]carbazole) treatment, AHR can promote Th17 cell differentiation and exacerbate EAE. Recently, a team proposed that AHR binds directly to the open chromatin regions in the locus of the orphan chemoattractant receptor GPR15 to enhance its expression and thus regulates intestinal homing of Treg cells. PAs are small polycationic molecules produced during arginine metabolism. Spermidine is the best characterized PA to date. Carriche and his colleagues found that spermidine enhanced Treg cell differentiation in vitro in an autophagy-related manner. γ-PGA can induce the expression of Foxp3 through the Toll-like receptor 4 pathway, thus promoting Treg cell differentiation. γ-PGA can also inhibit the differentiation of Th17 cells by suppressing the expression of IL-6. In summary, the microflora plays an important role in regulating the maintenance and function of intestinal Treg cells and Th17 cells, although the mechanisms through which the microflora regulates the balance between Th17 cells and Treg cells are not yet fully understood (Fig. 2). Conclusion In conclusion, the impact of the balance between Th17 cells and Treg cells on bone mass is obvious. If the Th17/Treg cell balance shifts towards Th17 cells, bone resorption is enhanced, and the risk of osteoporosis is greatly increased. Currently, the treatment of osteoporosis mainly includes oestrogen replacement, bisphosphonate treatment, calcium and vitamin D treatment, and appropriate physical activities. 
Considering the close correlation between Th17 cells and Treg cells and their plasticity, we believe that there are other influencing factors in addition to signalling pathways, metabolism, diet and the intestinal microflora. In-depth study of the factors that affect Th17/Treg cell balance in osteoporosis will help to further identify targets for new osteoporosis drugs, which are also crucial for the maintenance of human health. The Th17/Treg cell balance also has a profound impact on the treatment of cancer and autoimmune diseases. However, most of the current studies are carried out in animal models. In the future, more high-quality clinical studies are needed to further explore the effectiveness and safety of regulating the Th17/ Treg cell balance in the treatment of osteoporosis.
Q: Using LoRa as a www link I am new to IoT and LoRa, and still trying to grasp the scope and capabilities after discovering a whole technology I never knew existed. I have read some about LoRaWan and IoT, but am confused about the technology. Is LoRaWan and IoT wide area networks that are independent of the world wide web (www) or are they part of it? I also am not clear on the capability and functionality of LoRa devices. This is what I am trying to do, and would like to see if I can utilize LoRa technology to accomplish this: Basically I want to set up a wireless link between two sites which are ~.5 mile apart in which siteA will be connected to the internet (www) and the siteB may connect to the internet through siteA's gateway. I find information indicating there are devices which interface LoRa devices to ethernet. Is it possible to use these devices to set up a wireless link over LoRa? Example, ethernet-to-LoRa interface is connected to the gateway at siteA and to a LoRa device. LoRa device at siteA communicates with LoRa device at siteB, which is also connected to a ethernet-to-LoRa interface. Now a computer or WiFi access point is connected to siteB ethernet-to-LoRa interface, and user at siteB is able to access the world wide web. I understand there will be a sacrifice in network speed, and I wouldn't expect to be able to stream movies etc. A: I think you missed how slow LoRa is. It is very, very, very, very slow. The slowest data rate (at SF12, BW125) is just 250 bits per second. That's 31 bytes per second. Just the text of this answer would take 50 seconds to send, without counting any overhead. This full page, including contents, styles, scripts, images, etc. is over 3 MB, that would take over a day to send! The fastest data rate varies between 11000 and 21900 bits per second depending on the region. Much faster, but that still brings us to the speeds we were used to back in 1994 or thereabouts. 
The web has since evolved to take advantage of the multi-megabit speeds afforded by broadband, so again, a page like this one would take about 20 to 40 minutes to send. Also, in some regions, there are regulatory restrictions, and you can't transmit more than 1% of the time in the same band for instance, so that would multiply everything by 100! LoRa is designed for sensors which send very very little data very very rarely, not to exchange files, transmit large amounts of data, browse the web or anything like that. You may want to consider long-distance Wi-Fi (though, again, there are EIRP limitations), WiMax, and other point-to-point radio technologies. Note that radio technologies don't like obstacles. Hills, buildings, trees, etc. will reduce your range a lot (or even completely block transmission). You want not only line of sight between the two antennas, but you need that line of sight to remain well above any obstacle (see Fresnel zone for details). Or install a separate Internet access, or use a 4G modem, at the second location.