content
stringlengths
7
2.61M
<reponame>Saeegcts/CITS<filename>IDE/src/main/java/com/cognizant/cognizantits/ide/main/mainui/components/testdesign/tree/ProjectDnD.java /* * Copyright 2014 - 2017 Cognizant Technology Solutions * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.cognizant.cognizantits.ide.main.mainui.components.testdesign.tree; import com.cognizant.cognizantits.datalib.component.Scenario; import com.cognizant.cognizantits.datalib.component.TestCase; import com.cognizant.cognizantits.ide.main.mainui.components.testdesign.tree.model.GroupNode; import com.cognizant.cognizantits.ide.main.mainui.components.testdesign.tree.model.ProjectTreeModel; import com.cognizant.cognizantits.ide.main.mainui.components.testdesign.tree.model.ScenarioNode; import com.cognizant.cognizantits.ide.main.mainui.components.testdesign.tree.model.TestCaseNode; import com.cognizant.cognizantits.ide.main.mainui.components.testdesign.tree.model.TestPlanNode; import com.cognizant.cognizantits.ide.main.utils.dnd.TransferableNode; import java.awt.datatransfer.DataFlavor; import java.awt.datatransfer.Transferable; import java.awt.datatransfer.UnsupportedFlavorException; import java.io.IOException; import java.util.List; import java.util.logging.Level; import java.util.logging.Logger; import javax.swing.JComponent; import javax.swing.JTree; import javax.swing.TransferHandler; import static javax.swing.TransferHandler.COPY_OR_MOVE; import static javax.swing.TransferHandler.MOVE; import javax.swing.tree.TreePath; /** * * */ 
/**
 * Swing {@link TransferHandler} that implements drag-and-drop (and cut/paste)
 * of test cases and scenarios inside the project tree.
 *
 * <p>Transfers are wrapped in a single custom {@link DataFlavor}
 * ({@link #TESTCASE_FLAVOR}) whose payload is a {@code TestCaseDnD} carrying
 * either a list of scenario nodes or a list of test-case nodes.</p>
 *
 * <p>NOTE(review): this handler is stateful — {@code sourceTreeModel} is
 * remembered in {@link #importData} and {@code isCut} in {@link #exportDone} —
 * so it assumes the usual single-threaded Swing EDT usage; not thread-safe.</p>
 */
public class ProjectDnD extends TransferHandler {

    /** Flavor used for every transfer produced/accepted by this handler. */
    public static final DataFlavor TESTCASE_FLAVOR =
            new DataFlavor(TestCaseDnD.class, TestCaseDnD.class.getSimpleName());

    /** Tree this handler is attached to (drop destination and selection source). */
    private final ProjectTree pTree;

    /** Model of the tree the dragged nodes came from; set on each import. */
    private ProjectTreeModel sourceTreeModel;

    /** True when the last export was a MOVE (i.e. a cut rather than a copy). */
    private Boolean isCut = false;

    public ProjectDnD(ProjectTree pTree) {
        this.pTree = pTree;
    }

    /** Both copy and move are allowed as drag sources. */
    @Override
    public int getSourceActions(JComponent c) {
        return COPY_OR_MOVE;
    }

    /**
     * Builds the transferable from the current tree selection.
     * Scenario nodes win over test-case nodes; returns null when neither
     * kind of node is selected (which disables the drag).
     */
    @Override
    protected Transferable createTransferable(JComponent source) {
        List<ScenarioNode> scenarios = pTree.getSelectedScenarioNodes();
        if (!scenarios.isEmpty()) {
            return new TransferableNode(new TestCaseDnD(pTree.getTreeModel()).
                    withScenarioList(scenarios), TESTCASE_FLAVOR);
        }
        List<TestCaseNode> testcases = pTree.getSelectedTestCaseNodes();
        if (!testcases.isEmpty()) {
            return new TransferableNode(new TestCaseDnD(pTree.getTreeModel()).
                    withTestCaseList(testcases), TESTCASE_FLAVOR);
        }
        return null;
    }

    /** Accept only our flavor, and only when there is a destination node. */
    @Override
    public boolean canImport(TransferHandler.TransferSupport ts) {
        return getDestinationObject(ts) != null
                && ts.isDataFlavorSupported(TESTCASE_FLAVOR);
    }

    /**
     * Unpacks the {@code TestCaseDnD} payload and dispatches to the
     * test-case or scenario import path. Returns false on any failure;
     * extraction errors are logged, not rethrown.
     */
    @Override
    public boolean importData(TransferHandler.TransferSupport ts) {
        if (ts.isDataFlavorSupported(TESTCASE_FLAVOR)) {
            try {
                TestCaseDnD testCaseDnD = (TestCaseDnD) ts.getTransferable()
                        .getTransferData(TESTCASE_FLAVOR);
                // Remember the source model so a MOVE can remove nodes from it.
                sourceTreeModel = testCaseDnD.model;
                if (testCaseDnD.isTestCases()) {
                    return importTestCases(testCaseDnD.getTestCaseList(), ts);
                } else {
                    return importScenarios(testCaseDnD.getScenarioList(), ts);
                }
            } catch (UnsupportedFlavorException | IOException ex) {
                Logger.getLogger(ProjectDnD.class
                        .getName()).log(Level.SEVERE, null, ex);
                return false;
            }
        }
        return false;
    }

    /**
     * Drops test cases onto either a scenario (or a test case, resolved to its
     * parent scenario) or a non-root group node. For a drop the MOVE action
     * decides cut-vs-copy; for a paste the remembered {@code isCut} does.
     *
     * @return true when the drop target was usable and the copy/move ran
     */
    private Boolean importTestCases(List<TestCaseNode> testCaseNodes,
            TransferHandler.TransferSupport ts) {
        Boolean shouldCut = ts.isDrop() ? ts.getDropAction() == MOVE : isCut;
        Object destObject = getDestinationObject(ts);
        ScenarioNode scNode = getScenarioNode(destObject);
        if (scNode != null) {
            copySelectedTestCases(testCaseNodes, scNode, shouldCut);
            return true;
        }
        // Group drop: explicitly excludes the tree root (TestPlanNode).
        if (!(destObject instanceof TestPlanNode) && destObject instanceof GroupNode) {
            copySelectedTestCases(testCaseNodes, (GroupNode) destObject, shouldCut);
            return true;
        }
        return false;
    }

    /**
     * Resolves a drop target to a scenario node: a scenario node itself, or
     * the parent of a test-case node; null for anything else.
     */
    private ScenarioNode getScenarioNode(Object obj) {
        if (obj instanceof ScenarioNode) {
            return (ScenarioNode) obj;
        }
        if (obj instanceof TestCaseNode) {
            // A TestCaseNode's parent is assumed to be its ScenarioNode.
            return (ScenarioNode) ((TestCaseNode) obj).getParent();
        }
        return null;
    }

    /**
     * Drops whole scenarios onto a group node. Moving (cut) scenarios is
     * deliberately unsupported — only copies are performed.
     *
     * @return true when the scenarios were copied into a group
     */
    private Boolean importScenarios(List<ScenarioNode> scenarioNodes,
            TransferHandler.TransferSupport ts) {
        Boolean shouldCut = ts.isDrop() ? ts.getDropAction() == MOVE : isCut;
        if (shouldCut) {
            return false;
        }
        Object destObject = getDestinationObject(ts);
        if (destObject instanceof GroupNode) {
            for (ScenarioNode scenarioNode : scenarioNodes) {
                addScenario(scenarioNode.getScenario(), (GroupNode) destObject);
            }
            return true;
        }
        return false;
    }

    /**
     * Last path component of the drop location (for drops) or of the current
     * tree selection (for pastes); null when there is no path.
     */
    private Object getDestinationObject(TransferHandler.TransferSupport ts) {
        TreePath path;
        if (ts.isDrop()) {
            path = ((JTree.DropLocation) ts.getDropLocation()).getPath();
        } else {
            path = ((JTree) ts.getComponent()).getSelectionPath();
        }
        if (path != null) {
            return path.getLastPathComponent();
        }
        return null;
    }

    /** Records whether the finished export was a MOVE, for later paste. */
    @Override
    protected void exportDone(JComponent source, Transferable data, int action) {
        isCut = action == MOVE;
        super.exportDone(source, data, action);
    }

    /**
     * Copies (or moves, when {@code isCut}) the given test cases into the
     * scenario of {@code dropscenario}. A cut of a test case onto its own
     * scenario is skipped. On a move the original is removed from its
     * scenario and source tree, and project references are refactored.
     */
    private void copySelectedTestCases(List<TestCaseNode> testCaseNodes,
            ScenarioNode dropscenario, Boolean isCut) {
        for (TestCaseNode testCaseNode : testCaseNodes) {
            Scenario scenario = testCaseNode.getTestCase().getScenario();
            TestCase testCase = testCaseNode.getTestCase();
            testCase.loadTableModel();
            if (isCut) {
                // Moving onto the scenario that already owns it is a no-op.
                if (testCase.equals(dropscenario.getScenario()
                        .getTestCaseByName(testCaseNode.toString()))) {
                    continue;
                }
            }
            TestCaseNode newTestCaseNode =
                    addTestCase(dropscenario.getScenario(), testCaseNode.toString());
            testCase.copyValuesTo(newTestCaseNode.getTestCase());
            newTestCaseNode.getTestCase().setReusable(testCase.getReusable());
            if (isCut) {
                scenario.removeTestCase(testCase);
                sourceTreeModel.removeNodeFromParent(testCaseNode);
                pTree.getProject().refactorTestCaseScenario(
                        testCaseNode.toString(), scenario.getName(), dropscenario.toString());
            }
        }
    }

    /**
     * Copies (or moves) test cases into a group: each test case's scenario is
     * added to the group if absent, and the test case is attached under it.
     * The group subtree is reloaded once at the end.
     */
    private void copySelectedTestCases(List<TestCaseNode> testCaseNodes,
            GroupNode dropGroup, Boolean isCut) {
        for (TestCaseNode testCaseNode : testCaseNodes) {
            Scenario scenario = testCaseNode.getTestCase().getScenario();
            TestCase testCase = testCaseNode.getTestCase();
            ScenarioNode scNode = dropGroup.addScenarioIfNotPresent(scenario);
            pTree.getTreeModel().addTestCase(scNode, testCase);
            if (isCut) {
                sourceTreeModel.removeNodeFromParent(testCaseNode);
            }
        }
        pTree.getTreeModel().reload(dropGroup);
    }

    /**
     * Adds a test case named {@code name} to {@code scenario}, appending
     * " Copy(i)" with the first free i when the name is already taken.
     */
    private TestCaseNode addTestCase(Scenario scenario, String name) {
        String newName = name;
        int i = 1;
        while (scenario.getTestCaseByName(newName) != null) {
            newName = name + " Copy(" + i++ + ")";
        }
        return pTree.getTreeModel().addTestCase(scenario.addTestCase(newName));
    }

    /**
     * Deep-copies {@code scenario} into the project under {@code gNode},
     * uniquifying its name with " Copy(i)" as needed. Which test cases are
     * copied depends on the tree: test-plan trees copy the plain test cases,
     * other trees copy the reusables.
     */
    private void addScenario(Scenario scenario, GroupNode gNode) {
        String newName = scenario.getName();
        int i = 1;
        while (scenario.getProject().getScenarioByName(newName) != null) {
            newName = scenario.getName() + " Copy(" + i++ + ")";
        }
        ScenarioNode sNode = pTree.getTreeModel().addScenario(gNode,
                scenario.getProject().addScenario(newName));
        List<TestCase> testcases;
        if (pTree.getTreeModel().getRoot() instanceof TestPlanNode) {
            testcases = scenario.getTestcasesAlone();
        } else {
            testcases = scenario.getReusables();
        }
        for (TestCase testcase : testcases) {
            testcase.loadTableModel();
            TestCase newTestCase = sNode.getScenario().
                    addTestCase(testcase.getName());
            testcase.copyValuesTo(newTestCase);
            sNode.addTestCase(newTestCase);
        }
    }
}
// MonitorSignals watches for process-termination signals (interrupt, SIGHUP,
// SIGINT, SIGTERM, SIGQUIT). When one arrives it flags *running as false,
// waits five seconds so in-flight work can observe the flag, and then asks
// srv to shut down gracefully.
func MonitorSignals(running *bool, srv *http.Server) {
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)

	go func() {
		// Block until any watched signal is delivered.
		<-sigCh
		*running = false
		// Grace period before the listener is torn down.
		time.Sleep(time.Second * 5)
		srv.Shutdown(context.Background())
	}()
}
A CENTRE for the elderly is getting ready to build a new café to create a more relaxed and sociable environment for its visitors. Annick Lynn launched Chesham House Centre, in South Street, Lancing, six years ago, under the Royal Voluntary Service, to provide events, activities and support for the elderly. Since then, the self-funding hub has gone from strength to strength. But Annick feels that a vintage-themed café will create a much more relaxing space for vulnerable people. “I wish I had a pound for every time someone said to me, ‘it’s taken me a year to build up the courage to walk in here’,” said Annick. The centre provides three main services for the elderly – activities, transport and a befriending scheme. Annick is in charge of activities, which include cribs, a lunch club, grocery bingo and quizzes. The centre has around £16,000 in the pot to start building the café, which includes a generous donation from Sussex Mark Masons’ Mark Benevolent Fund. This week, the centre is holding a jumble sale with all profits going towards the café and other services. The team plan to tear down a few walls to create a larger space and have a seating plan so that passers by can see the new venue through the window. The centre is now appealing to any plumbers, electricians and builders who are able to offer their expert services and advice to help make the café become a reality.
United Continental Holdings Inc expects to cut some management employees as a part of its larger restructuring program. "While we don't have an exact figure now, a small number of our management team will be affected by reductions," said Mike Bonds, executive vice president of human resources, in a letter to employees last week. [NEW YORK] United Continental Holdings Inc expects to cut some management employees as a part of its larger restructuring program. "While we don't have an exact figure now, a small number of our management team will be affected by reductions," said Mike Bonds, executive vice president of human resources, in a letter to employees last week. However, frontline employees, which include pilots, flight attendants, customer-service and gate agents, would not be affected by the impending changes, according to the letter, parts of which the company provided to Reuters. The No 3 US airline by passenger traffic is increasing efforts to match margins of No 2 Delta Air Lines Inc. United said in October cheap airfares and higher wages from new contracts would squeeze its results in the fall, making it difficult to be as profitable as competitors.
The effect of activated charcoal on drug exposure following intravenous administration: A meta-analysis Activated charcoal both reduces primary drug absorption and enhances drug elimination. However, the two mechanisms of action overlap and are indistinguishable from each other. In order to estimate the extent of enhanced elimination, we summarized the effect of activated charcoal on intravenously administered drugs, where reduced drug exposure can be attributed to enhanced elimination. We performed a meta-analysis of randomized controlled studies evaluating the effect of orally administered activated charcoal on the systemic exposure of intravenously administered drugs. We searched the bibliographic databases PubMed, Embase and Cochrane. Meta-regression analyses of selected physicochemical drug properties on the effect sizes of activated charcoal were performed. All but one of 21 included studies used multiple-dose activated charcoal (MDAC). MDAC reduced the median half-life of the intravenously administered study drugs by 45.7% (interquartile range: 15.3%–51.3%) and area under the concentration–time curve by 47.0% (interquartile range: 36.4%–50.2%). MDAC significantly improved drug elimination across nine different intravenously administered drugs, but we were unable to identify factors allowing extrapolation to other drugs. The results offer a possible and plausible rationale for the previously observed effects of single-dose activated charcoal beyond the timeframe where ingested drug is present in the gastrointestinal tract.
Multiuser diversity and cooperative relaying in wireless networks with short-term power constraint In wireless networks that experience channel fading, multiuser diversity can be exploited by scheduling users so that they transmit when their channel conditions are favorable. Cooperative relaying provides another form of diversity due to the spatial separation of the users. In this work we study the simultaneous application of these two sources of diversity, in a decentralized manner, and show the significant throughput improvement in wireless networks. To exploit this synergy, in previous work we have proposed a family of protocols termed Channel Aware Aloha with Cooperation (CAAC). In this work we study the effect of short-term power constraints on the aggregate throughput using the CAAC protocol. We derive the scaling behavior of the achievable rate, and show that the overall system performance is significantly improved in comparison with direct transmission.
<reponame>FatemeGhasemi/impact-graph import { MigrationInterface, QueryRunner } from 'typeorm'; export class changeImagePathProjectDescriptions1646744494458 implements MigrationInterface { async up(queryRunner: QueryRunner): Promise<void> { const projectTableExists = await queryRunner.hasTable('project'); if (projectTableExists) { await queryRunner.query( ` UPDATE project SET description = REPLACE ( description, 'gateway.pinata.cloud', 'giveth.mypinata.cloud' ); `, ); } } async down(queryRunner: QueryRunner): Promise<void> { const projectTableExists = await queryRunner.hasTable('project'); if (projectTableExists) { await queryRunner.query( ` UPDATE project SET description = REPLACE ( description, 'giveth.mypinata.cloud', 'gateway.pinata.cloud' ); `, ); } } }
More than 5,000 Scots have waited longer than the target time to start cancer treatment in the last five years within the “struggling” NHS, a leading charity has said. Macmillan Cancer Support called for action after figures were published showing the waiting times target for the disease had been missed again. The goal is to have 95 per cent of all people urgently referred starting treatment within 62 days. That target has not been achieved for the past five years. Analysis by the charity showed from the start of January 2013 to the end of December last year a total of 5,509 patients waited more than two months to start treatment. These included the latest NHS figures, which showed that in the past three months of 2017, 87 per cent of cancer patients began getting help within the target time, down very slightly from the previous quarter. There were 3,394 people referred for treatment during the period, an increase of 4.3 per cent on the same time in 2016. But the figures showed three out of ten urological cancer patients, including people with kidney, bladder and prostate cancer, waited more than two months to start treatment in October to December 2017. More than a fifth of cervical cancer patients waited longer than this, with the target met for 78 per cent of women in this group. None of the ten main types of cancer covered by the statistics achieved the goal of having 95 per cent of patients start getting help within 62 days, the figures showed. Four regional health boards managed this, with the target met in NHS Borders, Dumfries and Galloway, Lanarkshire and Orkney, while ten areas failed to achieve it. Macmillan Cancer Support’s national programme manager in Scotland, Gordon McLean, said: “It’s now five years since cancer waiting times were met. That’s five years of people who are waiting to start cancer treatment facing unnecessary stress and anxiety.
Gregor McNie, Cancer Research UK’s head of external affairs in Scotland, said: “It’s very concerning that cancer waiting time targets continue to be missed. It’s an unacceptable situation which causes a great deal of anxiety for people. “These statistics tell us that cancer services in Scotland are struggling to cope as every year more people are referred for diagnostic tests. The NHS also narrowly failed to meet another cancer waiting time target, of having 95 per cent of eligible patients start receiving treatment within 31 days of a decision being made on how best to care for them. This was achieved for 94.6 per cent of patients in the past three months of 2017, the same as the previous quarter. Health Secretary Shona Robison said: “The Scottish Government is committed to beating cancer, investing more than £100 million in our cancer strategy which is focused on improving the prevention, detection, diagnosis, treatment and aftercare of those affected by the disease, including £5m to improve cancer waiting times across Scotland. “There are more urgent suspected cancer patients being treated than ever before, and our expert group is continuing to focus on driving forward cancer performance. “In May, we will host an event bringing together cancer experts across Scotland to share best practice and learning and consider what other innovative approaches can be taken to drive up performance. “And we are also pulling together a package of bold measures to tackle the root causes of cancer. Obesity is the second-biggest cause of preventable cancer, behind smoking. “We have recently consulted on a package of bold measures which includes world leading proposals to restrict the promotion and advertising of foods high in fat, salt and sugar, including bargain buy special offers on junk food. “Scotland is also the first part of the UK to have set a target to eliminate smoking from society by 2034. 
We have already reached a record low in the numbers of teenagers smoking, and halved the number of children being exposed to second hand smoke.
Muslim Organisations and Intergenerational Change in Germany The process of social integration of the people associated with immigrant organisations, and the social (and potentially religious) change that comes with it, present the organisations with the difficult challenge of justifying their legitimacy to various societal actors. This is certainly true of Muslim organisations in Western immigration societies. In Germany, this process is quite clearly reflected within the community of established organisations, which play a part in creating intergenerational change. This is not to say, however, that they will be the only relevant or even the predominant actors involved in establishing Islam in Germany in the future, despite their roots in their countries of origin, nor that they will automatically become redundant over time.
An Application-Driven Modular IoT Architecture Building upon the advancements in the recent years, a new paradigm in technology has emerged in Internet of Things (IoT). IoT has allowed for communication with the surrounding environment through a multitude of sensors and actuators, yet operating on limited energy. Several researchers have presented IoT architectures for respective applications, often challenged by requiring major updates for adoption to a different application. Further, this comes with several uncertainties such as type of computational device required at the edge, mode of wireless connectivity required, methods to obtain power efficiency, and not ensuring rapid deployment. This paper starts with providing a horizontal overview of each layer in IoT architecture and options for different applications. Then it presents a broad application-driven modular architecture, which can be easily customized for rapid deployment. This paper presents the diverse hardware used in several IoT layers such as sensors, embedded processors, wireless transceivers, internet gateway, and application management cloud server. Later, this paper presents implementation results for diverse applications including healthcare, structural health monitoring, agriculture, and indoor tour guide systems. It is hoped that this research will assist the potential user to easily choose IoT hardware and software as it pertains to their respective needs.
MOD = 10 ** 9 + 7


def solve(n, k, nums, mod=MOD):
    """Count inversions of `nums` concatenated with itself k times, mod `mod`.

    An inversion is a pair of positions (p, q), p < q, whose values satisfy
    value(p) > value(q).  Instead of materialising the k*n array, count:

    * ``within``  - inversions inside one copy (i < j and nums[i] > nums[j]);
      these occur k times, once per copy.
    * ``smaller`` - ordered pairs (i, j), i != j, with nums[j] < nums[i];
      each such pair is an inversion for every one of the C(k, 2) ordered
      copy pairs (earlier copy supplies i, later copy supplies j).

    Args:
        n: length of nums.
        k: number of repetitions (k >= 1).
        nums: the base sequence of integers.
        mod: modulus for the result (defaults to 10**9 + 7).

    Returns:
        The inversion count of the repeated sequence, modulo `mod`.
    """
    within = 0   # inversions inside a single copy
    smaller = 0  # ordered pairs with a strictly smaller partner anywhere else
    for i, v in enumerate(nums):
        ahead = sum(1 for j in range(i + 1, n) if nums[j] < v)
        behind = sum(1 for j in range(i) if nums[j] < v)
        within += ahead
        smaller += ahead + behind
    copy_pairs = (k * (k - 1) // 2) % mod  # C(k, 2), reduced early
    return (smaller * copy_pairs + k * within) % mod


def main():
    """Read `n k` and the sequence from stdin, print the answer."""
    n, k = (int(v) for v in input().split())
    nums = [int(v) for v in input().split()]
    print(solve(n, k, nums))


if __name__ == "__main__":
    main()
Are Antibiotics Prescribed to Inpatients According to Recommended Standard Guidelines in South Sudan? A Retrospective Cross-Sectional Study in Juba Teaching Hospital Purpose Antibiotic resistance is spreading at an alarming rate globally, mainly because of antibiotics misuse. The World Health Organization developed guidelines for the rational use of antibiotics to prevent antibiotic misuse and reduce the potential development of antibiotic resistance. Although many countries adhere to these guidelines and have contextualized them to their needs, data on antibiotics use are limited in African countries, particularly in South Sudan. This study explored prescription patterns and use of antibiotics at Juba Teaching Hospital (JTH) to clarify the potential for antibiotic resistance in South Sudan. Materials and Methods We conducted a retrospective, cross-sectional study of archived patient data from 2016 to determine the prevalence of inappropriate antibiotics use at JTH. We used methodology developed in a previous study to assess the appropriate use of antibiotics. The study sample comprised 384 files. After reviewing and cleaning the files, 316 files were included in our analyses. This study was approved by the South Sudan Ministry of Health Ethics Review Board (approval number: MoH/ERB 51/2018) and all procedures were consistent with the Declaration of Helsinki. Results Antibiotics use was highest in the medical ward (75.4%). Most antibiotics prescriptions were for infectious diseases (23.7%), followed by ailments affecting the digestive system (19.9%). Commonly prescribed antibiotics were ceftriaxone (21.2%) and metronidazole (20.0%). The mean number of antibiotics prescribed per patient encounter was 2.09 (95% confidence interval: 1.982.19). Most files (n=233, 70.57%) demonstrated incorrect use of antibiotics with 78.8% (n=249) of prescriptions being inappropriate (misuse). 
Conclusion This study revealed a high level of inappropriate antibiotics use at JTH despite the existence of local guidelines, which suggested there was an increased risk for antibiotic resistance. Therefore, it is necessary to introduce antibiotic stewardship activity, along with continuous national surveillance. Enforcement of guidelines to reduce irrational antibiotics use may reduce the risk for antibiotic resistance. Introduction Antimicrobial resistance (AMR) is spreading at an alarming rate globally. This is mainly attributed to over-prescription of antibiotics to patients without due indication, and accounts for about 700,000 deaths per year. 1 O'Neill 2 estimated that by 2050, AMR will result in approximately 10 million lost lives lost and cost US$100 trillion per year worldwide. Furthermore, Gould et al 3 and Bartlett et al 4 estimated that multidrug resistance will have caused nearly 444 million deaths by 2050. Other contributors to AMR are the use of antimicrobials in the food and livestock industries, 5 including the use of antibiotics in food production (animals and plants) to prevent, control, and treat disease and promote growth. 6 However, recent evaluations of AMR-related costs had restricted scopes, and further prospective studies are needed to estimate the actual economic burden. In theory, microbes develop resistance to antibiotics through a natural selection process wherein some microbes succumb to an antibiotic whereas others do not. 7 Misuse of antimicrobials often exacerbates this process. Generally, first-and second-line antibiotic treatment options limit resistance and availability of effective antibiotics. Patients with resistant infections are more likely to die, and those that survive may have extended hospital stays, delayed recuperation, and long-term disability. 8 Achievements in reducing mortality and morbidity through early use of antibiotics based on practical guidelines have also jeopardized appropriate actions to control AMR. 
9 In developing countries, approximately 60% of medicines in public health facilities and 70% in private facilities are prescribed and dispensed inappropriately. 10 Recent studies reported increased antibiotic abuse in developing countries where irrational antibiotics use has become common, with (often unnecessary) antibiotics prescribed for 44-97% of hospitalized patients. 11 A study conducted in Bangladesh found that 38% of patients received antibiotics, of which 14% of prescriptions were irrational. 12 Baktygul et al also reported inappropriate use of antibiotics (73.3%) in a hospital in the Kyrgyzstan Republic, with significantly higher inappropriate antibiotics choice in gynecology (odds ratio 2.70, 95% confidence interval : 1.02-7.69) compared with other wards. 13 A lack of quality data and weak AMR surveillance systems mean that the magnitude of AMR in Africa is not fully understood. However, Essack et al noted that the high burden of communicable diseases in Africa engenders extensive use of antibiotics, which leads to AMR. 14 For example, a study from Ghana reported high bacterial resistance to certain drugs, including chloramphenicol, tetracycline, ampicillin, and cotrimoxazole (>70%). 15 In Cameroon, Chem et al reported an antibiotics prescription rate of 36.71%, with a mean of 1.14 antibiotic prescriptions per patient per year. 16 Those authors concluded there was the misuse of antibiotics in primary care facilities and recommended that only physicians should be allowed to write prescriptions because they were adequately trained. Another study from East Africa reported high levels of AMR to commonly used antibiotics (ampicillin and cotrimoxazole), with resistance rates of 50-100%. 17 However, no such study has been conducted in South Sudan to guide government policy on antibiotics use, and no data on antibiotic use are available. 
Despite the existence of protocols guiding the use of medicines in the healthcare system, there are unconfirmed reports of patients being prescribed antibiotics and other medicines without microbiological testing. In addition, many prescribers do not follow available guidelines or protocols in their day-today practice, resulting in incorrect prescribing and use of antibiotics. Therefore, it is necessary to assess the use of antibiotics in South Sudan to provide scientific evidence regarding the magnitude of this problem. Antibiotics account for a substantial proportion of hospital drug expenditure, and their misuse and overuse generates unnecessary costs. Among reported issues associated with the inappropriate prescribing of antibiotics, knowledge deficits among prescribers and problematic or underequipped practice environments have been highlighted. 18 Shafiq et al 19 found that implementing treatment guidelines for commonly occurring infections in the tertiary care hospital setting increased the appropriate use of antibiotics. The World Health Organization (WHO) noted that many prescribers in developing countries have little access to proper information about diagnosis and drugs. 20 For example, the South Sudan Standards for Treatment Guidelines (STG) are often unavailable in facilities, and health workers are often unsupported and unsupervised 6. These findings supported the rationale for this study. Healthcare costs for antibiotic-resistant infections are markedly higher than those for patients with non-resistant infections. 21 In Africa, AMR is an acknowledged problem in the treatment of HIV and pathogens that cause malaria, Tuberculosis, typhoid, cholera, meningitis, gonorrhea, and dysentery. 22 It is therefore crucial for African countries to develop AMR plans. 
Worldwide, few countries (4.3%) have national AMR plans and only 14.9% have national infection prevention control policies, although 93.6% have essential medicines lists, and 91.5% have national medicines policies and treatment guidelines reflecting rational use. 14 However, no countries have national surveillance systems that routinely generate representative, robust data on antimicrobial use and resistance. 14 for urgent action, the World Health Assembly adopted a Global Action Plan on AMR in May 2015. The Africa Centers for Disease Control and Prevention (CDC) also established the Anti-Microbial Resistance Surveillance Network (AMRSNET). 22 The goals of AMRSNET for the subsequent 5 years were to improve surveillance of AMR organisms among humans and animals, delay AMR emergence, limit AMR transmission, and mitigate harm among patients infected with AMR organisms. A previous study noted that measuring the level of rational antibiotic prescribing contributes to understanding the correct use of antibiotics in a country and preventing the development of AMR 10 To address the problem of AMR, the WHO 20 advocates for the rational use of drugs based on the "Rule of Right." This rule focuses on providers ensuring their prescriptions reflect proper treatment, correct doses, and a suitable duration. It also assumes patients adhere to the treatment regimens prescribed, including completing the dose. This means that physicians must give "the right drug to the right patient at the right time in the correct dose." However, this rule is often ignored by clinicians in Africa, leading to an increased risk for AMR. Most healthcare services in South Sudan are publicly funded and provided, and antibiotics use is limited to prescription and regulated overthe-counter access. 
In this context, it is urgent to address the problem of AMR because new antibiotic-resistant mechanisms are emerging that threaten our ability to treat common bacterial diseases and result in prolonged illness, thereby increased treatment costs, disability, and death. This study will provide guidance for policymakers, health professionals, and prescribers regarding implementing strategies to help contain antibiotics resistance in South Sudan. Materials and Methods This study explored the magnitude of over-and underprescription/use of antibiotics in Juba Teaching Hospital (JTH), which is located in South Sudan, to estimate the risk for AMR. We defined appropriate antibiotics use as use that was consistent with WHO rational use or national treatment guidelines. 20 We used the model developed by Gyssens et al 26 to evaluate whether the prescription of antibiotics was compliant with the South Sudan STG. We referred to the STROBE guidelines in preparing this report. 23 Study Design This study used a retrospective cross-sectional design. We used data from archived files for patients admitted to the medical and surgical wards at JTH between January and December 2016. A non-probability purposive sampling process was used to select files for review. Study Setting This study was conducted at JTH, which is located in Juba, Juba County, Jubek State (Former Central Equatoria State). Juba is the capital city of the Republic of South Sudan. JTH is a public tertiary-care teaching institution that has an official capacity of 260 beds and covers various medical specialties. We focused on admissions to the medical and surgical wards, including those for children, because the pediatric ward was not functional at that time. Study Population This study focused on inpatients who had been prescribed antibiotics at JTH, irrespective of their condition. The hospital records indicated that in 2016, the total number of admissions was 40,251. 
We searched the files extracted from the hospital archives to identify files for patients admitted to the medical and surgical wards between January and December 2016. Sample Size and Description We accessed files for children (aged 0-18 years) and adults (aged >18 years) admitted to the JTH medical and surgical wards. The required sample size was 384 files (at a 95% confidence level), which was estimated using the United States CDC calculator (StatCalc Epi Info TM ). 24 We excluded 68 files because of insufficient information, which left 316 files for review (113 males and 203 females). Ethical Approval We could not obtain informed consent from patients as this was a retrospective study. However, this study followed the procedures set out in the Declaration of Helsinki 25 and received approval from the Ministry of Health Ethics Review Board (approval number MOH/ERB 51/2018). We used data from de-identified files. First, the researchers formally contacted the study hospital by letter to obtain consent to use archived files for this study. This letter included the detailed study protocol and a statement outlining the study's purpose, and potential benefits to the hospital and country. The hospital consented to participate in this study. Data Collection To avoid possible bias, files were drawn from the archived medical and surgical ward files using simple nonprobability sampling proportional to the ward size. The files required for review were then selected from the ward files using a simple random sampling method. We collected secondary data from the selected files over 2 months using a standard data collection form adopted from Baktygul et al. 13 Compliance behavior was assumed to be under the supervision of ward nurses as they attended to the patients. For example, when distributing oral medication (antibiotics), nurses provided patients with water to swallow the medication, thereby ensuring compliance. 
The data collection team had medical backgrounds and received 2 days of training and orientation using the data collection form before this study. The collected data were entered into a computer database, cleaned, and then assigned codes. Demographic characteristics recorded included the patients' age, gender, and diagnosis. Variables included in the quality assessment of antibiotics use were antibiotics prescription, dose, route of administration, duration, and the correct choice of antibiotics. We evaluated the indications for antibiotic use based on adherence to the South Sudan guidelines. Data Analysis and Interpretation Clinical microbiology services provided data to measure resistance to antimicrobial agents. As we used archived data based on cumulative tabulated susceptibility testing results of isolates from individual patients, we evaluated antibiotics use with an algorithm developed by Gyssens et al. 26 This model helped us assess the prevalence of appropriate antibiotic use in the reviewed files as an outcome. The variables assessed for the prescribed antibiotics were correctness/incorrectness of choice, duration of therapy, dosage, and route of administration. Correctness or appropriateness implied that the antibiotic choice, duration of therapy, dosage, and route of administration were all correct. Data for the prescribed antibiotics were compared with the STG and standard textbooks to evaluate correct antibiotic use. 27 The collected data were then edited, coded, tallied, cleaned, and scored. We used descriptive statistics to address our study objective. STATA version 13.0 was used for the data analyses. 28 Numbers were used to code data for categorical variables and frequencies and percentages were used to present quantitative data, with the main results shown in tables. We evaluated the appropriateness of antimicrobial treatment using the algorithm developed by Gyssens et al. 
26 To understand the level of misuse, we examined patients' characteristics, type of antibiotics used, and patients' clinical conditions. We chose this algorithm because the method was validated in a previous study. 29 In brief, this algorithm is judged as follows. A. Appropriate decisions: all criteria of correct antimicrobial use were fulfilled. B. Inappropriate indication: prescription of antimicrobials without the presence of infectious disease or for an infection that did not need antimicrobial treatment. C. Inappropriate choice, including the inappropriate spectrum of the antimicrobial agent (too broad, too narrow, not practical), or inappropriate toxicity profile. D. Inappropriate application, including inappropriate dosage, timing, route of administration, and duration of therapy. E. Divergence from guidelines. F. Missing or insufficient data to judge the appropriateness of antimicrobial use. Results The primary aim of this study was to clarify the magnitude of antibiotics misuse at JTH and estimate the risk for AMR based on national treatment guidelines. We described the characteristics of the study population, antibiotics used, and medical conditions treated. We assessed the quality of antibiotics use based on the dose, route of administration, duration of treatment, and choice of antibiotic. We also evaluated whether the antibiotics prescribed were consistent with the national STG. Patients' Demographic and Clinical Characteristics We reviewed the medical records for 384 patients to determine the use of antibiotics during their hospital stay. From these, we included the medical records for 316 patients that had complete information in our analyses. Table 1 shows patients' characteristics. About 64.2% of patients were female, and over half were aged ≥18 years. The distribution of patients in the selected wards was non-homogeneous, with the medical wards having the highest number of admissions (n=248, 78.5%). 
Both the medical and surgical wards had more female than male patients. Conditions for Which Antibiotics Were Prescribed In both wards, patients were administered antibiotic/antimicrobial regimens for infections in all organ systems. Many patients were diagnosed with infectious and parasitic diseases (n=75, 23.7%), including diseases of the digestive system (n=63, 19.9%) and respiratory system (n=56, 17.7%), all of which were treated with antibiotics and protozoal agents. Those without parasitic infection only received antibiotic treatment. Eight different antibiotic groups and 21 single antibiotics were prescribed (Table 2). Most prescribed antibiotics (34%) were from the penicillin group, followed by the cephalosporin (23%) and metronidazole groups (20%). Only 11% of prescribed antibiotics were from the aminoglycoside group. Prevalence of Antibiotic Use by Ward The frequency of single antibiotics prescribed by ward type is shown in Table 3. There were 660 prescriptions for single antibiotics across the studied wards, with ceftriaxone being the most commonly prescribed (21%), followed by metronidazole and amoxicillin (20% and 12%, respectively). We found that 248 patients in the medical ward consumed 75% (n=497) of the antibiotics, and 68 patients in the surgical ward accounted for 25% (n=163) of the antibiotics used. Number of Antibiotics Prescribed per Patient During Their Admission Period During their admission period, 40.2% of patients received two antibiotics, 30.1% received one antibiotic, and 20.6% received three antibiotics per prescription (Table 4). Quality Assessment of Antibiotic Therapy We assessed whether the choice of antibiotics prescribed for each patient was correct or incorrect. We found that 29.43% (n=93) of the reviewed files showed a correct choice of antibiotics for therapy. However, 70.57% (n=223) of the files demonstrated incorrect use.
In terms of dose, most files (n=281, 88.9%) showed the dose of the prescribed antibiotics was correct, but 35 (11.1%) files showed incorrect doses. The most commonly used administration method was injection solution (45.2%), followed by tablet (25.2%) and capsule (17.3%) (Table 5). As shown in Table 6, 74.1% of patients received antibiotic therapy during an admission period of 0-7 days, 21.5% received treatment in an admission period of 8-14 days, and 4.4% received antibiotic treatment in an admission period ≥15 days. Rationality of Antibiotic Therapy at JTH Our review of the single antibiotics used showed the majority of antibiotics were given intravenously (51.5%), followed by oral (47.6%) and intramuscular (0.66%) administration. Parenteral antibiotics were also commonly used. Antibiotic therapy was correctly used (appropriate) in 21.2% (n=67) of the prescriptions, and incorrectly used (inappropriate) in 78.8% (n=249) of prescriptions. Antibiotics prescribed were evaluated by diagnosis to clarify if they were compliant with local treatment guidelines. However, only 29.11% (n=92) of prescriptions complied with treatment according to these guidelines, and 70.89% (n=224) did not comply. Discussion The primary aim of this study was to explore the prevalence of antibiotic misuse in South Sudan, which exposes the population to an increased risk for AMR. The results indicated that there was over-prescription of antibiotics in South Sudan. However, the socioeconomic conditions of the region mean it remains unclear if this translated to overuse or underuse. There are also government regulatory issues related to the availability of over-the-counter antibiotics in the study setting, and the level of awareness of AMR among healthcare personnel remains unclear. Few studies focused on AMR have been conducted in Africa, and none in the South Sudan setting.
To fill this knowledge gap, this study evaluated how antibiotics were used in a hospital in South Sudan to elucidate the contribution of this prescribing practice/pattern to development of AMR. In the African region in general, understanding of issues related to AMR and its magnitude is hampered by the limited surveillance of drug resistance (only in a few countries), which has resulted in incomplete and inadequate data on the true extent of the problem. 30 However, AMR is an acknowledged issue 22 and countries should develop AMR plans. Unfortunately, few countries have national AMR plans (4.3%) or national infection prevention control policies (14.9%). Rates of essential medicines lists and national medicines policies and treatment guidelines reflecting rational use are higher (93.6% and 91.5%, respectively), but no countries have national surveillance systems that routinely generate representative, robust data on antimicrobial use and resistance. 14 However, some progress has been made such as the Global Action Plan on AMR adopted by the World Health Assembly in May 2015, 30 and establishment of AMRSNET by the Africa CDC. 31 Despite considerable improvement in the availability and control of antibiotics in hospitals, rational antibiotic use remains a worldwide concern. Increasing rates of AMR mean that medical intervention becomes impossible and people die of common infections (eg, during surgery, chemotherapy, organ transplantation, and care for premature infants). This study demonstrated high antibiotics use at JTH; antibiotics were commonly being misused (78.8%) and non-adherence to local guidelines was high (70.9%). The proportion of reviewed files in which antibiotics were prescribed at JTH was 100%, with an average of 2.1 antibiotics per patient prescription. This finding was consistent with those reported by Ampaire et al, where AMR was high in commonly-used antibiotics (ampicillin and cotrimoxazole), with resistance of 50%-100%. 
17 This study also found that infectious and parasitic diseases were the most prevalent conditions for which antibiotics were prescribed, and there was a high rate of intravenous antibiotics administration (51.5%). Our finding that the prescribed antibiotics were inappropriate in 78.8% of cases was consistent with studies from Ethiopia (80.6%) 32,33 and the Kyrgyz Republic (73.3%). 13 The most commonly used antibiotic group in our study was penicillin (33.8%). We found that several inpatient records documented three (20.6%) or four (8.54%) antibiotics prescribed in a single encounter, 15 where patients received two antibiotics per encounter on average. In contrast, studies conducted in Saudi Arabia 35 and Cameroon 16 reported a single antibiotic per prescription. The difference between those studies and this study may reflect limited time to diagnose the patient and select appropriate treatment. In addition, there may be differences in settings between our study (a tertiary hospital) and other studies. Doctors may overprescribe antibiotics because of a lack of knowledge about rational use or to prevent potential infections. The 2.09 antibiotics prescribed per encounter in this study highlight the need for improved knowledge about polypharmacy among healthcare professionals, especially given the WHO recommended average of 1.6-1.8 antibiotics per encounter. The most common clinical indications for antibiotic use in this study were infectious and parasitic diseases (23.7%). However, this was inconsistent with studies from Egypt 36 and Jordan, 37 where the most prevalent conditions included respiratory tract (39.2%) and urinary tract (53.75%) infections. A study from India revealed that 69.4% of patients received antibiotics for acute respiratory infection and diarrhea of viral origin. 19 These findings suggested that the burden of infectious and parasitic diseases in South Sudan contributed to the overuse of antibiotics. 
The increased burden of infections in developing countries may broadly explain the misuse of antibiotics in healthcare settings associated with AMR. In this study, the majority of antibiotics (51.5%) were given intravenously, followed by oral (47.6%) administration, with overall injectable antibiotics constituting 45.2%. Intravenous administration, including injectable antibiotics, exceeded the optimal level of ≤10% recommended by the WHO. 14 However, the proportion of intravenous administration in our study was lower than that in a similar study in Pakistan, where injections accounted for 75% of antibiotics administration. 37,38 However, the presence of pediatric patients in our study population might have contributed to the high use of intravenous antibiotics. Shifting the route of administration from an intravenous to an oral route saves costs, shortens the length of hospital stay, and decreases adverse reactions from intravenous use. 33 We found 74.1% of patients received antibiotic therapy during a 0-7-day admission period, whereas 21.5% of patients received antibiotic treatment over 8-14 days and 4.4% received treatment for ≥15 days. The cost of healthcare for patients with resistant infections is high because of a longer duration of illness, additional tests, and the use of more expensive drugs. Limitations of This Study This study had several limitations that should be noted. Although inappropriate use of antibiotics and non-adherence to national treatment guidelines were high, we only included files for patients admitted to two wards in a tertiary-level hospital. This means our findings may not provide a comprehensive picture of antibiotics prescribing in South Sudan. In addition, the sample in this study may not be representative of the whole South Sudan population, which limits the generalizability of the results. Furthermore, we were not able to perform cross-tabulation as part of our analyses.
Finally, the data collectors involved in this study might not have been sufficiently knowledgeable about the data they were recording, despite having medical backgrounds and receiving training before data collection. Conclusions This study explored antibiotic prescribing practices in a hospital in South Sudan. Most antibiotics in the study hospital are prescribed for the treatment of infectious and parasitic diseases. However, we found antibiotics use in JTH is highly inappropriate, with prescribing patterns failing to adhere to national guidelines. The proportion of antibiotics prescribed is 100%, with an average of 2.1 antibiotics per patient prescription. The study area also has a high rate of intravenous antibiotics use. Our findings suggest it is necessary to introduce antibiotic stewardship activity in the study area, along with continuous national surveillance. Enforcement of guidelines to reduce irrational antibiotics use may be helpful to reduce the risk for antibiotic resistance. Data Sharing Statement The datasets generated and/or analyzed during the present study (which include individual treatment files) are not publicly available because of ethical restrictions but may be available from the corresponding author on reasonable request. Ethics Approval and Consent to Participate This study was approved by the Ministry of Health Ethics Review Board and given approval number MOH/ERB 51/ 2018. The study hospital was formally contacted (invitation letter sent) and consent sought for their participation.
import readInput from '../common/readInput' /** * Calculate the number of depth increases in an ordered list of depth measurements. * * You can optionally provide the size of the sliding window to use in order to average measurements (the default is 1). * * @param depthMeasurements The list of depth measurements to calculate the number of depth increases for. * @param windowSize The size of the sliding window to use when averaging measurements. * @returns The number of depth increases in the list of measurements. */ export function calculateNumberOfDepthIncreases(depthMeasurements: string[], windowSize: number = 1): number { let numberOfDepthIncreases = 0 depthMeasurements.forEach((line, currentIndex, lines) => { // No previous value to compare against. if (currentIndex === 0) { return } const currentDepth = calculateSumForWindow(getWindow(lines, currentIndex, windowSize)) const previousDepth = calculateSumForWindow(getWindow(lines, currentIndex - 1, windowSize)) if (!currentDepth || !previousDepth) { throw Error('Could not parse the input file!') } if (currentDepth > previousDepth) { numberOfDepthIncreases++ } }) return numberOfDepthIncreases } function getWindow(measurements: string[], startIndex: number, windowSize: number): string[] { return measurements.slice(startIndex, startIndex + windowSize) } function calculateSumForWindow(measurements: string[]): number { return measurements .map((measurement) => parseInt(measurement)) .reduce((previousValue, currentValue) => previousValue + currentValue) } const input = readInput('day1/input.txt') const numDepthIncreasesWindowSize1 = calculateNumberOfDepthIncreases(input) console.log('Number of depth increases with window size of 1: %d', numDepthIncreasesWindowSize1) const numDepthIncreasesWindowSize3 = calculateNumberOfDepthIncreases(input, 3) console.log('Number of depth increases with window size of 3: %d', numDepthIncreasesWindowSize3)
def _init_layers(cls, config, inputs, params, summaries=None):
    # Abstract hook: concrete subclasses build their layer stack here.
    # NOTE(review): `cls` as the first parameter suggests this is wrapped in a
    # @classmethod outside this view -- confirm at the definition site.
    #
    # Args (semantics inferred from names only -- verify against subclasses):
    #     config: presumably the model/layer configuration object.
    #     inputs: presumably the input tensors/values the layers consume.
    #     params: presumably shared parameters for layer construction.
    #     summaries: optional summary collector; defaults to None.
    #
    # Raises:
    #     NotImplementedError: always, in this base implementation.
    raise NotImplementedError
The day you've been waiting for has arrived. Today we announce the honorees of the 2015 Core77 Design Awards! With over 210 total honors bestowed by the dedicated jury teams, the honorees hail from all over the globe and represent the most innovative and inspiring student and professional work from across 14 categories. The 2015 jury teams, composed of 56 design experts from 15 cities and 8 countries, were most impressed by the depth and thoroughness of student entries this year, noting that the level of execution and research matched, and often exceeded, that of their professional counterparts. The juries noted the ongoing interest in social impact, with many of the designs focusing on creating a lasting positive impact rather than on more superficial design elements. The honoree announcements also kick off the public voting for the inaugural Core77 Community Choice Prize. This brand new addition to the Design Awards program asks the global design community to vote on their favorite honored entries between today and June 22nd. A grand prize winner, the project that receives the most votes overall across all 14 categories, will receive airfare and accommodations to attend the 2015 Core77 Conference in Los Angeles, California. Anyone who votes in the Community Choice Awards will be automatically entered into a drawing for one of three free tickets to attend the 2015 Core77 Conference. So dive in! The winning projects, jury videos and community choice awards are open for your browsing and voting pleasure.
Tests And Quality Of The 3.5 Meter Primary Mirror Of The Max-Planck-Institut Fur Astronomie's Telescope At Calar Alto The figuring of the 3.5 m primary mirror of the large telescope is described. The execution of final tests was carried out in a vertical test tunnel by means of a laser interferometer, using a compensating system. A special sequence of interferograms was applied, aiming to suppress the thermal, accidental, and environmental influences. The wave front errors were evaluated accordingly. The results show that the standard deviation due to the mirror is 24 nm, out of a total standard deviation of 49 nm taken as an average of 24 interferograms.
package com.mikeleitz.example.annotation;

/**
 * Severity levels, listed from most to least severe.
 *
 * <p>Note: callers elsewhere may rely on declaration order via
 * {@code ordinal()}/{@code compareTo}, so the constant order is preserved.
 *
 * @author <NAME>
 */
public enum SeverityEnum {
    /** Highest severity. */
    CRITICAL,
    HIGH,
    MEDIUM,
    LOW,
    /** Lowest severity. */
    TRIVIAL
}
package org.plasmacore; import android.media.AudioAttributes; import android.media.AudioManager; import android.media.SoundPool; import android.os.Build; class PlasmacoreSoundManager implements SoundPool.OnLoadCompleteListener { public LookupList<PlasmacoreSound> sounds = new LookupList<PlasmacoreSound>(); public SoundPool soundPool; public boolean allSoundsPaused; public PlasmacoreSoundManager() { if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) { AudioAttributes attributes = new AudioAttributes.Builder() .setUsage( AudioAttributes.USAGE_GAME ) .setContentType( AudioAttributes.CONTENT_TYPE_SONIFICATION ) .build(); soundPool = new SoundPool.Builder().setAudioAttributes(attributes).setMaxStreams(8).build(); } else { soundPool = new SoundPool( 8, AudioManager.STREAM_MUSIC, 0 ); } soundPool.setOnLoadCompleteListener( this ); final PlasmacoreSoundManager THIS = this; Plasmacore.setMessageListener( "SoundManager.is_loading", new PlasmacoreMessageListener() { public void on( PlasmacoreMessage m ) { for (int i=0; i<sounds.count(); ++i) { if (sounds.get(i).isLoading) { m.reply().set( "is_loading", true ); return; } } m.reply().set( "is_loading", false ); } } ); Plasmacore.setMessageListener( "Sound.create", new PlasmacoreMessageListener() { public void on( PlasmacoreMessage m ) { String filepath = m.getString( "filepath" ); boolean isMusic = m.getBoolean( "is_music" ); if (isMusic) { m.reply().set( "id", sounds.id(new PlasmacoreSound.Music(THIS,filepath)) ); } else { m.reply().set( "id", sounds.id(new PlasmacoreSound.Effect(THIS,filepath)) ); } } } ); Plasmacore.setMessageListener( "Sound.duration", new PlasmacoreMessageListener() { public void on( PlasmacoreMessage m ) { int id = m.getInt( "id" ); PlasmacoreSound sound = sounds.getByID( id ); if (sound != null) m.reply().set( "duration", sound.duration() ); } } ); Plasmacore.setMessageListener( "Sound.is_playing", new PlasmacoreMessageListener() { public void on( PlasmacoreMessage m ) { int id = m.getInt( "id" 
); PlasmacoreSound sound = sounds.getByID( id ); if (sound != null) m.reply().set( "is_playing", sound.isPlaying() ); } } ); Plasmacore.setMessageListener( "Sound.pause", new PlasmacoreMessageListener() { public void on( PlasmacoreMessage m ) { int id = m.getInt( "id" ); PlasmacoreSound sound = sounds.getByID( id ); if (sound != null) sound.pause(); } } ); Plasmacore.setMessageListener( "Sound.play", new PlasmacoreMessageListener() { public void on( PlasmacoreMessage m ) { int id = m.getInt( "id" ); boolean is_repeating = m.getBoolean( "is_repeating" ); PlasmacoreSound sound = sounds.getByID( id ); if (sound != null) sound.play( is_repeating ); } } ); Plasmacore.setMessageListener( "Sound.position", new PlasmacoreMessageListener() { public void on( PlasmacoreMessage m ) { int id = m.getInt( "id" ); PlasmacoreSound sound = sounds.getByID( id ); if (sound != null) m.reply().set( "position", sound.position() ); } } ); Plasmacore.setMessageListener( "Sound.set_position", new PlasmacoreMessageListener() { public void on( PlasmacoreMessage m ) { int id = m.getInt( "id" ); double position = m.getDouble( "position" ); PlasmacoreSound sound = sounds.getByID( id ); if (sound != null) sound.setPosition( position ); } } ); Plasmacore.setMessageListener( "Sound.set_volume", new PlasmacoreMessageListener() { public void on( PlasmacoreMessage m ) { int id = m.getInt( "id" ); double volume = m.getDouble( "volume" ); PlasmacoreSound sound = sounds.getByID( id ); if (sound != null) sound.setVolume( volume ); } } ); Plasmacore.setMessageListener( "Sound.unload", new PlasmacoreMessageListener() { public void on( PlasmacoreMessage m ) { int id = m.getInt( "id" ); PlasmacoreSound sound = sounds.removeID( id ); if (sound != null) { sound.unload(); } } } ); } public void onLoadComplete( SoundPool soundPool, int soundID, int status ) { for (int i=0; i<sounds.count(); ++i) { PlasmacoreSound sound = sounds.get( i ); if (sound.hasSoundID(soundID)) { sound.onLoadFinished( 0 == status ); break; 
} } } public void pauseAll() { if (allSoundsPaused) return; allSoundsPaused = true; soundPool.autoPause(); for (int i=0; i<sounds.count(); ++i) { sounds.get( i ).systemPause(); } } public void resumeAll() { if ( !allSoundsPaused ) return; allSoundsPaused = false; soundPool.autoResume(); for (int i=0; i<sounds.count(); ++i) { sounds.get( i ).systemResume(); } } }
<reponame>danielroth1/CAE<filename>src/simulation/collision_detection/CollisionManagerListener.cpp #include "CollisionManagerListener.h" CollisionManagerListener::CollisionManagerListener() { } CollisionManagerListener::~CollisionManagerListener() { }
Accelerating the Adoption of Disruptive Technologies: The Impact of COVID-19 on Intention to Use Self-Driving Vehicles One of the most notable global transportation trends is the accelerated pace of development in vehicle automation technologies. Uncertainty surrounds the future of automated mobility as there is no clear consensus on potential adoption patterns, ownership versus shared use status and travel impacts. Adding to this uncertainty is the impact of the COVID-19 pandemic that has triggered profound changes in mobility behaviors as well as accelerated the adoption of new technologies at an unprecedented rate. This study examines the impact of the COVID-19 pandemic on willingness to adopt the emerging new technology of self-driving vehicles. Using data from a survey disseminated in June 2020 to 700 respondents in contiguous United States, we perform a difference-in-difference regression to analyze the shift in willingness to use autonomous vehicles as part of a shared fleet before and during the pandemic. The results reveal that the COVID-19 pandemic has a positive and highly significant impact on consideration of autonomous vehicles. This shift is present regardless of techsavviness, gender or political views. Individuals who are younger, left-leaning and frequent users of shared modes of travel are expected to become more likely to use autonomous vehicles once offered. Understanding the effects of these attributes on the increase in consideration of AVs is important for policy making, as these effects provide a guide to predicting adoption of autonomous vehicles - once available - and to identify segments of the population likely to be more resistant to adopting AVs.
Effectiveness of Common Household Cleaning Agents in Reducing the Viability of Human Influenza A/H1N1 Background In the event of an influenza pandemic, the majority of people infected will be nursed at home. It is therefore important to determine simple methods for limiting the spread of the virus within the home. The purpose of this work was to test a representative range of common household cleaning agents for their effectiveness at killing or reducing the viability of influenza A virus. Methodology/Principal Findings Plaque assays provided a robust and reproducible method for determining virus viability after disinfection, while a National Standard influenza virus RT-PCR assay (VSOP 25, www.hpa-standardmethods.org.uk) was adapted to detect viral genome, and a British Standard (BS:EN 14476:2005) was modified to determine virus killing. Conclusions/Significance Active ingredients in a number of the cleaning agents, wipes, and tissues tested were able to rapidly render influenza virus nonviable, as determined by plaque assay. Commercially available wipes with a claimed antiviral or antibacterial effect killed or reduced virus infectivity, while nonmicrobiocidal wipes and those containing only low concentrations (<5%) of surfactants showed lower anti-influenza activity. Importantly, however, our findings indicate that it is possible to use common, low-technology agents such as 1% bleach, 10% malt vinegar, or 0.01% washing-up liquid to rapidly and completely inactivate influenza virus. Thus, in the context of the ongoing pandemic, and especially in low-resource settings, the public does not need to source specialized cleaning products, but can rapidly disinfect potentially contaminated surfaces with agents readily available in most homes. Introduction Influenza A virus poses a major public health problem and is associated with frequent annual epidemics and occasional pandemics. 
During annual epidemics, the disease is associated with excess mortality and morbidity (including hospitalisations), especially in the elderly, children under 2 years, and those of all ages with underlying high-risk co-morbidities. In April, 2009 a novel influenza virus (A/H1N1v) was identified in North America, but had already spread extensively in Mexico during the preceding weeks. Rapid global spread ensued and a global pandemic was formally declared by the World Health organization on June 11 th 2009. During an influenza pandemic, the majority of people who develop symptoms will stay at home, where informal lay care will most often be provided by relatives. It is therefore crucial to gather specific information about how the virus is shed around the home and how transmission may be reduced by the adoption of appropriate hygiene measures, including cleaning of surfaces likely to be contaminated by virus. This is especially important in households containing young children as the latter are well known for their poor respiratory etiquette and higher virus shedding. Whilst numerous commercial virucidal agents are currently available, they may become scarce during a pandemic and are not available in low-resource settings. The purpose of this work was to assess a representative selection of simple, household cleaning agents and commercially available wipes which might be readily utilised to reduce the amount of virus spread around the home. We find that dilute solutions of washing up detergent, bleach or vinegar provide suitable means of disinfecting surfaces of influenza A virus. Liquid Cleaning Agents A British Standard was adapted to assess the ability of various household cleaning agents to kill or reduce the infectivity of an H1N1 human influenza virus A/PuertoRico/8/34 (PR8, H1N1, Cambridge lineage). 
We reasoned that routine domestic cleaning often uses hot water, and during an informal experiment gauged that many people ran 'hand-hot' water to a temperature of about 55uC; accordingly virus was mixed with water at this temperature, containing varying concentrations of bleach, washing up liquid and malt vinegar. Hot water alone was used as the baseline control. Samples were assayed either immediately (''0'' minutes) as a measure of rapid inactivation or after 60 minutes to simulate prolonged contact and allow comparability with previous studies using this time cut-off. Plaque assays were used to assess the viability of the virus at set time points post incubation and quantitative RT-PCR was used to assay the effect of the cleaning agents on viral genome. The outcome of the plaque assays indicated that rapid treatment of the virus with hot water had little effect on the virus, reducing the titre by around two-fold, but prolonged incubation at 55uC abolished detectable infectivity. However, the addition of any of 1% bleach, 50% and 10% malt vinegar and 1%, 0.1% and 0.01% washing up liquid were all effective at rapidly reducing viable virus below the limit of detection, while a low concentration of vinegar (1%) was no more effective than hot water alone ( Figure 1A). In contrast to the plaque assay results, most agents were ineffective at reducing the number of detectable genome copies as determined by RT-PCR, with only bleach having a significant effect ( Figure 2A). The data for the plaque assays and RT-PCR assays are compiled in Tables S1 and S2. Thus, while a strong oxidizing agent such as bleach is effective at reducing both genome detection and virus infectivity, low pH and detergent are equally efficacious virucidal agents. These results also indicate that whilst vinegar and detergent disrupt the viral envelope proteins reducing infectivity, only bleach disrupts the viral genome. 
Wipes and Tissues Surface cleaning wipes and antiviral tissues were assayed by a further adaptation of the BSEN14476:2005 Standard in which a single wipe to be tested was rinsed multiple times in room temperature sterile water. The resulting solution was filtered and used in the assay. Solubilised active ingredients from toddler wipes had a relatively small effect on virus viability, similar to treatment with hot water, causing around a 50-fold drop in titre even after 60 minute incubation (Figure 2A). Extracts from multisurface wipes cause a similar immediate reduction in virus infectivity, but on prolonged incubation showed a complete virucidal effect. Both the anti-bacterial wipe solution and the anti-viral tissue solution completely abrogated virus infection immediately after mixing with the virus (Figure 2A). However, viral genomes were detected in similar abundance by RT-PCR after all treatments with every wipe or tissue studied ( Figure 2B, see also Table S2 for a comparison of numerical data). Thus, while all types of wipe tested had some antiviral effect, they varied considerably in the magnitude and rapidity of their virucidal effects. Discussion The aim of this work was to identify commonly available cleaning agents and wipes which might be used during a pandemic situation to ensure household surfaces are free of viable influenza virus. By using hand-hot water (55uC) and concentrations of washing up liquid (0.1% to 0.01%) commonly used for domestic 'washing up' we attempted to create 'real life' conditions. However we acknowledge the tension in all such experiments between 'real life' assessments and the use of standard, reproducible internationally accepted assays. Although our study could not assess mechanical wiping effects, had we done so, it would have been difficult to assess our results in relation to 'real life' conditions, because wiping is carried out differently by individuals and the potential variability is considerable. 
Our work does not rule out a potentially important effect of physical wiping, though in real life this might also spread viruses further. A number of the agents tested were extremely efficient at killing the virus. These included 1% bleach, 10% malt vinegar, 0.01% washing up liquid, antibacterial wipes and anti-viral tissues. Some of these agents are relatively cheap and make for readily available, easy to use disinfectant products suitable for use in the home, even in low resource settings. The bleach used contains sodium hypochlorite and sodium hydroxide. The Food and Agricultural Organisation (FAO) of the United Nations recommends 2% sodium hydroxide (NaOH) for decontamination of animal housing equipment and machinery vehicles in order to be effective against avian influenza viruses. Little work has been carried out to investigate the specific effects of NaOH against influenza viruses. However, there is evidence that treatment with 1% NaOH can reduce an A/ H1N1 virus titre by up to 10 6 EID 50 /0.2 ml. Sodium hypochlorite is a chlorine-containing compound and the disinfectant nature of such agents arises due to the formation of hypochloric acid in water. The WHO recommend 1% sodium hypochlorite (which contains 0.05% or 500 mg/L free chlorine) to disinfect surfaces and medical equipment. Recent work also suggests that sodium hypochlorite at 750 ppm (750 mg/L) is capable of inactivating a low-pathogenicity avian influenza virus. Further data suggests that other avian influenza viruses, such as A/H5N1, are inactivated by extremely low concentrations of chlorine (0.52-1.08 mg/L,. Our data suggest that 1% household bleach, which equates to 0.05% sodium hypochlorite, are sufficient for the inactivation of human influenza viruses. All dilutions of washing up liquid tested (down to 0.01%) inactivated the virus. Undiluted, this product contains 1-5% denatured ethanol, 15-30% ionic detergents and 5-15% non-ionic detergents. 
In a separate informal experiment we determined that a typical bowl of fresh 'washing up' water is likely to contain 0.1% to 0.01% washing up liquid. Although the alcohols have a denaturing effect on the viral proteins, at the concentrations used here it is most likely that the detergents are the active ingredients, acting to disrupt the viral envelope. Vinegar is a commonly stocked household product, suitable for culinary use and also used for stain removal and other household cleaning. Malt vinegar (4-8% acetic acid) was effective down to a dilution of 10%. Previously 5% acetic acid has been demonstrated to be effective at inactivating an A/H7N2 strain of influenza and it has been known for some years that acid-based media cause inactivation and aggregation of HA glycoprotein spikes and virus, by triggering the low pH-dependent conformational change in the HA that normally only occurs in late endosomes.. Warm water is frequently used in the home to rinse surfaces and dishes. However, the data clearly show that, when used alone, it is ineffective in killing enveloped viruses, unless incubated with them for extended periods of time. Heating at 56uC of an A/H7N2 influenza strain for 30 minutes was shown to be effective at inactivating the virus. However, there are conflicting data which demonstrate that A/H7N3 avian influenza viruses can withstand 56uC warm water incubation for up to 60 minutes. All the liquid cleaning agents were diluted in warm water which may therefore have had a synergistic effect. However, due to the lack of fast killing with warm water alone, it is highly likely that it was the active ingredients in the cleaning agents which exerted a rapid virucidal effect. The branded anti-bacterial wipes and anti-viral tissues were encouragingly effective at inactivating the virus. The branded antibacterial wipes contain butoxypropanol (1-5%) and ethanol (5-10%). The branded anti-viral tissues contain citric acid (7.81%) as the active ingredient. 
In vitro tests demonstrated that citric acid based buffer solution nasal sprays reduced the titre of an influenza A Sydney/5/97 (H3N2) influenza strain by up to 3 logs after 1 minute contact time. Citric acid works in a similar manner to acetic acid, inducing the low pH transition in the viral HA protein thus rendering it unable to mediate cell entry. The toddler wipes and multi-surface wipes which were markedly less effective contain <5% surfactants, compounds recommended for use against influenza because they disrupt the integrity of the lipid virus envelope. Our data indicate that the surfactants in the wipes are not present in high enough concentration to inactivate PR8 in under 60 minutes. The toddler wipes contain citric acid in common with the highly effective antiviral tissues; however the concentration in the former is not specified and may well be too low to have a substantial effect on virus infectivity. Most of the cleaning agents had little effect on genome copy number. However, 1% bleach reduced copies of the genome by over a thousand fold. In another study, treatment of avian influenza viruses with 1% sodium hypochlorite resulted in no detectable RNA. A high concentration of washing up liquid (1%, which contains alcohol and surfactants) showed a 3 log drop in genome copies compared to 1 log with 0.1 or 0.01% washing up liquid. Alcohol based hand gels have been demonstrated to reduce A/H1N1 down to only 100 virus copies/ml. Conclusions The virus envelope not only protects the genome and core virion proteins but also acts as a vector to transfer genome between host cells. Disruption of the envelope either by lipid attack (causing disintegration) or protein denaturation (preventing fusion to host cells) inhibits the virus being transmitted to a new host. Active ingredients in a number of the cleaning agents, wipes and tissues tested were able to target the influenza envelope and render the virions non-viable. 
Some of these agents were also capable of destroying the viral genome, in particular bleach. In the context of the on-going pandemic and the control of interpandemic influenza in the home, it is possible to conclude that in a household setting, simple, readily available products such as 1% bleach, 10% vinegar and 0.01% washing up liquid all make convenient, easy to handle killing agents for influenza virus A/H1N1. These findings can be readily translated into simple public health advice, even in low resource settings. The public do not need to source more sophisticated cleaning products than these; notwithstanding, wipes with a claimed antiviral or antibacterial effect are also likely to be highly effective. However, caution should be exercised with non-microbicidal 'cleansing' wipes and toddler wipes containing only low concentrations (<5%) of surfactants as these appear to have less anti-influenza action. It may be appropriate for families intending to use wipes to reduce influenza transmission in the home, to be advised not to assume that all have equal anti-influenza properties and to be encouraged to select brands with certified or validated anti-viral or antibacterial properties. Cleaning Agents Tested The following agents were tested: 1% bleach (Domestos®: Unilever, UK); 50%, 10% & 1% malt vinegar (Sainsburys, UK); 1%, 0.1% and 0.01% washing up liquid (Original Fairy Liquid®, Procter and Gamble, UK). Water at 55 °C was included as a control. The agents were also diluted in this, since it represented a comfortable hot water temperature which people might use in a domestic household. Viruses and Cells Human influenza virus A/Puerto Rico/8/34 (H1N1, Cambridge lineage) was grown in embryonated eggs and titrated in MDCK cells as previously described. Stocks at a titre of 1.3–1.9 × 10⁸ pfu/ml were used. This and subsequent dilutions in distilled H₂O were within the range of concentrations found in nasal secretions. 
Madin Darby Canine Kidney cells (MDCKs) obtained from the European Collection of Cell Cultures were used in the virus plaque assays. They were maintained in Dulbeccos Modified Eagles Medium (DMEM, Gibco, UK) containing 2 mM glutamine, 10% foetal calf serum, 100 mg/ml penicillin and 100 mg/ml streptomycin (Invitrogen). Cells were split using 0.25% trypsin/EDTA and seeded at 1.6610 6 /well in six-well tissue culture plates for plaque assays. Nucleic Acid Extraction and qRT-PCR 0.5 ml samples were processed using the NucliSENSH easy-MAG system (Biomerieux), with an elution volume of 60 ml. As an internal control, bacteriophage MS2 (ATCC 15597-B1) at approximately 4000 pfu was included in each sample. qRT-PCR All primers and probes were obtained from Metabion, with the exception of the MGB probe which was obtained from Applied Biosystems (ABI). The enzyme and buffers used were from the Invitrogen Superscript TM III Platinum kit. Primers and probes used in the RT-PCT were as follows: MS2-F and MS2-R: Forward and reverse primers binding to target sequences on the internal MS2 bacteriophage control, 20 pmol/ml each. Each 20 ml RT-PCR reaction contained 4.4 ml water, 12.5 ml 2 x buffer, 0.1 ml each MS2 primers, 0.2 ml MS2 probe, 0.5 ml AMF, 1 ml AMR, 0.4 ml AM probe, and 0.8 ml Superscript TM III Platinum one-step enzyme. DNA Matrix plasmid standards of known concentration from 25 (4.8610 6 ) to 210 (4.8610 1 ) were used to construct a standard curve, from which the genome copies in the samples could be calculated. RT-PCR was performed on a Rotor-Gene TM 6000 (Corbett Research) real-time DNA detection system as follows: Incubation at 50uC for 30 minutes, followed by a further incubation at 95uC for 2 minutes and then 45 cycles of amplification with denaturation at 95uC for 15 minutes and annealing and extension at 60uC for 1 minute. Acquisition was on the Joe and Rox channels at each cycle. 
Virus Killing Assay: Cleaning Agents This assay was based upon British Standard BS EN 14476:2005. 60 µl of PR8 virus stock was diluted (1:10) into 150 µl 10% BSA solution and 1290 µl Serum Free Media (DMEM + glutamine + penicillin/streptomycin). To simulate the household situation, autoclaved (15 minutes at 121 °C) tap water was used as the diluent. When using the tap water alone as a reagent, 15 ml was added to a 50 ml tube and placed in a water bath set to 55 °C. For all other cleaning agents, dilutions were made in autoclaved tap water. For all reagents tested, a separate 15 ml tube containing autoclaved tap water was heated to 55 °C in the water bath. This was used as the control experiment. The 15 ml of cleaning agent was added to the diluted PR8 (see above) to begin the assay. Samples of the PR8/disinfectant test solution were taken, in triplicate, at 0 (i.e. immediately) and 60 mins. The test solution was removed in order to sample the virus but replaced into the water bath for incubation until later sampling time points. For each sample, 450 µl was taken, snap frozen on dry ice and stored at −70 °C. These aliquots were used in the plaque assays to assess the effect of each agent on virus viability. At the same time points, 500 µl was added to 2 ml lysis buffer for RT-PCR and stored at −20 °C. This was also done in triplicate. For each cleaning agent experiment, including water alone, a positive control experiment was performed to ensure that the plaque assay method worked under these conditions. A 1:25 dilution of PR8 stock was made by adding 10 µl PR8 into 25 µl 10% BSA solution and 215 µl Serum Free Media (SFM, DMEM + glutamine + penicillin/streptomycin) in a 15 ml tube. 2 ml of 55 °C autoclaved tap water was added to the PR8 control dilution to begin the assay. Immediately, 450 µl was sampled, snap frozen and stored at −70 °C for plaque assay. At the same time, 500 µl was added to NucliSENS lysis buffer for RT-PCR. 
Virus Killing Assay: Wipes/Tissues The PR8 stock was diluted as for the liquid cleaning agent experiments. In a Biological Safety Cabinet, a wipe/tissue was removed from its packaging and rinsed 3 times in 75 ml of cold sterile water. 15 ml of the wipe/tissue solution was taken up into a 20 ml syringe and filtered through a MinistartH filter (0.45 mm) to remove wipe particles in the solution. The procedure was repeated for the three wipes and tissue product. 15 ml of wipe/tissue solution was added to the diluted PR8 (see above) to begin the assay. Samples of this solution were taken, in triplicate, at 0 (immediately) and 60 mins. For each sample, 450 ml was taken, snap frozen using dry ice and stored at 270uC. At the same time points, 500 ml was added to 2 ml lysis buffer for RT-PCR. As with the liquid cleaning agents, positive controls were included. Unlike the controls for the liquid agents where PR8 was treated with autoclaved 55uC tap water, the controls for the wipe experiments were treated with sterile water. Plaque Assay Plaque assays were carried out in 6 well culture dishes containing confluent MDCK cells overlaid with 2 ml of a mixture of 50% serum free medium, 50% Avicel (Signet Chemical Corporation, India), 1 mg/ml Worthington's Trypsin and 0.14% BSA, where the volume of each component was adjusted based on the number of plaques assays performed. The cells were then incubated at 37uC for 48 hours without agitation, before fixation with formal saline and staining using 1% Toluidine Blue.
#include "interface/interface.h"
#include "menu/menu.h"

/*
 * Seller menu loop.
 *
 * Repeatedly displays the SELLER menu (via usemenu) and dispatches on the
 * selected option until the user picks the final entry, optionNum[SELLER],
 * which acts as the "exit" choice. Every option handler is currently an
 * unimplemented stub — selecting any option just redisplays the menu.
 */
void SELLER_Interface()
{
    /* usemenu both draws the menu and returns the user's choice. */
    for (int op = usemenu(SELLER); op != optionNum[SELLER]; op = usemenu(SELLER)) {
        switch (op) {
        case 1: /* TODO: implement seller option 1 */
            break;
        case 2: /* TODO: implement seller option 2 */
            break;
        case 3: /* TODO: implement seller option 3 */
            break;
        case 4: /* TODO: implement seller option 4 */
            break;
        case 5: /* TODO: implement seller option 5 */
            break;
        }
    }
}
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>

/*
 * Reads two whitespace-delimited strings from stdin and prints how many of
 * the first three character positions hold the same character in both,
 * followed by a newline.
 *
 * Fixes over the previous version:
 *  - replaced the non-portable <bits/stdc++.h> umbrella header with the
 *    standard headers actually used;
 *  - removed the unused macros (fi, se, mp, inf, md, pb, li, lo) and the
 *    unused global `int n`;
 *  - the old loop indexed s[i]/t[i] for i = 0..2 unconditionally, which is
 *    undefined behaviour when either input is shorter than three
 *    characters; the comparison is now bounded by both string lengths
 *    (output is unchanged for the intended length-3 inputs).
 */
int main() {
    std::string s, t;
    std::cin >> s >> t;

    // Compare at most the first three positions, never past either string.
    const std::size_t limit =
        std::min({s.size(), t.size(), static_cast<std::size_t>(3)});

    int matches = 0;
    for (std::size_t i = 0; i < limit; ++i) {
        if (s[i] == t[i]) {
            ++matches;
        }
    }

    std::cout << matches << '\n';
    return 0;
}
Long-term Variations in the Intensity of Plages and Networks as Observed in Kodaikanal Ca-K Digitized Data In our previous article (Priyal et al. in Solar Phys. 289, 127, 2014) we have discussed the details of observations and methodology adopted to analyze the Ca-K spectroheliograms obtained at the Kodaikanal Observatory (KO) to study the variation of Ca-K plage areas, enhanced network (EN), and active network (AN) for Solar Cycles, namely 19, 20, and 21. Now, we have derived the areas of chromospheric features using KO Ca-K spectroheliograms to study the long-term variations of Solar Cycles 14 to 21. The comparison of the derived plage areas from the data obtained at the KO observatory for the period 1906-1985 with that of MWO, NSO for the period 1965-2002, earlier measurements made by Tlatov, Pevtsov, and Singh (Solar Phys. 255, 239, 2009) for KO data, and the SIDC sunspot numbers shows a good correlation. The uniformity of the data obtained with the same instrument remaining with the same specifications provided a unique opportunity to study long-term intensity variations in plages and network regions. Therefore, we have investigated the variation of the intensity contrast of these features with time at a temporal resolution of six months assuming that the quiet-background chromosphere remains unchanged during the period 1906-2005 and found that the average intensity of the AN, representing the changes in small-scale activity over solar surface, varies with solar cycle being less during the minimum phase. In addition, the average intensity of plages and EN varies with a very long period having a maximum value during Solar Cycle 19, which was the strongest solar cycle of twentieth century. Introduction The study of several types of long term activities on the surface of the Sun is very important to enable us to understand the internal dynamics of the Sun and the meridional flows (Choudhuri, Schussler, and Dikpati, 1995). 
The meridional flows cause the observed systematic variation in the activity on the solar surface and recycling of the toroidal and poloidal components of magnetic field. The flow of material in the meridional plane direction, from the solar equator toward poles and from poles towards the equator deep inside the Sun, plays a significant role in the solar magnetic dynamo (Choudhuri, Schussler, and Dikpati, 1995;Charbonneau, 2007). The pattern of change in surface magnetic field, sunspots, H filaments, Ca-K features and other solar activity indices, and its variation with time has implication for the study of meridional flows (;Jin and Wang, 2012;Sindhuja, Singh, and Ravindra, 2014). The variations in solar irradiance might originate from number of reasons: i) change in the effective temperature of the Sun due to variations in the occurrence of faculae, plages and other active features (Gray and Livingston, 1997); ii) change in the size of the Sun (Ulrich and Bertello, 1995); iii) variation in activity with solar latitude changes with the solar cycle phase. Various measurements of total solar irradiance (TSI) has indicated that it varies ≈0.2 % over a solar cycle (Solanki and Unruh, 2013). But, the Ca-K index representing chromosphere increases by ≈ 20 % during the maximum phase of a solar cycle compared to the minimum phase (White and Livingston, 1981). The large variations in Ca-K index compared to that in TSI makes it possible to study the periodic variations in the activity on the Sun from a large data set of chromospheric images obtained at various observatories. Using the measurements of solar spectral lines, Gray and Livingston found that the variation in the effective temperature of the Sun is 1.5±0.2 K over a cycle. Further, Penn and Livingston found that umbral magnetic field and temperature reduces during the minimum phase compared to maximum phase. 
Number of investigations about the long term variations in the chromospheric activity, especially those pertain to variations in Ca-K plage areas were made using the Ca-K line images (Foukal, 1996;;Worden, White, and Woods, 1998;Tlatov, Pevtsov, and Singh, 2009;;). After identifying the plages in Mt. Wilson Ca-K images, Foukal et al. found a linear relationship between the plage areas and the sunspot numbers. But, they did not find any good correlation between white-light facular areas and Ca-K plage areas. Tlatov, Pevtsov, and Singh compared the Ca-K data of the three observatories, namely Kodaikanal Observatory (KO), Mt. Wilson (MWO) and National Solar Observatory at Sacramento Peak (NSO/SP) and found a high correlation between these data sets. Further, the comparison indicated that generally the plage areas determined from KO images are smaller by about 20 % as compared those from MWO and NSO/SP data. This is probably due to different passband of the filter used or selected by the exit slit of the spectrograph. Foukal et al. compared the Ca-K indices derived from Kodaikanal, Mt. Wilson ad US National Solar Observatories and found that they show consistent behavior on the long time scales. After analyzing 1400 Ca-K line spectroheliograms taken during the period of 1980-1996, Worden, White, and Woods (1998 identified the plages, enhanced network(EN) and active network(AN) areas using the empirically determined values of threshold intensity for these features and found that plages and enhanced network areas occupy about 13 % and 1 % areas on the solar disk, respectively, during the active phase. In addition to this, the plages and enhanced network areas show strong rotation modulation of 27 days. Ermolli et al. 
compared various parameters such as, image quality, eccentricity, stray light, large scale inhomogeneities, image contrast, etc., of Ca-K images taken from KO, NSO/SP, and MWO and concluded that it would be extremely useful to digitize the KO spectroheliograms with a higher photometric accuracy and spatial resolution since the KO series is the most homogeneous and longest among these. It may be noted that KO had already started to digitize the Ca-K images with pixel resolution of 0.86 arcsec and 16-bit digitization to achieve higher photometric accuracy (). Priyal et al. have analyzed the Ca-K line spectroheliograms for three solar cycles (19 -21) to study the variations of plage and network areas with solar activity on the Sun using the empirically determined values of threshold intensity contrast for the plages, enhanced network (EN) and active network (AN) with respect to normalized quiet chromoshpere as done by Worden, White, and Woods. They have normalized the intensity of the quiet chromosphere to 1 to measure the intensity contrast of various active features. This assumption is supported by the measurements of Ca-K index and Wilson-Bappu width of the line at the center of Sun which did not show change during the period of 1974(. The details of the methodology to analyse the data and selected values of intensity contrast for different features are given in the earlier article by Priyal et al.. It may be noted that most of the earlier studies consider the variation of plage areas with time and a couple of those studies investigate the variation of plage intensity with time. The methodology adopted by all the authors removes the effect of limb darkening only to identify the Ca-K line features. The procedure we have developed, yields the images free from all the effects such as limb darkening, vignetting due to instrument, stray light, and local defects due to photographic plate (). 
Using the same procedure, we have extended the data analysis to the KO spectroheliograms for the period of 1906 -2005 to identify and determine the Ca-K plage areas, EN and AN areas on a day-to-day basis. The uniformity in the Ca-K spectroheliograms obtained at Kodaikanal over a century with the same instrument without any change in the optical components provides a unique dataset to study variation in the intensity contrast of plages and network with time. We have, therefore, determined the average intensity contrast of these features with an interval of six months to study the variations, if any. In order to examine how much of the contribution comes from the plages and network to the TSI, it is not only important to measure the plage and network element areas, it is equally important to measure the total or mean intensity of those regions on a daily basis. In this article, we first confirm the variation of the plage area with the solar cycle. Later, the computed mean intensity for the plages and networks is examined over ten solar cycles. At the end of this article we discuss the possible reason for the observed long term variations in the intensity of solar features. Data, Calibration, and Analysis The light feeding system is an 18-inch Foucault siderostat which sends the sunlight to a 12-inch Cooke photo-visual objective. The objective lens forms the image of the Sun on the entrance slit of the spectroheliograph. The siderostat had a gravity-based tracker in the past. It was replaced with a motor during the 1990s. The rate of the gravity clock and the frequency of the motor were adjusted to compensate for the seasonal variations in the motion of the Sun in the sky. No guiding arrangement was introduced in the system at any time. 
The spectroheliograph existing at the Kodaikanal observatory is similar to the one developed by Hale for the Yerkes observatory (Hale and Ellerman, 1903) The spectrograph has a set of two 4-inch prisms and two identical 6-feet focal length lenses to collimate the solar beam and focus the spectrum on to the exit slit. The width of the entrance and exit slits are maintained at 70 and 100 microns respectively, throughout the period of observations. The width of the exit slit permits 0.5 spectral pass-band centered around the Ca-K line allowing the violet and red emission wings of the line. For a smooth movement to scan and build the image on a photographic plate or film, the whole of the spectroheliograph was made to move at a uniform speed across the Sun's image. This is achieved using a hydraulic and a pulley system on three rolling balls. The speed of this movement can be varied to adjust for the exposure time depending on the sky conditions and the time of the day. Generally, large exposure time is required in the morning hours compared to the noon. By scanning over the Sun's image, the spectroheliogram was made in less than one-minute of time, generally around 30 seconds. Due to use of siderostat, the image rotates on its axis during the image build up on the photographic plate but the effect is negligible. It is around one-arcsec which is much less than the the seeing conditions at the Kodaikanal observatory. The images obtained have minor ellipticity (less than 1 %) due to the drift of the image during the exposure. Any attempt to guide the image manually during the exposure time led to poor results. The full-disk spectroheliograms are made in Ca-K and H on daily basis. In addition, by covering the image of the Sun by a round blackened plate (like coronagraph) at the focal plane of the solar image, chromospheric limb images are also made for the study of prominences. There are streaks in the Ca-K and H spectroheliograms. 
The streaks in the spectroheliograms are due to the spectrograph slit. We have developed a software to compensate for the intensity vigneting (applied on digitized images) due to the instrument and a small shift of light patch on the dispersing elements of the spectrograph. The orientation of the solar image is made after recording the image on the photographic plate. There is a mathematical formulation to determine the value of angle between north-pole of the Sun and the vertical top point of the image considering the place of observations, time and date of observations and p-angle. We had prepared tables for each day and time at an interval of six minutes. Interpolation was done if the observing time occurred between the two values of time. Then considering p-angle, date and time of observation, we identify the position of the North pole. North and south polarity was marked by placing the obtained image on the circular grid showing angle with a resolution of one degree. Thus the North-South poles were marked with a resolution better than one degree. The detailed description of the Ca-K images obtained at the Kodaikanal Observatory, digitization and calibration procedures which involves finding center and radius of each image, centering of each image, image rotation, limb darkening correction using the background chromosphere without considering the active regions, normalizing the quiet chromosphere to uniform value of one, has been reported in our earlier article (). In addition to the procedure described in our earlier article, here we show the typical intensity contrast distribution of the images after removal of limb darkening and instrumental effects during the minimum and maximum phases of solar cycle in Figure 1. In the top row of the figure we show typical images after the removal of limb darkening for the minimum and maximum phases of the solar cycle. 
Visual inspection of the images indicate that on one side these appear brighter as compared to the other because of the residual effect of intensity vignetting in the images due to instrument as shown in our previous article (). In the middle panel we show the same images after making correction for the limb darkening effect and intensity vignetting due to instrument as explained in the earlier article. These images show quiet chromosphere to be uniform over the whole image. Further, for comparison we show the intensity contrast distribution for these images in the bottom panel of this figure. The dashed line shows the intensity contrast distribution for the image corrected for the limb darkening and solid line indicates the same for the image corrected for both, limb darkening and instrumental effects. The final distribution of the intensity contrast is similar to the distribution obtained by Bertello, Ulrich, and Boyden. The bottom panel of this figure shows that the width of the intensity contrast distribution for limb darkening corrected images (dotted curve) is larger and irregular as compared to that of both limb darkening and instrumental effects corrected image (solid curve). Intensity vignetting in the image due to instrument makes the distribution broader and thus needs to be corrected. All the measurements of the plages and networks have indicated that at any given time, during all phases of the solar cycle, the areas occupied by the quiet background chromosphere are more than the total areas occupied by the plages and networks including the quiet network on the solar surface. Therefore, it is expected that the peak of the intensity distribution curve of the image represents the background chromosphere. The intensity of the background chromosphere does vary with time as it represents general temperature of the Sun. Even if it varies with time, the variation in the background will be marginal and will not change the results of present study. 
We, therefore, after calibrating and correcting images for the instrument effects, have normalized the peak of the intensity distribution curve to one. The portion on the left side of the intensity distribution curve at the time of minimum phase of the Sun indicates that the values of intensity contrast vary from 0.9 to 1.0, representing the background chromosphere. Hence, the intensity contrast values between 1 ± 0.1 shows the scatter in the background chromoshpere. We have referred these areas as quiet background chromosphere. The intensity contrast during the minimum phase generally varies from 0.9 to 1.25. The areas of intensity contrast values larger than 1.1 represent mostly the quiet and active network. The larger tail and area under the curve for larger values of intensity contrast during maximum phase of the solar cycle is due to the occurrence of more plages and networks during that period. One may note that the quiet-background chromosphere is normalized to unity for all the data so that variations in the intensity contrast of the plages and networks with time can be investigated. The larger tail and area under the curve for larger values of intensity contrast during maximum phase of the solar cycle is due to the occurrence of more plages during that time. One may study variations of plage area and network area with time using intensity threshold values for different features, intensity contrast distribution and area under the curve. But, we identified the features in each image using the intensity threshold and the area threshold values. We developed software program to automatically identify the chromospheric features such as plages, enhanced network, active network and quiet network (QN) as has been done in Worden, White, and Woods using the threshold value for the features and filling the areas within the intensity contours for the plages and enhanced networks. 
Following Worden, White, and Woods, we have determined empirically the values of threshold intensities for the plage and network areas for KO data. By using the method of filling the intensity contours Worden, White, and Woods found an average plage area to be larger by a factor of two than determined by others from MWO data. Hence we did not fill the intensity contours but computed the total areas occupied by pixels with the defined values of threshold intensity. The plages are identified by using threshold values of intensity contrast determined empirically by using randomly selected images spread over the observing period and fixing threshold values of intensity contrast for each feature. We found that these threshold values were able to identify the features properly in about 90 % of good images obtained during 1906 -2005. After visual examination of the analysed images, the remaining 10 % of data where the features could not be identified properly were discarded. Further, we have determined the average intensity contrast of plages, EN, and AN on day to day basis. In the next section we show the variations in the areas and intensity of plages, EN, and AN on a long-term basis. Results We remind the reader that the chromospheric features with intensity contrast larger than 1.35 and adjacent combined area more than one arcmin 2 were considered as plages. Regions with combined areas less than one arcmin 2 and larger than 0.3 arcmin 2 and intensity contrast larger than 1.35 are considered as EN elements. The regions with combined area less than 0.3 arcmin 2 and intensity contrast larger than 1.35 were considered AN elements along with regions of intensity contrast between 1.25 and 1.35. Finally, areas with intensity contrast between 1.15 and 1.25 are taken as the quiet network (QN) elements. 
By normalizing the intensity of quiet chromosphere to unity value for whole of the data we have determined the average intensity of the plages and network with respect to normalized background, quiet chromosphere. In the following we discuss the long term variations in their areas and intensity contrast. Variations of Ca-K plage Areas and its Comparison with Other Data The temporal variations of plage area is well studied in the past using MWO data ftp.ngdc.noaa.gov/STP/SOLAR DATA /SOLAR CALCIUM/DATA/Mt Wilson/ and we compared our detected plage area with that of MWO and the SIDC sunspot number. We determined the plage area in units of fraction of the solar disk on day-to-day basis for the period 1906 -2005. The data for the period of 1986 -2005 has large gaps due to sky conditions and large number of defective photographic plates. To compare the measured KO plage area with other data, we have computed the average plage areas on half yearly basis. Figure 2 show half yearly averaged plage areas (solid line) for KO data and the corresponding MWO data (dotted line) in the plot. Half yearly averaged plage areas indicate that there is general agreement between the KO and MWO data but about 15 -20 % smaller plage areas for the KO data as compared to MWO data for the two Solar Cycles 18 and 19 during the maximum phase. Tlatov, Pevtsov, and Singh also find a similar behavior for the Solar Cycle 19 and interpret this type of abnormal behavior of Cycle 19 due to masking effect of sunspots in calculation of plage indices. In Figure 3 we show the scatter plot between the half yearly averaged plage areas computed by us (here after called PSR) and MWO Ca-K index, KO Ca-K index (TPS, Tlatov, Pevtsov, and Singh ) and NSO Ca-K index. Panel a shows half-yearly averaged plage areas (KO plage-index) determined using the high-resolution images versus the MWO Ca-K index for the period of 1915 -1985; panel b indicates the PSR versus TPS values for the KO data. 
Here PSR is for plage area derived by us and TPS includes both plages and enhanced network determined by Tlatov et al.. Panel c shows the PSR values against the NSO index for the period of 1965 -2002. Further, in panel d we show a scatter plot between the half yearly averaged sunspot number and the corresponding plage area along with a linear fit to the data. All the four linear fits to the respective data sets show good correlation between different measurements and correlation coefficient greater than 0.75 with confidence level more than 99 % in all cases. The aim is to show the good correlation between different measurements and to prove the reliability of the methodology adopted and further investigate the variations in the networks and intensity contrast of plages and networks with time. A linear fit seen in panel a to the data gave the following relation between the MWO and KO results. Where MWO PA and KO PA are the plage areas from Mount Wilson Observatory and Kodaikanal Observatory data, respectively. The relation between these two measurements indicate that on an average plage area values from KO data are ≈10 % less than those from MWO data, compared to 20 % difference in earlier measurements by Tlatov, Pevtsov, and Singh. The decrease in difference between KO and MWO values may be because the present measurements are using data digitized with high-resolution compared to the data digitized with low-resolution used by Tlatov, Pevtsov, and Singh. Foukal find the fractional plage areas of the visible Sun were about 8 %, and 4 %, during the maximum phase of Solar Cycle numbers 19 and 21, respectively. Tlatov, Pevtsov, and Singh also find that maximum fractional plage area including the enhanced network during the period of 1957 -1958 to be about 8 %. 
In contrast, Worden, White, and Woods found that the plage and enhanced network typically cover about 13 %, and 10 %, respectively, of the solar disk during the maximum phase of Cycle 21 and 22, moderate cycles as compared to the Cycle number 19. Further, they reported that during moderate and minimum activity levels, the total plage and enhanced-network areas can reach zero, but the active network can still cover a large portion of the solar disk. Thus there are large variations in the fractional plage area determined by various techniques adopted. It may be noted that there is no absolute way of defining the plages or networks by assigning the threshold values of intensity contrast. These will vary for the data of different observatories depending upon the specifications of the instrument and passband used for observations. It is very important that all the data used in the analysis should be taken with the same instrument and analysed uniformly to study the temporal variations as in the present study. Earlier authors have also fixed the threshold values of intensity contrast empirically as we have done now by examining number of images of the Sun taken at different times. We find that half yearly averaged maximum fractional plage area for a given cycle appears to be related to the strength of the solar cycle represented by the sunspot numbers and it varies from 4 to 7 %, during the period 1906 -2005. Butterfly Diagram for Plages To make the butterfly diagram we need to know the latitude and area of each plage on day to day basis. After the identification of pixels with intensity contrast larger than 1.35 corresponding to plage regions, we converted the image into binary format i.e., plage pixels as 1 and remaining pixels as zero. By using IDL's LABEL REGION.PRO, we found groups of pixels whose value is one, in other words without any gap, each group representing a plage region. 
In every region defined as plage, the number of pixels were counted to compute the area of the plage and the regions with area less than one arcmin 2 were not considered as plages but considered as EN. Then operating our code for each identified plage region, and by using center of mass concept, we computed the centroid of each plage region in terms of pixel coordinates and also its area. Then we converted the pixel coordinates to heliographic coordinates (latitude and longitude) considering the apparent size of image on that day. Following this procedure, we determined the centroid of all the plages for all the data. In Figures 4 and 5 we plot the latitude centroid of each plage as a function of time for the period of 1906 -1985, popularly known as butterfly diagram. Figure 4 is considering all the plages of different sizes whereas in Figure 5 we have grouped the plages in three sets. Visual inspection of both the figures indicate that all size of plages occur in large numbers during the stronger solar cycles as compared to the weaker cycles. The Solar Cycle number 19, the strongest Cycle of 20th century has more number of plages with larger areas and occur at larger latitude belt. To study quantitatively the coverage of plages along latitudes, we have plotted the number of plages as a function of latitude for each solar cycle ( Figure 6) and for both the hemispheres separately. The FWHM of Gaussian fit to the frequency distribution for the northern and southern hemispheres for each cycle represents the spread in latitude. The plot of FWHM averaged over both the hemispheres for each cycle against maximum sunspot number for the corresponding cycle shown in Figure 7 indicates a correlation between these two. A linear fit to the data gave a correlation coefficient value of 0.75 indicating a confidence level of 99 %. 
The linear fit indicates that FWHM of the spread along latitude is ≈20.5 for the 19th solar cycle, the strongest cycle of the 20th century, and ≈10 for Cycle 14, the weakest cycle of this period. This implies that plages occur over larger latitude belts for stronger solar cycles as compared to that for weak cycles. Further, we show the scatter plots between plage areas and EN as well as plage areas and AN networks (Figure 8). We observed a linear relationship between the plage areas and EN, AN areas. Long-term Variations in Intensity of Plage and Network Regions We have made an attempt to study the long term variations in the intensity of plages and network regions with respect to normalized intensity of the quiet chromosphere. While carrying out this exercise we assumed that the quiet chromosphere does not change with the phase of solar cycle. Observations made by Livingston et al. over a period of 30 years show that the Ca-K index measured at disk center does not vary significantly with the cycle of solar activity. It may be noted that intensity contrast for plages and networks are different for different instruments and passband of the filter or selected wavelength band for spectroheliograms and thus any change made in the experimental set up is likely to affect the uniformity of the data. During the 100-year period covered by the Kodaikanal observations, no substantial changes were made to the instrumentation that may have altered the overall quality of the data. As stated earlier we first identified different chromospheric features using different intensity threshold values as defined in Section 3 of this article. We computed the daily average intensity contrast of plage by considering all the pixels in the plage region. Similarly, we computed the average intensity contrast of the EN and AN for each day of the data. 
Due to the dependence of plage and network intensity on the underlying magnetic fields (Ortiz and Rast, 2005; Sivaraman and Livingston, 1982), the variation in the intensity of plages and network with long periods is likely because of the changes in the solar dynamo processes. Further, the intensity of plage and network is likely to vary on daily basis due to growth and decay of these features. To study the long term variations in the intensity of plages and network, we have computed the average value of the intensity on half yearly basis for the period of 1906 – 2005. We show the averaged intensity of plages, EN and AN as a function of time in Figure 9. We have also plotted the three year average of the intensity contrast to smoothen the data to view solar cycle variations. Panel a in Figure 9 shows that the intensity of the plages appears to vary with the solar cycle phase and with some other quasi-periods. It may be noted that the intensity contrast of plages definitely varies with a very long period, probably on a time scale almost equal to the length of observations or more. The average intensity contrast of plages varies between 1.55 and 1.90 with some pattern during the period 1906 – 2005. We have also studied the long term variations in the intensity of the central region of plages considering the area represented by intensity contrast greater than 1.5 instead of intensity contrast larger than 1.35 for plages. Yearly averaged intensity contrast of the central region of plages at an interval of six months seen in panel b of Figure 9 indicates that the average intensity varies between 1.7 and 2.1 during this period. Panel c of this figure shows that the intensity of EN varies with the similar pattern as that of plages but with less amplitude. Panel d indicates that the average intensity of AN, which represents small-scale activity over the whole of the solar surface, varies with very regular pattern slightly different than that of plages and EN. 
Generally, the intensity of AN is more during the active phase as compared to the minimum phase. To determine the quasi-periodicities we have computed the power spectral analysis of the time series by interpolating the few missing data points. As a first step, the mean value is subtracted from the time series. Then the computed power spectrum was normalized. Statistically significant peaks in the power spectrum were found by assuming a Gaussian distribution for each point in the time series. Every point in the power spectrum will have a χ² distribution with two degrees of freedom (DF). The significance level (sig) in the power spectrum was computed as in Torrence and Compo (1998), where N is the number of data points. In Figure 10 we show the power spectra for the time series of plage, central region of plage, EN and AN intensities along with the 99.9 % confidence level. The average intensity of plages, central region of plages, and EN indicate a periodic behavior of almost equal to the period of observations. The peak at ≈90 years is very prominent. The average intensity profiles of plages, central region of plages, and EN also exhibit the same behaviour of a ≈90 year trend. However, we need a very long time series of data to confirm it. Apart from this periodicity there is a peak corresponding to ≈24 years with a confidence level >99.9 %. Other peaks between 6 – 11 years appear to represent the life of the active period during different solar cycles and the 11 year solar cycle period. The power spectra of average intensity of plages, central region of plages and EN are similar for larger periodicity >10 years but that of EN appears to be noisy for periods <10 years. The power spectrum plot for AN indicates a general periodic behavior of the small-scale activity with a period of ≈10.5 years. Very long period variation in the intensity of AN may be difficult to detect, because of the small difference in the limit of minimum and maximum intensity of AN, even if it exists. 
Ca-K Index Considering Area and Intensity of Features After determining the intensity of each pixel identified as the plage, we added the intensity of all these pixels having value of the intensity contrast > 1.35 and area > one arcmin 2. We define the plage index (PI) for the day as, PI = PAIC. where, PA is the plage area on the solar surface expressed in arcsec 2 and IC is the intensity contrast, considering the area of each pixel as 0.74 arcsec 2. Similarly, we computed the EN and AN index on day to day basis. Top panel of Figure 11 shows the half yearly average plage index for the period of 1906 -2005. Middle two panels of this figure show the EN and AN index for the same period. The amplitude of plage index appears to be twice the amplitude of EN and AN indexes during the active phase of solar cycles. The temporal variations in these three parameters appear to be correlated and they all are in phase. Though, the variations in the plage areas and index appear to dominate the solar-cycle variations, the contributions due to variations in the EN and AN indexes need to be considered in modeling the chromospheric solar cycle variations. This is because the combined network (EN and AN) index amplitude becomes comparable to the plage index. It may be noted that variation in the average plage intensity is large for different solar cycles as compared to variation in intensity of EN and AN for different solar cycles. Finally, we have added all the three indexes to derive the total Ca-K index on day-to-day basis and plotted half yearly averaged total Ca-K index at an interval of three months in bottom most panel of Figure 11 for the period of 1906 -2005. This data will be useful for the study of solar cycle variations of chromosphere. 
Summary and Discussions We have digitized the Ca-K line spectroheliograms obtained at Kodaikanal observatory for the period of 1906 to 2005 with a pixel resolution of 0.86 arcsec, using a low noise CCD camera with 16-bit read out to study the long term variation of chromospheric features. We have developed a computer code to align and calibrate the images, remove limb darkening and stray light, and normalize the quiet chromospheric intensity to a uniform value of one. We have identified the plages, EN, AN, and QN by using threshold values of intensity contrast and area parameters determined empirically. The plage areas determined from the KO data agree well with those derived from the MWO data for the period of 1906 – 1985 and earlier measurements made by Tlatov, Pevtsov, and Singh for KO and NSO data for the period of 1965 – 2002. Figure 11. Panel a shows half yearly averaged KO plage index at an interval of three months for the period 1906 – 2005. Panels b and c show the Ca-K index variations for the EN and AN network, respectively. Panel d indicates total Ca-K index considering the contribution from plages, enhanced, and active networks. The gaps represent no observational data available during that period. We have studied the variation in intensity contrast and the areas of the chromospheric features with time and calculated the total Ca-K index for the years 1906 – 2005. Uniformity of the time series and the precision analysis of the data using the code developed by us made it possible to study the long term variations in the intensity contrast of plages, EN and AN with an interval of six months using yearly averaged data. From the averaged data over a period of three years, the plage-intensity contrast appears to be about 1.85 during the active phases of Solar Cycles number 19 and 20, whereas it is about 1.55 during Solar Cycles 14 and 15. The intensity contrast for plages is around 1.50 for the 22nd and 23rd Solar Cycles. 
The intensity of plages for other solar cycles has intermediate values indicating that intensity of plages vary with very long time period in addition to the variations with phase of solar cycle. These values imply that on an average plages are 20 %, brighter in Solar Cycles 19 and 20 than during the active phase of Cycles 14, 15, 22, and 23. If we assume the intensity contrast as defined by Worden, White, and Woods which means intensity of the quiet chromosphere is considered as zero then the average plage-intensity contrast during the Cycles 19 and 20 becomes about 50 %, more than that during the Cycles 14, 15, 22, and 23. Similarly, the intensity of EN during the active phase of Cycles 19 and 20 is more than the Cycles 14, 15, 22, and 23 by about 5 %. Further, we found that yearly averaged intensity contrast of AN, representing small-scale activity over the whole of the solar surface varies by ≈1 % with the phase of solar cycle, being larger at the maximum phase. It may be noted that Tlatov, Pevtsov, and Singh did not find any change in the plage intensity contrast from the data of KO, and NSO during their period of analysis. They found that the average plage intensity contrast for KO, MWO, and NSO is 1.33, 1.50, and 2.09, respectively. Using the same data set, Bertello, Ulrich, and Boyden show that the plage contrast varies with time and was maximum around 1957 -58 during the strongest Cycle 19. The analysis done by Tlatov, Pevtsov, and Singh indicated the intensity contrast of the plages during the maximum phase of Cycle 19 for the MWO data is more compared to other solar cycles. They reasoned that in Solar Cycles 18 and 19 many sunspots appeared on the disk. The bandpass of the exit slit of the KKL is much larger than MWO. Because of this, the dark sunspots appear inside the plages in KKL data compared to MWO data. 
The analysis by Tlatov, Pevtsov, and Singh shows a significant increase in the full-disk plage contrast during the maximum phase of Cycle 19, as compared to other solar cycles. This was attributed to a combination of the masking effect of sunspots in calculating the plage index and in the MWO exit slit width being narrower during that period (Bertello, Ulrich, and Boyden, 2010). Further, Penn and Livingston found that magnetic fields and temperature of the sunspots are less during the minimum phase compared to those at maximum phase. Further, it may be noted that after removing the contribution from solar activity from the basal line profiles, Pevtsov, Bertello, and Uitenbroek find a weak dependency of intensity in the line core (K3) of basal profiles with the phase of the solar cycle. The weak dependency on the solar-cycle phase may be due to small-scale activity. We also find 11 year quasi-periodicity in the intensity of active network. We could find the variation in the intensity contrast with time because of high spatial resolution, better photometric accuracy of digitization, making correction for the intensity vignetting in the images due to instrument in addition to limb darkening effect and careful normalization of the quiet chromosphere. Nindos and Zirin and Ortiz and Rast have found that there is a strong correlation between areas of Ca-K emission in active regions and underlying photospheric magnetic field. We, therefore, conclude that our finding of long term variation in the intensity of plages and EN implies that on an average, strength of magnetic field in active regions is larger for stronger solar cycles represented by more number and area of sunspots during that cycle. In this article, we reported the long-time variations of plage intensity, longer than the solar cycle period for the first time. 
It needs to be investigated how this change in intensity of plage over the long-time period contributed to the TSI and changes in the temperature of the Earth. A group of researchers at the Indian Institute of Astrophysics is in the process of calibrating the Ca-K images without using the step-wedge. This is achieved by using the quiet-Sun profile which is assumed to be constant during the period of 100 years of observations. The preliminary results show that there is not much difference in the results obtained from both methods (with step wedge and quiet-Sun profile). However, more quantitative analysis needs to be done.
//! Support for compiling [ethers::solc::Project]

use crate::term;
use ethers::solc::{report::NoReporter, Artifact, FileFilter, Project, ProjectCompileOutput};
use foundry_utils::to_table;
use std::{collections::BTreeMap, path::PathBuf};

/// Compiles the provided [`Project`], throws if there's any compiler error and logs whether
/// compilation was successful or if there was a cache hit.
pub fn compile(
    project: &Project,
    print_names: bool,
    print_sizes: bool,
) -> eyre::Result<ProjectCompileOutput> {
    ProjectCompiler::new(print_names, print_sizes).compile(project)
}

/// Helper type to configure how to compile a project
///
/// This is merely a wrapper for [Project::compile()] which also prints to stdout dependent on its
/// settings
#[derive(Debug, Clone, Copy, Default)]
pub struct ProjectCompiler {
    /// whether to also print the contract names
    print_names: bool,
    /// whether to also print the contract sizes
    print_sizes: bool,
}

impl ProjectCompiler {
    /// Create a new instance with the settings
    pub fn new(print_names: bool, print_sizes: bool) -> Self {
        Self { print_names, print_sizes }
    }

    /// Compiles the project with [`Project::compile()`]
    pub fn compile(self, project: &Project) -> eyre::Result<ProjectCompileOutput> {
        self.compile_with(project, |prj| Ok(prj.compile()?))
    }

    /// Compiles the project with [`Project::compile_parse()`] and the given filter.
    ///
    /// This will emit artifacts only for files that match the given filter.
    /// Files that do _not_ match the filter are given a pruned output selection and do not generate
    /// artifacts.
    pub fn compile_sparse<F: FileFilter + 'static>(
        self,
        project: &Project,
        filter: F,
    ) -> eyre::Result<ProjectCompileOutput> {
        self.compile_with(project, |prj| Ok(prj.compile_sparse(filter)?))
    }

    /// Compiles the project with the given closure
    ///
    /// Bails early (with a hint about `--root`) when the configured sources directory is
    /// missing, runs the closure under a spinner reporter, and then prints names/sizes
    /// according to this compiler's settings.
    ///
    /// # Example
    ///
    /// ```no_run
    /// let config = foundry_config::Config::load();
    /// ProjectCompiler::default()
    ///     .compile_with(&config.project().unwrap(), |prj| Ok(prj.compile()?));
    /// ```
    pub fn compile_with<F>(self, project: &Project, f: F) -> eyre::Result<ProjectCompileOutput>
    where
        F: FnOnce(&Project) -> eyre::Result<ProjectCompileOutput>,
    {
        // destructure up front so the flags can be used after `self` is consumed
        let ProjectCompiler { print_sizes, print_names } = self;
        if !project.paths.sources.exists() {
            eyre::bail!(
                r#"no contracts to compile, contracts folder "{}" does not exist. Check the configured workspace settings: {} If you are in a subdirectory in a Git repository, try adding `--root .`"#,
                project.paths.sources.display(),
                project.paths
            );
        }

        // time the compilation for tracing purposes only
        let now = std::time::Instant::now();
        tracing::trace!(target : "forge_compile", "start compiling project");

        // run the user-supplied compile closure behind a terminal spinner
        let output = term::with_spinner_reporter(|| f(project))?;

        let elapsed = now.elapsed();
        tracing::trace!(target : "forge_compile", "finished compiling after {:?}", elapsed);

        if output.has_compiler_errors() {
            eyre::bail!(output.to_string())
        } else if output.is_unchanged() {
            println!("No files changed, compilation skipped");
        } else {
            // print the compiler output / warnings
            println!("{}", output);

            // print any sizes or names
            if print_names {
                let compiled_contracts = output.compiled_contracts_by_compiler_version();
                for (version, contracts) in compiled_contracts.into_iter() {
                    println!(
                        " compiler version: {}.{}.{}",
                        version.major, version.minor, version.patch
                    );
                    for (name, _) in contracts {
                        println!(" - {}", name);
                    }
                }
            }
            if print_sizes {
                // add extra newline if names were already printed
                if print_names {
                    println!();
                }
                let compiled_contracts = output.compiled_contracts_by_compiler_version();
                let mut sizes = BTreeMap::new();
                for (_, contracts) in compiled_contracts.into_iter() {
                    for (name, contract) in contracts {
                        // missing bytecode (e.g. interfaces) counts as size 0
                        let size = contract
                            .get_bytecode_bytes()
                            .map(|bytes| bytes.0.len())
                            .unwrap_or_default();
                        sizes.insert(name, size);
                    }
                }
                let json = serde_json::to_value(&sizes)?;
                println!("name size (bytes)");
                println!("-----------------------------");
                println!("{}", to_table(json));
            }
        }
        Ok(output)
    }
}

/// Compiles the provided [`Project`], throws if there's any compiler error and logs whether
/// compilation was successful or if there was a cache hit.
/// Doesn't print anything to stdout, thus is "suppressed".
pub fn suppress_compile(project: &Project) -> eyre::Result<ProjectCompileOutput> {
    if !project.paths.sources.exists() {
        eyre::bail!(
            r#"no contracts to compile, contracts folder "{}" does not exist. Check the configured workspace settings: {} If you are in a subdirectory in a Git repository, try adding `--root .`"#,
            project.paths.sources.display(),
            project.paths
        );
    }
    // scope all compiler reporting to a no-op reporter so nothing reaches stdout
    let output = ethers::solc::report::with_scoped(
        &ethers::solc::report::Report::new(NoReporter::default()),
        || project.compile(),
    )?;

    if output.has_compiler_errors() {
        eyre::bail!(output.to_string())
    }

    Ok(output)
}

/// Compile a set of files not necessarily included in the `project`'s source dir
pub fn compile_files(project: &Project, files: Vec<PathBuf>) -> eyre::Result<ProjectCompileOutput> {
    let output = term::with_spinner_reporter(|| project.compile_files(files))?;

    if output.has_compiler_errors() {
        eyre::bail!(output.to_string())
    }
    println!("{}", output);
    Ok(output)
}
package edu.stevens.cs522.chatserver.activities; import android.app.Activity; import android.os.Bundle; import android.preference.PreferenceFragment; import edu.stevens.cs522.chatserver.R; /** * Created by dduggan. */ public class SettingsActivity extends Activity { public static final String USERNAME_KEY = "username"; public static final String APP_PORT_KEY = "app_port"; public static class SettingsFragment extends PreferenceFragment { @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); // Load the preferences from an XML resource addPreferencesFromResource(R.xml.settings); } } @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); // Display the fragment as the messages content. getFragmentManager().beginTransaction() .replace(android.R.id.content, new SettingsFragment()) .commit(); } }
Samuel L. Jackson gave an interview with the Hollywood Reporter in which he said on hearing about the December murders in San Bernardino, he had wished the killers were white and not Muslim. “When that thing happened in France, we were sitting there going, 'Oh, my God, these terrorists!' And I can't even tell you how much that day the thing that happened in San Bernardino — I was in Hawaii — how much I really wanted that to just be another, you know, crazy white dude, and not really some Muslims, because it's like: 'Oh, s**t. It's here. And it's here in another kind of way,'” Jackson said. “Now, okay, it happened on an Army base and it happened somewhere else. But now? It's like they have a legitimate reason now to look at your Muslim neighbor, friend, whatever in another way. And they become the new young black men." In the wide-ranging interview, covering everything from his career to world affairs, the “Hateful Eight” star elaborated that for years Americans had "been kind of shielded from what the rest of the world's been dealing with. I remember the first time I left the country — in 1980 I went to London — I knew a little bit about the Irish and the English and what was happening, and then something blew up around the corner from where I was, and I was kind of like, 'Woah, what was that?' And they say, 'Oh, Irish terrorists.' It was the first time I'd heard the word 'terrorists.' 'Oh, what do you mean?' And then I started seeing signs in the tube — 'Don't pick up untended packages.' That was the first, 'If you see something, say something.' So I started thinking about it." Jackson said terrorism became America’s problem when “Bush and those guys put us in that fight. And as soon as we drew blood in that war, we became part of something that's been going on for thousands of years. It's like, 'Well, you killed my cousin Akhbar,' duh-duh-duh, and it's like, 'Oh, s**t.' 
So we'll never be out of it now because people hold on to grudges in that kind of way — we're the Hatfields and the McCoys in the world. So that's happened." Jackson also weighed in on the 2016 presidential campaign, distancing himself from his sometime golf partner Donald Trump, but also trashing Trump’s competitors for the GOP nomination. “There are some other people that aren't as open about what he's saying that are running also, you know, that are just as crazy, that have just as much ill-will toward the common man — and not just the common black man. People who don't have a certain amount of money don't mean anything to them." Jackson said he’ll be voting for Hillary Clinton. "I'm forever a Democrat, you know, and I'm gonna vote for Hillary. I mean, I love Bernie — Bernie's a man of the people — but he can't win. So I gotta cast my vote for a person that can keep those other people from winning, okay?”
// Do not call this directly. Instead, call IntegrateOrUnintegrate func (ai AppImage) _removeIntegration() { log.Println("appimage: Remove integration", ai.Path) err := os.Remove(ai.thumbnailfilepath) if err == nil { log.Println("appimage: Deleted", ai.thumbnailfilepath) } else { log.Println("appimage:", err, ai.thumbnailfilepath) } if ai.updateinformation != "" { go UnSubscribeMQTT(MQTTclient, ai.updateinformation) } err = os.Remove(ai.desktopfilepath) if err == nil { log.Println("appimage: Deleted", ai.desktopfilepath) sendDesktopNotification("Removed", ai.Path, 3000) } else { log.Println("appimage:", err, ai.desktopfilename) } }
Albert Manucy Albert C. Manucy (1910–1997) was an author, historian and a Fulbright Scholar who specialized in Spanish Colonial Florida and the architecture of St. Augustine, Florida. Early life and education Albert Clement Manucy was born in St. Augustine on February 20, 1910. His family were of Minorcan descent; his ancestor Josef Manucy was one of the indentured laborers at Andrew Turnbull's colony in New Smyrna, Florida before fleeing to sanctuary in St. Augustine with the other colony settlers in 1779. He attended Ketterlinus High School and graduated in 1928 and attended the University of Florida, where he received a bachelor's degree in education in 1932 and a master's degree in literature in 1934. National Park Service After graduation, Manucy worked for the National Park Service. He first worked on a WPA research project on Fort Jefferson National Monument in Key West, Florida. When money for that project ran out, he returned to St. Augustine to write Seeing St. Augustine, a guidebook sponsored by the Federal Writers' Project. Manucy eventually became a full-time Park Service staff member in 1938. He was one of the first historians of the National Park Service and would become an authority on the history of Spanish Florida; in particular, the Castillo de San Marcos in his hometown. He traveled around the United States, South America, and Europe to study and research St. Augustine. He published his first book, The Houses of St. Augustine, 1566-1821 in 1962. In 1966, he became the curator for the Southeast Regional Office of the National Park Service in Richmond, Virginia. He retired in 1971 and moved back to St. Augustine in 1975, where he remained until his death. In all, Manucy worked for the National Park Service for 33 years. He wrote on many subjects, including two books on St. Augustine. Architecture and Historic Preservation As a child growing up in St. 
Augustine, Manucy witnessed the destruction of many local historic houses over the course of his residence there, which inspired him to learn more about architecture. In 1937, Manucy spent time in Washington, DC at the Library of Congress collecting research on St. Augustine for the St. Augustine Historical Program, sponsored by the Carnegie Foundation. This was the beginning of an effort to preserve St. Augustine's historic buildings. This would eventually lead to the formation of the St. Augustine Historical Preservation and Restoration Commission, later known as the Historic St. Augustine Preservation Board. Manucy was awarded a Fulbright scholarship to study the folk architecture of Spain in 1962. He was interested in the influence that it had on the architecture and town plan of St. Augustine. He took a leave of absence from the National Park Service and spent a year traveling around Spain. While conducting research for his book Sixteenth Century St. Augustine: The People and Their Homes, Manucy discovered that from 1566 to 1572, the town of St. Augustine was actually situated on Anastasia Island, across Matanzas Bay from its current location. This was confirmed by colleague and historian Eugene Lyon in the 1990s. Upon his retirement and return to St. Augustine in 1975, Manucy served on the St. Augustine Restoration Foundation as well as on the St. Augustine 1580 committee, formed to create a replica of a Timucua village near the St. Augustine settlement circa 1580. This was never realized, but a film called Dream of Empire was completed. Personal life and legacy Manucy was married three times; first to Clara (1935–1970), then Elsie (1971–1991), and finally Kathleen (1991–1997). He had four children; Bette, Evalina, James, and Mark, all by his first wife Clara. Manucy died on March 2, 1997, in St. Augustine at the age of 87. 
In 2000, he was honored as a Great Floridian through the "Great Floridian 2000" program, created to pay tribute to notable residents of Florida. His Great Floridian plaque is located at the National Park Service Administration Building in St. Augustine. In addition to the Great Floridian designation, Manucy was the recipient of the Amigos de los Castillos silver medal from the Spanish government, the George Morgan Ward Medal from Rollins College in 1965, the Order of La Florida from the City of St. Augustine in 1983, an Honorary Doctorate from Flagler College in 1984, as well as awards from many historic organizations like the Florida Trust for Historic Preservation and the Eastern National Park and Monument Association.
/**
 * Fallback used to fetch a group build over REST when the WebSocket client
 * loses its connection and reconnects.
 *
 * @param gcId id of the GroupConfig where the build was run
 * @return the most recently started group build of the given GroupConfig
 * @throws RemoteResourceException when the remote call fails
 */
private GroupBuild fallbackSupplier(String gcId) throws RemoteResourceException {
    try (GroupConfigurationClient client = new GroupConfigurationClient(configuration)) {
        // Sorted by start time descending, so the first element is the latest build.
        return client.getAllGroupBuilds(gcId, Optional.of("=desc=startTime"), Optional.empty())
                .iterator()
                .next();
    }
}
// https://www.geeksforgeeks.org/find-distance-between-two-nodes-of-a-binary-tree/

/**
 * Computes the distance (number of edges) between two nodes of a binary tree:
 * distance(a, b) = depth(a below lca) + depth(b below lca), where lca is the
 * lowest common ancestor of a and b.
 *
 * <p>Assumes node values are unique and that both target values are present
 * in the tree; otherwise the result is undefined.
 */
public class DistanceTwoNode {

    /**
     * Simple binary-tree node. Declared {@code static} so it can be
     * instantiated without an enclosing DistanceTwoNode instance (the
     * original non-static inner class could not be constructed from the
     * static methods below or by client code without an outer instance).
     */
    static class Node {
        int data;
        Node left;
        Node right;

        Node(int data) {
            this.data = data;
            left = right = null;
        }
    }

    /**
     * Returns the number of edges on the path between the nodes holding
     * values {@code a} and {@code b}.
     */
    public static int findDistance(Node root, int a, int b) {
        Node lca = LCA(root, a, b);
        int aDistance = findLevel(lca, a, 0);
        int bDistance = findLevel(lca, b, 0);
        return aDistance + bDistance;
    }

    /**
     * Depth of the node holding {@code targetNodeVal} below {@code root},
     * or -1 when the value is absent from this subtree.
     */
    private static int findLevel(Node root, int targetNodeVal, int level) {
        if (root == null) {
            return -1;
        }
        if (root.data == targetNodeVal) {
            return level;
        }
        int left = findLevel(root.left, targetNodeVal, level + 1);
        if (left == -1) {
            // Not in the left subtree; the value, if present, is on the right.
            return findLevel(root.right, targetNodeVal, level + 1);
        }
        return left;
    }

    /**
     * Lowest common ancestor of the nodes with values {@code a} and {@code b}.
     * If only one of the values is present, the node holding it is returned;
     * if neither is present, returns null.
     */
    private static Node LCA(Node root, int a, int b) {
        if (root == null) {
            return null;
        }
        if (root.data == a || root.data == b) {
            return root;
        }
        Node left = LCA(root.left, a, b);
        Node right = LCA(root.right, a, b);
        if (left != null && right != null) {
            // a and b are in different subtrees, so this node is their LCA.
            return root;
        }
        if (left != null) {
            return left;
        }
        return right;
    }
}
def expect_udp_messages(self, messages: List[Tuple[str, int]]) -> threading.Thread:
    """Start a background thread waiting for the given UDP messages.

    Each ``(message, source_id)`` pair is mapped to ``(message, port)``
    with ``port = 9000 + source_id`` before being handed to the UDP
    server's ``expect_messages``.

    Returns the started thread so callers can ``join()`` it.
    """
    expected = [(text, 9000 + source_id) for text, source_id in messages]
    watcher = threading.Thread(
        target=self.udp_server.expect_messages, args=(expected,)
    )
    watcher.start()
    return watcher
package com.lodz.android.component.mvp.contract.refresh;

import com.lodz.android.component.mvp.contract.base.BaseViewContract;

/**
 * View contract combining the base status widgets with a pull-down
 * (swipe-to-refresh) widget.
 * Created by zhouL on 2017/7/17.
 */
public interface BaseRefreshViewContract extends BaseViewContract {

    /** Marks the refresh as finished (hides the refresh progress indicator). */
    void setSwipeRefreshFinish();

    /**
     * Enables or disables the swipe-to-refresh widget.
     * @param enabled whether the widget is enabled
     */
    void setSwipeRefreshEnabled(boolean enabled);
}
Q: How to proceed when the baseline (state-of-the-art) published results claim much better performance than I can reproduce? I am a graduate student, and to finish my degree I need to build methods that outperform what is already there. An issue that I came across is that two papers reported way (I mean more than 20%) more than what resulted from my reimplementation. This could be due to two reasons: I missed something during the implementation. Which is what I have been telling myself. For months, I tried all possible combinations and possible paths. One of the methods is straightforward. Still, I could not reach their claimed performance. I contacted the corresponding authors, and no one replied. So I tried to contact the other authors. The first paper, the author replied and sent me the code. He/she told me to keep all details “confidential”. Well, it turns out they are not using the data they claim in their paper, so of course their results are different from my reimplementation. And my implementation was correct. The second paper author also replied and they didn’t send me the code because they say it is easy to implement, but confirmed that what I did is correct; still, I couldn’t understand why there is such a difference. Both papers are published in <2 impact factor journals. Their web servers are not working. They are not honest. Now I am stuck, my method does outperform my reimplementation of their methods but not what they claim. About the first paper I can’t say anything because “it is confidential”; for the second paper I can only confirm that I correctly implemented their method for the most part (based on my chat with the authors). I know that I probably could not publish on this part of my work, because who is going to believe a young scientist who just started her way? But I am not sure how the committee is going to believe me. What can I say or do? 
Please help me A: There is absolutely no reason that you can't publish a paper that says "We compared our method to methods X and Y. Since the original code was not available for X and Y, we reimplemented the methods to the best of our ability. The code for these reimplementations is available in supplementary files A and B. Our new method outperformed the reimplementations of X and Y by z%. However, it should be noted that it was not possible to reproduce the reported results for X and Y." People who want to know will have to look at your re-implementations and decide for themselves if they think you have correctly re-implemented. Seniority has nothing to do with it - be transparent, and the world will judge if they believe you or the people that won't release their code. A: People can be dishonest. They can also make honest mistakes and publish bad science. Don't assume that it is you who has an inferior result. And don't assume that a doctoral committee won't believe you. If they are competent to judge you without the earlier results they should be competent to understand what you have done. However, I have two suggestions. The first is to walk through what you have done with your advisor and/or another faculty member who is most competent to understand your work. You may, indeed, have the best results. If you can get support there, then the larger committee should be no problem. I don't think that you need to hide the communication you got from your committee members. It may be necessary to explain why you can't believe the reported results from the other paper. I don't think that "confidentiality" really applies here. But the other is a bit harder. See if you can figure out exactly where the other group failed to match their methods to their results. If you can do that, then you have much stronger evidence for your own work. The evidence you mention here seems pretty strong to me (an outsider) that the other paper has a problem. 
There is no reason not to contradict it if it is incorrect, for whatever reason. A: "to finish my degree I need to build methods that outperform what is already there" No, that is not true. You need to deliver a piece of proper scientific work and advance knowledge, and that does not depend on what direction your findings point. Of course, things are easier and more pleasant if your implementation is better. But the actual scientific part of your thesis is to study both the old and your approach scientifically and then conclude whether one is better (and possibly in which situations). The difficulty in your situation is to prove that the discrepancy with the literature is not due to your incompetence or lack of hard work (=> you deserve a bad mark) but actually due to "nature" not being as it was supposed to be by the previous paper. What you can and should report is that you were not able to reproduce the findings in papers 1 + 2, and in consequence have been in communication with the authors. Importantly, that your implementation has been confirmed as correct by private communication with the authors of paper 2 and by comparison with (confidential) code you received from the authors of paper 1, again by private communication for that purpose. If "Well, it turns out they are not using the data they claim in their paper, of course their results are different from my reimplementation." means that you got the data set they actually used and got the same results with that, then you can also report that for a related data set, the same results were obtained. If not, it may be possible to kindly ask the authors of paper 1 + 2 whether they'd run a data set you send them and give you the results of their implementations so you can compare that to your results. You can then report (hopefully) that equal results were obtained on a different data set and thank the authors of those papers for running your data. 
The last two points should make amply clear that the discrepancy is not due to a fault in your implementation - which is what counts for your thesis. As a personal side note, I got top grade on my Diplom (≈ Master) thesis which (among other findings) found that the software implementation I was using did not work as it was supposed to. I was able to point out a plausible and probable reason for that bug (which may have been a leftover debugging "feature") - which is much harder for you as you don't have access to a running instance of their software that you can test (= study) to form and confirm or dismiss hypotheses about its behaviour. As an addition to what @Buffy explained already about the possibility of honest mistakes in published papers: As scientists we tend to work at the edge of what is known. Which also means that we're inherently running a high risk of not (yet) knowing/having realized important conditions and limitations of what we are doing. We thus also run a comparatively high risk that tentative generalizations we consider may turn out to be not all that general after all. Or that we may be plain wrong and realize this only later (or not at all). I believe it is very hard for humans to be completely aware of the limitations of the conclusions we draw - possibly/probably because our brains are "hardwired" to overfit. (Which also puts us into a bad starting position for avoiding overfitting in e.g. machine learning models we build) The take-home message from this is that we need to be careful also when reading published papers: we need to keep the possibility of the paper being wrong, containing honest mistakes or not being as directly applicable to our task at hand as we believe at the first glance in mind. I missed something during the implementation. I experienced something similar once when I was also implementing a reference method from literature (related but different field). 
It turned out that different defaults in the preprocessing of the data caused the difference - but only after I had the bright idea of trying out to omit a preprocessing step - although the model doesn't make much sense physically without that step, but the paper didn't mention any such step (neither do many papers in my field who do use that step because it is considered necessary because of physics). They are not honest. While that is of course possible, I've seen sufficient honest mistakes to use Hanlon's razor (which I first met as Murphy's razor): and not assume dishonesty or misconduct unless there are extremely strong indications for that. Proving superiority may in any case be something that is impossible due to limitations in the old paper. E.g. if they report validation results based on a small number of cases, the uncertainty on those results may be so large and thus it cannot be excluded that the method is better than it seemed that truly improved methods later on will not be able to demonstrate their superiority in a statistically sound manner. Still, such a shortcoming of the old paper does not limit the scientific content or advance of your work.
Early Phosphatidylinositol 3-Kinase/Akt Pathway Activation Limits Poliovirus-Induced JNK-Mediated Cell Death ABSTRACT Poliovirus (PV)-induced apoptosis seems to play a major role in tissue injury in the central nervous system (CNS). We have previously shown that this process involves PV-induced Bax-dependent mitochondrial dysfunction mediated by early JNK activation in IMR5 neuroblastoma cells. We showed here that PV simultaneously activates the phosphatidylinositol 3-kinase (PI3K)/Akt survival signaling pathway in these cells, limiting the extent of JNK activation and thereby cell death. JNK inhibition is associated with PI3K-dependent negative regulation of the apoptosis signal-regulating kinase 1, which acts upstream from JNK in PV-infected IMR5 cells. In poliomyelitis, this survival pathway may limit the spread of PV-induced damage in the CNS.
import {
  FastifyInstance,
  FastifyPluginAsync,
  FastifyReply,
  FastifyRequest,
} from "fastify"
import { CookieSerializeOptions } from "fastify-cookie"
import fp from "fastify-plugin"
import fastifySecureSession from "fastify-secure-session"

const { SECRET, NODE_ENV } = process.env
// Fail fast at startup: without the secret we cannot sign/encrypt sessions.
if (!SECRET) {
  throw new Error("Server misconfigured")
}
const isProd = NODE_ENV === "production"

// Shared serialization options for the session cookie. `secure` is only
// enforced in production so local development over plain HTTP still works.
export const cookieOptions: CookieSerializeOptions = {
  path: "/",
  httpOnly: true,
  sameSite: "lax",
  secure: isProd ? true : false,
}

export function handleSessionCookie(
  fastify: FastifyInstance,
  request: FastifyRequest,
  reply: FastifyReply
) {
  /**
   * Adapted from https://github.com/fastify/fastify-secure-session/blob/master/index.js#L154.
   *
   * We need to manually handle the session cookie because we don't reply to requests through
   * the normal Fastify request-reply cycle. Instead, we use a handler provided by nextApp.getRequestHandler()
   * (defined in installSSR.ts) and then indicate that a reply has been sent by setting reply.sent = true.
   * This means that additional hooks such as 'onSend' will not be invoked (https://www.fastify.io/docs/v1.13.x/Reply/#sent).
   * Specifically the hook from fastify-secure-session, which would use the Set-Cookie header to set a
   * session cookie in the reply is not run (https://github.com/fastify/fastify-secure-session/blob/master/index.js#L153)
   * Thus we have to manually set the session cookie in the response. The cookie will be read in
   * @app/lib/withUrql.ts and used to protect against CSRF attacks using the Double Submit Cookie pattern:
   * https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#double-submit-cookie)
   */
  if (request.isSameOrigin) {
    /**
     * For security reasons we only enable sessions for requests within our
     * own website; external URLs that need to issue requests to us must use a
     * different authentication method such as bearer tokens.
     */
    const session = request.session
    if (!session || !session.changed) {
      // Nothing to do: no session, or it was not modified during this request.
      return
    } else if (session.deleted) {
      // Session was destroyed: expire the cookie on the client immediately.
      reply.setCookie(
        "session",
        "",
        Object.assign({}, cookieOptions, { expires: new Date(0), maxAge: 0 })
      )
      return
    }
    // Session changed: re-encode it and send the updated cookie.
    reply.setCookie(
      "session",
      fastify.encodeSecureSession(session),
      Object.assign({}, cookieOptions)
    )
  }
}

// Registers fastify-secure-session using the hex-encoded SECRET as the key.
const Session: FastifyPluginAsync = async (app) => {
  app.register(fastifySecureSession, {
    key: Buffer.from(SECRET, "hex"),
    cookie: cookieOptions,
  })
}

export default fp(Session)
import { ExtractJwt, Strategy } from 'passport-jwt'; import { PassportStrategy } from '@nestjs/passport'; import { Injectable, UnauthorizedException } from '@nestjs/common'; import { User } from 'src/user/entities/user.entities'; import { UserIdDto } from 'src/user/dto/userId.dto'; @Injectable() export class JwtStrategy extends PassportStrategy(Strategy) { constructor() { super({ jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(), ignoreExpiration: false, secretOrKey: process.env.JWT_SECRET, }); } //Вызовится автоматически паспортом и в качестве пейлоада передастася расшифрованый токен в JSON async validate(payload: any): Promise<UserIdDto> { if (payload.id) { return { _id: payload.id }; } else { throw new UnauthorizedException(); } } /* async validate(req, user: Partial<User>) { //const token = req.header.authorization.slice(7); if (token) { return user; } else { throw new UnauthorizedException(); } } */ }
Polytetrafluoroethylene: Synthesis and Characterization of the Original Extreme Polymer. This Review aims to be a comprehensive, authoritative, and critical review of general interest to the chemistry community (both academia and industry) as it contains an extensive overview of all published data on the homopolymerization of tetrafluoroethylene (TFE), detailing the TFE homopolymerization process and the resulting chemical and physical properties. Several reviews and encyclopedia chapters on the properties and applications of fluoropolymers in general have been published, including various reviews that extensively report copolymers of TFE (listed below). Despite this, a thorough review of the specific methods of synthesis of the homopolymer, and the relationships between synthesis conditions and the physicochemical properties of the material prepared, has not been available. This Review intends to fill that gap. As known, PTFE and its marginally modified derivatives comprise some 60-65% of the total international fluoropolymer market with a global increase of ca. 7% per annum of its production. Numerous companies, such as Asahi Glass, Solvay Specialty Polymers, Daikin, DuPont/Chemours, Juhua, 3F, 3M/Dyneon, etc., produce TFE homopolymers. Such polymers, both high-molecular-mass materials and waxes, are chemically inert and hydrophobic and exhibit an excellent thermal stability as well as an exceptionally low coefficient of friction. These polymers find use in applications ranging from coatings and lubrication to pyrotechnics, and an extensive industry (electronic, aerospace, wires and cables, and textiles) has been built around them. South Africa, being the third largest producer of fluorspar (CaF2), the precursor to hydrogen fluoride and fluorine, has embarked on an industrial initiative to locally beneficiate its fluorspar reserves, with the local production of fluoropolymers being one projected outcome. 
As our manuscript focuses specifically on the homopolymerization of TFE (the starting point for all fluoropolymer industries), it will be of considerable use to start-up companies and other commercial entities looking to enter the fluoropolymer market, as well as to end-user companies. The manuscript commences with a short discussion on the synthesis and production of TFE (both at industrial and laboratory scales), including the safety aspects surrounding handling (because that monomer is regarded as explosive if brought into contact with oxygen due to the formation of peroxides), transport, and storage, and then expands into detailed discussions dealing with aspects such as the various additives used (buffers, chain transfer agents, surfactants, etc.), the solvent environment, and the reaction conditions. A further section reports the properties of PTFE with respect to the polymerization conditions as well as an overview on the specialized techniques used to characterize PTFE. Finally, the applications of PTFE in various fields, ranging from electrical insulation to tribological to medical applications, as well as chemically resistant coatings and pyrotechnics, are discussed.
St David's Anglican Church, Mossman History St David's Church is a small stone and masonry building constructed in three stages between 1912 and 1952 to encompass a porch, nave and apsidal sanctuary, with a vestry and chapel added c. 1982. It replaced an earlier Anglican church on the same site, which was destroyed by a cyclone in March 1911. The original design, as influenced by the architectural traditions of the Byzantine Empire, was completed by the long-serving rector, Reverend Edward Taffs, who drove the project to fruition over the course of his career in charge of the Mossman/Port Douglas Parish of the Anglican Diocese of Carpentaria (1900-1996). The current church and its evocative setting behind an avenue of mature, fern-clad raintrees (Samanea saman) are situated to the north of the town centre, on the road to Daintree. Mossman lies inland from Port Douglas, on the flood-plain of the Mossman River between the Great Dividing Range and the coast, about 70 kilometres (43 mi) north of Cairns. George Elphinstone Dalrymple's North-East Coast Exploring Expedition of late 1873 had brought attention to the resources around the Johnstone, Mulgrave, Russell, Daintree and Mossman Rivers and from 1874 cedar stands on the later were being logged extensively. Behind the coastal river plain the Hodgkinson goldfield was proclaimed on 15 June 1876. Initially accessed via Cairns, in 1877 an alternative route to the coast was found and Port Douglas was established as the field's new port, about four miles south of the mouth of the Mossman River. By 1878 the most readily accessible cedar stands in the Mossman River district had been exhausted, although logging in more difficult-to-reach areas continued into the 1880s. Agricultural settlers followed the timber getters from the late 1870s, one of the earliest on the Mossman being Daniel Hart, an immigrant from Jamaica and a former timber getter. 
In 1878 he selected land along the river, calling the property Coolshade, and in 1885 subdivided part of it, which became the western half of the town of Mossman. The town was first called Hartsville in his honour. Excessive rain and poor soil productivity resulted in most of the region being planted out with sugarcane from the 1880s. In 1883, Brie Brie Sugar Plantation established Mossman's first mill but this was unsuccessful and largely inoperative by 1886. This did not deter Commissioner WO Hodgkinson from assessing the flat land around the Mossman River as having great potential for cane production during his investigation of sites for central sugar mills conducted in 1886. The impetus for more extensive sugar cropping in the district came with the establishment of the Mossman Central Co-operative Mill in the mid- 1890s under the provisions of the 1893 Sugar Works Guarantee Act. As the Mossman River community consolidated, religious denominations established a presence in the district. Prior to the formation of a separate Port Douglas and Mossman Parish, missionary chaplains from the Church of England's Diocese of North Queensland, based in Townsville, travelled to Port Douglas to conduct services. In 1898 the separate parish was created after parishioners subscribed £100 per year to support a minister. A church was built in Mossman on land that had been part of Daniel Hart's 1885 subdivision (and part of which he donated to the church in 1898). The first St David's Church at Mossman was a simple gable-roofed timber structure, dedicated on Trinity Sunday on 29 May 1899. It took the name of the Welsh patron saint, St David, reputedly because Mr RD Rex, a local farmer and parish secretary during construction had been christened in the new cathedral of St David's in Hobart in 1874. The Bishop of North Queensland agreed to the name. 
The link with the Tasmanian cathedral was again made in 1955 when a fragment of the stone cross at St David's Cathedral was presented to the Mossman church, where it was set in cement at the entry. From 3 August 1900 the Port Douglas and Mossman Parish was incorporated into the newly created Church of England Diocese of Carpentaria, which encompassed the Torres Strait, Cape York Peninsula, the Southern Gulf of Carpentaria and the whole of the Northern Territory, with the Bishop's seat located at Thursday Island. The Queensland boundary of the diocese extended to south of Port Douglas, with Cairns remaining in the Diocese of North Queensland. Church historians, including Keith Rayner is his 1962 doctoral thesis The History of the Church of England in Queensland, argue that the formation of a new diocese "came at a time of economic and commercial depression, and [when] its European population was markedly declining." The new diocese faced significant challenges due to its size, isolation, lack of resources and the cultural and linguistic diversity of its parishioners. It struggled to be self-supporting, and lacked sufficient internal support to accomplish its missionary goals. Despite these challenges, the Port Douglas and Mossman Parish initially thrived. In January 1902 The Carpentarian noted that the church had resolved to raise £150 for a rectory at St David's, which was constructed in 1903; and in 1904 Mossman became the centre of the parish. The shift away from Port Douglas reflected the decline of the latter as an early port and regional centre, the ascendancy of Cairns as the region's premier port, and the emergence of Mossman as the centre of the Mossman River district. In 1904 the Reverend Edward Taffs took up duties as rector at Mossman and remained there for the next 46 years until his death aged 90. 
Taffs had migrated from England to Victoria in September 1889 and was ordained a minister in St Paul's Cathedral in Melbourne in 1901; his first appointment being in South Gippsland. He opened the Holy Trinity College at Kew, occupying the position of headmaster while continuing as a minister. In mid-1904 he left Victoria with his family and moved to Mossman to take up the position of rector. He and his wife Mary worked throughout the parish, travelling to Mount Molloy, Mount Carbine and Port Douglas on horseback. Taffs' time in Mossman was an unusually long commitment for an Anglican clergyman at this period in such a remote part of the state. His missionary commitment and principles, as well as his dogged determination, were pivotal to the development and construction of the second St David's Church. On 16 March 1911 a severe cyclone caused extensive damage to buildings at Port Douglas and Mossman. The Carpentarian of Easter Eve 1911 reported that the facilities lost included the churches at Port Douglas and Mossman, the Port Douglas house belonging to the Bishopric Endowment Fund (used as a rectory), a church hall at Mossman that Taffs had erected at his own expense, and a small church at Mount Molloy. A flood two weeks after the cyclone swept away the remaining debris. The paucity of church finances, often reliant upon contributions from English church societies, meant that raising funds for building projects was a constant challenge. For the Parish of Port Douglas and Mossman, available funding was first directed toward re-erecting the Mossman parish hall. Prolonged difficulties in raising further funds, and a commitment to building a new church debt-free, resulted in this hall serving as the temporary church until 1952. The impact of the 1911 cyclone convinced local parishioners of the need to rebuild St David's in stone, a more expensive undertaking than erecting a timber structure. 
Reverend Taffs reputedly prepared the design for the new church which, in contrast to the simple, gabled timber churches that dominated the area, drew from the architectural traditions of the Byzantine Empire or that of the Romans during the Middle Ages. The culmination of Early Christian architecture, the style that came to be associated with this empire began after AD 330 when Emperor Constantine established the Imperial Roman capital at Byzantium, renaming it Constantinople. From its palette the design for St David's originally incorporated a series of domes and barrel vaults over the nave and apse, a dome on a windowed drum, and rounded arches of coloured stone. The planned church was modest in size - 44 feet (13 m) long by 25 feet (7.6 m) wide - and designed to seat 120 persons. Construction commenced in 1912 after suitable stone was found at Bonnie Doon, a local cane farm on the Mossman River. Kerr reports in his Northern Outpost report of 1995 that stones were transported from the property with the help of the Mossman Mill which agreed to supply half a mile of tramline to enable it to be hauled from the quarry to the construction site. A contract for the nave stone was let for 850 tons at three shillings a ton and Mossman Central Mill railed it to the church site. To withstand potential flooding, substantial foundations were laid by parishioners under the supervision of a stonemason. These were completed in 1915, but further work was delayed by World War I (1914-1918). A decoratively painted timber panel intended for the sanctuary was crafted during this time for use in the temporary church and later transferred to the stone building. After the war the Ladies Building Committee raised money for construction of the chancel and one bay window in 1919, but further obstacles slowed the building program. 
The price of cement escalated and then in February 1920 another cyclone damaged the rectory and temporary church, and building funds set aside for the new church had to be channeled into repair of the temporary church/hall and rectory. During the 1920s, fund raising waned and in the early 1930s progress remained slow due to the depressed economy. Concerted efforts to complete the church did not resume until the late 1930s after modifications to the plan were made by firms of architects and engineers, Hassall and Redmond, both of Cairns, who called tenders in January 1937. As the prices submitted were more costly than the committee had anticipated, construction of the stone walls did not commence until 1940, with most of the work undertaken by Reverend Taffs and his two grandsons. Further delay followed while funds were raised for the roof. With the start of World War II (1939-1945), voluntary fund-raising, building materials and skilled labour were diverted to the war effort, delaying construction even further. Reverend Taffs died in 1950, just two years before St David's Church was completed. His replacement, Father Ware, considered that his first task was to complete the church. In 1951 the design was revised by Cairns architect Edwin Roy Orchard to incorporate a simpler and less expensive gabled roof and parishioners arranged a loan with the National Bank. Tenders were called in October 1951 and the contract was awarded to Baker and McMaster of Cairns. The work cost £7,586 and was completed in August 1952. On 27 September 1952 Bishop Hudson finally dedicated the stone church, 40 years after construction had commenced. St David's Church suffered structural damage to its sanctuary during a cyclonic storm in 1979. In the same year a building fund was established to construct a vestry. In 1980 a cruciform design was accepted, with small transepts to accommodate a vestry to the north and a chapel to the south of the crossing. 
The tender was awarded to R and J Carroll in 1982. The new chapel and vestry were consecrated in 1984, completing the building that was started in 1912 under the guidance of Rev. Taffs. Parishioners have since installed leadlight windows designed by artist Chris Oswald (donated in memory of Dorothy Louise Kieseker) and a tile mosaic depicting the risen Christ. In 1996 the Diocese of Carpentaria was dissolved and the Parish of Port Douglas and Mossman, among other Cape York parishes, was returned to the responsibility of the Anglican Diocese of North Queensland. Raintrees (Samanea saman) An avenue comprising about eleven mature raintrees grows either side of Foxton Avenue (the Cook Highway) from its intersection with Mossman Street to the church porch. This avenue contributes substantially to the setting of St David's and the aesthetic experience of it, which is considered to be of State-level cultural heritage significance. The raintree is native to a range stretching from southern Mexico into northern South America and its accepted botanical name, as included in the Australian Plant Census and established in 1916, is Samanea saman (Jacq.) Merr. A plaque outside St David's states that the avenue of trees was planted by Rev. Taffs using seeds obtained from Jamaica by Daniel Hart, one of the area's earliest selectors to whom the land (portion 35) originally belonged; however no evidence has been uncovered to substantiate this account or resolve the incongruity it contains between the year of Hart's death, 1900, and when Taffs become rector at Mossman in 1904. A recent arborist's report estimates the trees are between 80 and 100 years old making the date range in which they were planted between 1908 and 1928. No trees appear in a parish photograph of the timber church destroyed by cyclone in 1911 and apparently built on the site of the later stone church. 
The first record of the raintree species being introduced outside its native habitat comes from the Caribbean Islands, and since then it has become naturalized and widely planted in pastureland for shade and forage on most of these islands, including Jamaica from where Hart immigrated to Australia. In the second half of the nineteenth century the trees were introduced into many tropical countries as street and shade trees, most extensively in India, southern Asia and the Pacific. The species has been known by a number of other common names including guango (in Jamaica), monkey pod and saman. Raintrees are now common in coastal north-east Queensland, including around Mossman and Port Douglas. Flora of Australia (2001) reports that the trees are naturalized along the Stewart Creek south of Daintree, about 24 kilometres (15 mi) north of Mossman, with other naturalized populations occurring at South Johnstone and Dingo Beach on Cape Gloucester (near Bowen). A mature tree (estimated to be 125 years old) has been identified near the weighbridge on Mossman's Mill Road. In 1907 they were reported as growing in the main street of Ingham and in Townsville. At the opening of the Cook Highway from Cairns to Mossman in late 1933, The Courier Mail of 19 December 1933 described already established raintrees lining part of the Port Douglas to Mossman stretch of the road. The key to the early renown of the raintree stemmed from the apocryphal story that it was a panacea for drought stricken districts through the ability of its leaves to condense moisture out of the air, retain it in large quantities and then expel it through its leaves and trunk. This story, and how it had been disproved, was discussed in two 1911 articles from the Port Douglas and Mossman Record. Otherwise it was seriously considered as a useful crop for the fodder its large seed pods provided for cattle. 
It appears that in late nineteenth century Queensland the raintree was known by one of its botanical synonyms, an alternate spelling of Pithecellobium saman. In an August 1879 article in The Queenslander, recounting the minutes of the most recent meeting of the Acclimatisation Society of Queensland, this species is discussed and a report tabled from the director of the Botanical Gardens in Jamaica, from where seeds had been obtained by the Queensland Colonial Office. The Chairman reported that the tree was known to the Society, which had a few strong plants in stock. Later in the same year it was reported that the chairman had distributed large quantities of seed in the northern districts of the colony. A letter-to-the-editor published in a June 1884 Sydney Morning Herald issue and received from Fred Turner - who worked at the Brisbane Botanic Gardens from 1874, before becoming Curator of the Acclimatisation Society's Bowen Park and then moving to the Sydney Botanic Gardens in 1880 - reveals that the Brisbane Gardens had sometime previously received seeds of the raintree. An article in a December 1932 issue of The Queenslander describes a substantial raintree growing there. From late 1877 Hart was an active member of the Queensland Acclimatisation Society, based in Brisbane, and at Coolshade he established a fine orchard with European and Mediterranean fruits, Chinese fruits, mangoes, and coconuts using material obtained almost exclusively via the Society. In April 1884 The Brisbane Courier reported on a visit to agricultural properties in the Mossman River district, the foremost of which was Coolshade. In December 1887 the agricultural reporter for The Queenslander newspaper described Hart as an enthusiast in horticulture, and his garden an experimental station. In both articles the extensive range of plants described as growing at the property did not include the raintree. 
When Hart died in 1900 he left Coolshade and its orchard - located on just over 10 acres (4.12ha) on the Mossman River to the north of the site of St David's and the raintree avenue - to his half brother. In 1907 the title for the triangular parcel of land over which the canopy of the eastern line of raintrees spreads was transferred by this brother to Edward Taffs, Hart's brother died in 1915, leaving Coolshade to a daughter of Taffs. Description St David's Church is located north of the centre of Mossman in a park setting on a triangular site between the Captain Cook Highway (Foxton Avenue), which it addresses, and Mossman Street. It is set approximately four metres back from the street and is aligned at an angle of approximately 60° to the course of the street. When approached from the south and the centre of town it sits at the northern end of a grassed area behind an avenue of large, mature raintrees (Samanea saman) that dominate this section of Foxton Avenue. From the church looking west there are views over George Davis Park with cane fields behind, and distant views of the Main Coast Range (part of the Great Dividing Range) and the Daintree National Park. The church behind the avenue of raintrees is an important aspect of the view from George Davis Park. St. David's is a small church with a terracotta tile-clad roof and walls built of stone, concrete blockwork and detail sections of render. It is aligned on an east-west axis with a nave and small porch at the front and western end, a chapel and vestry to the south and north sides of the crossing respectively, and an apse on the eastern end of the nave. The four stages in which the church was constructed are discernable in differences in its fabric. The fabric of the first three stages is critical to the cultural heritage significance of the church, while the fourth stage is important for its form but not for the materials used. 
The first stage of construction is represented in the stonework up to floor level in the nave and apse, comprising random rubble to about one metre above ground level. The second stage comprises the walls of the nave and apse above floor level, including the rounded archways and the coloured concrete block quoins, and reflects the Byzantine influence in the original design. The walls of the nave are approximately 8 metres (26 ft) high and it is approximately 12 metres (39 ft) long by 6 metres (20 ft) wide. There are four round arch openings on the exterior elevations of the nave: two on the north elevation and two on the south elevation. The round arch openings near the porch are about 1.5 metres (4 ft 11 in) wide. The eastern arched openings are much wider - about four metres. The polygonal apse begins in a round arched opening almost the width and height of the east wall of the nave. The apse has narrower round arch openings taking up most of each of the five sides. The walls of the nave and apse are constructed of random rubble. There are alternating red and blue coloured concrete block quoins to the corners and the edges of the arched openings. The third stage comprises the porch, including the west gable parapet wall surmounted by a Latin cross at its apex, the gable parapet wall to the nave also with a Latin cross, and the tiled roofs of the nave, apse and porch. The porch is to the west of the nave, its walls also constructed of random rubble with uncoloured concrete block quoins. The western porch facade has a rectangular window with three lights, the outer casements of which open outwards while the central light is fixed. There is a narrow, upright slit in the stone wall above the window. The western facade of the nave has a circular window in-filled with fixed louvers and topped by a stone drip moulding. Both gable parapets have moulded block-work coping. 
The northern elevation of the porch features a doorway now accessed by a ramp with a bell mounted to the right-hand side of the doorway that is framed in bevelled concrete. The roofs of the nave and porch are gabled, pitched at a 45° angle and have ridges running east-west. The roof of the apse abuts the wall of the nave and has a three-faceted hip. All roofs are clad with terracotta tiles. The fourth construction stage corresponds to the additions undertaken in c. 1982 that installed a chapel centred on the southern side of the crossing and a vestry on its northern side. The walls of these additions are constructed of concrete block: rendered quoins and infill panels of textured blocks. The roofs of the chapel and vestry abut the wall of the nave, have three-faceted hips at each end, and are clad with coloured concrete tiles. Each of the three facets of the chapel's southernmost end has a narrow window with rounded top. On the northern side the c. 1982 vestry addition mirrors that of the chapel opposite. Like the chapel the vestry has windows in three of the end wall facets. There is a door in its western facade. A small building housing toilets is immediately adjacent to the northeast corner of the nave. Random rubble stonework is largely exposed on the interior of the church, as are the blockwork quoins. The porch, nave and apse have unpainted timber ceilings. There are panels of painted timber under the window sills in the apse. The porch (4 by 2 metres (13.1 ft × 6.6 ft) x 3 metres (9.8 ft) high) has a carpeted floor, unpainted, rendered walls and a stained timber ceiling. The window in the northern wall of the porch has stained glass depicting a scene of Christ and children. The concrete frame above this window features the Christogram IHS (derived from the first three letters of the Greek name of Jesus). The nave has concrete floors and a ceiling of stained timber panelling with six pendant light fittings and two fans hanging from it. 
The windows in the round-arch openings on the southern side of the nave contain stained glass depicting the Madonna and Child flanked by St Margaret and St Hilda. On the northern side Christ is depicted as a shepherd, flanked by St Andrew and St David. Between these windows and the arches leading into the chapel and vestry the stonework has been rendered. The archways between the nave and chapel and vestry either side of the crossing are in-filled with stained timber boarding, to the south to the spring-line of the arch and to the north to door head height. The chapel is open to the main body of the church and its ceiling corresponds to the lower line of the in-fill panel, while the vestry is separated by a stained timber panel wall and a central door. The chapel and vestry have concrete floors, unpainted concrete block walls and stained timber ceilings. Two concrete steps lead through a wide round-headed arch at the east end of the nave to the apse, which has unfinished random rubble walls and timber paneled ceiling similar to the nave. The concrete floor is covered with red carpet. There is a mosaic depicting Christ in the archway directly behind the altar. Raintrees (Samanea saman) An avenue of raintrees (Samanea saman) is located along a section of Foxton Avenue (Captain Cook Highway) to the south and west of the church. Six are located on the eastern side of the road with another five on the western side, and make an emotive setting for St David's Church particularly on the approach out of the centre of town to the south. The interlocking canopies that stretch over the road create a tunnel of shade beyond which stands the church in the sunlight. The large volume of space protected by this canopy stands above a carpet of grass on the triangular lot between Foxton Avenue and Mossman Street, and effect which also highlights the positioning of the church. 
Samanea saman generally grow to between 15 and 25 metres (49 and 82 ft) tall with a broad dome-shaped canopy typically 30 metres (98 ft) in diameter. The trees leading to St David's Church are substantial and mature; some about 15 metres (49 ft) tall with canopies of about 30 metres (98 ft) in diameter. Some of the trees have multiple trunks, the circumferences of which being between 6 to 10 metres (20 to 33 ft). Ferns and other epiphytes grow from the trunk and branches of several trees. Heritage listing St David's Anglican Church and Raintrees (Samanea saman) was listed on the Queensland Heritage Register on 6 August 2010 having satisfied the following criteria. The place is important in demonstrating the evolution or pattern of Queensland's history. St David's Church, constructed in stages between 1912 and 1952, is important in demonstrating the pattern of Queensland's history related to the spread of the Anglican Christian denomination. The lengthy construction period combined with the perseverance and dedication of the local minister, Reverend Taffs, exemplifies the pattern of the development, establishment and resilience of the former Anglican Diocese of Carpentaria (1900-1996) in far northern Queensland. The church embodies its ongoing struggle to establish a presence and raise funds for building projects during the first half of the twentieth century. Also the self-reliance that many ministers and their parishioners displayed is reflected in the idiosyncratic nature of the design and materials of St David's Church at Mossman. The place is important because of its aesthetic significance. St David's Anglican Church is set behind an avenue of substantial, fern-clad raintrees (Samanea saman) forming an archway along part of the Captain Cook Highway (Foxton Avenue) that takes road travellers out of the centre of Mossman toward Daintree. 
These trees and the beautiful arbour they make, as well as the unusual use of stone and Byzantine-influenced design details in the church, make an important aesthetic contribution to the town of Mossman.
"""ETL helpers for building daily FLUXNET training data for met_ml.

Reads half-hourly FLUXNET site data from an intake catalog, resamples it to
daily driver/target variables, attaches per-site metadata (day of year,
latitude, elevation), and provides utilities for building lookback windows
and selecting sites with long records.
"""
import math
import os.path
import random
from glob import glob

import dask
import intake
import numpy as np
import pandas as pd
import xarray as xr
from joblib import dump, load

# Daily meteorological drivers used as model inputs.
train_vars = ["P", "t_min", "t_max"]
# Static / calendar metadata appended to every sample.
meta_vars = ["t", "lat", "elev"]
# FLUXNET variables the model predicts (daily means of these columns).
target_vars = ["SW_IN_F", "LW_IN_F", "PA_F", 'RH']
predict_vars = train_vars + meta_vars
all_vars = predict_vars + target_vars
# Raw half-hourly columns needed to derive the daily variables above.
read_vars = ["P", "TA_F"] + target_vars

# these missing in the metadata and were looked up using google earth
elevs = {
    "AR-Vir": 105.0,
    "AU-Wac": 753.0,
    "AR-SLu": 508.0,
    "AU-Rig": 151.0,
    "AU-Stp": 228.0,
    "CN-Du2": 1321.0,
    "JP-SMF": 213.0,
    "AU-How": 39.0,
    "AU-DaP": 71.0,
    "CN-Sw2": 1445.0,
    "AU-Dry": 176.0,
    "AU-Emr": 175.0,
    "CN-Din": 292.0,
    "AU-DaS": 74.0,
    "CN-Cng": 143.0,
    "AU-Whr": 151.0,
    "AU-Fog": 4.0,
    "AU-RDF": 189.0,
    "RU-Sam": 11.0,
    "AU-Cum": 39.0,
    "CN-Qia": 112.0,
    "CN-Du3": 1313.0,
    "CN-Ha2": 3198.0,
    "CN-Cha": 767.0,
    "AU-Gin": 51.0,
    "AU-Ade": 76.0,
    "CN-HaM": 4004.0,
    "AU-GWW": 448.0,
    "AU-Ync": 126.0,
    "JP-MBF": 572.0,
    "MY-PSO": 147.0,
    "AU-TTE": 552.0,
    "AU-ASM": 606.0,
    "CN-Dan": 4313.0,
    "AU-Cpr": 63.0,
    "AU-Lox": 45.0,
    "AU-Rob": 710.0,
}

# Sites excluded from ETL.  Only membership is ever tested, so the duplicate
# entries the original list carried ("CA-Man"/"DE-RuR" twice) were redundant
# and have been removed.
sites_to_skip = [
    "CA-Man",  # missing RH
    "DE-RuR",  # missing RH
    "DE-RuS",  # missing RH
    "MY-PSO",  # missing RH
    # "CN-Cha",  # found nans in df
    # "CN-Dan",  # found nans in df
    # "CN-Din",  # found nans in df
    # "CN-Qia",  # found nans in df
    # "DK-ZaH",  # found nans in df
    # "FI-Lom",  # found nans in df
    # "IT-Isp",  # found nans in df
    # "IT-SR2",  # found nans in df
    # "US-Me5",  # found nans in df
    # "US-PFa",  # found nans in df
]


def get_fluxnet(cat, all_site_meta, from_cache=True):
    """Load the daily FLUXNET dataframe.

    Parameters
    ----------
    cat : intake catalog with a ``raw_fullset`` entry per site.
    all_site_meta : pd.DataFrame
        Site metadata indexed by (site, field).
    from_cache : bool, default True
        Must currently be False; see note below.

    Returns
    -------
    pd.DataFrame of daily data concatenated over sites.
    """
    if not from_cache:
        return load_fluxnet(cat, all_site_meta)
    # BUG FIX: the cached branch used to load three joblib artifacts
    # (x_data_computed / y_data_computed / meta) and then return the
    # *undefined* name ``fluxnet_df``, raising NameError.  The cache layout
    # does not match this function's contract (a single dataframe), so fail
    # loudly with an explanation instead of crashing obscurely.
    raise NotImplementedError(
        "from_cache=True is not supported: the joblib cache stores x/y/meta "
        "arrays, not the combined dataframe this function returns. "
        "Call get_fluxnet(cat, all_site_meta, from_cache=False)."
    )


@dask.delayed
def load_fluxnet_site(entry):
    """Read one half-hourly FLUXNET catalog entry and resample it to daily.

    Returns a dataframe with daily precip sum, min/max air temperature and
    daily-mean targets, or ``None`` if the site fails to read (the caller
    reports and skips None results).
    """
    try:
        df = entry.read()
        df.index = pd.to_datetime(df['TIMESTAMP_START'], format='%Y%m%d%H%M')
        out = df[["P"]].resample("1D").sum()  # daily precipitation total
        out["t_min"] = df["TA_F"].resample("1D").min()  # daily min air temp
        out["t_max"] = df["TA_F"].resample("1D").max()  # daily max air temp
        out[target_vars] = df[target_vars].resample("1D").mean()
        return out
    except Exception:
        # Was a bare ``except:``; narrowed so KeyboardInterrupt/SystemExit
        # propagate.  Best-effort read: a failing site becomes None.
        return None


def add_meta(df, meta):
    """Attach day-of-year, latitude and elevation columns in place."""
    df["t"] = df.index.dayofyear  # make_cyclic_doy(df.index.dayofyear)
    df["lat"] = meta["lat"]  # np.sin(np.radians(meta["lat"]))
    df["elev"] = meta["elev"]
    return df


def get_meta(all_site_meta):
    """Return ``{site: meta_dict}`` for all sites not in ``sites_to_skip``."""
    all_sites = all_site_meta.index.get_level_values(0).unique()
    meta = {
        key: extract_site_meta(all_site_meta, key)
        for key in all_sites
        if key not in sites_to_skip
    }
    return meta


def load_fluxnet(cat, all_site_meta):
    """Read every site lazily with dask and concatenate the daily frames.

    Returns a dataframe with a (site, date) MultiIndex.
    """
    meta = get_meta(all_site_meta)
    # NOTE: the original built an unused ``meta_df`` here; removed.
    site_data = {}
    for site, site_meta in meta.items():
        site_data[site] = load_fluxnet_site(
            cat["raw_fullset"](station=site.lower(), kind='fullset', freq='hh')
        )
    site_data = dask.compute(site_data)[0]
    out = {}
    var_names = train_vars + target_vars
    for name, df in site_data.items():
        if df is not None:
            out[name] = add_meta(df.loc[:, var_names], meta[name])
        else:
            print(f'failed to read {name}, look into this...')
    return pd.concat(out.values(), keys=out.keys())


def make_cyclic_doy(doy):
    """Map day-of-year onto a cosine so Dec 31 and Jan 1 are close."""
    # TODO: consider updating this to handle leap years
    return np.cos((doy - 1) / 365 * 2 * np.pi)


def make_lookback(df, lookback=90):
    """Build a (samples, lookback, features) array of trailing windows.

    Rows whose window would reach before the start of the record (or contain
    NaNs) are dropped.
    """
    df = df[all_vars]  # sort columns into the canonical feature order
    coords = {'features': all_vars}
    da = xr.DataArray(df.values, dims=("samples", "features"), coords=coords)
    lba = da.rolling(samples=lookback).construct("lookback")
    # Label lookback steps -(lookback-1) .. 0, with 0 = the current day.
    lba.coords['lookback'] = np.linspace(
        -1 * (lookback - 1), 0, num=lookback, dtype=int)
    mask = lba.isnull().any(("lookback", "features"))
    return lba.where(~mask, drop=True).transpose(
        "samples", "lookback", "features")


def first_entry(entry):
    """Coerce a metadata field (Series-like or scalar) to a float."""
    try:
        return entry.astype(float).values[0]
    except Exception:
        # Plain scalar without .astype/.values -- fall back to float().
        return float(entry)


def extract_site_meta(meta, site):
    """Extract lat/lon/elev for ``site``; elevation falls back to ``elevs``."""
    out = {}
    out["lat"] = first_entry(meta[site]["LOCATION_LAT"])
    out["lon"] = first_entry(meta[site]["LOCATION_LONG"])
    try:
        out["elev"] = first_entry(meta[site]["LOCATION_ELEV"])
    except Exception:
        try:
            out["elev"] = elevs[site]
        except KeyError:
            # No elevation anywhere: report it; "elev" key stays absent.
            print(f"failed to get elevation for {site}")
    return out


def get_training_sites(glob_path, thresh_days):
    """Return sites whose record spans more than ``thresh_days`` days.

    Site ids are parsed from the CSV file names (second ``_``-separated
    token); start/stop come from the first/last TIMESTAMP values.
    """
    # pick a list of sites with sufficiently long temporal records
    thresh = pd.Timedelta(thresh_days, "D")  # ~10years
    paths = glob(glob_path)
    sites = []
    starts = []
    stops = []
    for f in paths:
        df = pd.read_csv(f)
        sites.append(os.path.split(f)[-1].split("_")[1])
        starts.append(df["TIMESTAMP"].values[0])
        stops.append(df["TIMESTAMP"].values[-1])
    site_df = pd.DataFrame(
        {
            "site": sites,
            "start": pd.to_datetime(starts, format="%Y%m%d"),
            "stop": pd.to_datetime(stops, format="%Y%m%d"),
        }
    ).set_index("site")
    site_df["dur"] = site_df["stop"] - site_df["start"]
    train_sites = site_df[site_df.dur > thresh]
    return train_sites
async def _handle_transition(self):
    """Dispatch the reaction the user pressed to its page-transition coroutine.

    Builds an emoji-name -> coroutine map by zipping the menu's reaction
    buttons with the transition callbacks (order must match the order the
    buttons were added), then awaits the transition matching ``self.input``.
    """
    # BUG FIX: the original list contained ``self._cancel_menu`` here, but
    # both ``transitions.remove(self.close)`` below and the final comparison
    # reference ``self.close`` -- so ``remove`` raised ValueError whenever
    # ``cancel_button`` was False, and the close-check could never match.
    # Using ``self.close`` consistently fixes both.
    transitions = [
        self.to_first,
        self.previous,
        self.close,
        self.next,
        self.to_last,
    ]
    # ``is False`` (not plain falsiness) is deliberate: None means "unset"
    # and must not strip the buttons.
    if self.cancel_button is False:
        transitions.remove(self.close)
    if self.skip_buttons is False:
        transitions.remove(self.to_first)
        transitions.remove(self.to_last)
    # Key each transition by its button's emoji: custom Emoji objects are
    # keyed by .name, unicode emoji by the raw string.
    transition_map = {
        (button.emoji.name if isinstance(button.emoji, Emoji) else button.emoji)
        if isinstance(button, Reaction)
        else button: transition
        for button, transition in zip(self.output.reactions, transitions)
    }
    # Run the after-update hook for every transition except closing the menu.
    if transition_map[self.input.name] != self.close:
        await call_hook(self, '_hook_after_update')
    await transition_map[self.input.name]()
Prevalence and Clinical Correlates of Minimally Invasive Facial Cosmetic Surgery in Chinese College Students Background: Minimally invasive facial cosmetic surgery (MIFCS) is becoming more and more popular and acceptable in Chinese young people, and it influences people in many aspects. However, there is little research on the associations between MIFCS and psychopathology in Chinese college students. The purpose of this study was to identify the prevalence of MIFCS and its clinical correlates among Chinese college students. Methods: A cross-sectional design was applied in this study. A total of 8089 students completed an online questionnaire on demographic data, depression (Self-Rating Depression Scale), anxiety (Self-Rating Anxiety Scale) and MIFCS. Logistic regression was used to identify independent factors associated with MIFCS. Findings: The prevalence of MIFCS in Chinese college students was 2.7% (221/8089). Students with MIFCS were more likely to be from urban areas, from a single child household, experience depression or anxiety and have a history of smoking (all p<0.05). They were also less likely to be right-handed or have a good relationship with father or mother (all p<0.05). Moreover, age, sex, community (urban or rural), right-handedness, depression, family income and smoking were independently associated with MIFCS. Interpretation: These data suggest that minimally invasive facial cosmetic surgery (MIFCS) is very common in Chinese college students, indicating the importance of paying attention to MIFCS. This study provides valuable evidence for college counselors and doctors in the cosmetic department to provide better and healthier services to students who undergo MIFCS, especially those with depression. 
Funding: This work was supported by the National Key R&D Program of China (No.2017YFC1309904), the National Key R&D Program of China (No.2017YFC1309902), the National Key R&D Program of China (No.2016YFC1306204), the National Key R&D Program of China (No. 2016YFC1306105) and the Key Research and Development Program of Hunan Province (No. 2017SK2032). These sources had no further role in this study design, in the data collection and analysis, in the writing of the report, and in the decision to submit the paper for publication. Declaration of Interest: The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest. Ethical Approval: This study was approved by the Ethics Committee of the Second Xiangya Hospital.
"""Kajiki public API.

Re-exports the package's public names so callers can use
``from kajiki import XMLTemplate`` etc. without importing submodules.
"""
# Template loaders: filesystem, in-memory (for tests), and package-resource.
from .loader import FileLoader, MockLoader, PackageLoader
from .template import Template
from .text import TextTemplate
from .util import expose, flattener
from .version import __release__, __version__
from .xml_template import XMLTemplate

# Names exported by ``from kajiki import *``; defines the public surface.
__all__ = [
    "expose",
    "flattener",
    "Template",
    "MockLoader",
    "FileLoader",
    "PackageLoader",
    "TextTemplate",
    "XMLTemplate",
    "__version__",
    "__release__",
]
package de.czyrux.store.core.data.sources; import java.util.List; import de.czyrux.store.core.data.util.TimeDelayer; import de.czyrux.store.core.domain.product.Product; import de.czyrux.store.core.domain.product.ProductDataSource; import io.reactivex.Single; public class InMemoryProductDataSource implements ProductDataSource { private final TimeDelayer timeDelayer; public InMemoryProductDataSource(TimeDelayer timeDelayer) { this.timeDelayer = timeDelayer; } @Override public Single<List<Product>> getAllCatalog() { return Single.fromCallable(() -> { timeDelayer.delay(); return ProductProvider.getProductList(); }); } }
/**
 * Verifies that querying the graph space with no id/property predicate
 * returns every graph head in the social network store.
 */
@Test
public void testGetGraphSpaceWithoutIdPredicate() throws IOException {
    // Expected: all graph heads from the social network fixture.
    List<GraphHead> expectedHeads = Lists.newArrayList(getSocialGraphHeads());

    // Unfiltered query over the whole graph space.
    List<GraphHead> actualHeads = socialNetworkStore
        .getGraphSpace(Query.elements().fromAll().noFilter())
        .readRemainsAndClose();

    validateEPGMElementCollections(expectedHeads, actualHeads);
}
GuruLib is short for Guru Library which is a free online personal library database where you can easily keep track of your games, movies, books, and music. Guru Library is designed to automatically organize and categorize your collection with the least data entry possible. At GuruLib you can either keep your library private and protected, or you can make it public and allow your friends to browse through your proud collection. You and your friends can also discuss the latest games, movies, music, or books, as well as make suggestions on message boards.
def new_B_n(self):
    """Draw a new magnetic-field / electron-density realisation along self.r.

    Sets ``self.B`` (field strength per radial bin), ``self.Psin`` (field
    angle, Gaussian-turbulence case only) and ``self.n`` (electron density
    profile).  Mutates instance state only; returns None.
    """
    if self.B_gauss:
        # Two independent Gaussian-turbulence draws give the two transverse
        # field components; new_random_numbers() re-seeds between draws.
        Bt = self.bfield.Bgaus(self.r)
        self.bfield.new_random_numbers()
        Bu = self.bfield.Bgaus(self.r)
        # Transverse field magnitude and its angle in the (t, u) plane.
        self.B = np.sqrt(Bt ** 2. + Bu ** 2.)
        self.Psin = np.arctan2(Bt , Bu)
    if self.Bn_const:
        # Constant profiles: broadcast scalar n (and B, unless the Gaussian
        # branch above already produced a per-bin B) over all Nd domains.
        self.n = self.n * np.ones(int(self.Nd))
        if not self.B_gauss:
            self.B = self.B * np.ones(int(self.Nd))
    else:
        # Radially varying beta-model profile(s).  n may be a scalar or an
        # array; only its central value n0 is used as normalisation.
        if np.isscalar(self.n):
            n0 = self.n
        else:
            n0 = self.n[0]
        try:
            # Optional second beta-model component (self.n2); if the
            # attribute is missing the AttributeError is caught below and a
            # single-component profile is used.
            if np.isscalar(self.n2):
                n2 = self.n2
            else:
                n2 = self.n2[0]
            try:
                # Attribute probe: does a separate beta2 exist for the
                # second component?
                # NOTE(review): a missing ``self.beta2`` raises
                # AttributeError (caught by the OUTER handler), not
                # NameError, so this ``except NameError`` branch looks
                # unreachable -- confirm whether it should catch
                # AttributeError instead (two-component sqrt profile with a
                # shared beta when n2 exists but beta2 does not).
                self.beta2
                # Sum of two beta-model components, each with its own core
                # radius and slope; B scales with density via the eta law.
                self.n = n0 * (np.ones(int(self.Nd)) + self.r**2./self.r_core**2.)**(-1.5 * self.beta) +\
                    n2 * (np.ones(int(self.Nd)) + self.r**2./self.r_core2**2.)**(-1.5 * self.beta2)
                self.B = self.B * (self.n / (n0 + n2) )**self.eta
            except NameError:
                # Quadrature combination of the two components with a shared
                # beta slope (see NOTE above about reachability).
                self.n = np.sqrt(n0**2. * (np.ones(int(self.Nd)) + self.r**2./self.r_core**2.)**(-3. * self.beta) +\
                    n2**2. * (np.ones(int(self.Nd)) + self.r**2./self.r_core2**2.)**(-3. * self.beta) )
                self.B = self.B * (self.n / np.sqrt(n0**2. + n2**2.) )**self.eta
        except AttributeError:
            # No self.n2: plain single-component beta model.
            self.n = n0 * (np.ones(int(self.Nd)) + self.r**2./self.r_core**2.)**(-1.5 * self.beta)
            self.B = self.B * (self.n / n0 )**self.eta
    return
. INTRODUCTION Low back pain (LBP) is one of the most common disorders affecting office employees working with a computer, which inevitably leads to lower quality of life. The aim of this study was to analyze the quality of life of patients with LBP after application of TENS (Transcutaneous Electrical Nervous Stimulation). MATERIAL AND METHODS The study group included 30 computer workers (26 females and 4 males) aged 30-60 years (45 +/- 12). The quality of life was assessed using the World Health Organization Quality of Life instrument-Abbreviated version (WHOQOL-Bref) before and after 10 applications of TENS. Outcomes were evaluated with a visual analog score (VAS) for pain and Schober's test to measure the ability of a patient to flex his or her lower back. RESULTS The mean VAS value decreased significantly from 3.83 +/- 1.31 cm at baseline to 3.36 +/- 1.21 cm after treatment. Patients reported pain relief. The highest quality of life scores in terms of social relationships were found in the social category evaluating personal relationships, social support and sexual activity (15.91 +/- 2.07) and mental health (14.32 +/- 1.59). After treatment a significant increase in the flexion of lower back was observed in the majority of patients. No significant correlations between the quality of life and the intensity of pain and the flexion of lower back before and after treatment were found. CONCLUSIONS TENS therapy is an effective technique for pain relief in patients with LBP. TENS can also be used with other methods of LBP treatment and may improve the patients' quality of life.
def split_data(train_df, test_df, valid_set_size=0.33):
    """
    Split the Kaggle-style train/test frames into model-ready arrays.

    Columns are classified from the module-level ``dtypes`` mapping
    (column name -> dtype string): ``int8`` columns are binary,
    ``int16``/``category`` columns are categorical (and get dropped),
    everything else is numerical. The first column of each frame (the
    identifier) and the last train column (the label) are excluded.

    :param train_df: training DataFrame; last column is 'HasDetections'.
    :param test_df: test DataFrame (no label column).
    :param valid_set_size: fraction of training rows held out for validation.
    :return: (X_train, X_validation, X_test, Y_train, Y_validation) as ndarrays.

    NOTE: relies on a module-level ``dtypes`` dict and on sklearn's
    ``train_test_split`` being imported elsewhere in this module.
    """
    binary_features = []
    categorical_features = []
    numerical_features = []

    # Drop the id column; train additionally drops the trailing label column.
    X_train = train_df.iloc[:, 1:-1]
    X_test = test_df.iloc[:, 1:]

    for key, value in dtypes.items():
        # BUG FIX: these branches were two independent `if` statements, so an
        # 'int8' column was appended to BOTH binary_features and (via the
        # second statement's else) numerical_features. Use a single chain.
        if value == 'int8':
            binary_features.append(key)
        elif value in ('int16', 'category'):
            categorical_features.append(key)
        else:
            numerical_features.append(key)

    # The identifier is categorical by dtype but was already sliced off above.
    categorical_features.remove('MachineIdentifier')

    X_train = X_train.drop(columns=list(categorical_features)).values
    X_test = X_test.drop(columns=list(categorical_features)).values
    Y_train = train_df['HasDetections'].values

    # Fixed random_state keeps the split reproducible across runs.
    X_train, X_validation, Y_train, Y_validation = train_test_split(
        X_train, Y_train, test_size=valid_set_size, random_state=777)
    return X_train, X_validation, X_test, Y_train, Y_validation
Cross-Layer Multipath Multichannel MAC protocol for MANETs Utilising multiple disjoint paths in multiple channels can improve network performance by enabling a node to reroute data along discovered paths seamlessly when link failure is detected. However, depending on a stale/invalid route to recover from a broken link could increase the delay to recover from the broken link and degrade the network performance. In this paper, we propose a new MAC protocol (RIVC-MB) to enhance communication reliability in the multipath multichannel routing protocol. The reliability of transmitting/re-routing the data packet in multipath multichannel routing protocol is improved by providing early route invalidity detection and early switchover. Waiting time to access the medium is also improved, when a node is attempting to access a busy medium, by re-routing the data packet via the alternative route. The RIVC-MB protocol uses the cross-layer interaction between MAC and routing protocols to achieve these goals. The proposed protocol is implemented and extensively evaluated using the NS-2 simulator. Simulation results show that the new proposed protocol reduces the end-to-end delay, and reduces both the number of route error control packets and the number of dropped data packets in the routing layer. It also reduces the collision rate in the MAC layer in a dense network.
Crowdtap, a people powered marketing platform, polled roughly 6,000 consumers on its platform about the role that social media plays in regards to Super Bowl advertising. While the Seattle Seahawks and New England Patriots will square off in Sunday’s Super Bowl XLIX, many eyeballs will be on the ads showing in between the game action. 64.9% of respondents are either extremely or somewhat likely to interact with brands on social media while watching to find Super Bowl related content. 73.6% are either extremely or somewhat likely to take action on social media (e.g. use a hashtag, share a photo, tweet, etc.) if a Super Bowl TV ad asks them to.
/*
 * This file is a part of the TChecker project.
 *
 * See files AUTHORS and LICENSE for copyright details.
 *
 */

#ifndef TCHECKER_BASICTYPES_HH
#define TCHECKER_BASICTYPES_HH

#include <cstdint>
#include <iostream>
#include <limits>

/*!
 \file basictypes.hh
 \brief Definition of basic types for models
 */

namespace tchecker {

  /*!
   \brief Type of integers
   */
  using integer_t = int32_t;

  /*!
   \brief Type of event identifiers
   */
  using event_id_t = uint32_t;

  /*!
   \brief Type of process identifiers
   */
  using process_id_t = uint32_t;

  /*!
   \brief Type of variable identifiers
   */
  using variable_id_t = uint32_t;

  /*!
   \brief Type of variable size
   */
  using variable_size_t = uint32_t;

  /*!
   \brief Type of integer variable identifiers
   */
  using intvar_id_t = tchecker::variable_id_t;

  // Compile-time guarantee that every intvar id is representable as a
  // generic variable id (and vice versa for range containment).
  static_assert((std::numeric_limits<tchecker::intvar_id_t>::min() >=
                 std::numeric_limits<tchecker::variable_id_t>::min())
                &&
                (std::numeric_limits<tchecker::intvar_id_t>::max() <=
                 std::numeric_limits<tchecker::variable_id_t>::max()),
                "variable_id_t should contain intvar_id_t");

  /*!
   \brief Type of clock identifiers
   */
  using clock_id_t = tchecker::variable_id_t;

  // Same containment guarantee for clock identifiers.
  static_assert((std::numeric_limits<tchecker::clock_id_t>::min() >=
                 std::numeric_limits<tchecker::variable_id_t>::min())
                &&
                (std::numeric_limits<tchecker::clock_id_t>::max() <=
                 std::numeric_limits<tchecker::variable_id_t>::max()),
                "variable_id_t should contain clock_id_t");

  /*!
   \brief Definition of static variable zero clock name
   */
  // NOTE(review): a const namespace-scope object defined in a header has
  // internal linkage, so every translation unit gets its own copy; consider
  // C++17 `inline` or an extern declaration — confirm project C++ standard.
  std::string const zero_clock_name("0");

  /*!
   \brief Definition of static variable zero clock ID
   */
  tchecker::clock_id_t const zero_clock_id(0);

  /*!
   \brief Type of label identifiers
   */
  using label_id_t = uint32_t;

  /*!
   \brief Type of location identifiers
   */
  using loc_id_t = uint32_t;

  /*!
   \brief Type of edge identifiers
   */
  using edge_id_t = uint32_t;

  /*!
   \brief Type of synchronization identifiers
   */
  using sync_id_t = uint32_t;

  /*!
   \brief Type of node identifiers
   */
  using node_id_t = uint32_t;

  /*!
   \brief Strength of synchronization constraint
   */
  enum sync_strength_t {
    SYNC_WEAK,    /*!< Broadcast constraint */
    SYNC_STRONG   /*!< Handshaking constraint */
  };

  /*!
   \brief Output operator
   \param os : output stream
   \param s : synchronization strength
   \return os after s has been output
   */
  std::ostream & operator<< (std::ostream & os, enum tchecker::sync_strength_t s);

  /*!
   \brief Status of states
   */
  enum state_status_t {
    STATE_OK,                              /*!< state computation OK */
    STATE_INCOMPATIBLE_EDGE,               /*!< state computation failed due to unmatching locations */
    STATE_INTVARS_GUARD_VIOLATED,          /*!< state computation failed due to intvars guard violation */
    STATE_INTVARS_SRC_INVARIANT_VIOLATED,  /*!< state computation failed due to intvars source invariant violation */
    STATE_INTVARS_TGT_INVARIANT_VIOLATED,  /*!< state computation failed due to intvars target invariant violation */
    STATE_INTVARS_STATEMENT_FAILED,        /*!< state computation failed due to intvars edge statement */
    STATE_CLOCKS_GUARD_VIOLATED,           /*!< state computation failed due to clocks guard violation */
    STATE_CLOCKS_SRC_INVARIANT_VIOLATED,   /*!< state computation failed due to clocks source invariant violation */
    STATE_CLOCKS_TGT_INVARIANT_VIOLATED,   /*!< state computation failed due to clocks target invariant violation */
    STATE_EMPTY_ZONE,                      /*!< state computation resulted in an empty zone (no details provided) */
  };

} // end of namespace tchecker

#endif // TCHECKER_BASICTYPES_HH
<filename>app/src/main/java/me/tadho/markgo/view/fragments/MapsPickerFragment.java<gh_stars>0 /* * Copyright (c) 2017 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/
package me.tadho.markgo.view.fragments;

import android.annotation.SuppressLint;
import android.content.res.Resources;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.support.design.widget.FloatingActionButton;
import android.support.v4.app.Fragment;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;

import com.google.android.gms.location.LocationRequest;
import com.google.android.gms.maps.CameraUpdateFactory;
import com.google.android.gms.maps.GoogleMap;
import com.google.android.gms.maps.MapView;
import com.google.android.gms.maps.MapsInitializer;
import com.google.android.gms.maps.OnMapReadyCallback;
import com.google.android.gms.maps.model.CameraPosition;
import com.google.android.gms.maps.model.LatLng;
import com.google.android.gms.maps.model.MapStyleOptions;
import com.google.android.gms.maps.model.Marker;
import com.google.android.gms.maps.model.MarkerOptions;
import com.patloew.rxlocation.RxLocation;

import java.util.concurrent.TimeUnit;

import io.reactivex.Single;
import io.reactivex.disposables.Disposable;
import me.tadho.markgo.R;
import me.tadho.markgo.data.enumeration.Consts;
import me.tadho.markgo.utils.DisplayUtility;
import me.tadho.markgo.view.MapsActivity;
import timber.log.Timber;

/**
 * Fragment that lets the user pick a location on a Google map.
 *
 * <p>A draggable marker tracks the current pick; the "submit" FAB hands the
 * picked {@link LatLng} back to the hosting {@link MapsActivity}, and the
 * "my location" FAB recenters marker and camera on the device location via
 * RxLocation. Forwards all MapView lifecycle callbacks as required by the
 * Maps SDK.
 */
public class MapsPickerFragment extends Fragment
        implements View.OnClickListener, OnMapReadyCallback {

    // Subscription for the one-shot "my location" request; disposed in onDestroy().
    private Disposable pickLocationDisposable;
    private MapView mMapView;
    private GoogleMap googleMap;
    // Currently picked coordinate (updated on tap / drag / my-location).
    private LatLng mLatLng;
    // Coordinate the fragment was opened with; used to detect "nothing changed".
    private LatLng initialLatLng;
    private Marker pickerMarker;
    private RxLocation rxLocation;
    private LocationRequest locationRequest;
    private FloatingActionButton fabSubmit;
    private FloatingActionButton fabMyLocation;

    @Override
    public void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        Timber.d("Getting fragment argument");
        // Defaults; overridden when the caller supplied a starting LatLng.
        initialLatLng = new LatLng(0,0);
        mLatLng = Consts.MALANG_LATLNG;
        if (getArguments() != null) {
            mLatLng = getArguments().getParcelable(Consts.LATLNG_EXTRA);
            initialLatLng = mLatLng;
            Timber.d("Argument found -> "+mLatLng);
        }
    }

    @Nullable
    @Override
    @SuppressLint("MissingPermission")
    public View onCreateView(@NonNull LayoutInflater inflater,
                             @Nullable ViewGroup container,
                             @Nullable Bundle savedInstanceState) {
        View rootView = inflater.inflate(R.layout.fragment_maps_picker, container, false);
        if (mMapView == null) mMapView = rootView.findViewById(R.id.maps_picker_view);
        // MapView requires explicit lifecycle forwarding, starting here.
        mMapView.onCreate(savedInstanceState);
        try {
            MapsInitializer.initialize(getActivity().getApplicationContext());
        } catch (Exception e) {e.printStackTrace();}
        // Asynchronously obtain the GoogleMap; continues in onMapReady().
        mMapView.getMapAsync(this);
        return rootView;
    }

    @Override
    public void onViewCreated(@NonNull View view, @Nullable Bundle savedInstanceState) {
        super.onViewCreated(view, savedInstanceState);
        fabSubmit = getView().findViewById(R.id.fab_submit_location);
        fabMyLocation = getView().findViewById(R.id.fab_my_location);
        fabSubmit.setOnClickListener(this);
        fabMyLocation.setOnClickListener(this);
    }

    @Override
    public void onClick(View v) {
        switch (v.getId()) {
            case R.id.fab_submit_location :
                Timber.d("Submit Location button clicked");
                // Only report a result when the pick actually moved;
                // otherwise just navigate back.
                if (!initialLatLng.equals(mLatLng))
                    ((MapsActivity)getActivity()).sendActivityResult(mLatLng);
                else getActivity().onBackPressed();
                break;
            case R.id.fab_my_location :
                Timber.d("My Location button clicked");
                pickLocationDisposable = myLocationSingle().subscribe();
                break;
        }
    }

    @Override
    @SuppressLint("MissingPermission")
    public void onMapReady(GoogleMap mMap) {
        googleMap = mMap;
        // Apply the dark style at night; failures are logged, not fatal.
        if (!DisplayUtility.isDay()) {
            try {
                boolean success = googleMap.setMapStyle(MapStyleOptions
                        .loadRawResourceStyle(getActivity().getBaseContext(),
                                R.raw.map_style_dark));
                if (!success) {
                    Timber.e("MapsActivityRaw", "Style parsing failed.");
                }
            } catch (Resources.NotFoundException e) {
                Timber.e("MapsActivityRaw", "Can't find style.", e);
            }
        }
        googleMap.getUiSettings().setMapToolbarEnabled(false);
        googleMap.getUiSettings().setMyLocationButtonEnabled(false);
        googleMap.setMyLocationEnabled(true);
        // Constrain camera panning to the supported area.
        googleMap.setLatLngBoundsForCameraTarget(Consts.MALANG_BOUNDS);
        MarkerOptions markerOptions = new MarkerOptions()
                .position(mLatLng)
                .draggable(true);
        pickerMarker = googleMap.addMarker(markerOptions);
        // Consume marker taps so the default info-window behavior never fires.
        googleMap.setOnMarkerClickListener(marker -> {
            Timber.d("Marker clicked, playing dead!");
            return true;
        });
        // Tapping the map moves the pick; dragging the marker does too.
        googleMap.setOnMapClickListener(latLng -> {
            pickerMarker.setPosition(latLng);
            mLatLng = latLng;
        });
        googleMap.setOnMarkerDragListener(new GoogleMap.OnMarkerDragListener() {
            @Override
            public void onMarkerDragStart(Marker marker) {}
            @Override
            public void onMarkerDrag(Marker marker) {}
            @Override
            public void onMarkerDragEnd(Marker marker) {
                mLatLng = marker.getPosition();
            }
        });
        CameraPosition cameraPosition = new CameraPosition.Builder()
                .target(mLatLng).zoom(17).build();
        googleMap.moveCamera(CameraUpdateFactory.newCameraPosition(cameraPosition));
    }

    /**
     * Builds a Single that checks/handles location settings and then resolves
     * to the device location (see {@link #getMyLocationSingle(Boolean)}).
     */
    private Single myLocationSingle() {
        // One high-accuracy fix, with a 10 s timeout on the whole chain.
        locationRequest = LocationRequest.create()
                .setPriority(LocationRequest.PRIORITY_HIGH_ACCURACY)
                .setNumUpdates(1)
                .setInterval(3000);
        rxLocation = new RxLocation(getActivity().getBaseContext());
        rxLocation.setDefaultTimeout(10, TimeUnit.SECONDS);
        return rxLocation.settings()
                .checkAndHandleResolution(locationRequest)
                .flatMap(this::getMyLocationSingle);
    }

    /**
     * Resolves to the current device location when settings resolution
     * succeeded, moving marker and camera there as a side effect; otherwise
     * falls back to the currently picked coordinate.
     *
     * @param isActivated result of the location-settings resolution
     */
    @SuppressLint("MissingPermission")
    private Single getMyLocationSingle(Boolean isActivated) {
        if (isActivated)
            return rxLocation.location()
                    .updates(locationRequest)
                    .map(loc -> new LatLng(loc.getLatitude(),loc.getLongitude()))
                    .take(1)
                    // Falls back to mLatLng if the stream completes empty.
                    .single(mLatLng)
                    .doOnSuccess(latLng -> {
                        Timber.d("Getting my location -> "+latLng);
                        Timber.d("Setting marker on my location");
                        pickerMarker.setPosition(latLng);
                        Timber.d("Animating camera on my location");
                        CameraPosition cameraPosition = new CameraPosition.Builder()
                                .target(latLng).zoom(17).build();
                        googleMap.animateCamera(
                                CameraUpdateFactory.newCameraPosition(cameraPosition),
                                800,null);
                        mLatLng = latLng;
                    })
                    .doOnError(e -> {
                        Timber.e("Failed to get location updates");
                        Timber.e(e.getMessage());
                    });
        Timber.d("location isn't activated, return dummy single");
        return Single.just(mLatLng);
    }

    @Override
    public void onResume() {
        super.onResume();
        mMapView.onResume();
    }

    @Override
    public void onPause() {
        super.onPause();
        mMapView.onPause();
    }

    @Override
    public void onSaveInstanceState(@NonNull Bundle outState) {
        super.onSaveInstanceState(outState);
        mMapView.onSaveInstanceState(outState);
    }

    @Override
    public void onDestroy() {
        super.onDestroy();
        // Tear down Rx subscription and all map listeners/references to
        // avoid leaking the activity through the map callbacks.
        if (pickLocationDisposable != null) {
            pickLocationDisposable.dispose();
            pickLocationDisposable = null;
        }
        pickerMarker.remove();
        pickerMarker = null;
        googleMap.setOnMarkerClickListener(null);
        googleMap.setOnMarkerDragListener(null);
        googleMap.setOnMapClickListener(null);
        googleMap.clear();
        googleMap = null;
        fabSubmit.setOnClickListener(null);
        fabMyLocation.setOnClickListener(null);
        mMapView.getMapAsync(null);
        mMapView.onDestroy();
    }

    @Override
    public void onLowMemory() {
        super.onLowMemory();
        mMapView.onLowMemory();
    }
}
Business mogul and reality television star Donald Trump will hold a fundraiser for Florida Rep. Allen West, according to an invitation obtained by The Daily Caller. The breakfast fundraiser is set for Oct. 1 and will be held at Trump Tower. The minimum donation is $500 for a “young professional,” while a “host level” donation ranks at $2,500. West is in a competitive race with political newcomer Patrick Murphy — a Democrat who entered with the sole purpose of ousting West. Currently, West has over $3 million cash on hand, and has raised over $10.5 million since January 2011. Murphy has just over $800,000 cash on hand, and has raised just over $2 million.
Electrocardiographic analysis during uninterrupted cardiopulmonary resuscitation Objective:Prior studies have shown that interruptions of chest compressions could result in high failure rates of resuscitation. Chest compression artifacts force the interruption of compressions before electrocardiographic rhythm analysis. It was the goal of this study to evaluate the accuracy of an automated electrocardiographic rhythm analysis algorithm designed to attenuate compression-induced artifact and minimize uninterrupted chest compressions. Design:Retrospective diagnostic analysis. Setting:Out-of-hospital cardiopulmonary resuscitation. Subjects:Eight hundred thirty-two patients. Interventions:Patients were treated with defibrillation and cardiopulmonary resuscitation. Continuous data were recorded using automated external defibrillators with concurrent measurement of electrocardiographic and sternal motion during chest compressions. Measurements and Results:Human electrocardiographics recorded by automated external defibrillators were annotated and randomly selected to build distinct training and testing databases. The artifact reduction and tolerant filter was applied to the electrocardiographic signal. The algorithm was optimized with the training database (sensitivity, 93.9%; specificity, 91.2%) and tested with the testing database (sensitivity, 92.1%; specificity, 90.5%). Average attenuation of compression-induced artifact was more than 35 dB. Conclusions:Shockable ventricular arrhythmias can be differentiated from electrocardiographic rhythms not requiring defibrillation in the presence of chest compression-induced artifact with sensitivity and specificity above 90%. With the artifact reduction and tolerant filter, it is possible to effectively eliminate pre- and postshock compression pauses.
. OBJECTIVE To study the association between RB gene and the oncogenesis of bone tumors. METHODS Southern blot and immunohistochemical techniques were used to detect the structural anomalies of RB gene in 34 cases of bone tumors and the expression of RB protein in 99 paraffin-embedded bone neoplasma. RESULTS The deletion and/or rearrangement of RB gene were detected only in 42.9% (9/21) of osteosarcoma; lack of RB protein expression was noticed in 12 cases including 7 cases of osteosarcoma (7/26, 26.9%) and 5 cases of chondrosarcoma (5/23, 21.7%). Benign giant cell tumor of bone and chondroblastoma showed positive RB protein expression; osteosarcoma cells presenting poor differentiation and apparent atypia all showed no expression of RB protein. Most of the high-grade chondrosarcoma also had no RB protein expression. CONCLUSION The alterations of RB gene and loss of RB protein may play a role in the pathogenesis and progression of malignant bone neoplasms.
Neuroblastoma tumors have a hierarchy of tumor initiating cells reflecting neural crest development A number of different tumors have been shown to contain subsets of cells that have tumor forming capability, known as tumor initiating cells (TICs). We have established neuroblastoma TICs (NTIC) from primary neuroblastoma tumors and from bone marrow metastases. These cell lines genetically resemble the tumor of origin. A number of these NTIC cell lines express markers of committed neuroendocrine sympathoadrenal cells such as TH and DBH. We found that other cell lines are partially or completely devoid of these neuroendocrine markers and instead show expression of markers of the early neural crest including NESTIN, SOX10/SOX9 (SOXE proteins), CD24, CD133 and NOTCH. Some combinations of the markers are reflective of cells of the premigratory neural crest. Our data suggests that the developmental hierarchy of the neural crest is retained in the tumor. We will present data on the developmental classification, the developmental plasticity and response of the NTICs to external differentiation clues.
// baseFlags returns top level flags. func (s *SysbenchBase) baseFlags() []string { var ret []string if s.Threads > 0 { ret = append(ret, fmt.Sprintf("--threads=%d", s.Threads)) } if s.Time > 0 { ret = append(ret, fmt.Sprintf("--time=%d", s.Time)) } return ret }
import socket
import ast


class RddaUr5ControlClient(object):
    """
    UDP broadcast client for controlling a UR5 robotic arm fitted with an
    RDDA gripper.

    Every operation is a simple request/response exchange: a comma-separated
    command string is broadcast on ``udp_sent_port`` and a single datagram
    reply is awaited on ``udp_recv_port``. Command methods return the raw
    reply string; read/measure methods parse the reply with
    ``ast.literal_eval``.
    """

    def __init__(self):
        self.udp_recv_port = 56801
        self.udp_sent_port = 56800
        self.udp_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        # Broadcast permission is required for sendto('<broadcast>', ...).
        self.udp_client.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.udp_client.bind(("", self.udp_recv_port))

    def _send_command(self, command_str, bufsize=16, literal=False):
        """Broadcast one command and block for its reply.

        :param command_str: comma-separated command, e.g. 'home_rdda'.
        :param bufsize: receive buffer size for the expected reply.
        :param literal: when True, parse the reply with ast.literal_eval
            (used for position/bound readings); otherwise return the string.
        """
        self.udp_client.sendto(command_str.encode(), ('<broadcast>', self.udp_sent_port))
        data, addr = self.udp_client.recvfrom(bufsize)
        decoded = data.decode()
        return ast.literal_eval(decoded) if literal else decoded

    def set_rdda_stiffness(self, stiff0, stiff1):
        return self._send_command('set_rdda_stiffness,%s,%s' % (stiff0, stiff1))

    def init_rdda_stiffness(self):
        return self._send_command('init_rdda_stiffness')

    def set_rdda_positions(self, position0, position1):
        return self._send_command('set_rdda_positions,%s,%s' % (position0, position1))

    def set_rdda_max_velocities(self, max_velocity0, max_velocity1):
        return self._send_command('set_rdda_max_velocities,%s,%s' % (max_velocity0, max_velocity1))

    def set_rdda_max_efforts(self, max_effort0, max_effort1):
        return self._send_command('set_rdda_max_efforts,%s,%s' % (max_effort0, max_effort1))

    def home_rdda(self):
        return self._send_command('home_rdda')

    def read_rdda_positions(self):
        return self._send_command('read_rdda_positions', bufsize=64, literal=True)

    def read_rdda_lower_bounds(self):
        return self._send_command('read_rdda_lower_bounds', bufsize=64, literal=True)

    def read_rdda_upper_bounds(self):
        return self._send_command('read_rdda_upper_bounds', bufsize=64, literal=True)

    def read_rdda_origins(self):
        return self._send_command('read_rdda_origins', bufsize=64, literal=True)

    def move_ur5(self, x, y, z, velocity):
        return self._send_command('move_ur5,%s,%s,%s,%s' % (x, y, z, velocity))

    def move_ur5_linear(self, y_target):
        return self._send_command('move_ur5_linear,%s' % y_target)

    def home_ur5(self):
        return self._send_command('home_ur5')

    def move_read_discrete(self, step_size, step_num):
        # Large buffer: the reply carries a full series of readings.
        return self._send_command('move_read_discrete,%s,%s' % (step_size, step_num),
                                  bufsize=10240, literal=True)

    def move_read_continuous(self, step_size, step_num):
        return self._send_command('move_read_continuous,%s,%s' % (step_size, step_num),
                                  bufsize=10240, literal=True)
<gh_stars>1-10 import { StatusBar } from "expo-status-bar"; import React from "react"; import { useEffect } from "react"; import { StyleSheet, RefreshControl, View, FlatList, ActionSheetIOS, } from "react-native"; import List from "./components/List/List"; type ActionType = { payload: any; type: string; }; const initialState = { companies: [], }; const reducer = (state = [], action: ActionType) => { switch (action.type) { case "SET_COMPANIES": return (state = action.payload); default: return state; } }; const App: React.FC = () => { const [data, dispatch] = React.useReducer(reducer, initialState); const [refreshing, setRefreshing] = React.useState(false); const refetchData = () => { setRefreshing(true); setInterval(() => { setRefreshing(!true); }, 1000); }; useEffect(() => { if (refreshing) { fetch("https://random-data-api.com/api/company/random_company?size=100") .then((res) => res.json()) .then((data) => { dispatch({ type: "SET_COMPANIES", payload: data?.map((ele: any) => ({ id: ele.id, suffix: ele.suffix, industry: ele.industry, uid: ele.uid, })), }); }); } }, [refreshing]); return ( <View style={styles.container}> <StatusBar style="auto" /> <FlatList style={styles.flatlist} data={data} renderItem={(item: any) => <List item={item} />} keyExtractor={(item) => item.uid} refreshing={true} horizontal={false} onEndReached={refetchData} inverted={!true} refreshControl={ <RefreshControl refreshing={refreshing} onRefresh={refetchData} /> } ></FlatList> </View> ); }; export default App; const styles = StyleSheet.create({ container: { flex: 1, backgroundColor: "#fff", alignItems: "center", justifyContent: "center", paddingTop: 25, }, flatlist: { flex: 1, width: "100%", paddingVertical: 10, }, });
// NewUpdateProfileDefault creates a UpdateProfileDefault with default headers values func NewUpdateProfileDefault(code int) *UpdateProfileDefault { return &UpdateProfileDefault{ _statusCode: code, } }
Value of Combined Plasma NGAL, Cystatin C and NT-proBNP in the Diagnosis of Cardiorenal Syndrome Type 1 Background: The presence of acute kidney injury in the setting of acute heart failure (AHF) or acute decompensated heart failure (ADHF) is a very common occurrence and was termed cardiorenal syndrome 1 (CRS1). Neutrophil gelatinase-associated lipocalin (NGAL) in the blood and urine is one of the earliest biomarkers of acute kidney injury due to ischemia or renal toxicity. Cystatin C is an early marker of renal dysfunction. NT-proBNP is valuable in the diagnosis, prognosis and treatment of acute and chronic heart failure. This study was aimed to evaluate the diagnostic efficacy of the combination of plasma NGAL, Cystatin C and NT-proBNP in the diagnosis of CRS1. Methods: there were 139 patients with AHF or ADHF in the department of Cardiovascular resuscitation and Interventional cardiology at Ho Chi Minh City 115 People Hospital from November 2018 to May 2019. This was a prospective cohort study. Results: there were 48 cases (rate 34.5%) with CRS1, mean age 66.12 ± 15.77, men accounted for 50.4%. There were no significant differences of vital signs on admission, diagnosis, or type of heart failure between CRS1 and Non-CRS1 groups. The urea, creatinine on first day (creatinineD1) and third day (creatinineD3), NT-proBNP, Cystatin C and NGAL levels were higher in the group with CRS1 than Non-CRS1, and the difference was statistically significant. The optimal cut-off value of plasma NGAL was > 353.23 ng/ml, with an Area Under Curve (AUC) of 0.732 (95% CI 0.65-0.80, p < 0.001); the optimal cut-off value of Cystatin C was > 1.81 mg/dl, with an AUC of 0.787 (95% CI 0.71-0.85, p < 0.001), sensitivity 53.19%, specificity 77.17%, positive predictive value 54.3%, negative predictive value 76.3%. Combining the three biomarkers plasma NGAL, Cystatin C and NT-proBNP, the specificity of the diagnosis was the highest at 95.6%, and the positive predictive value was the highest at 84.62% in diagnosing CRS1.
Conclusions: The combined plasma NGAL, Cystatin C and NT-pro BNP is high value in the diagnosis of CRS1 in patients with AHF or ADHF.
package loader

import (
	"github.com/galaco/bsp/primitives/game"
	"github.com/galaco/lambda-core/lib/util"
	"github.com/galaco/lambda-core/loader/prop"
	"github.com/galaco/lambda-core/model"
	"github.com/galaco/lambda-core/resource"
	"github.com/galaco/filesystem"
	"strings"
)

// LoadStaticProps GetFile all staticprops referenced in a
// bsp's game lump.
// It first loads every referenced .mdl into the resource manager, then
// builds one model.StaticProp per prop lump entry, substituting the error
// model for any prop whose model could not be loaded.
func LoadStaticProps(propLump *game.StaticPropLump, fs *filesystem.FileSystem) []model.StaticProp {
	ResourceManager := resource.Manager()

	// The error model is the fallback for every missing prop, so failing to
	// load it is treated as fatal.
	errorProp, err := prop.LoadProp(ResourceManager.ErrorModelName(), fs)
	// If we have no error model, expect this to be fatal issue
	if errorProp == nil && err != nil {
		util.Logger().Panic(err)
	}

	// Collect the distinct model paths referenced by the lump entries.
	propPaths := make([]string, 0)
	for _, propEntry := range propLump.PropLumps {
		propPaths = append(propPaths, propLump.DictLump.Name[propEntry.GetPropType()])
	}
	propPaths = util.RemoveDuplicatesFromList(propPaths)
	util.Logger().Notice("Found %d staticprops", len(propPaths))

	// Load each model once; failures are tolerated here and handled per-entry
	// below via the error model.
	numLoaded := 0
	for _, path := range propPaths {
		if !strings.HasSuffix(path, ".mdl") {
			path += ".mdl"
		}
		_, err := prop.LoadProp(path, fs)
		if err != nil {
			continue
		}
		numLoaded++
	}
	util.Logger().Notice("Loaded %d props, failed to load %d props", numLoaded, len(propPaths)-numLoaded)

	// Build the StaticProp list, falling back to the error model when the
	// referenced model is absent from the resource manager.
	staticPropList := make([]model.StaticProp, 0)
	for _, propEntry := range propLump.PropLumps {
		modelName := propLump.DictLump.Name[propEntry.GetPropType()]
		m := ResourceManager.Model(modelName)
		if m != nil {
			staticPropList = append(staticPropList, *model.NewStaticProp(propEntry, &propLump.LeafLump, m))
			continue
		}
		// Model missing, use error model
		m = ResourceManager.Model(ResourceManager.ErrorModelName())
		staticPropList = append(staticPropList, *model.NewStaticProp(propEntry, &propLump.LeafLump, m))
	}

	return staticPropList
}
/*! \brief ctor
 * \param cfg code to execute
 * \param s settings to create a RunTimeEnv
 * \param table symbols to load into the RunTimeEnv
 * \throws SymException if a certain symbol can not be loaded
 *
 * After construction the Executor is ready to execute \a cfg in the
 * created RunTimeEnv using the symbol set \a symbols that are loaded.
 * No data (possible output) is yet overwritten.
 */
calc::Executor::Executor(
    CFGNode* cfg,
    const RunTimeEnvSettings& s,
    const ASTSymbolTable& table):
  CFGVisitor(cfg),
  d_rte(s),
  d_timeoutput(0),
  d_progressInfo(new ProgressInfo()),
  d_counter(COUNT_NR)
{
  // Default callback until a client installs its own.
  d_progressCallBack=&defaultProgressCallBack;
  // Initialize progress bookkeeping from the configured timer range;
  // nothing has been executed yet, hence the zeroed positions.
  d_progressInfo->nrTimeSteps=d_rte.timer().lastInt();
  d_progressInfo->inTimeStep =0;
  d_progressInfo->inStatementLineNr=0;
  // Load every symbol into the runtime environment up front
  // (may throw SymException, see \throws above).
  for( ASTSymbolTable::const_iterator i=table.begin(); i!=table.end(); ++i)
    d_rte.load(i->second);
}
/**
 * Removes the port of a domain.
 *
 * <p>Strips everything from the first {@code ':'} onward; a domain with no
 * colon is returned unchanged.
 *
 * @param domain the domain
 * @return the domain without the port.
 */
public static String removePort(String domain) {
    final int separator = domain.indexOf(':');
    return (separator == -1) ? domain : domain.substring(0, separator);
}
The US Environmental Protection Agency (EPA) placed the ban on BP in November, freezing it out of new supply and exploration contracts, after it pleaded guilty to criminal charges related to the Gulf of Mexico disaster. The EPA accused it of a “lack of business integrity” over the handling of the 2010 accident, which killed 11 men and spewed millions of barrels of oil into the Gulf in the worst offshore spill in US history. BP, which was taken by surprise by the EPA ban, initially suggested it would only be a brief suspension that would soon be resolved through an agreement. But in February the EPA took further action to issue a “mandatory debarment” against BP Exploration and Production’s Houston headquarters. BP has now sued the EPA and two senior officials at the regulator, demanding the ban be lifted. It alleges that “the suspension of BP is unlawful, arbitrary, capricious, and an abuse of EPA’s discretion” and says it “faces a substantial threat of irreparable harm if an injunction is not granted”. The claim comes just two weeks after BP chief Bob Dudley told reporters the ban was not “causing distress in any way”. BP is one of the largest fuel suppliers to the US government, with contracts worth more than $1.34bn, primarily supplying the military. It also has more than 700 oil and gas exploration blocks in the Gulf, which remains one of its most important regions. Existing contracts are not affected by the ban. BP said in a court filing: “EPA’s suspension of BP is not temporary and there is no pending agency investigation or legal or debarment proceedings that would permit the suspension to continue lawfully.” The oil giant said on July 30: “Prolonged suspension or debarment from entering new federal contracts... 
could have a material adverse impact on the group’s operations in the US.” It hinted that further action against the EPA was possible, noting: “Decisions reached by the EPA can be challenged in federal court.” Asked at the time about the possibility of a challenge, Mr Dudley said: “I think that’s a more routine discussion we have with the EPA. The fact we haven’t got the details of that worked out isn’t causing distress in any way.” He played down the significance of the ban, saying: “We have largest acreage position in Gulf of Mexico, more than 700 blocks... that’s plenty, we have a lot. We have been debarred from supplying fuel to the US military going forward but quite frankly we have a very big business in the US and this is not distracting us from what we do.” BP has previously said the contracts are relatively low margin despite the high revenues involved. The move against the EPA adds yet another strand to the ever-growing web of litigation in which BP is embroiled over the Gulf disaster. The company is still on trial over civil penalties and could face fines of more than $17bn under the Clean Water Act - compared with $3.5bn it has budgeted - if it is found grossly negligent, a charge it denies. It is also battling to stem payouts under the compensation settlement it struck last year with businesses who say they lost money in the spill. BP says many of the payouts are for “fictitious” losses. The cost has risen from an original $7.8bn estimate to $9.6bn and is this quarter expected to use up the remaining $300m BP has set aside, pushing the total bill for the disaster above $42.4bn. BP shares closed up 3.35 at 446¼p.
// primer/ch06/exercise6.22.cpp
//
// Created by zing on 5/20/2020.
//
// C++ Primer exercise 6.22: write a function that swaps two int pointers.
// Passing the pointers by reference (int *&) lets the function exchange the
// callers' pointers themselves, not just the pointed-to values.
#include <iostream>

// Set the referenced int to zero.
void reset(int &value) {
    value = 0;
}

// Exchange two int pointers; after the call each caller variable holds the
// address the other one held before.
void swap(int *&left, int *&right) {
    int *hold = left;
    left = right;
    right = hold;
}

int main() {
    int i = 42, j = 99, k = 200;
    int *lft = &i;
    int *rht = &j;
    int *kk = &k;

    swap(lft, rht);                                     // lft -> j, rht -> i
    std::cout << *lft << " " << *rht << std::endl;      // 99 42

    swap(lft, kk);                                      // lft -> k, kk -> j
    std::cout << *lft << " " << *kk << std::endl;       // 200 99

    reset(k);                                           // k becomes 0
    std::cout << *lft << " " << *kk << std::endl;       // 0 99
    return 0;
}
Broadband near-infrared emission of chromium-doped sulfide glass-ceramics containing Ga2S3 nanocrystals. Upon 808 nm excitation, an intense broadband near-infrared emission from Cr4+ has been observed in 80GeS2-20Ga2S3 chalcogenide glass-ceramics (GCs) containing Ga2S3 nanocrystals. The emission band peaking at 1250 nm covers the O, E, S bands (1000-1500 nm). The formation of Ga2S3 nanocrystals (∼20 nm) increases the emission intensity of Cr4+ by more than three times. The quantum efficiency of the present GCs is as great as 36% at room temperature.
Channeling chaotic transport in a wave-particle experiment A numerical and experimental study of a control method aimed at channeling chaos by building barriers in phase space is performed on a paradigm for wave-particle interaction, i.e., a traveling wave tube. Control of chaotic diffusion is achieved by adding small apt modifications to the system with a low additional cost of energy. This modification is realized experimentally through additional waves with small amplitudes. Robustness of the method is investigated both numerically and experimentally. I. INTRODUCTION The interaction of a beam of charged particles with electromagnetic waves is ubiquitous in physics, and it is central to many useful devices such as particle accelerators, plasma fusion experiments or free electron lasers. In these experimental set-ups, the waves are used to accelerate the particles or to guide them by assigning a specific mean trajectory. However, the dynamics of these systems is usually characterized by the competition of many degrees of freedom and thus, shows generically chaotic behaviors. Such behaviors represent a limit to the achievement of high performances in these devices. Consequently, there has been a growing interest in controlling chaos which here means to reduce it when and where it is undesirable and to increase it when it is useful. The sensitivity of chaotic systems to small perturbations triggered a strong interdisciplinary effort to control chaos. After the seminal work on optimal control by Pontryagin, efficient methods were proposed for controlling chaotic systems by nudging targeted trajectories. However, for many body experiments such methods are hopeless due to the high number of trajectories to deal with simultaneously. It was recently proposed a local control method which aims at building barriers in phase space and hence confines all the trajectories rather than following them individually. 
These barriers are stable structures in phase space like for instance invariant tori, which are generically broken by the perturbation. The reduction of chaotic behaviors is achieved by using a small apt perturbation of the system which keeps its Hamiltonian structure. In this article, we consider a traveling wave tube (TWT) specially designed to study wave particle interaction which is used to investigate experimentally the control method and its robustness. The dynamics in this experimental apparatus can be accurately represented using a Hamiltonian which describes the motion of a charged particle (with unit mass) interacting with two electrostatic waves : where (p, x) ∈ R are the momentum and position of the particle in a tube of length L. The amplitudes, wave numbers, frequencies and phases of the two waves are denoted respectively i, k i, i and i for i = 1, 2. We notice that the beam intensity is sufficiently low such that the wave growth rate is negligible upon the length of the experiment that is we are in the test-particle regime. Generically, the dynamics of the particles governed by Hamiltonian is a mixture of regular and chaotic behaviors, mainly depending on the amplitudes of the waves. The Chirikov parameter defined as the ratio between the two half-width of the primary resonances by the distance between these resonances, i.e., gives a first rough approximation of the chaoticity degree of the system. Hamiltonian has a typical behavior of integrable system for small values of this parameter (s≪ 1). For large enough amplitudes of the waves (s ∼ 1), large scale chaos occurs in phase space. As a consequence, the particle can have an arbitrary velocity in between the two phase velocities of the waves ( 2 /k 2 and 1 /k 1 ). In this TWT, such typical chaotic behavior has been observed directly. 
This chaotic diffusion of the particles in phase space can be reduced by using an apt control term which consists here as an additional wave (or more generally a set of waves) of small amplitude. The characteristics of this additional wave are computed explicitly, and then the wave is injected in addition to the two others. The results presented in this article were announced in Ref.. The paper is organized as follows : The control method is briefly recalled in Sec. II A and its application to the considered Hamiltonian is presented in Sec. II B. Numerical investigations of the effect of the control term and its robustness are reported in Secs. II C and II D. In Sec. III A, a description of the experimental set-up precedes the results of the implementation of the control term shown in Sec. III B as well as its robustness in Sec. III C. II. LOCAL CONTROL METHOD APPLIED TO A TWO WAVE MODEL The Hamiltonian of an integrable system can be locally written as a function H 0 (A) of the action variables The equations of motion for H 0 (A) show that the action variables are constant, and consequently the trajectories with given actions A are confined to evolve on a d-dimensional torus with frequency vector 0 (A) = ∂H 0 /∂A. The dynamics on this torus is periodic or quasi-periodic: (t) = 0 (A)t + with frequency vector 0 (A). In the particular case given by Hamiltonian an integrable situation is given by 1 = 2 = 0 so that the dynamics of the integrable system H = p 2 /2 is characterized by constant velocity (p = const). A monokinetic beam of charged particles remains monokinetic. If the system described by H 0 is perturbed, i.e. we consider the Hamiltonian the integrability is generically lost and the system becomes chaotic. Even if KAM theorem establishes the stability with respect to small perturbations of invariant tori with a sufficiently incommensurate frequency vector these tori are destroyed when the amplitude of the perturbation V is large enough. 
The break-up of invariant tori leads to a loss of stability of the system until the last invariant torus of the integrable case is destroyed and then large scale diffusion occurs in phase space. In the case of a beam of charged particles whose dynamics is given by Hamiltonian, for 1, 2 sufficiently large, an initially monokinetic beam will spread in velocity due to this diffusion. A. Expression of the local control term The aim is to provide an explicit expression for an additional perturbation such that a specific invariant torus is reconstructed in the modified system. We state here the main result which has been extensively described in Ref. : We consider Hamiltonian systems written as where is a non-resonant vector of R d. Without loss of generality, we consider a region near A = 0 (by translation of the actions) and, since the Hamiltonian is nearly integrable, the perturbation W has constant and linear parts in actions of order, i.e. where Q is of order O( A 2 ). We notice that for = 0, the Hamiltonian H has an invariant torus with frequency vector at A = 0 for any Q not necessarily small. The controlled Hamiltonian we construct is The control term f we construct only depends on the angle variables and is given by where ∂ is the derivative operator with respect to, and is a linear operator defined as a pseudo-inverse of ∂, i.e. acting on W = k W k e ik as Note that f is of order 2. For any function W, Hamiltonian has an invariant torus with frequency vector close to. The equation of the torus which is restored by the addition of f is which is of order for W given by Eq. B. Computation of the control term for a two wave system We consider Hamiltonian with two waves, where the wavenumbers are chosen according to a dispersion relation k 1 = K( 1 ) and k 2 = K( 2 ) plotted on Fig. 1. 
In order to compute f, Hamiltonian with 1.5 degrees of freedom is mapped into an autonomous Hamiltonian with two degrees of freedom by considering that t mod 2 is an additional angle variable. We denote E its conjugate action. The autonomous Hamiltonian is FIG. 1: TWT dispersion relation (circles) with the helix mode at 30 MHz (square) and the beam mode at the same frequency but with propagation velocity choose equal to about 2.5 10 6 m/s (triangle); the control wave corresponds to the beating of these two modes Then, the momentum p is shifted by in order to define a local control in the region p ≈ 0. The Hamiltonian is rewritten as We rewrite Hamiltonian into the form where: The frequency vector of the selected invariant torus is = (, 1). From Eq. we have that f is given by provided = 1 /k 1 and = 2 /k 2. Adding this exact control term to Hamiltonian, the following invariant rotational torus is restored : This barrier of diffusion prevents a beam of particles to diffuse everywhere in phase space. We emphasize that the barrier persists for all the magnitudes of the waves ( 1, 2 ). The control term has four Fourier modes, (2k 1, −2 1 ), (2k 2, −2 2 ), ((k 1 + k 2 ), −( 1 + 2 )) and ((k 1 − k 2 ), −( 1 − 2 )). If we want to restore an invariant torus in between the two primary resonances approximately located at p ≈ 1 /k 1 and p ≈ 2 /k 2, the frequency has to be chosen between the two group velocities of the waves. If we consider a beam of particles with a velocity in between the velocities of the waves, i.e., v 1 = 1 /k 1 and v 2 = 2 /k 2, the main Fourier mode of the control term is A convenient choice is = (v 1 + v 2 )/2 and the control term is given by: Using this approximate control term does not guarantee the existence of an invariant torus. However, since the difference between f given by Eq. and f 2 given by Eq. 
is small, it is expected that for a Chirikov parameter s not too large, the effect of the control term is still effective and the barrier is restored close to C. Numerical results In this section we perform a numerical investigation of the effect of the exact and approximate control terms on the electron beam dynamics. We introduce the parameter r given by the ratio of the two wave amplitudes r = 1 / 2. In order to reproduce as close as possible the experimental set-up described in the next section (see also ), we consider the following values of amplitudes, wave numbers, frequencies and phases of the two electrostatic waves: ( 1, k 1, 1, 1 ) = (r, 1, 0, 0) and ( 2, k 2, 2, 2 ) = (, k, k, 0). Thus Hamiltonian can be written as i.e. v 1 = 0 and v 2 = 1. We perform simulations with r = 0.082 and k = 5/3. The amplitudes of the waves are determined by r and that are related to the Chirikov parameter by the following equation: The value of will be given by s. In the following we consider two values of s, that is s = 0.85 ( ∼ 0.11) and s = 1.27 ( ∼ 0.24). In this case the expression of the exact control term given by Eq. becomes while the approximate control term given by Eq. is Poincar sections of Hamiltonian In order to study the effect of a simplified control term on the electron beam dynamics we perform numerical simulations adding the control term given by Eq. to Hamiltonian. As one can see from the Poincar section depicted in Fig. 2, panels (c), the effect of the approximate control term is still present with the recreation of a set of invariant tori for s = 0.85. However, this regularization apparently disappears when we consider the fully chaotic regime with s = 1.27 (see Fig. 3, panel (c)). Nevertheless the approximate control term has still a significant effect on the reduction of chaotic diffusion. This fact can be observed on the probability distribution functions of the electron beam velocity. 
This diagnostic will also be used in the experiment in order to see the effect of the control terms. In Figs. 4 and 5, the initial velocity distribution function of a set of 10 4 particles is compared with the final one obtained by integrating over a time t = 50, the dynamics governed by Hamiltonian without control terms, plus the exact control term and plus the approximate control term. This investigation is performed for the two different values of the Chirikov parameter, The exact control term is also efficient in the fully chaotic regime (s = 1.27). Concerning the approximate control term it is very efficient for s = 0.85 while its efficiency is smaller in the strongly chaotic regime. However, it has still some regularizing effect, inducing the reconstruction of stable islands in phase space which can catch and thus confine a portion of the initial beam particles. D. Robustness of the method The robustness of the control method for the case s = 1.27 is studied with respect to an error on the phase or on the amplitude of the computed control term. In experiment, given the frequency 1 + 2, the wave number k 1 + k 2 of the control term does not satisfy in general the dispersion relation k = K() since the dispersion relation is not linear. In our case it means that the experimentally implemented control term is not the exact one. For this reason we investigate the robustness of the control term given by Eq. with a phase error, that is and with an error on its amplitude ruled by a factor, that is The values given by Eq. are = 0 and = 1. In order to quantify the robustness of the approximate control term given by Eq. or Eq., we introduce the kinetic coherence indicator defined as the ratio of the variance of the initial beam over the variance of the distribution function after a given integration time. The number of particles, the integration time and the initial conditions are equal to the ones used in the previous section. In Fig. 
6 we show the kinetic coherence as a function of the phase of the approximate control term for the strongly chaotic regime s = 1.27. We notice that or − will give the same velocity distribution function for symmetry reason. Therefore we only consider the range ∈ . The efficiency of the approximated control term is very sensitive with respect to the phase. In fact an error of 5 o − 6 o causes a decrease of the kinetic coherence of about 50% and with an error greater than 30 o the kinetic coherence drops in the range of values of the non-controlled case. Concerning the robustness with respect to an error on the amplitude of the approximate control term, we plot on Fig. 7 the behavior of the kinetic coherence as a function of the -factor which multiplies the amplitude of the approximate control term. We notice that around the reference value of = 1 (no error) there is a region ( ∈ ) where the approximate control term is very efficient in confining the beam of test particles with a kinetic coherence in between . On the other hand reducing the amplitude of the control term, i.e. its energy, there is a region where one has still a confining effect on the beam particle. For example for = 0.6 the kinetic coherence is larger by 50% of the value of the non-controlled case. A. Experimental set-up The experimental implementation of the control term is performed in a long traveling wave tube (TWT) extensively used to mimic beam plasma interaction and recently to observe resonance overlap responsible for Hamiltonian chaos. The TWT sketched in Fig. 8 is made up of three main elements: an electron gun, a slow wave structure (SWS) formed by a helix with axially movable antennas, and an electron velocity analyzer. The electron gun creates a beam which propagates along the axis of the SWS and is confined by a strong axial magnetic field with a typical amplitude of 0.05 T which does not affect the axial motion of the electrons. 
The central part of the gun consists of the grid-cathode subassembly of a ceramic microwave triode and the anode is replaced by a Cu plate with an on-axis hole whose aperture defines the beam diameter equal to 1 mm. Beam currents, I b < 1 mA, and maximal cathode voltages, |V c | < 200 V, can be set independently; an example of typical velocity distribution functions is given in Figs. 9 and 10 (panel (a)). Two correction coils provide perpendicular magnetic fields to control the tilt of the electron beam with respect to the axis of the helix. For the data shown in this article I b is chosen weak enough to ensure that the beam induces no wave growth and the beam electrons can be considered as test electrons. The SWS is long enough to allow nonlinear processes to develop. It consists in a wire helix that is rigidly held together by three threaded alumina rods and is enclosed by a glass vacuum tube. The pressure at the ion pumps on both ends of the device is 2 10 −9 Torr. The 4 meter long helix is made of a 0.3 mm diameter Be-Cu wire; its radius is equal to 11.3 mm and its pitch to 0.8 mm. A resistive rf termination at each end of the helix reduces reflections. The maximal voltage standing wave ratio is 1.2 due to residual end reflections and irregularities of the helix. The glass vacuum jacket is enclosed by an axially slotted 57.5 mm radius cylinder that defines the rf ground. Inside this cylinder but outside the vacuum jacket are four axially movable antennas which are capacitively coupled to the helix and can excite or detect helix modes in the frequency range from 5 to 95 MHz. Only the helix modes are launched, since empty waveguide modes can only propagate above 2 GHz. These modes have electric field components along the helix axis. 
Launched electromagnetic waves travel along the helix with the speed of light; their phase velocities, v j, along the axis of the helix are smaller by approximately the tangent of the pitch angle, giving 2.8 10 6 m/s < v j < 5.3 10 6 m/s. Waves on the beamless helix are slightly damped, with |k 0i j |/|k 0r j | ≈ 0.005 where k 0 = k 0r + i k 0i is the beamless complex wave number. The dispersion relation closely resembles that of a finite radius, finite temperature plasma, but, unlike a plasma, the helix does not introduce appreciable noise. Finally the cumulative changes of the electron beam distribution are measured with the velocity analyzer, located at the end of the interaction region. This trochoidal analyzer works on the principle that electrons undergo an E B drift when passing through a region in which an electric field E is perpendicular to a magnetic field B. A small fraction (0.5%) of the electrons passes through a hole in the center of the front collector, and is slowed down by three retarding electrodes. Then the electrons having the correct drift energy determined by the potential difference on two parallel deflector plates are collected after passing through an off-axis hole at the back of the analyzer. The time averaged collected current is measured by means of a pico-ampermeter. Retarding potential and measured current are computer controlled, allowing an easy acquisition and treatment with an energy resolution lower than 0.5 eV. In the absence of any emitted wave, after propagating along the helix, the beam exhibits a sharp velocity distribution function with a velocity width mainly limited by the analyzer resolution as shown in Figs. 9 and 10 (panel (a)). For Fig. 9a, the beam with radius 3mm is diffracted by passing through the three grounded grids of a spreader just after leaving the gun while for Fig. 10a, the beam radius is 1 mm and the spreader has beem removed for the sake of simplicity. B. 
Experimental implementation of the control term We apply an oscillating signal at the frequency of 30 MHz on one antenna. It generates two waves: a helix mode with a phase velocity equal to v = 4.07 10 6 m/s, a beam mode with a phase velocity equal to the beam velocity v b (in fact two modes with pulsation = kv b ± b corresponding to the beam plasma mode with pulsation b = (n b e 2 /m 0 ) 1/2, Doppler shifted by the beam velocity v b, merging in a single mode since b ≪ in our conditions). Figures 9 and 10 (panel (b)) show the measured velocity distributions of the beam after interacting with these two modes over the length of the TWT for two different values of the Chirikov parameter. The case with s = 0.85 was previously investigated. The red square (resp. blue triangle) shows the phase velocity v (resp.v b ) of the helix (resp. beam) mode on the middle of the resonant domain determined as the trapping velocity width of the helix mode v ± 2 eC h /m (resp. v b ± 2 eC b /m ) where is the signal amplitude applied on the antenna and C h = 3542mV (resp. C b = 286mV) is the real amplitude of the helix (resp. beam) mode. Both C h and C b are determined experimentally by the estimations of the coupling constant for helix C h (resp. beam C b ) mode. As shown in Fig. 11 the helix mode coupling coefficient C h is obtained by fitting a parabola through the measured upper bound velocity (circles) after the cold test beam with initial velocity equal to the wave phase velocity has been trapped by the wave at a given frequency over the total length of the TWT. As shown in Fig. 12, the beam mode coupling coefficient C b is obtained by fitting a parabola through the measured upper bound velocity (circles) for a beam with a mean velocity very different from the helix mode phase velocity at the considered frequency. These two domains overlap and the break up of invariant KAM tori (or barriers to velocity diffusion) results in a large spread of the initially narrow beam of Figs. 
9 and 10 (panel (b)) over the chaotic region. We now use an arbitrary waveform generator to launch the same signal at 30 MHz and an additional control wave with frequency equal to 60 MHz, an amplitude and a phase given by Eq.. The beam velocity is also chosen in such a way that the wave number of the helix mode at 60 MHz properly satisfies the dispersion relation function shown as circles in Fig. 1. We neglect the influence of the beam mode at 60 MHz since its amplitude is at least an order of magnitude smaller than the control amplitude as shown by comparing Figs. 11a and 12 for 30 MHz. As observed on Figs. 9 and 10(panel (c)) where the grey circle indicates the phase velocity of the controlling wave, the beam recovers a large part of its initial kinetic coherence. For s = 0.85 (see Fig. 9c) the beam does not spread in velocity beyond the reconstructed KAM tori, in agreement with the numerical simulations of Fig. 2. For the more chaotic regime (see Fig. 10b) with s = 1.27 the improvement of the kinetic coherence is still present as shown in Fig. 10c. It can no more be associated with the reconstruction of a local velocity barrier, as expected from is not yet evident. Two independent measures are shown by circles and squares to give an error estimate.. the numerical results in Fig. 3 (panel (c)). For this last overlap parameter an experimental exploration of the robustness of the method will be shown in the next section. C. Robustness of the method In our experiment the control term is given by an additional wave whose frequency, amplitude and phase are computed as shown in Sec. II. In order to quantify the robustness of the method we will compare the various experimental situations to a reference one (Fig. 10a). This reference is taken as the (initial) cold beam distribution function. An example of the distribution we were able to reach with control is given in Fig. 10c. 
The control amplitude is 140 mV in agreement with 144 mV given by the method up to experimental errors. The phase is chosen experimentally and arbitrarily labelled 0 o. The beam velocity is chosen equal to 2.498 10 6 m/s in agreement with 2.51 10 6 m/s as estimated from the dispersion relation shown in Fig. 1. We investigate the robustness of the control method with respect to variation of phase and amplitude in the approximate control term given by Eq.. We use the kinetic coherence indicator to quantify the effect of the control, defined as the ratio of variance of the cold beam distribution function over the variance of the distribution function. Other indicators (integral and uniform distances) were used and gave similar results. Figure 13 shows the velocity distribution functions for two values of the phase (−5 o and 22.5 o ) keeping the other parameters constant. It shows that for a phase equal to −5 o close to the reference value the two velocity distribution functions are very similar, and more peaked at −5 o than at 0 o. For 22.5 o, the control wave has the opposite effect, increasing chaos. In Fig. 14 we show the kinetic coherence as a function of the phase of the approximate control term. It shows a narrow region around the reference value where the control wave is the most efficient. In Fig. 15, we represent the kinetic coherence as a function of the amplitude of the control wave. When changing the control wave amplitude a resonance condition in a narrow region around the optimized amplitude is still observed. For amplitudes smaller than the reference (computed) value the effect of the control decays fast and the electron velocities are more widely spread than in the non-controlled case. Besides, for larger values, the beam velocity spread increases but the control term energy becomes comparable to the beam mode energy changing radically the initial system. 
We have observed, due to beam current conservation, a lower peak at initial beam velocity implies that electron velocities are more widely spread. An enlargement of distribution around the main peak is shown in Figs. 16 a,b and confirms that 140 mV appears to be the optimum. Finally we check the sensitivity of the control mode with respect to the initial beam velocity. This corresponds introducing an error both on the wave number and on the amplitude of the control mode. The overlap parameter s depends on the phase velocity difference between the helix and beam modes (see Eq. ( 2)); for such a reason we also measure the non-controlled velocity distribution function for each initial beam velocity. Figure 17a clearly exhibits the resonant condition expected at the reference value 2.51 10 6 m/s. We also note that, without control, chaos is continuously increasing as expected since when the phase velocity difference decreases resonance overlap (and chaos) increases. Figure 18 shows how two beams with close initial velocities with similar chaotic behavior have two different responses to the same control term. IV. SUMMARY AND CONCLUSION Even if a modification of the perturbation of a Hamiltonian system generically leads to the enhancement of the chaotic behavior, we have applied numerically and experimentally a general strategy and an explicit algorithm to design a small but apt modification of the potential which drastically reduces chaos and its attendant diffusion by channeling chaotic transport. The experimental results show that the method is tractable and robust, therefore constituting a natural way to control the dynamics. The robustness of the method has been with an additional cost of energy which corresponds to less than 1% of the initial energy of the two-wave system. 
We stress the importance of a fine tuning of the parameters of the theoretically computed control term (e.g., amplitude, phase velocity) in order to force the experiment to operate in a more regular regime. For such a reason an iterative process to find some optimal experimental conditions is suggested for future improvement of the method. Other control terms can be used to increase stability (by taking into account the other Fourier modes of f given in Eq. when experimentally feasible). The achievement of control and all the tests on a TWT assert the possibility of practically controlling a wide range of systems at a low additional cost of energy. V. ACKNOWLEDGMENT A.M. and F.D. are grateful to J-C. Chezeaux, D. Guyomarc'h, and B. Squizzaro for their skillful technical assistance, and to D.F. Escande and Y. Elskens for fruitful discussions and a critical reading of the manuscript. A. M. benefits from a grant from the Ministère de la Recherche. This work is partially supported by Euratom/CEA (contract EUR 344-88-1 FUA F).
import java.io.*;
import java.util.StringTokenizer;
import java.util.ArrayList;

/**
 * Codeforces 902B "Coloring a Tree": minimum number of subtree-paint
 * operations needed to turn an uncolored rooted tree into a target coloring.
 *
 * A vertex needs its own paint operation exactly when its target color
 * differs from its parent's target color, and the root always needs one.
 * So the answer is 1 + #{v > root : color[v] != color[parent[v]]}.
 *
 * This O(n) count replaces the previous Node-based simulation, whose
 * recursive color propagation was O(n^2) in the worst case and could
 * overflow the call stack on a path-shaped tree (recursion depth = tree
 * depth). It produces the same output for the problem's inputs, which list
 * each vertex's parent with a smaller index — the ordering the original
 * index-sequential propagation already relied on.
 *
 * Input (stdin): n, then n-1 one-based parent indices (parent of vertex
 * i+1), then n target colors. Output (stdout): the minimum paint count.
 */
public class CF902B {
    public static void main(String[] args) throws IOException {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        int n = Integer.parseInt(br.readLine());

        // parent[v] = 0-based index of v's parent; parent[0] (root) unused.
        // NOTE(review): assumes parent indices are < their child's index, as
        // guaranteed by the problem statement and assumed by the original code.
        int[] parent = new int[n];
        StringTokenizer st = new StringTokenizer(br.readLine());
        for (int v = 1; v < n; v++) {
            parent[v] = Integer.parseInt(st.nextToken()) - 1;
        }

        int[] color = new int[n];
        StringTokenizer ct = new StringTokenizer(br.readLine());
        for (int v = 0; v < n; v++) {
            color[v] = Integer.parseInt(ct.nextToken());
        }

        int paints = 1; // the root always needs its own paint operation
        for (int v = 1; v < n; v++) {
            if (color[v] != color[parent[v]]) {
                paints++;
            }
        }
        System.out.println(paints);
    }
}

/**
 * Tree node whose color cascades to all descendants on {@link #setColor(int)}.
 * Retained unchanged for compatibility with any other users in this package;
 * no longer used by {@code CF902B.main}. Beware: {@code setColor} recurses
 * over the whole subtree, so depth is bounded by the tree height.
 */
class Node {
    Node parent;                                       // null for the root
    ArrayList<Node> children = new ArrayList<Node>();
    int color;                                         // current color

    Node(int c) {
        color = c;
    }

    void addChild(Node a) {
        children.add(a);
    }

    void setParent(Node a) {
        parent = a;
    }

    int getColor() {
        return color;
    }

    // Paint this node and, recursively, its entire subtree.
    void setColor(int a) {
        color = a;
        for (int x = 0; x < children.size(); x++) {
            children.get(x).setColor(a);
        }
    }
}
package pl.fulllegitcode.utilunity; import android.app.FragmentManager; import android.app.FragmentTransaction; import com.unity3d.player.UnityPlayer; import pl.fulllegitcode.util.PermissionResult; import pl.fulllegitcode.util.RequestPermissionsCallback; import pl.fulllegitcode.util.RequestPermissionsDelegate; import pl.fulllegitcode.util.Util; public class UtilUnity { public static PermissionResult checkPermissions(String[] permissions) { return Util.checkPermissions(UnityPlayer.currentActivity, permissions); } public static void requestPermissions(String[] permissions, RequestPermissionsCallback callback) { final RequestPermissionsDelegate delegate = Util.requestPermissions(permissions, callback); final PermissionFragment fragment = new PermissionFragment(); fragment.requestCode = delegate.requestCode(); fragment.callback = new PermissionFragment.Callback() { @Override public void onCreate() { delegate.run(fragment); } @Override public void onResult(String[] permissions, int[] grantResults) { delegate.onRequestPermissionsResult(permissions, grantResults); } }; FragmentManager fragmentManager = UnityPlayer.currentActivity.getFragmentManager(); FragmentTransaction transaction = fragmentManager.beginTransaction(); transaction.add(fragment, "permission" + Math.random()); transaction.commit(); } public static float getBatteryLevel() { return Util.getBatteryLevel(UnityPlayer.currentActivity); } public static float getTemperature() { return Util.getTemperature(UnityPlayer.currentActivity); } }
package parser;

import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.tree.ParseTree;
import semirings.Semiring;
import java.util.ArrayList;
import java.util.List;

/**
 * Visitor that converts an ANTLR parse tree of a QSygus problem into the
 * project's own AST node classes (GTermNode, NTNode, RuleNode, GrammarNode,
 * OptimizationNode, TermNode, QSygusNode).
 *
 * The grammar uses right-recursive Plus/Star list rules; every visit method
 * walks the recursive tail and uses add(0, ...) to rebuild the list in source
 * order. "Plus" rules carry an element in their last link (hence the extra
 * add after each loop); "Star" rules end with an empty link (no trailing add).
 */
public class ASTVisitor extends QSygusParserBaseVisitor<ProgramNode> {

    // Not referenced in this class — presumably consumed by callers of the visitor. TODO confirm
    Semiring sr;

    /* gTerm : SYMBOL | literal | '(' SYMBOL gTermStar ')' | '(' 'Constant' sortExpr ')' | '(' 'Vairiable' sortExpr ')' | '(' 'InputVariable' sortExpr ')' | '(' 'LocalVariable' sortExpr ')' | letGTerm ; */
    /**
     * Builds a grammar term. A compound term '(' SYMBOL gTermStar ')' becomes
     * a node with children; any other alternative becomes a leaf whose label
     * is the text of the first child and whose children list is null.
     */
    @Override
    public GTermNode visitGTerm(QSygusParserParser.GTermContext ctx) {
        if(ctx.gTermStar() != null){
            List<GTermNode> children = new ArrayList<GTermNode>();
            QSygusParserParser.GTermStarContext gtermstar= ctx.gTermStar();
            // Star rule: the terminal link is empty, so the loop alone collects everything.
            while(gtermstar.gTermStar() !=null){
                children.add(0,visitGTerm(gtermstar.gTerm()));
                gtermstar = gtermstar.gTermStar();
            }
            return new GTermNode(ctx.SYMBOL().getText(),children);
        }
        // Leaf alternative (SYMBOL, literal, ...): keep raw text, no children.
        return new GTermNode(ctx.children.get(0).getText(), null);
    }

    /**
     * Builds one nonterminal definition: its name, sort (as flattened text),
     * and the list of weighted production rules.
     */
    @Override
    public NTNode visitNtDef(QSygusParserParser.NtDefContext ctx) {
        String ntName = ctx.SYMBOL().getText();
        String sort = getSplitedText(ctx.sortExpr());
        List<RuleNode> rules = new ArrayList<RuleNode>();
        QSygusParserParser.GTermPlusContext gtermplus = ctx.gTermPlus();
        // Plus rule: walk the tail, then pick up the element in the final link.
        while(gtermplus.gTermPlus() != null){
            rules.add(0,visitGTermWeighted(gtermplus.gTermWeighted()));
            gtermplus = gtermplus.gTermPlus();
        }
        rules.add(0,visitGTermWeighted(gtermplus.gTermWeighted()));
        return new NTNode(ntName, sort, rules);
    }

    /**
     * Builds one production rule: an optional list of literal weights plus the
     * grammar term. The weight list stays empty when no literals are present.
     */
    @Override
    public RuleNode visitGTermWeighted(QSygusParserParser.GTermWeightedContext ctx) {
        List<String> weight = new ArrayList<String>();
        QSygusParserParser.LiteralPlusContext literalPlus = ctx.literalPlus();
        if(ctx.literalPlus()!=null) {
            while (literalPlus.literalPlus() != null) {
                weight.add(0, literalPlus.literal().getText());
                literalPlus = literalPlus.literalPlus();
            }
            weight.add(0, literalPlus.literal().getText());
        }
        return new RuleNode(weight, visitGTerm(ctx.gTerm()));
    }

    /**
     * Builds the synth-fun command: function name, flattened argument list and
     * sort, and the list of nonterminal definitions forming the grammar.
     */
    @Override
    public GrammarNode visitSynthFunCmd(QSygusParserParser.SynthFunCmdContext ctx) {
        String funName = ctx.SYMBOL().getText();
        String argList = getSplitedText(ctx.argList());
        String sort = getSplitedText(ctx.sortExpr());
        List<NTNode> ntNodes = new ArrayList<NTNode>();
        QSygusParserParser.NtDefPlusContext ntdefPlus = ctx.ntDefPlus();
        while(ntdefPlus.ntDefPlus() != null){
            ntNodes.add(0, visitNtDef(ntdefPlus.ntDef()));
            ntdefPlus = ntdefPlus.ntDefPlus();
        }
        ntNodes.add(0, visitNtDef(ntdefPlus.ntDef()));
        return new GrammarNode(funName,argList,sort,ntNodes);
    }

    /**
     * Builds the weight-optimization command. A weight pair with a symbolPlus
     * tail yields a flag symbol plus a tuple of symbols; a bare pair yields a
     * null flag and a single-symbol tuple.
     */
    @Override
    public OptimizationNode visitWeightOptimizationCmd(QSygusParserParser.WeightOptimizationCmdContext ctx){
        String flag = null;
        List<String> weightTuple = new ArrayList<String>();
        QSygusParserParser.WeightPairContext weightpair = ctx.weightPair();
        if(weightpair.symbolPlus() != null){
            flag = weightpair.SYMBOL().getText();
            QSygusParserParser.SymbolPlusContext symbolPlus = weightpair.symbolPlus();
            while(symbolPlus.symbolPlus()!= null){
                weightTuple.add(0,symbolPlus.SYMBOL().getText());
                symbolPlus = symbolPlus.symbolPlus();
            }
            weightTuple.add(0,symbolPlus.SYMBOL().getText());
            return new OptimizationNode(flag, weightTuple);
        }
        weightTuple.add(weightpair.SYMBOL().getText());
        return new OptimizationNode(flag, weightTuple);
    }

    /**
     * Entry point: builds the whole program. Collects the (symbol, weight)
     * semiring declarations from the set-weight command, then partitions the
     * remaining commands around the synth-fun command, capturing the grammar,
     * the weight constraint and the weight optimization along the way.
     */
    @Override
    public ProgramNode visitProg(QSygusParserParser.ProgContext ctx) {
        List<QSygusNode.Tuple<String,String>> semirings = new ArrayList<QSygusNode.Tuple<String,String>>();
        QSygusParserParser.WeightPlusContext weightplus = ctx.setWeightCmd().weightPlus();
        while(weightplus.weightPlus() != null){
            semirings.add(0, new QSygusNode.Tuple<String, String>(weightplus.SYMBOL().getText(),weightplus.weight().getText()));
            weightplus = weightplus.weightPlus();
        }
        semirings.add(0,new QSygusNode.Tuple<String, String>(weightplus.SYMBOL().getText(),weightplus.weight().getText()));
        List<String> preCmds = new ArrayList<String>();
        List<String> postCmds = new ArrayList<String>();
        GrammarNode synthFun = null;
        TermNode weightConstraint = null;
        OptimizationNode weightOpt = null;
        QSygusParserParser.CmdPlusContext cmdPlus = ctx.cmdPlus();
        // NOTE(review): with post=true before the synth-fun is seen, commands
        // encountered first go to postCmds and later ones to preCmds; whether
        // that matches source order depends on the cmdPlus recursion direction
        // (the add(0,...) prepends suggest it does) — verify against the grammar.
        Boolean post = true;
        while(cmdPlus.cmdPlus() != null){
            if(cmdPlus.cmd().synthFunCmd() != null){
                synthFun = visitSynthFunCmd(cmdPlus.cmd().synthFunCmd());
                post = false;
                cmdPlus = cmdPlus.cmdPlus();
                continue;
            }
            if(cmdPlus.cmd().weightConstraintCmd() != null){
                weightConstraint = visitTerm(cmdPlus.cmd().weightConstraintCmd().term());
                cmdPlus = cmdPlus.cmdPlus();
                continue;
            }
            if(cmdPlus.cmd().weightOptimizationCmd() !=null){
                weightOpt = visitWeightOptimizationCmd(cmdPlus.cmd().weightOptimizationCmd());
                cmdPlus = cmdPlus.cmdPlus();
                continue;
            }
            // Any other command is kept as flattened text.
            if(!post)
                preCmds.add(0,getSplitedText(cmdPlus.cmd()));
            else
                postCmds.add(0,getSplitedText(cmdPlus.cmd()));
            cmdPlus = cmdPlus.cmdPlus();
        }
        // NOTE(review): the final command is always added to preCmds, even if it
        // is a synth-fun/weight command — harmless only if the last command is
        // always a plain one (e.g. check-synth); confirm against input files.
        preCmds.add(0,getSplitedText(cmdPlus.cmd()));
        return new QSygusNode(preCmds, postCmds, synthFun, semirings, weightConstraint, weightOpt);
    }

    /**
     * Builds a term tree: a compound term keeps its SYMBOL label and child
     * terms; any other alternative becomes a leaf with an empty child list.
     */
    @Override
    public TermNode visitTerm(QSygusParserParser.TermContext ctx) {
        String symbol;
        List<TermNode> children = new ArrayList<TermNode>();
        if(ctx.termStar() != null){
            symbol = ctx.SYMBOL().getText();
            QSygusParserParser.TermStarContext termStart = ctx.termStar();
            // Star rule: terminal link is empty, loop alone collects all terms.
            while(termStart.termStar() != null){
                children.add(0,visitTerm(termStart.term()));
                termStart = termStart.termStar();
            }
        }else{
            symbol = ctx.children.get(0).getText();
        }
        return new TermNode(symbol, children);
    }

    /** A define-fun command is represented purely by its body term. */
    @Override
    public TermNode visitFunDefCmd(QSygusParserParser.FunDefCmdContext ctx){
        return visitTerm(ctx.term());
    }

    /**
     * Flattens a subtree back to text, inserting a space before every token
     * (so the result always has a leading space for non-leaf input).
     */
    public String getSplitedText(ParseTree ctx){
        String result = "";
        if(ctx.getChildCount() != 0) {
            for (int i = 0; i < ctx.getChildCount(); i++) {
                result = result + " " + getSplitedText(ctx.getChild(i));
            }
        }else{
            return ctx.getText();
        }
        return result;
    }
}
Cardiovascular Manifestations of COVID-19 Severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), which is the cause of COVID-19, was first reported in Wuhan, China. SARS-CoV-2 especially involves alveolar epithelial cells, which results in respiratory symptoms more severe in patients with cardiovascular disease (CVD), probably linked with increased secretion of angiotensin-converting enzyme 2 in these patients compared with healthy individuals. Cardiac manifestations may contribute to overall mortality and even be the primary cause of death in many of these patients. A higher prevalence of hypertension (HTN) followed by diabetes mellitus and CVD was observed in COVID-19 patients. A higher case-fatality rate was seen among patients with pre-existing comorbid conditions, such as diabetes, chronic respiratory disease, HTN, and cancer, compared to a lesser rate in the entire population. Cardiovascular (CV) manifestations of COVID-19 encompass a wide spectrum, including myocardial injury, infarction, myocarditis-simulating ST-segment elevation myocardial infarction, nonischemic cardiomyopathy, coronary vasospasm, pericarditis, or stress (takotsubo) cardiomyopathy. This review is intended to summarize our current understanding of the CV manifestations of COVID-19 and also to study the relationship between SARS-CoV-2 and CVDs and discuss possible mechanisms of action behind SARS-CoV-2 infection-induced damage to the CV system. SARS-CoV-2 especially involves alveolar epithelial cells, which results in respiratory symptoms. These tend to be more severe in patients with CVD, and this is probably linked to increased secretion levels of angiotensin-converting enzyme 2 (ACE2) in these patients as compared to healthy individuals. The level of ACE2 in the heart and atherosclerotic vessels is increased in patients with coronary artery disease (CAD) and heart failure (HF). Invariably, the primary cause of death in COVID-19 infection is respiratory failure.
However, cardiac manifestations may contribute to overall mortality and may even be the primary cause of death in many of these patients. In 8%-25% of the overall COVID-19-infected population, concomitant cardiovascular (CV) conditions are observed, and this percentage is even higher among those who die. [7,9] In a recent meta-analysis of eight studies (46,248 patients), a higher prevalence of hypertension (HTN, 17% ± 7%) followed by diabetes mellitus (8% ± 6%) and cardiovascular disease (CVD, 5% ± 4%) was observed in COVID-19 patients. In another study of 44,672 cases, a higher case-fatality rate (CFR) was seen among patients with pre-existing comorbid conditions (10.5% for CVD, 7.3% for diabetes, 6.3% for chronic respiratory disease, 6% for HTN, and 5.6% for cancer) compared to the overall CFR of 2.3% in the entire population. In a retrospective study in which 191 patients were included, of whom 137 were discharged and 54 died in hospital, 48% of patients had a single comorbidity, with HTN being the most common (30%), followed by diabetes (19%) and coronary heart disease (8%). CV manifestations of COVID-19 encompass a wide spectrum, including myocardial injury, infarction, myocarditis-simulating ST-segment elevation myocardial infarction (STEMI), nonischemic cardiomyopathy, coronary vasospasm, pericarditis, or stress (takotsubo) cardiomyopathy. Elevated cardiac biomarkers indicate an unfavorable prognosis. Guo et al. conducted a study on 187 patients with COVID-19 of whom 52 (27.8%) exhibited myocardial injury as demonstrated by elevated troponin T (TnT) levels. Mortality was markedly higher in patients with elevated TnT levels than in patients with normal TnT levels (59.6% vs. 8.9%). The highest mortality (69.44%) and shortest survival were seen in those with both elevated TnT levels and underlying CVD. In a study by Shi et al.
comprising 416 hospitalized patients from Wuhan, 82 (19.7%) patients with myocardial injury were compared to 334 (80.3%) patients without myocardial injury. In a study by Wang et al., it was observed that 16.7% of patients developed arrhythmia and 7.2% experienced acute cardiac injury as well as other COVID-19-associated complications. Some cases of acute-onset HF, myocardial infarction (MI), myocarditis, and cardiac arrest have also been reported. Huang et al. reported that 12% of patients with COVID-19 were diagnosed as having acute myocardial injury, which is shown by elevated levels of high-sensitive troponin I (hsTnI). Based on early reports, patients with CVD may represent 25% of those in an intensive care unit (ICU), plus those with HTN accounting for 58% of patients. In addition, Zhou et al. found that myocardial injury, defined by raised serum cardiac troponin I (cTnI) levels, in COVID-19 patients was associated with an over 50% mortality rate. Furthermore, HF was prevalent in 23% of patients presenting with COVID-19, and was also more prevalent among patients who died compared to those who survived (51.9% vs. 11.7%). These reports provide evidence of cardiac involvement as a possible late phenomenon of the viral respiratory infection. This review is intended to summarize our current understanding of the CV manifestations of COVID-19 and also to study the relationship between SARS-CoV-2 and CVDs. We will also discuss possible mechanisms of action behind SARS-CoV-2 infection-induced damage to the cardiovascular system. CARDIOVASCULAR MANIFESTATIONS To outline the spectrum of CV presentations of COVID-19 is a difficult task. Based on the current evidence, it appears that the CV sequelae may range from direct or indirect myocardial injury, myocarditis, possible acute coronary syndrome (ACS), cardiac arrhythmias, HF, and cardiogenic shock. It is well known that the CV mortality is higher in influenza pandemics than in all other causes.
Acute respiratory viral infections, such as coronaviruses, are known to trigger factors for CVD. During the current COVID-19 pandemic, an increased morbidity and mortality has been reported in the elderly population and in those with comorbid conditions. The most prevalent comorbidities were HTN, diabetes, and CVDs. It is often difficult to differentiate between complications arising from comorbid conditions and possible direct CV damage by COVID-19 infection. Patients presenting with pre-existing CVD appear to have heightened vulnerability to develop COVID-19 and tend to have more severe diseases with worse clinical outcomes. Various CV risk factors also adversely affect prognosis of these patients, although they do not seem to increase likelihood of developing the infection. A meta-analysis of six published studies from China including 1527 patients with COVID-19 reported 9.7%, 16.4%, and 17.1% prevalence of diabetes, cardio-cerebrovascular disease, and HTN, respectively. Although the prevalence of diabetes and HTN in this cohort was same as in the Chinese general population, the prevalence of cardio-cerebrovascular disease was significantly higher. More importantly, the presence of diabetes, cardio-cerebrovascular disease, and HTN was associated with a 2-fold, 3-fold, and 2-fold greater risk of severe disease or requiring ICU admission, suggesting prognostic impact of these comorbidities. A much larger report from the Chinese Centers for Disease Control and Prevention described clinical outcomes in 44,672 confirmed cases of COVID-19. The overall CFR was 2.3% in the entire cohort but significantly higher (6%, 7.3%, and 10.5%, respectively) in patients with HTN, diabetes, and CVD. In this review, COVID-19's impact on the CV system will be divided into primary/direct or secondary/indirect cardiac involvement; there is, of course, considerable overlap between the two. Primary cardiac manifestations of COVID-19 disease include ACS, myocarditis, and arrhythmias. 
Secondary cardiac involvement is usually part of (due to) a systemic inflammatory syndrome and can manifest as acute myocardial injury/biomarker elevation and/or HF/congestive heart failure (CHF). Secondary cardiac involvement is often accompanied by the evidence of other end-organ damage. Finally, we will review additional vascular complications of COVID-19 disease. PATHOPHYSIOLOGY OF CARDIAC MANIFESTATION OF COVID-19 COVID-19 is caused by SARS-CoV-2, an enveloped, positive-sense, single-stranded RNA beta-coronavirus. Seven species of these beta-coronaviruses are known to cause human infections, with four mainly causing mild flu-like symptoms and the remaining three resulting in potentially fatal illnesses (SARS, Middle East respiratory syndrome [MERS], and the current COVID-19). Although the respiratory tract is the primary target for SARS-CoV-2, the CV system may be involved in several different ways. SARS-CoV-2 enters human cells by binding to ACE2, a membrane-bound aminopeptidase which serves many physiological functions in the lungs, heart, kidneys, and other organs. ACE2 plays an important role in the neurohumoral regulation of the CV system in normal health as well as in various disease conditions. It is highly expressed in lung alveolar cells, which provides an explanation for the respiratory symptoms experienced by patients with COVID-19. More than 7.5% of myocardial cells have positive ACE2 expression, based on single-cell RNA sequencing, which could mediate SARS-CoV-2 entry into cardiomyocytes and cause direct cardiotoxicity. The binding of SARS-CoV-2 to ACE2 can result in alteration of ACE2 signaling pathways, leading to acute myocardial and lung injury. More severe forms of COVID-19 are characterized by acute systemic inflammatory response and cytokine storm, which can result in injury to multiple organs, leading to multiorgan failure. Studies have shown high circulatory levels of pro-inflammatory cytokines in patients with severe/critical COVID-19.
Further, abnormal T-cell and monocyte responses have been observed in COVID-19 patients, leading to a systemic hyper-inflammatory response characterized by increased pro-inflammatory cytokine and chemokine production (tumor necrosis factor [TNF], interleukin [IL]-2, IL-6, IL-7, and CCL2, among others). Macrophage activation syndrome-like manifestations, classically associated with rheumatic diseases including Kawasaki disease, have also been reported in COVID-19 patients, supporting the hypothesis that the increase of Kawasaki-like presentations could be a result of COVID-19-induced systemic hyper-inflammation and consequent vasculitis. Xiong et al. observed that plasma TnT levels were significantly and positively linearly correlated with plasma high-sensitivity C-reactive protein (CRP) levels, indicating that myocardial injury may be closely associated with inflammatory pathogenesis. Huang et al. also highlighted that in patients with COVID-19, the cytokine storm resulted from an imbalance between T helper 1 and T helper 2 responses, itself leading to myocardial injury. The release of inflammatory cytokines after infection may cause reduction in coronary blood flow, decrease in oxygen supply, destabilization of coronary plaque, and micro-thrombogenesis. Increased cardiometabolic demand associated with the systemic infection, coupled with hypoxia caused by acute respiratory illness, can impair the myocardial oxygen demand-supply relationship and lead to acute myocardial injury. Myocardial injury can be the result of a mismatch between myocardial oxygen supply and demand, being classified as type 2 MI. Severe respiratory complications and subsequent hypoxia are common findings in patients with COVID-19. In a meta-analysis of 19 studies, including a total of 2874 patients, the most predominant chest X-ray finding was bilateral pneumonia, with ground-glass opacity being reported in 68.5% of patients.
In addition, ground-glass opacity was the most frequent chest CT finding (97.6%) in a Chinese cohort of 83 patients with COVID-19-related pneumonia and was associated with severe outcomes in all (100%) patients. Systemic infection and fever increase the metabolic needs of peripheral tissues and end organs, resulting in a rise of metabolic demands of the myocardial cells. The associated tachycardia decreases diastolic perfusion time, which may lead to inadequate subendocardial perfusion in patients with CAD, resulting in cardiac injury. Systemic inflammation as well as increased shear stress can precipitate plaque rupture, resulting in acute MI. The prothrombotic milieu created by systemic inflammation further increases the risk. Vascular endothelium is an active organ with paracrine, autocrine, and endocrine functions, which is vital for the regulation of vascular tone and the maintenance of vascular homeostasis. Endothelial dysfunction is the primary factor of microvascular dysfunction characterized by vasoconstriction and subsequent organ ischemia, inflammation associated with tissue edema, and a procoagulant state. Varga et al. observed the direct viral infection of endothelial cells in several patients, all of whom had underlying conditions including HTN, kidney disease, CAD, and diabetes mellitus. The presence of viral particles within endothelial cells and an accumulation of inflammatory cells, with the evidence of endothelial and inflammatory cell death, have been described by the authors, suggesting that SARS-CoV-2 infection facilitates endotheliitis as a direct consequence of viral involvement and of the host inflammatory response. Moreover, it has been suggested that the induction of apoptosis and pyroptosis might have an important role in endothelial cell damage in patients with COVID-19. COVID-19 endotheliitis could explain the systemic impaired microcirculatory function in different vascular beds and their clinical sequelae in patients with COVID-19.
Various antiviral drugs, corticosteroids, and other therapies aimed at treating COVID-19 can also have deleterious effects on the CV system. Electrolyte imbalances can occur in any critical systemic illness and precipitate arrhythmias, particularly in patients with underlying cardiac disorder. There is particular concern about hypokalemia in COVID-19, due to interaction of SARS-CoV-2 with renin-angiotensin-aldosterone system (RAAS). Hypokalemia increases vulnerability to various tachyarrhythmias. ACUTE CORONARY SYNDROMES ACSs comprise a spectrum of disease entities ranging from non-STEMI (NSTEMI) and unstable angina to STEMI. The first two often termed collectively as NSTE-ACS differ in their pathophysiological characteristics from STEMI, in that they result from an acute nonocclusive thrombus overlying a disrupted plaque. On the other hand, STEMI is usually attributable to an acute thrombosis overlying a disrupted plaque, which is completely occlusive of the epicardial coronary artery. Some pathophysiological mechanisms can be implicated to explain the matter. As ACE2 receptors are also present in the vascular endothelial cells, direct viral infection can lead to plaque instability and type 1 MI. Severe systemic inflammatory response in the third phase of the disease may lead to plaque instability and rupture. Microangiopathy has also been described to produce further coronary artery involvement. Such small-vessel involvement can be due to systemic vasculitis or due to microembolization from ACS or disseminated hypercoagulability impairing blood flow. Acute inflammation can lead to endothelial dysfunction, consequently causing vasoconstriction and ischemia in the sensitive organs such as heart. Furthermore, inflammation-induced endothelial dysfunction increases the risk of thrombosis and thrombotic events. 
In addition, fever and the surge of inflammation could increase the cardiac demand by increasing metabolism and heart rate, resulting in cardiac ischemia and cardiac events, particularly in patients with underlying CVD. Clayton et al. in a study have reported an association between recent respiratory infection and MI. This significant association was further highlighted in a meta-analysis of case-control studies. STEMI causes an acute myocardial injury pattern on electrocardiography (ECG) and needs to be immediately treated to prevent irreversible myocardial damage. Timely reperfusion strategy is the treatment of choice. Of the two available reperfusion therapies, primary percutaneous coronary intervention (PCI) is preferable to fibrinolytic therapy because it is safer and more effective. On the other hand, moderate- and high-risk NSTE-ACS patients who are medically stabilized can be treated with an urgent, but not necessarily emergent, invasive strategy (i.e., coronary angiography with intent to revascularize). COVID-19 patients with ST-segment elevations (STEs) often have nonobstructive coronary disease, which further adds to the complexity of treating these patients. Bangalore et al. reported a case series comprising 18 patients who developed ST-segment elevation with COVID-19 in New York. Among these, nine patients underwent coronary angiography. Of these nine patients, three had no obstructive CAD and 5/6 with obstructive disease underwent PCI (one after receiving thrombolytics). Hence, nonobstructive disease was observed in one-third of the patients who underwent coronary angiography. These cases of STE with no obstructive coronary disease on angiography may be related to perimyocarditis, although the pathophysiology remains under investigation. The prognosis of STEMI presentation in the setting of COVID-19 was worse, with a 72% rate of in-hospital mortality.
Interestingly, mortality in patients with nonobstructive coronary lesions was higher (90%) than among those with obstructive lesions (50%), although the absolute numbers were limited. Stefanini et al. studied 28 patients in Italy with STEMI and COVID-19. It was observed that STEMI represented the first clinical manifestation of COVID-19 in the majority of cases (85.7%). Early mortality was 39.3%. Of note, angiography demonstrated the absence of obstructive CAD in 39.3% of cases. The high prevalence of STEMI mimics in this population further emphasizes the need for angiography (either invasive or noninvasive) as opposed to empiric fibrinolytic therapy, given the potential for harm when administering fibrinolytics for non-ACS presentations. A study done in Hong Kong evaluated primary PCI in seven patients presenting with STEMI and calculated the time from onset of symptoms to medical contact to be a median of 318 min, compared with a median of 82.5-91.5 min recorded in the previous year. During the peak time of the COVID-19 outbreak, patients with STEMI presented significantly later after the onset of symptoms as compared to normal times the previous year. Similarly, the door-to-device time was almost 30 min longer, at a median of 110 min, compared with a median of 84.9 min the previous year. When put in the context of existing data on door-to-balloon (D2B) time and CV outcomes, while 60-90 min estimates the absolute risk reduction of 1-year mortality at 2.4%, the odds worsen when this time increases to 90-120 min. In the same study, an incremental increase in the D2B time by 1 h was associated with a 64% increase in 1-year mortality. While COVID-19 may potentially increase the risk of ACS, cath lab activations for STEMI in the United States (US) have decreased significantly during the pandemic. Among nine high-volume centers in the US, there was a 38% reduction in STEMI activations compared to the 14-month period before the pandemic.
A similar finding was reported in Spain where there was a 40% reduction in STEMI activations. Although the reason for this is not clear, it is postulated that it may be related to patients' fear of exposure to SARS-CoV-2 when presenting to the hospital. It is unknown how many people worldwide may not be seeking medical care for possible ACS due to fear of COVID-19. It is possible that due to delays in seeking appropriate medical care, patients may eventually present to the hospital with HF, cardiogenic shock, or mechanical complications from ACS. Studies must be performed to assess the impact COVID-19 that could have on the CV mortality through such indirect mechanisms. In the COVID-19 pandemic, the management of STEMI has been particularly difficult. Soon after presentation at the emergency department, multiple factors have been the cause of delay for a primary PCI approach, e.g., determining whether STEMI or COVID is the primary problem, and deciphering mimickers of STEMI, such as takotsubo cardiomyopathy, myocarditis, right ventricular (RV) failure, or massive pulmonary embolism (PE). Screening for COVID infection may take time, and within the catheterization laboratory, additional time is needed for donning and doffing personal protective equipment. In addition, redeployment of staff has resulted in shortages to team availability. From a procedure room standpoint, cardiac catheterization laboratories are positive pressure rooms with a significantly higher risk of aerosolization, and therefore, some have been converted to negative pressure rooms, while others must be fitted with high-efficiency particulate air filters. In addition, specific protocols for anesthesia, emergent intubation, and protection of staff and equipment have been implemented, all of which add time to the care of these patients. 
American College of Cardiology Interventional Council and Society for Cardiovascular Angiography and Intervention have issued statements regarding the management of STEMI during the COVID-19 pandemic, according to which primary PCI should remain the standard of care in these patients, with fibrinolytic therapy reserved for patients with relative contraindications, or those with severe bilateral COVID pneumonia or acute respiratory distress syndrome (ARDS), given their poor overall prognosis. NSTEMI patients with suspected or confirmed COVID-19 should be managed by aggressive medical management. High-risk patients or hemodynamically unstable patients should be managed by early invasive strategy (<24 h), as in all patients with NSTEMI. A noninvasive or medical approach would be the best for low-risk patients. During this pandemic, cardiologists have to face multiple challenges: on the one hand, there is the necessity to contain the spread of the infection; on the other hand, there is the increasing difficulty in identifying and treating patients with suspected or confirmed COVID-19 and contemporary cardiological urgencies; protocols are being constantly refined and updated, but clinical judgment will be fundamental too. For example, it is suggested that patients with STEMI and suspected COVID-19 infection should be treated by primary PCI only if they are at high cardiological risk, while low-risk cases can be treated with thrombolysis. Although this may be useful in the immediate period to contain the pandemic, the consequences of these new approaches are uncertain, and we will have evidence of this in the coming future. MYOCARDITIS Myocardial injury from SARS-CoV-2 infection may also occur via nonischemic mechanisms, such as acute and fulminant myocarditis and stress-induced cardiomyopathy. SARS-CoV-2 has been associated with cases of myocarditis and acute decompensated HF. 
Myocarditis was initially reported in China in cases of cardiogenic shock and reduced left ventricular ejection fraction (LVEF) among COVID-19 patients. These patients had extremely elevated levels of cardiac biomarkers (namely troponin, creatine kinase-MB [CK-MB], and brain natriuretic peptide [BNP]) and required inotropic or extracorporeal membrane oxygenation (ECMO) support to maintain adequate cardiac output. Based on the clinical presentation and elevations in biomarkers, these cases were diagnosed as "fulminant myocarditis" and were treated with a combination of steroids, intravenous immunoglobulins (IVIGs), antivirals, antibiotics, anti-inflammatory agents, renal replacement therapy, and mechanical ventilation. There is new emerging evidence that SARS-CoV-2 has a similar effect on the myocardium as SARS-CoV and MERS. Myocardial involvement has been confirmed in SARS-CoV-2-positive patients via magnetic resonance imaging (MRI) and endomyocardial biopsy. Cardiac biopsy findings in COVID-19 patients have shown inflammatory infiltration of the myocardium with T-lymphocytes and macrophages, interstitial edema, and in some cases, evidence of cytoplasmic vacuoles, indicating direct viral involvement of the myocardial cells. In a postmortem case series of 68 patients in a cohort of 150 COVID-19 patients from China, 7% of patients died of circulatory failure with some degree of myocardial involvement, as marked by the elevations in troponin. However, in 33% of cases, myocarditis could have played a contributing role to patient death. However, it is unclear if these cases can truly be classified as myocarditis, given the lack of ejection fraction assessment via transthoracic echocardiogram (TTE), MRI, or biopsy. From a different case series in Seattle, WA, troponin elevation was seen in 15% of patients, but none of the patients who underwent TTE had evidence of a reduced ejection fraction.
The differentiation between myocarditis and stress-induced cardiomyopathy can be challenging, since cardiac magnetic resonance (CMR) and/or biopsy are not available in most cases. Fried et al. and Sala et al. each reported a case of COVID-19 with mid-left ventricular (LV) or basal-to-mid-LV hypokinesia, a pattern of mid-ventricular or reverse takotsubo stress cardiomyopathy, respectively. Sala et al. reported on a 43-year-old woman with COVID-19 who presented with reverse takotsubo syndrome and mild LV systolic dysfunction. CMR revealed diffuse myocardial edema on the basal and mid-LV segments, with no detectable scar. Endomyocardial biopsy documented diffuse T-lymphocytic inflammatory infiltrates and marked interstitial edema without any other substantial damage. Molecular analysis showed the absence of the SARS-CoV-2 genome within the myocardium. The incidence of acute HF was 33% in critically ill patients with COVID-19 without a history of LV systolic dysfunction in Washington state. Importantly, cardiomyopathy can develop in COVID-19 with mild or absent respiratory symptoms. Although the clinical picture is still referred to as myocarditis in many instances, myocardial infection by SARS-CoV-2 was not proven in most cases with COVID-19 myocardial involvement. To date, only isolated case reports provided data on the pathology of the myocardial tissue from COVID-19 patients, which precludes drawing definitive conclusions on this topic. In a case report of a 69-year-old woman presenting with COVID-19 and cardiogenic shock, Tavazzi et al. described, for the first time, a biopsy-proven myocardial localization of viral particles. Although the clinical presentation suggested severe and necrotizing acute myocarditis, the pathology report demonstrated only low-grade myocardial inflammation and absence of myocyte necrosis.
Importantly, SARS-CoV-2 was only demonstrated in the interstitial cytopathic macrophages and their surroundings, whereas no viral particle was found in cardiac myocytes, which showed nonspecific damage (mainly focal myofibrillar lysis). Either transient viremia or infected macrophage migration from the lung might occur in COVID-19 patients with nonacute myocardial involvement. The available data seem to rule out a classic myocarditis presentation (i.e., direct infection of myocardial cells by the virus). Rather, it has been suggested that myocardial involvement in COVID-19 may be caused by the cytokine-release syndrome. A case reported by Hu et al. from China described the evaluation of a 37-year-old man who presented with chest pain, dyspnea, and diarrhea and was found to have fulminant myocarditis with acute elevations in cardiac troponin T (cTnT) and N-terminal pro-B-type natriuretic peptide (NT-proBNP) and an LVEF of 27%, with normal coronaries on computed tomography coronary angiography. He was treated with IVIG and methylprednisolone for immunosuppression, along with supportive care with vasopressors and diuretics, and ultimately had recovery of LV function (LVEF 66%) and improved biomarkers. A case report from Italy described a 53-year-old woman with confirmed SARS-CoV-2 infection who presented with fatigue, elevated cardiac biomarkers, ECG changes, and an LVEF depressed to 40% with diffuse hypokinesis. Cardiac magnetic resonance imaging showed increased wall thickness, diffuse biventricular hypokinesis, and diffuse late gadolinium enhancement involving most of the myocardium. Notably, her CRP was never elevated above the reference range, and she never suffered from respiratory failure. She was treated with antivirals, including lopinavir/ritonavir, and intravenous methylprednisolone, and on day 6, a repeat echocardiogram showed partial recovery of her LVEF.
This report suggests diffuse myocardial inflammation in some cases, rather than secondary inflammatory myocardial suppression. Other coronaviruses, including MERS-CoV, have been reported to cause acute myocarditis and HF. It remains possible that, in some cases, SARS-CoV-2 causes myocardial dysfunction through viral myocarditis; however, at the time of this writing, pathological evaluation of suspected cases of COVID-19-associated myocarditis, including the possibility of viral entry into cardiomyocytes, is extremely limited. The coronary microvasculature and endothelium may be at risk for viral entry due to ACE2 expression on these vascular cells. Only a few cases with myocarditis and pericarditis involvement were reported, without consistent histological evidence. Only one case of cardiac tamponade, in a 47-year-old man infected with SARS-CoV-2 and without CV risk factors, has been reported in the literature as a complication of myocarditis and pericarditis. The causes of hydropericardium and cardiac tamponade also include infectious and inflammatory causes (15%) and mechanical complications of MI (12%). Patients who died of complications of COVID-19 were elderly and often had comorbidities due to CVD. Myocarditis, with the presence of inflammatory infiltrates in the myocardial interstitium and with the structural damage validated by the laboratory markers of damage and cardiac necrosis, can be a secondary complication of the immune response and not of the direct action of the virus on cardiomyocytes. To date, there is no evidence of RNA coronaviruses in the heart. Considering the reported presence of ACE2 in different cell types and also in the heart, a hematogenous diffusion of the pathogen and a similar interaction as happens in the lung, the hypothesis remains that the SARS-CoV-2 action on the heart in old people is mediated by systemic imbalance caused by the alteration of the functioning of the RAAS against a background of comorbidity.
CMR availability issues, coupled with the potential for in-hospital spread of the virus related to the logistics of performing such tests, hamper the use of this valuable imaging modality. In this context, endomyocardial biopsy could provide key insights, whereas bedside echocardiography could give information on LV function. Certain ethnic groups may be disproportionately affected by SARS-CoV-2. COVID-19 death rates were shown to be higher among the African-American population than other ethnicities in many American states. Although this may partially be explained by the greater number of CV risk factors or the genetic predisposition to poorer cardiac outcomes, healthcare disparities cannot be dismissed. Bias in the health and care provisions may be the driving force behind disproportionate suffering in minorities. Arrhythmias could occur in the context of myocarditis. Peretto et al. reported in a recent study that 78.7% of myocarditis patients exhibited some form of ventricular arrhythmia. The characteristics of arrhythmias differ between active and healed myocarditis, suggesting that the pathophysiology is dependent on the stage of myocardial injury. ARRHYTHMIAS Arrhythmia has been recognized as one of the possible clinical manifestations of COVID-19 disease. One observational study of the clinical characteristics of COVID-19 patients in Hubei, China, reported a 7.3% incidence of palpitations among 137 patients. Wang et al. reported that arrhythmia was a cause of ICU transfer in 44.4% of COVID-19 patients. Caution is encouraged when interpreting these data, as the sample size tends to be small and hence prone to overestimation. The exact nature of arrhythmias was not usually reported, so assessing whether the arrhythmias are secondary to other conditions such as electrolyte imbalance or pre-existing arrhythmias is difficult. Therefore, the actual prevalence of arrhythmias in COVID-19 patients remains unknown. 
Nevertheless, arrhythmias could occur in the context of myocarditis. However, the largest observational study from China, with 1099 patients from 552 hospitals, did not report any arrhythmia. Goyal et al. in a recent retrospective case series on 393 consecutive patients with COVID-19 in two hospitals in New York City found that patients who received mechanical ventilation were more likely to have atrial arrhythmias (18.5% vs. 1.9%). Arrhythmia was seen in 7.4% of the entire cohort, with higher rates in patients receiving ICU care (18.5%) as compared to non-ICU care (1.8%). Arrhythmias may be induced by the presence of acidosis and metabolic disturbances, as seen in critical illness with multiorgan dysfunction or catecholaminergic pressor infusion for hypotension and shock. Finally, QT-prolonging agents given to some COVID-19 patients may increase the susceptibility to arrhythmia as discussed below. In 136 COVID-19 patients who experienced in-hospital cardiac arrest, Shao et al. revealed that the most common initial rhythm was asystole in 89.7% of cases. Pulseless electrical activity was found in 4.4%, whereas a shockable rhythm was identified in only 5.9% of patients. This was similar to another study where they found that, among patients who suffered a cardiac arrest, the predominant rhythm was asystole/pulseless electrical activity (94%), followed by shockable ventricular tachycardia/fibrillation (6%). Du et al. reported that arrhythmia occurred in 51 of 85 fatal cases of COVID-19 from Wuhan, and two patients died of malignant arrhythmias. Cardiac arrest-triggered sudden death appears to be a common cause of death in patients with COVID-19. In 85 fatal cases of COVID-19, cardiac arrest is the direct cause of death of seven patients. Moreover, patients with elevated TnT experienced higher risk of ventricular arrhythmias (17.3% in high TnT group vs. 1.5% in normal TnT group). 
In addition to acquired arrhythmia, patients with inherited arrhythmia syndromes, including long and short QT syndrome, Brugada syndrome, and catecholaminergic polymorphic ventricular tachycardia, are believed to be more susceptible to pro-arrhythmic effects of SARS-CoV-2 such as stress, fever, use of antiviral drugs, and electrolyte disturbance. The possible pathophysiology of arrhythmias in SARS-CoV-2 includes direct injury to cardiomyocytes disrupting the plasma membrane and electrical conduction; infection of the pericardium causing massive edema; ischemia from microvascular disease due to possible infection of the pericytes; re-entrant arrhythmias due to myocardial fibrosis or scars; and pro-inflammatory cytokines predisposing to arrhythmogenicity. Scenarios 1, 2, and 3 could occur in the acute setting, whereas scenarios 4 and 5 occur in chronic or healed myocarditis. In scenario 5, pro-inflammatory cytokines (e.g., IL-6) might cause displacement of plakoglobin, a desmosomal protein, from the cardiomyocyte membrane. This could be arrhythmogenic as inadequate cell-to-cell adherence is postulated to damage the cell membrane, leading to cardiac cell death and fibrofatty replacement. Moreover, reduced surface expression of desmosomal proteins is a known etiology of arrhythmogenic cardiomyopathies. Substantial evidence now suggests an increase in the serum IL-6 in COVID-19 patients, especially in those with severe presentations. Therefore, it is plausible that SARS-CoV-2 infection precipitates arrhythmias in patients with a genetic predisposition. Clinicians should be vigilant for arrhythmias, especially in areas where both the COVID-19 burden and the arrhythmogenic cardiomyopathy prevalence are high, such as the North-Eastern (Veneto) region of Italy. While there is a paucity of literature detailing COVID-19-related arrhythmogenic complications, there are reports of ventricular tachycardia and ventricular fibrillation as the late manifestations of COVID-19. 
An early case series from China reports a 16.7% incidence of arrhythmia but did not specify the cause or type. A later report found a 5.9% incidence of malignant arrhythmias, with a significantly greater incidence in those with evidence of myocardial injury (17.3% vs. 1.5%). This perhaps suggests that myocardial injury may serve as a substrate for subsequent cardiac arrhythmias, and frequent arrhythmia should heighten suspicion for a myocardial inflammatory process. This phenomenon may, in part, account for the reported increase in out-of-hospital arrests noticed during the COVID-19 pandemic period. Notably, however, analysis of in-hospital arrests in COVID-19 patients seem to be rarely from shockable rhythms (89.7% asystole, 4.4% pulseless electrical activity, and 5.9% shockable rhythm). Arrhythmias could also be precipitated by electrolyte imbalances, which have been observed in populations with COVID-19. The interaction of SARS-CoV-2 with the RAAS has caused increasing concerns about sodium and potassium disorders, which may increase vulnerability to various tachyarrhythmias. In addition, hypoxia, a common clinical manifestation of severe COVID-19, has been associated with alterations of cardiomyocyte gap junctions which could contribute to the development of atrial arrhythmias, especially atrial fibrillation. SARS-CoV-2 invades cells by binding to ACE2 receptors, which can enhance urinary potassium excretion due to increased availability of angiotensin II. Treatment of arrhythmias should focus on addressing all reversible causes, especially electrolyte abnormalities, and follow standard guidelines for the management of arrhythmias. In the setting of frequent and uncontrolled ventricular arrhythmia not responding to antiarrhythmic therapy, transvenous pacemaker insertion and/or mechanical circulatory support should be considered. Guo et al. 
reported sustained ventricular tachycardia or ventricular fibrillation in 5.9% of 187 patients in a designated hospital to treat patients with COVID-19 in China. Higher rates were noted among patients admitted to the ICU (44.4%) with a significantly higher incidence in patients with elevated TnT. Arrhythmias can also be induced by novel medical therapies for COVID-19; despite the unclear data about the effectiveness of chloroquine phosphate and hydroxychloroquine sulfate for the treatment of COVID-19, the Food and Drug Administration of the United States of America issued an emergency authorization for their use under determined circumstances in patients with COVID-19. Both agents may increase the risk for torsades de pointes or other ventricular arrhythmias via QTc prolongation and could also lead to advanced types of atrioventricular (AV) block. To the best of our knowledge, there are no specific reports on occurrence of bradycardia in COVID-19 infection. However, an experimental study has shown that coronavirus-infected rabbits have ECG abnormalities including second-degree AV block secondary to myocarditis and HF. In severely ill patients admitted in the ICU due to COVID-19, transient bradycardia and asystole may occur due to patient turning for prone position, intubation, or trachea suction that likely occur due to the temporary increased vagal tone. Currently, there are no special considerations or treatment algorithms specifically for arrhythmias related to COVID-19. The first principle for atrial arrhythmias presenting with rapid ventricular response (RVR) in the setting of COVID-19 is that there is no need to acutely lower heart rates in these patients if they are hemodynamically stable. Easing their respiratory distress with oxygen and treating their fever may decrease some of their drive for RVR. Furthermore, it is recommended that patients be initiated on appropriate anticoagulation in the absence of coagulopathy, bleeding, or other contraindications. 
To avoid iatrogenic hypotension, bradycardia, or decompensation of systolic HF, it is recommended to avoid intravenous calcium channel blockers. If patients are hemodynamically stable and without evidence of HF, oral β-blockers can be slowly introduced. If there is concern for acute HF, hypotension, or other hemodynamic derangements, amiodarone is the antiarrhythmic of choice. Patients presenting with unstable atrial arrhythmias or malignant ventricular arrhythmias should be treated as per advanced cardiac life support guidelines with immediate cardiology consultation. ACUTE MYOCARDIAL INJURY Myocardial injury is defined as an elevation of high-sensitivity cTnI above the 99th percentile upper reference limit. Possible pathophysiological mechanisms of myocardial injury are: 1. Direct myocardial injury: It is postulated that SARS-CoV-2 enters human cells by binding to ACE2, leading to acute injury in tissues where it is expressed. The virus has still not been isolated in the cardiac tissue; however, it is believed to be associated with cardiomyocyte degeneration, inflammatory infiltrates in the myocardial interstitium, vasculitis, and microthrombi formation. 2. Systemic inflammation: Cytokine storm observed in COVID-19 patients can result in injury to multiple organs, leading to multiorgan failure. Systemic inflammation is a well-known factor of plaque instability, being able to result in acute MI. Corroborating the inflammatory hypothesis, the plasma IL-6 seems to be consistently increased in patients with COVID-19 and cardiac injury, often evolving into life-threatening arrhythmias and/or fulminant myocarditis. 3. Hypoxia: Increased metabolic demand due to the systemic infection and hypoxia caused by respiratory distress can impair myocardial oxygen demand-supply.
The incidence of acute myocardial injury in COVID-19 patients, previously reported to be 7.2%, has recently been found to be much higher in two different studies, with an incidence of 19.7% and 27.8%, respectively. These studies also demonstrated that cardiac injury was independently associated with an increased risk of mortality and that COVID-19 patients with cardiac injury presented with more severe acute illness, worse radiographic findings, and a higher risk for invasive ventilation. A recent meta-analysis also suggested that there may be a correlation between the values of cTnI and the severity of clinical presentation: cTnI values were found to be significantly increased in COVID-19 patients with severe disease compared to mild-moderate cases. These findings are compatible with acute myocardial injury being predictive of negative outcomes in COVID-19 patients. Elevated troponin levels have been observed in 7% to 27.8% of COVID-19 patients. An increasing number of reports have described cardiac injury in the absence of coronary obstruction during severe COVID-19 infection. Two single-center studies described this clinical finding. In a retrospective cohort of 416 patients with laboratory-confirmed COVID-19, Shi et al. reported that 19.7% had evidence of myocardial injury as defined by an hsTnI value greater than the 99th percentile reference limit. In-hospital mortality was 51.2% among patients with myocardial injury compared with 4.5% among patients without myocardial injury. Furthermore, the mortality rate was associated with the magnitude of troponin elevation. Similarly, Guo et al. observed that among 187 patients hospitalized with COVID-19, 52 (27.8%) exhibited myocardial injury as demonstrated by the elevation of cTnT. In-hospital mortality was more than 6-fold higher in patients with elevated cTnT levels than in patients with normal cTnT levels (59.6% vs. 8.9%).
Moreover, patients with underlying CVD and increased cTnT levels comprised a subgroup with even higher mortality (69.4%). In contrast, patients with underlying CVD without cTnT elevation experienced a more favorable prognosis (mortality 13.3%), albeit still higher than patients without CVD or elevated cTnT (mortality 7.6%). In this study, cTnT levels were statistically significantly correlated with the blood concentrations of CRP and NT-proBNP, suggesting a link to the degree of systemic inflammation and myocardial wall stress. In both studies, patients with evidence of myocardial injury were also older with a higher prevalence of coronary heart disease, cerebrovascular disease, chronic HF, chronic renal failure, chronic obstructive pulmonary disease, HTN, and diabetes. Elevated troponin levels also have a strong prognostic implication in those with COVID-19 disease. Several studies have shown that those with elevated troponin levels at baseline have a greater risk of having a severe disease, increased ICU admissions, and significantly higher mortality. In a cohort study, the presence of elevated troponin levels was second to the presence of ARDS in the strength of association with mortality. Guo et al., in a single-center retrospective analysis of 187 COVID-19 patients, studied the relationship of baseline troponin levels and other comorbidities with mortality. They reported that the risk of death can be stratified according to the presence of elevated troponin and/or history of CVD. The risk of death in these patients increased linearly, with 7.62% of those dying with no history of CVD compared with 13.3% of those with presence of only history of CVD, 37.5% in those with presence of elevated troponin levels only, and 69.4% in those with both elevated troponin levels and history of CVD. Notably, elevated troponin level carried a strong prognostic value even in the absence of CVD history. 
In addition, the authors reported that in survivors, during the hospitalization period, the troponin levels remained stable and within normal limits. On the other hand, nonsurvivors showed a trend of gradual and progressive increase in the troponin levels. This suggests that troponin elevation may reflect progression of the disease to a severe stage, notably through a continual inflammatory surge. If troponin elevation occurs in the absence of clinical symptoms, ECG changes, and other indications, extensive investigations, such as echocardiography and coronary angiography, are not recommended routinely to exclude acute coronary event. Similarly, although it is crucial to ensure adherence to long-term prescribed CV therapies, it is unclear whether isolated elevation of troponin warrants any CV therapy. HEART FAILURE HF and cardiogenic shock appear to be the important causes of morbidity and mortality in COVID-19. The development of new HF is common in patients with COVID-19 disease. In a study of 191 patients with confirmed COVID-19 from two Chinese hospitals, 23% of patients had a clinical diagnosis of HF. Of the patients who died during the study, 52% had HF versus 12% with HF were among the survivors. Interestingly, the development of HF syndrome was more commonly observed than acute kidney injury. In another retrospective case series of 150 patients with COVID-19 from two Chinese institutions, 33% of deaths were attributed to respiratory failure with myocardial damage or HF, with an additional 7% reported as HF without respiratory failure. In a clinical review of these deaths, the researchers suggested that fulminant myocarditis may have been the etiology of the HF; however, no additional diagnostic details were included. Arentz et al., in a small US case series, identified seven out of 21 critically ill patients (33%) who developed cardiomyopathies during the course of their ICU stay. The exact etiology of ventricular failure in COVID-19 remains unknown. 
Patients developed dilated cardiomyopathy, characterized by globally decreased LV systolic function, clinical signs of cardiogenic shock, elevated CK or TnI levels, or hypoxemia, without a history of systolic dysfunction. Chen et al. from China reported HF as a complication in 24.4% of COVID-19 population, using age-related amino-terminal pro-BNP cut-offs, which yielded 90% sensitivity and 84% specificity for acute HF; there was significant difference in the prevalence of HF between COVID-19 survivors and nonsurvivors (3.2% vs. 49.4%). Among those with HF, nearly half did not have a previous history of HTN or CVD. In a meta-analysis of 43 studies involving 3600 patients, the prevalence of HF as a complication was 17.1% among critically ill patients compared to 1.9% among noncritically ill patients. HF is characterized by decreased LVEF and drastically elevated NT-proBNP. Guo et al. reported that patients with elevated TnT have a higher level of cardiac biomarkers and NT-proBNP. Moreover, a tight correlation was identified between NT-proBNP and TnT levels, indicating that patients with myocardial injury are at higher risks of cardiac dysfunction or HF. Although COVID-19 patients often display comorbidities affecting cardiac diastolic function including diabetes, obesity, and HTN, few studies have revealed a relationship between HF with preserved ejection fraction (HFpEF) and COVID-19. Sinkey et al. reported a case of a postpartum patient with COVID-19 and preeclampsia who developed HFpEF. Notably, loss of ACE2, the receptor for SARS-CoV-2, increases the pro-inflammatory macrophage phenotype in the heart from patients with HFpEF. Further study is warranted to explore the precise interplay between SARS-CoV-2 and HFpEF. HF in COVID-19 patients is attributable to myocardial injury, systemic inflammatory response, pulmonary HTN and ARDS, renal dysfunction, retention of water and sodium, and imbalance of myocardial oxygen demand and supply. 
Some experts have speculated that HF syndrome seen in COVID-19 is mediated predominantly through systemic inflammation and cytokine storm. This theory is grounded in the reports from several studies that have shown markedly elevated inflammatory markers including IL-6, D-dimer, and lactate dehydrogenase in patients with severe COVID-19. Higher levels of the serum BNP have been shown to correlate with cardiogenic pulmonary edema in ARDS. Interestingly, patients with COVID-19 may have high levels of BNP in the absence of significant ventricular dysfunction. Still, the presence of elevated cardiac biomarkers, particularly troponin, should raise clinical suspicion of HF. Interpretation of cardiac biomarkers does, however, present significant challenges as there are multiple mechanisms of cardiac injury. HF could be attributable to either the exacerbation of underlying CVD or the new onset of cardiomyopathy (particularly, myocarditis or stress cardiomyopathy) in patients with COVID-19. Isolated right HF can be observed in the presence of pulmonary HTN in the setting of severe ARDS or PE. Older adults with CVD often have LV hypertrophy and diastolic dysfunction. Thus, these patients may be prone to develop pulmonary edema when they are given copious amounts of intravenous fluids to maintain blood pressure or as a vehicle for parenteral drug infusion. However, pulmonary edema as observed in the setting of COVID-19 could also represent the manifestation of a pulmonary vascular injury, which might be direct or mediated by the excess of local angiotensin II, inducing severe vasoconstriction and microvascular dysfunction and activating the inflammatory process. Shock mechanisms in the era of COVID-19 could be either exclusively distributive (septic) or mixed (distributive and cardiogenic). Cardiogenic shock might be prominent in case of fulminant myocardial involvement. BNP and TTE are valuable to guide treatment.
Right heart catheterization can also be useful in case of discrepancy of other tests but at the expense of a higher risk of in-hospital spread of the virus. Patients with chronic HF who have an exacerbation in the context of COVID-19 infection, management with diuresis, and guideline-directed therapy is recommended, including continuing use of previously prescribed angiotensin-converting enzyme inhibitors (ACEis), angiotensin receptor blockers (ARBs), and angiotensin receptor-neprilysin inhibitors (ARNIs) in the absence of acute contraindications (hypotension and acute kidney injury). Although it has been hypothesized that angiotensin pathway inhibitors may increase viral cell entry, it has also been postulated that these agents may mitigate potential deleterious increases in angiotensin II activity with SARS-CoV-2 infection. Until additional evidence is available, recommendations from major CV societies advise continuation of these agents. Routine echocardiography is not encouraged in the absence of other indications (e.g., new murmur, hemodynamic instability) to limit personnel exposure. Patients presenting with new systolic dysfunction (e.g., stress cardiomyopathy and myocarditis) warrant cardiology consultation, echocardiography, and assessment of cardiac biomarkers. If these patients are hemodynamically stable, close monitoring of hemodynamics and telemetry, diuresis (as needed), and guideline-directed therapy is recommended, except for new initiation of ACEis, ARBs, or ARNIs outside of clinical trials. Once patients are recovering, these agents can be initiated toward the end of hospitalization with close monitoring or during early outpatient follow-up. If patients are hemodynamically unstable (e.g., fulminant myocarditis), an advanced HF team should be consulted for recommendations on hemodynamic management, including consideration of mechanical support. 
In COVID-19 patients not responding to conventional medical treatment, it is important to determine whether a concomitant cardiogenic component is present, particularly when considering mechanical respiratory and circulatory support with extracorporeal membrane oxygenation (ECMO), as this may have an impact on the selection of devices (venovenous vs. venoarterial). Initial data in the setting of ARDS showed that the prognosis remained poor even with use of ECMO (mortality rate: 82.3%). In 12 critically ill COVID-19 patients requiring ECMO, Zeng et al. found that nearly half of them died of septic shock and multiorgan failure. Duration of ECMO support ranged from 3 to 28 days. A pooled analysis of early reports including 234 ARDS patients revealed that only 7.2% received ECMO. The mortality rate was 94.1% in patients who received ECMO and 70.9% in patients on conventional therapy. The pooled effect of ECMO versus conventional therapy on mortality was neutral. The Extracorporeal Life Support Organization recommends the use of ECMO only in expert centers for patients with severe ARDS after multidisciplinary team discussion on a case-by-case basis. ECMO can be considered futile, and the patient can be returned to conventional management, if no lung or cardiac recovery is observed after 21 days. Venovenous ECMO is a treatment for refractory respiratory failure, and venoarterial ECMO may be used when the patient is also in need of circulatory support. However, it is unknown whether particular populations of patients respond better to therapy with ECMO than others and what criteria are best used for careful selection of patients who are most likely to benefit in the present resource-constrained environment. In addition, the time course for possible recovery and successful decannulation strategies from ECMO have not been well described at this time.
ADDITIONAL VASCULAR COMPLICATIONS OF COVID-19 DISEASE In autopsy evaluations of three patients who died of SARS-CoV-1, microthromboses and macrothromboses were observed. A prominent finding of SARS-CoV-2 is disarray of the coagulation and fibrinolytic system, with > 70% of nonsurvivors fulfilling most criteria for disseminated intravascular coagulation (DIC). It may be hypothesized that myocardial injury is a result of microthrombus formation in the myocardial vasculature in the setting of a hypercoagulable state like DIC. Patients suffering from COVID-19 infection are at risk for venous thromboembolic (VTE) and arterial thromboembolic events, especially in the setting of DIC. Infections and sepsis are the leading causes of DIC, in general. The exact mechanism of DIC in the setting of sepsis and ARDS is complex but is generally thought to be related to an immune-mediated exhaustion of the coagulation and fibrinolytic systems promoting bleeding and thrombosis in the same patient. Endothelial injury and inflammatory cytokines, such as IL-6 and TNF-α, upregulate tissue factor expression, driving a prothrombotic state. Dysregulation of antithrombin III, plasminogen activator inhibitor type 1, and protein C in the setting of significant inflammation and sepsis promotes an anticoagulated state. Furthermore, platelet activation also ensues in the context of sepsis and inflammation, further tipping the fine balance of the coagulation system. It is postulated that the immune activation seen in severe COVID-19 infection is likely sufficient to trigger DIC, microvascular dysfunction, and myocardial injury. Lodigiani et al. from Italy described a 7.7% incidence of at least one thromboembolic event among hospitalized patients with COVID-19. This rate can be as high as 31% in those requiring ICU-level care.
Further, acute arterial thrombotic events other than ACS, such as cerebrovascular accident or systemic thrombosis, have been observed in COVID-19 patients with no or few predisposing factors. Although the mechanism of coagulopathy is unclear, it is likely multifactorial with critical illness, inflammation, and endothelial dysfunction contributing to coagulopathy. It is evident that COVID-19 patients develop some degree of abnormal coagulation parameters. It has been reported from China that elevated levels of D-dimer (>1 µg/mL) and fibrin degradation products are strongly associated with in-hospital death. Thromboembolic anomalies and coagulopathy, including VTE, PE, and DIC, are believed to be highly prevalent in COVID-19 patients. For example, a high burden of PE was noted in COVID-19 patients, and the prevalence of PE was twice as high in COVID-19 ICU patients as in all ICU or influenza ICU patients. Retrospective studies have identified PE as the most common thrombotic event. Those with thrombotic complications have a higher risk of death and higher D-dimer levels. Chen et al. reported a median D-dimer level of 11.07 µg/mL compared to 2.44 µg/mL in those without PE. Wichmann et al. in a prospective cohort study examining 12 postmortem COVID-19 patient autopsies found deep venous thrombosis in 58% and PE in 33%. Thromboembolic events, particularly PE, may contribute to the rate of cardiac injury detected in severe COVID-19 disease as RV strain may lead to the elevation in cardiac biomarkers. Even in the absence of significant clot burden, PE may significantly impair RV performance, especially in the setting of high RV afterload that is characteristic of ARDS. In severe cases, cor pulmonale can develop, which may contribute to the mixed shock or sudden cardiac arrest observed in severe COVID-19 disease. CONCLUSION COVID-19, due to its greater transmissibility, has caused a pandemic, representing the most important public health crisis of the current era.
Patients having a history of CVD are especially vulnerable to COVID-19 and are often afflicted with severe forms of the infection. CV manifestations of COVID-19 can be either primary/direct or secondary/indirect. Primary cardiac manifestations of COVID-19 include ACS, myocarditis, and arrhythmias. Secondary cardiac involvement is usually due to a systemic inflammatory syndrome and can manifest as acute myocardial injury/biomarker elevation and/or HF, PE, and cardiogenic shock. Management of CV manifestations of COVID-19 has to be decided case by case as one size does not fit all, and a close collaboration of different teams is required to treat very sick patients. Financial support and sponsorship Nil. Conflicts of interest There are no conflicts of interest
Unisexual infections with Schistosoma haematobium in the white mouse. In unisexual infections with an Iranian strain of Schistosoma haematobium in white mice, female worms matured normally and deposited eggs in the liver but these eggs did not contain miracidia. Both female and male worms grew to more than half the length of corresponding mated worms in bisexual infections, and males acquired the normal number of testes.
Identification and Validation of a New Set of Five Genes for Prediction of Risk in Early Breast Cancer Molecular tests predicting the outcome of breast cancer patients based on gene expression levels can be used to assist in making treatment decisions after consideration of conventional markers. In this study we identified a subset of 20 mRNA differentially regulated in breast cancer analyzing several publicly available array gene expression data using R/Bioconductor package. Using RTqPCR we evaluated 261 consecutive invasive breast cancer cases not selected for age, adjuvant treatment, nodal and estrogen receptor status from paraffin embedded sections. The biological samples dataset was split into a training (137 cases) and a validation set (124 cases). The gene signature was developed on the training set and a multivariate stepwise Cox analysis selected five genes independently associated with DFS: FGF18 (HR = 1.13, p = 0.05), BCL2 (HR = 0.57, p = 0.001), PRC1 (HR = 1.51, p = 0.001), MMP9 (HR = 1.11, p = 0.08), SERF1a (HR = 0.83, p = 0.007). These five genes were combined into a linear score (signature) weighted according to the coefficients of the Cox model, as: 0.125FGF18 − 0.560BCL2 + 0.409PRC1 + 0.104MMP9 − 0.188SERF1A (HR = 2.7, 95% CI = 1.9–4.0, p < 0.001). The signature was then evaluated on the validation set assessing the discrimination ability by a Kaplan Meier analysis, using the same cut offs classifying patients at low, intermediate or high risk of disease relapse as defined on the training set (p < 0.001). Our signature, after a further clinical validation, could be proposed as a prognostic signature for disease free survival in breast cancer patients where the indication for adjuvant chemotherapy added to endocrine treatment is uncertain.
The array gene expression analysis "Mammaprint®" identifies a 70 gene-signature indicative for poor prognosis in patients with lymph node-negative disease or with 1-3 positive nodes, predicting chemotherapy benefit in the "high risk" group, vs. no apparent benefit in the "low risk" group, in a non-randomized clinical setting. It needs fresh/frozen tissue of the primary breast tumors. The multigene assay "Oncotype DX ® " evaluate gene expression analysis of 21 genes starting from paraffin-embedded tissue calculating a recurrence score to classify patients at low, intermediate, or high risk for recurrence. From two independent retrospective analyses from phase III clinical trial with adjuvant tamoxifen-alone control arms, the 21-gene recurrent score (RS) assay defines a group of patients with low scores who do not appear to benefit from chemotherapy, and a second group with very high scores who derive major benefit from chemotherapy, independently of age and tumor size [1,. Other studies using a supervised approach based on clinical outcome endpoint to tumor grade as a basis for gene findings have resulted in development of multiple commercial reference lab assays for prognostication (MapQuant Dx, Theros Breast Cancer Index ). The above-mentioned multigene assays are expensive and validations have been made on patients selected by age and nodal or Estrogen Receptor status and or received adjuvant treatment. Analyzing data from several array based gene expression wide analysis publicly available on NCBI Gene Expression Omnibus (GEO; http://www.ncbi.nlm.nih.gov/geo/), we identified a subset of 20 mRNA differentially regulated in breast cancer. We activated a protocol evaluating these markers to create a new gene signature based on real time PCR from paraffin embedded tissue and on a "real life" breast cancer patient population. The enrolled cases were not selected for age, adjuvant treatment, nodal and estrogen receptor status. 
Results and Discussion Formalin-fixed and paraffin-embedded (FFPE) tissues represent one of the largest tissue sources, for which well-documented clinical follow-up is available, and therefore large-scale retrospective studies are possible. As described recently by Bussolati et al., in a near future the possibility of obtaining high-quality total RNA from archival tissues will guarantee a more powerful and robust gene expression analysis. In order to identify a small number of informative genes providing prognostic information for breast cancer, we evaluated in silico a set of published signatures and tested by gene expression array on the 408 breast cancer cases deposited in NCBI Gene Expression Omnibus. By several steps involving univariate analysis for the association with disease free survival (DFS), unsupervised hierarchical clustering algorithm, and multivariate Cox modelling selection, we found 20 highly related genes with DFS. These candidate genes were subsequently evaluated in vitro by RTqPCR analyzing a total of 261 cases representing the training (137 cases) and the validation (124 cases) datasets (see the workflow shown in Figure 1). Gene Selection on the Published Datasets We used data deposited in NCBI Gene Expression Omnibus (GEO; http://www.ncbi.nlm.nih.gov/geo/, GEO Series accession number GSE1456 and GSE3494), including 408 breast cancer cases. Files containing raw intensity data of Affymetrix HU133A and HU133B arrays of the two datasets (GSE1456 and GSE3494) were preprocessed using R/Bioconductor (GCRMA package, quantile normalization, median polish summarization). The two data sets were pre-processed together using the supercomputer Michelangelo (http://www.litbio.org). The candidate genes were selected from the above mentioned datasets as those included in 4 previously proposed signatures: the "70-gene signature" developed by van de Vijver et al. and van't Veer et al. including 70 genes, the "recurrence-score" developed by Paik et al. 
including 21 genes, the "two-gene-ratio model" including 2 genes and the "Insulin Resistance" signature including 15 genes (Table 1). Since some genes are present in more than one signature, the final extracted set was made up of 98 genes (194 Affy-probes) ( Table 1). Gene Selection on the Merged GEO Datasets The 98 genes selected from the published signatures were first tested in univariate analysis for their association with disease free survival (DFS). Forty-eight genes resulted associated with DFS with a p value < 0.01 and were selected for the subsequent step. Using an unsupervised hierarchical clustering algorithm, 20 clusters were selected grouping genes with similar expression profiles. A gene was selected within each cluster using a multivariate Cox model, choosing the one most associated with DFS: the final 20-genes set, all highly associated with DFS, are reported in Table 2. Tumor Samples Among 350 consecutive invasive breast cancer patients with full information about tumor, adjuvant treatments, follow up, relapse, death and causes of death, treated between 1998 and 2001, 89 cases (25.4%) were removed from the study because of the low RNA concentration (below 10 ng/L) or high degradation (Ct values for ACTB and B2M over 34). The remaining 261 cases were split in two biological sample datasets: The training (137 cases) and the validation set (124 cases) by a simple criteria of consecutiveness. The clinical and demographic characteristics of the patients included in the training and in the validation set are summarized in Table 3 and reported in detail in the supplementary file. Due to a simple criteria of consecutiveness building the sets, the Training set has a longer mean follow up (100.7 months; range 59-123) as compared with the Validation set (89.2; 61-121). Nevertheless, the only significant differences between the two sets was the use of anthracycline-based regimens in the adjuvant setting (Training 16% vs. 
Validation 32.2%; p = 0.01) and a higher incidence of G3 tumors in the Validation Set (30.6% vs. 19.7%, p = 0.04). The lack of information about HER2 Status is related to the temporal context of the selected cases and it was evaluated "a posteriori" just in 40% of relapsed patients. Any other clinical and biological pattern is similar and reflecting the "real life" picture of the disease in North East of Italy at this time. Signature Definition on the Training Set A multivariate stepwise Cox analysis was run on the breast cancer samples including the 20 selected genes. The Cox model selected a final set of five genes independently associated with DFS (Table 4). These five genes were combined into a linear score (signature) weighted according to the coefficients of the Cox model (Table 4), as: This score ranged from −2.95 to 2.91, with a mean value of −0.48 and a SD of 1.00. The linear score was highly associated with DFS in the training set: HR = 2.7, 95% CI = 1.9-4.0, p < 0.001. The score was then categorized in three groups according to the tertiles of its distribution. The DFS according to the three risk groups is reported in Figure 2. Signature Evaluation on the Validation Set The signature defined on the training set was evaluated on the independent set of data of the 124 patients included in the validation set. The discrimination ability of the signature was assessed on the validation set by a Kaplan Meier analysis, using the same cut offs classifying patients at low, intermediate or high risk of disease relapse as defined on the training set. The score resulted highly associated with DFS also in the validation set (p < 0.001) (Figure 3). Patients with an "intermediate risk" signature had an HR = 2.1 (95% CI = 0.72-6.2, p = 0.17) and patients with a high risk signature had an HR = 5.4 (95% CI = 2.0-14.4, p = 0.001) as compared to patients with a low risk signature.
Inter and Intra Assay Reproducibility Three serial sections from three cases each were evaluated independently in triplicate calculating the coefficients of variation (CVs) for the Recurrent Score in the same run and in different runs. The intra-assay and the inter-assay CVs was 3.7% and 4.7%, respectively. Multivariate Analysis The Multivariate Analysis (Cox Regression) indicates that Nodal Status (p = 0.00001), T Size (p = 0.0002) and the five-gene Signature (p = 0.0004) are significantly related to DFS, while Ki67 (cut off: 14%), Grading and Chemo-or Endocrine Adjuvant Treatments are not ( Table 6). The five-gene Signature HR is slightly affected by adjuvant treatments: Table 7 summarized data about the five-gene signature in presence or absence of Adjuvant treatment. Discussion In this study we developed a five-gene recurrence score able to estimate the likelihood of recurrence in a series of consecutive breast cancer tissue samples. These five informative genes were selected by a multistep approach summarized in Figure 1. Firstly, we identified in silico a subset of 20 mRNA differentially regulated in breast cancer analyzing several publicly available array gene expression data using R/Bioconductor package. We further evaluated, in vitro, the expression level of these 20 genes in 261 consecutive invasive breast cancer cases not selected for age, adjuvant treatment, nodal and estrogen receptor status from paraffin embedded sections. The only requested feature was a minimum follow up of 5 years with full clinical data. Each tissue block was reviewed by a pathologist to ensure greater than 70% content of tumor cells. The gene expression analysis was based on RTqPCR. The biological samples dataset was split into a training and a validation dataset. The gene signature was developed on the training set by a multivariate stepwise Cox analysis selecting five genes independently associated with DFS. 
These five genes were combined into a linear score (signature) weighted according to the coefficients of the Cox model. The signature was then evaluated on the validation set assessing the discrimination ability by a Kaplan Meier analysis, using the same cut offs classifying patients at low, intermediate or high risk of disease relapse as defined on the training set. These five genes of interest were identified without any a priori selection for gene function or cancer involvement, but simply for the relationship between their expression level and DFS. Interestingly, except for SERF1a which the function is still unknown, they have been described to play an important role in cancer as follows: (a) FGF18: Its over-expression in tumors has also been demonstrated. FGF18 expression is up-regulated through the constitutive activation of the Wnt pathway observed in most colorectal carcinomas. As a secreted protein, FGF18 can thus affect both the tumor and the connective tissue cells of the tumor microenvironment. (b) BCL2: Over-expression of BCL2 protein has been identified in a variety of solid organ malignancies, including breast cancer. BCL2 transcript over-expression is related to unfavorable prognosis in Oncotype Dx and in Mammaprint®. (c) PRC1: It associates with the mitotic spindle and has been found to play a crucial role in the completion of cytokinesis. PRC1 is negatively regulated by p53 and it is over-expressed in p53 defective cells suggesting that the gene is tightly regulated in a cancer-specific manner. (d) MMP9: Metalloproteases are frequently up-regulated in the tumor microenvironment. MMP9 influence many aspects of tissue function by cleaving a diverse range of extracellular matrix, cell adhesion, and cell surface receptors, and regulate the bioavailability of many growth factors and chemokines. (e) SERF1a: The function of SERF1a is not already known. 
The biological properties of these genes are related with four of the six hallmarks of cancer proposed by Hanahan et al. : FGF18 should be included in "Self-sufficiency in growth signal" group, BCL2 in "Evading apoptosis" group, PRC1 in "Limitless replication potential" group, MMP9 in "Tissue invasion and metastasis" group, while the function of SERF1a is still unknown. These findings establish a link between our proposed molecular signature of breast cancer and the underlying capabilities acquired during the multistep development of human tumors previously categorized. For an experimental point of view, our assay appears affordable, not time consuming, it needs FFPE tissue and it might be performed easily in almost all laboratories with the required RT-qPCR instrumentations. Importantly it was validated on a "real life" clinical setting with a set of consecutive breast cancer cases irrespectively from age, nodal and estrogen receptor status, adjuvant treatment with at least a minimum follow up of 5 years. An important limit of our approach was that the test was possible in 74.6% of the initial set of cases due to RNA degradation from FFPE tissues according to the literature regarding other signatures. RNA degradation can be monitored simply evaluating the Ct values of the housekeeping genes used for normalization. Multicentric studies will be needed to evaluate possible pitfalls due to experimental inter-laboratory variability and above all increasing the reliability of the assay. A further step will be the analysis of the predictive value of the five-gene signature in ER positive population of tamoxifen alone benefit and of chemotherapy added to tamoxifen. 
Tumor Samples Enrolled in This Study Tumor samples were obtained from routinely processed formalin-fixed, paraffin embedded sections retrieved from 350 consecutive invasive breast cancer patients with full information about tumor, adjuvant treatments, follow up, relapse, death and causes of death, treated between 1998 and 2001. In order to test our signature in a "real life" clinical setting, we decided to use consecutive non metastatic breast cancer cases irrespectively from age, nodal and estrogen receptor status, adjuvant treatment. The only requested pattern was a minimum follow up of 5 years with full clinical data. All patient information was handled in accordance with review board approved protocols and in compliance with the Helsinki declaration. Hematoxylin and Eosin (H & E) sections were reviewed to identify paraffin blocks with tumor areas. Histological type and grade were assessed according to the World Health Organization criteria. The detailed histological and clinical feature of each patient enrolled in this study is available in the supplementary information file. Paraffin blocks corresponding to histology sections that showed the highest relative amount of tumor vs. stroma, few infiltrating lymphoid cells and that lacked significant areas of necrosis were selected. Three 20 m thick sections were cut, followed by one H & E control slide. The tumor area selected for the analysis was marked on this control slide to ensure greater than 70% content of neoplastic cells. Tumor areas dissected ranged from 0.5 to 1.0 cm 2 wide. Ethics Statement The use of tissues for this study has been approved by the Ethics Committee of Centro Oncologico, ASS1 triestina & Universit di Trieste, Italy. A comprehensive written informed consent was signed for the surgical treatment that produced the tissue samples and the related diagnostic procedures. 
All information regarding the human material used in this study was managed using anonymous numerical codes, clinical data were not used and samples were handled in compliance with the Helsinki declaration (http://www.wma.net/en/30publications/10policies/b3/). RNA Isolation Paraffin-embedded tumor material obtained from the 20 µm thick sections was de-paraffinized in xylene at 50 °C for 3 min and rinsed twice in absolute ethanol at room temperature. Total RNA was extracted using the RecoverAll kit (Ambion, Austin, TX, USA), including a DNase step according to the manufacturer's recommended protocol. RNA concentration was measured by Quant-iT™ RNA kit (Invitrogen, Carlsbad, CA, USA). Two Step RTqPCR Analysis Fourteen µL of total RNA was subjected to reverse transcription using SuperScript® VILO™ cDNA Synthesis kit (Invitrogen, Carlsbad, CA, USA) according to the manufacturer's recommended protocol. One microlitre of cDNA was amplified in duplicate adding 10 picomoles of each primer (see Table 8 for sequence details) to the 1x QuantiFast™ SYBR® Green PCR solution (Qiagen, Hilden, Germany) in a final volume of 25 µL. Cycling conditions consisted of 5 min at 95 °C, 10 s at 95 °C, 30 s at 60 °C for a total of 40 cycles, using Stratagene Mx3000™ or ABI SDS 7000™ instruments. Plate reading was performed during the 60 °C step. For each primer set, standard curves made from serial dilutions of cDNA from MCF7 cell lines (see Table 2) were used to estimate PCR reaction efficiency (E) using the formula: E (%) = (10^(−1/slope) − 1) × 100. The expression levels of each of the 20 genes selected were normalized by GeNorm using 2 housekeeping genes (B2M and ACTB) and the relative quantification was calculated by the statistical computing language R. The human breast cancer cell line MCF7 was purchased from American Type Culture Collection (ATCC HTB22; derived from a human breast adenocarcinoma).
Cells were maintained in minimal essential medium (MEM) (Invitrogen/Life technologies, Villebon-sur-Yvette, France) supplemented with 2 mM L-glutamine, 1.5 g/L sodium bicarbonate, 0.1 mM nonessential aa, 1 mM pyruvate sodium, 0.01 mg/mL bovine insulin, and 10% fetal bovine serum (Thermo Scientific, Waltham, MA, USA) at 37 °C in a humidified atmosphere of 5% CO 2. Training and Validation Dataset The biological samples dataset was split into the training and the validation dataset. The training set consists of the first 144 consecutive cases and the validation of the last 127 cases. The gene signature was developed on the training set. Once the signature has been fully specified, the validation set was accessed once and only for estimating the prediction accuracy of the signature. A multivariate stepwise Cox analysis was run on the breast cancer training set samples including the 20 selected genes. The stepwise procedure was run to select genes independently associated with DFS (p for inclusion <0.10). The overall workflow shown in Figure 1 summarizes every step starting from selection of markers from the literature since the validation of the gene signature. Reproducibility within and between blocks was assessed by performing the test in serial sections from three blocks representing three cases. We finally performed a multivariate Cox proportional-hazards analysis in a model that included treatment received (no adjuvant therapy vs. chemotherapy, hormonal therapy, or both) and the final gene Signature (both Training and Validation sets included), using the NCSS 2001 Statistical software (NCSS Inc., Kaysville, UT, USA, 2001). Univariate and Multivariate Analysis We performed a univariate analysis including Age, T size, Nodal status, Grading, Ki67, adjuvant treatments and the 5-gene signature, followed by a multivariate Cox proportional-hazards analysis in a model that included treatment received (no adjuvant therapy vs. 
chemotherapy, hormonal therapy, or both) and the 5-gene Signature (Low/Intermediate/High Risk; both Training and Validation sets included), using the NCSS 2001 Statistical software (NCSS Inc., Kaysville, UT, USA, 2001). Conclusions We developed a prognostic tool for early breast cancer based on the analysis of the relative expression level of FGF18, BCL2, PRC1, MMP9 and SERF1A in combination. Our signature has a good discriminating ability when tested on the validation set. We suppose that, after a necessary further clinical validation on a higher number of cases, it could be proposed as non expensive prognostic signature for disease free survival in breast cancer patients where the indication for adjuvant chemotherapy added to endocrine treatment is uncertain.
/** * by combining these two resources, we will have it to be freed out when the Map or reduce Task is done. * */ public static class ReusableSerializationResource { private Kryo kryo; private ByteBuffer buffer; public ReusableSerializationResource (Kryo kryoInstance, ByteBuffer bufferInstance) { this.kryo = kryoInstance; this.buffer = bufferInstance; } public Kryo getKryoInstance() { return this.kryo; } public ByteBuffer getByteBuffer() { return this.buffer; } public void freeResource() { try { if ( (buffer != null) && (buffer.isDirect())) { Field cleanerField = buffer.getClass().getDeclaredField("cleaner"); cleanerField.setAccessible(true); Cleaner cleaner = (Cleaner) cleanerField.get(buffer); cleaner.clean(); } } catch (Exception ex) { LOG.error("fails to free shuffle resource.", ex); } } }
This invention is directed to a frame for displaying objects. More particularly, the invention is directed to a way of supporting the frame in different orientations so that the object in the frame can be viewed from the desired orientation. Principally the invention is directed to frames in which pictures will be displayed and more particularly to low cost plastic frames. The particular frame of this invention is designed to rest on a support surface as opposed to a frame that is hung on a wall. In the past, frames that have been designed to rest on a support surface such as a desk or a table had a support to maintain the frame in a desired orientation. Usually this support is a portion of the back of the frame that is hingedly connected to the frame. The hinge member can be pivoted away from the back of the frame to provide support for the frame in the desired orientation. In recent years plastic has been used as a frame material and in particular transparent plastic. The plastic material can be easily shaped to provide a low cost frame that can be used for displaying pictures. However, it is very difficult to incorporate a hinged support section in the back of a plastic material to provide a support that will maintain the frame at the proper orientation when the frame is positioned on a desk or a table. It is very difficult to incorporate a hinged member into a frame that is made of a hard plastic material. On some plastic frames a portion of the back plate of the frame pivots out from the frame to provide the support member. In these applications the plastic material acts as the hinge mechanism for the support member. However, it is difficult to build such a plastic hinge into the material and such hinges can be broken if used improperly. Other plastic frame manufacturers have developed a support pedestal upon which the frame could be rested to provide support when the frame was used on a desk top or a table.
The support pedestal is usually a plastic L-shaped member that is bulky and makes it difficult to package the frame in a compact package that can be easily shipped and displayed in a store. Accordingly, there is a need in the industry to provide a low cost plastic picture frame having a support member that will allow the frame to be utilized on a desk or a table where its support member can be easily packaged with the frame. Accordingly, it is an object of the invention to provide an improved picture frame. It is an object of the invention to provide a picture frame with a removable support member that can be positioned flat against the back of the picture frame to facilitate packaging of the frame. These and other objects of the invention will become apparent from a review of the following detailed description of the invention.
package demo.com.campussecondbookrecycle.Adapters; import android.content.Context; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ImageView; import android.widget.TextView; import androidx.annotation.NonNull; import androidx.recyclerview.widget.RecyclerView; import com.bumptech.glide.Glide; import java.util.List; import demo.com.campussecondbookrecycle.Models.BookBrief; import demo.com.campussecondbookrecycle.R; public class BookListAdapter extends RecyclerView.Adapter<BookListAdapter.BookListViewHolder> { private Context mContext; private List<BookBrief> books; private OnItemClickListener mItemClickListener; public BookListAdapter(Context mContext) { this.mContext = mContext; } public void setData(List<BookBrief> books){ this.books = books; } // public BookListAdapter(Context mContext, List<BookBrief> books) { // this.mContext = mContext; // this.books = books; // } public void setOnItemClickListener(OnItemClickListener onItemClickListener){ mItemClickListener = onItemClickListener; } @NonNull @Override public BookListViewHolder onCreateViewHolder(@NonNull ViewGroup parent, int viewType) { View viewItem = LayoutInflater.from(mContext).inflate(R.layout.item_list_books,parent,false); BookListViewHolder viewHolder = new BookListViewHolder(viewItem); viewHolder.itemView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { mItemClickListener.onItemClick(v); } }); viewHolder.itemView.setOnLongClickListener(new View.OnLongClickListener() { @Override public boolean onLongClick(View v) { mItemClickListener.onItemLongClick(v); return true; } }); return viewHolder; } @Override public void onBindViewHolder(@NonNull BookListViewHolder holder, int position) { BookBrief bookBrief = books.get(position); Glide.with(mContext).load(bookBrief.getMainImage()).into(holder.mIvBook); Log.d("url",bookBrief.getMainImage()); 
holder.mTvAuthor.setText(bookBrief.getAuthor()); holder.mTvName.setText(bookBrief.getName()); holder.mTvPrice.setText("¥" + bookBrief.getPrice()); String desc = ""; int quality = bookBrief.getQuality(); switch (quality){ case 1: desc = "全新品"; break; case 2: desc = "良品"; break; case 3: desc = "中品"; break; } holder.mTvQuality.setText(desc); } @Override public int getItemCount() { return books == null ? 0 : books.size(); } class BookListViewHolder extends RecyclerView.ViewHolder{ TextView mTvName, mTvAuthor, mTvPrice,mTvQuality; ImageView mIvBook; public BookListViewHolder(@NonNull View itemView) { super(itemView); mTvName = itemView.findViewById(R.id.tv_name); mTvAuthor = itemView.findViewById(R.id.tv_author); mTvPrice = itemView.findViewById(R.id.tv_price); mIvBook = itemView.findViewById(R.id.iv_book); mTvQuality = itemView.findViewById(R.id.tv_quality); } } public static interface OnItemClickListener{ void onItemClick(View view); void onItemLongClick(View view); } }
Q: Dual N/P-Channel MOSFET Dies with Smoke I have built the following N-MOS & P-MOS push-pull dual MOSFET circuit. Its purpose is to control some external LEDs from a 3.3V microprocessor. However, there seems to be a problem, where the dual MOSFET chip “SI4554DY-T1-GE3 Dual N/P-Channel” dies a horrible fumy smoke death, when 12V is connected as shown in the schematic below. The smoke appears even when no load is connected and the MOSFETs are not switched (idle). As far as I can see in the datasheet, none of the limits (V[GS] < 20V, V[DS] < 40V) are exceeded. Can you help in identifying the problem? Thank you! A: Your Push-Pull configuration is inverted. N-channel MOSFET is supposed to be connected to +ve rail and P-channel MOSFET should be connected to -ve rail. Your circuit blows up because both the MOSFETs will turn on for some amount of time when input changes from low-to-high or high-to-low. This will cause short circuit and you will get the magic smoke! Please see the reference link below: http://www.talkingelectronics.com/projects/MOSFET/MOSFET.html A: Push pull-circuits of that design are notorious for fusing through due to inadvertently turning on both mosfets simultaneously. Obviously, this can happen during switching, but it can also happen as the power is applied to the circuit. The current pulse is normally very short, however, the smaller the mosfet devices the more probable a failure will occur on one or both of them. As such, when using rail-rail push-pull drivers like this it is required that some protection be provided to ensure that the current can not spike through the bridge. Below is an example that uses an in-line inductor as a current choke. L1 and D1 in the schematic above should be sized to limit the rise time of the current to be significantly less than the switching time of the mosfets. Resistor R2 should be included to force the circuit into a particular state while the logic that is driving it is powering up. 
This is especially true if the signal originates from a micro that is initially configured as a high-impedance pin. Whether this resistor is pulled to ground or logic 1 will depend on which state you want the output to start in. C1 is intended to try and protect the MOSFETs from any start-up voltage spikes on the power supply. R1 should also not be over-sized. It needs to drain the capacitance of M1 and charge M2 quickly enough when the transistor turns off. Ultimately, with this type of driver, it is preferred that separate control signals be used with a built-in dead-time where both switches are turned off before one is turned on. In addition to giving you more protection for your driver, it also adds the functionality of being able to disconnect the output entirely.
<gh_stars>0 package org.firstinspires.ftc.robotcore.external.hardware.camera; import com.qualcomm.robotcore.hardware.HardwareDevice; public interface WebcamName extends HardwareDevice { /** * Returns the USB serial number of the webcam * @return the USB serial number of the webcam */ /** * Returns the USB device path currently associated with this webcam. * May be null if the webcam is not presently attached. * * @return returns the USB device path associated with this name. * @see UsbManager#getDeviceList() */ /** * Returns whether this camera currently attached to the robot controller * @return whether this camera currently attached to the robot controller */ boolean isAttached(); }
Kabbalah as Sacred Psychology Abstract Highlighting three themes from the Kabbalah, this essay explores how psychotherapy can be enhanced through the integration of spiritual teachings and practices. Beginning with an exploration of repentance in the Jewish mystical tradition, the author examines the positive effects of spiritual awakening on identity formation and psychological development. Focusing on the Kabbalah's myths of the cosmology and messianic redemption, the author shows how these mystical motifs serve as therapeutic paradigms for growth, healing and renewal.
A TEEN who was found hanged in a hedgerow told her teachers that she was forced to "keep a big family secret" for her stepdad, an inquest heard. "Alarm bells" were said to be ringing after 13-year-old Amber Peat, had confided in the school over her alleged hellish home life. One of her teachers added that Amber had mentioned a "big secret" that she wasn't allowed to talk about - which turned out to be her stepdad had been jailed for 16 months over £120,000 tax fraud. An inquest in Nottingham heard Daniel Peat and an accomplice admitted to attempting to falsely claim more than £200,000 in tax rebates. Amber's body was found in June 2015 after she went missing in the early evening of May 30. Her body was found three days after she walked out of the home she lived at with her mum Kelly, stepdad Daniel and two younger siblings. Concerns about Amber were called in to a safeguarding team, Nottingham County Council's Multi-Agency Safeguarding Hub (MASH), by Amber's former vice principal. When referring Amber to the council's safeguarding unit she said: "This one is ringing alarm bells. It just does not feel right. My gut instinct is there is something not right here and I can't put my finger on it." Amber had told teachers that she was woken up by her stepdad at 11.30pm and was ordered to clean the floor of their Mansfield home. Her form tutor Rebecca Beard told the hearing Amber said she had to carry her belongings in a plastic carrier bag as a punishment for bad behaviour. She said that she became concerned for Amber's welfare after she came in "devastated" while wearing ill-fitting grey jogging bottoms one day in March 2015. Ms Beard told the court: "The other children in the classroom thought that she had actually wet herself, because it was so unusual that someone would be wearing something like that." Amber allegedly told Ms Beard that she had been forced to wear them by Mr Peat because of her bad behaviour over the weekend. 
The court heard her parents waited almost eight hours before they reported her missing in the early hours of the morning. On the seventh day of the hearing Vice Principal Karen Green said Amber and her family had moved around quite a bit, and that once people started to work with them they would move on. Coroner Laurinda Bower was told that two calls were made from Amber's school - Queen Elizabeth School in Mansfield - one to MASH before her death in September 2014 and another six months later. During the calls a worker took information from the callers, a key worker at the school and Karen Green, and handed it to a social worker. However the court heard how the case didn't pass a "threshold" that would have triggered an intervention. MASH officer Elizabeth Fisher, who took the call from the vice principal, said that the information provided was "limited" and "without context", thus not ticking boxes for a full social services review. The school was advised to carry out an "Early Help" assessment, something it failed to do. Ms Bower suggested that Mrs Fisher should have asked more questions to gain a wider idea of the situation. Sharon Clay, a school key worker, called the MASH team in March 2015, two months before Amber's death. She reported concerns over Amber wearing tracksuit bottoms to school, which the inquest has previously heard she was made to do by Mr Peat after failing to put her normal trousers in the wash, and bringing her items in a cheap plastic bag. She also said how she thought Amber was being "emotionally abused" by her stepdad. She added that Amber was "always hungry" and losing weight. MASH worker Joanna Shephard, who took the call, said: "You would need more incidents over a period of time." She also said that she was not aware of the bigger picture surrounding Amber. She advised Ms Clay that the school should discuss the matter with Amber's mum, Kelly Peat.
But Ms Bower asked if any thought had been given to the impact it could have on Amber if the disclosures she'd made to her teacher were passed back to her parents, adding: "As we discuss this now, do you see the concerns I am raising?" Mrs Shepherd replied: "Yes. But I could not question the social worker on that." The hearing was told that since Amber's death procedures had been put in place, including better recording systems, that aimed to gather and collate information better.
The type of sugar moiety is a major determinant of the small intestinal uptake and subsequent biliary excretion of dietary quercetin glycosides. Quercetin is an important dietary flavonoid with putative beneficial effects in the prevention of cancer and CVD. The in vivo bioactivity of quercetin depends on its bioavailability, which varies widely between foods. We used an in situ rat intestinal perfusion model to study whether differential small intestinal hydrolysis of the sugar moiety of five naturally occurring quercetin glycosides determines the small intestinal uptake and subsequent biliary excretion of quercetin. After 30 min perfusion, a decrease of intact quercetin glycoside in perfusate was observed for quercetin-3-O-ss-glucoside (20.9 (sem 1.4) micromol/l) and quercetin-4'-O-ss-glucoside (23.5 (sem 1.6) micromol/l), but not of quercetin-3-O-ss-galactoside, quercetin-3-O-ss-rhamnoside and quercetin-3-O-alpha-arabinopyranoside. Appearance of free quercetin in perfusate and conjugated quercetin metabolites (quercetin, isorhamnetin, and tamarixetin) in portal and peripheral plasma and bile were also significantly greater after treatment with quercetin-3-O-ss-glucoside or quercetin-4'-O-ss-glucoside compared with any of the other glycosides. Thus, the type of sugar moiety is a major determinant of the small intestinal absorption of quercetin glycosides, but the position (3 or 4') of the glucose moiety does not further influence absorption. The poor bioavailability of important dietary quercetin glycosides has implications for their in vivo bioactivities.
An economic analysis of the use of satellite imagery in mapping tree cover across Victoria The objective in this paper is to identify and quantify some of the current and potential social benefits from using Landsat TM based tree cover data sets to make natural resource management decisions. In most cases these benefits were able to be identified. In some cases benefits could be quantified on the basis of cost savings. From these cost savings it was found that the use of the data sets would accrue a positive net benefit to society.
Multi-Objective Antlion Algorithm for Short-Term Hydro-thermal Self-scheduling with Uncertainties In this paper, a stochastic multi-objective structure is introduced in joint energy and reserve market to allow energy generation companies participating in the short-term hydro-thermal self-scheduling with uncertainties. To solve this problem, an antlion optimization (ALO) algorithm is used. In addition, uncertainties including energy price, spinning and non-spinning reserve prices, output power of the wind, photovoltaic and small hydro units are mentioned. In this study, two methods are used to generate stochastic multi-objective scenarios, namely lattice monte-carlo simulation and roulette wheel mechanism (RWM). After that, the main purpose of the study is described, i.e. making GENCOs able to achieve the maximum profit and the minimum emission by using a multi-objective function considering a stochastic process. To reach this aim, the mixed integer programming (MIP) which includes a set of multi stage deterministic scenarios is employed. However, some special cases should be introduced in the formulation structure of the presented scheduling regarding hydro-thermal units to make the SMO-HTSS problem with wind, photovoltaic and small hydro units similar to the real time modeling. Since optimal solutions are produced in this method, one can allude to the application of the -constraint. Nevertheless, in order to select one of the most appropriate solutions among Pareto solutions obtained, the utilization of fuzzy method has been presented. In the end, as shown in this paper, the ALO algorithm is limited to the -constraint; some tests are carried out on an IEEE 118-bus test system to verify the accuracy and validity of the proposed method.
// Day 23: Amphipod //
//////////////////////

// Ignore all dead code warnings
#![allow(dead_code)]

use std::fmt::Debug;

use pathfinding::directed::astar::astar;

// Example input part 1
// #############
// #...........#
// ###B#C#B#D###
//   #A#D#C#A#
//   #########

// Puzzle input part 1
// #############
// #...........#
// ###D#A#A#D###
//   #C#C#B#B#
//   #########

// Example input part 2
// #############
// #...........#
// ###B#C#B#D###
//   #D#C#B#A#
//   #D#B#A#C#
//   #A#D#C#A#
//   #########

// Puzzle input part 2
// #############
// #...........#
// ###D#A#A#D###
//   #D#C#B#A#
//   #D#B#A#C#
//   #C#C#B#B#
//   #########

// Number of seats per room (2 for part 1, 4 for part 2).
const DEPTH: usize = 4;

// State encoding: indices 0..7 are the 7 usable hallway cells, then DEPTH
// seats per room, top seat first. 0 = empty, 1..=4 = amphipod A..D.
// #############
// #12.3.4.5.67#
// ###1#5#.#.###
//   #2#.#.#.#
//   #3#.#.#.#
//   #4#.#.#.#
//   #########
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct Cave([u8; 7 + 4 * DEPTH]);

// Energy per step for amphipods A..D; index 0 is unused (0 = empty cell).
const STEP_COST: [usize; 5] = [0, 1, 10, 100, 1000];

#[inline]
fn step_cost(amphipod: u8) -> usize {
    STEP_COST[amphipod as usize]
}

impl Cave {
    // Returns the occupant of `seat` in `room`.
    // Note that room is 1-indexed (matching the amphipod encoding).
    fn room(&self, room: usize, seat: usize) -> u8 {
        self.0[7 + (room - 1) * DEPTH + seat]
    }

    // True when `room` contains only empty seats and/or amphipods that
    // already belong there, i.e. it may accept another resident.
    fn room_ready_for_move_in(&self, room: usize) -> bool {
        debug_assert!(0 < room && room < 5);
        for seat in 0..DEPTH {
            let amphipod = self.room(room, seat);
            if amphipod != 0 && amphipod != room as u8 {
                return false;
            }
        }
        true
    }

    // True when every hallway cell strictly between `hallway_location` and
    // the exit of `room` is free (the cell at `hallway_location` itself is
    // allowed to be occupied — it is the mover's current position).
    fn open_hallway_path(&self, room: usize, hallway_location: usize) -> bool {
        debug_assert!(0 < room && room < 5);
        debug_assert!(hallway_location < 7);
        let l = EXIT_L[room];
        let r = EXIT_R[room];
        if hallway_location <= l {
            // Approaching from the left: scan rightwards up to the exit.
            for i in (hallway_location + 1)..=l {
                if self.hallway(i) != 0 {
                    return false;
                }
            }
            return true;
        }
        if hallway_location >= r {
            // Approaching from the right: scan leftwards down to the exit.
            for i in r..hallway_location {
                if self.hallway(i) != 0 {
                    return false;
                }
            }
            return true;
        }
        panic!(
            "Range related internal error in open_hallway_path({:?}, {}, {})",
            self, room, hallway_location
        );
    }

    // Occupant of hallway cell `i` (0 = empty).
    fn hallway(&self, i: usize) -> u8 {
        debug_assert!(i < 7);
        self.0[i]
    }

    fn set_hallway(&mut self, i: usize, arg: u8) {
        debug_assert!(i < 7);
        debug_assert!(arg < 5);
        self.0[i] = arg;
    }

    // Assumes an amphipod is right outside the room and pulls it into the deepest
    // free seat in the room. Returns the steps this takes.
    // Remember that rooms are 1-indexed.
    fn push_room(&mut self, room: usize) -> usize {
        debug_assert!(0 < room && room < 5);
        let mut steps = DEPTH;
        let doorstep = 7 + (room - 1) * DEPTH - 1; // Not a real index, but becomes one +1.
        while steps > 0 {
            if self.0[doorstep + steps] == 0 {
                // Seat `steps - 1` is free; entering it costs `steps` moves.
                self.0[doorstep + steps] = room as u8;
                return steps;
            }
            steps -= 1;
        }
        panic!("Tried to push room {} into a full room", room);
    }

    // Removes the top amphipod from the room and returns the step count used.
    fn pop_room(&mut self, room: usize) -> usize {
        debug_assert!(0 < room && room < 5);
        let doorstep = 7 + (room - 1) * DEPTH - 1; // Not a real index, but becomes one +1.
        for steps in 1..=DEPTH {
            if self.0[doorstep + steps] != 0 {
                self.0[doorstep + steps] = 0;
                return steps;
            }
        }
        panic!("Tried to pop room {} from an empty room", room);
    }

    // Returns the top amphipod in the room.
    // You should only need to call this if you already made sure that there
    // are foreign amphipods in the room.
    fn top(&self, room: usize) -> u8 {
        debug_assert!(0 < room && room < 5);
        for seat in 0..DEPTH {
            let amphipod = self.room(room, seat);
            if amphipod != 0 {
                return amphipod;
            }
        }
        panic!(
            "Tried to get top amphipod in room {} but there were none",
            room
        );
    }

    // Returns the inclusive range (l, r) of hallway cells reachable from the
    // exit of `room` by walking over empty cells only.
    fn reachable(&self, room: usize) -> (usize, usize) {
        debug_assert!(0 < room && room < 5);
        let mut l = EXIT_R[room];
        for i in (0..=EXIT_L[room]).rev() {
            if self.hallway(i) == 0 {
                l = i;
            } else {
                break;
            }
        }
        let mut r = EXIT_L[room];
        for i in EXIT_R[room]..7 {
            if self.hallway(i) == 0 {
                r = i;
            } else {
                break;
            }
        }
        (l, r)
    }
}

// Admissible A* heuristic: lower bound on the remaining energy, assuming
// every amphipod can walk home unobstructed.
fn heuristic1(cave: &Cave) -> usize {
    let mut total = 0;
    // For each Amphipod in the hallway, calculate how long it will take home.
    for i in 0..7 {
        let amphipod = cave.0[i];
        if amphipod > 0 {
            total += center_steps(amphipod, i) * step_cost(amphipod);
            total += DEPTH * step_cost(amphipod);
        }
    }
    // For each Amphipod that is in the wrong room, calculate how long it will
    // take home.
    for room in 1..=4 {
        // Rooms are 1-indexed to match amphipod indexing.
        for seat in 0..DEPTH {
            let amphipod = cave.room(room, seat);
            if amphipod > 0 && amphipod as usize != room {
                total += (seat + 1) * step_cost(amphipod); // Go to the top
                total += 2 * (amphipod as isize - room as isize).abs() as usize * step_cost(amphipod); // Go to the right room
                total += DEPTH * step_cost(amphipod); // Go to the bottom
            } else if amphipod > 0 {
                // We are in the right room, just go down to the bottom.
                total += (DEPTH - seat - 1) * step_cost(amphipod);
            }
        }
    }
    // Now everything is at the bottom which is incorrect. We correct this by
    // subtracting a constant (one amphipod of each type actually stops at
    // each seat above the bottom).
    if total < DEPTH * (DEPTH - 1) / 2 * 1111 {
        panic!("Heuristic 1 is wrong for {:?}", cave);
    }
    total -= DEPTH * (DEPTH - 1) / 2 * 1111;
    // For a better heuristic, we could also take into account that amphipods
    // that have a different-type amphipod below them will need to leave and go
    // back in again.
    total
}

// Steps from each hallway cell to the doorstep above each room.
// [Get above A from 0, Get above B from 0, ...]
const CENTER_STEPS: [usize; 7 * 4] = [
    2, 4, 6, 8, // Starting at Hallway:0
    1, 3, 5, 7, // Starting at Hallway:1
    1, 1, 3, 5, // Starting at Hallway:2
    3, 1, 1, 3, // Starting at Hallway:3
    5, 3, 1, 1, // Starting at Hallway:4
    7, 5, 3, 1, // Starting at Hallway:5
    8, 6, 4, 2, // Starting at Hallway:6
];

const fn center_steps(amphipod: u8, hallway_location: usize) -> usize {
    CENTER_STEPS[(amphipod as usize - 1) + hallway_location * 4]
}

// Successor function for A*: all legal moves from `cave` with their costs.
fn nbhd(cave: &Cave) -> Vec<(Cave, usize)> {
    // Amphipods only move under specific circumstances which makes the tree smaller.
    // 1. Amphipods will never move from the hallway into a room unless that room
    //    is their destination room
    // 2. and that room contains no amphipods which do
    //    not also have that room as their own destination.
    // 3. Amphipods don't move within the hallway.
    // It follows that in a given cave state each room either allows entry or
    // exit, but not both.
    // We can also always prioritize "moving in" over moving out: if a move in
    // is possible, it is one of the best moves.
    // This means if we find one such move, we should return only this.
    for hallway_location in 0..7 {
        let amphipod = cave.hallway(hallway_location);
        if amphipod > 0
            && cave.room_ready_for_move_in(amphipod as usize)
            && cave.open_hallway_path(amphipod as usize, hallway_location)
        {
            // Found a solution, return this as the only solution.
            let mut new_cave = cave.clone();
            let mut steps = center_steps(amphipod, hallway_location);
            new_cave.set_hallway(hallway_location, 0);
            steps += new_cave.push_room(amphipod as usize);
            return vec![(new_cave, steps * step_cost(amphipod))];
        }
    }
    // If we end up here, no one can move in and we need to generate all possible
    // movements of amphipods outside their homes.
    let mut result = Vec::new();
    for room in 1..=4 {
        // Rooms are 1-indexed
        // Check if there is a foreign amphipod in this room.
        if cave.room_ready_for_move_in(room) {
            // No foreign amphipod, so we don't need to move out.
            continue;
        }
        let amphipod = cave.top(room);
        // Find the spaces above the room that we can move out to.
        let (l, r) = cave.reachable(room);
        for i in l..=r {
            // We can move into this space.
            let mut new_cave = cave.clone();
            let mut steps = center_steps(room as u8, i);
            new_cave.set_hallway(i, amphipod);
            steps += new_cave.pop_room(room);
            result.push((new_cave, steps * step_cost(amphipod)));
        }
    }
    result
}

// Where do you end up if you move out of a room and take a step to the left?
// (Index 0 is a sentinel; rooms are 1-indexed.)
const EXIT_L: [usize; 5] = [666, 1, 2, 3, 4];
// Where do you end up if you move out of a room and take a step to the right?
const EXIT_R: [usize; 5] = [666, 2, 3, 4, 5];

// Goal test for A*: every amphipod is home and the hallway is empty.
fn success(cave: &Cave) -> bool {
    // Check if everything is where it should be.
    // Hallway must be only 0s.
    for i in 0..7 {
        if cave.hallway(i) != 0 {
            return false;
        }
    }
    // All the rooms must be filled with corresponding amphipods.
    for room in 1..=4 {
        for seat in 0..DEPTH {
            if cave.room(room, seat) != room as u8 {
                return false;
            }
        }
    }
    true
}

fn main() {
    // Part 1:
    // #############
    // #...........#
    // ###D#A#A#D###
    //   #C#C#B#B#
    //   #########
    // let cave = Cave([0, 0, 0, 0, 0, 0, 0, 4, 3, 1, 3, 1, 2, 4, 2]);

    // Part 2:
    // #############
    // #...........#
    // ###D#A#A#D###
    //   #D#C#B#A#
    //   #D#B#A#C#
    //   #C#C#B#B#
    //   #########
    let cave = Cave([
        0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 3, 1, 3, 2, 3, 1, 2, 1, 2, 4, 1, 3, 2,
    ]);
    let solution = astar(
        &cave,
        |cave| nbhd(cave),
        |cave| heuristic1(cave),
        // |_cave| 0,
        |cave| success(cave),
    )
    .expect("No path found.");
    println!("Part 2 costs {} energy.", solution.1);
}

/*
// Only enable this when DEPTH == 2.
// Test this module.
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_center_steps() {
        assert_eq!(center_steps(4, 2), 5);
        assert_eq!(center_steps(3, 2), 3);
    }

    const TARGET: Cave = Cave([0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4]);

    #[test]
    fn test_heuristic_on_final_state_is_zero() {
        assert_eq!(heuristic1(&TARGET), 0);
    }

    #[test]
    fn test_heuristic_1() {
        // #############
        // #...........#
        // ###D#B#C#A###
        //   #A#B#C#D#
        //   #########
        let cave = Cave([0, 0, 0, 0, 0, 0, 0, 4, 1, 2, 2, 3, 3, 1, 4]);
        assert_eq!(heuristic1(&cave), 8008);
    }

    #[test]
    fn test_heuristic2() {
        // #############
        // #...........#
        // ###D#A#A#D###
        //   #C#C#B#B#
        //   #########
        let cave = Cave([0, 0, 0, 0, 0, 0, 0, 4, 3, 1, 3, 1, 2, 4, 2]);
        assert_eq!(heuristic1(&cave), 10441);
    }

    #[test]
    fn test_heuristic3() {
        // #############
        // #.. . . C .D#
        // ###A#B#.#.###
        //   #A#B#C#D#
        //   #########
        let cave = Cave([0, 0, 0, 0, 3, 0, 4, 1, 1, 2, 2, 0, 3, 0, 4]);
        assert_eq!(heuristic1(&cave), 3200);
    }

    #[test]
    fn test_nbhd1() {
        // #############
        // #.._D_._._..#
        // ###A#B#C#.###
        //   #A#B#C#D#
        //   #########
        let cave = Cave([0, 0, 4, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 0, 4]);
        let nbhd = nbhd(&cave);
        assert_eq!(nbhd.len(), 1);
        assert_eq!(nbhd[0].1, 6000);
        assert_eq!(nbhd[0].0, TARGET);
    }

    #[test]
    fn test_nbhd2() {
        // #############
        // #.._D_._._..#
        // ###A#B#.#D###
        //   #A#B#C#C#
        //   #########
        let cave = Cave([0, 0, 4, 0, 0, 0, 0, 1, 1, 2, 2, 0, 3, 4, 3]);
        assert_eq!(cave.reachable(4), (3, 6));
        let nbhd = nbhd(&cave);
        assert_eq!(nbhd.len(), 4);
        assert_eq!(nbhd[0].1, 4000);
        assert_eq!(nbhd[1].1, 2000);
        assert_eq!(nbhd[2].1, 2000);
        assert_eq!(nbhd[3].1, 3000);
    }

    #[test]
    fn test_nbhd3() {
        // #############
        // #.._D_._C_D.#
        // ###A#B#.#.###
        //   #A#B#C#.#
        //   #########
        let cave = Cave([0, 0, 4, 0, 3, 4, 0, 1, 1, 2, 2, 0, 3, 0, 0]);
        assert_eq!(cave.reachable(3), (3, 3));
        assert!(!cave.open_hallway_path(4, 2));
        assert!(cave.open_hallway_path(3, 4));
        assert!(cave.open_hallway_path(4, 5));
        let nbhd = nbhd(&cave);
        assert_eq!(nbhd.len(), 1);
        assert_eq!(nbhd[0].1, 200);
    }

    #[test]
    fn test_success() {
        assert!(success(&TARGET));
        let cave = Cave([0, 0, 4, 0, 0, 0, 0, 1, 1, 2, 2, 0, 3, 4, 3]);
        assert!(!success(&cave));
        let cave = Cave([0, 0, 0, 0, 0, 0, 0, 4, 3, 1, 3, 1, 2, 4, 2]);
        assert!(!success(&cave));
    }

    #[test]
    // #[ignore = "reason"]
    fn test_mini_search() {
        // #############
        // #.._D_._._..#
        // ###A#B#.#D###
        //   #A#B#C#C#
        //   #########
        let cave = Cave([0, 0, 4, 0, 0, 0, 0, 1, 1, 2, 2, 0, 3, 4, 3]);
        let solution = astar(
            &cave,
            |cave| nbhd(cave),
            |cave| heuristic1(cave),
            |cave| success(cave),
        )
        .expect("No path found.");
        println!("{:?}", solution);
        assert_eq!(solution.0.len(), 6);
        assert_eq!(solution.1, 11500);
        assert_eq!(
            solution.0[0],
            Cave([0, 0, 4, 0, 0, 0, 0, 1, 1, 2, 2, 0, 3, 4, 3])
        );
        assert_eq!(
            solution.0[1],
            Cave([0, 0, 4, 0, 0, 4, 0, 1, 1, 2, 2, 0, 3, 0, 3])
        );
        assert_eq!(
            solution.0[2],
            Cave([0, 0, 4, 0, 3, 4, 0, 1, 1, 2, 2, 0, 3, 0, 0])
        );
        assert_eq!(
            solution.0[3],
            Cave([0, 0, 4, 0, 0, 4, 0, 1, 1, 2, 2, 3, 3, 0, 0])
        );
        assert_eq!(
            solution.0[4],
            Cave([0, 0, 0, 0, 0, 4, 0, 1, 1, 2, 2, 3, 3, 0, 4])
        );
        assert_eq!(
            solution.0[5],
            Cave([0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
        );
    }

    #[test]
    fn test_puzzle_part1() {
        // #############
        // #...........#
        // ###D#A#A#D###
        //   #C#C#B#B#
        //   #########
        let cave = Cave([0, 0, 0, 0, 0, 0, 0, 4, 3, 1, 3, 1, 2, 4, 2]);
        let solution = astar(
            &cave,
            |cave| nbhd(cave),
            |cave| heuristic1(cave),
            |cave| success(cave),
        )
        .expect("No path found.");
        assert_eq!(solution.1, 14467);
    }
}
*/
<filename>core/boolstub/combinator_generator.go<gh_stars>0 package boolstub import "github.com/gopereza/pereza/core/common" const ( conditionStart = "if v." conditionEnd = " {\n" conditionFixedSize = len(conditionStart) + len(conditionEnd) ) // Dynamic allocate func CombinatorBoolResultStub(typeName string, fieldNames, jsonNames []string) []byte { generator := NewCombinatorGenerator(fieldNames, jsonNames) body := generator.Generate() result := make([]byte, 0, common.WrapSignatureSize+len(body)) result = common.AppendHeader(result, typeName) result = append(result, body...) result = common.AppendFooter(result) return result } type CombinatorGenerator struct { fieldNames []string fastConditionMap map[string][]byte pattern *DumpGenerator replacer *BoolStateReplacer buffer []byte returnDepth int } func NewCombinatorGenerator(fieldNames, jsonNames []string) *CombinatorGenerator { pattern := NewDumpGenerator(jsonNames) length := len(fieldNames) return &CombinatorGenerator{ fieldNames: fieldNames, fastConditionMap: FastConditionMap(fieldNames), pattern: pattern, replacer: NewBoolStateReplacer(length), buffer: make([]byte, 0, 1024), // dynamic allocate returnDepth: length - 1, } } func (g *CombinatorGenerator) Generate() []byte { g.generate(0, FillBooleans(len(g.fieldNames), true)) return g.buffer } func (g *CombinatorGenerator) generate(depth int, states []bool) { fieldName := g.fieldNames[depth] trueState := g.replacer.Replace(states, depth, true) falseState := g.replacer.Replace(states, depth, false) if depth == g.returnDepth { g.append(g.fastConditionMap[fieldName]) g.append(g.pattern.Generate(trueState)) g.conditionClose() g.append(g.pattern.Generate(falseState)) } else { g.append(g.fastConditionMap[fieldName]) g.generate(depth+1, trueState) g.conditionClose() g.generate(depth+1, falseState) } g.replacer.PoolPut(trueState) g.replacer.PoolPut(falseState) } func (g *CombinatorGenerator) append(data []byte) { g.buffer = append(g.buffer, data...) 
} func (g *CombinatorGenerator) conditionClose() { g.buffer = append(g.buffer, '}', '\n') } func FastConditionMap(fieldNames []string) map[string][]byte { length := len(fieldNames) fastConditionMap := make(map[string][]byte, length) capacity := common.StringSliceSize(fieldNames) + length*conditionFixedSize once := make([]byte, 0, capacity) for _, fieldName := range fieldNames { current := conditionFixedSize + len(fieldName) once = AppendCondition(once, fieldName) fastConditionMap[fieldName] = once once = once[current:] } return fastConditionMap } func Condition(fieldName string) []byte { size := conditionFixedSize + len(fieldName) buffer := make([]byte, 0, size) return AppendCondition(buffer, fieldName) } func AppendCondition(dst []byte, fieldName string) []byte { result := append(dst, conditionStart...) result = append(result, fieldName...) result = append(result, conditionEnd...) return result }
Public R&D and Industrial Innovations at the Project Levels: An Exploration of Taiwan's Public Research Projects This study investigates the role of the ITRI in Taiwan's technological catch-up. The authors examine the relationship between public R&D and industrial innovations in Taiwan using data encompassing 252 ITRI annual research projects and the survey on the characteristics of 5902 cases of transferred technologies within these projects. The authors develop a new index of innovative output to measure the monetary value of patents for research projects. They find that the influences of accumulated R&D stock, high-level R&D personnel, and the intensity of process innovations on project-level R&D productivity are more pronounced when the monetary value of patents, instead of simple patent counts, is used as the proxy for innovation outputs. (JEL O12, L63)
Holocene Atmospheric Mercury Levels Reconstructed from Peat Bog Mercury Stable Isotopes. Environmental regulations on mercury (Hg) emissions and associated ecosystem restoration are closely linked to what Hg levels we consider natural. It is widely accepted that atmospheric Hg deposition has increased by a factor 3 ± 1 since preindustrial times. However, no long-term historical records of actual atmospheric gaseous elemental Hg (GEM) concentrations exist. In this study we report Hg stable isotope signatures in Pyrenean peat records (southwestern Europe) that are used as tracers of Hg deposition pathway (200Hg, wet vs dry Hg deposition) and atmospheric Hg sources and cycling (202Hg, 199Hg). By anchoring peat-derived GEM dry deposition to modern atmospheric GEM levels we are able to reconstruct the first millennial-scale atmospheric GEM concentration record. Reconstructed GEM levels from 1970 to 2010 agree with monitoring data, and maximum 20th century GEM levels of 3.9 ± 0.5 ng m-3 were 15 ± 4 times the natural Holocene background of 0.27 ± 0.11 ng m-3. We suggest that a -0.7 shift in 202Hg during the medieval and Renaissance periods is caused by deforestation and associated biomass burning Hg emissions. Our findings suggest therefore that human impacts on the global mercury cycle are subtler and substantially larger than currently thought.
<reponame>levindu/OpenCC /* * Open Chinese Convert * * Copyright 2010-2014 BYVoid <<EMAIL>> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "Dict.hpp" namespace opencc { /** * Serializable dictionary interface * @ingroup opencc_cpp_api */ class OPENCC_EXPORT SerializableDict { public: /** * Serializes the dictionary and writes in to a file. */ virtual void SerializeToFile(FILE* fp) const = 0; /** * Serializes the dictionary and writes in to a file. */ virtual void SerializeToFile(const string& fileName) const { FILE* fp = fopen(fileName.c_str(), "wb"); if (fp == NULL) { throw FileNotWritable(fileName); } SerializeToFile(fp); fclose(fp); } template <typename DICT> static bool TryLoadFromFile(const string& fileName, std::shared_ptr<DICT>* dict) { FILE* fp = #ifdef _MSC_VER // well, the 'GetPlatformString' shall return a 'wstring' _wfopen(UTF8Util::GetPlatformString(fileName).c_str(), L"rb") #else fopen(UTF8Util::GetPlatformString(fileName).c_str(), "rb") #endif // _MSC_VER ; if (fp == NULL) { return false; } std::shared_ptr<DICT> loadedDict = DICT::NewFromFile(fp); fclose(fp); *dict = loadedDict; return true; } template <typename DICT> static std::shared_ptr<DICT> NewFromFile(const string& fileName) { std::shared_ptr<DICT> dict; if (!TryLoadFromFile<DICT>(fileName, &dict)) { throw FileNotFound(fileName); } return dict; } }; }
DAVID SULLIVAN said yesterday that manager Gianfranco Zola’s job is “100 per cent secure” and then suggested he may take a vow of silence for the rest of their relegation battle. Zola reacted angrily to David Sullivan’s comments, and their timing on the eve of last night’s match against Birmingham, that everyone at the debt-ridden club would need to take a 25 per cent summer pay cut. But Zola was given a solid assurance by Sullivan, at least until the end of the season, when the Hammers’ Premier League fate will be known. Sullivan, co-chairman and co-owner with David Gold, was asked how secure Zola’s position is and said: “It’s 100 per cent secure. “In 17 years we sacked two managers at Birmingham. We’re not sackers, we support managers and we will bring in players to improve the team in the summer. “Over the next 14 games we’ll all learn an awful lot about everyone at West Ham, the team, the manager and we’ll just see what we can produce. “I’m very confident over the next 14 games that the team will improve and we’ll claw our way up the table.
Voice recognition systems have been used in a wide range of fields, providing various functions required by users. Voice recognition techniques may be implemented in electronic devices including voice input devices (for example, microphones). Such voice recognition techniques have recently replaced physical inputs of electronic devices, and are becoming an increasingly popular way to control the electronic devices by voice. In general, voice recognition may be roughly divided into two operations, preprocessing and recognition. For example, a conventional voice recognition technology may analyze an input voice, extract features of the input voice, and measure similarity with a previously collected voice model database to convert the most similar extracted features into characters or commands. The user's voice often includes meaningless sounds such as ambient noise. Thus, the voice recognition system may determine the extracted voice recognition section. For instance, when the user wakes up the voice input device using a voice start button or a call instruction, the voice input device (e.g., microphone) may be opened. Then, background energy may be used to estimate start and end points of the voice uttered by the user. When the end point of the uttered voice is detected, the voice input device may be closed. The voice recognition system may analyze the voice through a preprocessing operation to extract features for voice recognition. The voice recognition system may compare the input voice with the voice database to output the most likely word(s) as a result of voice recognition. In addition, the voice recognition system may convert the recognized result into sound, and notify the user by sound. Alternatively, the voice recognition system may notify the user by displaying the result. 
However, when a user who is not skillful in the use of the voice recognition system is not acquainted with the recognizable vocabulary, the user may be inconvenienced due to uttering an incorrect command. As an example, if the user utters the command, “Starbucks at Gangnam station”, but realizes the utterance of the wrong command, and then reutters “Ah no, it's not . . . ”, a conventional voice recognition system may detect “Starbucks at Gangnam station” as an end point and then close the voice input device. Thus, the voice recognition system may retrieve information on “Starbucks at Gangnam station” and output the result. This may cause inconvenience by forcing the user to wait for retrieval of the unwanted information on “Starbucks at Gangnam station” prior to re-uttering a new command in order to acquire the user's desired information. Alternatively, if the conventional voice recognition system recognizes the command “Starbucks at Gangnam station . . . Ah no, it's not . . . ”, the system may fail to retrieve corresponding information as the command does not exist in the voice recognition system.
def _get_settings_filename(self):
    """Return the path of the pickle file holding this widget's settings.

    The file lives in the shared widget-settings directory and is named
    after the widget class's module and qualified name.
    """
    cls = self.widget_class
    basename = "{0.__module__}.{0.__qualname__}.pickle".format(cls)
    return os.path.join(widget_settings_dir(), basename)
<filename>SOT23ZenerTester/FixtureProgram/SingleShot/parse_config_file.cpp /** * parse_config_file.cpp * * Definitions for parser code purpose-built to parse the text configuration * file read by the SingleShot program. * * Written in 2019 by <NAME>. * Originally distributed at https://github.com/slugrustle/electronics * * To the extent possible under law, the author has dedicated all copyright * and related and neighboring rights to this software to the public domain * worldwide. This software is distributed without any warranty. * The text of the CC0 Public Domain Dedication should be reproduced at the * end of this file. If not, see http://creativecommons.org/publicdomain/zero/1.0/ */ #include <algorithm> #include <cstdio> #include "parser_helpers.h" #include "parse_config_file.h" /** * If the input string is a case insensitive match for a ZenerSocket * enum value, socket is set to that value, and true is returned. * Otherwise, socket is not modified, and false is returned. */ bool map_zener_string_enum(const std::string input, ZenerSocket &socket) { if (case_insensitive_same(input, "J5L")) { socket = ZenerSocket::J5L; return true; } if (case_insensitive_same(input, "J5R")) { socket = ZenerSocket::J5R; return true; } if (case_insensitive_same(input, "J6L")) { socket = ZenerSocket::J6L; return true; } if (case_insensitive_same(input, "J6R")) { socket = ZenerSocket::J6R; return true; } if (case_insensitive_same(input, "J7L")) { socket = ZenerSocket::J7L; return true; } if (case_insensitive_same(input, "J7R")) { socket = ZenerSocket::J7R; return true; } if (case_insensitive_same(input, "J8L")) { socket = ZenerSocket::J8L; return true; } if (case_insensitive_same(input, "J8R")) { socket = ZenerSocket::J8R; return true; } if (case_insensitive_same(input, "J9L")) { socket = ZenerSocket::J9L; return true; } if (case_insensitive_same(input, "J9R")) { socket = ZenerSocket::J9R; return true; } return false; } /** * If the input string is a case insensitive match for a 
FixtureResistor
 * enum value, resistor is set to that value, and true is returned.
 * Otherwise, resistor is not modified, and false is returned.
 */
bool map_resistor_string_enum(const std::string input, FixtureResistor &resistor)
{
  /* Each designator maps to one fixture resistor value; first match wins. */
  if (case_insensitive_same(input, "100R")) { resistor = FixtureResistor::R100R; return true; }
  if (case_insensitive_same(input, "1K")) { resistor = FixtureResistor::R1K; return true; }
  if (case_insensitive_same(input, "10K")) { resistor = FixtureResistor::R10K; return true; }
  if (case_insensitive_same(input, "100K")) { resistor = FixtureResistor::R100K; return true; }
  if (case_insensitive_same(input, "1M")) { resistor = FixtureResistor::R1M; return true; }
  return false;
}

/**
 * Reads the SingleShot text configuration from input_stream line by line
 * and returns the populated test_config_t.
 *
 * Recognized fields: per-device COM port / baud rate / parity, the optional
 * DIOLAN_SERIAL_NUMBER, ZENERS_INSTALLED, and per-socket TEST_VOLTAGE_* and
 * RESISTOR_* entries. Redefinitions and extra arguments print warnings;
 * malformed or out-of-range values print errors. test_config.read_ok is true
 * only if every required field was found and no errors occurred.
 */
test_config_t parse_config_file(std::ifstream &input_stream)
{
  test_config_t test_config = { 0 };
  test_config.read_ok = false;
  test_config.diolan_serial_present = false;

  /* num_gpio_required = 1 + the highest relay pin index used by any
     zener socket or fixture resistor. */
  test_config.num_gpio_required = 0u;
  for (size_t jZener = 0u; jZener < NUM_ZENER_SOCKETS; jZener++)
  {
    uint32_t this_num_gpio_required = static_cast<uint32_t>(ZENER_RELAY_PIN.at(jZener)) + 1u;
    test_config.num_gpio_required = std::max(this_num_gpio_required, test_config.num_gpio_required);
  }
  for (size_t jResistor = 0u; jResistor < NUM_FIXTURE_RESISTORS; jResistor++)
  {
    uint32_t this_num_gpio_required = static_cast<uint32_t>(RESISTOR_RELAY_PIN.at(jResistor)) + 1u;
    test_config.num_gpio_required = std::max(this_num_gpio_required, test_config.num_gpio_required);
  }

  /* "found_*" flags detect both redefinitions and missing required fields. */
  bool any_read_errors = false;
  std::array<bool, NUM_SERIAL_DEVICES> found_com_ports;
  std::fill(found_com_ports.begin(), found_com_ports.end(), false);
  std::array<bool, NUM_SERIAL_DEVICES> found_baud_rates;
  std::fill(found_baud_rates.begin(), found_baud_rates.end(), false);
  std::array<bool, NUM_SERIAL_DEVICES> found_parities;
  std::fill(found_parities.begin(), found_parities.end(), false);
  bool found_zeners_installed = false;
  std::array<bool, NUM_ZENER_SOCKETS> found_test_voltages;
  std::fill(found_test_voltages.begin(), found_test_voltages.end(), false);
  std::array<bool, NUM_ZENER_SOCKETS> found_test_resistors;
  std::fill(found_test_resistors.begin(), found_test_resistors.end(), false);

  std::string line;
  size_t jLine = 0u;
  const std::string byte_order_mark = u8"\uFEFF";
  const std::set<char> token_delimeters = {',', ' ', '\n', '\r', '\t', '\f', '\v'};

  while (std::getline(input_stream, line))
  {
    /* If the first line begins with the UTF8 Byte Order Mark, remove it. */
    if (jLine == 0u && line.size() >= byte_order_mark.size() && line.substr(0u, byte_order_mark.size()).compare(byte_order_mark) == 0)
    {
      line = line.substr(byte_order_mark.size());
    }
    jLine++;

    std::vector<std::string> tokens = tokenize(line, token_delimeters);
    if (!tokens.empty() && tokens.front().at(0) != '#')
    {
      /* Non-empty non-comment lines are parsed in this block. */
      std::string &first_token = tokens.front();

      /* <device>_COM_PORT: expects COMn, 1 <= n <= 255. Ports above COM9
         need the \\.\ prefix for the Win32 API. */
      bool found_any_com_port = false;
      for (size_t jDevice = 0u; jDevice < NUM_SERIAL_DEVICES; jDevice++)
      {
        std::string field_name = SERIAL_DEVICE_NAMES.at(jDevice) + "_COM_PORT";
        if (case_insensitive_same(first_token, field_name))
        {
          if (found_com_ports.at(jDevice))
          {
            std::printf("Warning: redefinition of %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
          }
          else
          {
            found_com_ports.at(jDevice) = true;
            if (tokens.size() < 2u)
            {
              std::printf("Error: no argument for %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
              any_read_errors = true;
            }
            else
            {
              if (tokens.size() > 2u)
              {
                std::printf("Warning: more than one argument to %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
              }
              std::string &second_token = tokens.at(1);
              if (!case_insensitive_same(second_token.substr(0u, 3u), "COM"))
              {
                std::printf("Error: first argument to %s does not start with COM on line %zu of configuration file.\n", field_name.c_str(), jLine);
                any_read_errors = true;
              }
              else
              {
                bool parse_ok = false;
                int64_t port_number = parse_int64(second_token.substr(3u), parse_ok);
                if (!parse_ok || port_number < 1ll || port_number > 255ll)
                {
                  std::printf("Error: %s argument has non-integer or <1 or >255 for COM port number on line %zu of configuration file.\n", field_name.c_str(), jLine);
                  any_read_errors = true;
                }
                else
                {
                  test_config.com_port_printables.at(jDevice) = to_uppercase(second_token);
                  if (port_number <= 9ll)
                  {
                    test_config.com_ports.at(jDevice) = test_config.com_port_printables.at(jDevice);
                  }
                  else
                  {
                    test_config.com_ports.at(jDevice) = "\\\\.\\" + test_config.com_port_printables.at(jDevice);
                  }
                }
              }
            }
          }
          found_any_com_port = true;
          break;
        }
      }
      if (found_any_com_port) continue;

      /* <device>_BAUD_RATE: must equal one of the supported BaudRate values. */
      bool found_any_baud_rate = false;
      for (size_t jDevice = 0u; jDevice < NUM_SERIAL_DEVICES; jDevice++)
      {
        std::string field_name = SERIAL_DEVICE_NAMES.at(jDevice) + "_BAUD_RATE";
        if (case_insensitive_same(first_token, field_name))
        {
          if (found_baud_rates.at(jDevice))
          {
            std::printf("Warning: redefinition of %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
          }
          else
          {
            found_baud_rates.at(jDevice) = true;
            if (tokens.size() < 2u)
            {
              std::printf("Error: no argument for %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
              any_read_errors = true;
            }
            else
            {
              if (tokens.size() > 2u)
              {
                std::printf("Warning: more than one argument to %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
              }
              std::string &second_token = tokens.at(1);
              bool parse_ok = false;
              int64_t baud_rate = parse_int64(second_token, parse_ok);
              if (!parse_ok)
              {
                std::printf("Error: non-integer value for %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
                any_read_errors = true;
              }
              else if (baud_rate == static_cast<int64_t>(BaudRate::B4800)) test_config.baud_rates.at(jDevice) = BaudRate::B4800;
              else if (baud_rate == static_cast<int64_t>(BaudRate::B9600)) test_config.baud_rates.at(jDevice) = BaudRate::B9600;
              else if (baud_rate == static_cast<int64_t>(BaudRate::B19200)) test_config.baud_rates.at(jDevice) = BaudRate::B19200;
              else if (baud_rate == static_cast<int64_t>(BaudRate::B38400)) test_config.baud_rates.at(jDevice) = BaudRate::B38400;
              else if (baud_rate == static_cast<int64_t>(BaudRate::B57600)) test_config.baud_rates.at(jDevice) = BaudRate::B57600;
              else if (baud_rate == static_cast<int64_t>(BaudRate::B115200)) test_config.baud_rates.at(jDevice) = BaudRate::B115200;
              else
              {
                std::printf("Error: unsupported value for %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
                any_read_errors = true;
              }
            }
          }
          found_any_baud_rate = true;
          break;
        }
      }
      if (found_any_baud_rate) continue;

      /* <device>_PARITY: EVEN, ODD, or NONE (case insensitive). */
      bool found_any_parity = false;
      for (size_t jDevice = 0u; jDevice < NUM_SERIAL_DEVICES; jDevice++)
      {
        std::string field_name = SERIAL_DEVICE_NAMES.at(jDevice) + "_PARITY";
        if (case_insensitive_same(first_token, field_name))
        {
          if (found_parities.at(jDevice))
          {
            std::printf("Warning: redefinition of %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
          }
          else
          {
            found_parities.at(jDevice) = true;
            if (tokens.size() < 2u)
            {
              std::printf("Error: no argument for %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
              any_read_errors = true;
            }
            else
            {
              if (tokens.size() > 2u)
              {
                std::printf("Warning: more than one argument to %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
              }
              std::string &second_token = tokens.at(1);
              if (case_insensitive_same(second_token, "EVEN")) test_config.parities.at(jDevice) = ParityType::EVEN;
              else if (case_insensitive_same(second_token, "ODD")) test_config.parities.at(jDevice) = ParityType::ODD;
              else if (case_insensitive_same(second_token, "NONE")) test_config.parities.at(jDevice) = ParityType::NONE;
              else
              {
                std::printf("Error: invalid argument for %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
                any_read_errors = true;
              }
            }
          }
          found_any_parity = true;
          break;
        }
      }
      if (found_any_parity) continue;

      /* DIOLAN_SERIAL_NUMBER is optional: a missing argument is silently
         allowed (the field simply stays absent). Value must fit in uint32_t. */
      if (case_insensitive_same(first_token, "DIOLAN_SERIAL_NUMBER"))
      {
        if (test_config.diolan_serial_present)
        {
          std::printf("Warning: redefinition of DIOLAN_SERIAL_NUMBER on line %zu of configuration file.\n", jLine);
        }
        else if (tokens.size() > 1u)
        {
          test_config.diolan_serial_present = true;
          if (tokens.size() > 2u)
          {
            std::printf("Warning: more than one argument to DIOLAN_SERIAL_NUMBER on line %zu of configuration file.\n", jLine);
          }
          std::string &second_token = tokens.at(1);
          bool parse_ok = false;
          int64_t diolan_serial = parse_int64(second_token, parse_ok);
          if (!parse_ok || diolan_serial < 0ll || diolan_serial > static_cast<int64_t>(std::numeric_limits<uint32_t>::max()))
          {
            std::printf("Error: non-integer or out of range value for DIOLAN_SERIAL_NUMBER on line %zu of configuration file.\n", jLine);
            any_read_errors = true;
          }
          else
          {
            test_config.diolan_serial_number = static_cast<uint32_t>(diolan_serial);
          }
        }
        continue;
      }

      /* ZENERS_INSTALLED: list of socket designators; duplicates warn,
         unknown designators are errors. */
      if (case_insensitive_same(first_token, "ZENERS_INSTALLED"))
      {
        if (found_zeners_installed)
        {
          std::printf("Warning: redefinition of ZENERS_INSTALLED on line %zu of configuration file.\n", jLine);
        }
        else
        {
          found_zeners_installed = true;
          if (tokens.size() < 2u)
          {
            std::printf("Error: no argument for ZENERS_INSTALLED on line %zu of configuration file.\n", jLine);
            any_read_errors = true;
          }
          else
          {
            if (tokens.size() > NUM_ZENER_SOCKETS + 1u)
            {
              std::printf("Warning: more than %zu arguments to ZENERS_INSTALLED on line %zu of configuration file.\n", NUM_ZENER_SOCKETS, jLine);
            }
            for (size_t jToken = 1u; jToken < std::min(static_cast<size_t>(NUM_ZENER_SOCKETS + 1u), tokens.size()); jToken++)
            {
              ZenerSocket socket;
              if (!map_zener_string_enum(tokens.at(jToken), socket))
              {
                std::printf("Error: invalid argument for ZENERS_INSTALLED on line %zu of configuration file.\n", jLine);
                any_read_errors = true;
              }
              else
              {
                std::pair<std::set<ZenerSocket>::iterator, bool> retval = test_config.zeners_installed.insert(socket);
                if (!retval.second)
                {
                  std::printf("Warning: duplicate argument to ZENERS_INSTALLED on line %zu of configuration file.\n", jLine);
                }
              }
            }
          }
        }
        continue;
      }

      /* TEST_VOLTAGE_<socket>: a number within the configured limits. */
      bool found_any_test_voltage = false;
      for (size_t jSocket = 0u; jSocket < NUM_ZENER_SOCKETS; jSocket++)
      {
        std::string field_name = "TEST_VOLTAGE_" + ZENER_SOCKET_NAMES.at(jSocket);
        if (case_insensitive_same(first_token, field_name))
        {
          if (found_test_voltages.at(jSocket))
          {
            std::printf("Warning: redefinition of %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
          }
          else
          {
            found_test_voltages.at(jSocket) = true;
            if (tokens.size() < 2u)
            {
              std::printf("Error: no argument for %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
              any_read_errors = true;
            }
            else
            {
              if (tokens.size() > 2u)
              {
                std::printf("Warning: more than one argument to %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
              }
              std::string &second_token = tokens.at(1);
              double test_voltage_val = parse_double(second_token);
              if (std::isnan(test_voltage_val) || test_voltage_val < TEST_VOLTAGE_LOWER_LIMIT || test_voltage_val > TEST_VOLTAGE_UPPER_LIMIT)
              {
                std::printf("Error: non-number or <%.1e V or >%.1e V for %s on line %zu of configuration file.\n", TEST_VOLTAGE_LOWER_LIMIT, TEST_VOLTAGE_UPPER_LIMIT, field_name.c_str(), jLine);
                any_read_errors = true;
              }
              else
              {
                test_config.test_voltages.at(jSocket) = test_voltage_val;
              }
            }
          }
          found_any_test_voltage = true;
          break;
        }
      }
      if (found_any_test_voltage) continue;

      /* RESISTOR_<socket>: a resistor designator understood by
         map_resistor_string_enum above. */
      bool found_any_test_resistor = false;
      for (size_t jSocket = 0u; jSocket < NUM_ZENER_SOCKETS; jSocket++)
      {
        std::string field_name = "RESISTOR_" + ZENER_SOCKET_NAMES.at(jSocket);
        if (case_insensitive_same(first_token, field_name))
        {
          if (found_test_resistors.at(jSocket))
          {
            std::printf("Warning: redefinition of %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
          }
          else
          {
            found_test_resistors.at(jSocket) = true;
            if (tokens.size() < 2u)
            {
              std::printf("Error: no argument for %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
              any_read_errors = true;
            }
            else
            {
              if (tokens.size() > 2u)
              {
                std::printf("Warning: more than one argument to %s on line %zu of configuration file.\n", field_name.c_str(), jLine);
              }
              std::string &second_token = tokens.at(1);
              FixtureResistor resistor_val = FixtureResistor::R1M;
              if (map_resistor_string_enum(second_token, resistor_val))
              {
                test_config.test_resistors.at(jSocket) = resistor_val;
              }
              else
              {
                std::printf("Error: value for %s on line %zu of configuration file is not a valid resistor designator.\n", field_name.c_str(), jLine);
                any_read_errors = true;
              }
            }
          }
          found_any_test_resistor = true;
          break;
        }
      }
      if (found_any_test_resistor) continue;

      std::printf("Warning: Unrecognized non-comment on line %zu of configuration file.\n", jLine);
    }
  }

  /* Two devices sharing one COM port is a configuration error. */
  for (size_t jDevice1 = 0u; jDevice1 < NUM_SERIAL_DEVICES - 1u; jDevice1++)
  {
    for (size_t jDevice2 = jDevice1 + 1u; jDevice2 < NUM_SERIAL_DEVICES; jDevice2++)
    {
      if (found_com_ports.at(jDevice1) && found_com_ports.at(jDevice2) && test_config.com_ports.at(jDevice1).compare(test_config.com_ports.at(jDevice2)) == 0)
      {
        std::printf("Error: The %s and %s were configured for the same COM port. This is not allowed.\n", SERIAL_DEVICE_NAMES.at(jDevice1).c_str(), SERIAL_DEVICE_NAMES.at(jDevice2).c_str());
        any_read_errors = true;
      }
    }
  }

  /* Every required field must have appeared at least once. */
  bool something_missing = false;
  for (size_t jDevice = 0u; jDevice < NUM_SERIAL_DEVICES; jDevice++)
  {
    if (!found_com_ports.at(jDevice))
    {
      something_missing = true;
      std::printf("Error: configuration file is missing %s_COM_PORT definition.\n", SERIAL_DEVICE_NAMES.at(jDevice).c_str());
    }
    if (!found_baud_rates.at(jDevice))
    {
      something_missing = true;
      std::printf("Error: configuration file is missing %s_BAUD_RATE definition.\n", SERIAL_DEVICE_NAMES.at(jDevice).c_str());
    }
    if (!found_parities.at(jDevice))
    {
      something_missing = true;
      std::printf("Error: configuration file is missing %s_PARITY definition.\n", SERIAL_DEVICE_NAMES.at(jDevice).c_str());
    }
  }
  if (!found_zeners_installed)
  {
    something_missing = true;
    std::printf("Error: configuration file is missing ZENERS_INSTALLED definition.\n");
  }
  for (size_t jSocket = 0u; jSocket < NUM_ZENER_SOCKETS; jSocket++)
  {
    if (!found_test_voltages.at(jSocket))
    {
      something_missing = true;
      std::printf("Error: configuration file is missing TEST_VOLTAGE_%s definition.\n", ZENER_SOCKET_NAMES.at(jSocket).c_str());
    }
    if (!found_test_resistors.at(jSocket))
    {
      something_missing = true;
      std::printf("Error: configuration file is missing RESISTOR_%s definition.\n", ZENER_SOCKET_NAMES.at(jSocket).c_str());
    }
  }

  test_config.read_ok = (!something_missing && !any_read_errors);
  return test_config;
}

/*
Creative Commons Legal Code

CC0 1.0 Universal

CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
HEREUNDER.

Statement of Purpose

The laws of most jurisdictions throughout the world automatically confer
exclusive Copyright and Related Rights (defined below) upon the creator
and subsequent owner(s) (each and all, an "owner") of an original work of
authorship and/or a database (each, a "Work").

Certain owners wish to permanently relinquish those rights to a Work for
the purpose of contributing to a commons of creative, cultural and
scientific works ("Commons") that the public can reliably and without fear
of later claims of infringement build upon, modify, incorporate in other
works, reuse and redistribute as freely as possible in any form whatsoever
and for any purposes, including without limitation commercial purposes.
These owners may contribute to the Commons to promote the ideal of a free
culture and the further production of creative, cultural and scientific
works, or to gain reputation or greater distribution for their Work in
part through the use and efforts of others.
For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. 1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; ii. moral rights retained by the original author(s) and/or performer(s); iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; v. rights protecting the extraction, dissemination, use and reuse of data in a Work; vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. 2. Waiver. 
To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. 3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). 
The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. 4. Limitations and Disclaimers. a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. */
import { NextPage } from 'next'; import React from 'react'; import UserSettings from '../../../components/UserProfile/UserSettings'; import UserPermissions from '../../../components/UserProfile/UserSettings/UserPermissions'; const UserPermissionsPage: NextPage = () => { return ( <UserSettings> <UserPermissions /> </UserSettings> ); }; export default UserPermissionsPage;
/* <NAME> Copyright (C) 2001 <NAME> This file is released under the terms of
   the MIT license. Read LICENSE.txt for more information. */

#include <allegro.h>

#include "dkbk.h"

/* Scorer alarm lamp state.  alarm_time_* is a frame counter: -1 means the
   lamp is off, otherwise it counts the frames the lamp has been lit.
   alarm_msec_* is how long (milliseconds) the lamp should stay lit. */
static int alarm_time_blue;
static int alarm_msec_blue;
static int alarm_time_red;
static int alarm_msec_red;

/* "Crazy" alarm: while alarm_crazy_time >= 0 both lamps alternate rapidly
   (see update_crusher).  alarm_crazy_switch counts frames between lamp
   toggles. */
static int alarm_crazy_time;
static int alarm_crazy_switch;

/* Position the crusher under the funnel, reset all alarms and start the
   looping motor sample (volume 255, centered pan, normal pitch). */
void init_crusher()
{
  crusher.w = 32;
  crusher.h = 32;
  /* Centered horizontally under the funnel mouth, near the bottom edge. */
  crusher.x = funnel.x1+(funnel.x2-funnel.x1)/2 - crusher.w/2;
  crusher.y = GAME_H - crusher.h - 8;

  alarm_time_blue = -1;
  alarm_time_red = -1;
  alarm_crazy_time = -1;

  play_sample(datafile[SAMP_CRUSHER_MOTOR].dat, 255, 128, 1000, TRUE);
}

/* Stop the looping motor and any alarm sample. */
void shutdown_crusher()
{
  stop_sample(datafile[SAMP_CRUSHER_MOTOR].dat);
  stop_sample(datafile[SAMP_ALARM].dat);
}

/* Advance alarm timers by one frame.
   Normal mode: each lit lamp counts up and goes out once its configured
   millisecond duration has elapsed (frames are converted to ms via
   FRAMES_PER_SECOND).
   Crazy mode: lamps alternate every FRAMES_PER_SECOND/8 frames and the
   whole effect stops after 1.5 seconds. */
void update_crusher()
{
  if (alarm_crazy_time < 0) {
    if (alarm_time_blue >= 0) {
      alarm_time_blue++;
      if (1000*alarm_time_blue/FRAMES_PER_SECOND > alarm_msec_blue)
        alarm_time_blue = -1;   /* duration elapsed: lamp off */
    }
    if (alarm_time_red >= 0) {
      alarm_time_red++;
      if (1000*alarm_time_red/FRAMES_PER_SECOND > alarm_msec_red)
        alarm_time_red = -1;
    }
  }
  else {
    alarm_crazy_time++;
    if (alarm_crazy_time > FRAMES_PER_SECOND*3/2) {
      /* Crazy alarm over: silence it and turn everything off. */
      stop_sample(datafile[SAMP_ALARM].dat);
      alarm_crazy_time = -1;
      alarm_time_blue = -1;
      alarm_time_red = -1;
    }
    else {
      alarm_crazy_switch++;
      if (alarm_crazy_switch > FRAMES_PER_SECOND/8) {
        /* Toggle which lamp is lit. */
        alarm_crazy_switch = 0;
        if (alarm_time_blue >= 0) {
          alarm_time_blue = -1;
          alarm_time_red = 0;
        }
        else {
          alarm_time_blue = 0;
          alarm_time_red = -1;
        }
      }
    }
  }
}

/* do_line() callback drawing the motor strap as an animated dashed line.
   The static counter persists across calls so the dash pattern appears to
   move each frame. */
static void strap_proc(BITMAP *bmp, int x, int y, int c)
{
  static int color = 0;
  (void)c;   /* color argument unused; pattern comes from the counter */
  putpixel(bmp, x, y, (color&2)? makecol(64, 64, 64): makecol(0, 0, 0));
  color++;
}

/* Draw the crusher assembly: body, vibrating motor, rotating pulley, the
   strap connecting them, the scorer panel and its alarm lamps.
   rand()%3-1 adds a -1..+1 pixel jitter to simulate vibration. */
void draw_crusher(BITMAP *bmp)
{
  static fixed angle = 0;   /* pulley rotation, Allegro fixed 0..256 units */
  int x, y, x1, y1, x2, y2;

  x = crusher.x+crusher.w/2-get_bitmap(BMP_CRUSHER)->w/2;
  y = crusher.y+crusher.h/2-get_bitmap(BMP_CRUSHER)->h/2+8;

  /* crusher body */
  draw_sprite(bmp, get_bitmap(BMP_CRUSHER), x, y);

  /* crusher motor (position saved in x1/y1 for the strap) */
  draw_sprite(bmp, get_bitmap(BMP_CRUSHER_MOTOR),
              x1 = x-get_bitmap(BMP_CRUSHER_MOTOR)->w+1+rand()%3-1,
              y1 = y+rand()%3-1);

  /* crusher pulley (position saved in x2/y2 for the strap) */
  rotate_sprite(bmp, get_bitmap(BMP_CRUSHER_PULLEY),
                x2 = x+17-get_bitmap(BMP_CRUSHER_PULLEY)->w/2+rand()%3-1,
                y2 = y+13-get_bitmap(BMP_CRUSHER_PULLEY)->h/2+rand()%3-1,
                angle);

  /* strap: two dashed lines between motor and pulley */
  do_line(bmp, x1+12, y1+8,
          x2+get_bitmap(BMP_CRUSHER_PULLEY)->w/2-1, y2,
          makecol(0, 0, 0), strap_proc);
  do_line(bmp, x1+12, y1+18,
          x2+get_bitmap(BMP_CRUSHER_PULLEY)->w/2-1,
          y2+get_bitmap(BMP_CRUSHER_PULLEY)->h-1,
          makecol(0, 0, 0), strap_proc);

  /* Spin the pulley; wrap at a full turn (256 fixed-point degrees). */
  angle = fadd(angle, itofix(16));
  if (angle > itofix(256))
    angle = fsub(angle, itofix(256));

  /* scorer */
  draw_sprite(bmp, get_bitmap(BMP_SCORER),
              x+get_bitmap(BMP_CRUSHER)->w,
              y+get_bitmap(BMP_CRUSHER)->h-get_bitmap(BMP_SCORER)->h);

  /* alarm lamps: 8x9 regions blitted from the alarm bitmap (blue lamp from
     the left edge, red lamp from the right edge) */
  if (alarm_time_blue >= 0) {
    masked_blit(get_bitmap(BMP_SCORER_ALARM), bmp, 0, 0,
                x+get_bitmap(BMP_CRUSHER)->w+63,
                y+get_bitmap(BMP_CRUSHER)->h-get_bitmap(BMP_SCORER)->h+27,
                8, 9);
  }
  if (alarm_time_red >= 0) {
    masked_blit(get_bitmap(BMP_SCORER_ALARM), bmp,
                get_bitmap(BMP_SCORER_ALARM)->w-8, 0,
                x+get_bitmap(BMP_CRUSHER)->w+75,
                y+get_bitmap(BMP_CRUSHER)->h-get_bitmap(BMP_SCORER)->h+27,
                8, 9);
  }
}

/* Light the blue lamp for msec milliseconds (ignored during crazy mode). */
void active_blue_alarm(int msec)
{
  if (alarm_crazy_time < 0) {
    alarm_time_blue = 0;
    alarm_msec_blue = msec;
  }
}

/* Light the red lamp for msec milliseconds and play the alarm once at low
   volume (ignored during crazy mode). */
void active_red_alarm(int msec)
{
  if (alarm_crazy_time < 0) {
    alarm_time_red = 0;
    alarm_msec_red = msec;
    play_sample(datafile[SAMP_ALARM].dat, 64, 128, 1000, FALSE);
  }
}

/* Start the crazy alternating-lamp alarm: loop the alarm sample at full
   volume and begin with the blue lamp lit. */
void active_crazy_alarm()
{
  play_sample(datafile[SAMP_ALARM].dat, 255, 128, 1000, TRUE);
  alarm_crazy_time = 0;
  alarm_crazy_switch = 0;
  alarm_time_blue = 0;
  alarm_time_red = -1;
}
Social Relations in the Trans-Saharan and Western Sudanese Trade: an Overview In summary, the evidence suggests that during the greater part of the second millennium A.D., the trans-Saharan and western Sudanese trade was embedded in societal frameworks and integrated into existing social structures. It was a socio-economic function which benefited the entire community. Although no fixed national boundaries existed, semi-permanent trading blocs or market networks were created, which corresponded to the area under de facto ownership or control of a dominant ethnic group or state.
/*
 * Copyright (c) 2015 Inocybe and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.services;

import org.junit.Test;

/**
 * Unit test for {@link L2RewriteService}.
 *
 * <p>Currently a placeholder: {@code L2RewriteService} has no behavior to
 * verify yet, so this class only establishes the test scaffolding.
 */
public class L2RewriteServiceTest {

    @Test
    public void test() {
        // TODO: add real assertions once L2RewriteService gains behavior;
        // nothing has been implemented in the class yet.
    }
}
// Copyright (c) 2018-2020 <NAME>
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_RPC_AUXPOW_MINER_H
#define BITCOIN_RPC_AUXPOW_MINER_H

#include <miner.h>
#include <powdata.h>
#include <rpc/request.h>
#include <script/script.h>
#include <script/standard.h>
#include <sync.h>
#include <txmempool.h>
#include <uint256.h>

#include <univalue.h>

#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

class ChainstateManager;

namespace auxpow_tests
{
// Test-only friend that can reach into AuxpowMiner's private state.
class AuxpowMinerForTest;
}

/**
 * This class holds "global" state used to construct blocks for the auxpow
 * mining RPCs and the map of already constructed blocks to look them up
 * in the submitauxblock RPC.
 *
 * It is used as a singleton that is initialised during startup, taking the
 * place of the previously real global and static variables.
 */
class AuxpowMiner
{

private:

  /** The lock used for state in this object.  Guards all fields below. */
  mutable RecursiveMutex cs;

  /** All currently "active" block templates.  Owns the template memory. */
  std::vector<std::unique_ptr<CBlockTemplate>> templates;
  /** Maps block hashes to pointers in vTemplates.  Does not own the memory. */
  std::map<uint256, const CBlock*> blocks;
  /**
   * Maps coinbase script hashes and PoW algorithms to pointers in vTemplates.
   * Does not own the memory.
   */
  std::map<std::pair<PowAlgo, CScriptID>, const CBlock*> curBlocks;

  /** The current extra nonce for block creation. */
  unsigned extraNonce = 0;

  /* Some data about when the current block (pblock) was constructed,
     used to decide whether a fresh template is needed. */
  unsigned txUpdatedLast;
  const CBlockIndex* pindexPrev = nullptr;
  uint64_t startTime;

  /**
   * Constructs a new current block if necessary (checking the current state to
   * see if "enough changed" for this), and returns a pointer to the block
   * that should be returned to a miner for working on at the moment.  Also
   * fills in the difficulty target value.
   */
  const CBlock* getCurrentBlock (const ChainstateManager& chainman,
                                 const CTxMemPool& mempool,
                                 PowAlgo algo, const CScript& scriptPubKey,
                                 uint256& target)
      EXCLUSIVE_LOCKS_REQUIRED (cs);

  /**
   * Looks up a previously constructed block by its (hex-encoded) hash.  If the
   * block is found, it is returned.  Otherwise, a JSONRPCError is thrown.
   */
  const CBlock* lookupSavedBlock (const std::string& hashHex) const
      EXCLUSIVE_LOCKS_REQUIRED (cs);

  friend class auxpow_tests::AuxpowMinerForTest;

public:

  AuxpowMiner () = default;

  /**
   * Performs the main work for the "createauxblock" RPC:  Construct a new
   * block to work on with the given address for the block reward and return
   * the necessary information for the miner to construct an auxpow for it.
   */
  UniValue createAuxBlock (const JSONRPCRequest& request,
                           const CScript& scriptPubKey);

  /**
   * Performs the main work for the "submitauxblock" RPC:  Look up the block
   * previously created for the given hash, attach the given auxpow to it
   * and try to submit it.  Returns true if all was successful and the block
   * was accepted.
   */
  bool submitAuxBlock (const JSONRPCRequest& request,
                       const std::string& hashHex,
                       const std::string& auxpowHex) const;

  /**
   * Performs the main logic needed for the "create" form of the "getwork"
   * RPC.
   */
  UniValue createWork (const JSONRPCRequest& request,
                       const CScript& scriptPubKey);

  /**
   * Performs the "submit" form of the "getwork" RPC.
   */
  bool submitWork (const JSONRPCRequest& request,
                   const std::string& hashHex,
                   const std::string& dataHex) const;

  /**
   * Returns the singleton instance of AuxpowMiner that is used for RPCs.
   */
  static AuxpowMiner& get ();

};

#endif // BITCOIN_RPC_AUXPOW_MINER_H
918 Pattern of childhood injuries: findings from hospital-based injury surveillance system in Oman Background Globally, injuries cause death and disability for millions of children every year. Literature from high-income rapidly developing countries, such as the Arab Gulf states, on this burden is sparse. Realising this gap, a surveillance system was established in two hospitals of Oman. Data on childhood injuries was collected and analysed to better understand such injuries in the Arab Gulf States. Methods Data was collected over a 6-month period in two large hospitals of Oman. All patients up to 18 years who were admitted with a history of trauma between October 2014 and April 2015 were included. External cause and place of occurrence according to age and sex were analysed. Findings 35% of all cases were paediatric (891/2549 cases) and of those, 69.3% were males. Children between 0 and 5 years accounted for 53% of the study population. Most common external causes of injuries were falls (51%), exposure to mechanical forces (20.4%), and transport injuries (16.5%) for all ages. Analysis by age revealed that falls accounted for 50.9% of injuries for ages 0–5 years and 53.3% for 6–12 years. Transport injuries (43.5%) were the most common cause for children 13–18 years, and were also more common in males (20%) than females (8.4%). A larger proportion of females (13.5%) was injured by contact with heat and hot substances vs. males (6%). Home was the place of injury for most children 0–5 years (86.4%) and 6–12 years (61.5%), whereas streets and highways were the most common place of injuries for ages 13–18 years. Conclusion Childhood injuries are a significant cause of hospital admissions in Oman. Significant age-related differences in cause of injury highlight the need for targeted interventions. Prevention of home-based falls and transport injuries must be a priority for all children. Additionally, road safety interventions and education must be the top agenda for young Omani males.