//
//     Generated by classdumpios 1.0.1 (64 bit) (iOS port by DreamDevLost)(Debug version compiled Sep 26 2020 13:48:20).
//
//     Copyright (C) 1997-2019 <NAME>.
//

#import "_TtC8AppStore16DynamicTypeLabel.h"

@class MISSING_TYPE;

@interface _TtC8AppStore26CappedSizeDynamicTypeLabel : _TtC8AppStore16DynamicTypeLabel
{
    MISSING_TYPE *maxSizeCategory;	// 4182187265 = 0xf9471d01
}

- (void).cxx_destruct;	// IMP=0x00000001002e2f24
- (id)initWithCoder:(id)arg1;	// IMP=0x00000001002e2a3c

@end
/**
 * The base concurrent implementation of the {@link Pool}.
 *
 * @param <E> the object's type.
 * @author JavaSaBr
 */
public abstract class ConcurrentPool<E> implements Pool<E> {

    /**
     * The storage of objects.
     */
    @NotNull
    protected final ConcurrentArray<E> pool;

    public ConcurrentPool(@NotNull Class<? super E> type) {
        this.pool = createPool(type);
    }

    protected abstract @NotNull ConcurrentArray<E> createPool(@NotNull Class<? super E> type);

    @Override
    public boolean isEmpty() {
        return pool.isEmpty();
    }

    @Override
    public void put(@NotNull E object) {
        pool.runInWriteLock(object, Array::add);
    }

    @Override
    public void remove(@NotNull E object) {
        pool.runInWriteLock(object, Array::fastRemove);
    }

    @Override
    public @Nullable E take() {

        if (pool.isEmpty()) {
            return null;
        }

        E object = pool.getInWriteLock(Array::pop);

        if (object == null) {
            return null;
        }

        return object;
    }

    @Override
    public String toString() {
        return pool.toString();
    }
}
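Because the class above depends on the surrounding library's ConcurrentArray, a self-contained sketch of the same put/take pool contract can be built on java.util.concurrent instead. SimpleConcurrentPool is a hypothetical stand-in, not the library's implementation; the queue-backed storage is an assumption chosen only to keep the example runnable.

import java.util.concurrent.ConcurrentLinkedQueue;

// Minimal queue-backed pool sketch; not the ConcurrentArray-based original.
public class SimpleConcurrentPool<E> {

    // A lock-free queue stands in for the write-locked ConcurrentArray above.
    private final ConcurrentLinkedQueue<E> pool = new ConcurrentLinkedQueue<>();

    public boolean isEmpty() { return pool.isEmpty(); }

    public void put(E object) { pool.add(object); }

    public void remove(E object) { pool.remove(object); }

    // Returns a pooled object, or null when the pool is empty.
    public E take() { return pool.poll(); }

    public static void main(String[] args) {
        SimpleConcurrentPool<StringBuilder> builders = new SimpleConcurrentPool<>();
        builders.put(new StringBuilder());
        StringBuilder sb = builders.take();   // reuse instead of allocating
        if (sb != null) {
            sb.setLength(0);                  // reset state before use
            builders.put(sb);                 // return to the pool when done
        }
    }
}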
import React from "react";

export interface RadioProps {
  /** HTML id of the input element */
  id: string;
  /** HTML name of the input element */
  name: string;
  /** Holds text for label associated with input element */
  label: string;
  /** boolean indicating if this radio is selected */
  checked?: boolean;
  /** The value of the input */
  value?: string | number | string[];
  /** Optional boolean to trigger a related context block. */
  trigger?: boolean;
  /** Event listener which fires when a change event occurs (varies on input type) */
  onChange: (event: React.ChangeEvent<HTMLInputElement>) => void;
  /** Event listener which fires when an input loses focus */
  onBlur?: (event: React.ChangeEvent<HTMLInputElement>) => void;
}

const Radio: React.FunctionComponent<RadioProps> = ({
  id,
  name,
  label,
  checked,
  value,
  trigger,
  onBlur,
  onChange,
}): React.ReactElement => {
  const clicked: boolean = id === value;
  return (
    <label
      data-tc-wenv-id={id}
      data-tc-wenv-trigger={trigger}
      className={clicked ? "active" : ""}
    >
      <input
        id={id}
        name={name}
        type="radio"
        checked={checked}
        value={value}
        onChange={onChange}
        onBlur={onBlur}
      />
      <span>{label}</span>
    </label>
  );
};

export default Radio;
Treatment, prevention and public health management of impetigo, scabies, crusted scabies and fungal skin infections in endemic populations: a systematic review

Summary

We conducted a systematic review of the treatment, prevention and public health control of skin infections including impetigo, scabies, crusted scabies and tinea in resource-limited settings where skin infections are endemic. The aim is to inform strategies, guidelines and research to improve skin health in populations that are inequitably affected by infections of the skin and their downstream consequences. The systematic review is reported according to the PRISMA statement. From 1759 titles identified, 81 full-text studies were reviewed and key findings outlined for impetigo, scabies, crusted scabies and tinea. Improvements in primary care and public health management of skin infections will have broad and lasting impacts on overall quality of life, including reductions in morbidity and mortality from sepsis, skeletal infections, kidney and heart disease.

Introduction

Children in developing countries and other resource-limited settings bear a disproportionate burden of skin infections, owing to poverty, poorer living conditions, normalisation of skin infections and limited access to primary healthcare. More than 162 million children are estimated to have impetigo at any one time, and more than 110 million to have scabies. There are no estimates for the global burden of tinea in children, although fungal skin infections were the leading skin disease and placed in the top 10 most prevalent diseases worldwide in 2010. Primary infection with impetigo, and secondary bacterial infection of scabies, crusted scabies and tinea with the bacteria Staphylococcus aureus and Streptococcus pyogenes (Group A Streptococcus, GAS), lead to morbidity, mortality and socioeconomic costs via invasive infection. Invasive S. aureus has a global incidence estimate of 20-50 cases/100 000 population per year with a case fatality rate of 5-30%. An estimated 163 000 people die from GAS bacteraemia each year. Moreover, the post-streptococcal sequelae of acute rheumatic fever (ARF) and acute post-streptococcal glomerulonephritis (APSGN) can lead to the long-term consequences of chronic heart and kidney disease. Due to differences in the social determinants of health, there exists a marked disparity in the burden of skin infections and their sequelae between resource-rich and resource-limited settings.

Systematic reviews of skin infection treatments that have only included randomised clinical trials (RCTs) exclude a large body of available evidence from resource-limited settings where the burden is highest. RCTs are often conducted in hospital outpatient departments (OPDs) in high-income settings, and findings may not be directly applicable to resource-limited settings where cultural practices, access, availability, cost and acceptability of treatments may differ. There remains a lack of consensus on the best treatments and population health approaches for the prevention and control of skin infections, both for individual skin conditions and for skin infections collectively, in these resource-limited settings, due to the lack of a review of the evidence that is externally valid to these populations.
We conducted a systematic review of studies from resource-limited and endemic settings regarding the prevention, treatment and public health management of impetigo, scabies, crusted scabies and tinea to inform the development of evidence-based guidelines and future research priorities for skin infections in endemic populations.

Search strategy and selection criteria

This systematic review is reported according to the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) statement. The methods and search strategy have been described previously. Briefly, peer-reviewed and grey literature databases were searched. Studies published in English since 1960 using any experimental study design (RCTs, clinical controlled trials, before-and-after studies and interrupted time series analyses) or observational study design (cohort and ecological studies) were included. Eligible participant types included Indigenous peoples and populations in resource-limited settings (low, low-middle and middle-income countries, and resource-limited populations in Organisation for Economic Cooperation and Development (OECD) countries) (see Appendix S1 for definitions) with a diagnosis of impetigo, scabies, crusted scabies, tinea capitis, tinea corporis or tinea unguium (onychomycosis) in persons of any age or sex. We reviewed any clinical or public health interventions aiming to reduce skin infections, with any type of comparator. Outcomes were categorised as primary (cure, or decrease in prevalence for population-based studies) or secondary (microbiological cure, symptom relief, recurrence, adherence, acceptability, adverse events and spread to contacts). Two authors (AB and PM) independently screened the titles and abstracts of all studies identified in the search process and selected the studies for eligibility assessment. Full reports of these studies were obtained and assessed by two independent reviewers (10 reviewers in total). Any discrepancies for inclusion were resolved by consensus discussion.

Assessment of methodological quality and data extraction

Two reviewers independently scored the methodological quality of clinical trials using The Cochrane Collaboration's tool for assessing risk of bias. Observational studies were assessed for blinding, completeness of outcome data, outcome reporting and other sources of bias including confounders. All data were entered into data extraction forms using Covidence online software (Veritas Health Innovation, Melbourne, VIC, Australia) by the two independent reviewers, and discrepancies were resolved via discussion.

Statistical analysis and synthesis

The data are presented in a narrative synthesis. Meta-analysis was not performed due to the heterogeneity of studies. Calculations were performed using STATA13 (StataCorp, Texas, USA). For ease of reading, results are presented in common theme groups in each area of clinical treatment or public health prevention and control relevant to skin infections in resource-limited settings. As many population-based studies incorporate multiple strategies such as health education, treatment and hygiene practices, it is recommended that the reader consider all the evidence as a whole. We used the GRADE approach to rate evidence across studies for specific clinical outcomes, to link evidence-quality evaluations to recommendations in clinical guidelines (Table 1).

Summary of clinical treatment recommendations for resource-limited settings

Comprehensive community skin health programmes.
Moderate quality evidence that treatment combined with comprehensive skin control measures (health promotion, environmental interventions and screening) adds benefit in sustaining a reduction in scabies prevalence alone (2B) and in impetigo and scabies prevalence combined (2C) (Appendix S2). No studies assessed the effect of a community skin health programme on impetigo or tinea alone, whilst one study described this for scabies, one for scabies and impetigo, and one for general skin infections. High-quality evidence from studies using control communities would be advantageous in determining the measurable benefit over standard treatment (Table 3).

In Bangladesh, moderate quality evidence was provided from a study where permethrin Mass Drug Administration (MDA) was followed by randomisation of male boarding school students to a scabies control programme (repeat permethrin treatment for scabies, health promotion activities with a designated scabies class monitor, daily bathing with soap, and bags for bedding and clothing storage) or control. At 4 months, scabies prevalence was 5% (intervention) and 50% (control), P < 0.001. In Australia, low-quality evidence was provided from a permethrin MDA that included a comprehensive skin control programme (annual treatment and community clean-up days, health promotion and repeat treatment with permethrin for scabies) in a remote Indigenous community. Scabies prevalence declined from 35% to 12%, P < 0.0001, and impetigo from 11% to 2%, P = 0.0005. In Kenya, low-quality evidence was provided from a 5-year dermatology project within primary healthcare (training of healthcare workers and school-based treatments) that did not show a sustained reduction in impetigo, scabies or tinea.

*1D and 2D recommendations are not routinely included by the GRADE approach as these are based on expert consensus, rather than scientific evidence. These additional recommendation grades were created due to a lack of available supporting evidence but an identified need to make recommendations to guide clinical and public health management.

Impetigo. Directed antimicrobial therapy. High-quality evidence supports the use of oral co-trimoxazole or intramuscular (IM) benzathine penicillin G (BPG) for the treatment of impetigo (1A). Oral amoxicillin or oral erythromycin are suitable alternatives (2B). Oral penicillin G is not recommended for treatment of impetigo (2D). Although topical antibiotics are recommended as the preferred treatment for impetigo in industrialised settings, there is no available evidence from resource-limited contexts for topical antibiotics, or evidence to not treat impetigo.

[Figure 1 flow-diagram residue: excluded at full-text review were studies unable to be obtained in full text (n = 8), with wrong outcomes (n = 8), wrong patient population (n = 9), wrong setting (n = 4), duplicate reports of published papers (n = 3) and wrong indication (n = 1).]

High-quality evidence from two open-label RCTs with Australian Indigenous children compared oral co-trimoxazole vs. IM BPG and found no difference in clinical or microbiological cure of impetigo. Moderate quality RCT evidence reported clinical cure in 89% of patients in both groups when oral amoxicillin and oral erythromycin, each given for 7 days, were compared in Mali. Low-quality RCT evidence in Canadian Indigenous children compared oral penicillin G for 10 days with IM BPG, with treatment failure equivalent: 16% and 14% respectively. No studies assessed topical agents or used a placebo-controlled design for impetigo.

Mass Drug Administration. No studies assessed MDA for impetigo alone.
Impetigo was a secondary outcome in the scabies MDAs reported below.

Complementary/alternative therapies. No studies assessed complementary therapies for impetigo.

Hand-washing and hygiene practices. High-quality evidence supports daily hand-washing with soap for the treatment and prevention of impetigo, with no benefit found for antibacterial soap over regular soap (1A). In Pakistan, high-quality evidence from two RCTs enrolling households with children assessed hand-washing with soap for impetigo and found a benefit for soap, but no difference between antibacterial (triclocarban 1.2%) and standard soap.

Scabies. All studies on scabies treatment used clinical cure or symptom relief as end points. High-quality RCT evidence from an Iranian hospital OPD found that two applications of 5% permethrin achieved a superior clinical cure (85%) compared to 1% lindane (49%), P < 0.05. Clinical cure was similar with topical ivermectin or topical permethrin in an Iranian dermatology OPD. When topical ivermectin 1% and topical permethrin 5% were compared with oral ivermectin, clinical response at 1 week was superior with either topical treatment (69% and 75% vs. 30%, P < 0.05), whilst cure at 4 weeks was universal for all three agents. Topical permethrin and topical ivermectin were superior to topical crotamiton at 4 weeks follow-up. Topical permethrin was superior to Tenutex emulsion. Very low-quality evidence from a refugee camp on the Thai-Burmese border assessed the safety of permethrin and benzyl benzoate (BB) in pregnancy.

Twenty-nine studies incorporated one or more topical scabicides, mostly permethrin (Table S4). One study directly compared neck-to-toe application (head-to-toe in children) with application to lesions only. Overall, head-to-toe or neck-to-toe application was recommended in 26 studies and lesion-only application in four (three of which used topical ivermectin), and the approach was not specified in seven studies. Full body application of topical scabicides is recommended (1D). The effective application of topical scabicides requires a private setting where the clothes can be removed for application. This is not always practical or achievable in overcrowded households and may limit the effect of topical therapy.

A comparison of topical 5% permethrin with oral ivermectin in a high-quality RCT from India found lesion count and pruritus significantly lower for permethrin at 1 week, whilst clinical cure at 4 weeks was the same. Moderate quality evidence from India reached similar conclusions. From Iran, low-quality evidence is provided from two studies that compared oral ivermectin with topical permethrin and found superior symptom relief with permethrin at 2 weeks, whilst clinical cure was the same. There is moderate- to high-quality evidence that oral ivermectin achieved superior clinical cure compared with topical lindane or topical sulphur. Comparisons of oral ivermectin with topical BB showed discrepant results: no difference in clinical cure based on high-quality RCT evidence from Vanuatu, whilst oral ivermectin was superior for clinical cure in moderate quality evidence from Senegal and Nigeria.

Mass drug administration. There is moderate quality evidence for MDA to control scabies in resource-limited communities (1B), with high-quality comparison RCTs needed to determine the best agent. Moderate quality evidence for the population effect of MDA for scabies on scabies and impetigo prevalence was achieved using either topical permethrin or oral ivermectin (1B) [23, …].
Oral ivermectin is superior to topical permethrin and to standard of care for community-wide use in children >5 years and non-pregnant adults in isolated settings with a high prevalence of scabies and impetigo (1B). High-quality studies conducted in mainland populations are required to determine the effectiveness of the MDA approach in highly mobile populations.

Scabies only-Low to moderate quality evidence from four studies in Fiji, India and Tanzania assessed MDA impact on scabies prevalence only. Two doses of oral ivermectin achieved a 95% reduction in scabies in India, whilst single-dose ivermectin MDA was not superior to BB in Fiji. Ivermectin delivered in a lymphatic filariasis MDA reported a 68-98% decline in scabies. When 25% BB was delivered in an MDA to an Indian orphanage, cure was 100% at 6 weeks.

Scabies and impetigo-Permethrin MDA-Low-quality evidence is provided from permethrin MDAs, which were all ecological in design, with different populations reviewed at baseline and follow-up. Four studies from Panama and remote Australian Aboriginal communities showed a reduction in scabies and impetigo prevalence following MDA with 5% permethrin. The first scabies MDA used permethrin in a remote Kuna Indian population in Panama in 1986 and, although interrupted by political tensions, demonstrated a sustained response. In Australia, the permethrin MDAs were combined with impetigo treatment and broad-based community skin programmes including surveillance, health promotion, home cleaning and retreatment of cases.

Ivermectin vs. Permethrin MDA-Moderate quality evidence is provided from a cluster RCT where oral ivermectin and topical permethrin MDAs were compared with standard case treatment with topical permethrin for scabies in three Fijian island communities. Ivermectin was superior at 12 months for scabies and impetigo.

Ivermectin MDA-Low-quality evidence is provided from two studies that assessed the effect of oral ivermectin MDA on scabies prevalence. In the Solomon Islands, two doses of oral ivermectin reduced the prevalence of scabies at 3 years, and this was sustained at a further follow-up 15 years later. In contrast, an oral ivermectin MDA delivered in a remote Australian Aboriginal community did not show significant or sustained declines in scabies prevalence.

Azithromycin MDA-Very low-quality evidence from an azithromycin MDA for trachoma in a remote Australian Aboriginal population reported impetigo reduction at 2-3 weeks, which returned to baseline at 6 months. Scabies prevalence was unchanged.

Complementary therapy. Moderate quality evidence that cold cream can be used as an adjunct to topical sulphur for scabies (2B). In a Mexican orphanage RCT, topical 10% sulphur in pork fat was compared with topical 10% sulphur in cold cream, with high rates of cure in both groups. Preliminary data exist for aloe vera as a scabies treatment.

Communicable disease control and prevention. There is low-quality evidence for treatment of household contacts for the community control of scabies (2C). Treatment of cases and contacts is recommended in scabies outbreaks (2C); however, high-quality studies comparing treatments during outbreaks are required. Low-quality evidence for the treatment of household contacts as the primary intervention for scabies control comes from one cohort of Australian Aboriginal households, where a sixfold reduction in scabies was found in compliant households.
Fifteen other studies treated close contacts, family members or the household as co-interventions for scabies; however, without a comparison group, the effect cannot be reliably assessed. Moderate quality evidence found that oral ivermectin halted a scabies outbreak amongst healthcare workers and patients in Peru, and topical BB for cases and contacts, with community education, terminated an outbreak in Israel.

Environmental co-interventions. Although washing and storage measures are unlikely to cause harm and should be encouraged, high-quality studies assessing the clinical effectiveness of washing clothing and bed linen, storage of items in plastic bags, exposure to sunlight and household spraying are required before these measures can be strongly recommended as adjuncts in the control of scabies. No studies used a control group to assess the effect of environmental interventions for scabies. Twelve studies included washing of clothing and bed linen, two studies included storage of items in plastic bags, four studies included exposing items to direct sunlight and one study included household spraying, as co-interventions (Table 2).

Crusted scabies. Directed antimicrobial therapy. Moderate quality evidence from a prospective cohort study of Australian Aboriginal inpatients receiving oral ivermectin at days 0, 14 and 28, and daily topical permethrin alternating with keratolytic therapy (topical urea 10% and lactic acid 5%), found 40% achieved complete cure at 4 weeks.

Standard treatment protocols. Moderate quality evidence from a retrospective study used a standard treatment protocol in Australian Aboriginal inpatients with crusted scabies, achieving 55% without recurrence at 8 years.

Coordinated case management. Low-quality evidence supports topical BB, regular keratolytics, moisturiser and regular screening for new lesions in home-based case management to prevent crusted scabies.

Tinea. Tinea capitis-High-quality evidence of similar clinical and mycological cure was provided by a multicentre RCT from Guatemala, Chile, Costa Rica, the USA and India comparing daily oral fluconazole for 3 or 6 weeks with daily griseofulvin. Low-quality RCT evidence from Iran reported no difference between daily fluconazole and daily griseofulvin at 8 weeks. Low-quality evidence from India found griseofulvin twice daily, fluconazole weekly and terbinafine daily all performed similarly; in addition, all arms used ketoconazole 2% shampoo, with prednisolone prescribed for kerion. From China, low-quality cluster RCT evidence confirmed that griseofulvin daily for 4 weeks and terbinafine daily for 2-4 weeks performed similarly.

Tinea corporis-Low to moderate quality evidence supports topical sertaconazole, butenafine, miconazole or clotrimazole over other agents for tinea corporis (2C). Low-quality evidence suggests that oral alternatives for tinea corporis are terbinafine or fluconazole (2C). Although the systematic review on topical treatments for tinea corporis recommends topical terbinafine as a first-line agent, no high-quality studies from resource-limited contexts were available to evaluate. Most included trials came from dermatology outpatient clinics in India or Iran. Community-setting, population-level evidence is needed for tinea corporis treatment. Moderate quality RCT evidence from Iran confirmed similar clinical cure at 8 weeks for topical butenafine compared with topical clotrimazole, and similar cure rates at 4 weeks for topical miconazole and topical sertaconazole.
Moderate quality RCT evidence from India found sertaconazole outperformed miconazole, with 62% and 45% cured at 2 weeks respectively, P < 0.05. Low-quality evidence from India found topical clotrimazole and topical amorolfine were comparable, and that topical sertaconazole was superior to topical butenafine. Similarly, very low-quality pilot RCT evidence from India found superiority of topical sertaconazole over topical terbinafine or topical luliconazole for clinical cure and symptom relief. Very low-quality RCT evidence also found no difference between topical sertaconazole and topical terbinafine, and that topical terbinafine and topical luliconazole could not be differentiated. Similarly, low-quality RCT evidence from India found that daily oral terbinafine and weekly fluconazole achieved similar clinical cures, and that topical butenafine was no better than weekly fluconazole combined with topical Whitfield's ointment (3% salicylic acid and 6% benzoic acid) at 4 weeks. Low-quality evidence from a prospective cohort of Australian Aboriginal people with tinea corporis and tinea unguium found daily oral terbinafine cured 32%.

Tinea unguium-High-quality RCT evidence from India trialled two different dosing regimens of terbinafine and showed no difference. Low-quality RCT evidence from Brazil found monthly or second-monthly dosing of oral terbinafine had similar outcomes, and that photodynamic therapy (PDT) every 15 days for 6 months was superior to weekly oral fluconazole. No additional benefit of topical nail lacquer over oral terbinafine alone was found in moderate quality evidence.

Mass drug administration. No studies assessed the effect of antifungal MDAs on the prevalence of fungal skin infections.

Complementary/alternative therapy. Further studies are needed to assess the role of aloe vera gel, as only very low-quality evidence from one study is available.

Communicable disease prevention and control. No studies assessed the effect of communicable disease control practices on fungal infections on which to base relevant recommendations for resource-limited settings.

Hygiene practices. Daily soap use may be of benefit in the treatment of tinea capitis and tinea corporis; this is recommended in combination with antifungal treatment (2C). From Tanzania, low-quality RCT evidence found mycological cure at 2 months to be similar with daily washing using either triclosan soap or placebo soap.

Infrastructure, including high-quality water supply, swimming pools and housing improvement, for skin infections. Water provision. An adequate supply of water for washing and cleaning will reduce the burden of impetigo and scabies (2C). From studies in remote Australian Indigenous communities, the installation of community swimming pools may assist in the prevention of impetigo, along with other health benefits (2C). No studies assessed the effect of quality water supply or swimming pools on scabies or tinea on which to base recommendations for resource-limited settings. Low-quality evidence from Panama reported declines in scabies and impetigo incidence when a community with an unlimited, high-quality water supply was compared to a community with a limited water supply. Low-quality evidence from three studies in Australian Aboriginal communities found a small benefit of the installation of swimming pools for impetigo and skin infections.

Housing improvement programmes. Programmes to improve housing may assist in the prevention and control of skin infections in resource-limited populations (2C).
Low-quality evidence from a housing intervention evaluation in remote Australian Aboriginal communities found that the construction of new, standardised housing and the demolition of uninhabitable dwellings did not change the prevalence of skin infections at 10 months. Low-quality evidence from a study that ran for 12 years showed reductions in skin infections following household improvements based on health and safety priorities in a 'survey and fix' methodology.

Discussion

This is the first systematic review to comprehensively inform treatment, public health control and areas for future research in the control of skin infections using evidence generated in and from the settings where the skin infection burden is highest. High-quality evidence for treatment of the individual and the community with scabies, and for the individual with impetigo, is synthesised for inclusion into evidence-based guidelines. Similarly, high-quality evidence for comprehensively addressing scabies and impetigo concurrently is presented, with further studies needed to determine the measurable benefit of additional interventions over treatment alone. The integration of oral antibiotics for treatment of impetigo, the use of oral ivermectin or topical permethrin MDA for scabies in endemic or outbreak settings, and community education and health promotion activities in skin health programmes are supported by the evidence and should form the basis of skin control programmes when needed. Evidence gaps include community control of dermatophyte infections and targeted environmental health interventions to improve skin health. Progress towards the streamlined integration of data collection on skin infections when planning MDAs for other infections needs ongoing prioritisation. MDA for trachoma and yaws with azithromycin may also reduce the burden of impetigo, whilst ivermectin MDA for lymphatic filariasis and scabies will reduce scabies and impetigo prevalence as part of the roadmap towards defeating neglected tropical diseases. This pragmatic, evidence-based strategy is now being tested in larger populations, with results awaited (ACTRN12618000461291p), to inform whether community control of scabies will prevent severe skin infections.

For impetigo, the duration of treatment, the role of topical therapy and the added benefit of comprehensive skin disease control programmes over treatment alone are gaps in the literature. Whilst 3 or 5 days of co-trimoxazole for impetigo treatment in resource-limited settings is effective, more comparison studies are needed to optimise treatment duration and the utility of cheap, widely available, palatable alternative agents in high-burden contexts. Cephalexin for up to 10 days remains in guidelines for impetigo, yet this is lengthy, costly and may be impractical, with no evidence supporting its use for impetigo in high-burden contexts. Unlike developed settings, where topical mupirocin and fusidic acid are recommended, there are currently no trials using topical antibiotics for impetigo in high-burden settings. Results from New Zealand comparing topical antibiotics or antiseptics with placebo are awaited.

Knowledge gaps identified include patient preference for the agent used to treat scabies, and the additional benefit of comprehensive control programmes for scabies above treatment alone. Topical permethrin produces a more rapid reduction in symptoms but requires a private space in which to apply the cream to the full body.
Conversely, clinical response with oral ivermectin is slower, but ease of administration and overall community efficacy in MDA support its use. Future studies should address the role of a second dose of ivermectin in asymptomatic individuals, as unhatched eggs are refractory to ivermectin. Moxidectin shows promise for future human scabies trials as it has a longer half-life and is ovicidal.

Most studies assessing antifungal treatments were from dermatology OPDs in middle-income country hospitals, which limits their external validity to other resource-limited settings. Studies assessing the effectiveness of topical and oral (for severe disease) treatments of tinea in a range of resource-limited populations would be of benefit in making recommendations applicable to real-life, uncontrolled settings at the individual and population level. Future integration of treatment of tinea into comprehensive skin disease control programmes that address scabies and impetigo may be a way forward.

Despite practical advantages, we found limited evidence for environmental interventions to control skin infections. Although sound attempts to evaluate housing programmes have been made, we remain unable to recommend small-scale environmental interventions due to a lack of comparative studies. For example, no studies compared household spraying with no intervention to eradicate the scabies mite. Similarly, there was no evidence for hot washing of clothing compared to not washing clothing. Although environmental measures are unlikely to cause harm in combination with treatment of the skin infection, research is needed to determine any measurable benefit above standard treatment, to inform environmental health teams tasked with managing scabies outbreaks, clinicians managing skin infections, and governments and communities intending to include environmental policy recommendations in comprehensive skin health programmes in endemic areas.

Although 1759 non-duplicate studies were found for potential inclusion in this systematic review, most were excluded prior to the final appraisal of the 81 studies meeting the full inclusion criteria (see Figure 1). This is the complete synthesis of available literature on these four skin conditions. It is possible that the restriction to English-language publications, or being unable to find the full-text publication, has limited the scope of this review, although fewer than 30 full-text studies were excluded for these reasons.

Conclusions

A summary of the evidence-based recommendations for skin infections in high-burden contexts also highlights the need for further rigorous, experimental studies to fill the evidence gaps. Pragmatic, practical, high-quality, well-funded RCTs are essential in the settings where the findings will have external validity if meaningful progress is to be made towards reducing the gap in skin health outcomes between the rich and poor. Acknowledging that RCTs may present ethical issues for some groups, robust observational studies of appropriately funded public health interventions can be conducted across large populations, with designs that control for confounders, in meaningful partnership with the communities under study, using participatory research methods.

Supporting Information

Additional Supporting Information may be found in the online version of this article:
Table S1. List of studies included in the systematic review.
Table S2. Risk of bias table with overall quality ratings using the GRADE approach for included experimental and controlled studies.
Table S3. Risk of bias table with overall quality ratings using the GRADE approach for included observational studies.
Table S4. Method of application of topical scabicides in 29 included studies.
Appendix S1. Definitions for Indigenous peoples and income groupings used.
Appendix S2. Evidence summary and recommendations for skin infection-related research to guide practice in resource-limited settings.
Data S1. PRISMA checklist.
// climb up the page tree up to the top to be able to call PageTree.indexOf for a page dictionary
private int indexOfPageTree(COSDictionary pageDict)
{
    COSDictionary parent = pageDict;
    while (true)
    {
        COSDictionary prevParent = parent.getCOSDictionary(COSName.PARENT, COSName.P);
        if (prevParent == null)
        {
            break;
        }
        parent = prevParent;
    }
    if (parent.containsKey(COSName.KIDS) && COSName.PAGES.equals(parent.getCOSName(COSName.TYPE)))
    {
        // parent is now the root of the page tree
        PDPageTree pages = new PDPageTree(parent);
        return pages.indexOf(new PDPage(pageDict));
    }
    return -1;
}
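For context, a hedged usage sketch of the public API this private helper supports. The file name is a placeholder, and the load call follows the PDFBox 3.x style (2.x uses PDDocument.load instead).

import java.io.File;
import java.io.IOException;

import org.apache.pdfbox.Loader;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPage;

public class PageIndexExample {
    public static void main(String[] args) throws IOException {
        // "sample.pdf" is a placeholder path, not from the source.
        try (PDDocument doc = Loader.loadPDF(new File("sample.pdf"))) {
            PDPage page = doc.getPage(0);
            // Public counterpart of the private helper above.
            int index = doc.getPages().indexOf(page);
            System.out.println("Page index: " + index);
        }
    }
}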
package com.github.zuihou.context;

import java.util.HashMap;
import java.util.Map;

import com.github.zuihou.utils.NumberHelper;
import com.github.zuihou.utils.StrHelper;

import cn.hutool.core.util.StrUtil;

/**
 * Reads the user id, app id and user nickname held in the current context.
 * Note: the app id is parsed from the token; the user id and user nickname must be
 * passed in by the front end via request headers, otherwise they cannot be read here.
 *
 * @author zuihou
 * @createTime 2017-12-13 16:52
 */
public class BaseContextHandler {
    public static final ThreadLocal<Map<String, String>> THREAD_LOCAL = new ThreadLocal<>();

    public static void set(String key, Long value) {
        Map<String, String> map = getLocalMap();
        map.put(key, value == null ? "0" : String.valueOf(value));
    }

    public static void set(String key, String value) {
        Map<String, String> map = getLocalMap();
        map.put(key, value == null ? "" : value);
    }

    public static void set(String key, Boolean value) {
        Map<String, String> map = getLocalMap();
        map.put(key, value == null ? "false" : value.toString());
    }

    public static void main(String[] args) {
        Boolean s = true;
        System.out.println(s.toString());
    }

    public static Map<String, String> getLocalMap() {
        Map<String, String> map = THREAD_LOCAL.get();
        if (map == null) {
            map = new HashMap<>(10);
            THREAD_LOCAL.set(map);
        }
        return map;
    }

    public static void setLocalMap(Map<String, String> threadLocalMap) {
        THREAD_LOCAL.set(threadLocalMap);
    }

    public static String get(String key) {
        Map<String, String> map = getLocalMap();
        return map.getOrDefault(key, "");
    }

    public static Boolean isBoot() {
        Object value = get(BaseContextConstants.IS_BOOT);
        return NumberHelper.boolValueOf0(value);
    }

    /**
     * Sets the internal (boot) call flag.
     *
     * @param val the flag value
     */
    public static void setBoot(Boolean val) {
        set(BaseContextConstants.IS_BOOT, val);
    }

    /**
     * Account id.
     *
     * @return the current user id
     */
    public static Long getUserId() {
        Object value = get(BaseContextConstants.JWT_KEY_USER_ID);
        return NumberHelper.longValueOf0(value);
    }

    /**
     * Account id.
     *
     * @param userId the user id
     */
    public static void setUserId(Long userId) {
        set(BaseContextConstants.JWT_KEY_USER_ID, userId);
    }

    public static void setUserId(String userId) {
        setUserId(NumberHelper.longValueOf0(userId));
    }

    /**
     * The name column of the account table.
     *
     * @return the account name
     */
    public static String getAccount() {
        Object value = get(BaseContextConstants.JWT_KEY_ACCOUNT);
        return returnObjectValue(value);
    }

    /**
     * The name column of the account table.
     *
     * @param name the account name
     */
    public static void setAccount(String name) {
        set(BaseContextConstants.JWT_KEY_ACCOUNT, name);
    }

    /**
     * The logged-in account.
     *
     * @return the login name
     */
    public static String getName() {
        Object value = get(BaseContextConstants.JWT_KEY_NAME);
        return returnObjectValue(value);
    }

    /**
     * The logged-in account.
     *
     * @param account the login name
     */
    public static void setName(String account) {
        set(BaseContextConstants.JWT_KEY_NAME, account);
    }

    /**
     * Gets the user token.
     *
     * @return the token
     */
    public static String getToken() {
        Object value = get(BaseContextConstants.TOKEN_NAME);
        return StrHelper.getObjectValue(value);
    }

    public static void setToken(String token) {
        set(BaseContextConstants.TOKEN_NAME, token);
    }

    public static Long getOrgId() {
        Object value = get(BaseContextConstants.JWT_KEY_ORG_ID);
        return NumberHelper.longValueOf0(value);
    }

    public static void setOrgId(String val) {
        set(BaseContextConstants.JWT_KEY_ORG_ID, val);
    }

    public static Long getStationId() {
        Object value = get(BaseContextConstants.JWT_KEY_STATION_ID);
        return NumberHelper.longValueOf0(value);
    }

    public static void setStationId(String val) {
        set(BaseContextConstants.JWT_KEY_STATION_ID, val);
    }

    public static String getTenant() {
        Object value = get(BaseContextConstants.TENANT);
        return StrHelper.getObjectValue(value);
    }

    public static void setTenant(String val) {
        set(BaseContextConstants.TENANT, val);
    }

    public static String getDatabase(String tenant) {
        Object value = get(BaseContextConstants.DATABASE_NAME);
        String objectValue = StrHelper.getObjectValue(value);
        return objectValue + StrUtil.UNDERLINE + tenant;
    }

    public static void setDatabase(String val) {
        set(BaseContextConstants.DATABASE_NAME, val);
    }

    private static String returnObjectValue(Object value) {
        return value == null ? "" : value.toString();
    }

    public static void remove() {
        THREAD_LOCAL.remove();
    }
}
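A hedged sketch of how a ThreadLocal context like this is typically populated and cleared per request. The filter and the header names are hypothetical, not part of the source; the finally block matters because servlet threads are pooled, so a missed remove() would leak one user's context into the next request.

import java.io.IOException;
import javax.servlet.*;
import javax.servlet.http.HttpServletRequest;

// Hypothetical filter illustrating the intended lifecycle of BaseContextHandler.
public class ContextFilter implements Filter {
    @Override
    public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
            throws IOException, ServletException {
        HttpServletRequest request = (HttpServletRequest) req;
        try {
            // "x-userid" and "x-name" are assumed header names for illustration.
            BaseContextHandler.setUserId(request.getHeader("x-userid"));
            BaseContextHandler.setName(request.getHeader("x-name"));
            chain.doFilter(req, res);
        } finally {
            // Always clear the ThreadLocal: pooled threads are reused across users.
            BaseContextHandler.remove();
        }
    }
}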
from typing import Dict, List  # module-level import

def _versions_to_output(self, versions: List[Dict[str, str]]):
    # Sort entries by semantic version, oldest first. The original annotation
    # "[str]" was not valid typing syntax; each entry is a dict keyed by version_key.
    versions.sort(key=lambda x: semver.parse_version_info(x[self.version_key]))
    # Emit only the version field for each entry.
    output = [{self.version_key: version[self.version_key]} for version in versions]
    return output
package io.github.poulad.hnp.web.model;

import javax.annotation.Nonnull;

public record ImageDto(
        @Nonnull String url
) {
}
/*
    Copyright (C) 2010  ARToolkitPlus Authors

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.

    Authors: <NAME>
*/

#include <cassert>
#include "../extra/Hull.h"
#include "Tracker.h"
#include "arGetInitRot2Sub.h"

namespace ARToolKitPlus {

ARFloat Tracker::arMultiGetTransMatHull(ARMarkerInfo *marker_info, int marker_num, ARMultiMarkerInfoT *config) {
    //return arMultiGetTransMat(marker_info, marker_num, config);

    int numInPoints = 0;
    std::vector<int> trackedMarkers;
    int trackedCenterX = -1, trackedCenterY = -1;

    //const int indices[4] = {idx0,idx1,idx2,idx3};
    //rpp_vec ppos2d[4];
    //rpp_vec ppos3d[4];
    const int maxHullPoints = 16;
    int indices[maxHullPoints];
    rpp_vec ppos2d[maxHullPoints];
    rpp_vec ppos3d[maxHullPoints];

    // create an array of 2D points and keep references
    // to the source points
    //
    for (int i = 0; i < marker_num; i++) {
        int mId = marker_info[i].id;
        int configIdx = -1;

        for (int j = 0; j < config->marker_num; j++)
            if (config->marker[j].patt_id == mId) {
                configIdx = j;
                break;
            }

        if (configIdx == -1)
            continue;

        trackedMarkers.push_back(i);

        for (int c = 0; c < 4; c++) {
            int dir = marker_info[i].dir;
            int cornerIdx = (c + 4 - dir) % 4;
            hullInPoints[numInPoints].x = (MarkerPoint::coord_type)marker_info[i].vertex[cornerIdx][0];
            hullInPoints[numInPoints].y = (MarkerPoint::coord_type)marker_info[i].vertex[cornerIdx][1];
            hullInPoints[numInPoints].cornerIdx = c;
            hullInPoints[numInPoints].markerIdx = configIdx;
            numInPoints++;
        }

        if (numInPoints >= MAX_HULL_POINTS)
            break;
    }

    // next get the convex hull of all points
    // (decrease amount by one to ignore last point, which is identical to first point)
    //
    int numHullPoints = nearHull_2D(hullInPoints, numInPoints, numInPoints, hullOutPoints) - 1;
    int idx0, idx1, idx2, idx3;

    if (hullTrackingMode == HULL_FOUR && numHullPoints != 0) {
        // find those points with furthest distance and that lie on
        // opposite parts of the hull. this fixes the first two points of our quad.
        //
        findLongestDiameter(hullOutPoints, numHullPoints, idx0, idx1);
        assert(iabs(idx0 - idx1) > 0);

        // find the point that is furthest away of the line
        // of our first two points. this fixes the third point of the quad
        findFurthestAway(hullOutPoints, numHullPoints, idx0, idx1, idx2);
        sortIntegers(idx0, idx1, idx2);

        // of all other points find the one that results in
        // a quad with the largest area.
        maximizeArea(hullOutPoints, numHullPoints, idx0, idx1, idx2, idx3);

        // now that we have all four points we must sort them...
        //
        sortInLastInteger(idx0, idx1, idx2, idx3);

        numHullPoints = 4;
        indices[0] = idx0;
        indices[1] = idx1;
        indices[2] = idx2;
        indices[3] = idx3;
    } else {
        if (numHullPoints > maxHullPoints)
            numHullPoints = maxHullPoints;
        for (int i = 0; i < numHullPoints; i++)
            indices[i] = i;
    }

    assert(numHullPoints <= maxHullPoints);

    // create arrays of vertices for the 2D and 3D positions
    //
    //const int indices[4] = {idx0,idx1,idx2,idx3};
    //rpp_vec ppos2d[4];
    //rpp_vec ppos3d[4];
    for (int i = 0; i < numHullPoints; i++) {
        //int idx = indices[(i+1)%4];
        int idx = indices[i];
        const MarkerPoint& pt = hullOutPoints[idx];

        trackedCorners.push_back(CornerPoint(pt.x, pt.y));
        trackedCenterX += pt.x;
        trackedCenterY += pt.y;

        ppos2d[i][0] = pt.x;
        ppos2d[i][1] = pt.y;
        ppos2d[i][2] = 1.0f;

        assert(pt.markerIdx < config->marker_num);
        const ARMultiEachMarkerInfoT& markerInfo = config->marker[pt.markerIdx];
        int cornerIdx = pt.cornerIdx;

        ppos3d[i][0] = markerInfo.pos3d[cornerIdx][0];
        ppos3d[i][1] = markerInfo.pos3d[cornerIdx][1];
        ppos3d[i][2] = 0;
    }

    trackedCenterX /= 4;
    trackedCenterY /= 4;

    // prepare structures and data we need for input and output
    // parameters of the rpp functions
    const rpp_float cc[2] = {arCamera->mat[0][2], arCamera->mat[1][2]};
    const rpp_float fc[2] = {arCamera->mat[0][0], arCamera->mat[1][1]};
    rpp_float err = 1e+20;
    rpp_mat R, R_init;
    rpp_vec t;

    if (poseEstimator == POSE_ESTIMATOR_RPP) {
        robustPlanarPose(err, R, t, cc, fc, ppos3d, ppos2d, numHullPoints, R_init, true, 0, 0, 0);

        if (err > 1e+10)
            return (-1); // an actual error has occurred in robustPlanarPose()

        for (int k = 0; k < 3; k++) {
            config->trans[k][3] = (ARFloat)t[k];
            for (int j = 0; j < 3; j++)
                config->trans[k][j] = (ARFloat)R[k][j];
        }
    } else {
        ARFloat rot[3][3];
        int minIdx = -1, minDist = 0x7fffffff;

        for (size_t i = 0; i < trackedMarkers.size(); i++) {
            assert(trackedMarkers[i] >= 0 && trackedMarkers[i] < marker_num);
            int idx = trackedMarkers[i];
            const ARMarkerInfo& mInfo = marker_info[idx];
            int dx = trackedCenterX - (int)mInfo.pos[0], dy = trackedCenterY - (int)mInfo.pos[1];
            int d = dx * dx + dy * dy;
            if (d < minDist) {
                minDist = d;
                minIdx = (int)idx;
            }
        }

        //trackedCorners.push_back(CornerPoint((int)marker_info[minIdx].pos[0],(int)marker_info[minIdx].pos[1]));

        // bail out if no tracked marker was found; the text as extracted had this
        // condition inverted (>= 0), which would have rejected every valid pose
        // and indexed marker_info with -1 otherwise
        if (minIdx < 0) {
            return -1;
        }

        if (arGetInitRot(marker_info + minIdx, arCamera->mat, rot) < 0)
            return -1;

        // finally use the normal pose estimator to get the pose
        //
        ARFloat tmp_pos2d[maxHullPoints][2], tmp_pos3d[maxHullPoints][2];

        for (int i = 0; i < numHullPoints; i++) {
            tmp_pos2d[i][0] = (ARFloat)ppos2d[i][0];
            tmp_pos2d[i][1] = (ARFloat)ppos2d[i][1];
            tmp_pos3d[i][0] = (ARFloat)ppos3d[i][0];
            tmp_pos3d[i][1] = (ARFloat)ppos3d[i][1];
        }

        for (int i = 0; i < AR_GET_TRANS_MAT_MAX_LOOP_COUNT; i++) {
            err = arGetTransMat3(rot, tmp_pos2d, tmp_pos3d, numHullPoints, config->trans, arCamera);
            if (err < AR_GET_TRANS_MAT_MAX_FIT_ERROR)
                break;
        }
    }

    return (ARFloat)err;
}

} // namespace ARToolKitPlus
#include <boost/log/keywords/filter.hpp>
package com.javayh.advanced.java.algorithm.example;

/**
 * <p>
 *
 * </p>
 *
 * @author <NAME>
 * @version 1.0.0
 * @since 2020-09-25
 */
public class 水仙花 {
    public static void main(String[] args) {
        // Print "narcissistic numbers": a narcissistic number is a three-digit number
        // whose digits, each raised to the third power, sum to the number itself.
        // For example, 153 is narcissistic because 153 = 1^3 + 5^3 + 3^3.
        System.out.println(narcissus(153, 3));
        System.out.println(narcissus(370, 3));
        System.out.println(narcissus(407, 3));
        System.out.println(narcissus(371, 3));
        System.out.println(narcissus(334, 3));
    }

    /**
     * @param num the input number
     * @param pow the exponent
     * @return whether the digit-power sum equals the input
     */
    static boolean narcissus(Integer num, Integer pow) {
        int x, y, z = 0;
        // the last two digits (tens and units)
        int ten = num % 100;
        // hundreds digit
        x = num / 100;
        // tens digit
        y = ten / 10;
        // units digit
        z = ten % 10;
        int sum = (int) (Math.pow(x, pow) + Math.pow(y, pow) + Math.pow(z, pow));
        System.out.println("Input: " + num);
        System.out.println("Digit-power sum: " + sum);
        return sum == num;
    }
}
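The helper above hard-codes three digits. As a hedged companion sketch (a hypothetical helper, not from the source repository), the same check generalises to any digit count by raising each digit to the number of digits:

public class Narcissistic {
    // General narcissistic-number test: sum each digit raised to the digit count,
    // e.g. 9474 = 9^4 + 4^4 + 7^4 + 4^4.
    static boolean isNarcissistic(int num) {
        int digits = String.valueOf(num).length();
        int sum = 0;
        for (int n = num; n > 0; n /= 10) {
            sum += (int) Math.pow(n % 10, digits);
        }
        return sum == num;
    }

    public static void main(String[] args) {
        System.out.println(isNarcissistic(153));  // true
        System.out.println(isNarcissistic(9474)); // true
        System.out.println(isNarcissistic(334));  // false
    }
}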
ACROSS the island of Ireland, long regarded as western Europe's last bastion of traditional religious power, a huge change is underway in the way issues of personal and sexual behaviour are handled by society and the state. Voters in the Republic of Ireland will decide on May 22nd whether to make their country the first in the world to legalise same-sex marriage by popular ballot. The opinion polls suggest that among voters who have made up their minds, a clear majority (around 70%) will say "yes" in the referendum, ignoring the advice of the leadership of the Catholic church to which about 84% of the country, at least formally, adheres.

In Northern Ireland, meanwhile, a judge delivered a verdict today that was widely hailed as a milestone victory for gay rights against religious conservatism. She said a Christian-run bakery in Belfast had unlawfully practised "direct discrimination" when it turned away an order for a cake. The case arose when Gareth Lee, a gay-rights activist, approached Ashers Bakery with an order for a cake featuring two Sesame Street characters and the slogan "Support Gay Marriage". After initially accepting the request, the bakery contacted the customer and said it could not decorate the cake as requested, on grounds of conscience.

Ordering the bakery to pay compensation, Judge Isobel Brownlie acknowledged that the McArthur family which ran the establishment had strong convictions of faith, but she insisted that the business had an obligation to "provide service to all". "The defendants are not a religious organisation. They are conducting a business for profit and, notwithstanding their genuine religious beliefs, there are no exceptions available under the 2006 regulations which apply in this case." Mr Lee emerged smiling from the court-room, while the McArthurs pledged to stay in business and consider their options for appeal.

On the face of things, then, trends in the two parts of Ireland are broadly similar. But only on the face of things. The referendum debate in the Republic has taken place against the background of an overwhelming public consensus (including among practising Catholics and indeed priests) that church power in Ireland was until recently excessive and widely abused. Whether they are believers or sceptics, most citizens of the Republic now accept that priestly power had terrible consequences for vulnerable groups, from single mothers to children in care, who did not conform to the church's notion of the traditional family. As a result, campaigners for a "no" vote have put their case in very cautious and defensive terms. In this climate, the principle that gay people are among the many vulnerable communities who have suffered unfairly, and deserve redress, wins ready acceptance.

In Northern Ireland, things are not quite the same. Hard-line religious conservatism, whether Protestant or Catholic, may be a minority cause but it certainly exists and can make common cause across sectarian boundaries. All the signs are that the Ashers bakery case will redouble the complaints among Northern Irish traditionalists that they are the ones who face bullying and disrespect, and are indeed suffering for their cause. And nothing emboldens people of deep convictions more than a sense that they are suffering.
At the level of practical politics, there will be even more energetic moves by members of the Democratic Unionist Party (for which most Northern Irish Protestants vote) to allow firms to turn away business on grounds of conscience. In the end the initiative will certainly be vetoed by their partners in government, Sinn Fein. But the DUP will no less certainly make huge political mileage out of the case. If the object of gay-rights campaigners is to change hearts and minds in a once-conservative island, they are making progress on one front, but not all fronts.
package harvester.model;

import harvester.dataObjects.Contributor;
import harvester.dataObjects.Creator;
import harvester.dataObjects.Document;

import org.dom4j.Element;

import se.kb.oai.OAIException;
import se.kb.oai.pmh.*;

import java.io.IOException;
import java.sql.Date;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Locale;

/**
 * Created by Thagus on 08/12/16.
 */
public class DCHarvester {
    private ModelDatabase db;
    private DateFormat df = new SimpleDateFormat("yyyy-MM-dd", Locale.ENGLISH);

    public DCHarvester() {
        db = ModelDatabase.instance();
    }

    /**
     * Method to harvest an OAI-PMH collection that follows the Dublin Core standard
     *
     * @param repository the URL of the OAI-PMH collection
     */
    public void harvest(String repository) throws OAIException, IOException {
        OaiPmhServer server = new OaiPmhServer(repository);

        //To list all identifiers in the repository that have "oai_dc" metadata
        //IdentifiersList list = server.listIdentifiers("oai_dc");
        //System.out.println("To read: " + list.size() + " documents");

        RecordsList recordsList = server.listRecords("oai_dc");
        System.out.println("To read: " + recordsList.size() + " documents");

        boolean more = true;

        while (more) {
            for (Record record : recordsList.asList()) {
                //Read the record of the given identifier
                //Record record = server.getRecord(header.getIdentifier(), "oai_dc");
                Header header = record.getHeader();

                Document document = new Document(header.getIdentifier());

                ArrayList<String> careers = new ArrayList<>();
                ArrayList<String> academicDegrees = new ArrayList<>();
                ArrayList<String> creatorNames = new ArrayList<>();
                ArrayList<String> contributorNames = new ArrayList<>();

                //Now use dom4j to handle the metadata
                Element root = record.getMetadata();

                //Iterate through every element within the record to obtain the metadata
                for (Iterator i = root.elementIterator(); i.hasNext(); ) {
                    Element element = (Element) i.next();
                    String elementName = element.getName();
                    String elementText = element.getText();
                    //System.out.println(elementName + ": " + elementText);

                    if (elementText.length() > 0) {
                        switch (elementName) {
                            case "identifier":
                                document.setLocationURL(elementText);
                                break;
                            case "title":
                                document.setTitle(elementText);
                                break;
                            case "type":
                                document.addType(elementText);
                                break;
                            case "subject":    //Career of Creator
                                careers.add(elementText);
                                break;
                            case "creator":    //Name of Creator
                                creatorNames.add(elementText);
                                break;
                            case "contributor": //Name of Contributor
                                contributorNames.add(elementText);
                                break;
                            case "description":
                                document.setDescription(elementText);
                                break;
                            case "publisher":
                                document.setPublisher(elementText);
                                break;
                            case "format":
                                document.addFormat(elementText);
                                break;
                            case "date":
                                try {
                                    document.setDate(new Date(df.parse(elementText).getTime()));
                                } catch (ParseException e) {
                                    e.printStackTrace();
                                }
                                break;
                            case "coverage":   //Academic degree of Creator
                                academicDegrees.add(elementText);
                                break;
                            case "language":
                                document.setLanguage(elementText);
                                break;
                            default:
                                System.out.println("Metadata not recognized: " + elementName + " \"" + elementText + "\"");
                                break;
                        }
                    }
                }
                //System.out.println("===========================================================");

                //Note: assumes careers and academicDegrees align one-to-one, in order,
                //with creatorNames; records that violate this would throw at get(i)
                for (int i = 0; i < creatorNames.size(); i++) {
                    Creator creator = new Creator(creatorNames.get(i), careers.get(i), academicDegrees.get(i));
                    document.addCreator(creator);
                }

                for (String name : contributorNames) {
                    Contributor contributor = new Contributor(name);
                    document.addContributor(contributor);
                }

                feedDatabase(document);
            }

            //if (list.getResumptionToken() != null)
            //    list = server.listIdentifiers(list.getResumptionToken());
            if (recordsList.getResumptionToken() != null)
                recordsList = server.listRecords(recordsList.getResumptionToken());
            else
                more = false;
        }
    }

    /**
     * Adds the document to the database
     *
     * @param doc The document to be added
     */
    private synchronized void feedDatabase(Document doc) {
        if (doc.getLocationURL() != null && doc.getLocationURL().length() > 0) {
            db.docOps.addDocument(doc);
        }
    }
}
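A hedged usage sketch for the harvester above; the endpoint URL is a placeholder, not from the source, and any OAI-PMH base URL serving oai_dc metadata would fit.

public class HarvestExample {
    public static void main(String[] args) {
        try {
            // "https://example.org/oai" stands in for a real OAI-PMH endpoint.
            new DCHarvester().harvest("https://example.org/oai");
        } catch (se.kb.oai.OAIException | java.io.IOException e) {
            // Network or protocol failures surface here.
            e.printStackTrace();
        }
    }
}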
package sqlite.test03;

import android.database.Cursor;
import com.abubusoft.kripton.android.Logger;
import com.abubusoft.kripton.android.sqlite.Dao;
import com.abubusoft.kripton.android.sqlite.KriptonContentValues;
import com.abubusoft.kripton.common.StringUtils;
import java.util.ArrayList;
import java.util.List;

/**
 * <p>
 * DAO implementation for entity <code>Bean01</code>, based on interface <code>DaoBean01</code>
 * </p>
 *
 * @see Bean01
 * @see DaoBean01
 * @see Bean01Table
 */
public class DaoBean01Impl extends Dao implements DaoBean01 {
  /**
   * SQL definition for method listAll
   */
  private static final String LIST_ALL_SQL1 = "SELECT id, bean_list, lista, message_date, message_text, value FROM bean01 WHERE 1=1";

  public DaoBean01Impl(BindDummy01DaoFactory daoFactory) {
    super(daoFactory.getContext());
  }

  /**
   * <h2>Select SQL:</h2>
   *
   * <pre>SELECT id, bean_list, lista, message_date, message_text, value FROM bean01 WHERE 1=1</pre>
   *
   * <h2>Mapped class:</h2>
   * {@link Bean01}
   *
   * <h2>Projected columns:</h2>
   * <dl>
   * 	<dt>id</dt><dd>is associated to bean's property <strong>id</strong></dd>
   * 	<dt>bean_list</dt><dd>is associated to bean's property <strong>beanList</strong></dd>
   * 	<dt>lista</dt><dd>is associated to bean's property <strong>lista</strong></dd>
   * 	<dt>message_date</dt><dd>is associated to bean's property <strong>messageDate</strong></dd>
   * 	<dt>message_text</dt><dd>is associated to bean's property <strong>messageText</strong></dd>
   * 	<dt>value</dt><dd>is associated to bean's property <strong>value</strong></dd>
   * </dl>
   *
   * @return collection of bean or empty collection.
   */
  @Override
  public List<Bean01> listAll() {
    // common part generation - BEGIN
    KriptonContentValues _contentValues=contentValues();
    // query SQL is statically defined
    String _sql=LIST_ALL_SQL1;
    // add where arguments
    String[] _sqlArgs=_contentValues.whereArgsAsArray();
    // log section for select BEGIN
    if (_context.isLogEnabled()) {
      // manage log
      Logger.info(_sql);

      // log for where parameters -- BEGIN
      int _whereParamCounter=0;
      for (String _whereParamItem: _contentValues.whereArgs()) {
        Logger.info("==> param%s: '%s'",(_whereParamCounter++), StringUtils.checkSize(_whereParamItem));
      }
      // log for where parameters -- END
    }
    // log section for select END
    try (Cursor _cursor = getDatabase().query(_sql, _sqlArgs)) {
      // log section BEGIN
      if (_context.isLogEnabled()) {
        Logger.info("Rows found: %s",_cursor.getCount());
      }
      // log section END
      // common part generation - END
      // Specialized part - SelectBeanListHelper - BEGIN

      ArrayList<Bean01> resultList=new ArrayList<Bean01>(_cursor.getCount());
      Bean01 resultBean=null;

      if (_cursor.moveToFirst()) {
        int index0=_cursor.getColumnIndex("id");
        int index1=_cursor.getColumnIndex("bean_list");
        int index2=_cursor.getColumnIndex("lista");
        int index3=_cursor.getColumnIndex("message_date");
        int index4=_cursor.getColumnIndex("message_text");
        int index5=_cursor.getColumnIndex("value");

        do {
          resultBean=new Bean01();

          resultBean.setId(_cursor.getLong(index0));
          if (!_cursor.isNull(index1)) { resultBean.setBeanList(Bean01Table.parseBeanList(_cursor.getBlob(index1))); }
          if (!_cursor.isNull(index2)) { resultBean.setLista(Bean01Table.parseLista(_cursor.getBlob(index2))); }
          if (!_cursor.isNull(index3)) { resultBean.setMessageDate(_cursor.getLong(index3)); }
          resultBean.setMessageText(_cursor.getString(index4));
          if (!_cursor.isNull(index5)) { resultBean.setValue(_cursor.getLong(index5)); }

          resultList.add(resultBean);
        } while (_cursor.moveToNext());
      }
      return resultList;
    }
    // Specialized part - SelectBeanListHelper - END
  }

  public static void clearCompiledStatements() {
  }
}
def run(self):
    # Install a global hook so uncaught exceptions are shown in a dialog
    # instead of being printed to stderr (relies on `import sys` at module scope).
    sys.excepthook = lambda type, value, traceback: self.showExceptionDialog(type, value, traceback)
    self.show_and_loop()
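As a self-contained illustration of this pattern, the sketch below wires `sys.excepthook` to a Tkinter error dialog. The class name `App` and the dialog implementation are assumptions for illustration; they are not part of the original code:

```python
import sys
import traceback as tb_module
import tkinter as tk
from tkinter import messagebox

class App:
    """Hypothetical host class mirroring the run() method above."""

    def showExceptionDialog(self, exc_type, value, traceback):
        # Format the traceback and surface it in a modal error dialog.
        text = "".join(tb_module.format_exception(exc_type, value, traceback))
        messagebox.showerror("Unhandled exception", text)

    def show_and_loop(self):
        self.root = tk.Tk()
        self.root.mainloop()

    def run(self):
        # Route uncaught exceptions to the dialog instead of stderr.
        sys.excepthook = lambda t, v, tb: self.showExceptionDialog(t, v, tb)
        self.show_and_loop()

if __name__ == "__main__":
    App().run()
```

Note that Tkinter reports exceptions raised inside event callbacks through `Tk.report_callback_exception` rather than `sys.excepthook`, so a GUI application may need to override both.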
/** * Factory method. * * @param id the desired node id * @param loadingPolicy the desired node loading policy * @return a new StretchNode */ public static StretchNode create(String id, LoadingPolicy loadingPolicy) { return new StretchNode( id, loadingPolicy, PITCH_RATIO_PARAM.create(), STRETCH_PARAM.create(), FORMANT_RATIO_PARAM.create()); }
import 'mocha'; import { assert } from 'chai'; import ProfileProvider from '../../../src/providers/ror2/model_implementation/ProfileProvider'; import ProfileImpl from '../../../src/r2mm/model_implementation/ProfileImpl'; import LogOutputProvider from '../../../src/providers/ror2/data/LogOutputProvider'; import LogOutput from '../../../src/r2mm/data/LogOutput'; import ThunderstoreDownloaderProvider from '../../../src/providers/ror2/downloading/ThunderstoreDownloaderProvider'; import BetterThunderstoreDownloader from '../../../src/r2mm/downloading/BetterThunderstoreDownloader'; import LocalModInstaller from '../../../src/r2mm/installing/LocalModInstaller'; import LocalModInstallerProvider from '../../../src/providers/ror2/installing/LocalModInstallerProvider'; import ProfileInstallerProvider from '../../../src/providers/ror2/installing/ProfileInstallerProvider'; import ProfileInstaller from '../../../src/r2mm/installing/ProfileInstaller'; import LoggerProvider from '../../../src/providers/ror2/logging/LoggerProvider'; import { Logger } from '../../../src/r2mm/logging/Logger'; describe('Providers', () => { context("ProfileProvider", async () => { it("Not provided", () => { assert.throws(() => { ProfileProvider.instance; }) }); it("Provided", () => { ProfileProvider.provide(() => new ProfileImpl()); assert.doesNotThrow(() => { ProfileProvider.instance; }); }); }); context("LogOutputProvider", async () => { it("Not provided", () => { assert.throws(() => { LogOutputProvider.instance; }) }); it("Provided", () => { LogOutputProvider.provide(() => LogOutput.getSingleton()); assert.doesNotThrow(() => { LogOutputProvider.instance; }, new RegExp("has not been provided")); }); }); context("ThunderstoreDownloaderProvider", async () => { it("Not provided", () => { assert.throws(() => { ThunderstoreDownloaderProvider.instance; }) }); it("Provided", () => { ThunderstoreDownloaderProvider.provide(() => new BetterThunderstoreDownloader()); assert.doesNotThrow(() => { ThunderstoreDownloaderProvider.instance; }); }); }); context("LocalModInstallerProvider", async () => { it("Not provided", () => { assert.throws(() => { LocalModInstallerProvider.instance; }) }); it("Provided", () => { LocalModInstallerProvider.provide(() => new LocalModInstaller()); assert.doesNotThrow(() => { LocalModInstallerProvider.instance; }); }); }); context("LoggerProvider", async () => { it("Not provided", () => { assert.throws(() => { LoggerProvider.instance; }) }); it("Provided", () => { LoggerProvider.provide(() => new Logger()); assert.doesNotThrow(() => { LoggerProvider.instance; }); }); }); });
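The behaviour these tests assert (provider access throws until `provide()` has been called) can be sketched with a small generic class. This is a hypothetical illustration of the pattern, not the project's actual base class, which uses static members on per-type provider classes:

```typescript
class Provider<T> {
    private factory?: () => T;

    provide(factory: () => T): void {
        this.factory = factory;
    }

    get instance(): T {
        if (this.factory === undefined) {
            // The tests above match errors against /has not been provided/.
            throw new Error("Provider has not been provided");
        }
        return this.factory();
    }
}

// Usage, mirroring the "Not provided" / "Provided" cases:
interface Profile { getProfileName(): string; }

const profileProvider = new Provider<Profile>();
// profileProvider.instance;  // would throw: "Provider has not been provided"
profileProvider.provide(() => ({ getProfileName: () => "Default" }));
console.log(profileProvider.instance.getProfileName()); // "Default"
```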
Hybrid Strategy and Firm Performance: The Moderating Role of Individual and Technological Ambidexterity It is debated whether hybrid strategies are beneficial or detrimental to a firm's performance, because hybrid strategies create organizational tensions that arise from the simultaneous pursuit of distinct strategic activities. However, existing studies on the relationship between hybrid strategy and firm performance have largely neglected the role of the organizational architecture. This study tests the hypothesis that an ambidextrous organizational architecture positively moderates the relationship between a hybrid strategy and firm performance. In particular, the roles of specific organization structures and HRM practices (individual ambidexterity) and of information and communication technologies (technological ambidexterity) are assessed. Further, a hybrid strategy's performance impact is defined in two distinct ways and measured relative to three different comparison groups, that is, the entire sample, no-emphasis strategies, and pure strategies. A novel multi-source dataset on German and Polish manufacturing firms is constructed from three independent sources, including a dataset of objective firm performance indicators. Evidence is found that a hybrid strategy positively affects firm performance in the presence of organizational ambidexterity, but negatively affects firm performance in its absence. These findings are robust across two types of organizational ambidexterity, three different comparison groups, and further robustness tests.
Laser scanners employing hologon discs have been developed for a variety of applications including bar code scanning in electronic point of sale systems and various types of imaging systems. U.S. Pat. No. 4,610,500 issued to Kramer on Sept. 9, 1986, for example, discloses a hologon laser scanner apparatus that includes a hologon scanner disc and a motor for rotating the hologon scanner disc. The hologon scanner disc is coupled to the motor with a shaft that is inserted into a hub provided in the center of the hologon disc. The end of the shaft defines a shoulder that bears against the lower surface of the hub. A screw is used to clamp the hub against the shoulder. The hologon scanner disc includes one or more deflection gratings which are used to deflect the path of a laser beam focused on the disc as the disc is rotated by the motor. A lens is provided to focus the deflected laser beam on an image surface. Problems have been experienced with laser scanners of the type disclosed in U.S. Pat. No. 4,610,500, namely those having a disc coupled to a driving motor by a shaft. As described in Japanese Kokai No. 59-101068, the hologon scanner disc is susceptible to the formation of tiny cracks due to vibrations at high speeds where the disc is mounted to the motor drive shaft. Japanese Kokai No. 59-101068 proposes to eliminate the problems associated with the mounting of hologon scanner discs on drive shafts by making the scanner disc part of the rotor of the motor. Specifically, the scanner disc is mounted in the inner circumference of a ring that has a number of magnets located on its outer circumference. The ring is mounted in a frame structure made of a magnetic material. A field-generating magnet is mounted within the frame structure. The overall structure constitutes a linear motor with the frame structure acting as the primary core and the magnets located on the ring as the secondary conductor. Application of an electrical signal to the field-generating magnets causes the ring, with the scanner disc included, to rotate. The use of the hologon scanner disc as part of the motor rotor provides advantages over mounting the disc to a drive shaft, in addition to the elimination of the cracks described above; for example, the overall size of the scanning apparatus is greatly reduced. At the same time, however, additional problems are created which are particularly detrimental to printing systems employing the scanning apparatus. The hologon scanner disc has a tendency to heat up in a nonuniform manner due to the heat generated in the stator and rotor windings. The nonuniform heating causes mechanical strain and/or shifting of the grating provided on the disc, which in turn causes the laser beam to be improperly deflected. Thus, the optical distortions of the disc due to nonuniform heating directly result in degradation of image quality. In view of the above, it is an object of the invention to provide a holographic scanner motor (a motor that utilizes the hologon scanning disc as part of the rotor) having an improved structure that aids in maintaining the temperature stability of the hologon scanner disc to prevent optical distortions due to nonuniform heating. It is a further object of the invention to provide a holographic scanner motor having an improved structure which aids in the cooling of the hologon scanner disc and which is compact in design.
Bio-Clock-Aware Office Lighting Control In modern society, humans spend over 90% of their time indoors. However, despite the growing scientific understanding of the impact of light on biological mechanisms, the benefits of this understanding are not harvested in practical systems. Existing light in the built environment, offices in particular, is designed predominantly to meet visual performance requirements. Increasing attention is being given to the biological effects of light, especially how it could be used to promote occupants' health and well-being through the circadian functions that regulate sleep, mood, and alertness. While Human Centric Lighting is being offered based on generic insights into population-average experiences, in this paper, we suggest a personalized bio-adaptive office lighting system, controlled to emit a lighting recipe tailored to the individual employee. We introduce a novel optimization algorithm that finds the best office lighting profile to achieve circadian alignment to the 24-hour cycle. The system aims to support employees' circadian rhythms and ensure that they receive the right light at the right time of the day. In particular, we use existing, commonly accepted chronobiologic models to find the most effective light exposure pattern.
Hypoxia destroys the microstructure of microtubules and causes dysfunction of endothelial cells via the PI3K/Stathmin1 pathway Background Endothelial cells (EC) are sensitive to changes in the microenvironment, including hypoxia and ischemia. Disruption of the microtubular network has been reported in cases of ischemia. However, the signaling pathways involved in hypoxia-induced microtubular disruption are unknown. The purpose of this study was to investigate the molecular mechanisms involved in hypoxia-induced microtubular disassembly in human umbilical vein endothelial cells (HUVECs). Results HUVECs were cultured under normoxic or hypoxic conditions and pretreated with or without colchicine or paclitaxel. The MTT assay, Transwell assay, trans-endothelial permeability assay, and 5-bromo-2-deoxy-uridine staining were used to test the survival rate, migration, permeability, and proliferation of cells, respectively. Transmission electron microscopy and phalloidin staining were used to observe the microstructure and polymerization of microtubules. The results show that the functions of HUVECs and the microtubular structure were destroyed by hypoxia, but were protected by paclitaxel and a reactive oxygen species (ROS) inhibitor. We further used western blot, a luciferase assay, and co-immunoprecipitation to describe a transcription-independent mechanism by which PI3K activation regulates microtubular stability, mediated by Stathmin1, a PI3K interactor that functions in microtubule depolymerization. Finally, we determined that hypoxia and ROS blocked the interaction between PI3K and Stathmin1 to trigger disassembly of microtubules. Conclusion Thus, our data demonstrate that hypoxia induced the production of ROS and damaged EC function by destroying the microtubular structure through the PI3K/Stathmin1 pathway. Oxidative stress resulting from activation of cellular reactive oxygen species (ROS) production is part of the pathological course of hypoxia. Uncontrolled ROS production causes tissue damage, vascular barrier dysfunction, and inflammation. Microtubule dynamics also control fundamental cellular functions, such as cell shape, polarity, motility, migration and division, as well as participate in other aspects of EC biology. The integrity of the microtubular system is necessary for protein trafficking, further affecting the frequency and velocity of vesicle transport and altering plasma membrane composition through direct effects on membrane trafficking pathways. The EC microtubular network plays a role in vascular permeability. Disruption of microtubules with nocodazole promotes barrier dysfunction, which is attenuated by pretreatment with forskolin. Furthermore, tumor necrosis factor (TNF)-α-, thrombin-, and transforming growth factor (TGF)-β-induced endothelial permeability is associated with destabilization of tubulin and the peripheral microtubular network. Stathmin1 is a cytosolic 19-kDa phosphoprotein that plays an oncogenic role and acts as a prognostic marker in several kinds of cancers. Stathmin1 is overexpressed in tumor tissues and is correlated with cancer progression and poor prognosis through its regulation of cell division, motility, and migration, all of which are critical processes in ECs. Stathmin1 is a ubiquitous cytoplasmic phosphoprotein that regulates microtubular dynamics through its depolymerization effect and is a significant marker of activation of the phosphatidylinositol 3-kinase-Akt (PI3K-Akt) pathway.
The PI3K/Akt pathway has been linked to an extraordinarily diverse group of cellular functions, including cell growth, proliferation, differentiation, motility, survival, and intracellular trafficking. Mammalian class I PI3K can be divided into classes IA and IB; the class IA PI3Ks are heterodimers of a 110 kDa catalytic subunit (p110α, p110β, or p110δ) and a regulatory subunit of 85 or 55 kDa (p85/p55). p110α PI3K (PIK3CA) has an important role in EC migration and angiogenesis, so this dominant form of class IA PI3K was used in our study to enhance PI3K activity and observe the effect on microtubular stability. Transwell assay HUVEC migration was evaluated with a transwell system (Corning Costar, MA, USA). Briefly, 1 × 10^5 HUVECs were seeded in the upper chambers and allowed to attach for 12 h. Cells were then subjected to hypoxic conditions and chemical treatment for 6 h. The medium in the upper chambers was switched to M199 with 0.5% FBS, while that in the lower chamber was switched to M199 with 1% FBS. After a 12 h incubation, the cells on the bottom were fixed with 4% paraformaldehyde and stained with 1% crystal violet; the non-migrating cells in the upper chamber were removed. Finally, the crystal violet was dissolved in 33% acetic acid, and the absorbance was measured at 600 nm. The amount of cell migration was determined as the ratio of the OD values of the treatment relative to the control. Each treatment was repeated in three independent chambers. Trans-endothelial permeability assay Cells were grown on 0.2 μm pore-size collagen IV (1 mg/mL)-coated tissue culture inserts (Nunc, Fisher Scientific, Pittsburgh, PA) until confluent. Monolayers were then serum-starved for 1 h and either left untreated or exposed to hypoxic conditions in triplicate with the desired agents. Following treatment, fluorescein isothiocyanate (FITC) dextran (10 kDa) dissolved in the medium was placed in the upper chamber at a concentration of 0.4 mg/mL and allowed to equilibrate for 2 h. Samples were then taken from the lower chamber for fluorescence measurements. Fluorescence was measured by excitation at 492 nm with the emission collected at 520 nm. Transmission electron microscopy (TEM) After treatment, HUVECs were fixed for 1 h with cold 3% glutaraldehyde in 0.1 M cacodylate buffer (pH 7.3) and washed with 0.2 M cacodylate; cells were then pelleted, postfixed with 2% osmium tetroxide in 0.1 M cacodylate for 1 h at 4 °C, and stained en bloc with 2% uranyl acetate for an additional 1 h. After three more washes in double-distilled water, the samples were dehydrated in a series of acetone solutions and embedded in Epon 812 according to the standard procedure. Ultrathin sections (70 nm) were prepared, stained with both uranyl acetate and lead citrate, and assessed using a Hitachi 7400 electron microscope (Hitachi, Tokyo, Japan). Random fields taken from individual samples were photographed at ×30,000 magnification. ROS assay After hypoxia treatment, 5 μM dichlorofluorescein diacetate (DCF) was added to the HUVEC culture medium and incubated for 1 h at 37 °C. Cells were then washed twice with PBS, scraped into RIPA buffer, and incubated for 10 min on ice. After spinning at 13,000 rpm for 10 min, 100 μl of supernatant was transferred into 96-well plates for the fluorescence assay (485 nm excitation and 527 nm emission). Protein concentration was measured for normalization to cell number.
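Both normalizations described above reduce to simple ratios against the control group; the sketch below illustrates them with invented readings (the values and array shapes are assumptions, not data from the study):

```python
import numpy as np

# Hypothetical OD600 readings from dissolved crystal violet (arbitrary units).
od_control = np.array([0.42, 0.45, 0.40])    # normoxia
od_treated = np.array([0.21, 0.19, 0.23])    # hypoxia

# Migration is reported as the ratio of treatment OD to the control mean.
migration_ratio = od_treated.mean() / od_control.mean()

# Permeability: FITC-dextran fluorescence in the lower chamber,
# normalized to the untreated control group.
fluor_control = np.array([1050.0, 980.0, 1010.0])
fluor_treated = np.array([2100.0, 2250.0, 1990.0])
relative_permeability = fluor_treated.mean() / fluor_control.mean()

print(f"migration (fraction of control): {migration_ratio:.2f}")
print(f"relative permeability (fold of control): {relative_permeability:.2f}")
```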
5-Bromo-2-deoxy-uridine (BrdU) incorporation assays HUVEC cells were subjected to hypoxic conditions and chemical treatment for 6 h and then incubated with 0.1 mg/ml BrdU for 3 h before being fixed with 4% paraformaldehyde (PFA). Cells were incubated in 0.5% Triton X-100 and 1.5 N HCl, washed with PBS between incubations, then treated with 0.25% trypsin-EDTA and incubated at 37 °C for 5 min. Cells were washed with PBS and blocked in blocking buffer for 2 h. BrdU antibody (1:100, ab6326, Abcam) was applied and incubated overnight at 4 °C in a hydration chamber. After washing with PBST, secondary antibody together with 2-(4-amidinophenyl)-6-indolecarbamidine dihydrochloride (DAPI) was added and incubated at RT for another 3 h, then observed with an Olympus IX81 microscope. Phalloidin staining HUVEC cells were plated on fibronectin (10 μg/mL)-coated glass coverslips and treated under the indicated conditions. Attached cells were fixed with 4% PFA, followed by permeabilization with 0.2% Triton X-100 at room temperature for 10 min; cells were stained with 100 nM Alexa Fluor™ 488-conjugated phalloidin (A12379, Thermo Fisher) in the dark for 30 min and then incubated with DAPI at room temperature for 10 min. After washing with PBS, stained cells were examined by confocal microscopy. MTT assay for cell proliferation The 3-(4,5-dimethylthiazol-2-yl)-2,5-diphenyltetrazolium bromide (MTT) assay was used to determine the effect of treatment on cell viability. HUVECs were inoculated into 96-well plates at 5 × 10^4 cells/ml; after treatment with hypoxia and chemicals, 5 mg/ml MTT (Beyotime Biotech, Jiangsu, China) was added to the cell culture medium. After a further 4 h incubation, the supernatant fraction was removed and 150 μl of dimethyl sulphoxide (DMSO) was added. The optical density (OD) at 490 nm was measured using a microplate reader (Bio-Rad, Hercules, CA, USA). Statistical methods The data are expressed as mean ± SEM. All experiments were performed in triplicate. All statistical analyses were performed with SPSS 19.0 using non-parametric tests. The Kruskal-Wallis test followed by the Mann-Whitney test was used to detect differences between groups. P < 0.05 was considered statistically significant. Hypoxia destroys microtubular ultrastructure and induces HUVEC dysfunction Microscopic images revealed an increase in the mortality of HUVECs under the hypoxic condition, similar to those treated with colchicine, a reagent that disassembles microtubules. Paclitaxel, a microtubule stabilizer, reversed the mortality of HUVECs under the hypoxic condition (Fig. 1a upper). Proliferation of HUVECs was further confirmed by BrdU staining. Hypoxia and colchicine treatment significantly reduced the percentage of BrdU-positive cells, but paclitaxel reversed the hypoxia-induced inhibitory effect (Fig. 1a lower). The proliferation rate was further confirmed by the MTT assay (Fig. 1b). Migration and barrier integrity, important functional indices of venous ECs during vascular growth and repair, were also damaged by the hypoxia and colchicine treatments, while paclitaxel reversed the malfunction induced by hypoxia (Fig. 1c, d). Furthermore, microtubular structures were observed by transmission electron microscopy (TEM) and phalloidin staining, and the results showed that hypoxia destroyed the microtubular ultrastructure and destabilized microtubule polymerization (Fig. 1e, f). These results suggest that hypoxia inhibits proliferation and induces malfunction of HUVECs through disassembly of microtubules.
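The statistical recipe described here (a Kruskal-Wallis omnibus test followed by pairwise Mann-Whitney tests) maps directly onto SciPy; below is a minimal sketch using invented triplicate values rather than the study's data:

```python
from scipy.stats import kruskal, mannwhitneyu

# Hypothetical survival-rate replicates (% of control) for three groups.
normoxia = [100.0, 98.5, 101.2]
hypoxia = [62.3, 58.9, 65.1]
hypoxia_paclitaxel = [88.4, 85.2, 90.1]

# Omnibus test across all groups.
h_stat, p_kw = kruskal(normoxia, hypoxia, hypoxia_paclitaxel)
print(f"Kruskal-Wallis: H = {h_stat:.2f}, p = {p_kw:.4f}")

# Pairwise follow-up only if the omnibus test is significant.
if p_kw < 0.05:
    for name, group in [("hypoxia", hypoxia),
                        ("hypoxia + paclitaxel", hypoxia_paclitaxel)]:
        u_stat, p_mw = mannwhitneyu(normoxia, group, alternative="two-sided")
        print(f"normoxia vs {name}: U = {u_stat:.1f}, p = {p_mw:.4f}")
```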
Hypoxia induces ROS production causing HUVEC malfunction Hypoxia damages mitochondrial function, leading to leakage of electrons from the respiratory chain, which increases ROS. Accordingly, the ROS level increased markedly in HUVECs under the hypoxic condition (Fig. 2a), and the survival rate of HUVECs increased in response to N-acetylcysteine (NAC) and wortmannin treatment according to the microscopic observations and the MTT assay (Fig. 2b, c). The NAC and wortmannin treatments also increased proliferation and migration, but reduced the permeability of HUVECs under the hypoxic condition (Fig. 2d, e). Hypoxia induced microtubular disassembly in HUVECs through ROS, whereas NAC and wortmannin protected against the destruction of the microtubular ultrastructure induced by hypoxia (Fig. 2f). A previous study showed that ROS activates the PI3K/Akt pathway in HUVECs, so we were interested in determining whether hypoxia and H2O2 treatment could also activate the PI3K/Akt pathway in HUVECs.

Fig. 1 Microtubule stabilization reverses the reduction of proliferation, survival rate, migration and barrier integrity and the microtubular depolymerization induced by hypoxia in HUVECs. HUVECs were cultured under normoxic or hypoxic conditions with or without pretreatment with 10 μM paclitaxel or 4 μM colchicine for 1 h. a Cells were immunostained with BrdU antibody and DAPI for the proliferation assay. Bar = 50 μm. b The survival rate (%) of HUVECs was determined using the MTT assay. c Permeability of HUVECs was tested in cell culture inserts by determining the fluorescence leaked into the lower chamber. All data were normalized to the control group. d Cell migration was tested by the transwell method. e HUVECs were processed for transmission electron microscopy (TEM) to examine microtubular structure; bar = 200 nm. f Hypoxia-induced microtubule depolymerization is shown by immunofluorescent confocal micrographs of HUVEC cells. Cells were stained with phalloidin (green) and the nuclear stain DAPI (blue). Bar = 50 μm. Representative results from three independent experiments are depicted here. The data are presented as the mean ± SEM. *P < 0.05

Fig. 2 Hypoxia induces the production of ROS, which reduces proliferation, survival rate, migration and barrier integrity and causes microtubular depolymerization in HUVECs. HUVECs were cultured under normoxic or hypoxic conditions with or without pretreatment with N-acetyl cysteine (NAC, 1 mM) or wortmannin (1 μM) for 30 min. a ROS production in HUVECs. b Microscopic observation and BrdU staining assessed HUVEC proliferation. Bar = 50 μm. c The survival rate (%) of HUVECs determined using the MTT assay. d Permeability of HUVECs was tested in cell culture inserts by determining the fluorescence leaked into the lower chamber. All data were normalized to the control group. e Cell migration was tested by the transwell method. f HUVECs were processed for TEM to examine microtubule structure. Bar = 0.5 μm. g HUVECs were cultured under normoxia, hypoxia or H2O2 conditions with or without pretreatment with 1 mM NAC or 1 μM wortmannin for 30 min; total cell lysates were immunoblotted with antibodies to P-Akt, Akt and P85. Representative results from three independent experiments are depicted here. The relative intensity of each band was normalized to P85. The data are presented as the mean ± SEM. *P < 0.05
As shown in Fig. 2g, both hypoxia and H2O2 increased the phosphorylation of Akt, and 1 mM NAC or 1 μM wortmannin for 30 min decreased the expression of P-Akt, with NAC downregulating the phosphorylation rate to a lower level than wortmannin. This result indicates that ROS caused microtubular disassembly under the hypoxic condition, connecting hypoxia and the malfunction of HUVECs through the PI3K/Akt pathway. Stathmin1 is upregulated after hypoxia and H2O2 treatments Stathmin1 is involved in microtubular depolymerization and the regulation of microtubular dynamics, and is a marker of PI3K pathway activation. Thus, we focused on Stathmin1 to determine whether ROS affects the function of microtubules by regulating the expression of Stathmin1. Acetylated tubulin is widely used as a marker of stable microtubular structures. The stability of microtubules was detected by western blot using an antibody to acetyl-α-tubulin (K40). Consistent with Figs. 1 and 2, the hypoxia and H2O2 treatments significantly decreased the protein levels of acetyl-α-tubulin (K40), indicating microtubule disassembly, and upregulated Stathmin1 protein and mRNA levels, while treatment with NAC or wortmannin increased acetyl-α-tubulin (K40) protein levels and downregulated Stathmin1 protein and mRNA levels (Fig. 3a, b). To test whether PI3K/Akt regulates microtubular stability, we treated HUVECs with PI3K short interfering RNAs (siRNAs) and detected the expression of acetyl-α-tubulin (K40). Two of the four PI3K siRNAs tested reduced the protein levels of acetyl-α-tubulin (K40) (Fig. 3c). PI3K is widely known as a regulator of gene transcription; thus, to determine whether PI3K regulates microtubular stability in a transcription-dependent manner, we detected the expression of acetyl-α-tubulin (K40) in the presence of the mRNA synthesis inhibitor actinomycin D and the protein synthesis inhibitor cycloheximide, with or without wortmannin (Fig. 3d). Neither actinomycin D nor cycloheximide inhibited acetyl-α-tubulin (K40) expression, whereas wortmannin inhibited microtubular stability in the presence of actinomycin D and cycloheximide. ROS increases Stathmin1 expression and blocks the interaction between PI3K and Stathmin1 To examine the role of PI3K in microtubular stability, we transfected expression vectors expressing wild-type constitutively active (PIK3CA-WT) and kinase-dead (PIK3CA-H1047R and E545K) mutants, then knocked down endogenous PI3K with siRNA. PIK3CA increased the expression of acetyl-α-tubulin (K40), suggesting that this active form of PI3K regulates microtubular stability. PIK3CA-H1047R and E545K are the most commonly used mutations leading to amino acid changes in the kinase domain, and cannot be tyrosine phosphorylated. These two mutations did not rescue the expression of acetyl-α-tubulin (K40). Knockdown of PIK3CA induced downregulation of acetyl-α-tubulin (K40) (Fig. 4a). These results suggest that the active form of PI3K is the primary mediator of microtubular stability. As microtubular stability is independent of PI3K transcriptional function, we examined whether PI3K regulates microtubular stability via Stathmin1, a PI3K activation marker and inhibitor of microtubule polymerization. Depleting Stathmin1 with siRNA should have a similar effect to wortmannin, and therefore Stathmin1 siRNA should make wortmannin more effective at inducing microtubular disassembly.
Stathmin1 knockdown with one of the two siRNAs increased the expression of acetyl-α-tubulin (K40), and wortmannin was more effective at protecting microtubular stability when Stathmin1 was knocked down (Fig. 4b, c). Overexpressing Stathmin1 significantly decreased the expression of acetyl-α-tubulin (K40) and promoted the effects of wortmannin on microtubular disassembly. These results suggest that the PI3K/Stathmin1 pathway regulates microtubular stability. However, it was unknown how PI3K regulates Stathmin1 and what the effects of hypoxia and ROS are. Thus, a co-immunoprecipitation assay was performed (Fig. 4d). The results showed that PI3K directly interacted with Stathmin1, and that the hypoxia and H2O2 treatments blocked their interaction. Discussion Cardiovascular disease remains the leading cause of mortality in Europe, causing almost 4.1 million deaths per year, or 46% of all deaths. Impairment of local blood flow changes the microenvironment, leading to hypoxia, which is a common pathological course. ECs control many physiological and pathological actions, including inflammatory cell recruitment, regulation of vascular resistance and initiation of coagulation. Thus, malfunctioning ECs initiate many cardiovascular diseases. Microtubular dynamics have a close relationship with the proliferation, survival, migration, and barrier integrity of ECs, which play an important role in repairing the vasculature to prevent vasculopathy. However, the functional role of hypoxia-induced oxidative stress in the balance of the EC microenvironment and the molecular mechanisms involved remain undefined. In this study, we utilized HUVECs as a model to explore the structural and functional changes in microtubules and the mechanism by which hypoxia affects EC function. The results showed that hypoxia induced disassembly and depolymerization of microtubules, which can be mimicked by colchicine and prevented by paclitaxel. Paclitaxel protected HUVEC functions, whereas colchicine mimicked the hypoxic damage, suggesting that hypoxia induces EC malfunction by destroying the microtubular structure.

Fig. 3 (partial caption): The mRNA level of Stathmin1 was detected with RT-qPCR. c Western blot of acetyl-α-tubulin (K40), PI3K and P85 (loading control) in HUVEC cells treated with PI3K siRNAs. d HUVEC cells were treated with hypoxia for 8 h with or without the protein synthesis inhibitor cycloheximide and the transcription inhibitor actinomycin D; total cell lysates were immunoblotted with antibodies to acetyl-α-tubulin (K40) and P85. Representative results from three independent experiments are depicted here. The relative intensity of each band was normalized to P85. The data are presented as the mean ± SEM. *P < 0.05
Hypoxia and H2O2 treatment significantly decreased the expression of acetyl-α-tubulin (K40) and increased Stathmin1 protein and mRNA levels. NAC and wortmannin recovered the level of acetyl-α-tubulin (K40) and inhibited the expression of Stathmin1, indicating that hypoxia and ROS damaged the microtubular structure in a Stathmin1-dependent manner. PI3K knockdown destroyed the microtubular structure, but not via transcription (Fig. 3c, d). PI3K is an essential factor in hypoxia-induced apoptosis, angiogenesis, and inflammation.

Fig. 4 (partial caption): Western blot of Stathmin1 siRNA-treated HUVEC cells using antibodies to Stathmin1 and β-actin (loading control). c HUVEC cells were transfected with Stathmin1 siRNA or a Stathmin1 expression vector and treated with or without 1 μM or 10 μM wortmannin; total lysates were immunoblotted with antibodies to acetyl-α-tubulin (K40), Stathmin1 and P85. d Immunoprecipitation of PI3K from normoxia-, hypoxia- and H2O2-treated HUVEC cells using endogenous protein, immunoblotted for Stathmin1 and PI3K. Representative results from three independent experiments are depicted here. The relative intensity of each band was normalized to P85. The data are presented as the mean ± SEM. *P < 0.05

Here, we demonstrated that microtubular structure is a PI3K-dependent function, revealing a new mechanism of the PI3K pathway in HUVECs. PIK3CA, the catalytic subunit of class IA PI3K, plays a major role in PI3K-related progression. PIK3CA-H1047R and E545K cannot be tyrosine phosphorylated and are primarily monomeric. We discovered that PIK3CA-WT increased microtubular stability, but PIK3CA-H1047R and E545K had no similar effect. PIK3CA knockdown further decreased microtubular stability. Overall, these results suggest that the active form of PI3K is the primary mediator of microtubular stability. Knockdown of Stathmin1 improved, whereas overexpression of Stathmin1 inhibited, microtubular stability. Wortmannin damaged microtubular stability in a dose-dependent manner. Hypoxia and ROS blocked the interaction between PI3K and Stathmin1. Hypoxia, ROS, and disassembly of microtubules impair the proliferation, survival, barrier integrity, and functions of ECs. In this study, we demonstrated that oxidative stress resulting from activation of cellular ROS production is a pathological course of hypoxia, which further disrupted microtubular dynamics and EC functions. We also demonstrated that the PI3K/Stathmin1 pathway plays an important role in the hypoxia-induced loss of microtubular stability. As hypoxia-injured ECs are frequently observed in cardiovascular disease and diabetes, it would be interesting to examine whether patients with cardiovascular disease and diabetes respond better to PI3K inhibitors or a microtubular stabilizer in clinical trials. Conclusions Our results demonstrate that hypoxia disturbed the ultrastructure and polymerization of microtubules and induced dysfunction of ECs. Furthermore, ROS activated the PI3K/Akt pathway and upregulated the expression of Stathmin1 by blocking the interaction between PI3K and Stathmin1, which makes PI3K inhibitors and microtubule stabilizers promising therapeutic drugs for hypoxia-induced vascular disease.
Processing PET/CT Medicine Images and Determining the Object Today, as a result of the developments in medical imaging systems, combined Positron Emission Tomography and Computed Tomography (PET/CT) images are used as effective imaging tools in cancer diagnosis and treatment. The study aims to display the tumor in 3D using PET/CT images and to determine the change in tumor size over time. Software was developed (in Python) for the rapid detection of tumor tissues from numerous PET/CT images. With the software, tomography images of a patient with hypopharynx cancer, taken at 3 different times (30.05.2017, 23.10.2017 and 08.02.2019), are evaluated. The image and real-object coordinates of the cancerous region highlighted by the radioactive substance in the images are calculated. For the 3 different times, variables related to changes in the patient's tumor size are evaluated in Matlab software, and 3D imaging and volume calculations are made. Thus, the temporal variation of the shape, location, and dimensions of the tumor is determined, and whether the patient's response to treatment is positive is observed with image processing and evaluation techniques.
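The volume estimate described here amounts to counting segmented voxels and multiplying by the voxel volume; the sketch below illustrates that idea in Python. The threshold, array shape, and voxel spacing are illustrative assumptions, and the paper's actual pipeline (Python detection plus Matlab 3D evaluation) is not reproduced:

```python
import numpy as np

# Hypothetical PET volume: intensity values on a (slices, rows, cols) grid.
pet = np.random.rand(64, 128, 128)

# Voxel spacing in mm (slice thickness, row spacing, column spacing) - assumed.
spacing_mm = (3.0, 2.0, 2.0)

# Segment the tracer-avid region with a simple fixed threshold (assumption;
# clinical work typically uses SUV-based or adaptive thresholds).
threshold = 0.95
tumor_mask = pet >= threshold

# Volume = number of voxels above threshold x volume of one voxel.
voxel_volume_mm3 = spacing_mm[0] * spacing_mm[1] * spacing_mm[2]
tumor_volume_mm3 = tumor_mask.sum() * voxel_volume_mm3
print(f"estimated tumor volume: {tumor_volume_mm3 / 1000.0:.2f} cm^3")

# Image-to-object coordinates: index * spacing (origin assumed at zero).
indices = np.argwhere(tumor_mask)
object_coords_mm = indices * np.array(spacing_mm)
```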
Identification and expression of multidrug resistance-related ABC transporter genes in Candida krusei. Infections with Candida krusei have increased in recent years as a consequence of its intrinsic resistance to fluconazole, an antifungal azole widely used in immunocompromised individuals to suppress infections due to azole-susceptible C. albicans. One established mechanism for azole resistance is drug efflux by ATP binding cassette (ABC) transporters. Since these transporters recognize structurally diverse drugs, their overexpression can lead to multidrug resistance (MDR). To identify C. krusei genes potentially involved in azole resistance, PCR was performed with primers corresponding to conserved sequences of MDR-related ABC transporters from other fungi. Two genes, ABC1 and ABC2, were identified; Southern blots suggested that both have one or two related gene copies in the C. krusei genome. ABC1 RNA was constitutively expressed at low levels in log phase cells while ABC2 RNA was undetectable. However, both genes were upregulated as cultures approached stationary phase, and this upregulation was correlated with decreased susceptibility to the lethal activity of the azole derivative miconazole. Furthermore, ABC1 was upregulated following brief treatment of C. krusei with miconazole and clotrimazole (but not other azoles), and the unrelated compounds albendazole and cycloheximide. The latter two compounds antagonized fluconazole activity versus C. krusei, supporting a role for the ABC1 transporter in azole efflux. Finally, miconazole-resistant mutants selected in vitro demonstrated increased constitutive expression of ABC1. Based on these expression data, genetic and functional characterization of the ABC1 transporter to directly test its role in C. krusei azole resistance would appear to be warranted.
Life cycle assessment of an intensive sewage treatment plant in Barcelona (Spain) with focus on energy aspects. Life Cycle Assessment was used to evaluate the environmental impacts associated with a full-scale wastewater treatment plant (WWTP) in the Barcelona Metropolitan Area, with a treatment capacity of 2 million population equivalents, focussing on energy aspects and resource consumption. The wastewater line includes conventional pre-treatment, a primary settler, activated sludge with nitrogen removal, and tertiary treatment; the sludge line consists of thickening, anaerobic digestion, cogeneration, dewatering and thermal drying. Real site data were included in the inventory wherever possible. Environmental impacts for the resulting impact categories were determined by the CML 2 baseline method. According to the results, the combustion of natural gas in the cogeneration engine is responsible for the main impact on Climate Change and Depletion of Abiotic Resources, while the combustion of biogas in the cogeneration unit accounts for a minor part. The results suggest that the environmental performance of the WWTP would be enhanced by increasing biogas production through improved anaerobic digestion of sewage sludge.
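Characterization in baseline methods such as CML 2 is a weighted sum: each inventory flow is multiplied by its characterization factor and the products are summed per impact category. A minimal sketch with invented flows and factors (not values from this study):

```python
# Hypothetical life-cycle inventory for the treatment of 1 m^3 of wastewater.
inventory = {
    "CO2 (fossil)": 0.35,   # kg
    "CH4": 0.004,           # kg
    "N2O": 0.0008,          # kg
}

# Illustrative GWP100 characterization factors (kg CO2-eq per kg emitted).
gwp_factors = {"CO2 (fossil)": 1.0, "CH4": 28.0, "N2O": 265.0}

# Climate-change score = sum over flows of mass x characterization factor.
climate_change = sum(mass * gwp_factors[flow] for flow, mass in inventory.items())
print(f"Climate change: {climate_change:.3f} kg CO2-eq per m^3 treated")
```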
#include <iostream>
using namespace std;

// Print the even numbers from 2 to 100, one per line.
int main() {
    for (int i = 2; i <= 100; i += 2) {
        cout << i << "\n";
    }
    return 0;
}
Disparities in Care Experienced by American Indian and Alaska Native Medicare Beneficiaries Supplemental Digital Content is available in the text. Background: Little is known about the health care experiences of American Indians and Alaska Natives (AIANs) due to limited data. Objective: The objective of this study was to investigate the health care experiences of AIAN Medicare beneficiaries relative to non-Hispanic Whites using national survey data pooled over 5 years. Subjects: A total of 1,193,248 beneficiaries who responded to the nationally representative 2012–2016 Medicare Consumer Assessment of Healthcare Providers and Systems (CAHPS) surveys. Methods: Linear regression models predicted CAHPS measures from race and ethnicity. Scores on the CAHPS measures were linearly transformed to a 0–100 range and case-mix adjusted. Three AIAN groups were compared with non-Hispanic Whites: single-race AIANs (n=2491; 0.4% of the total sample), multiple-race AIANs (n=15,502; 1.3%), and Hispanic AIANs (n=2264; 0.2%). Results: Among AIAN groups, single-race AIANs were most likely to live in rural areas and areas served by the Indian Health Service; Hispanic AIANs were most likely to be Spanish-language-preferring (Ps<0.05). Compared with non-Hispanic Whites, single-race AIANs reported worse experiences with getting needed care (adjusted disparity of −5 points; a large difference), getting care quickly (−4 points; a medium difference), doctor communication (−2 points; a small difference), care coordination (−2 points), and customer service (−7 points; P<0.001 for all comparisons). Disparities were similar for Hispanic AIANs but more limited for multiple-race AIANs. Conclusions: Quality improvement efforts are needed to reduce disparities faced by older AIANs. These findings may assist in developing targeted efforts to address cultural, communication, and health system factors presumed to underlie disparities in health care access and customer service.
Is 2018 the year we finally get a new album from Tool? This week, drummer Danny Carey was asked about the status of the long-awaited LP and he made a bold proclamation. Speaking to Loudwire, Carey said the album is “definitely” coming next year. “We’ll probably have it done in the first half [of the year] if things go as planned,” he explained. “There’s setup times and manufacturing – I can never predict all that, it seems like it’s constantly evolving. [What time of year it will be out] I can’t tell you.” Of course, fans of Tool are dubious of any such proclamation, seeing as the album has been “progressing rather nicely” since 2014 and was previously said to be coming in 2016. In the two years since then, the band’s members have teased the album’s release via a series of cryptic social media posts. However, back in January, frontman Maynard Keenan said the album had hit some “roadblocks,” and in July, he attributed the delay to his bandmates’ indecisiveness. Asked about the veracity of Carey’s comments, a representative for the band responded by saying simply, “as of now there is no release date.” What we do know for sure: Tool has at least one show planned for 2018, as they were recently announced as one of the headliners of next year’s Rock on the Range Music Festival in Columbus, Ohio. Meanwhile, Carey’s side-project with Mastodon’s Brent Hinds, the Legend of Seagullmen, have confirmed a February release for their self-titled debut studio album.
package io.github.wordandahalf.blueprint.utils; import io.github.wordandahalf.blueprint.Blueprints; import java.util.logging.*; public class LoggingUtil { private static Logger logger = Logger.getLogger( LoggingUtil.class.getName() ); static { logger.setUseParentHandlers( false ); ConsoleHandler handler = new ConsoleHandler(); handler.setFormatter( new CustomFormatter() ); if(Blueprints.DEBUG_ENABLED) { handler.setLevel( Level.FINE ); logger.setLevel( Level.FINE ); } else { handler.setLevel( Level.INFO ); logger.setLevel( Level.INFO ); } logger.addHandler( handler ); } public static Logger getLogger() { return logger; } private static class CustomFormatter extends Formatter { @Override public String format( LogRecord record ) { return "[" + record.getLevel().getName() + "] " + record.getMessage() + "\n"; } } }
November 1, 2007 — Patrick Zimmerman Out of the Ground, Into the Light and Into the Sun Frank Lloyd Wright’s Annie Pfeiffer Chapel The Early History: Florida Southern College Florida Southern College was a small Methodist college snuggled down in the orange groves that bordered Lake Hollingsworth in Lakeland, Florida when the renowned architect Frank Lloyd Wright first visited the campus in 1938. There was no hint then among the scent of orange blossoms that the campus itself would one day blossom into Wright’s “Child of the Sun,” the site of the single largest collection of buildings designed by America’s foremost architect. That Florida Southern College existed at all as a canvas upon which Wright could work his architectural magic was something of a wonder in itself. Although the college traced its founding to 1883 in Orlando, it had moved frequently and was nearly forced to close its doors on a number of occasions. It had moved from Orlando to Leesburg to Sutherland to Clearwater Beach and, ultimately, to Lakeland in 1922. It had weathered storms, fires, floods, flu epidemics, and economic depression. It had survived several name changes over the years. But due in large part to Ludd Spivey, who had been appointed president in 1925, the college persevered. This was the Florida Southern College that greeted Frank Lloyd Wright in 1938, an institution with very little money, but which luckily had a president who dreamed big dreams. Frank Lloyd Wright and Florida Southern College Frank Lloyd Wright at Florida Southern During a trip to Europe in 1938, Florida Southern College president Dr. Ludd Spivey viewed a war memorial that inspired him to return to the U.S. with the vision of constructing a campus in the orange groves. Even more inspiring to him was the autobiography of Frank Lloyd Wright. When Dr. Spivey flew to Taliesin at Spring Green, he approached Wright with his dream saying, “I have no money with which to build the modern American campus, but if you’ll design the buildings, I’ll work night and day to raise the means.” Wright was 67 years old when he first visited Lakeland, Florida. As he toured the orange grove area he envisioned the buildings rising “out of the ground, into the light and into the sun.” His master plan called for 18 buildings using the following basic materials: steel for strength; sand because it was native to Florida; and glass to bring God’s outdoors into man’s indoors. The first groundbreaking ceremony was held May 24, 1938 for the Annie Pfeiffer Chapel. Dedication of the building took place March 9, 1941. Following the completion of the Chapel, the three seminar units were built. As word spread about Wright’s creations, more and more people visited the campus to see his work. In 1942, ground was broken for the circular E.T. Roux Library, but steel and manpower shortages slowed the construction. These first buildings (Annie Pfeiffer Chapel, The Seminar Buildings, and the E.T. Roux Library) were built with student labor. Dr. Spivey arranged with the students that their tuition could be paid with manual assistance in the construction of the buildings. Dedication for the Roux Library was held in 1945. Next up were the Emile E. Watson-Benjamin Fine Administration Buildings, the first to be built by an outside construction firm, followed by the J. Edgar Wall Waterdome in 1948. The construction of the 1.5 miles of esplanades began at the same time, with the first phase connecting the library and the administration building.
The Ordway Arts Building was next to be constructed and the esplanades were extended from the seminars to the Ordway Building and then back to the chapel, forming the quadrangle. Danforth Chapel went up in the shadow of Annie Pfeiffer Chapel as the foundations were laid for the Polk County Science Building. Wright Overseeing Work at the College Wright visited the campus quite often during his twenty years of work at Florida Southern. Lakeland residents would turn out to see him in his preferred attire, which often included a flowing cape, beret or pork pie hat, and his walking stick, but few would engage him in conversation. Preservation Projects at the College Jeffrey Baker is an architect who specializes in preservation, so it is no surprise that he gets excited about showing off the Annie Pfeiffer Chapel at Florida Southern College, which has hired him to create a preservation plan. Designed by Frank Lloyd Wright, the high-sided, hexagonal chapel has a tower that Mr. Baker says is “structurally incredibly complex, almost like origami in concrete.” But it is a surprise when he says the chapel is in good shape. The fist-size holes in its crumbling exterior walls could easily make you think otherwise. It turns out that what Mr. Baker means by “good shape” is that $2-million “would go a long way” toward fixing the walls, undoing some ill-advised changes made over the years, and putting in a fully functional climate-control system. As preservation price tags go, $2-million is nothing; Yale University just spent $44-million on a 1953 Louis Kahn art museum. The Annie Pfeiffer Chapel was dedicated in 1941, the same year in which three other Wright buildings opened at Florida Southern; it ended up with a dozen structures designed by Wright. Florida Southern has the only college campus planned by Wright, who died in 1959, as well as the largest single collection of Wright buildings anywhere, many of which were built in part by students working in return for tuition and board. But today, for a liberal-arts college of about 1,900 students with a modest endowment, that’s a mixed blessing. Striking and historically important as they are, the Wright buildings present a long list of challenges: some have structural problems that can be traced to Wright’s having relied on new and untested designs. Many are too small for the college’s current needs, and all have been hard to modernize affordably. Bundles of data cables snake indecorously through holes drilled in the walls of Wright’s compact seminar building, for instance, while the ventilation system added a few years ago to his science building disfigures its roof line so badly that you dare not imagine what the famously temperamental architect would have to say about it. It was a great relief when the college received $195,000 from the Getty Foundation’s Campus Heritage Grant program to create a preservation plan for the Wright buildings, and Mr. Baker has been hard at work on the details of repairs and renovations. Anne B. Kerr, who has been Florida Southern’s president for three years, says that while her first responsibility is to Florida Southern’s students, faculty members, and educational mission, the college appreciates its role as conservator. “It seems to me to be very doable to raise money for Frank Lloyd Wright renovations,” she says, adding that “the deterioration is significant enough that if we don’t do something now,” at least some of the buildings will be in serious trouble. 
In fact, last week the World Monuments Fund included the college’s Wright buildings on its 2008 Watch List of 100 Most Endangered Sites. Two preservation projects are already under way here. One, paid for with a special $1.6-million state appropriation, is the repair of a mile-and-a-half-long network of covered walkways, known as esplanades, that connect the Wright buildings. The walkways’ roofs cantilever out from concrete posts that Wright designed to recall the orange trees that previously grew in a grove on the site. The posts’ bases represent the trees’ trunks, spreading shapes stand in for the branches, and precise incisions in the concrete call to mind the leaves. The grid on which the Wright buildings are arranged was also inspired by the grove, in which trees were placed 18 feet apart. Wright divided and subdivided that 18-foot distance to come up with other important dimensions for campus structures, like the three-foot length and nine-inch height of the concrete blocks used in every building. The grid and the posts were part of Wright’s search for what he referred to as “a real Florida form” that would set a new standard for architecture in the state. But Wright’s esplanade design did not include expansion joints, and that has proven to be a problem. The esplanades, Mr. Baker says, have created their own joints by cracking where they needed to, and at least one post failed altogether and had to be replaced. Luckily, the college has kept all of Wright’s concrete molds. “We have rooms filled with molds,” he says. The other current project is the restoration of Wright’s “water dome,” a 90-foot-diameter pool over which 74 jets were intended to make a dome of water 40 to 50 feet high. Although previously the pumps were never able to create enough pressure to achieve the effect Wright sought, Mr. Baker says Wright “saw it as the spiritual center or heartbeat of the campus — it was the focal point of the entire design.” The original pool has been excavated and has been fitted with new high-pressure jets. Mr. Baker is also planning repairs for the Annie Pfeiffer Chapel and several of Wright’s other buildings here. Like many Wright structures, the chapel has a number of innovative elements. The wall blocks, for instance, have openings for colored glass shapes meant to pierce the wall with light. But now the blocks, made on the campus by Florida Southern students, are causing trouble. Because Wright wanted each block to lie flat on the block beneath it, with no mortar separating the two, iron rods were embedded in the walls to hold the blocks in place. Unfortunately the process he specified for grouting around the iron rods didn’t really work, so when water got into the unmortared joints, the rods rusted and the blocks began cracking. Another building in need of work opened in 1945 as the library. It now houses offices and, in the circular former reading room, a visitors’ center displaying concrete molds, uncomfortable-looking wooden furniture that Wright designed for the college, and other artifacts, including designs for as-yet-unbuilt Wright buildings. Unfortunately, the structure has some sagging rafters and cracking walls, which Mr. Baker estimates will cost some $3.5-million to fix. Other Wright buildings here appear to be in better shape — a sprawling classroom building, a delightful set of administrative buildings designed on the residential scale that Wright excelled at, and the science complex.
But almost every room offers some hint of the tension between making Wright’s buildings useful for 21st-century college students and preserving the architect’s sometimes-idiosyncratic vision — here, modern light fixtures Wright would have loathed; there, bold, polished ductwork that does its best to look stylish, even if it is not original. “I hold out a lot of hope for this campus,” says Mr. Baker. “A lot of the original fabric is intact.” Indeed, the gem of the campus — the tiny William H. Danforth Chapel, which peers out from beneath some trees beside the Annie Pfeiffer Chapel — appears to be in almost perfect condition. Movable pews designed by Wright and built by students are still in use, and climbing the narrow, angled stairway to Wright’s little choir loft is unexpectedly thrilling. A single piece of stained glass is missing from the big window behind the altar. That, at least, is an easy repair. Walkways: The Seminar Rooms Frank Lloyd Wright’s Buildings at Florida Southern College Frank Lloyd Wright’s Water Dome Wright’s Water Dome Finally Comes to Life Jets of water shot sideways into the air last Thursday evening, rising about 45 feet before curving down and inward to meet in the center of a circular basin 160 feet in diameter. Between the 75 or so powerful water jets, droplets of moisture created translucent, meshlike panels of water that became the water dome that architect Frank Lloyd Wright saw in his mind for Florida Southern College. It was a moment that took 69 years and close to a million dollars to happen. The Frank Lloyd Wright Water Dome is an innovation that Wright designed in the late 1930s, but it had never been turned on before because it didn’t have the financial or mechanical support needed to operate. That changed this year. Drawings were unearthed, the mechanics improved and the large fountain restored. About 250,000 gallons of water fill a circular pool that’s painted a green-tinged teal. Contractors and subcontractors worked up to the last day putting the final touches on the fountain that FSC President Anne Kerr called Wright’s “inspired vision.” Frank Lloyd Wright’s Water Dome Comes to Life Readers can access a virtual walking tour of the Wright buildings on Florida Southern’s campus here. Interested readers can obtain online access to the complete 1910 Wasmuth Frank Lloyd Wright folios. This was the first publication of Wright’s work to appear anywhere in the world, since Wright had not published any of his work in his twenty previous years of activity in the United States. Publication of his folios in Germany is said to have been the inspiration for founders of the important Bauhaus architectural movement, such as Ludwig Mies van der Rohe, Walter Gropius and Josef Albers.
Determination of tryptophan tRNA recognition sites for tryptophanyl-tRNA synthetase from hyperthermophilic archaeon, Aeropyrum pernix K1. To investigate the recognition mechanism of tryptophan tRNA by tryptophanyl-tRNA synthetase from extreme hyperthermophilic and aerobic archaeon, Aeropyrum pernix K1, tryptophanylation activities were examined by using mutant tryptophan tRNA transcripts prepared by in vitro transcription system. Their transcripts were aminoacylated with tryptophan by overexpressed A. pernix tryptophanyl-tRNA synthetase. The results indicated that anticodon nucleotides C34, C35 and A36, discriminator base A73, G1-C72 and G2-C71 base pairs of acceptor stem were base-specifically recognized by A. pernix tryptophanyl-tRNA synthetase.
The article below, on the issues raised by a Channel 4 documentary on London's mayor, Ken Livingstone, incorrectly stated that the London Evening Standard had organised a public debate with the Tory candidate, Boris Johnson, but had not invited the mayor or the Lib Dem candidate, Brian Paddick. In fact the Standard has organised a series of debates and next month's will be addressed by Mr Livingstone, followed by a later event with all the candidates present. The mayor of London, Ken Livingstone, failed last night in an attempt to get Channel 4 to pull a documentary which accused him of financial profligacy, cronyism and links to a Trotskyite faction conspiring to transform London into a "socialist city state". Last night's Dispatches programme, presented by Martin Bright of the New Statesman, also alleged that public money was used to smear one of Livingstone's adversaries and that mayoral staff raised funds for his re-election bid in breach of local government rules. But the mayor accused the channel of facilitating a smear campaign aimed at sabotaging his chances of re-election. Hours before the broadcast he approached Channel 4 and attempted to have the programme pulled, claiming lack of balance. He is today expected to renew his protests with a complaint to the regulator, Ofcom. He claimed that Dispatches had already been "totally discredited" in the wake of two controversial documentaries on Aids and climate change. "It has been pointed out to Channel 4 in the clearest possible terms that the allegations against Ken Livingstone, in the hatchet job on him by journalist Martin Bright for Dispatches, are equally ludicrous," a spokesman said. However, the broadcaster said he had been offered "a full right to reply in good time" and had declined to respond to the programme's allegations. "It would appear Mr Livingstone's spokesperson is trying to discredit Dispatches rather than address the issues put to the mayor. Dispatches has a very strong track record and is respected for the quality of its journalism." In the programme Atma Singh, the mayor's former senior adviser on Asian issues - whom the Livingstone campaign described as an "embittered ex-employee" - said many of the mayor's senior advisers had belonged to a Trotskyite faction called Socialist Action. It planned to use revolutionary politics and access to the Labour party establishment to turn the capital into a "socialist city state". The programme also alleged that Livingstone's race adviser, Lee Jasper, had paid a consultant to prepare a dossier to help smear the former head of the Commission for Racial Equality, Trevor Phillips. According to the documentary, the consultant was hired at the taxpayer's expense and her appointment signed off by the mayor himself. The Court of Ken, which examines Livingstone's eight years in power, also looks at Livingstone's relationship with the Venezuelan president, Hugo Chávez, the effectiveness of the congestion charge, and expenses incurred on a trip to India. The row intensified yesterday after Martin Bright wrote a piece in the Evening Standard - the London paper that has often criticised the mayor - calling Livingstone "a disgrace to his office". He added: "I feel it is my duty to warn the London electorate that a vote for Livingstone is a vote for a bully and a coward who is not worthy to lead this great city of ours." The article called on voters to "kick Ken out when they go to the polls in May". 
"Given these unambiguous statements by Mr Bright, any claim that this programme is merely investigative journalism is untrue," Livingstone wrote to the broadcaster yesterday. "It is clear that he is seeking to influence the electoral process." He said he would have no objection to the documentary if films "campaigning against" his fellow mayoral candidates Boris Johnson and Brian Paddick were also made. He offered to participate in an interview with Bright if Channel 4 decided not to broadcast the programme. "This would allow a balanced programme with both sides of the argument shown." But Channel 4 says it has a legitimate case for focusing on the mayoralty "which holds a unique and powerful position in British politics" and the incumbent. "It is legitimate to hold up to public scrutiny the way in which the office operates as a crucial example of the mayoral model in action, ahead of the next elections," a spokesman said. "Dispatches applies the same level of scrutiny to the office of mayor as it has to other figures in public office, in a fair and balanced investigation, subject to the rigorous editorial guidelines applied to all Dispatches films." It also pointed out that the controversial film referred to about Aids was first broadcast in 1987 and that The Great Global Warming Swindle was not a Dispatches film, but "a standalone 90 minute programme clearly labelled as a polemic". The mayor has seen the London daily as his tormentor-in-chief since Veronica Wadley, the former deputy editor of the Daily Mail, replaced Max Hastings in the editor's chair. Livingstone has long smarted at the way he was treated by the Mail during his time as leader of the Greater London Council, although he claims his dislike of the paper stems from its parent company's support for fascism and Nazism in the 1930s. On Wadley's appointment in 2002 Livingstone invited her to lunch and received no reply, an act which heightened his distrust of Wadley and her paper. Their relationship began badly and over time has worsened. In 2002 the Standard reported that Livingstone had been involved in a scuffle at a party - during which a man fell down a stairwell - and had manhandled his pregnant partner out of the celebration after accusing her of smoking. However, he denied the allegations and was cleared of bringing his office into disrepute by the local government watchdog a year later. His next public run-in with the paper came in February 2005 when he likened its Jewish reporter Oliver Finegold (pictured above) to a Nazi concentration camp guard. He also told Finegold that the Standard was "a load of scumbags and reactionary bigots" with "a record of supporting fascism". Livingstone refused to apologise for his remarks at the time, and was suspended for four weeks. However, the suspension was later quashed by a high court judge. He later apologised for causing any offence to the capital's Jewish community, saying: "I am an equal opportunities rude person." Wadley insists she has a "healthy" relationship with the mayor. In an interview with the Guardian in 2006 she said: "I've not invited Ken to a dinner party at my house. He's a great character and he's a very clever politician. It doesn't mean he's always right." The paper has published a string of hostile articles on Livingstone and his inner circle alongside reports helpful to the Conservative candidate for London mayor, Boris Johnson. An aggrieved Livingstone has dubbed the paper the London Evening Boris. 
Last night the Standard held a public debate entitled What Does London Need From The Next Mayor? Speakers included Johnson, but not Livingstone or the Liberal Democrat candidate Brian Paddick. The mayor's adviser on race relations found himself dragged into the spotlight again last year after the Evening Standard published a series of articles alleging that 12 organisations run by his friends and associates had been given grants worth more than £2m because of their links with him. According to reporter Andrew Gilligan, some of the deals involved fraud, while others failed to yield any tangible public benefit. An internal review by the London Development Agency, which provided the grants, cleared Jasper of improperly influencing their allocation this month. The review said it had investigated the allegation that "contracts and funding were awarded to organisations because of their personal friendships with the mayor's adviser Lee Jasper or because of inappropriate interference by Mr Jasper. The review has found that this was not true." Among the groups investigated were the South London Green Badge Taxi School and the African Caribbean Business Network. The taxi school, which was established by two of Jasper's associates to teach ethnic minority applicants "the knowledge", is now the subject of a police fraud inquiry. In the case of the business network - which received a £1m grant - the review found "substantive assurance regarding the project management and outcomes of this project". Livingstone stood by his adviser as allegations of cronyism were thrown around, and many of Jasper's supporters have pointed to his record of work on behalf of minorities - especially his role as chair of the lay advisory group on Operation Trident, the Metropolitan police unit which investigates gun crime in the black community. They say the criticisms are in fact an attack on the black voluntary sector. But his accusers say the issue is about procedures and the stewardship of public money. The LDA's review found that 12 of the 16 allegations made by the Standard were unfounded. But the agency said it had referred allegations that individuals had misappropriated funds to the police as it had no powers to formally interview and investigate external parties. On January 16 the London assembly agreed an urgent motion to refer the LDA to the district auditor for an independent audit. The assembly also agreed to summon representatives from both the LDA and the Greater London authority to appear before assembly members to answer further questions. Last weekend the mayor was criticised in reports in Sunday newspapers for surrounding himself with a clique of advisers, many of whom belonged to a Trotskyite splinter group. This claim first surfaced in the 1990s, but was revived in the Dispatches programme. The reports described how his chief-of-staff, Simon Fletcher, began his career working for Tony Benn and won a seat on Camden council in 1993 before becoming involved in Socialist Action. The faction, which sprang from a split in the International Marxist Group, aimed to reconcile its revolutionary programme with cooperation with the Labour party. Its critics claim Socialist Action decided to extend its influence by placing its members in positions of power in a number of organisations. Since meeting in 1992 Livingstone and Fletcher have forged a lasting partnership. Having worked as Livingstone's researcher and masterminded his boss's first successful mayoral bid in 2000, Fletcher is closer to him than most. 
Livingstone also trusts Fletcher enough to leave him to run things when he leaves the country - a duty that many feel should fall to the mayor's second-in-command, deputy mayor Nicky Gavron. In the early 90s John Ross, who is Livingstone's economic adviser, was another key figure in Socialist Action. While a parliamentary candidate for the Communist party in the 1974 general election he expressed the view that union members should be entitled to form militia units: "This is the only peaceful road to socialism," he said. "The ruling class must know that they will be killed if they do not allow a takeover by the workers. If we aren't armed there will be a bloodbath." After a spell as a financial guru in Russia, Ross returned to the UK to help Livingstone prepare his economic strategy for the mayoral contest. The third member of the triumvirate is Livingstone's deputy chief of staff, Redmond O'Neill, who was also a pivotal figure in Socialist Action. O'Neill holds the crucially important transport brief in the mayor's office but is also seen as Livingstone's main fixer on a range of thorny issues. Atma Singh, a former member of Socialist Action who advised the mayor on Asian issues, told Dispatches that the Trotskyite faction still had the mayor's ear. Singh also identified Mark Watts, the mayor's climate change adviser, Jude Woodward, his culture adviser, and Anne Kane, who has done consultancy work for Livingstone, as one-time members of Socialist Action. Singh, who left Livingstone's team in acrimonious circumstances and received a pay-off, told the Observer that although his salary was paid for by the taxpayer he and other advisers raised money for Livingstone's successful 2004 re-election bid in breach of local government rules. Livingstone has denied any wrongdoing. Livingstone and Trevor Phillips were on good terms until 1999, when both were running for London mayor. However, when Phillips's bid appeared to come apart, Livingstone offered the former television journalist a role as his deputy. Phillips turned him down, apparently feeling he was being patronised. Things worsened when Phillips, in his capacity as chair of the Greater London authority, decided to publicly quiz the mayor on what had happened at the party in 2002. By 2006 there was so much bad blood between the two that Livingstone accused Phillips, then chair of the Commission for Racial Equality, of "pandering to the right" to such an extent that "soon he'll be joining the BNP". The mayor later refused to take part in a CRE conference, while his race adviser, Jasper, urged other speakers to pull out of the event. Livingstone said Phillips was harming community relations and employing inflammatory language "to grab alarmist headlines". According to Dispatches, Jasper waded deeper into the feud in June 2006. The programme suggests that he commissioned Anne Kane to draft an article rubbishing Phillips's record as chair of the CRE to stop him being appointed head of the organisation that replaced it, the Commission for Equality and Human Rights. Dispatches claims Kane's fee for the work - some of which was published on Blink, a black website with links to Jasper - was paid by the taxpayer, and that her appointment was signed off by Livingstone. City Hall says the allegations stem from an embittered ex-employee. "The government carried out a consultation on their government equalities review and the mayor naturally took part in this ... the mayor, unlike Mr Phillips, did not agree with the abolition of the CRE. 
It is entirely legitimate and normal for the mayor to express his views on such matters of public policy. Such an exchange of views on policies of importance to London does not constitute a personal campaign."
/**
 * Copyright (c) 2021 <NAME>.
 * This is part of the libc project.
 * SPDX-License-Identifier: MIT
 */

/**
 * @file isdigit.c
 * The isdigit function tests for any decimal-digit character.
 * This file is designed to be compatible with the C17 standard and
 * to support gcc and clang, except that it is not backward compatible.
 */
#include "ctype.h"

int isdigit(int c)
{
    /* Decimal digits '0'..'9' are contiguous in every character set C supports. */
    return ((c >= '0') && (c <= '9'));
}
// PrivateFrameworks/iWorkImport.framework/TSTWPSelection.h
/* Generated by RuntimeBrowser
   Image: /System/Library/PrivateFrameworks/iWorkImport.framework/iWorkImport
 */

@interface TSTWPSelection : TSWPSelection

@end
Decomposing Loosely Coupled Mixed-Integer Programs for Optimal Microgrid Design Microgrids are frequently employed in remote regions, in part because access to a larger electric grid is impossible, difficult, or compromises reliability and independence. Although small microgrids often employ spot generation, in which a diesel generator is attached directly to a load, microgrids that combine these individual loads and augment generators with photovoltaic cells and batteries as a distributed energy system are emerging as a safer, less costly alternative. We present a model that seeks the minimum-cost microgrid design and ideal dispatched power to support a small remote site for one year with hourly fidelity under a detailed battery model; this mixed-integer nonlinear program (MINLP) is intractable with commercial solvers but loosely coupled with respect to time. A mixed-integer linear program (MIP) approximates the model, and a partitioning scheme linearizes the bilinear terms. We introduce a novel policy for loosely coupled MIPs in which the system reverts to equivalent conditions at regular time intervals; this separates the problem into subproblems that we solve in parallel. We obtain solutions within 5% of optimality in at most six minutes across 14 MIP instances from the literature and solutions within 5% of optimality to the MINLP instances within 20 minutes.
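To make the boundary-reversion policy concrete, here is a minimal sketch, not the paper's model: a toy dispatch MIP that is loosely coupled in time is split into independent blocks by pinning the battery state of charge to a common value at every block boundary. All names and numbers (the horizon, block length, SOC_ANCHOR, the generator limits, the fuel cost, and the load and solar profiles) are invented for illustration, and the sketch assumes the PuLP package with its bundled CBC solver.

import pulp

HOURS = 24                # toy horizon; the real model spans a year at hourly fidelity
BLOCK = 6                 # hours per subproblem
SOC_ANCHOR = 5.0          # battery state of charge enforced at every block boundary
G_MAX, G_MIN, SOC_MAX = 10.0, 2.0, 10.0
FUEL_COST = 1.0           # cost per unit of diesel generation (invented)

load = [4 + (t % 12) * 0.5 for t in range(HOURS)]          # made-up demand profile
solar = [max(0.0, 6 - abs(t - 12)) for t in range(HOURS)]  # made-up PV profile

def solve_block(hours):
    """Solve one time block as an independent MIP; anchoring the boundary
    state of charge is what makes the blocks separable."""
    prob = pulp.LpProblem("dispatch_block", pulp.LpMinimize)
    gen = {t: pulp.LpVariable(f"gen_{t}", 0, G_MAX) for t in hours}
    on = {t: pulp.LpVariable(f"on_{t}", cat="Binary") for t in hours}
    soc = {t: pulp.LpVariable(f"soc_{t}", 0, SOC_MAX) for t in hours}
    prob += pulp.lpSum(FUEL_COST * gen[t] for t in hours)  # minimize fuel cost
    prev = SOC_ANCHOR                                      # boundary condition
    for t in hours:
        prob += gen[t] <= G_MAX * on[t]   # generator can produce only when on
        prob += gen[t] >= G_MIN * on[t]   # minimum stable load when on
        # the battery absorbs any imbalance between supply and demand
        prob += soc[t] == prev + gen[t] + solar[t] - load[t]
        prev = soc[t]
    prob += soc[hours[-1]] == SOC_ANCHOR  # revert at the end of the block
    prob.solve(pulp.PULP_CBC_CMD(msg=0))
    return pulp.value(prob.objective)

blocks = [list(range(b, b + BLOCK)) for b in range(0, HOURS, BLOCK)]
total = sum(solve_block(b) for b in blocks)  # independent calls, hence parallelizable
print(f"total fuel cost: {total:.2f}")

Because each block's boundary state is pinned, the blocks share no variables, so they can be handed to separate workers; that separability is the essence of the decomposition the abstract describes.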
Naked Mole Rat Cells Have a Stable Epigenome that Resists iPSC Reprogramming
Summary
Naked mole rat (NMR) is a valuable model for aging and cancer research due to its exceptional longevity and cancer resistance. We observed that the reprogramming efficiency of NMR fibroblasts in response to OSKM was drastically lower than that of mouse fibroblasts. Expression of SV40 Large T antigen (LT) dramatically improved reprogramming of NMR fibroblasts. Inactivation of Rb alone, but not p53, was sufficient to improve reprogramming efficiency, suggesting that NMR chromatin may be refractory to reprogramming. Analysis of the global histone landscape revealed that NMR had higher levels of repressive H3K27 methylation marks and lower levels of activating H3K27 acetylation marks than mouse. ATAC-seq revealed that in NMR, promoters of reprogramming genes were more closed than mouse promoters, while expression of LT led to massive opening of the NMR promoters. These results suggest that NMR displays a more stable epigenome that resists de-differentiation, contributing to the cancer resistance and longevity of this species.
INTRODUCTION
Naked mole rat (NMR) is the longest-lived rodent species and is becoming a popular model in research due to its longevity and very low incidence of cancer (Delaney et al., 2016). NMRs have a mouse-like body size but display an almost ten times longer maximum lifespan and tumor resistance, as opposed to tumor-prone mice. This makes NMR-to-mouse comparison very informative for identifying mechanisms of longevity and cancer resistance. Several such mechanisms have already been identified. For instance, NMR cells secrete very-high-molecular-weight hyaluronan, which prevents malignant transformation (). Furthermore, NMRs express a unique INK4 isoform, pALT, which consists of the first exon of p15INK4b and the second and third exons of p16INK4a and confers more efficient growth arrest (). NMR cells also have significantly higher translation fidelity than mouse cells and display better protein stability and a smaller age-associated increase in cysteine oxidation (). In addition, NMRs have markedly higher levels of cytoprotective NRF2 signaling activity due to lower levels of negative regulators of this signaling, such as Keap1 and βTrCP (). Finally, loss of either tumor suppressor p53 or Rb individually triggers apoptosis in NMR cells (), and loss of the tumor suppressor ARF triggers cellular senescence ().
Chromatin undergoes dynamic organizational changes over an organism's life and may be a contributing cause of aging. Indeed, aging is associated with loss of heterochromatin and smoothening of patterns of transcriptionally active and repressed chromatin regions (for review, see ). This is subsequently associated with loss of repressive histone marks and spreading of active histone marks, culminating in the "heterochromatin loss model of aging," according to which age-related chromatin loss and de-repression of silenced genes lead to aberrant gene expression patterns and cellular dysfunction (Tsurumi and Li, 2012).
Induced pluripotent stem cells (iPSCs) present a promising approach for regenerative medicine. However, tumorigenicity of these cells is a major concern for potential clinical applications (Ben-David and Benvenisty, 2011). Malignant transformation and cellular reprogramming share several characteristics such as changes in epigenetic marks, gene expression, and metabolic characteristics (;). 
Furthermore, expression of the reprogramming genes Oct4, Sox2, Klf4, and c-Myc (OSKM) is frequently perturbed in cancer (Ben-David and Benvenisty, 2011;). Epigenetic changes driven by OSKM play the key role in the reprogramming process. Histone modifications, histone variants, and chromatin remodeling enzymes involved in reprogramming have been the subject of intense investigation (). Reprogramming requires erasure of the existing somatic epigenetic memory and the establishment of a new epigenetic signature (). Early reprogramming events are associated with widespread loss of H3K27me3 and opening of the chromatin (). Reprogramming also requires bivalent chromatin domains that have both activating H3K4me3 and repressive H3K27me3 marks. Furthermore, several factors can reduce the efficiency of reprogramming: H3K27me3 represses pluripotency-associated genes (), HP-1γ impedes reprogramming by maintaining heterochromatin (), and downregulation of H2A.X completely inhibits reprogramming (). Interestingly, H2A.X plays an important role in promoting reprogramming and controlling the differentiation potential of iPSCs, which is independent of its role in DNA damage sensing (). Finally, DNA methylation resists reprogramming, and inhibiting the activity of DNMT1 has been reported to increase reprogramming efficiency ().
Here, we report that NMR cells are highly resistant to OSKM reprogramming. The frequency of iPSC colonies was extremely low and was enhanced by inactivating Rb protein using SV40 LT antigen (LT). The resulting iPSCs could be expanded and differentiated into the cell lineages of three germ layers in vitro. The frequency of teratoma formation in vivo was very low compared with mouse iPSCs. Comparison of the histone landscapes in NMR and mouse using mass spectrometry revealed higher levels of repressive marks and lower levels of activating marks in the NMR cells. Furthermore, bivalent promoters in the mouse cells were repressed in the NMR, and this repression was alleviated by LT. Assay for transposase-accessible chromatin with high-throughput sequencing (ATAC-seq) revealed that NMR had more closed chromatin at promoter regions, and LT led to a massive opening of promoters. Altogether, these findings suggest that NMR cells have a more stable epigenome that is resistant to OSKM reprogramming. This more stable epigenome may contribute to the cancer resistance and longevity of this unique rodent and may provide novel insights into cancer prevention and treatment in humans.
NMR Fibroblasts Are More Resistant to OSKM-Induced Reprogramming than Mouse Fibroblasts
To generate the NMR iPSCs, we co-transfected NMR embryonic fibroblasts (NEFs) at population doubling (PD) 10 with pPB-CAG-OSKM expressing mouse OSKM factors and PBase expressing PiggyBac transposase. No alkaline-phosphatase-positive (AP+) iPS colonies were observed in NEF transfectants after 4 weeks of culture in naive culture conditions (FBS + Lif or N2B27 + Lif + 2i) on feeder layers (Figure 1A). In contrast and as a positive control, MEFs at a comparable PD number could be easily reprogrammed into AP+ iPSC colonies (Figure 1A). Similar results were also obtained in adult NMR skin fibroblasts (NSF) (Figure 1B). Importantly, transfection efficiency, measured by transfecting a GFP plasmid, was higher in NMR cells than in mouse cells (Figure S1A) and could not account for the low reprogramming efficiency of NMR cells. 
Since the above fibroblasts have been cultured in vitro for several passages, we also used freshly isolated fibroblasts for reprogramming. Although the freshly isolated skin fibroblasts underwent mesenchymal-epithelial transition at low frequency upon OSKM overexpression, these epithelial-like colonies (Figure S1B) soon stopped growing and could not be expanded in either mouse or human embryonic stem cell (ESC)/iPSC culture medium. Similarly, no iPS-like colonies were observed when we used human OSKM factors (Figure S1C). Next, we attempted to reprogram the NMR fibroblasts under primed-type PSC conditions () (KSR + bFGF + 2i). Under these conditions, NMR cells gave rise to AP+ colonies, but the frequency of AP+ colonies was 10-fold lower than for mouse cells (Figures 1C, 1D, and S1D). In order to rule out the possibility of species specificity of OSKM factors, we cloned the NMR OSKM factors into the pPB-CAG vector (Figure S1E) and reprogrammed the NMR fibroblasts. We observed no improvement in reprogramming efficiency relative to mouse OSKM factors (Figure S1F). Taken together, these results suggest that NMR fibroblasts are more resistant to OSKM-induced cellular reprogramming than mouse fibroblasts.
LT Rescues the Low Reprogramming Efficiency of NMR Fibroblasts
We speculated that OSKM might be insufficient to reprogram NMR somatic cells toward pluripotency. To address this problem, we used an OSKM + X strategy to screen for factor(s) that could facilitate the reprogramming of NMR fibroblasts (Figure 2A). Twelve additional factors that have been reported to improve the reprogramming efficiency in mouse or human fibroblasts (Nanog, Lin28, Rar + Lrg, Hif1a, Dppa4, Dppa5, Sox15, hTERT, CEBPa, p53DD, and LT) were screened in NSF and NEF cells by co-transfecting them with OSKM. Among the 12 factors, only LT restored the iPSC reprogramming (Figure 2B). LT is a viral oncoprotein that binds and inactivates both p53 and pRb. Importantly, the NMR iPS-like colonies generated by OSKM + LT (NMR OSKMLT iPSCs) could be expanded and maintained in the pluripotent state after passaging to new dishes (Figure 2C). This result indicates that either p53, pRb, or both pathways interfere with OSKM-induced cellular reprogramming in NMR fibroblasts. We also tested whether oxygen concentration may affect NMR reprogramming efficiency by performing reprogramming under physiological 3% O2 and under ambient 20% O2. Reprogramming efficiency for both species was higher at 3% O2 (Figure S2A). To determine whether the p53 or pRb pathway inhibits reprogramming of NMR cells, we used the mutant derivatives of LT; LTΔ434-444 inactivates pRb and its family members (p107 and p130), while LTK1 inactivates p53 (). Only LTΔ434-444 significantly increased the efficiency of OSKM reprogramming, under both naive and primed-type conditions (Figure 2D), and the resulting colonies could be expanded and maintained similarly to the colonies generated by the addition of the wild-type LT. It was reported that ARF suppression induces senescence in NMR fibroblasts (). As ARF suppression represses p53, we further tested whether p53 knockdown by LTK1 had a similar effect. We did not observe enlarged cells with senescent morphology or SA-β-gal-positive cells after LTK1 transfection (Figure S2B), suggesting that senescence was not the reason for low reprogramming efficiency in LTK1-transfected cells. These results taken together indicate that the Rb pathway is responsible for the resistance of NMR cells to reprogramming. 
Next, we compared the efficiencies of the p53 and Rb pathways in NMR and mouse cells, as these two pathways are known to be major barriers to reprogramming in mouse and human (;;;;). A reporter containing a p53-binding site fused to the firefly luciferase ORF showed stronger activation in mouse cells than in the NMR cells (Figure 2F). However, a reporter containing an E2F binding site showed stronger repression in the NMR cells than in the mouse cells (Figure 2G). These results were unlikely to be affected by sequence divergence between mouse and NMR, as the p53 DNA-binding domain and Rb showed very high similarity between NMR and mouse (88% and 91%, respectively). Furthermore, the DNA-binding domains of the Rb-associated transcription factors E2F and DP1 are identical between NMR and mouse (Figure S2C). Consistent with the luciferase assay results, NMR cells had higher levels of hypo-phosphorylated Rb, while mouse cells had higher levels of p53 (Figure 2G). Cumulatively, these results suggest that NMR cells have a more powerful Rb pathway, which serves as a barrier to OSKM reprogramming.
[Figure legend, panel (D): Quantification of iPSC reprogramming of skin fibroblasts under primed-type conditions (KSR + bFGF + 2i medium). All cell lines were at PD <10 at the start of reprogramming. The reprogramming was performed with mouse OSKM factors, unless otherwise indicated. The experiments were repeated at least five times, in two independent cell lines of each type, and error bars denote SD. MEF, mouse embryonic fibroblasts; NEF, naked mole rat embryonic fibroblasts; MSF, mouse skin fibroblasts; NSF, naked mole rat skin fibroblasts; nmrOSKM, reprogramming performed with naked mole rat OSKM factors.]
Characterization of NMR iPSCs
NMR OSKMLT iPSCs proliferated in culture for more than 1 year. Karyotypic analysis showed the iPSCs had a normal karyotype (Figure S3). RT-qPCR analysis confirmed the activation of endogenous Oct-3/4, E-cadherin, and NANOG in NMR iPSCs (Figure 3A). Interestingly, HAS2, a gene encoding a hyaluronan synthase, was shut down in NMR OSKMLT iPSCs, consistent with our earlier observation that NMR embryonic cells do not produce high-molecular-weight hyaluronan (). TET1, a DNA oxidase that helps establish high 5hmC in mouse or human ESCs/iPSCs, was also activated in NMR iPSCs (Figure 3A). Consistently, 5hmC, the product of TET1-catalyzed 5mC oxidation, was present at higher levels in iPSCs (Figure 3B). Immunostaining showed that NMR iPSCs expressed the mouse pluripotent marker SSEA-1 but not the human pluripotent marker SSEA-4 (Figure 3C). Thus, our data suggest that the pluripotent network has been activated in our NMR iPSCs. To verify that the NMR iPSCs obtained under naive conditions display features of naive-type iPSCs, we checked X chromosome reactivation in female NMR iPSCs. Naive iPSCs retain a pre-inactivation X chromosome state and lack a silencing mark on the X chromosome in female iPSCs (). We found that Xist RNA, a mark of X chromosome inactivation, was drastically reduced in NMR iPSCs derived from female NMR fibroblasts and cultured under naive-type conditions, while the iPSCs obtained under primed-type conditions retained Xist RNA expression (Figure S3B). Another X chromosome-linked gene, Hprt1, did not reduce its expression in naive-type iPSCs and showed an about 2-fold higher level of expression compared with the primed-type iPSCs (Figure S3C) due to the dosage compensation effect of two active X chromosomes. 
This result showed that the NMR iPSCs cultured under naive conditions had one of the features of naive iPSCs. For brevity, we hereafter refer to these cells as naive-type iPSCs. However, additional characterization, such as comparison of expression of additional naive markers, not currently feasible in the NMR due to incomplete genome annotation, would be needed to definitively classify these cells as naive-type iPSCs. We further evaluated the in vitro and in vivo differentiation capability of naive-type NMR iPSCs. NMR iPSCs formed embryoid body (EB) spheres in suspension culture (in an ultra-low-attachment dish) (Figure 3D), and the differentiation markers of three germ layers were detected in the differentiating EB cultures (Figure 3E). Interestingly, NMR iPSCs were very inefficient at forming teratomas in immunodeficient mice. Out of 16 nude mice injected subcutaneously with NMR OSKMLT iPSCs, only four developed teratomas (Figures 4A and 4B), and the teratomas took 2-5 months to grow to detectable size. In contrast, mouse iPSCs formed teratomas in all injected mice, and the tumors developed within 3-4 weeks. Teratomas formed by NMR OSKMLT iPSCs displayed three germ layers that were verified with the following markers: GFAP and Nestin for ectoderm; aSMA and Desmin for mesoderm; Vimentin and FOXA2 for endoderm (Figure 4D). These results indicate that, although the NMR iPSCs have the capability to differentiate into three germ layers in vitro and in vivo, they exhibit additional tumor suppressor activities that restrict their tumorigenicity.
[Figure legend, panels (B)-(D): (B) Representative tumor images of NSF iPSCs formed in nude mice; the tumors took an average of 3 months to grow to detectable size. (C) Representative structures of three germ layers in the teratomas of NSF iPSCs. NSF iPSCs (1 × 10^7 cells) were injected subcutaneously in nude mice. Arrows indicate endodermal derivatives (gut-like); stars indicate mesodermal derivatives (bone-like and adipocyte-like structures); triangles indicate ectodermal derivatives (neuroepithelial-like structures). (D) Immunofluorescence pictures of NSF iPSC teratoma paraffin sections stained with three germ layer markers: Nestin (expressed in neuroectoderm) and GFAP (expressed in CNS) for the ectoderm layer; aSMA and Desmin (both expressed in muscle) for the mesoderm layer; FOXA2 (expressed in hepatocytes) and Vimentin (type III intermediate filament expressed in mesenchyme and fibroblasts) for the endoderm layer.]
Naked Mole Rat Epigenome Is Enriched with Marks that Restrict Reprogramming
Since we found that inactivation of Rb enabled reprogramming of NMR fibroblasts and Rb plays a major role in maintaining stable chromatin states independently of its role in cell-cycle control (), we hypothesized that NMR may have a more stable epigenome that is refractory to OSKM reprogramming. To test this hypothesis, we first compared chromatin states at the promoters of genes involved in reprogramming in the mouse and NMR using chromatin immunoprecipitation (ChIP)-qPCR. As previously reported (), the promoter of E-cadherin had bivalent chromatin marks in mouse fibroblasts, showing enrichment for both the permissive (H3K4me3) and the repressive (H3K27me3) histone marks. Such a bivalent chromatin organization is critical for reprogramming (;). Strikingly, the E-cadherin promoter in NMR fibroblasts had only the repressive histone mark, H3K9me3 (Figure 5A). Introducing LT reduced the H3K9me3 modification on the E-cadherin promoter. Similarly, the Utf1 gene (encoding Undifferentiated Embryonic Cell Transcription Factor 1) had bivalent chromatin marks in the mouse, but was repressed in the NMR. LT treatment increased the activating H3K4me3 mark on the Utf1 promoter and reduced the repressing marks (Figure 5A). For another gene, SSEA1 (Fut4), which was observed to be bivalent in the mouse and repressed in the NMR, LT treatment increased the levels of the activating H3K4me3 mark but did not significantly reduce the repressive marks (Figure 5A). Thus, introducing LT into NMR cells led to either opening of the repressed gene promoters or changing of the chromatin marks into the bivalent form. 
Analysis of additional promoters in the NMR was complicated by the lack of accurately mapped transcription start sites in the NMR genome. Next, we compared global histone landscapes in the NMR and mouse using quantitative mass spectrometry. Relative abundance of histone posttranslational modifications (PTMs) was determined in three lines of mouse fibroblasts and four lines of NMR fibroblasts, where each line was derived from an individual animal. The complete dataset of relative abundances of histone PTMs is available in Table S4, whereas the PTMs showing significant differences between NMR and mouse are discussed below. The levels of the unmodified H3 K9-14 peptide were higher in the NMR than in the mouse. Furthermore, NMR cells had lower levels of H3K9me3, H3K9me2K14ac, and H3K9me3K14ac peptides than the mouse cells (Figure 5B). In contrast, the levels of the unmodified H3K27-40 peptide were lower in the NMR than in the mouse. NMR cells had lower levels of H3K27ac and higher levels of H3K27me1, H3K27me3, and H3K27me1K36me1 peptides. NMR cells also had lower H3K56me2 (Figure 5B). H3K9 and H3K56 methylation marks correspond to heterochromatin.
[Figure legend, panels (A)-(C): (A) H3K4me3, H3K27me3, and H3K9me3 histone marks were analyzed by ChIP-qPCR on the promoters of the E-cadherin, Utf1, and SSEA1 (Fut4) genes in MSF and NSF cells. These genes have bivalent histone marks (H3K4me3+/H3K27me3+) in the mouse but show repressive marks in the NMR. This repression is partially alleviated by LT. Enrichment of each histone modification on the promoter was normalized to input DNA. Primer sequences can be found in Table S3. The experiments were repeated three times, and error bars represent SD. **p < 0.01. (B) Total histone H3 marks that significantly differ in abundance between NSFs and MSFs. The complete dataset of histone marks is shown in Table S4. The levels of H3 marks were determined by quantitative mass spectrometry in three independent lines of NSFs and MSFs. MSFs had higher levels of H3K9 methylation marks associated with heterochromatin, typical of mouse cells (Figure S4), while NSFs had higher levels of H3K27 methylation marks associated with silencing of gene regions. The relative fraction of a specific mark was calculated by dividing the abundance of the specific mark by the sum of all forms of the same given peptide, which was considered as 1. Error bars indicate SD. **p < 0.01, *p < 0.05. For K27me3, the p value was 0.058; the K27ac peptide was undetectable in the NSFs, therefore no p value is provided. (C) Histone H2A variants show differences in acetylation marks. The levels of H2A acetylation marks were determined by quantitative mass spectrometry in three independent lines of NSFs and MSFs. Error bars indicate SD. **p < 0.01, *p < 0.05. Data on other histone modifications that did not show statistically significant differences are shown in Table S4. All cells used in the experiments described in this figure were at PD <10.]
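As a small arithmetic illustration of the normalization described in the legend above, where each modified form of a peptide is divided by the sum over all forms of that same peptide, here is a hypothetical calculation; the abundance values below are invented and are not the paper's data.

# Relative-fraction normalization: all forms of one peptide sum to 1.
raw = {
    "H3K27_unmod": 120.0,   # invented raw abundances for one peptide's forms
    "H3K27me1": 300.0,
    "H3K27me3": 540.0,
    "H3K27ac": 40.0,
}
total = sum(raw.values())
relative = {mark: abundance / total for mark, abundance in raw.items()}
for mark, frac in relative.items():
    print(f"{mark}: {frac:.3f}")   # the printed fractions sum to 1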
[Figure panel residue: GO terms enriched among NMR-specific closed genes in skin fibroblasts (regulation of programmed cell death, regulation of RNA metabolic process, regulation of cell death, dorsal/ventral pattern formation, response to ionizing radiation), plus representative locus coordinates for individual gene loci.]
Mouse cells are known to have large heterochromatic regions that are visible by in situ staining. Interestingly, NMR cells had no such heterochromatin foci (Figure S4). This may explain the higher levels of PTMs associated with constitutive heterochromatin in mouse cells, which is unlikely to affect reprogramming. In contrast, H3K27 methylation is associated with silencing of differentiation-related genes and pluripotency genes (;;). Furthermore, H3K27me3 was reported to repress pluripotency-associated genes (), and higher levels of this modification can make cells refractory to reprogramming. The unmodified histone variant H2A.Z was more abundant in the NMR, while the acetylated form of H2A.Z was more abundant in the mouse (Figure 5C). H2A.Z is enriched on gene promoters (). Unmodified H2A.Z is associated with repression, while the acetylated form is associated with active transcription (). H2A.Z acetylation is also associated with epigenetic deregulation during carcinogenesis (), suggesting that lower levels of H2A.Z acetylation in the NMR may prevent cell reprogramming and malignant transformation. Histone H2A variants H2A.V and H2A.J also showed differential acetylation between mouse and NMR (Figure 5C); however, the biological roles of these histones are not well understood. We next compared epigenetic landscapes in NMR and mouse using ATAC-seq in NMR and mouse fibroblasts with and without LT. ATAC-seq quantifies open chromatin regions genome wide (). Fragment lengths corresponding to single nucleosomes were underrepresented in the NMR relative to the mouse, while LT removed this difference, making NMR fragment lengths similar to those of the mouse (Figure 6A). The lower levels of single-nucleosome fragments may suggest that in the NMR, the chromatin is enriched both for regions that are more open and for regions that are more compact than in the mouse, with fewer regions in the "intermediate" single-nucleosome state. The total number of peaks corresponding to open chromatin was higher in the NMR (Figure 6B), which could be explained by the lower numbers of repetitive elements in the NMR genome (). However, the number of accessible regions within the promoters was higher in the mouse (Figure 6B), indicating that NMR has more closed chromatin in gene regions. Moreover, gene ontology (GO) analysis showed that the genes with ATAC-seq peak(s) in MSF but not in NSF are enriched in the GO terms "cell fate specification" and "innate immune response" associated with cellular reprogramming () (Figures 5C-5E). Upon LT expression, mouse cells lost almost twice as many peaks as they gained, while NMR cells lost and gained a similar number of peaks genome wide (Figure 6F). Remarkably, in the promoter regions, mouse cells gained 547 peaks and lost 509 peaks, while NMR cells gained 2,873 peaks and lost 874 peaks (Figure 6F). This result shows that LT triggered massive opening of gene promoters in the NMR but not in the mouse cells.
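The gain/loss quantification referenced here, which the Figure 6 legend below spells out as a greater-than-2-fold density change with weak peaks under 10 reads/bp discarded, reduces to a simple per-peak rule. A hypothetical sketch, with invented densities and field names, might look like this:

def classify_peak(density_without_lt: float, density_with_lt: float):
    """Return 'gain', 'loss', or None for one ATAC peak upon LT expression."""
    # weak-peak filter: absolute difference under 10 reads/bp is treated as noise
    if abs(density_with_lt - density_without_lt) < 10:
        return None
    if density_with_lt > 2 * density_without_lt:
        return "gain"
    if density_without_lt > 2 * density_with_lt:
        return "loss"
    return None

peaks = [(3.0, 40.0), (50.0, 12.0), (20.0, 24.0)]   # (without LT, with LT) densities
print([classify_peak(a, b) for a, b in peaks])      # prints ['gain', 'loss', None]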
The promoter peaks opened by LT in NMR cells corresponded to 1,163 genes; 520 of these genes were also NMR-specific closed genes, i.e., these genes were open in mouse cells even without LT (Figures S5A and S5B). This group included genes implicated in reprogramming such as Utf1, Tlr3 (Figure 6G), SSEA1 (Fut4), and E-cadherin (Cdh1) (Figure S6). Cumulatively, these results suggest that the chromatin structure of gene promoter regions (including many genes essential for cellular reprogramming) in the NMR fibroblasts is more closed than in the mouse fibroblasts. Introducing LT into NMR cells opened the chromatin at promoter regions, bringing the number of peaks to the level found in mouse cells. Thus, ATAC-seq analysis indicates that NMR cells have a less permissive chromatin state than mouse cells.
[Figure legend, panels (D)-(G): (D) GO analysis of the genes with opened chromatin in the mouse but closed in the NMR (NMR-specific closed genes in skin fibroblasts). Two terms marked in red font have been reported to be associated with reprogramming. Venn diagrams of the genes opened by LT are shown in Figure S3. (E) GO analysis of the genes with opened chromatin in NSFs but closed in MSFs. (F) Effect of LT on peak gain and loss in the NSFs and MSFs. To quantitatively identify the differential ATAC peaks in the same species with or without LT, the ATAC peaks with more than 2-fold difference in density between two samples were defined as "gain" or "loss" ATAC peaks in cells upon LT overexpression. It should be noted that weak peaks in which the differential values between two samples were less than 10 reads/bp have been removed to eliminate false positives. LT expression results in a greater number of peaks lost in the mouse cells than in the NSFs genome wide. In promoter regions, a similar number of peaks are lost and gained by the MSFs, while in the NSFs, LT leads to massive opening of chromatin, with over 3-fold more peaks gained than lost. (G) Representative gene loci (Utf1 and Tlr3) with ATAC-seq peaks in MSF but not in NSFs. LT overexpression opened the chromatin of these gene loci in NSFs. All cells used in the experiments described in this figure were at PD <10.]
DISCUSSION
The comparison of the reprogramming process of NMR and mouse fibroblasts revealed several unexpected findings. NMR fibroblasts were refractory to OSKM-induced reprogramming in comparison with mouse fibroblasts. The AP+ colonies were formed at very low frequency and could not be expanded in the mouse iPSC/ESC culture medium. This was observed with both mouse and human OSKM factors under both naive and primed conditions. Similarly, while this study was in preparation, the very low efficiency of reprogramming was reported for NMR cells by Miyawaki et al. using human iPSC/ESC culture medium. Resistance of NMR cells to reprogramming is also being reported by our colleagues in an accompanying paper in this issue of Stem Cell Reports ().
To understand the mechanism responsible for the resistance of NMR cells to reprogramming, we screened for factors that improve reprogramming efficiency. Our screen identified that SV40 Large T antigen strongly improved NMR reprogramming efficiency, albeit the resulting LT NMR iPSC colonies still proliferated more slowly than mouse iPSCs. The LT NMR iPSCs expressed pluripotent marker genes and could differentiate into the cell lineages of three germ layers in vitro and in vivo. A unique feature of NMR iPSCs was the low efficiency in teratoma formation. This observation is consistent with NMR cancer resistance. 
Our previous work showed that NMR adult fibroblasts secrete high-molecular-weight hyaluronic acid, which protects cells from malignant transformation, and NMR fibroblasts expressing hRasV12, LT, and knockdown of the HAS2 gene responsible for the synthesis of high-molecular-weight hyaluronic acid formed tumors in nude mice (). However, HAS2 was silenced in NMR iPSCs and the knockdown of HAS2 had no significant effect on reprogramming of NSFs, suggesting that an additional mechanism is responsible for the low teratoma formation of NMR iPSCs. The recently published study by Miyawaki et al. also reported that NMR iPSCs did not form teratomas in vivo unless they expressed mouse ERas and shRNA to inhibit Arf. In our study, NMR iPSCs formed teratomas, but the efficiency was low and the tumors grew very slowly. It is possible that we observed teratoma formation because our iPSCs contained LT and the efficiency of tumor formation would be improved by overexpressing mouse ERas or human RasV12, as in our previous study (). Cumulatively, these results suggest that once NMR cells are reprogrammed into iPSCs, they require forced expression of Ras and inactivation of p53 or Rb pathways to form tumors in vivo. However, this does not explain the low reprogramming efficiency of NMR cells. To understand how LT facilitates NMR cell reprogramming, we utilized separation-of-function mutations of LT in our reprogramming assays (). LT is a viral oncoprotein that binds and inactivates two tumor suppressor proteins, Rb and p53. Using mutated versions of LT, we showed that inactivation of Rb alone was sufficient to allow reprogramming of NMR cells. Furthermore, NMR fibroblasts displayed stronger basal Rb activity than mouse fibroblasts and much lower p53 basal activity than mouse fibroblasts. An ideal balance between p53 and Rb activities may vary between different species of mammals. For example, mouse cells are known to rely more heavily on p53 while human cells rely more on Rb (Gorbunova and Seluanov, 2010;). This may reflect the choice between reliance on elimination of damaged cells by apoptosis in the case of activating p53 versus reliance on cell-cycle arrest and heterochromatin repressive functions in the case of Rb. Our previous work showed that Rb had a stronger tumor suppressive function than p53 in NMR cells: NMR fibroblasts expressing RasV12, combined with the knockdown of Rb and enzymatic degradation of hyaluronan, formed colonies in soft agar, while the cells expressing RasV12, combined with the knockdown of p53 and enzymatic degradation of hyaluronan, did not (). Cumulatively, these results demonstrate that highly active Rb is a barrier to malignant transformation and reprogramming of NMR cells. Cell reprogramming depends on changes in chromatin structure that erase histone marks of a differentiated cell and establish new marks of a pluripotent cell (). Rb guards against reprogramming in mouse fibroblasts through the silencing of Sox2 as well as other pluripotent marker genes by maintaining a more repressive chromatin state (). Therefore, we hypothesized that NMR cells may have a more stable epigenome, and our quantitative mass spectrometry results are consistent with this hypothesis. Specifically, our analysis revealed that repressive H3K9- and H3K56-methylated histones associated with constitutive heterochromatin were more abundant in the mouse, while methylated H3K27 associated with repression of developmental and reprogramming gene promoters was more abundant in the NMR. 
Mouse cells have large regions of constitutive heterochromatin that apparently do not affect reprogramming efficiency but may carry the abundant H3K9 and H3K56 methylation marks. In contrast, NMR cells do not show prominent heterochromatic foci but carry higher levels of H3K27 methylation marks on gene promoters. H3K27 methylation was shown to repress pluripotency genes and counteract reprogramming (). Moreover, NMR cells had lower levels of the permissive H2A.Z acetylation mark. The H2A.Z histone variant is generally associated with transcribed genes, and H2A.Z acetylation promotes transcription while unmodified H2A.Z is repressive (Bönisch and Hake, 2012). The picture that emerged from histone PTM analysis was that mouse cells contain more abundant marks of constitutive heterochromatin and more abundant marks associated with active transcription of euchromatic regions. The NMR cells, on the contrary, have fewer marks of constitutive heterochromatin but more repressive marks at gene promoters. Consistently, ChIP analysis of the E-cadherin promoter, which is involved in reprogramming, showed that this promoter was bivalent in the mouse fibroblasts but contained only repressive marks in the NMR. Introducing LT reduced the levels of repressive marks on the E-cadherin promoter. We have also carried out ATAC-seq, and the results were very consistent with the histone PTM analysis. The total NMR genome has a higher number of ATAC-seq peaks, suggesting more open chromatin. This is consistent with the fact that NMR cells lack large regions of constitutive heterochromatin. The NMR genome is smaller and contains fewer repetitive elements than the mouse genome (). Repetitive elements may comprise a significant portion of constitutive heterochromatin found in mouse cells and carry abundant H3K9 and H3K56 methylation marks. Strikingly, the comparison of chromatin accessibility in promoter regions revealed that NMR cells had more closed chromatin at promoters than mouse cells. This result is again consistent with histone PTM analysis, which showed more abundant repressive H3K27 methylation marks in the NMR. This less accessible chromatin at promoters may impede reprogramming of the NMR cells. Remarkably, LT greatly increased the number of accessible promoter regions in the NMR, making it similar to that of the mouse genome, suggesting that LT enables NMR cell reprogramming by conferring a more permissive chromatin state. Epigenetic regulation has been proposed to be an important player during aging (Booth and Brunet, 2016) and tumorigenesis (;). Cancer is associated with cellular de-differentiation and loss of tissue-specific histone marks. Aging also involves smoothening of the existing epigenetic patterns and loss of heterochromatin. Human progeroid syndromes, Werner and Hutchinson-Gilford progeria, are both associated with loss of heterochromatin marks (;). This, combined with similar observations in model organisms, led to the "global heterochromatin loss theory of aging" (Tsurumi and Li, 2012). If epigenetic stability is a key determinant of longevity and tumor suppression, long-lived and cancer-resistant species would be expected to be more epigenetically stable. Our comparison of the epigenetic landscapes and reprogramming efficiency between NMR and mouse fibroblasts strongly suggests that the somatic cells derived from the long-lived and cancer-resistant NMRs have a more stable epigenome than cells from short-lived and cancer-prone mice. 
Our work provides evidence that epigenomic stability is associated with longevity and cancer resistance in a wild-type organism. The study of a naturally long-lived and cancer-resistant NMR may provide clues to engineering more stable epigenomes to prevent cancer and extend the human lifespan.
EXPERIMENTAL PROCEDURES
Detailed experimental procedures, including cell culture, plasmids, cellular reprogramming, western blotting, karyotyping, culture of NMR iPSCs, alkaline phosphatase staining, luciferase assay, animal care and teratoma assays, immunostaining, RT-qPCR, ChIP-qPCR, mass spectrometry of histones, and ATAC-seq, are provided in the Supplemental Information. All animal experiments were approved by the University of Rochester Institutional Animal Care and Use Committee.
The video above was filmed at Huntington Beach High School in Huntington Beach, California, and has been reposted several times, amassing over 26 million views since it was first uploaded. The high school officially released a statement saying that it did not condone the students' actions and expressing disappointment that any of its students would engage in bullying. In an earlier interview, Cody said that he did not really want to hit the bully, who was only identified as Noah, but could not stand by and watch Austin be hurt. It seemed as if the boy was deliberately aiming his punches at Austin's head. There was no fanfare in the teenager's actions; he simply delivered one explosive punch to the bully, then angrily criticized his actions before swiftly turning to check on his friend. The bully, Noah, has since tweeted that Austin had started the fight. The Mirror reported that the Huntington Beach Union High School District issued a statement that it would investigate the incident. Noah has since been charged with misdemeanor battery before being released into his parents' custody. The Dr. Phil interview is the first time that Cody Pines and Austin Higley have spoken of the incident together, and the master of getting people to talk about their feelings sought to get some insight into what Cody had been thinking in the moments leading up to his one-punch knockout of the bully. Austin can be heard clearly asking for the punches to stop. Dr. Phil pointed out that Austin had been taking several hits before Cody appeared and asked what he initially noticed about the one-sided fight, which was more of an assault. Austin obviously could not see the punches coming, and the bully had grabbed his arm, so he could not properly defend himself either. When asked if there is anything he has to say to Cody, Austin gives a simple yet heartfelt and touching statement before the two, who have been friends since elementary school, share an embrace. The Daily Mail wrote about Dr. Phil's admiration of the fact that Cody did walk away, that he did not exploit the situation and continue to hit the young man who fought Austin. Dr. Phil calls the young hero's actions very mature. Cody Pines is a teen full of hope and consideration, and he stated that he hopes his actions make other bullies think twice before harassing or assaulting anyone. The teen hero also said what an awesome person Austin Higley is and that he hopes everyone shows him love. The Dr. Phil episode will air on Wednesday.
package com.strategyobject.substrateclient.common.eventemitter;

import lombok.val;
import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;

class EventEmitterTest {
    @Test
    void PermanentHandlerCalledMultiple() {
        val emitter = new EventEmitter();
        final Integer[] hitCount = {0};
        emitter.on(TestEvents.ONE, _x -> hitCount[0]++);
        emitter.emit(TestEvents.ONE);
        emitter.emit(TestEvents.ONE);
        assertEquals(2, hitCount[0]);
    }

    @Test
    void OneTimeHandlerCalledOnce() {
        val emitter = new EventEmitter();
        final Integer[] hitCount = {0};
        emitter.once(TestEvents.ONE, _x -> hitCount[0]++);
        emitter.emit(TestEvents.ONE);
        emitter.emit(TestEvents.ONE);
        assertEquals(1, hitCount[0]);
    }

    @Test
    void HandlerNotCalledOnDifferentEvent() {
        val emitter = new EventEmitter();
        final Integer[] hitCount = {0};
        emitter.on(TestEvents.ONE, _x -> hitCount[0]++);
        emitter.emit(TestEvents.TWO);
        assertEquals(0, hitCount[0]);
    }

    @Test
    void CanCallMultipleHandlers() {
        val emitter = new EventEmitter();
        final Integer[] hitCount = {0};
        emitter.once(TestEvents.ONE, _x -> hitCount[0]++)
                .on(TestEvents.ONE, _x -> hitCount[0]++)
                .on(TestEvents.TWO, _x -> hitCount[0]++);
        emitter.emit(TestEvents.ONE);
        assertEquals(2, hitCount[0]);
    }

    @Test
    void EmitReturnsFalseIfNoHandlers() {
        val emitter = new EventEmitter();
        final Integer[] hitCount = {0};
        emitter.on(TestEvents.TWO, _x -> hitCount[0]++);
        assertFalse(emitter.emit(TestEvents.ONE));
        assertEquals(0, hitCount[0]);
    }

    @Test
    void CanRemoveHandlers() {
        val emitter = new EventEmitter();
        final Integer[] hitCount = {0};
        EventListener handler = _x -> hitCount[0]++;
        emitter.on(TestEvents.ONE, handler)
                .once(TestEvents.ONE, _x -> hitCount[0] += 2)
                .removeListener(TestEvents.ONE, handler)
                .on(TestEvents.ONE, _x -> hitCount[0] += 3);
        emitter.emit(TestEvents.ONE);
        assertEquals(5, hitCount[0]);
    }
}

enum TestEvents implements EventType {
    ONE,
    TWO
}
Q: What is the current date in W40K? The last time I delved into W40K, the "time" was M40.999. This was at or around the 13th Black Crusade. Cadia is almost taken and most of the planets around the Eye of Terror have fallen to Chaos. Is there a continuity story or something from Games Workshop that tells us what "time" it is?
A: It is early M42. I previously assumed it was 999.M41, but with a bit more research, I've found that M42 has begun. The Chronology of Events in the 40k universe has a section dedicated to M42. While no specific events have occurred, there is an account of what the general state of the universe is now... As a new millennium dawns, the Imperium of Man faces dreadful threats on multiple fronts: The Forces of Chaos, partially victorious during Abaddon the Despoiler's 13th Black Crusade in 999.M41, seize control of most of the surface of the world of Cadia in the Segmentum Obscurus, the gateway to a breakout into the rest of Imperial space. Only the Imperial forces' local naval and air superiority protects the Imperium from the greatest Chaos invasion seen since the Horus Heresy ten millennia before. Hive Fleet Leviathan and the remaining splinter fleets from the earlier invasions of the Milky Way Galaxy by Hive Fleet Behemoth and Hive Fleet Kraken continue to assault myriad worlds of the Imperium, slowly making their inexorable way towards Terra and the gleaming psychic beacon calling them like moths to a flame that is the Astronomicon. The Tau Empire prepares for its Fourth Sphere Expansion in the Imperium's Eastern Fringes. The Adeptus Mechanicus makes secret note that the technology of the Golden Throne of Earth continues to fail with no known way to repair the apparatus and keep the Emperor of Mankind alive. The Necrons continue to awaken at a rapidly increasing pace on their Tomb Worlds across the galaxy, ready to rebuild their ancient interstellar empire, and wipe away all the "lesser races" that stand in their way.
A Fort Worth, Texas, Army veteran was given a salute by comrades and family on Dec. 21 in the final moments of his life following a massive and sudden stroke. Staff Sgt. Matthew Whalen suffered the severe stroke on Dec. 19, which left him brain-dead. His family made the decision to remove him from life support so his organs could be donated to fellow military veterans. While Whalen was being wheeled out of his hospital room in his final moments, former and current service members held an impromptu honor guard ceremony in the hallway. “That's what Matt deserved. He was a four-time combat veteran,” Whalen's wife, Hannah, said. “He was an exceptionally loving dad ... that ceremony that they did, he deserved that recognition.” Whalen was taken to an operating room, where the organ donation procedure was performed. Whalen's wife posted on Facebook about his impending passing. “At 8pm local time Matt Whalen will give life to two veterans who are in dire need of organ transplants. Bulldog for life,” she wrote.
import json
import sqlite3

json_filename = 'data/ARBRES_TERRITOIRE_VDG_EPSG4326.json'
db_filename = 'data/arbres.db'

# -- load the GeoJSON export --
with open(json_filename) as f:
    data = json.load(f)

db = sqlite3.connect(db_filename)
cursor = db.cursor()

# -- create the table --
cursor.execute("DROP TABLE IF EXISTS arbres")
cursor.execute("""
    CREATE TABLE IF NOT EXISTS arbres (
        id INTEGER PRIMARY KEY,
        code_parent_desc TEXT,
        code_parent TEXT,
        genre_bota TEXT,
        espece TEXT,
        variete TEXT,
        anneedeplantation INTEGER,
        stadededeveloppement TEXT,
        sous_categorie_desc TEXT,
        sous_categorie TEXT,
        longitude REAL,
        latitude REAL
    )
""")
db.commit()

# -- populate the table --
arbres = []
for k, arbre in enumerate(data['features']):
    props = arbre['properties']
    lon, lat = arbre['geometry']['coordinates'][:2]
    arbres.append((
        props['CODE_PARENT_DESC'],
        props['CODE_PARENT'],
        props['GENRE_BOTA'],
        props['ESPECE'],
        props['VARIETE'],
        props['ANNEEDEPLANTATION'],
        props['STADEDEDEVELOPPEMENT'],
        props['SOUS_CATEGORIE_DESC'],
        props['SOUS_CATEGORIE'],
        lon,
        lat,
    ))
    print(k, end='\r')
print(' done ')

cursor.executemany(
    "INSERT INTO arbres VALUES (NULL, ?,?,?,?,?,?,?,?,?,?,?)",
    arbres)
db.commit()
db.close()
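Once the table is built, one possible follow-up is to query it back out of SQLite. This usage sketch counts the most common genera among trees planted since 2000; the cutoff year is arbitrary, and the column names follow the script above.

import sqlite3

db = sqlite3.connect('data/arbres.db')
cursor = db.cursor()
# top 10 genera among trees planted in or after 2000
cursor.execute("""
    SELECT genre_bota, COUNT(*) AS n
    FROM arbres
    WHERE anneedeplantation >= 2000
    GROUP BY genre_bota
    ORDER BY n DESC
    LIMIT 10
""")
for genre, n in cursor.fetchall():
    print(genre, n)
db.close()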
package com.zcswl.netty;

import io.netty.util.concurrent.EventExecutorGroup;

/**
 * @author zhoucg
 * @date 2020-11-20 10:14
 */
public abstract class OnlyTest implements EventExecutorGroup {
}
WASHINGTON -- More women now serve in Congress than ever before, and while that's a milestone worth celebrating, there's still a lot of room for growth. It wasn't too long ago that about the only way a woman could get into Congress was if her husband died in office and she was appointed to finish his term. Jeannette Rankin of Montana became the first woman elected to Congress in 1917, three years before women won the right to vote. It would be another 15 years before voters put a woman in the U.S. Senate. Hattie Caraway from Arkansas won that distinction, becoming the first woman elected to a full term. Fast forward to 2015, when a record 104 women will play a role on Capitol Hill, crafting the nation's laws. Among them is Elise Stefanik, from New York, the youngest woman ever elected to the House, and Utah's Mia Love, the first black Republican congresswoman. "I've never really ever looked at issues in Washington, D.C., as women's issues. I think women have a unique perspective on every issue," Rep. Kristi Noem, R-S.D., told CBN News. The South Dakota congresswoman co-chairs the bipartisan Congressional Caucus for Women's Issues, alongside Democratic Rep. Doris Matsui of California. The diverse group finds areas of agreement, such as ending human trafficking, and tackles them like moms checking off a to-do list. "There's so many times I've been in on a discussion on a bill or policy where if the women weren't in the room, it wouldn't have been an adequate solution; it wouldn't have been something that worked for our country," Noem said. "My experience is that women like to solve the problem and aren't as concerned necessarily about who gets the credit, and that's a different way to govern from our male counterparts," Rep. Janice Hahn, D-Calif., said. Congresswomen Hahn and Noem also belong to a women-only Bible study where their Christian faith helps defuse political differences. "It's the one area we're able to share with each other some of our struggles that we probably would not share with a bigger group or with our male counterparts," Hahn said. The women appreciate and need each other's support. "The other women that are here are absolutely fantastic and they get it and we have great relationships and care about one another's families, ask about those families, keep up with one another even when we're not in D.C.," Rep. Marsha Blackburn, R-Tenn., said. Despite making up half the population, women are a clear minority on the Hill. "It's really interesting being a conservative woman in Congress and many times you do feel like you're at the end of the list, if you will. And even when you're the next in line there's always a guy that's going to be pushed in front of you," Blackburn told CBN News. Two years in a row, Republican women have responded to the president's State of the Union Address. However, in the Republican-controlled House, men head up all the committees. And in the GOP-controlled Senate, just one woman, Sen. Lisa Murkowski, R-Alaska, holds the title of chairman. It demonstrates that women still face challenges breaking into the men's club. "For a lot of us women, the chance to build relationships with individuals gets a little hard on Capitol Hill," Noem said. "You know, our chance to socialize with our colleagues, for me, certainly isn't going to happen in a bar at night," she continued. "I'm not going to do that so my chance to really build relationships with other colleagues is going to be in a Bible study or potentially in the gym." 
"That can end up hurting your chance to become a chairman or rise to the top of some type of panel or task force," she added. Why the Dearth of Women? A recent Gallop poll shows 63 percent of Americans believe the country would be in better shape if more women were in positions of political leadership. It's an opinion shared by both women and men across every age group. So why then do women only make up 20 percent of the House and Senate? "I think a lot of women just don't feel like they want to put their family through the trials and tribulations of running for office," Hahn explained. "And the way politics has become so combative and so negative that I think a lot of women just think, 'Who needs that?'" It can be a tremendous sacrifice, especially for moms who leave their families and sometimes travel across the country. "I had to be asked to run for office or else I never would be here. I wanted to be a farmer the rest of my life," Noem shared. "But people kept asking me for several years and finally I thought, 'Well, ya know, I can't just say I want this country to be different, maybe I have to contribute to that process," she said. For now, Democrats corner the "women in Congress" market. Fourteen of the 20 female senators are Democrats, along with 62 of the 84 women in the House. Tami Nantz, who writes for Smart Girl Politics, said liberal-minded women are more apt to run for office and the Democratic Party is better at recruiting and nurturing female candidates. "I think a lot of it has to do with the way the media approaches a liberal candidate versus a conservative candidate. I think women that are on the left don't have the microscope on them that conservative women do," Nantz said. Rep. Hahn said Democrats also have more role models. In the House, they're led by Rep. Nancy Pelosi, who also served as the first female speaker. Women on both sides of the aisle hope Congress will someday more closely mirror the population. But for now, they're celebrating their record numbers and weighing in on the nation's laws in ways only women can. "I think the role models that are in this Congress are important for young girls and women across this country to see that this really is something. It's a noble profession, and it's something worth striving for," Hahn said.
Time-Resolved Imaging Study of Jetting Dynamics during Laser Printing of Viscoelastic Alginate Solutions. Matrix-assisted pulsed-laser evaporation direct-write (MAPLE DW) has been successfully implemented as a promising laser printing technology for various fabrication applications, in particular, three-dimensional bioprinting. Since most bioinks used in bioprinting are viscoelastic, it is of importance to understand the jetting dynamics during the laser printing of viscoelastic fluids in order to control and optimize the laser printing performance. In this study, MAPLE DW was implemented to study the jetting dynamics during the laser printing of representative viscoelastic alginate bioinks and evaluate the effects of operating conditions (e.g., laser fluence) and material properties (e.g., alginate concentration) on the jet formation performance. Through a time-resolved imaging approach, it is found that when the laser fluence increases or the alginate concentration decreases, the jetting behavior changes from no material transferring to well-defined jetting to well-defined jetting with an initial bulgy shape to jetting with a bulgy shape to pluming/splashing. For the desirable well-defined jetting regimes, as the laser fluence increases, the jet velocity and breakup length increase while the breakup time and primary droplet size decrease. As the alginate concentration increases, the jet velocity and breakup length decrease while the breakup time and primary droplet size increase. In addition, Ohnesorge, elasto-capillary, and Weber number based phase diagrams are presented to better appreciate the dependence of jetting regimes on the laser fluence and alginate concentration.
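For reference, the dimensionless groups invoked in the final sentence are conventionally defined as follows (with ρ the fluid density, σ the surface tension, μ the viscosity, U the jet velocity, D a characteristic length such as the jet or spot diameter, and λ the polymer relaxation time); note that the exact choice of length scale and the elasto-capillary convention vary between studies, so these are standard forms rather than the authors' stated definitions:

$$\mathrm{Oh} = \frac{\mu}{\sqrt{\rho \sigma D}}, \qquad \mathrm{We} = \frac{\rho U^{2} D}{\sigma}, \qquad \mathrm{Ec} = \frac{\lambda}{t_{c}} \;\;\text{with}\;\; t_{c} = \sqrt{\frac{\rho D^{3}}{\sigma}}$$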
/**
 * <p>This query operation <span class="hydra-summary">calculates disorder values</span>.
 * <p/>
 * <p>Table-level operation that involves a primary key, a secondary key, and
 * optionally a frequency column. For each primary key, computes how "disorderly" the
 * set of secondary keys is. If the frequency column is omitted then each row is
 * assumed to have a weight of 1.
 * <p/>
 * <pre>a b 1
 * a c 2
 * a d 1
 * b x 2
 * b y 3
 * b d 1
 * <p/>
 * disorder=0:1:2
 * <p/>
 * a 0.451544993496 0.625
 * b 0.439247291136 0.611111111111</pre>
 *
 * @user-reference
 * @hydra-name disorder
 */
public class OpDisorder extends AbstractTableOp {

    public static final Long ONE = Long.valueOf(1);

    private int primary;
    private int secondary;
    private int frequency;

    public OpDisorder(DataTableFactory tableFactory, String args, ChannelProgressivePromise queryPromise) {
        super(tableFactory, queryPromise);
        String[] split = args.split(":");
        if (split.length < 2 || split.length > 3) {
            throw new IllegalArgumentException("expected disorder=p:s[:f], got " + args);
        }
        primary = Integer.parseInt(split[0]);
        secondary = Integer.parseInt(split[1]);
        if (split.length == 3) {
            frequency = Integer.parseInt(split[2]);
        } else {
            frequency = -1;
        }
    }

    @Override
    public DataTable tableOp(DataTable input) {
        int max = Math.max(primary, Math.max(secondary, frequency));
        Map<String, Map<String, Long>> data = new TreeMap<>();
        BundleColumnBinder binder = getSourceColumnBinder(input);
        for (Bundle row : input) {
            // column indexes are 0-based, so the row needs at least max + 1 columns
            if (row.getCount() <= max) {
                continue;
            }
            Object pv = binder.getColumn(row, primary);
            String p = pv == null ? "" : pv.toString();
            Object sv = binder.getColumn(row, secondary);
            String s = sv == null ? "" : sv.toString();
            Long f = frequency < 0 ? ONE : ValueUtil.asNumberOrParse(binder.getColumn(row, frequency)).asLong().getLong();
            if (f == null || f.longValue() <= 0) {
                continue;
            }
            bump(data, p, s, f);
        }
        DataTable output = createTable(data.size());
        for (String key : data.keySet()) {
            Bundle row = output.createBundle();
            binder.appendColumn(row, ValueFactory.create(key));
            for (double d : computeDisorder(data.get(key))) {
                binder.appendColumn(row, ValueFactory.create(d));
            }
        }
        return output;
    }

    // data[p][s] += f
    protected static void bump(Map<String, Map<String, Long>> data, String p, String s, long f) {
        Map<String, Long> m = data.get(p);
        if (m == null) {
            data.put(p, m = new HashMap<>());
        }
        if (m.containsKey(s)) {
            m.put(s, Long.valueOf(f + m.get(s).longValue()));
        } else {
            m.put(s, Long.valueOf(f));
        }
    }

    public static double[] computeDisorder(Map<String, Long> data) {
        double sum = 0.0;
        double ent = 0.0;
        double gin = 0.0;
        for (String k : data.keySet()) {
            sum += data.get(k).doubleValue();
        }
        for (String k : data.keySet()) {
            double prk = data.get(k).doubleValue() / sum;
            ent += (prk * Math.log10(prk));
            for (String k2 : data.keySet()) {
                // compare by value, not reference, when skipping the same key
                if (!k.equals(k2)) {
                    double prk2 = data.get(k2).doubleValue() / sum;
                    gin += prk * prk2;
                }
            }
        }
        return new double[]{-1.0 * ent, gin};
    }

    public static void main(String[] args) throws Exception {
        Map<String, Long> data = new HashMap<>();
        data.put("a", 1L);
        data.put("b", 2L);
        data.put("c", 2L);
        System.err.println(data);
        System.err.println(Arrays.toString(computeDisorder(data)));
        System.err.println();
        data = new HashMap<>();
        data.put("a", 1L);
        data.put("b", 2L);
        data.put("c", 3L);
        System.err.println(data);
        System.err.println(Arrays.toString(computeDisorder(data)));
        System.err.println();
    }
}
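For reference, the two values appended per primary key are exactly what computeDisorder returns: the base-10 Shannon entropy and the Gini impurity of the normalized secondary-key frequencies. With p_k the frequency of secondary key k divided by the total:

$$H = -\sum_{k} p_k \log_{10} p_k, \qquad G = \sum_{i \neq j} p_i p_j = 1 - \sum_{k} p_k^{2}$$

For the documented example a: {b:1, c:2, d:1}, this gives H ≈ 0.4515 and G = 0.625, matching the javadoc output above.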
/** * Static builder methods. */ public static class Builder { /** * Utility constructor. */ private Builder() { } /** * Create a new instance of {@link ScanArgs} with limit. * * @param count number of elements to scan * @return a new instance of {@link ScanArgs} */ public static ScanArgs limit(long count) { return new ScanArgs().limit(count); } /** * Create a new instance of {@link ScanArgs} with match filter. * * @param matches the filter * @return a new instance of {@link ScanArgs} */ public static ScanArgs matches(String matches) { return new ScanArgs().match(matches); } }
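// Usage sketch (an assumption based on the factories above: the enclosing
// ScanArgs class exposes the chainable match(String) and limit(long) instance
// methods they delegate to, each returning the same ScanArgs instance):
//
//   ScanArgs args = ScanArgs.Builder.matches("user:*").limit(100);
//
// Either factory works as the entry point; the remaining filter is then
// chained onto the returned ScanArgs.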
Samsung wasn't finished after yesterday's N110 and NC310 netbooks, oh no: today brought news of yet another offering, the N120, effectively a double-wide N110 with room for a bigger keyboard, battery and nicer speakers. If you hate the look of a huge bezel around your screen (and have elf hands), you'll probably be better off with the N110, but it's not a bad idea to just stretch the chassis a bit to accommodate a 12" laptop's keyboard; anything to make typing on a tiny, tiny netbook easier, I'm down with. Crammed into the bezel you'll also find an enhanced 2.1 SRS speaker set, with a tiny dedicated subwoofer buried somewhere within. And on top of that, the N120 is packing the jumbo 6-cell battery thus far only touted on the NC310 in Asia, building on the N110's 8 hours with a promised 10.5 hours of runtime. That's a lot. South Korea, March 24, 2009. Samsung Electronics, a market leader in consumer electronics and world leader in IT technology, today launched the innovative Samsung N120 mini notebook. Although slim at just 10.1″, the N120 is designed for optimum usability and performance, with its full-size 12" notebook style keyboard, larger touchpad and an ultra-lightweight and robust slim line casing. What's more, its enhanced sound system means you can experience a rich multimedia experience when you are on the go. With the N120 you can enjoy high-quality sound anywhere using its integrated SRS 2.1ch system, featuring 2 x 1.5 Watt stereo speakers with a dedicated sub-woofer. It also features a larger touchpad and a full-size 12" notebook style keyboard with optimised key spacing, so you can type faster and make fewer mistakes. Its ergonomic design places less strain on your wrists, which all adds up to a more enjoyable and productive experience. Shielded by the robust Samsung Duracase, this mini notebook is more than capable of rolling with the punches. Having passed a grueling series of quality assurance tests, ranging from rapid temperature change to electrical surges, it sets new standards for survivability and resilience, giving you much less to worry about when you're travelling. 
As well as being extremely portable, the N120 also delivers a significantly longer battery-powered performance of up to 10.5 hours* with a long-life 6 cell battery, independent of any power supply, for maximum freedom and mobility. The unique combination of its power efficient LED display and optimised processing performance coupled with Samsung's class-leading engineering ensures that you're always ready to go anywhere and do anything thanks to the N120's vastly improved battery life. To help you get the most out of life when you're out and about, the N120's small form is packed with a comprehensive array of the latest technology for maximum performance on the move, including advanced connectivity tools to help you stay in touch. An integrated 1.3 megapixel digital motion camera combines the resolution and image quality of a still camera with the sustained high frame rates of a video camera, so it's much easier to keep in touch with friends and family, using video-conferencing or live messaging. To make it as simple as possible to exchange data, including documents, pictures and music, between almost any modern camera, peripheral or audio-visual device, the N120 incorporates a range of advanced connectivity tools, including a 3-in-1 memory card reader, 3 built-in USB ports and optional Bluetooth 2.0+EDR (Enhanced Data Rate). As well as safeguarding your data, the N120 also uses the latest medical technology to protect your health by providing a more hygienic computing environment. Its anti-bacterial keyboard is coated with a special finish that successfully eliminates virtually all bacteria by making it almost impossible for them to live and breed. "Now everyone can afford to experience exceptional portability and usability with the full sized keyboard and advanced communications tools of the ultra-light Samsung N120 mini notebook. 
"Now everyone can afford to experience exceptional portability and usability with the full sized keyboard and advanced communications tools of the ultra-light mini Samsung N120 notebook. It's the perfect choice for anyone who needs to stay in touch on the go," said Seong Woo Nam, Senior Vice President of Samsung Computer Systems Division. It's the perfect choice for anyone who needs to stay in touch on the go, "said Woo Seong Nam, Senior Vice President of Samsung Computer Systems Division. The Samsung N120 will be available from April in European, CIS, Asian, and also in the US country. The Samsung N120 will be available from April in the European, CIS, Asian, and also in the U.S. country. Incorporating Intel's smallest and lowest power processor, the Intel® Atom™ processor, which is specifically designed to deliver an amazing Internet experience using mini notebooks. Incorporating Intel's smallest and lowest power processor, the Intel ® Atom ™ processor, which is specifically designed to deliver an amazing Internet experience using mini notebooks. Based on an entirely new microarchitecture, the Intel® Atom™ processor increases energy efficiency to extend battery life, while delivering enhanced mobile performance and increased system responsiveness. Based on an entirely new micro architecture, the Intel ® Atom ™ processor increases energy efficiency to extend battery life, while delivering enhanced mobile performance and increased system responsiveness. Packed with multimedia features, Windows XP Home Edition aims to unlock the full potential of your mini notebook. Packed with multimedia features, Windows XP Home Edition aims to unlock the full potential of your mini notebook. Designed exclusively for home computing, Windows XP Home Edition puts the exciting experiences of the digital age at your fingertips. Designed exclusively for home computing, Windows XP Home Edition puts the exciting experiences of the digital age at your fingertips. From digital photos, music, and video to building a home network, Windows XP Home Edition brings you into the digital age with ease. From digital photos, music, and video to building a home network, Windows XP Home Edition brings you into the digital age with ease. Enjoy a full and rich multimedia experience on the go with high-quality sound thanks to the integrated SRS 2.1ch system, featuring 2 x 1.5 Watt stereo speakers with a dedicated sub-woofer. Enjoy a full and rich multimedia experience on the go with high-quality sound thanks to the integrated SRS 2.1ch system, featuring 2 x 1.5 watt stereo speakers with a dedicated sub-woofer. Makes typing easier and faster with a full-size 12" keyboard packed into a 10.1″ mini notebook. Makes typing easier and faster with a full-size 12 "keyboard packed into a 10.1" mini notebook. Despite its small form, the ergonomic design, with optimised key spacing and larger touchpad, provides a more pleasurable and productive experience, with less strain on your wrists, fewer errors and faster typing speeds. Despite its small shape, the ergonomic design, with optimised key spacing and larger touchpad, provides a more pleasurable and productive experience, with less strain on your Wrists, fewer errors and faster typing speeds. The class-leading slim line design lets you do more on the move - and still look good! The class-leading slim line design lets you do more on the move - and still look good! 
As well as premium ergonomics and haptic styling, the elegant, durable casing ensures maximum portability and reliability - making it easy to carry in a handbag, briefcase or luggage. Delivers longer battery-powered performance of up to 10.5 hours* with a long-life 6 cell battery, independent of any power supply, for maximum freedom and mobility. This vastly improved battery life is guaranteed by the power efficient LED display and optimised processing performance coupled with Samsung's class-leading engineering. So you're always ready to go anywhere and do anything. Experience optimum mobility with this ultra-light mini notebook, which weighs just 1.28 kg (including a 6 cell battery) - allowing you to carry less, but do more. Despite its compact size, it still incorporates comprehensive functionality for maximum performance on the move, including advanced connectivity tools and an ergonomic keyboard. Shielded by the robust Samsung Duracase to ensure reliability and durability, this mini notebook is more than capable of rolling with the punches, giving you less to worry about when travelling. A grueling series of 54 quality assurance tests, ranging from rapid temperature change to electrical surges, set new standards for survivability and resilience. An integrated 1.3 megapixel digital motion camera makes it easier than ever to keep in touch with friends and family, using video-conferencing or live messaging. The camera combines the resolution and image quality of a still camera with the sustained high frame rates of a video camera. Enables simple data sharing with a host of advanced connectivity tools, including a 3-in-1 memory card reader, 3 built-in USB ports and optional Bluetooth 2.0+EDR (Enhanced Data Rate). 
That means you can easily transfer pictures, movies and data to and from virtually any modern camera, peripheral or audio-visual device. Using the latest medical technology, the Samsung anti-bacterial keyboard is coated with a special finish that makes it almost impossible for bacteria to live and breed. So, virtually all bacteria are successfully eliminated and the notebook remains clean, creating a more hygienic computing environment. Samsung's own certification mark (ECO) guarantees Eco-friendly features, including removal of hazardous substances and material, optimised energy efficiency and effective material usage for a more environmentally friendly product. All this is backed up with the peace of mind that your notebook is protected by Samsung's class leading warranty service. In the unlikely event that something goes wrong with your notebook, the Samsung international warranty infrastructure will provide you with a fast track collection, repair and return service for your notebook in order to minimize disruption and get you back working as quickly as possible.
// docker/list-volumes.go
package docker

import (
	"github.com/docker/docker/api/types/filters"
)

// ListVolumes returns the Docker volumes labeled nog=true, reduced to the
// name/label pairs the rest of the package cares about.
func (nog *NogDockerClient) ListVolumes() ([]NogVolume, error) {
	// Only volumes created by nog carry this label.
	f := filters.NewArgs()
	f.Add("label", "nog=true")
	list, err := nog.cli.VolumeList(nog.ctx, f)
	if err != nil {
		return nil, err
	}
	noglist := []NogVolume{}
	for _, s := range list.Volumes {
		noglist = append(noglist, NogVolume{Name: s.Name, Labels: s.Labels})
	}
	return noglist, nil
}
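// Usage sketch (hypothetical caller; assumes the package provides a
// constructor elsewhere that wires up the cli and ctx fields used above):
//
//	vols, err := nogClient.ListVolumes()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, v := range vols {
//		fmt.Println(v.Name, v.Labels)
//	}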
import React from 'react'; import { ButtonGroup } from '../ButtonGroup'; import { Button } from '../Button'; import { ContextMenu, Props as ContextMenuProps } from '../ContextMenu'; import { Entry } from '../../services/LibrariesService'; export interface Props extends ContextMenuProps { entry?: Entry; onDownload?: () => void; onRename?: () => void; onDelete?: () => void; onUpload: () => void; onNewFolder: () => void; } export const FilesContextMenu: React.FC<Props> = ({ entry, onDownload, onRename, onDelete, onUpload, onNewFolder, ...props }) => ( <ContextMenu {...props}> <ButtonGroup vertical> {entry && entry.category !== 'Folder' && ( <Button onClick={onDownload} icon="download"> Download </Button> )} {entry && ( <Button onClick={onRename} icon="pencil-alt"> Rename </Button> )} {entry && ( <Button onClick={onDelete} icon="trash"> Delete </Button> )} <Button onClick={onUpload} icon="upload"> Upload </Button> <Button onClick={onNewFolder} icon="folder-plus"> New Folder </Button> </ButtonGroup> </ContextMenu> );
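// Usage sketch (hypothetical wiring; the handler names, the selectedEntry
// state, and any positioning props inherited from ContextMenuProps are
// assumptions, not part of this module):
//
//   <FilesContextMenu
//     entry={selectedEntry}
//     onDownload={() => download(selectedEntry)}
//     onRename={() => setRenaming(true)}
//     onDelete={() => remove(selectedEntry)}
//     onUpload={openUploadDialog}
//     onNewFolder={createNewFolder}
//   />
//
// Folder entries hide the Download button via the category check above, and
// the Upload / New Folder actions remain available even when no entry is
// selected.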
Too few complaints about hospitals and GPs are being dealt with locally, says ombudsman The NHS in England is still failing to deal adequately with the most straightforward complaints, says the parliamentary and health service ombudsman, Ann Abraham, in her latest report. Improvement in complaints handling by the NHS since the system was reformed three years ago is still patchy and slow, and too many complaints are reaching her office that should have been resolved locally, she adds. The report names and shames the 10 most complained about hospital trusts in 2010-11, six of which are in London. Top of the league table stands the Heart of England NHS Foundation Trust in the Midlands, up from 13th last year. The number
Executive Summary

The World Economic Forum's Global Risks 2013 report is developed from an annual survey of more than 1,000 experts from industry, government, academia and civil society who were asked to review a landscape of 50 global risks.

The global risk that respondents rated most likely to manifest over the next 10 years is severe income disparity, while the risk rated as having the highest impact if it were to manifest is major systemic financial failure. There are also two risks appearing in the top five of both impact and likelihood – chronic fiscal imbalances and water supply crisis (see Figure 4). Unforeseen consequences of life science technologies was the biggest mover among global risks when assessing likelihood, while unforeseen negative consequences of regulation moved the most on the impact scale when comparing the result with last year's (see Figure 5).

Three Risk Cases

The report introduces three risk cases, based on an analysis of survey results, consultation with experts and further research. Each case represents an interesting constellation of global risks and explores their impact at the global and national levels. The three risk cases are:

Testing Economic and Environmental Resilience

Continued stress on the global economic system is positioned to absorb the attention of leaders for the foreseeable future. Meanwhile, the Earth's environmental system is simultaneously coming under increasing stress. Future simultaneous shocks to both systems could trigger the "perfect global storm", with potentially insurmountable consequences. On the economic front, global resilience is being tested by bold monetary and austere fiscal policies. On the environmental front, the Earth's resilience is being tested by rising global temperatures and extreme weather events that are likely to become more frequent and severe. A sudden and massive collapse on one front is certain to doom the other's chance of developing an effective, long-term solution. Given the likelihood of future financial crises and natural catastrophes, are there ways to build resilience in our economic and environmental systems at the same time?

Digital Wildfires in a Hyperconnected World

In 1938, thousands of Americans confused a radio adaptation of the H.G. Wells novel The War of the Worlds with an official news broadcast and panicked, in the belief that the United States had been invaded by Martians. Is it possible that the Internet could be the source of a comparable wave of panic, but with severe geopolitical consequences? Social media allows information to spread around the world at breakneck speed in an open system where norms and rules are starting to emerge but have not yet been defined. While the benefits of our hyperconnected communication systems are undisputed, they could potentially enable the viral spread of information that is either intentionally or unintentionally misleading or provocative. Imagine a real-world example of shouting "fire!" in a crowded theatre. In a virtual equivalent, damage can be done by the rapid spread of misinformation even when correct information follows quickly. Are there ways for generators and consumers of social media to develop an ethos of responsibility and healthy scepticism to mitigate the risk of digital wildfires?

The Dangers of Hubris on Human Health

Health is a critical system that is constantly being challenged, be it by emerging pandemics or chronic illnesses. 
Scientific discoveries and emerging technologies allow us to face such challenges, but the medical successes of the past century may also be creating a false sense of security. Arguably, one of the most effective and common means to protect human life – the use of antibacterial and antimicrobial compounds (antibiotics) – may no longer be readily available in the near future. Every dose of antibiotics creates selective evolutionary pressures, as some bacteria survive to pass on the genetic mutations that enabled them to do so. Until now, new antibiotics have been developed to replace older, increasingly ineffective ones. However, human innovation may no longer be outpacing bacterial mutation. None of the new drugs currently in the development pipeline may be effective against certain new mutations of killer bacteria that could turn into a pandemic. Are there ways to stimulate the development of new antibiotics as well as align incentives to prevent their overuse, or are we in danger of returning to a pre-antibiotic era in which a scratch could be potentially fatal?

Special Report: National Resilience to Global Risks

This year's Special Report examines the difficult issue of how a country should prepare for a global risk that is seemingly beyond its control or influence. One possible approach rests with "systems thinking" and applying the concept of resilience to countries. The report introduces five components of resilience – robustness, redundancy, resourcefulness, response and recovery – that can be applied to five country subsystems: the economic, environmental, governance, infrastructure and social. The result is a diagnostic tool for decision-makers to assess and monitor national resilience to global risks.

X Factors from Nature

Developed in partnership with the editors of Nature, a leading science journal, the chapter on "X Factors" looks beyond the landscape of 50 global risks to alert decision-makers to five emerging game-changers:

Runaway climate change: Is it possible that we have already passed a point of no return and that Earth's atmosphere is tipping rapidly into an inhospitable state?

Significant cognitive enhancement: Ethical dilemmas akin to doping in sports could start to extend into daily working life; an arms race in the neural "enhancement" of combat troops could also ensue.

Rogue deployment of geoengineering: Technology is now being developed to manipulate the climate; a state or private individual could use it unilaterally.

Costs of living longer: Medical advances are prolonging life, but long-term palliative care is expensive. Covering the costs associated with old age could be a struggle.

Discovery of alien life: Proof of life elsewhere in the universe could have profound psychological implications for human belief systems.
The Global Risks report is the flagship research publication of the World Economic Forum's Risk Response Network, which provides an independent platform for stakeholders to explore ways to collaborate on building resilience to global risks. Further information can be found at www.weforum.org/risk

The Evolving Risk Landscape

How do the top risks as identified by the annual Global Risks Perception Survey change over time? Figure 6 shows how this list changed over the past seven years. The average ratings of the risks have changed slightly, as described in detail in Section 4 of the report, but the relative ranking of the risks according to their impact or their likelihood is less affected. Interestingly, the diffusion of weapons of mass destruction has moved into the top five risks in terms of impact.
Allantoic fluid protease activity during influenza virus infection. Neutral protease activity of allantoic fluid from embryonated chicken eggs was quantified during the course of influenza virus infection. Antigenic subtypes of influenza A viruses selected for study were the H1N1 strains PR/8/34, Brazil/8/78 and FM/1/47, the H3N2 strain Bangkok/1/80 and the H5N9 strain Turkey/Ontario/66, as well as the Sendai strain of parainfluenza type 1 virus. Three different types of profiles of allantoic fluid proteases could be readily distinguished after infection of eggs with the various virus strains. In all profiles, periodic peaks of protease activity always preceded the partial shutdown of protamine-cleaving proteases, which paralleled the production of near-maximum titers of infectious virus. To determine the mechanism involved in this reduction of proteolytic activity, infectious allantoic fluids were analysed for the presence of protease inhibitors. Acid- and heat-treated 48-hour virus-infected allantoic fluids of different influenza strains could inhibit the activities of subtilisin and allantoic fluid proteolytic enzymes.
#include <iostream>
using namespace std;

int main() {
    float Nota1, Nota2, Media;

    // Read the two exam grades.
    cout << "Enter the first grade: ";
    cin >> Nota1;
    cout << "Enter the second grade: ";
    cin >> Nota2;

    // Average of the two grades.
    Media = (Nota1 + Nota2) / 2;
    cout << "The average is: " << Media << endl;

    // A grade of 7.0 or higher (on a 0-10 scale) passes outright;
    // anything lower sends the student to the final exam.
    if (Media >= 7 && Media <= 10) {
        cout << "The student passed";
    } else {
        cout << "The student goes to the final exam";
    }
    return 0;
}
Today in the Rock River Valley, you can expect frost until 8 a.m., followed by a day of mostly cloudy skies and scattered showers. The weather will stay cool, with temperatures in the lower 50s and south winds gusting as high as 25 mph. The next storm system is expected to slide in late Wednesday and last through Thursday morning. This system will bring us cooler temperatures and scattered showers.
Defective Phosphatidylglycerol Remodeling Causes Hepatopathy, Linking Mitochondrial Dysfunction to Hepatosteatosis

Background & Aims
Obesity promotes the development of nonalcoholic fatty liver diseases (NAFLDs), yet not all obese patients develop NAFLD. The underlying causes for this discrepancy remain elusive. LPGAT1 is an acyltransferase that catalyzes the remodeling of phosphatidylglycerol (PG), a mitochondrial phospholipid implicated in various metabolic diseases. Here, we investigated the role of LPGAT1 in regulating the onset of diet-induced obesity and its related hepatosteatosis because polymorphisms of the LPGAT1 gene promoter were strongly associated with susceptibility to obesity in Pima Indians.

Methods
Mice with whole-body knockout of LPGAT1 were generated to investigate the role of PG remodeling in NAFLD.

Results
LPGAT1 deficiency protected mice from diet-induced obesity, but led to hepatopathy, insulin resistance, and NAFLD as a consequence of oxidative stress, mitochondrial DNA depletion, and mitochondrial dysfunction.

Conclusions
This study identified an unexpected role of PG remodeling in obesity, linking mitochondrial dysfunction to NAFLD.

Obesity significantly increases the risk of nonalcoholic fatty liver disease (NAFLD), a condition that affects more than 30% of the US adult population. However, not all obese patients develop NAFLD. Although the precise molecular mechanisms underlying the discrepancy remain poorly understood, it is now widely accepted that mitochondrial dysfunction is pivotal to the pathogenesis of NAFLD and its progression to nonalcoholic steatohepatitis (NASH).1 Obese patients who developed NAFLD showed a gradual decline of the respiratory control ratio and mitochondrial coupling efficiency before the progression to NASH. Accordingly, only those obese patients who showed a loss of mitochondrial functional adaptation to the bioenergetic needs in obesity were highly prone to the development of NAFLD.1 However, the precise causes for these mitochondrial defects in NAFLD remain elusive, which has hindered ongoing efforts in developing an effective treatment of NAFLD and its dangerous progression to NASH.

Phosphatidylglycerol (PG) is a glycerophospholipid commonly recognized for its important role as a precursor for the synthesis of cardiolipin (CL), a mitochondrial signature phospholipid required for dynamic mitochondrial functions. PG deficiency in mammalian cells leads to CL deficiency, mitochondrial dysfunction, and a reduction in adenosine triphosphate production.5 Disruption of the PGS1 gene in yeast causes PG and CL deficiency and inhibition of growth on nonfermentable carbon sources.6 PG is subjected to remodeling subsequent to its de novo biosynthesis in mitochondria to incorporate the appropriate acyl content for its biological functions and to prevent the harmful effect of lysophosphatidylglycerol accumulation. Consequently, defective PG remodeling is implicated in the pathogenesis of NAFLD7 and of 3-methylglutaconic aciduria with deafness, encephalopathy and Leigh-like (MEGDEL) syndrome, a recessive genetic disorder of dystonia and deafness with Leigh-like syndrome.8 Patients with MEGDEL syndrome also showed hepatopathy and mitochondrial dysfunction. Defective PG remodeling also is associated with the onset of Barth syndrome, an X-linked recessive disease caused by mutations of the tafazzin gene encoding a transacylase involved in CL remodeling.9
PG and CL deficiency in Barth syndrome significantly impaired mitochondrial fatty acid oxidation, which leads to cardiomyopathy and premature death.10,11 Our previous work showed that Lysophosphatidylglycerol Acyltransferase 1 (LPGAT1) is an acyltransferase that catalyzes the acylation of lysophosphatidylglycerol to PG, a key step in the PG remodeling process.3 LPGAT1 belongs to a large family of acyltransferases, which are involved in a variety of biological processes, including pathways that regulate energy homeostasis, body weight, and NAFLD. LPGAT1 also was reported to regulate lipid metabolism in the liver as a putative monoacylglycerol acyltransferase.12 Recently, a genome-wide association study linked DNA polymorphism of the LPGAT1 gene promoter to the onset of severe obesity in Pima Indians.13 LPGAT1 is expressed abundantly in a number of metabolic tissues, with highest expression in the liver.3 Moreover, LPGAT1 is implicated as a key regulator of cholesterol secretion and atherosclerosis.14 However, the metabolic function of the LPGAT1 enzyme remains elusive. In this study, we generated mice with targeted deletion of LPGAT1 and investigated the role of LPGAT1 in regulating diet-induced obesity (DIO) and its related hepatosteatosis. We show that PG remodeling by LPGAT1 plays an important role in protecting against the mitochondrial dysfunction associated with NAFLD.

Ablation of LPGAT1 Prevents DIO, but Leads to Severe Insulin Resistance

The LPGAT1 gene promoter polymorphism recently was implicated in obesity in Pima Indians,13 but the roles of the LPGAT1 gene in lipid metabolism and energy homeostasis remain elusive. By using the Clustered Regularly Interspaced Short Palindromic Repeats/CRISPR-associated protein 9 (CRISPR/Cas9)-mediated gene editing technique, we recently generated mice with a targeted deletion of the LPGAT1 gene to determine its metabolic function (Figure 1A and B). The genotype of the LPGAT1 knockout mice (LPGAT1-/-) was confirmed by both reverse-transcription polymerase chain reaction (RT-PCR) and Western blot analyses (Figure 1C and D). The LPGAT1-/- mice were born at the normal Mendelian ratio, but had significantly lower birth weight and body weight on either a normal chow diet or a high-fat diet (HFD) (Figure 2A and B). However, the male LPGAT1-/- mice showed a higher percentage of body weight gain when fed a normal chow diet or a HFD (Figure 2C and D). Contrary to the findings from the genome-wide association study in Pima Indians, LPGAT1-/- mice were protected from DIO, as evidenced by a significantly lower fat mass relative to the wild-type (WT) controls (Figure 2G). Despite resistance to DIO, LPGAT1-/- mice developed glucose intolerance in response to a HFD, as indicated by the results from a glucose tolerance test (Figure 2E). The defect most likely was caused by a reduction in insulin sensitivity, as evidenced by the results from insulin tolerance tests (Figure 2F). Surprisingly, the insulin resistance was not caused by the hyperinsulinemia commonly associated with obesity. In contrast to the hyperinsulinemia in WT controls, LPGAT1-/- mice showed a normal fasting serum insulin level (Figure 2H) and significantly lower levels of glucose-stimulated insulin secretion during glucose tolerance tests (Figure 2I).
Likewise, the female LPGAT1 knockout mice also showed similar metabolic defects to male mice, as evidenced by significantly higher weight gain (Figure 3A and B), glucose intolerance (Figure 3C), and insulin resistance (Figure 3D). LPGAT1 recently was reported as one of the target genes of microRNA-30c (miR-30c), which significantly depletes the expression of LPGAT1.15 Targeted deletion of miR-30c significantly increased the plasma cholesterol level and hepatic lipid synthesis.15 Conversely, treatment of mice with miR-30c mimetics mitigated hypercholesterolemia and atherosclerosis.16 Consistent with the findings, we showed

LPGAT1 Deficiency Leads to Hepatopathy, Hepatosteatosis, and Hepatofibrosis

We next investigated the effect of LPGAT1 on hepatic lipid and cholesterol homeostasis because depletion of LPGAT1 messenger RNA (mRNA) by miR-30c significantly down-regulated hepatic lipid synthesis.16 Surprisingly, LPGAT1-/- mice developed spontaneous hepatosteatosis, which was exacerbated by feeding with a HFD. Accordingly, LPGAT1 deficiency significantly increased liver weight and the content of both hepatic triglyceride and cholesterol in both male (Figure 4A-C) and female (Figure 4D-F) mice. The results were corroborated further by Oil red O staining of the liver section (Figure 5B). Strikingly, LPGAT1 deficiency also caused hepatopathy, a major defect associated with MEGDEL syndrome, as evidenced by dilated hepatic venules, which were obstructed by massive accumulation of fat droplets in response to a HFD (Figure 5A, highlighted by arrows). Consistent with hepatosteatosis, LPGAT1 deficiency significantly down-regulated the expression of genes required for lipolysis, including CGI-58 and adiponutrin (Figure 5C and D), and also up-regulated the genes required for hepatic lipogenesis, including SREBP1c, FAS1, and ACC1 (Figure 5E-G), as shown by results from the RT-PCR analysis.

Hepatopathy often leads to liver fibrosis before the development of NASH in patients with MEGDEL syndrome. Indeed, LPGAT1 deficiency also caused severe hepatic fibrosis in response to a HFD, as evidenced by increased expression of the fibrosis markers collagen I and III (Figure 6A and B). The results were confirmed further by Masson's trichrome staining of collagen fibers (Figure 6C, highlighted by arrows; the area of fibrosis is quantified in Figure 6D). Again, the defects were highly reminiscent of those observed in MEGDEL syndrome, which is characterized by fibrotic staining of the dilated hepatic venules.

Increased de novo lipogenesis plays an important role in the accumulation of triglyceride in NAFLD. To further explore the molecular mechanisms by which LPGAT1 deficiency caused hepatosteatosis, we isolated primary hepatocytes and analyzed the effect of LPGAT1 deficiency on the expression of several genes involved in hepatic lipid synthesis by real-time RT-PCR analysis. Consistent with findings from the liver tissue, LPGAT1 deficiency also significantly increased the expression of genes involved in lipid synthesis, including PPARa, SREBP1c, and ACC1, in primary hepatocytes under basal conditions and in response to treatment with oleic acids (Figure 7A-C). Consequently, LPGAT1 deficiency also significantly increased both the number and size of lipid droplets in cultured primary hepatocytes (Figure 7D; the lipid droplet number and size are quantified in Figure 7E and F, respectively).
However, LPGAT1 deficiency did not promote lipid droplet biogenesis, because the total number of lipid droplets was similar between LPGAT1-/- and the WT controls in response to stimulation with oleic acid (Figure 7D and E). To further address the issue of whether the effect of LPGAT1 deficiency on hepatosteatosis is autonomous or liver-specific, we next determined the effect of LPGAT1 deficiency on lipid droplet biogenesis in C2C12 cells, an immortalized mouse myoblast cell line. The results showed that LPGAT1 depletion did not significantly increase lipid droplet biogenesis in C2C12 cells.

LPGAT1 Specifically Regulates Insulin Signaling in Hepatocytes

Mitochondrial-associated membrane (MAM) recently was identified as a major regulatory site for insulin signaling.17 To gain further insight into the molecular mechanisms underlying insulin resistance in LPGAT1-/- mice, we next analyzed the subcellular localization of LPGAT1. The results showed that LPGAT1 is localized primarily at MAM, a primary site for phospholipid remodeling (Figure 9A). Consistent with this notion, LPGAT1 deficiency significantly impaired insulin signaling in the liver, as corroborated by a significant decrease in insulin-stimulated protein kinase B (Akt) and glycogen synthase kinase 3α/β (Gsk3α/β) phosphorylation (Figure 9B, quantified in Figure 9C and D, respectively). Likewise, LPGAT1 deficiency also significantly impaired insulin signaling in cultured primary hepatocytes (Figure 9E). In contrast, LPGAT1 deficiency did not have a major effect on insulin signaling in other metabolic tissues, including skeletal muscle (Figure 9F) and heart (Figure 9G), further implicating a key role of LPGAT1 as a hepatic regulator of metabolism. In addition to MAM, LPGAT1 also is localized at the endoplasmic reticulum (ER), where it is required for ER homeostasis (Figure 9A). Remarkably, LPGAT1 deficiency also caused severe ER stress, which was exacerbated by oxidative stress upon treatment of primary hepatocytes with H2O2, as evidenced by up-regulated expression of major ER stress regulators, including PERK, ATF4, BIP, ER57, GADD-34, and Xbp1 (Figure 10A-F).

LPGAT1 Deficiency Leads to Mitochondrial Dysfunction in the Liver

Defective PG remodeling causes mitochondrial dysfunction, which is implicated in the pathogenesis of MEGDEL and Barth syndromes. We next questioned whether LPGAT1 deficiency also would cause mitochondrial dysfunction in the liver, because mitochondrial dysfunction is implicated in the pathogenesis of NAFLD.

(Displaced figure legend: the mice described in Figure 3 were analyzed for changes in the following: (D) liver weight to body weight ratio, (E) liver TAG level, and (F) liver total cholesterol level. Data are represented as means ± SD (A and D, n = 8-10; B, C, E, and F, n = 5). *P < .05, **P < .01 by 1-way analysis of variance. BW, body weight.)

As shown in Figure 11, LPGAT1 deficiency significantly increased the intracellular level of reactive oxygen species (ROS) in cultured primary hepatocytes, which was exacerbated further in response to oxidative stress by H2O2 (Figure 11A). Accordingly, LPGAT1 deficiency also promoted lipid peroxidation, as evidenced by increased levels of thiobarbituric acid reactive substances (TBARS), a byproduct of lipid peroxidation (Figure 11B). Moreover, oxidative stress caused depletion of mitochondrial DNA (mtDNA) copy number in cultured primary hepatocytes from LPGAT1-/- mice (Figure 11C).
Consequently, LPGAT1 deficiency significantly impaired mitochondrial respiration capacity, as evidenced by a decreased mitochondrial oxygen consumption rate and blunted responses to different mitochondrial respiratory inhibitors, including oligomycin (an adenosine triphosphatase inhibitor), carbonyl cyanide p-trifluoromethoxyphenylhydrazone (FCCP; a mitochondrial uncoupler), and rotenone (a complex I inhibitor) (Figure 11D, quantified in Figure 11E).

Oxidative stress disrupts mitochondrial dynamics, which also is implicated in the pathogenesis of NAFLD and other aging-related diseases. We next determined the effect of oxidative stress on mitochondrial morphology in cultured primary hepatocytes from LPGAT1-/- mice and the WT controls by confocal imaging analysis. The results showed that LPGAT1 deficiency rendered primary hepatocytes highly sensitive to damage by oxidative stress from H2O2, which disrupted mitochondrial dynamics, leading to mitochondrial fragmentation in primary hepatocytes from LPGAT1-/- mice. Surprisingly, LPGAT1 deficiency also rendered mitochondria more sensitive to damage by lipid overload. The addition of oleic acids to the culture medium severely disrupted mitochondrial dynamics in cultured primary hepatocytes from LPGAT1-/- mice relative to the WT controls, leading to mitochondrial fragmentation (Figure 11F).

LPGAT1 Deficiency Leads to Defective PG and CL Remodeling Commonly Associated With Metabolic Diseases

We next determined the effect of LPGAT1 deficiency on the acyl profiles of PG, CL, and other phospholipids in the liver by lipidomic analysis. The results showed that LPGAT1 deficiency caused similar defects in PG remodeling as observed in MEGDEL syndrome, including a significant decrease in PG-36:2 (Figure 12A). Surprisingly, ablation of LPGAT1 also significantly depleted the content of linoleic acid (C18:2), the major fatty acyl component of CL in metabolic tissues (Figure 12A, highlighted by a dashed box). Consequently, LPGAT1 deficiency also led to a significant decrease in the tetralinoleoyl cardiolipin (TLCL) level in the liver (Figure 12B, highlighted by a dashed box, and Figure 12C), a common defect associated with the etiology of NAFLD, obesity, heart failure, and other aging-related diseases.18,19 In contrast, LPGAT1 deficiency did not significantly change the total levels of PG, CL, and other phospholipids, including phosphatidylserine (PS), phosphatidylethanolamine (PE), and phosphatidylinositol, although there was a moderate decrease in the total level of phosphatidylcholine (Figure 12D) and a slight increase in the total level of phosphatidic acid (Figure 13A and B). Interestingly, LPGAT1 deficiency also significantly changed the acyl profiles of PS, PE, phosphatidic acid, phosphatidylcholine, and phosphatidylinositol in the liver (Figures 13 and 14). Contrary to the changes in PG and CL, LPGAT1 deficiency significantly increased the linoleic acid (C18:2) content in both PS and PE (Figure 14A and B, highlighted by red-lined boxes), although the biological significance of these changes remains to be elucidated.

Discussion

Mitochondrial dysfunction plays a major role in the development of NAFLD, which is increasing because of the ongoing obesity epidemic. There is no effective treatment for this debilitating disorder owing to poor understanding of the pathogenic mechanisms and a lack of suitable drug targets.
Moreover, approximately 25% of NAFLD patients are not obese, and the importance of mitochondrial dysfunction in these patients remains to be determined. In this study, we identified LPGAT1 as a key regulator of mitochondrial dysfunction in NAFLD, which is supported by multiple lines of evidence. We showed that LPGAT1 deficiency rendered the mice highly susceptible to the development of severe hepatosteatosis, implicating mitochondrial dysfunction as a potential cause of NAFLD. Indeed, LPGAT1 deficiency also caused multiple defects in mitochondrial function in the liver, including oxidative stress, mtDNA depletion, mitochondrial fragmentation, and impaired oxidative phosphorylation. In contrast to a recent report that decreased LPGAT1 expression was associated with severe obesity in Pima Indians,13 LPGAT1-deficient mice are protected from DIO. Our findings are corroborated further by a recent report that mitochondrial dysfunction is the primary determining factor for susceptibility to the onset of NAFLD in obese patients.1 The LPGAT1-deficient mice were born with lower body weight, although they gained a higher percentage of weight after birth. It will be interesting to investigate the causative role of LPGAT1 deficiency in body weight gain, because the role of PG remodeling in embryonic development is largely unknown.

LPGAT1 catalyzes the remodeling of PG, which plays an important role in maintaining mitochondrial function.3 Defective PG remodeling causes MEGDEL syndrome, as evidenced by genetic mutation of the SERAC1 gene in human beings.8 SERAC1 is a putative enzyme required for PG remodeling. SERAC1 mutations cause hepatopathy, encephalopathy, and hypotonia. In support of LPGAT1 as a key enzyme that regulates PG remodeling, we showed in this study that LPGAT1 deficiency led to abnormal acyl compositions of PG that were highly reminiscent of the defects in MEGDEL syndrome. Consistent with the findings, LPGAT1 deficiency also caused hepatopathy and the abnormal cholesterol metabolism commonly associated with MEGDEL syndrome, including dilated hepatic venules, hepatofibrosis, decreased serum cholesterol level, and accumulation of cholesterol in hepatocytes.8

Although a previous report implicated a role of LPGAT1 in triglyceride synthesis as a putative monoacylglycerol acyltransferase (MGAT),12 our data do not support the notion that hepatosteatosis in LPGAT1-/- mice was caused by impaired MGAT activity, because LPGAT1 deficiency promoted lipogenesis in the liver and in cultured primary hepatocytes. In addition, overexpression of LPGAT1 in CV-1 in Origin with SV40 (COS-7) cells did not promote lipid droplet formation in response to oleic acid stimulation or monoacylglycerol incubation (data not shown), which further indicated that the hepatosteatosis caused by LPGAT1 deficiency was not caused by MGAT activity. Moreover, LPGAT1 deficiency specifically promoted lipid droplet biogenesis in cultured hepatocytes, but not in C2C12 cells, whereas overexpression of MGAT2 stimulated lipid droplet biogenesis.20

PG is a precursor for the synthesis of CL, a mitochondrial signature phospholipid that plays a pivotal role in normal mitochondrial function, including mitochondrial membrane structure, respiration, mitochondrial fusion/fission, and mitophagy. The biological function of CL is determined by the composition of its 4 fatty acyl chains, which are dominated by linoleic acid (C18:2) in metabolic tissues, including liver, heart, and skeletal muscles.21
21 This unique CL structure, also known as TLCL, is believed to be required for mitochondrial architecture, function, and mitophagy, as evidenced by findings from research on Barth syndrome. 11 Consequently, TLCL depletion leads to mitochondrial dysfunction in Barth syndrome, including reactive oxygen species (ROS) production and defective oxidative phosphorylation, fatty acid oxidation, adenosine triphosphate production, and mitophagy. 10,11,19,23,25 TLCL depletion also is implicated in mitochondrial dysfunction in obesity, NAFLD, and other aging-related diseases. 23,26,27 Consistent with this notion, we showed that LPGAT1 deficiency led to depletion of the TLCL level in the liver, further implicating an important role of PG remodeling by LPGAT1 in NAFLD.

One of the most striking features of the LPGAT1 knockout mice is severe hepatic insulin resistance in the absence of obesity and hyperinsulinemia. Although obesity is the primary cause of insulin resistance, LPGAT1 knockout mice were leaner and had a lower fasting insulin level than WT control mice on a HFD. How, then, could this happen? The answer comes from studies of the subcellular localization of LPGAT1. We showed that LPGAT1 is abundantly localized at MAM, the primary hub for insulin signaling, because both the mechanistic target of rapamycin (mTOR) and Akt, the primary downstream targets of insulin signaling, are localized at MAM. Consistently, mechanistic target of rapamycin complex 2 (mTORC2) deficiency disrupts MAM, causing mitochondrial defects that are dependent on Akt phosphorylation in MAM. 17 Disruption of MAM integrity also impairs insulin signaling in mouse and human primary hepatocytes. 28 MAM earmarks the site for mitochondrial fission and mtDNA replication, 29 a key process required for mitochondrial quality control through the elimination of ROS-damaged mitochondria by mitophagy. 30 Accordingly, we and others have previously shown that obesity and type 2 diabetes cause dilation of MAM, leading to mitochondrial fragmentation, ROS production, and insulin resistance. 21,31 In support of a key role of LPGAT1 in regulating insulin signaling at MAM, we showed that LPGAT1 deficiency causes mitochondrial fragmentation, defective mitophagy, and severe insulin resistance.

Taken together, our findings support a key role of LPGAT1 in the onset of NAFLD. More importantly, our work identifies LPGAT1 as a novel drug target for the treatment of NAFLD.

Generation of Mice With Targeted Deletion of the LPGAT1 Gene

CRISPR/Cas9-mediated gene editing was used to generate LPGAT1 knockout mice. Briefly, guide RNAs targeting protospacer adjacent motifs (PAMs) in exons of the LPGAT1 genomic DNA were designed, and 2 PAM sites were chosen on exon 3 of the LPGAT1 gene (Figure 1A). The complementary oligo DNAs were synthesized, annealed, and cloned into pUC-57 under the control of the T7 RNA polymerase promoter. The correct constructs were digested with the enzyme DraI, and the purified products were used as templates for in vitro transcription with a T7 high-yield RNA synthesis kit (New England Biolabs, Ipswich, MA). In parallel, the plasmid encoding Cas9, driven by the T7 RNA polymerase promoter, was digested with the enzyme PmeI, and the purified products were used as templates for in vitro transcription with the mMESSAGE mMACHINE T7 ULTRA kit (Life Technologies, Carlsbad, CA).
Both the guide RNAs targeting LPGAT1 and the Cas9 mRNA were purified using the MEGAclear kit (Life Technologies). A mixture of guide RNAs targeting LPGAT1 (20 ng/mL) and Cas9 mRNA (200 ng/mL) was co-injected into 1-cell fertilized embryos collected from the oviducts of superovulated 7- to 8-week-old B6 mice. The blastocysts were implanted into the uterus of pseudopregnant Institute of Cancer Research (ICR) mice. Peripheral blood was collected from the 4-week-old offspring for flow cytometry-aided screening. We further confirmed the LPGAT1 deficiency by PCR amplification, DNA sequencing, RT-PCR, and Western blot analysis. All mutant mice were backcrossed with C57BL/6 mice for more than 3 generations.

Animal Care

LPGAT1-/- and age-matched WT mice (4 weeks old) were divided into 2 groups. One group was fed the HFD (D12492, 60 kcal% fat; Research Diets, Inc, New Brunswick, NJ) for 12 weeks, and the control group was fed a normal chow diet (Teklad 5001 Laboratory Diet; Envigo, Huntingdon, UK). All animals were maintained in an environmentally controlled facility with a diurnal light cycle and free access to water. All experiments used littermate controls of age- and sex-matched mice and, in accordance with the "Regulations of the People's Republic of China on Laboratory Animal Management" and the "Administrative Measures on Quality of Laboratory Animals," used protocols consistent with National Institutes of Health guidelines.

Glucose Tolerance Test and Insulin Tolerance Test

The glucose tolerance test and insulin tolerance test were performed in overnight food-deprived mice (n = 10). Glucose was delivered by oral gavage at 1.5 g/kg body weight after an initial measurement of the fasting blood glucose level. Insulin was delivered by intraperitoneal injection (1 U/kg body weight; Novolin R, Novo Nordisk, Bagsvaerd, Denmark). Blood glucose was determined 0, 15, 30, 60, 90, and 120 minutes after the glucose or insulin load with a One Touch Ultra 2 glucometer (Lifescan, Milpitas, CA).

Primary Hepatocyte Isolation

Primary hepatocytes were isolated from male LPGAT1-/- mice and WT control mice (age, 6-10 wk). Mice were anesthetized with pentobarbital sodium and then perfused with 40 mL Krebs buffer (1 mol/L HEPES, pH 7.45, 50 mmol/L ethylene glycol-bis(beta-aminoethyl ether)-N,N,N',N'-tetraacetic acid (EGTA), pH 7.4) from the inferior vena cava for 7 minutes, followed by 30 mL of 0.2 mg/mL collagenase type IV (Sigma) in Krebs buffer (with 1 mol/L CaCl2) for 7 minutes. The perfused liver was excised, minced, and filtered through 100-mesh cell strainers (70 um). The digestion was terminated by adding Dulbecco's modified Eagle medium (DMEM; Gibco) containing 10% fetal bovine serum. Hepatocytes were collected by centrifuging at 500g for 2 minutes at 4°C. Percoll (Sigma, St. Louis, MO) solution (10x phosphate-buffered saline:Percoll, 1:9, vol/vol) was added, and the suspension was centrifuged to remove dead cells. The cell pellet was washed with DMEM twice, and the hepatocytes were cultured in DMEM supplemented with 10% fetal bovine serum and penicillin/streptomycin for further experiments.

Quantitative Real-Time PCR Analysis

Total RNA from LPGAT1-/- and WT mouse liver tissues or primary hepatocytes was extracted using TRIzol (Invitrogen, Carlsbad, CA) following the manufacturer's instructions. The purity and concentration of RNA were determined with an automatic microplate spectrophotometer (OD-1000 spectrophotometer; Thermo Fisher Scientific, Waltham, MA).
Total RNA (1 ug) was used for the preparation of complementary DNA using SuperScript II Reverse Transcriptase (18064014; Invitrogen). Quantitative real-time PCR analysis was performed using SYBR Green Master Mix (330501; Qiagen, Hilden, Germany). The relative gene expression was calculated by the comparative cycle threshold (Ct) method: for each sample, dCt = Ct(gene of interest) - Ct(housekeeping gene), and the relative gene expression = 2^-(dCt,test - dCt,control). (A short worked example of this calculation appears after the Western blot methods below.) Primer sequences used for quantitative analysis are shown in Table 1.

Subcellular Fractionation

COS-7 cells stably overexpressing FLAG-tagged LPGAT1 were homogenized with a Dounce homogenizer in 10 volumes (wt/vol) of a solution consisting of 225 mmol/L mannitol, 75 mmol/L sucrose, 0.1 mmol/L EGTA, and 30 mmol/L Tris-HCl, pH 7.4. The homogenate was first centrifuged at 600g for 10 minutes to remove cell debris and nuclear fractions. The crude mitochondrial fraction was obtained by centrifuging the supernatant at 8000g for 10 minutes. The crude mitochondrial pellet was resuspended in mitochondrial suspension buffer consisting of 250 mmol/L mannitol, 5 mmol/L HEPES, pH 7.4, and 0.5 mmol/L EGTA, and then fractionated by Percoll gradient ultracentrifugation at 95,000g for 30 minutes to isolate the pure mitochondrial fraction and MAM. The microsomal fraction was prepared from the postmitochondrial supernatant by sedimentation at 100,000g for 1 hour. The mitochondrial, MAM, and microsomal fractions were resuspended in PBS buffer and analyzed by Western blot analysis using anti-LPGAT1 (1:1000 dilution in 5% bovine serum albumin/Tris-buffered saline with 0.1% Tween 20 (TBST); generated by our laboratory) and anti-calnexin (NB300-518; Novus Biologicals, Centennial, CO) antibodies, the latter used as an endoplasmic reticulum marker.

Intracellular ROS Production Analysis

Intracellular ROS generation in primary hepatocytes was investigated using 2',7'-dichlorodihydrofluorescein diacetate (D399; Molecular Probes, Eugene, OR) at a final concentration of 5 umol/L. Cells were incubated with the dye in culture medium for 30 minutes at 37°C and then resuspended in 0.5 mL PBS. The fluorescence was measured using a microplate reader (Victor3 plate reader; Perkin Elmer, Waltham, MA) set to 488-nm excitation and 525-nm emission wavelengths.

Lipid Peroxidation Assay

Lipid peroxidation in tissue samples was analyzed by measuring the production of thiobarbituric acid reactive substances (TBARS). TBARS production was measured according to the manufacturer's instructions (TBARS assay kit, cat 10009055; Cayman Chemical). For the preparation of liver cytosol, 25 mg of liver tissue was homogenized at 4°C in 250 uL RIPA lysis buffer, placed on ice for 15 minutes, and then centrifuged at 16,000g for 10 minutes. A total of 10 uL of supernatant and 10 uL of the sodium dodecyl sulfate solution were reacted with 400 uL thiobarbituric acid (TBA) buffer at 100°C for 1 hour. After centrifugation at 1600g for 10 minutes, 150 uL of each reaction was pipetted into a 96-well plate. Samples were analyzed spectrophotometrically for TBARS at 535 nm in a microplate reader (Victor3 plate reader; Perkin Elmer) and normalized by tissue weight.

Oxygen Consumption Rate in Primary Hepatocytes

Primary hepatocytes were isolated and seeded in XF96 cell culture microplates (Seahorse Bioscience, Billerica, MA) at 5000 cells/well in 80 uL DMEM growth medium supplemented with 10% fetal calf serum and antibiotics, and incubated at 37°C for 24 hours.
Assays were initiated by removing the growth medium and replacing it with assay medium, followed by incubation for 30 minutes in an ambient-air incubator at 37°C. The mitochondrial test compounds oligomycin (1.5 umol/L), FCCP (1 umol/L), and rotenone (1 umol/L) were preloaded into reagent delivery ports A, B, and C of the O2 sensor cartridge, respectively. Oxygen consumption rate measurements then were performed according to the Seahorse Bioscience assay protocol.

Triglyceride and Cholesterol Assay

Triglyceride and cholesterol levels in serum and liver tissues were measured using the Triglyceride (290-63701; Wako) and Cholesterol (294-65801; Wako) Quantitative Assay Kits, respectively, according to the manufacturer's instructions.

Confocal Imaging Analysis

For intracellular lipid droplet analysis, primary hepatocytes were cultured in complete medium in the presence or absence of 200 umol/L oleic acid for 16 hours. Cells then were incubated with BODIPY 493/503 (5 ug/mL; Life Technologies) for 20 minutes and analyzed by confocal microscopy (FV1200; Olympus, Shinjuku, Tokyo, Japan). To visualize mitochondria, primary hepatocytes were stained with MitoTracker Red CMXRos (50 nmol/L) for 20 minutes in a 37°C incubator, washed with PBS 3 times, and subjected to confocal imaging analysis.

mtDNA Copy Number Assay

The total DNA of primary hepatocytes was extracted using the Multisource Genomic DNA Miniprep Kit (Axygen) according to the manufacturer's instructions. Quantitative real-time PCR analysis of mtDNA copy number in hepatocytes was performed using mitochondrially encoded NADH dehydrogenase 1 as the mtDNA marker and cyclophilin A as a genomic DNA marker. The primer pairs used in the PCR analysis are shown in Table 1.

Lipidomics Analysis

For total lipid extraction, liver tissue was homogenized in a 2:1 chloroform:methanol (vol/vol) mixture, and cell debris was removed by filtration. The homogenizer and collected cell debris were rinsed with fresh solvent mixture, and the rinse was pooled with the previous filtrate before the addition of a 0.73% NaCl water solution, producing a final solvent system of 2:1:0.8 chloroform:methanol:water (vol/vol/vol). The lipid extracts were finally flushed with nitrogen, capped, and stored at -20°C (typically analyzed within 1 week, which is critical for CL analysis). Briefly, total lipids from LPGAT1-/- and WT mouse liver tissues were analyzed with a triple-quadrupole mass spectrometer (Thermo Electron TSQ Quantum Ultra, Trzin, Slovenia) controlled by Xcalibur (Thermo Fisher Scientific) system software. All mass spectrometry (MS) and tandem MS (MS/MS) spectra were acquired automatically by a customized sequence subroutine operated under the Xcalibur software.

Western Blot Analysis

LPGAT1-/- and WT primary hepatocytes were treated with 0, 0.1, 1.0, or 10 nmol/L insulin for 15 minutes and harvested in RIPA lysis buffer (20 mmol/L HEPES, 2 mmol/L EGTA, 50 mmol/L NaF, 100 mmol/L KCl, 0.2 mmol/L EDTA, 50 mmol/L beta-glycerophosphate, 1.5 mmol/L Na3VO4, 10 mmol/L Na4P2O7, 1 mmol/L benzamidine, 100 uL phosphatase inhibitor cocktail, 1% Triton X-100, 1.0 mmol/L phenylmethylsulfonyl fluoride), followed by centrifugation at 16,000g for 15 minutes at 4°C. The supernatant was used for Western blot analyses of total Akt, phospho-Akt, total GSK-3alpha/beta, phospho-GSK-3alpha/beta, and glyceraldehyde-3-phosphate dehydrogenase.
For analysis of insulin signaling in tissue samples, LPGAT1-/- mice and WT controls were fasted overnight, followed by intraperitoneal injection of insulin (1 U/kg body weight) or PBS, and were euthanized 15 minutes after the injection. Tissues were rapidly dissected and frozen in liquid nitrogen. The tissue samples then were pulverized in liquid nitrogen and homogenized in RIPA buffer with a Polytron homogenizer. After 30 minutes of incubation on ice, the samples were centrifuged at 16,000g for 15 minutes at 4°C. The protein concentration was determined by the Pierce BCA Protein Assay (23225; Thermo Fisher, Waltham, MA). Equal amounts of protein (30 ug) were subjected to sodium dodecyl sulfate-polyacrylamide gel electrophoresis, transferred to a polyvinylidene difluoride membrane (1620177; Bio-Rad, Hercules, CA), blocked in Tris-buffered saline with 5% milk, immunoblotted with primary antibodies (1:1000) overnight at 4°C, followed by secondary antibodies (1:5000) for 1 hour at room temperature, and developed with ECL Western Blotting Substrate (32106; Thermo Fisher).
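As promised in the quantitative real-time PCR section above, here is the comparative-Ct (2^-ddCt) arithmetic in executable form. This is a minimal sketch: the gene names and Ct values are invented for illustration and are not data from this study.

# Comparative Ct (2^-ddCt) method: relative expression of a target gene,
# normalized to a housekeeping gene, in a test sample versus a control.
ct = {
    "control": {"target": 24.1, "housekeeping": 18.0},  # hypothetical values
    "test":    {"target": 22.3, "housekeeping": 18.1},
}

d_ct_control = ct["control"]["target"] - ct["control"]["housekeeping"]  # dCt, control
d_ct_test    = ct["test"]["target"]    - ct["test"]["housekeeping"]     # dCt, test
dd_ct        = d_ct_test - d_ct_control                                 # ddCt

fold_change = 2.0 ** (-dd_ct)   # relative expression, test vs. control
print("ddCt =", round(dd_ct, 2), "-> fold change =", round(fold_change, 2))

With these made-up numbers, ddCt = -1.9, so the target is expressed about 3.7-fold higher in the test sample than in the control.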
package org.apache.taverna.databundle;
/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 */

import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;

/**
 * Describes an error value in a data bundle: a human-readable message, an
 * optional stack trace, and the paths of any upstream values whose errors
 * caused this one. Setters are null-safe and never leave fields null.
 */
public class ErrorDocument {
    private List<Path> causedBy = new ArrayList<>();
    private String message = "";
    private String trace = "";

    public List<Path> getCausedBy() {
        return causedBy;
    }

    public String getMessage() {
        return message;
    }

    public String getTrace() {
        return trace;
    }

    public void setCausedBy(List<Path> causedBy) {
        this.causedBy.clear();
        if (causedBy != null)
            this.causedBy.addAll(causedBy);
    }

    public void setMessage(String message) {
        if (message == null)
            message = "";
        this.message = message;
    }

    public void setTrace(String trace) {
        if (trace == null)
            trace = "";
        this.trace = trace;
    }

    @Override
    public String toString() {
        return "Error: " + getMessage() + "\n" + trace;
        // TODO: also include the causedBy paths?
    }
}
/*
 * Copyright 2020 The Catty Project
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package pink.catty.spring.bean;

/**
 * Spring-configurable bean binding a service interface to the implementation
 * instance (ref) that should be exported for it.
 */
public class ServiceBean<T> {

    private Class<T> interfaceClass;
    private T ref;

    public Class<T> getInterfaceClass() {
        return interfaceClass;
    }

    public void setInterfaceClass(Class<T> interfaceClass) {
        this.interfaceClass = interfaceClass;
    }

    public T getRef() {
        return ref;
    }

    public void setRef(T ref) {
        this.ref = ref;
    }
}
The present invention relates to an aqueous acidizer composition and the use of the composition in the removal of scale from fresh water production equipment. More particularly, the present invention relates to an acidizer composition containing a blend of three acids of varying strengths, with each component of the composition carefully selected to provide a timed-release effect so as to prolong the activity of the composition in scale removal over an extended period of time while simultaneously minimizing pitting and fatigue of the metallic equipment or its components.

Various compositions for use in water treatment or the removal of scale from water storage and transfer equipment, as well as cooling towers, boilers, and some types of fresh water production equipment, include the compositions described in the following patents: U.S. Pat. No. 3,235,324 to Merriman; U.S. Pat. No. 3,335,085 to Hronas; U.S. Pat. No. 3,424,688 to Boiko et al.; U.S. Pat. No. 3,458,354 to Reich; U.S. Pat. No. 3,969,255 to Connelly; U.S. Pat. No. 4,199,469 to Walzer; and U.S. Pat. No. 4,222,886 to Connelly. Several of these patents disclose the use of hydrochloric acid along with additional components; in particular, U.S. Pat. No. 4,199,469 discloses the use of hydrochloric acid along with isopropyl alcohol and additional acid components. The use of isopropyl alcohol has been restricted to its ability to kill microorganisms, remove bio-organic residues such as algae, and act as a solvent for various organic components of formulations such as those described in U.S. Pat. No. 3,335,085.

In the removal of scale from certain types of fresh water production equipment, sometimes referred to as "watermakers," strong mineral acids such as sulfuric, nitric, and perchloric acids have been found to cause serious damage to the equipment. This is due to the fact that both the hydrogen ions and the oxidizing anions of these acids tend to cause severe metal pitting at the acid concentrations required for scale removal. In such treatment for scale removal, acid concentrations of at least 0.6 M are generally necessary. Although hydrochloric acid does not contain an oxidizing anion, the chloride ion in high concentration readily coordinates copper, nickel, and other transition metal ions, thus increasing the probability of metal pitting. This effect, coupled with the corrosive properties of hydrogen ions in high concentration, prohibits the use of hydrochloric acid alone to descale watermakers. To be effective, a solution concentration in excess of 0.6 M is desirable.

The use of phosphoric acid to descale watermakers is not feasible. Not only is it a relatively weak acid, thus prolonging the time required for acidization, but serious difficulties also arise when using phosphoric acid in watermakers. Watermakers are primarily restricted to marine or related use, in which the processed water contains extremely high levels of calcium, magnesium, iron, and other heavy metal ions, which readily react with phosphoric acid to produce phosphate scales in these units. As there is always a residual amount of sea water in a watermaker during acidization, phosphoric acid creates a scale problem rather than alleviating one.

In the group of moderately strong inorganic acids conventionally employed in scale removal, sulfamic acid has previously been the most popular of such acids employed in the descaling of water production equipment.
It has been found that sulfamic acid does indeed remove phosphate and carbonate scale without causing serious damage to the metal unit. The Ka for sulfamic acid is 1.03 x 10^-1 (a short illustrative calculation of what this implies for a working solution follows this summary). The use of sulfamic acid, however, has presented the following major disadvantages in the watermaker descaler process:

(1) Sulfamic acid must be sold dry, requiring "on site" mixing. This is very time consuming due to the relatively low solubility of the acid in water under ambient conditions. Aqueous solutions of the acid are unstable upon standing: over a short period of time, sulfamic acid in water hydrolyzes with the formation of NH4HSO4. This prohibits the marketing of the acidizer as an aqueous solution.

(2) Because of the low solubility of sulfamic acid in water, successive acidizations and flushings of the watermaker are frequently necessary for complete scale removal. This again is quite time consuming.

(3) With even moderate to low temperature fluctuations within the watermaker, sulfamic acid frequently precipitates out of solution during acidization, clogging the watermaker tube bundles and other very "difficult to clean" components.

(4) Calcium, magnesium, and other metal sulfamate salts produced during the acidization process are quite water insoluble and thus precipitate within the watermaker tube bundles and other "difficult to clean" components. This poses a unit "clogging" problem similar to point (3) above.

(5) Sulfamic acid will cause some metal pitting of 90% copper-10% nickel alloy, and thus evaporator parts that are repeatedly exposed to the acid during descaling can experience considerable damage.

By the present invention, there is provided an improved composition for use in the removal of scale from fresh water production equipment. The composition of the present invention includes a blend of three acids of varying strengths carefully selected to provide an acidizer having a timed-release effect. This timed-release effect significantly enhances maximum scale removal with minimal or insignificant pitting of the metal evaporator or other equipment. The composition also contains a surfactant, preferably isopropyl alcohol, which, taken together with its reaction products with the acid components, serves to accelerate the descaling process. An acid-base indicator and water also form a part of the present composition.
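To give a feel for the Ka value quoted above, the sketch below computes the equilibrium hydrogen ion concentration of sulfamic acid at the 0.6 M working concentration mentioned in the text, using the exact quadratic for a monoprotic weak acid. This is an illustrative calculation only; it ignores activity corrections and the additional species present in a real descaling bath.

import math

Ka = 1.03e-1   # sulfamic acid dissociation constant (from the text)
C  = 0.6       # mol/L, the minimum effective descaling concentration cited

# For HA <-> H+ + A-:  Ka = x^2 / (C - x)  =>  x^2 + Ka*x - Ka*C = 0
x = (-Ka + math.sqrt(Ka**2 + 4 * Ka * C)) / 2   # [H+] at equilibrium
pH = -math.log10(x)
alpha = x / C                                    # fraction of acid dissociated

print("[H+] = %.3f M, pH = %.2f, %.0f%% dissociated" % (x, pH, 100 * alpha))

The result (roughly [H+] = 0.20 M, pH = 0.7, about one-third dissociated) illustrates why sulfamic acid is classed as only "moderately strong" relative to the fully dissociated mineral acids discussed earlier.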
Mobile phone base station exposure and symptoms.

Eltiti et al. reported elevated levels of arousal when electromagnetic-hypersensitive subjects were exposed to a UMTS (universal mobile telecommunications system) mobile phone base station signal of 10 mW/m². Based on their statistical analysis, they concluded that this observation was likely to be due to the effect of order of exposure rather than the exposure itself. In our view, however, a critical review of their data suggests a different conclusion.

First of all, Eltiti et al. hypothesized that sensitive participants would report more symptoms and lower levels of well-being during GSM and UMTS exposure compared with sham. When dealing with a directional hypothesis, a one-sided statistical test is indicated. According to a one-sided statistical test, the differences between sham and UMTS exposure for sensitive subjects regarding anxiety (t-value = 2.89) and tension (t-value = 2.94) are significant, even after applying a Bonferroni correction. An arguable issue is whether a Bonferroni correction should be applied in the first place. The trial was designed to replicate previous findings from a Dutch study (). Many statisticians may point out that multiple end point correction is not needed under these circumstances. Certainly, a Bonferroni correction, as used in the context of the trial by Eltiti et al., is too conservative when measuring several symptoms that are very likely to be correlated. The correlation between the outcomes should be taken into account in the multiple end point correction. As a consequence, the reference t-values would be lower, again yielding the conclusion that anxiety and tension are correlated with UMTS exposure.

It is unfortunate that the exposure order among the three conditions was not counterbalanced. As Eltiti et al. reported, this unbalanced design led to additional variation in the data. We therefore cannot understand why the authors did not include the order of exposure conditions as a factor in their statistical model. Instead, they presented a between-subjects comparison stratified by order. It is true that the differences between sham and UMTS did not reach statistical significance in any of the three sessions. However, it is striking that in each of the three sessions, the arousal score of sensitive individuals was higher for the UMTS condition compared with sham. Pooling the three sessions together would yield a significant difference between sham and UMTS (t-test; p = 0.02). Likewise, a meta-regression of the data from their Table 3 confirms that order (p = 0.043) and exposure condition (p = 0.076) are important factors and should have been considered in the original model.

Finally, given the fact that Eltiti et al. observed a few more borderline significant effects and that the targeted sample size was not achieved, one would expect a critical discussion of the power of the study, which the authors did not provide.

In summary, a more careful data analysis yields significantly different tension, arousal, and anxiety scores between sham and UMTS exposure status for sensitive subjects. It seems unlikely that these differences are solely due to order of exposure, as argued by Eltiti et al. We think that results from this study should be interpreted with more caution. Certainly, an association between low-level short-term UMTS mobile phone base station exposure and symptoms is unexpected and contradicts a previous study (). This issue merits further clarification.
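The letter's central statistical point, that a directional hypothesis warrants a one-sided test judged against a multiplicity-corrected threshold, can be checked mechanically. The sketch below converts the quoted t-values into one-sided p-values. The degrees of freedom and the number of corrected end points are not stated in the letter, so the values used here are assumptions chosen purely for illustration.

from scipy import stats

# t statistics quoted in the letter for sensitive subjects (sham vs. UMTS)
t_values = {"anxiety": 2.89, "tension": 2.94}

df = 43   # ASSUMED degrees of freedom (e.g., n - 1 for 44 paired subjects)
m = 8     # ASSUMED number of well-being end points being corrected

alpha_bonferroni = 0.05 / m
for name, t in t_values.items():
    p_one = stats.t.sf(t, df)   # one-sided p for the directional hypothesis
    p_two = 2 * p_one           # two-sided p, for comparison
    verdict = "significant" if p_one < alpha_bonferroni else "not significant"
    print(name, "one-sided p =", round(p_one, 4),
          "two-sided p =", round(p_two, 4), "->", verdict)

Under these assumed inputs the one-sided p-values fall near 0.003, below the Bonferroni threshold of 0.00625, which matches the letter's claim; a two-sided reading roughly doubles them.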
In the study by Eltiti et al., the intensity of the radiation emitted by the mobile phone base station was 1 uW/cm² (5 mW/m² for 900 MHz and 5 mW/m² for 1,800 MHz). The authors assumed that the participants would not react to higher intensities such as 10 or 20 uW/cm², or even to intensities up to 900 uW/cm², which are used in mobile phone technology. The exposure durations were too short to produce real effects at the biochemical and clinical levels. Ahmed et al. and Lai et al. (1992, 1994) concluded that the response depends on the duration of the radiation exposure. After 1 hr of exposure, alterations of certain biochemicals, which could be producing the symptoms, may or may not occur. For example, acetylcholinesterase activity governs the levels of acetylcholine, which together with other neurotransmitters is responsible for cognitive functions; with further exposure, this activity increases in two areas of the brain, the hippocampus and the striatum. Also, Johansson reported that electromagnetic fields may stimulate mast cells, which produce histamine, thereby producing symptoms in the skin and other organs. Furthermore, the effects of electromagnetic fields (Belyaev 2005) may be related not only to intensity or duration of exposure but also to other parameters, such as frequency or modulation. To classify a clinical symptom as psychological, we must first exclude biochemical changes that could be triggered by the electromagnetic fields and cause neurobehavioral responses. This is supported by studies that show changes in neurotransmitters [e.g., acetylcholine].
Electromagnetic hypersensitivity (EHS) is a potentially highly significant public health problem. Eltiti et al. (2007a) recently concluded that short-term exposure to a GSM (global system for mobile communication) base station-like signal did not affect well-being or physiological functions in individuals, and they dismissed a positive reaction to UMTS (universal mobile telecommunications system) as an artefact. Eltiti et al. (2007a) stated that "individuals are unable to detect the presence of rf-emf under double-blind conditions." We believe that this conclusion was erroneous, and that their data show that the EHS individuals reacted to both GSM and UMTS signals, and that this was not due to a nocebo effect. Figure 1 presents their data and clearly shows that the sensitive group, unlike the control group, was reacting to the exposure, with significant results in both the open provocation (for GSM and UMTS, not the sham; p < 0.0025) and the double-blind tests (for UMTS). The results for anxiety and arousal are very similar.

The sensitive group had higher initial levels of anxiety, tension, and arousal. Only a short time elapsed after arrival before testing started. Wever and others have reported that a period of a few days in a low-EMF environment is necessary before testing for EMF-related changes.

We are puzzled by the receiver operating characteristic (ROC) curves in Figure 2A (Eltiti et al. 2007a). The authors stated that the sensitive individuals were 55.2% correct, yet their curve was mostly below the 50% line. A more standard way of displaying the results would have been helpful. The sensitive group improved its on/off accuracy score after 50 min (55% to 60%), whereas the control group's score decreased (51% to 50%).
The data for these double-blind tests (Fox E, personal communication) show that correct versus incorrect results were 60.6% (p < 0.005) for the sensitive group and 49.4% (not significant) for the control group.

Eltiti et al. (2007a) found a large and statistically significant (p < 0.001) higher skin conductance in the sensitive group (see their Table 5). Their conclusions do not highlight this difference between the two groups, which may be a key indicator of the likelihood of individuals to experience EHS symptoms.

The EHS questionnaire devised by Eltiti et al. (2007b) was to be used for selecting the 132 most sensitive individuals. However, it was not used for this purpose, because only 58 people with self-diagnosed EHS applied, and apparently no individuals were rejected because of a low score.

Are provocation studies appropriate for testing for EHS, where there is often a significant time lag from the start of exposure to the start of symptoms? Also, perseveration of symptoms due to physiological arousal caused by traveling to the laboratory is a likely confounder. Any study should be designed to take both of these potential problems into account. Also, the use of Bonferroni corrections is contentious; uncorrected data should be shown along with corrected data.

The study (Eltiti et al. 2007a) required 66 individuals per group for a power of 0.90 to detect a difference between real and sham exposure responses. The authors tested only 44 sensitive individuals under double-blind conditions, which reduced the power to about 0.7. We question the appropriateness of publishing such definite conclusions based on such data, especially with a high-profile media briefing.

Despite limitations, this study of Eltiti et al. (2007a) has produced positive results that support claims that EHS people can be affected by microwave transmissions from mobile phone base stations.

A.C. is employed by EMFields (A&J Philips) to design EMF measurement equipment (EMFields financially supports Powerwatch, a UK advocacy group that promotes a precautionary approach to EMF exposure); A.C. did not receive payment for writing this letter. A.D., serving as an unpaid volunteer, operates and writes for TETRAWatch, a nonprofit organization concerned with the health risks, costs, and effectiveness of TETRA communications antennae in the United Kingdom. D.M. is the founder of EMFacts Consultancy, an advocacy group that produces reports on health issues related to human exposure to electromagnetic radiation. E.O. is on the board of trustees of the EM-Radiation Research Trust, an independent charitable organization whose goal is to provide the facts about electromagnetic radiation and health to the public and the media; she receives no money from the organization. The remaining authors declare they have no competing financial interests.

Three letters have questioned the validity of the conclusions drawn in our recent article on the short-term effects of GSM (global system for mobile communication) and UMTS (universal mobile telecommunications system) base station signals (). Most of the concerns are founded in misunderstandings of the study, and we hope to clarify these issues here. We assessed whether people could detect the presence of a 10-mW/m² signal over a 50-min period (not 10 W as claimed by Zinelis). This level of exposure is roughly equivalent to standing within 60 m of a mobile phone base station and was based on prior scientific evidence ().
We also measured a range of variables within three classes of response: physiological response, self-reported well-being, and actual symptoms experienced. We found no evidence that people could detect the presence of the EMF (electromagnetic field) signal, and Cohen et al.'s assertion that "this conclusion is erroneous" is completely unfounded. Their conclusion arises from a misunderstanding of the receiver operating characteristic (ROC) curve analysis. ROC curves and d' values tell us how accurate participants are in discriminating a signal from a nonsignal. This standard psychophysical measure (d') provides a measure of accuracy independent of bias. Thus, a d' score of 0 means that the proportion of hits (respond "on" when on) is the same as that of false alarms (respond "on" when off) and indicates that people are unable to detect a signal (Macmillan and Creelman 2005). (A minimal d' calculation is sketched after this correspondence.) In this case, the ROC curve will fall roughly across the graph at a 45° angle, as we found (). As shown in Table 1, both the hits and the false alarms were not different from what was expected by chance, and this was true for both the sensitive and the control groups. Thus, the comment by Cohen et al. is unfounded and inaccurate.

We measured the following physiological responses: blood volume pulse, heart rate, blood pressure, and skin conductance response (SCR). The SCR in particular is considered to be one of the most sensitive measures of physiological arousal (). Although the sensitive group was more aroused at baseline than controls, which has been reported many times before, this physiological arousal was not related to the EMF signal. The hyperarousal of the sensitive group is of interest in its own right, as noted in our article (). However, we found no evidence that either GSM or UMTS affected any physiological measure.

In our study (), participants were free to report any symptoms they experienced at any time during the testing session. The number of symptoms experienced by the sensitive individuals was not, however, related to the presence of an EMF signal. In his letter, Zinelis argues that our statistical power was too low and the length of exposure too short to allow symptoms to emerge. First, the statistical power (0.75) in our study was actually very high for this field of research. Second, extensive pilot testing and interviews with study participants revealed that the people we tested usually experience their typical symptoms within minutes of being exposed to EMF signals. The fact that the symptoms were elicited under the open provocation, but not in the double-blind session, provides evidence that these sensitive people experienced a number of unpleasant symptoms, but these were not related to the presence of an EMF signal. Thus, our data () contradict the points raised by Zinelis.

All three letters about our article () question the validity of our conclusions with regard to the subjective well-being measures. We did report a number of effects, two of which remained significant following Bonferroni correction. In their letter, Röösli and Huss question whether we should have used such a statistical correction in the current context. This is indeed an important and debatable issue. However, we believe that we took the most reasonable approach, given the weight of the evidence from the other indicators in our own study as well as from the bulk of other research in this area (e.g., for review, see ).
To illustrate, previous research has reported positive (e.g., ), negative (e.g., ), and no effects of short-term EMF exposure on health indices (e.g., ). Thus, the use of two-tailed tests seems most appropriate. If we apply the Tukey-Ciminera-Heyse correction for highly correlated end points, as suggested by Röösli and Huss, we are left with a significant difference in self-reported anxiety. A 2 (group) x 3 (condition) x 6 (exposure order) mixed analysis of variance (ANOVA) for anxiety, tension, and arousal resulted in significant two-way interactions of condition by exposure order for all three visual analogue scales (VAS) [F-values > 3.41; p-values <= 0.001], which did not interact with group [F-values < 1.08; p-values > 0.05]. This two-way interaction is difficult to interpret given the six levels of exposure order. To aid interpretation, we conducted a series of 2 (group) x 3 (condition) x 3 (first exposure) mixed ANOVAs for anxiety, tension, and arousal. This resulted in significant two-way interactions [F-values > 5.88; p-values <= 0.001], but not a three-way interaction [F-values < 1.39; p-values > 0.05]. Again, the first exposure did not interact with group. As shown in Figure 1, the significant differences depended on which condition the participant received first. When the first exposure was GSM, the VAS scores for GSM were higher than for sham [t-values > 3.72; p-values <= 0.001]; the same pattern was found for UMTS when UMTS came first [t-values > 2.66; p-values < 0.01] and for sham when sham came first [t-values > 2.12; p-values < 0.04]. None of the other comparisons were significant (Figure 1). This confirms our previous conclusion that the differences in self-reported VAS scores for anxiety, tension, and arousal are attributable to order effects.

In conclusion, we appreciate the opportunity to discuss the interpretation of data in this controversial area. However, in our view the conclusions drawn in our article are fair and accurate, and we do not think that the letters have raised any issues that would lead us to modify those conclusions. As we made clear in our article (), we examined only short-term effects of EMF exposure and therefore can draw no conclusions about the possible long-term effects on human health.
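The authors' reply leans on the signal-detection measure d' referenced above. For readers unfamiliar with it, the sketch below computes d' from raw trial counts. The counts are hypothetical, and the 0.5 correction is one common convention for keeping the z-transforms finite, not necessarily the one used in the original study.

from scipy.stats import norm

def d_prime(hits, misses, false_alarms, correct_rejections):
    # Signal-detection sensitivity: d' = z(hit rate) - z(false-alarm rate).
    # A log-linear correction keeps rates away from 0 and 1.
    n_signal = hits + misses
    n_noise = false_alarms + correct_rejections
    h = (hits + 0.5) / (n_signal + 1.0)
    f = (false_alarms + 0.5) / (n_noise + 1.0)
    return norm.ppf(h) - norm.ppf(f)

# Hypothetical counts for a 50-trial on/off judgment task (illustration only):
print("d' =", round(d_prime(hits=14, misses=11,
                            false_alarms=12, correct_rejections=13), 2))

With these made-up counts the hit and false-alarm rates are nearly equal, d' lands near 0, and the corresponding ROC curve hugs the 45° chance diagonal, which is exactly the pattern the reply describes.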
package alert

import (
	"bytes"
	"io/ioutil"
	"net/http"
	"net/url"
	"text/template"
	"time"

	"github.com/boivie/lovebeat/config"
	"github.com/boivie/lovebeat/service"
	"github.com/franela/goreq"
)

type slackhook struct {
	Uri  string
	Data service.ViewStateChangedEvent
}

type slackhookAlerter struct {
	cmds     chan slackhook
	template *template.Template
}

func (m slackhookAlerter) Notify(cfg config.ConfigAlert, ev service.ViewStateChangedEvent) {
	if cfg.Slackhook != "" {
		// NOTE: Worker posts to the globally configured cfg.Uri; the per-alert
		// Uri is carried along here but not currently used.
		m.cmds <- slackhook{Uri: cfg.Slackhook, Data: ev}
	}
}

func (m slackhookAlerter) Worker(q chan slackhook, cfg *config.ConfigSlackhook) {
	for hook := range q {
		context := make(map[string]interface{})
		context["View"] = hook.Data.View
		context["Previous"] = hook.Data.Previous
		context["Current"] = hook.Data.Current

		var doc bytes.Buffer
		if err := m.template.Execute(&doc, context); err != nil {
			log.Error("Failed to render template: %v", err)
			continue // skip this event instead of killing the worker
		}

		req := goreq.Request{
			Method:      "POST",
			Uri:         cfg.Uri,
			Accept:      "*/*",
			ContentType: "application/x-www-form-urlencoded",
			UserAgent:   "Lovebeat",
			Timeout:     10 * time.Second,
			Body:        "payload=" + url.QueryEscape(doc.String()),
		}
		req.AddHeader("X-Lovebeat", "1")

		res, err := req.Do()
		if err != nil {
			log.Error("Failed to post slackhook:%v:", err)
			continue // res is nil on error; do not touch res.Body
		}
		body, err := ioutil.ReadAll(res.Body)
		res.Body.Close()
		if err != nil {
			log.Error("OK:unreadable response:%v:", err)
		} else if res.StatusCode != http.StatusOK {
			log.Error("NOK:non-200:%d:", res.StatusCode)
		} else {
			log.Info("OK:response:%s:", string(body))
		}
	}
}

func NewSlackhookAlerter(cfg config.Config) Alerter {
	tmpl := cfg.Slackhook.Template
	t, err := template.New("template").Parse(tmpl)
	if err != nil {
		log.Fatalf("skipping slackhook:error trying to parse slackhook template:%s:err:%v:", tmpl, err)
	}
	goreq.SetConnectTimeout(5 * time.Second)
	var q = make(chan slackhook, 100)
	var w = slackhookAlerter{cmds: q, template: t}
	go w.Worker(q, &cfg.Slackhook)
	return &w
}
By Richard McCarty

For the past several years, the Service Employees International Union (SEIU) has been dumping millions of dollars of its members' money into efforts across the country to hike the minimum wage to $15. The membership of SEIU includes many lower-wage workers, such as janitors, security guards, home care workers, and graduate students. Last year alone, SEIU spent $19 million on its Fight for $15 campaign. Is this a good use of SEIU members' money?

In June of 2014, the Seattle City Council voted to raise the minimum wage in a series of steps to $15 (with annual increases for inflation after the minimum wage reaches $15). Later that year, the city signed a five-year contract with the University of Washington (UW) to study the effects of the wage increase. The first minimum wage increase of the series took effect in April of 2015. UW researchers found that increase had little impact, which may have been because many businesses were already paying above the minimum wage. The second increase took effect in January of 2016. This time, UW researchers found that the wage hike negatively impacted workers. In fact, that minimum wage increase caused the average low-wage worker's income to fall by $125 a month, and the wage increase led to about 5,000 fewer jobs in the city. And Seattle isn't done yet; the next wage hike takes effect in January of next year.

As inconvenient as the UW study is for SEIU and its Fight for $15 campaign, that's not the only bad news for the union: over the past year, three states have rolled back local minimum wage hikes.

The St. Louis Board of Aldermen voted to increase the minimum wage in August of 2015, but the increase did not take effect until May of this year due to a lawsuit. The state legislature was displeased with the city's action, in part because it wants a uniform minimum wage across the state. So the legislature passed a bill to ban local minimum wages. After the UW study was released, the governor of Missouri announced that he would allow the bill to become law, reversing St. Louis's wage hike.

Between late 2015 and early 2017, five Iowa counties passed local minimum wage hikes. Once again, state legislators disapproved of the measures and passed legislation to ban local governments from setting a minimum wage. The governor quickly signed the bill reversing the minimum wage hikes, but before he did, 10 city councils voted to opt out of their counties' minimum wage increases.

In 2014, the City of Louisville, Kentucky, voted to hike the minimum wage; the next year, the City of Lexington, Kentucky, followed suit. However, just last fall, the state Supreme Court ruled, nearly unanimously, that cities in Kentucky lack the authority to increase the minimum wage.

With all of these setbacks, and with new evidence that minimum wage hikes are hurting the very people they are supposed to help, maybe SEIU should stop spending so much time and money playing politics and focus its efforts on representing its members.

Richard McCarty is the Director of Research at Americans for Limited Government Foundation.
Stability of linear systems with interval time delays excluding zero

The stability of linear systems with multiple, time-invariant, independent, and uncertain delays is investigated. Each delay is assumed to reside within a known interval excluding zero. A delay-free sufficient comparison system is formed by replacing the delay elements with parameter-dependent filters satisfying certain properties. It is shown that robust stability of this finite-dimensional parameter-dependent comparison system guarantees stability of the original time-delay system. This result is novel in the sense that it does not require any a priori knowledge regarding stability of the time-delay system for some fixed delay. When the parameter-dependent filters are formed in a particular manner using Padé approximations, an upper bound on the degree of conservatism of the comparison system may be obtained, which is independent of the time-delay system considered. With this, it is shown that the conservatism of this comparison system may be made arbitrarily small. A linear matrix inequality (LMI) formulation is presented for analysis of the stability of the parameter-dependent comparison system. In the single-delay case, an eigenvalue criterion is also available for stability analysis, which incurs no additional conservatism.
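The abstract does not spell out how the parameter-dependent filters are constructed, but the Padé approximation it references is the standard device: a pure delay e^{-s\tau} is replaced by a stable, all-pass rational filter. As a point of reference (not taken from the paper), the first-order case is:

\[
  e^{-s\tau} \;\approx\; \frac{1 - s\tau/2}{1 + s\tau/2}
\]

Higher-order [m/m] Padé approximants match the delay's phase over a wider frequency band, which is the usual mechanism by which the conservatism of a finite-dimensional comparison system of this kind can be driven arbitrarily small.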
#abc127c - intersection of M intervals
# Each of the m conditions accepts IDs in [l_i, r_i]; the answer is the size
# of the intersection [max(l_i), min(r_i)], or 0 if the intersection is empty.
n, m = map(int, input().split())
l = []
r = []
for _ in range(m):
    li, ri = map(int, input().split())
    l.append(li)
    r.append(ri)

lmax = max(l)  # left edge of the intersection
rmin = min(r)  # right edge of the intersection
print(max(0, rmin - lmax + 1))
#ifndef _A4PROCESS_H_
#define _A4PROCESS_H_

#include <set>
#include <boost/program_options.hpp>

#include <a4/types.h>
#include <a4/message.h>
#include <a4/register.h>
#include <a4/object_store.h>

using a4::store::ObjectStore;
using a4::store::ObjectBackStore;

#include <a4/storable.h>
using a4::store::Storable;

namespace po = ::boost::program_options;

namespace a4 {
namespace process {

    //INTERNAL
    template <class This, typename... TArgs> struct _test_process_as;

    template <class This, class T, class... TArgs>
    struct _test_process_as<This, T, TArgs...> {
        static bool process(This* that, const std::string& n, shared<Storable> s) {
            shared<T> t = dynamic_pointer_cast<T>(s);
            if (t) {
                that->process(n, t);
                return true;
            } else return _test_process_as<This, TArgs...>::process(that, n, s);
        }
    };

    template <class This>
    struct _test_process_as<This> {
        static bool process(This* that, const std::string& n, shared<Storable> s) {
            return false;
        }
    };

    using a4::io::A4Message;

    class Driver;
    class Configuration;

    class OutputAdaptor {
      public:
        virtual void write(shared<const A4Message> m) = 0;
        virtual void metadata(shared<const A4Message> m) = 0;
        void write(const google::protobuf::Message& m);
        void metadata(const google::protobuf::Message& m);
    };

    class Processor {
      public:
        enum MetadataBehavior { AUTO, MANUAL_FORWARD, MANUAL_BACKWARD, DROP };
        MetadataBehavior get_metadata_behavior() { return metadata_behavior; }

        Processor() : my_configuration(NULL), skip_to_next_metadata(false),
                      rerun_channels_current(NULL), rerun_systematics_current(NULL),
                      locked(false), metadata_behavior(AUTO) {}
        virtual ~Processor() {}

        /// This function is called at the start of a new metadata block.
        /// In here you can return an alternate metadata message
        /// if auto_metadata is true.
        virtual shared<A4Message> process_new_metadata() { return shared<A4Message>(); };

        /// Override this to process raw A4 Messages
        virtual void process_message(shared<const A4Message>) = 0;

        /// This function is called at the end of a metadata block
        virtual void process_end_metadata() {};

        /// Write a metadata message that (manual_metadata_forward ? starts : ends) a metadata block.
        /// To use this method you have to disable automatic metadata writing.
        /// You also need to think about whether you want to write your metadata
        /// before (manual_metadata_forward = true) or after
        /// (manual_metadata_forward = false) the events it refers to.
        void metadata_start_block(shared<A4Message> m) {
            assert(metadata_behavior == MANUAL_FORWARD);
            _output_adaptor->metadata(m);
        }
        void metadata_start_block(const google::protobuf::Message& m) {
            assert(metadata_behavior == MANUAL_FORWARD);
            _output_adaptor->metadata(m);
        }
        void metadata_end_block(shared<A4Message> m) {
            assert(metadata_behavior == MANUAL_BACKWARD);
            _output_adaptor->metadata(m);
        }
        void metadata_end_block(const google::protobuf::Message& m) {
            assert(metadata_behavior == MANUAL_BACKWARD);
            _output_adaptor->metadata(m);
        }

        /// Write a message to the output stream
        void write(shared<const A4Message> m) { _output_adaptor->write(m); }
        void write(const google::protobuf::Message& m) { _output_adaptor->write(m); }

        /// Write a message to the output stream at most once per event
        void skim(shared<const A4Message> m) {
            if (not skim_written) write(m);
            skim_written = true;
        }
        void skim(const google::protobuf::Message& m) {
            if (not skim_written) write(m);
            skim_written = true;
        }

        /// Call channel in process_message to rerun with the prefix "channel/<name>/".
        /// In that run this function always returns true.
        bool channel(const char* name) {
            rerun_channels.insert(name);
            if (rerun_channels_current == NULL) return false;
            return strcmp(rerun_channels_current, name) == 0;
        }
        bool in_channel(const char* name) const {
            if (rerun_channels_current == NULL) return false;
            return strcmp(rerun_channels_current, name) == 0;
        }

        /// Call systematic in process_message to rerun with the prefix "syst/<name>/".
        /// In that run this function always returns true.
        bool systematic(const char* name) {
            rerun_systematics.insert(name);
            if (rerun_systematics_current == NULL) return false;
            return strcmp(rerun_systematics_current, name) == 0;
        }
        bool in_systematic(const char* name) const {
            if (rerun_systematics_current == NULL) return false;
            return strcmp(rerun_systematics_current, name) == 0;
        }

        /// (Idea, unimplemented:) From now on, all histos are saved under prefix "syst/<name>/" and scale <scale>
        // void scale_systematic(const char* c, double scale) { FATAL("Not Implemented"); return false; };

        /// Access your own Configuration.
        /// WARNING: there is only one configuration per process, and it is shared by all threads!
        /// Therefore, it is const. This may not prevent you from doing non-smart things with it.
        /// Suggestion: do "config = my<MyConfig>();" in your Processor.
        const Configuration* my_configuration;
        template<class T> const T* my() { return dynamic_cast<const T*>(my_configuration); }

        /// Set this flag to skip to the next metadata block
        bool skip_to_next_metadata;

        bool metadata_present() { return bool(metadata_message); }

      protected:
        /// In this store you can put named objects.
        /// It will be written and cleared at every metadata block boundary.
        ObjectStore S;

        /// This is the currently valid metadata message. If you manipulate it in AUTO mode
        /// in process_new_metadata the changes are written out.
        shared<const A4Message> metadata_message;

        /// Set the behaviour of metadata
        void set_metadata_behavior(MetadataBehavior m) {
            assert(!locked);
            metadata_behavior = m;
        }

        // Here follows internal stuff.
        std::set<const char*> rerun_channels;
        const char* rerun_channels_current;
        std::set<const char*> rerun_systematics;
        const char* rerun_systematics_current;
        OutputAdaptor* _output_adaptor;
        void lock_and_load() { locked = true; };
        bool skim_written;
        friend class a4::process::Driver;

      private:
        bool locked;
        MetadataBehavior metadata_behavior;
    };

    class Configuration {
      public:
        virtual ~Configuration() {};

        /// Override this to add options to the command line and configuration file
        virtual void add_options(po::options_description_easy_init) {};

        /// Override this to do further processing of the options from the command line or config file
        virtual void read_arguments(po::variables_map& arguments) {};

        virtual void setup_processor(Processor& g) {};
        virtual Processor* new_processor() = 0;
    };

    template<class ProtoMessage, class ProtoMetaData = a4::io::NoProtoClass>
    class ProcessorOf : public Processor {
      public:
        ProcessorOf() {
            a4::io::RegisterClass<ProtoMessage> _e;
            a4::io::RegisterClass<ProtoMetaData> _m;
        }

        /// Override this to process only your requested messages
        virtual void process(const ProtoMessage&) = 0;

        void process_message(shared<const A4Message> msg) {
            if (!msg) FATAL("No message!"); // TODO: Should not be fatal
            const ProtoMessage* pmsg = msg->as<ProtoMessage>();
            if (!pmsg) FATAL("Unexpected Message type: ", typeid(*msg->message()),
                             " (Expected: ", typeid(ProtoMessage), ")");
            process(*pmsg);
        }

        const ProtoMetaData& metadata() {
            if (!metadata_message) FATAL("No metadata at this time!"); // TODO: Should not be fatal
            const ProtoMetaData* meta = metadata_message->as<ProtoMetaData>();
            if (!meta) FATAL("Unexpected Metadata type: ", typeid(*metadata_message->message()),
                             " (Expected: ", typeid(ProtoMetaData), ")");
            return *meta;
        }

      protected:
        friend class a4::process::Driver;
    };

    template<class This, class ProtoMetaData = a4::io::NoProtoClass, class... Args>
    class ResultsProcessor : public Processor {
      public:
        ResultsProcessor() {
            a4::io::RegisterClass<ProtoMetaData> _m;
            have_name = false;
        }

        // Generic storable processing
        virtual void process_storable(const std::string&, shared<Storable>) {}

        void process_message(shared<const A4Message> msg) {
            shared<Storable> next = _next_storable(msg);
            if (next) {
                if (!_test_process_as<This, Args...>::process((This*)this, next_name, next)) {
                    process_storable(next_name, next);
                }
            }
        }

        shared<Storable> _next_storable(shared<const A4Message> msg);

        const ProtoMetaData& metadata() {
            shared<const A4Message> msg = metadata_message;
            if (!msg) FATAL("No metadata at this time!"); // TODO: Should not be fatal
            const ProtoMetaData* meta = msg->as<ProtoMetaData>();
            if (!meta) FATAL("Unexpected Metadata type: ", typeid(*msg->message()),
                             " (Expected: ", typeid(ProtoMetaData), ")");
            return *meta;
        }

      protected:
        std::string next_name;
        bool have_name;
        friend class a4::process::Driver;
    };

    template<class MyProcessor>
    class ConfigurationOf : public Configuration {
      public:
        /// Override this to setup your thread-safe Processor!
        virtual void setup_processor(MyProcessor& g) {}
        virtual void setup_processor(Processor& g) {
            setup_processor(dynamic_cast<MyProcessor&>(g));
        }

        virtual Processor* new_processor() {
            Processor* p = new MyProcessor();
            p->my_configuration = this;
            return p;
        }
    };

} }

#endif
By attacking a bar in Wajir, a town used by the Kenyan military, and by singling out non-Muslim victims in two brutal attacks near the border with Somalia, al-Shabab is hoping to send a stark and chilling message, both to the Kenyan government and to the public. Al-Shabab labels the presence of Kenyan troops, part of a wider African Union force in Somalia, an "occupation". It claims the recent air strikes by Kenyan fighter jets on al-Shabab targets in Somali territory amount to "aggression" and have caused "atrocities" among the civilian population.

The Somalia-based group, which much of the world regards as a terrorist organisation, wants to create the impression that the attacks are a direct consequence of the presence of Kenyan troops, and Kenyan airstrikes, in Somalia. But the message from Kenya's President Uhuru Kenyatta was uncompromising. At one point during his televised address, in language that reminded me of President George W Bush in the wake of the 9/11 attacks in New York, the Kenyan president told his people they were either with his government or with the "terrorists". Mr Kenyatta said his country was "at war" and fighting a "war on terrorism". His message to al-Shabab was clear: Kenyan troops would continue fighting in Somalia, and he would "intensify" the war.

Before Kenyan troops were deployed in Somalia in October 2011 there were several attacks in the Mandera region, and in other parts of northern Kenya, attributed to al-Shabab. But in the past few days the audacity and brutality of the attacks there have escalated, and so too has the pressure on the Kenyan authorities to prevent further violence. In the face of such cruel attacks, some people in Kenya will support their government's position. But there will inevitably be uncomfortable questions about the effectiveness of the Kenyan military operation in Somalia, and the effect it is having on Kenya's own internal security. And there may now be more calls for a pull-out.

What's more, the situation in this corner of north-eastern Kenya, near both the Ethiopian and Somali borders, is not a simple battle between Islamist militants and the Kenyan security services. There are also clan-based allegiances within the majority Somali-Kenyan population there, on either side of the border. It is possible that al-Shabab is successfully exploiting tensions and loyalties amongst other militant groups in the region.
/* * Help rtw_rf_macwrite: tell MAC to bang bits to RF over the 3-wire * interface. */ static int rtw_rf_macbangbits(struct rtw_regs *regs, uint32_t reg) { int i; RTW_DPRINTF(RTW_DEBUG_PHY, "%s: %08x\n", __func__, reg); RTW_WRITE(regs, RTW_PHYCFG, RTW_PHYCFG_MAC_POLL | reg); RTW_WBR(regs, RTW_PHYCFG, RTW_PHYCFG); for (i = rtw_macbangbits_timeout; --i >= 0; DELAY(1)) { if ((RTW_READ(regs, RTW_PHYCFG) & RTW_PHYCFG_MAC_POLL) == 0) { RTW_DPRINTF(RTW_DEBUG_PHY, "%s: finished in %dus\n", __func__, rtw_macbangbits_timeout - i); return (0); } RTW_RBR(regs, RTW_PHYCFG, RTW_PHYCFG); } cmn_err(CE_NOTE, "%s: RTW_PHYCFG_MAC_POLL still set.\n", __func__); return (-1); }
Dissociation between Corneal and Cardiometabolic Changes in Response to a Time-Restricted Feeding of a High Fat Diet

Mice fed a high fat diet (HFD) ad libitum show corneal dysregulation, as evidenced by decreased sensitivity and impaired wound healing. Time-restricted (TR) feeding can effectively mitigate the cardiometabolic effects of an HFD. To determine if TR feeding attenuates HFD-induced corneal dysregulation, this study evaluated 6-week-old C57BL/6 mice fed an ad libitum normal diet (ND), an ad libitum HFD, or a time-restricted (TR) HFD for 10 days. Corneal sensitivity was measured using a Cochet-Bonnet aesthesiometer. A corneal epithelial abrasion wound was created, and wound closure was monitored for 30 h. Neutrophil and platelet recruitment were assessed by immunofluorescence microscopy. TR HFD fed mice gained less weight (p < 0.0001), had less visceral fat (p = 0.015), and had reduced numbers of adipose tissue macrophages and T cells (p < 0.05) compared to ad libitum HFD fed mice. Corneal sensitivity was reduced in ad libitum HFD and TR HFD fed mice compared to ad libitum ND fed mice (p < 0.0001). Following epithelial abrasion, corneal wound closure was delayed (~6 h), and neutrophil and platelet recruitment was dysregulated similarly in ad libitum and TR HFD fed mice. TR HFD feeding appears to mitigate adipose tissue inflammation and adiposity, while the cornea remains sensitive to the pathologic effects of HFD feeding.

Introduction

Obesity can precipitate a cascade of systemic conditions, including metabolic syndrome, cardiovascular disease, non-alcoholic liver disease, and type 2 diabetes. Hence, the increasing global prevalence of obesity is very concerning. The World Health Organization considers a person to be overweight when their body mass index (BMI) is >25 and obese when their BMI is >30. Globally, about 2 billion individuals are overweight or obese, accounting for approximately 30% of the world population. In the United States, almost half (42.4%) of the adult population is obese. In addition to the cardiometabolic complications of obesity, there is emerging interest in its effects on vision. Recent studies show obesity is associated with an early loss of corneal nerve density and function. Loss of corneal nerve structure and function is detrimental to the health of the cornea, as corneal nerves release neurotrophic factors, which play a central role in the maintenance of corneal epithelial integrity and corneal transparency. The pathologic effects of obesity on the cornea can appear quickly and before the onset of hyperglycemia. In mice fed an obesogenic high fat diet (HFD) for 10 days, not only is there a loss of corneal nerve sensitivity, but there is also a noticeable delay in corneal wound healing following a central epithelial abrasion. Normal and efficient corneal wound healing is crucial for the preservation of corneal transparency, and when compromised, the cornea becomes susceptible to infection, ulceration, and opacification. Hence, there is a need to find novel strategies for preventing the corneal nerve degeneration and impaired corneal wound healing observed in diet-induced obesity. The energy imbalance resulting from increased consumption of energy-dense high-fat, -sugar, and -salt diets, collectively referred to as the Western diet, has been the main driving force of the current obesity epidemic. The temporal distribution of food intake plays an important role in mediating the metabolic and health outcomes of a given diet.
Chrono-nutrition, the coordination of food intake with the daily rhythm of an organism, offers a promising approach to forestalling or treating the cardiometabolic effects of diets. The range of dietary strategies that manipulate the timing of food consumption are collectively known as intermittent fasting. Intermittent fasting strategies can be broadly grouped into alternate-day fasting (ADF), whole-day fasting, and time-restricted (TR) feeding. ADF typically involves alternating periods of 36 h of fasting followed by 12 h of ad libitum food consumption. Some forms of ADF allow one meal containing ~25% of the individual's baseline caloric needs, typically consumed in the afternoon during fast periods. Whole-day fasting strategies usually involve 1-2 days of severe caloric restriction or complete food abstinence followed by ad libitum feeding the rest of the week. TR feeding is a form of chrono-nutrition, in which food intake is restricted to a period (usually 6-10 h/day) during the active hours of an organism. Evidence from animal studies shows TR feeding elicits favorable metabolic effects, including protection against weight gain, hyperinsulinemia, insulin resistance, and adipose inflammation in response to an HFD, without requiring alterations in caloric intake or nutrient composition. Even though TR feeding mitigates the cardiometabolic complications of diets, its utility in preventing the corneal dysregulation observed with ad libitum HFD feeding remains unknown. In this study, we used a diet-induced obesity mouse model to compare corneal and cardiometabolic changes in response to a TR feeding regimen. We evaluated the effect of TR feeding on the corneal nerve function loss and impaired corneal wound healing observed in ad libitum HFD fed mice. We hypothesized that TR feeding would mitigate the corneal nerve function loss and impaired corneal wound healing observed with ad libitum HFD feeding.

Mice

Six-week-old C57BL/6 male mice (Jackson Laboratory) were housed in the University of Houston (UH) and Baylor College of Medicine (BCM) Children's Nutrition Research Center vivaria under temperature control and a 12:12 light-dark cycle environment (two mice per cage). Mice housed at UH were divided into three 10-day feeding groups. Power analysis revealed that a sample size of ≥6 mice per group would have 80% statistical power to reliably detect an effect size of 0.5 (50%) in diet-induced corneal changes, assuming a 0.05 significance level. The first group (n = 6) was fed a normal chow diet (ND; 5V5R, LabDiet, St. Louis, MO, USA) ad libitum. The second group (n = 8) was fed an HFD (Diet #112734; Dyets Inc., Bethlehem, PA, USA) ad libitum, while the third group (n = 8) was fed the HFD using a TR regimen. Two groups of mice were housed at BCM (ad libitum HFD, n = 10, and TR HFD, n = 10), and these mice were used to evaluate body composition and adipose tissue inflammation only (see Section 2.3 below). The nutritional composition of the diets is summarized in Table 1. Figure 1A shows a schematic representation of the experimental feeding regimens. For TR feeding, mice were allowed access to food for 8 h (between 8 p.m. and 4 a.m.). A TR ND group was not included in the current study because previous studies found no significant differences in cardiometabolic parameters between ad libitum ND and TR ND groups. For experiments at Baylor College of Medicine Children's Nutrition Research Center, food access was monitored using an automated metabolic chamber.
For experiments at the University of Houston, food access was monitored by manually switching the mice from a cage with food and water to a cage with just water. Mice fed on the ad libitum regimens were also transferred between cages at the same time.

Ethics Statement

The study was approved by the Institutional Animal Care and Use Committee (IACUC) at Baylor College of Medicine (protocol #: AN-2721) and at the University of Houston (protocol #: 16-005). All procedures were performed according to the Association for Research in Vision and Ophthalmology (ARVO) Statement for the Use of Animals in Ophthalmic and Vision Research.

Body Composition and Adipose Tissue Inflammation

Body composition studies were performed in the Mouse Metabolic Research Unit at the USDA/ARS Children's Nutrition Research Center, Baylor College of Medicine. All mice were weighed at the start and end of experimental feeding. Prior to euthanasia, body composition was determined at the end of the experimental feeding using quantitative magnetic resonance (qMR) imaging. After euthanasia by carbon dioxide inhalation followed by cervical dislocation, epididymal adipose tissue (eAT) was harvested, weighed, and then processed for flow cytometric analysis to evaluate adipose inflammation. Antibodies against the following markers were used: CD45 and CD3 (BD Biosciences, San Diego, CA, USA) and F4/80 (eBioscience, San Diego, CA, USA). CD45+/CD3+ cells were identified as T cells, and CD45+/F4/80+ cells were identified as macrophages.

Corneal Nerve Function

A Cochet-Bonnet aesthesiometer (Richmond Products, Albuquerque, NM, USA) was used to measure the sensitivity of the cornea to tactile stimulation. The Cochet-Bonnet aesthesiometer has a thin nylon filament, which was held perpendicular to the central cornea. Starting at the maximum filament length (6.0 cm), the length was systematically decreased (0.5 cm increments) until a blink was observed when the filament was pressed against the central corneal surface. Shortening the filament length stiffens the filament, resulting in greater pressure. A decrease in corneal sensitivity is indicated when increased filament pressure is needed to elicit a blink.

Corneal Wound Healing

Mice were anesthetized via intraperitoneal injection of ketamine/xylazine (80 mg/8 mg/kg body weight). Using a trephine and a blunt golf-club spud, a 2 mm central corneal epithelial abrasion wound was created on the left eye. Wounding was performed in the morning of each day (between 8 a.m. and 12 p.m.) to avoid confounding circadian effects on wound healing and the inflammatory response. The size of the wound opening was imaged at the time of wounding (0 h) and 12, 18, 24, and 30 h after wounding using sodium fluorescein staining. Briefly, under isoflurane anesthesia, 1 µL of 1% sodium fluorescein was pipetted onto the corneal surface. The wound was then imaged using a stereomicroscope equipped with a digital camera and blue light illumination. The wound area, denoted by the pooled fluorescein in the image, was then measured with ImageJ software (NIH, Bethesda, MD, USA). The results for each time point were expressed as a percentage of the original wound size.

Immunofluorescence Staining

Mice were euthanized at 30 h after wounding, a timepoint known to correlate with peak neutrophil infiltration into the central cornea during wound healing. The eyes were enucleated and fixed in phosphate buffered saline (PBS) containing 2% paraformaldehyde (Tousimis Research Corporation, Rockville, MD, USA) for 45 min at room temperature.
Corneas were then excised from the eyeball, permeabilized in PBS containing 2% bovine serum albumin (BSA) and 0.01% Triton X-100 for 15 min, followed by blocking in PBS containing 2% BSA for an additional 45 min at room temperature. Corneas were then incubated overnight at 4 °C in a cocktail of fluorescently-labeled antibodies (5-10 µg/mL). The following antibodies were used: anti-CD31 antibody (for limbal blood vessels) (BioLegend, San Diego, CA, USA), anti-Ly6G antibody (for neutrophils) (BD Pharmingen, San Diego, CA, USA), and anti-CD41 antibody (for platelets) (BioLegend, San Diego, CA, USA). DAPI (4′,6-diamidino-2-phenylindole, Sigma-Aldrich, St. Louis, MO, USA) was added to the cocktail to visualize nuclei and mitotic figures. Corneas were flat mounted on a microscope slide in Airvol (Celanese, Dallas, TX, USA) and imaged with a DeltaVision epifluorescence light microscope (GE Life Sciences, Pittsburgh, PA, USA). Full-thickness images were captured with a 30× silicone lens with an image size of 381 µm × 381 µm and a z-section step size of 0.5 µm.

Morphometric Analysis of Neutrophil and Platelet Recruitment

To assess neutrophil infiltration, images were taken in the central, paracentral, parawound, paralimbal, and limbal regions of the cornea in each quadrant, as previously reported. Both DAPI and Ly6G staining were used to identify extravascular neutrophils. Neutrophil counts in each region from the four petals, except the center (which had counts from only one field per cornea), were averaged and expressed as neutrophils per field. For extravascular platelet assessment, the entire corneal limbus was imaged in each petal, and platelet counts from the four petals were summed together. Extravascular platelet counts were then expressed as platelets/mm² of limbal area, since extravascular platelets are non-motile and remain within the limbus. Epithelial cell division was assessed by counting mitotic figures, which were visualized via DAPI staining.

Statistical Analysis

Data were analyzed using GraphPad Prism 6 (GraphPad Software, La Jolla, CA, USA). Mean ± standard deviation was used to summarize data. Unpaired t-tests and ANOVAs (one-way, two-way, and repeated measures with Tukey post hoc tests for multiple comparisons) were used to analyze data when appropriate. For all statistical analyses, an alpha level of ≤0.05 was considered significant.

Results

Figure 1A shows the feeding regimen for each mouse group. Despite similar caloric intakes (Figure 1B), mice fed the TR HFD gained less weight compared to mice fed the ad libitum HFD (p < 0.0001, Figure 1C). In addition, TR HFD fed mice had less eAT mass compared to the ad libitum HFD group (p = 0.015, Figure 1D). Although TR HFD fed mice gained more weight than ND mice (p = 0.013), the two groups of mice did not differ significantly in eAT weight (p = 0.685). TR HFD feeding resulted in significantly lower body fat mass compared to ad libitum HFD feeding, as determined by qMR imaging (p = 0.0003, Table 2). HFD feeding induces inflammation in adipose tissue, which involves the infiltration of macrophages into adipose tissue. Adipose tissue macrophages (CD45+/F4/80+) were six-fold lower in TR HFD fed mice compared to ad libitum HFD fed mice (p = 0.032, Table 2). T cells play active roles in diet-induced adipose tissue inflammation, increasing early in adipose tissue and likely preceding the infiltration of macrophages.
In this study, TR HFD fed mice had nine-fold fewer T cells (CD45+/CD3+) in adipose tissue compared to ad libitum HFD fed mice (p = 0.028, Table 2). Furthermore, the total number of leukocytes (CD45+) in adipose tissue was eight-fold lower in TR HFD fed mice compared to ad libitum HFD fed mice (p = 0.018, Table 2).

Time-Restricted Feeding Did Not Prevent Dysregulation of Corneal Homeostasis

As expected, ad libitum HFD feeding caused a significant reduction in corneal nerve sensitivity (Figure 2A), as the filament pressure required to elicit a blink was increased when compared to ND mice. TR HFD feeding did not prevent this reduction, and corneal sensitivity was similar to that in ad libitum HFD mice (Figure 2A). Closure of a 2 mm epithelial abrasion wound in C57BL/6 mice is usually complete by 24 h after wounding. As expected, at 24 h after wounding, wound closure was complete in mice fed the ND. However, in mice fed the ad libitum HFD, wound closure was delayed by ~6 h, and this delay was also observed in TR HFD fed mice (Figure 2B). A two-fold reduction in basal epithelial cell division at the parawound was also noted for mice fed the ad libitum HFD or TR HFD (Figure 2C).

Neutrophil extravasation at the limbus and subsequent migration to the center of the cornea has been shown to be necessary for efficient corneal wound healing. Thirty hours post-wounding, peripheral limbal images from ad libitum HFD and TR HFD fed mice had twice as many neutrophils (Figure 3A,C) and 25% fewer platelets (Figure 3B,D) than ad libitum ND fed mice. Conversely, the accumulation of neutrophils at the wound center was reduced in the ad libitum HFD and TR HFD groups (Figure 3A).
Discussion

The aim of the current study was to compare corneal and cardiometabolic changes in response to a TR HFD feeding regimen. We found that although mice on the TR feeding regimen consumed an equivalent number of calories as those on the ad libitum HFD, TR feeding attenuated body weight gain, adiposity, and adipose tissue inflammation. However, despite the effects of TR HFD feeding on mitigating systemic dysregulation, it had no effect on the reduced corneal sensitivity, impaired corneal wound healing, or dysregulated neutrophil and platelet infiltration observed in ad libitum HFD fed mice. Given the clinical and economic burden associated with obesity, it has become imperative to find preventive and interventional strategies for obesity. With the pivotal role of lifestyle choices, such as nutrition and activity level, in the rising obesity epidemic, lifestyle modification has been the go-to strategy for preventing/treating obesity. This strategy has the advantage of being low-cost and easy to access as compared to surgical or pharmacological strategies for treating obesity. The temporal distribution of caloric intake has been shown to be a significant contributor to the cardiometabolic effects of diet. Under ad libitum feeding conditions, an HFD is known to blunt diurnal feeding rhythms, shortening an organism's fasting period while prolonging the feeding period. This disturbs various metabolic pathways entrained by the feed-fast cycle, predisposing the organism to obesity and other metabolic diseases. TR feeding, a form of chrono-nutrition that de-emphasizes reduction in caloric intake, is considered to be a potential behavioral strategy for preventing obesity. In humans, there are two main types of TR feeding: early TR feeding, where caloric intake is restricted to the early (morning) or middle (afternoon) part of the day, and late TR feeding, where caloric intake is restricted to late in the day (evening). These two TR feeding strategies produce diverging results. Early TR feeding reduces body weight gain, insulin levels, and systemic inflammation and increases insulin sensitivity, while late TR feeding worsens or has little effect on these cardiometabolic parameters. This divergence may be explained by the circadian system. The circadian system produces ~24 h rhythms in behavior, physiology, and metabolism through feedback loops involving the transcription and translation of genes, collectively known as clock genes (e.g., Bmal1, Clock, Per1/2, Cry1/2). This leads to oscillations in the expression and level of downstream target molecules. In humans, for instance, the expression and activity of key metabolic hormones such as insulin and cortisol exhibit a rhythm, with peak expression and activity in the morning and nadir in the evening. Insulin sensitivity also exhibits a 24 h rhythm, with a peak and a nadir in the morning and evening, respectively, suggesting the morning is an optimal time for food intake. The opposite is observed in nocturnal animals such as mice and rats. In the current study, ad libitum HFD feeding resulted in significant adiposity (body weight gain and eAT deposition) relative to ad libitum ND feeding. Daily TR HFD feeding (8 h) attenuated adiposity and diet-induced adipose inflammation without altering nutritional/caloric intake. This is in agreement with reports by other investigators [49]. Hatori et al.
reported that despite equivalent caloric consumption between ad libitum and TR access to an HFD, TR feeding in mice protected against obesity, hyperinsulinemia, hepatic steatosis, and systemic inflammation. The beneficial effects of TR eating have also been reported in humans. Wilkinson et al. reported improvement in the cardiometabolic health (body weight, blood pressure, and atherogenic lipids) of individuals with metabolic syndrome following a two-week regimen of 10 h TR eating. Jamshed et al. reported improvement in glucose levels, lipid metabolism, and circadian clock gene expression in overweight individuals following a four-day 6 h early TR eating schedule. Jones et al. also reported that 8 h early TR eating for two weeks improved whole body insulin sensitivity and skeletal muscle glucose and branched-chain amino acid (BCAA) uptake. An important difference between these TR eating studies in humans and the current mouse study is that the human studies employed TR eating as an interventional strategy, while the current study employed TR feeding as a preventative strategy. Although TR feeding of the HFD resulted in a significant reduction in body weight gained compared to the ad libitum HFD, mice on the TR HFD gained more weight than ad libitum ND mice. However, visceral adiposity (eAT) was not different between the TR HFD and ad libitum ND groups. A possible reason for this discrepancy may be the fact that several fat deposits (e.g., epididymal, retroperitoneal, mesenteric, inguinal, cervical, etc.) contribute to the overall body weight. A limitation of our study is that only epididymal fat deposits were used to assess visceral adiposity. Hence, the TR HFD and ad libitum ND groups may have differed in the weight of other fat deposits, which in turn may have contributed to the observed difference in overall body weight. An important determinant of the overall metabolic signal needed to maintain body weight at a steady-state value is the duration of fasting. Insulin is an anabolic hormone that facilitates fatty acid synthesis and storage, alongside its effects on glucose uptake and storage. Diet-induced obesity is associated with insulin hypersecretion and insulin resistance. TR feeding is largely centered on the prolonged daily fasting period (14-16 h), which gives the body a chance to repair oxidative damage, leading to metabolic adaptations that sustain weight loss. Fasting leads to decreased insulin production and reduced levels of insulin in the circulation and also increases fatty acid utilization. Fasting also forces a shift in metabolic pathway usage from glucose-driven oxidative phosphorylation to ketone- and fatty acid-dependent metabolism. Ketones are produced from fatty acids by the liver through a process known as ketogenesis, and fasting maintains ketogenesis. The shift in fuel utilization from glucose to ketones reinforces the metabolic circadian rhythm while reducing oxidative stress and systemic inflammation. The gut microbiome has been shown to be instrumental in metabolism and the metabolic effects of diets. In humans, the two main phyla of the gut microbiome are Firmicutes (F) and Bacteroidetes (B), and an increase in the F/B ratio has been linked to obesity and increased metabolic disorders. TR feeding exerts beneficial effects on the gut microbiome by decreasing the F/B ratio.
Hence, the observed reduction in adiposity in mice fed a TR HFD to levels comparable to those found in mice fed the ad libitum ND may be due to a combination of the effects fasting has on insulin signaling, fuel utilization, and the gut microbiome. Our previous study shows that ad libitum HFD feeding in mice for 10 days reduces corneal sensitivity and delays corneal wound closure in response to a central epithelial abrasion. Efficient corneal epithelial wound closure depends on a carefully regulated inflammatory response. This response includes a carefully orchestrated recruitment of neutrophils and platelets, which provide essential mediators (e.g., VEGF) that support epithelial cell division and nerve regeneration. This inflammatory response is dysregulated within 10 days of ad libitum HFD feeding. In the current study, we confirm these findings for ad libitum HFD feeding and now report an identical corneal pathology for mice fed the TR HFD. With respect to corneal wound healing, the dysregulation of the inflammatory response was indistinguishable in ad libitum HFD and TR HFD fed mice. Extravascular platelet counts at the limbus were reduced, and there was a marked reduction in neutrophil migration toward the wound center, which likely explains the excessive accumulation at the limbus. Hence, while TR HFD feeding is clearly able to mitigate the adipose inflammation (increased numbers of T cells and macrophages; see Table 2) seen in ad libitum HFD fed mice, it does not prevent the HFD-induced dysregulation of the inflammatory response seen after corneal abrasion. The similar and dysregulated inflammatory response seen in wounded corneas of ad libitum HFD and TR HFD fed mice likely accounts for the similarly reduced rates of epithelial division and wound closure seen under both feeding regimens. Most TR feeding studies in humans and rodents have focused on alterations in adiposity and metabolic health. Although the preventive utility of TR feeding in mitigating the cardiometabolic effects of diet-induced obesity has been studied extensively [49], its effects on nerve health and wound healing are less well studied. Some animal studies suggest that TR feeding may benefit nerve health by delaying or protecting against the onset of neurodegenerative diseases. Kentish et al. reported that TR feeding restores the gastric vagal afferent mechanosensitivity lost with ad libitum HFD feeding. Two studies demonstrated that TR feeding in a mouse model of Huntington's disease improves autonomic nervous function and motor coordination. An association between TR eating and cognitive status has also been reported in humans. In a cross-sectional cohort study, individuals adherent to TR eating (a 10 h eating window restriction) were found to be less likely to show cognitive impairment compared to those on an ad libitum eating schedule. The benefits of TR feeding on the nervous system are believed to act through brain-derived neurotrophic factor (BDNF). TR feeding increases BDNF levels. BDNF is a neurotrophin that is crucial for the development, maintenance, and plasticity of the nervous system. TR feeding increases ketone production due to the extended fasting. Increased ketone levels, specifically beta-hydroxybutyrate, induce transcription of BDNF in cortical and hippocampal neurons. Our current study suggests TR feeding does little to protect corneal nerve health, as the effects of the HFD continue to decrease corneal nerve sensitivity despite the TR regimen.
Hence, the beneficial effects of TR feeding may be nerve- and tissue-specific. TR feeding is largely centered on the prolonged daily fasting periods (14-16 h), which give the body a chance to repair oxidative damage. A possible explanation for the lack of benefit of TR feeding on corneal dysregulation may be found in the effect of extended fasting on the expression of insulin-like growth factors (IGFs) and the importance of IGFs to corneal homeostasis and wound healing. IGF-I has been shown to promote corneal epithelial cell migration and cell proliferation, processes that are important for corneal wound healing. Following epithelial abrasion, the secretion of IGF-I and IGF-II increases in the corneal epithelium, and expression of their native receptor, IGF-1R, also increases in the limbal epithelium. Fasting significantly reduces circulating levels of IGFs and gene transcription of IGF-I. Importantly, in the skin, the decrease in IGF-I expression caused by extended fasting has been linked to impaired wound healing. Future studies are warranted to investigate the effect of fasting/TR HFD feeding on the expression of IGFs and their corresponding receptors in the cornea, and whether exogenous IGFs can enhance corneal homeostasis and wound healing in HFD fed mice. The dissociation between the corneal response and the systemic adipose response to dietary strategies is not unique to TR feeding. We have previously reported such a dissociation in the response to a diet-reversal strategy. Even though switching from an HFD to an ND mitigated weight gain and visceral adiposity, the HFD feeding induced a heightened inflammatory state of the cornea, which persisted after diet-reversal. Thus, it is evident from the current TR feeding study and our prior diet-reversal study that the cornea is especially vulnerable to the effects of an HFD, even when diet-based therapeutic strategies designed to mitigate adipose inflammation and adiposity are employed.

Conclusions

In summary, "short-term" consumption of an ad libitum HFD causes corneal dysregulation in the form of corneal sensitivity loss and impaired corneal wound healing. While TR feeding attenuates systemic parameters such as adiposity and adipose tissue inflammation, it does not attenuate or prevent the corneal dysregulation observed with ad libitum HFD feeding. This suggests that the corneal changes are dissociated from the systemic changes regulated by TR feeding.

Informed Consent Statement: Not applicable.

Data Availability Statement: The data presented in this study are available on request from the corresponding author.
""" Contains functions for easily comparing versions of Maya with the current running version. Class for storing apiVersions, which are the best method for comparing versions. :: >>> from pymel import versions >>> if versions.current() >= versions.v2008: ... print "The current version is later than Maya 2008" The current version is later than Maya 2008 """ import re, struct from maya.OpenMaya import MGlobal as _MGlobal def parseVersionStr(versionStr, extension=False): """ >>> from pymel.all import * >>> versions.parseVersionStr('2008 Service Pack1 x64') '2008' >>> versions.parseVersionStr('2008 Service Pack1 x64', extension=True) '2008-x64' >>> versions.parseVersionStr('2008x64', extension=True) '2008-x64' >>> versions.parseVersionStr('8.5', extension=True) '8.5' >>> versions.parseVersionStr('2008 Extension 2') '2008' >>> versions.parseVersionStr('/Applications/Autodesk/maya2009/Maya.app/Contents', extension=True) '2009' >>> versions.parseVersionStr('C:\Program Files (x86)\Autodesk\Maya2008', extension=True) '2008' """ # problem with service packs addition, must be able to match things such as : # '2008 Service Pack 1 x64', '2008x64', '2008', '8.5' # NOTE: we're using the same regular expression (parseVersionStr) to parse both the crazy human readable # maya versions as returned by about, and the maya location directory. to handle both of these i'm afraid # the regular expression might be getting unwieldy # temp hack if 'Preview' in versionStr: version = '2014' else: ma = re.search( "((?:maya)?(?P<base>[\d.]{3,})(?:(?:[ ].*[ ])|(?:-))?(?P<ext>x[\d.]+)?)", versionStr) version = ma.group('base') if extension and (ma.group('ext') is not None) : version += "-"+ma.group('ext') return version def bitness(): """ The bitness of python running inside Maya as an int. """ # NOTE: platform.architecture()[0] returns '64bit' on OSX 10.6 (Snow Leopard) # even when Maya is running in 32-bit mode. The struct technique # is more reliable. return struct.calcsize("P") * 8 _is64 = bitness() == 64 _current = _MGlobal.apiVersion() _fullName = _MGlobal.mayaVersion() _shortName = parseVersionStr(_fullName, extension=False) _installName = _shortName + ('-x64' if _is64 else '') v85 = 200700 v85_SP1 = 200701 v2008 = 200800 v2008_SP1 = 200806 v2008_EXT2 = 200806 v2009 = 200900 v2009_EXT1 = 200904 v2009_SP1A = 200906 v2010 = 201000 v2011 = 201100 v2011_HOTFIX1 = 201101 v2011_HOTFIX2 = 201102 v2011_HOTFIX3 = 201103 v2011_SP1 = 201104 v2012 = 201200 v2012_HOTFIX1 = 201201 v2012_HOTFIX2 = 201202 v2012_HOTFIX3 = 201203 v2012_HOTFIX4 = 201204 v2012_SP1 = 201209 v2012_SAP1 = v2012_SP1 v2012_SP2 = 201217 v2012_SAP1SP1 = v2012_SP2 v2013 = 201300 v2014 = 201400 v2015 = 201500 def current(): return _current def fullName(): return _fullName def installName(): return _installName def shortName(): return _shortName def is64bit(): return _is64 def flavor(): import maya.cmds try: return maya.cmds.about(product=1).split()[1] except AttributeError: raise RuntimeError, "This method cannot be used until maya is fully initialized" def isUnlimited(): return flavor() == 'Unlimited' def isComplete(): return flavor() == 'Complete' def isRenderNode(): return flavor() == 'Render' def isEval(): import maya.cmds try: return maya.cmds.about(evalVersion=1) except AttributeError: raise RuntimeError, "This method cannot be used until maya is fully initialized"
The Window, the Patch, & the Artifact

Is TIGHAR Artifact 2-2-V-1 a piece of wreckage from Amelia Earhart's aircraft? Photo courtesy Miami Herald.

Abstract

During Amelia Earhart's stay in Miami at the beginning of her second world flight attempt, a custom-made, special window on her Lockheed Electra aircraft was removed and replaced with an aluminum patch. The patch was an expedient field modification. Its dimensions, proportions, and pattern of rivets were dictated by the hole to be covered and the structure of the aircraft. The patch was as unique to her particular aircraft as a fingerprint is to an individual. Research has now shown that a section of aircraft aluminum TIGHAR found on Nikumaroro in 1991 matches that fingerprint in many respects.

Background

The special window on the port side of the aircraft was installed in the cabin door. As shown in this photo of William Harney's model of Earhart's Electra, the lavatory was divided from the rest of the cabin by a heavy bulkhead at Sta. 293 5/8 with a door that swung out into the cabin. Some time in early 1937, as part of the preparations for Earhart's world flight in March, somebody in Earhart's organization decided there should be a large, non-standard window on each side of the cabin. Although no rationale has been found in the available literature, the purpose of these special windows has long been presumed to be for the navigator to take celestial observations. The place chosen for the starboard-side window was the lavatory at the back of the cabin. As shown in this photo of the interior of Harney's model, the lavatory compartment encompassed Fuselage Stations 293 5/8, 307, 320, and 330. Electra fuselage station numbers represent the distance in inches from the tip of the airplane's nose, so the lavatory compartment was almost exactly three feet long. It was four feet tall, floor to ceiling, by four feet wide.

Understanding the Structures

TIGHAR was fortunate to have access to Lockheed c/n 1091, a Model 10A being restored to airworthy condition by Wichita Air Services in Newton, Kansas. Originally delivered to the Bata shoe company of Prague, Czechoslovakia in April, 1937, the aircraft enjoyed a long and varied career, eventually ending up in Denton, Texas. Four years ago, the Bata company purchased the aircraft and engaged Wichita Air Services to perform a complete rebuild. The restoration, now nearing completion, included re-skinning 90% of the airframe. In May, 2015 c/n 1091, registered as OK-CTB, will fly to Europe. On October 7, 2014 TIGHAR Executive Director Ric Gillespie, forensic imaging specialist Jeff Glickman, and aircraft structures expert Aris Scarla visited the aircraft and collected the data and photographs used in this report. TIGHAR video cameraman/producer Mark Smith of Oh Seven Films documented the research. We are indebted to the Wichita Air Services restoration team who generously provided us unlimited access to the aircraft and enthusiastically participated in the investigation.

Fortunately, the lavatory compartment in c/n 1091 has been restored as a baggage compartment, so there is no sink or water tank mounted on the wall.
Looking into the lavatory compartment, this is the area where the special window was installed.

These are the standard structures in the area where the window was installed.

To install the window, it was necessary to cut a hole in the airplane. The red line describes the area to be cut out.

To make the required hole, these are the structures that had to be cut.

The resulting hole looked like this.

The frame that held the glass or Plexiglas was mounted on the exterior of the aircraft and riveted to underlying structure.

The frame could not be riveted directly to the underlying structure at Sta. 293 5/8 because of the thick lavatory bulkhead on the interior (no way to buck the rivets), so new underlying structure was added just aft of Sta. 293 5/8 to provide something to rivet the frame to. New underlying structure was also added along the top edge of the frame for the same reason.

With the frame installed, the interior looked like this. The blue lines show the placement of the new underlying structure.

The last photo showing the window in place can be dated to Saturday, May 29, 1937, when Amelia's step-daughter-in-law Nilla Putnam and her husband David Putnam (GP's son) visited Miami from their home in Ft. Pierce, Florida.
PSUN62 A Rare Case of Atorvastatin Induced Drug Reaction with Eosinophilia and Systemic Symptoms (DRESS) Syndrome - A Case Report

Abstract

Clinical Case

A 77-year-old female presented to the emergency department with a progressively worsening rash that had started one week earlier. On physical examination, blanching erythema was noted on the lower back, buttocks, and extremities. There was no desquamation or involvement of the oral mucosa. No lymphadenopathy or hepatosplenomegaly was noted. Laboratory investigations showed a white blood cell count of 21.2 × 10³/µL, with 33.1% eosinophils. Creatinine was elevated at 2.8 mg/dL from a baseline of 0.8 mg/dL. Further lab work revealed alkaline phosphatase 2314 IU/L, aspartate transaminase 587 IU/L, alanine transaminase 620 IU/L, total bilirubin 7.3 mg/dL with direct bilirubin of 5.1 mg/dL, an immunoglobulin E level of 2000 IU/mL, and normal tryptase levels. Serology for viral hepatitis, Epstein-Barr virus, cytomegalovirus, and human immunodeficiency virus was negative. Autoimmune workup including antinuclear antibody (ANA), anti-neutrophil cytoplasmic antibody (ANCA), mitochondrial antibody, smooth muscle antibody, double-stranded DNA, anti-histone antibody, rheumatoid factor, Sjogren's antibody, and centromere antibody was negative. Urinalysis, urine protein electrophoresis, and serum protein electrophoresis were also normal. Computed tomography (CT) scan of the chest showed mild supraclavicular and mediastinal lymphadenopathy. Review of medical records revealed that the patient had recently been discharged from hospital with a prescription for atorvastatin following an admission for a transient ischemic attack 6 weeks ago. Skin biopsy showed superficial perivascular dermatitis with extravasation of red blood cells, with no diagnostic evidence of vasculitis, granuloma, or malignancy. Based on the biopsy findings; the recent drug administration followed by the appearance of rash; the hematological abnormality, lymphadenopathy, and multi-system involvement; and the exclusion of other possible causes, a diagnosis of DRESS syndrome was made. Atorvastatin was held, and the patient was given methylprednisolone for three days followed by prednisone 1 mg/kg with a taper. The rash, as well as kidney and liver function, improved drastically within two days.

Discussion

Our patient presented with one week of rash associated with deteriorating kidney and liver function. Atorvastatin was initiated five weeks prior to the development of the rash. All the possible infectious etiologies and malignancy were ruled out based on the laboratory work mentioned above. Workup for vasculitis was also negative. DRESS syndrome is a late-onset drug-induced hypersensitivity reaction which usually manifests 3 weeks to months after the offending drug is started. It is very rare, with an incidence of 0.9-2 per 100,000 patients per year, and accounts for 10-20% of all cutaneous adverse drug reactions. DRESS syndrome, if untreated, carries a significant risk of mortality, between 10-20%. The RegiSCAR (European Registry of Severe Cutaneous Adverse Reactions) scoring system can be used to help diagnose DRESS syndrome. It takes into account multiple factors, including fever, lymphadenopathy, eosinophilia, rash, number of organs involved, disease duration, skin biopsy findings, and negative results of investigations to rule out alternate causes of the patient's symptoms. The patient responded well to systemic steroids followed by oral steroids with a taper.

Presentation: Sunday, June 12, 2022 12:30 p.m. - 2:30 p.m.
The Force awakens everywhere on December 18. In addition to catching up with the heroes from the original trilogy, fans will also be introduced to new characters like Finn, Rey, Poe Dameron, Kylo Ren, and the spherical droid, BB-8. But as we all know, there’s a lot a story can’t tell you on the screen. Fans can find out much, much more about the new characters, vehicles, and worlds of Star Wars: The Force Awakens in a slew of novels, art books, visual dictionaries, and sticker collections also coming December 18. Familiar relics and ships from the film like Luke’s lightsaber and the Millennium Falcon will also be examined in closer detail. The answers are coming. Get your first look at the covers and descriptions below. UPDATED 12/1 with a preview of Star Wars: The Force Awakens Incredible Cross-Sections! Star Wars: The Force Awakens Incredible Cross-Sections See the vehicles of Star Wars: The Force Awakens in unparalleled detail with this newest addition to the Star Wars Incredible Cross Sections series. Twelve breathtaking artworks bring the new craft to life, showing all of the weapons, engines, and technology, while engaging text explains each vehicle’s backstory and key features. The Art of Star Wars: The Force Awakens Step inside the Lucasfilm art departments for the creation of fantastical worlds, unforgettable characters, and unimaginable creatures. The Art of Star Wars: The Force Awakens will take you there, from the earliest gathering of artists and production designers at Lucasfilm headquarters in San Francisco to the fever pitch of production at Pinewood Studios to the conclusion of post-production at Industrial Light & Magic — all with unprecedented access. Star Wars: The Force Awakens Visual Dictionary The complete guide to Star Wars: The Force Awakens, this Visual Dictionary reveals all of the characters, creatures, droids, locations, and technology. Star Wars: The Force Awakens Ultimate Sticker Collection Join the heroes of the galaxy on their amazing adventure. Explore incredible worlds, meet bizarre aliens, and face sinister new villains. Star Wars: Before the Awakening A companion piece to the Journey to Star Wars: The Force Awakens character novels, Star Wars: Before the Awakening is an anthology book that focuses on the lives of Rey, Finn, and Poe before the events of the Star Wars: The Force Awakens. Star Wars: Rey Meets BB-8 (Level 1 Reader) Rey’s life is turned upside down when she meets a little droid named BB-8. Star Wars: Finn & Rey Escape! (8×8, w/stickers) When Finn and Rey find themselves on the run, they must work together to escape from the evil First Order. Star Wars: Han & Chewie Return! (8×8) Everyone’s favorite smugglers are back! Han Solo and Chewbacca return in a heroic new adventure set in a galaxy far, far away. 5-Minute Star Wars Stories Blast off into hyperspace with these 11 action-packed Star Wars tales! Jedi Master Yoda has a lightsaber showdown with the dreaded Count Dooku; Luke Skywalker and the Rebels race against time to destroy the Death Star; and the brave but lonely Rey makes a new friend when she meets the astromech droid BB-8. Each of these stories is the ideal length for reading aloud in five minutes — perfect for galactic adventures at lightspeed. Star Wars: The Force Awakens Flashlight Adventure Book Grab your flashlight and discover hidden surprises as you explore planet Jakku with Rey, Finn, and BB-8. 
This box set features a board book with pop-ups on every spread, and a real working flashlight that plays five different sounds from Star Wars: The Force Awakens. Star Wars: The Force Awakens – Lightsaber Rescue The galaxy is in trouble again! Activate your lightsaber and help join the fight! The Lightsaber Rescue storybook features a non-removable lightsaber module with six sounds to bring the story of Star Wars: The Force Awakens to life. Star Wars: The Force Awakens Mix & Match Captain Phasma, Kylo Ren, Poe, Rey, and Finn are featured in this book that blends spectacular action with three-panel pages to create confounding combinations! Mix and match the heads, bodies, and legs more than 200 different ways to create crazy mash-ups between the Resistance and the First Order! Star Wars: The Force Awakens – Rey’s Survival Guide Complete with stories, secrets, and insights, this guide will immerse readers in the world of Star Wars: The Force Awakens. Discover what you need to survive the hostile planet, Jakku. What secrets lurk inside the ship graveyard? What do you want to salvage? What should you avoid to stay alive? Includes gatefolds with exclusive artifacts including starship schematics and more! Still to come on 12/18: Star Wars: Finn and the First Order; Star Wars: Look and Find; Star Wars: The Force Awakens New Adventures; Star Wars: The Force Awakens Adult Novelization, Star Wars: The Force Awakens Junior Novelization Stay tuned to StarWars.com for more on Star Wars: The Force Awakens books! StarWars.com. All Star Wars, all the time.
/******************************************************************************* * Copyright (c) 2004, 2015 IBM Corporation and others. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * <NAME> (IBM) - Initial API and implementation * <NAME> (Wind River Systems) * <NAME> (IFS) *******************************************************************************/ package org.eclipse.cdt.internal.core.dom.parser.cpp; import org.eclipse.cdt.core.dom.ast.ASTVisitor; import org.eclipse.cdt.core.dom.ast.cpp.ICPPASTReferenceOperator; /** * Reference operator for declarators. */ public class CPPASTReferenceOperator extends CPPASTAttributeOwner implements ICPPASTReferenceOperator { private final boolean fIsRValue; public CPPASTReferenceOperator(boolean isRValueReference) { fIsRValue= isRValueReference; } @Override public boolean isRValueReference() { return fIsRValue; } @Override public CPPASTReferenceOperator copy() { return copy(CopyStyle.withoutLocations); } @Override public CPPASTReferenceOperator copy(CopyStyle style) { CPPASTReferenceOperator copy = new CPPASTReferenceOperator(fIsRValue); return copy(copy, style); } @Override public boolean accept(ASTVisitor action) { if (action.shouldVisitPointerOperators) { switch (action.visit(this)) { case ASTVisitor.PROCESS_ABORT: return false; case ASTVisitor.PROCESS_SKIP: return true; } } if (!acceptByAttributeSpecifiers(action)) return false; if (action.shouldVisitPointerOperators) { if (action.leave(this) == ASTVisitor.PROCESS_ABORT) return false; } return true; } }
One company striving to grasp the digital thread is Russian giant Gazprom Neft. The company is active in all of Russia's major oil and gas regions, selling throughout the Russian Federation and exporting its products to more than 50 countries worldwide. Alexey Vashkevich, head of geological exploration at Gazprom Neft, is clear that each company needs to base its strategy on its value drivers and then identify which product it needs to use for the digital transformation.

"When we started this process, we had about 200 ideas in the digital area," he says. "Then we started to prioritize those ideas, trying to build what would bring the biggest value. Then we just realized there was no way we can do that. It's just so difficult, there is so much uncertainty. We don't really have experience in doing that.

"We revised the process starting from the top level by deciding what we thought the main value drivers are in the digital arena for exploration. Then based on that we started building the products. There are many companies that try investing in developing these basic digital solution tools themselves, which might be misunderstanding the whole point of where the value comes from. If you really understand why you're doing digital, what value you're trying to capture, the tools are there already for you to use."

Vashkevich calls the current business landscape an area of digital hype dominated by digital companies such as Google and Alibaba, which preach a new way of doing business as a recipe to be taken and applied, with success sure to follow. But he also believes that it is important to distinguish between the examples you usually hear.

"We call it the digital vortex; all those industries are one way or another located in the center of it," he explains. "These particular industries cannot survive without becoming digital. You cannot really think of a good bank right now without online applications, instant money transfer, and high-level security.

"But it's very important to understand, it's still an opportunity strategy, it's not a survival strategy. For us, we can still see ourselves in the next decade. If you ask the financial department or strategy department of any company, they will tell you that their strategy is to show this level of performance for the next 20 years, and digital is not part of their agenda."

Having met with companies that have successfully applied digital technology to their processes, as well as consultants and software development companies, he realized that the available technologies could have a higher impact on the later stages of field development. Advanced analytics and digital twins usually apply to the development and production phases, but it is in exploration where the biggest costs and requirements are. The process flow of exploration is lengthy: from seismic work until the geological model is complete, the average span is about 18 months.

"We're talking about improving that by two or three months," Vashkevich continues. "Although a lot of the software applications are digitized, it's mostly done by humans and transferred from one stage to another.

"We need to simplify the data. By the end of the cycle of exploration we probably have about 10% of the data that we hand over to the next phase of development, drilling, and production. Our main task was historically to get as much data at the beginning and then simplify it in a way that keeps most of the information available.

"You need to start thinking in the opposite way."
"We know that drilling does not need the full geological model but only needs some parameters set. Can we get those parameters from the source data? Can we get a new way of interpreting seismic data where you can extract that information online and just avoid that middleman of geology modeling?"
First infection-fighting monoclonal antibodies scrutinized by FDA Advisory Committee members.

ABSTRACT

Agents against some gram-negative infections are moving closer to receiving the Food and Drug Administration's (FDA's) green light. If these agents are licensed, they will be the first monoclonal antibodies available for clinical use in the United States. Members of the FDA's Advisory Committee on Vaccines and Related Biological Products have heard from two manufacturers of these monoclonal antibodies that studies show a reduction in mortality and morbidity in patients with gram-negative sepsis. While the committee has made no formal recommendations to the FDA commissioner for licensure of these products, the members do agree that one of them, Centoxin (HA-1A) (Centocor Inc, Malvern, Pa), could get the green light if the types of patients in which it is most likely to be useful can be identified with reasonable precision. The committee also says that if data analysis on the other product, E5 (Xoma Corp, Berkeley, Calif), shows similar efficacy, it,
# Legacy prefix-style URLconf: django.conf.urls.patterns was deprecated in
# Django 1.8 and removed in 1.10; views are referenced as dotted-path strings.
from django.conf.urls import patterns, url

urlpatterns = patterns(
    'text.api.views',
    url(r'^analyze/$', 'analyze', name='api_text_analyze'),
    url(r'^$', 'texts', name='api_texts'),
    url(r'^(?P<task_id>[-\w]+)/$', 'text', name='api_text'),
)
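For comparison, here is a minimal sketch of the same routes in modern Django (2.0 or later), where patterns() no longer exists and views are passed as callables. Importing the views this way assumes text/api/views.py defines analyze, texts, and text, as the dotted strings above suggest.

# Hypothetical modern equivalent of the legacy URLconf above (Django >= 2.0).
from django.urls import path, re_path

from text.api import views

urlpatterns = [
    path('analyze/', views.analyze, name='api_text_analyze'),
    path('', views.texts, name='api_texts'),
    # re_path keeps the original regex for the slug-like task_id capture.
    re_path(r'^(?P<task_id>[-\w]+)/$', views.text, name='api_text'),
]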
Cardi B's Harper's Bazaar cover was just revealed, and the rapper looks like she stepped right out of Disney's Tangled. The Rapunzel-inspired shoot features the star sitting in the window of a tower letting down her super long hair. Considering the countless times she's worn different wigs and extensions in her day-to-day life, we can only imagine how enthusiastic she was about the shoot. This isn't the first time she's opted for super long hair either — the star adores having her hair down to her hips and is often seen with wigs with serious length in a rainbow of hues. Check out some of the times she's reminded us of Rapunzel ahead, and see the rest of the magazine photos.
import { Controller } from '@nestjs/common';

@Controller('eventos')
export class EventosController {}
GOP presidential hopeful Mike Huckabee on Sunday defended Kentucky county clerk Kim Davis for refusing to issue same-sex marriage licenses. When asked by ABC host George Stephanopoulos whether Davis had an obligation to uphold the law, even if she disagreed with it, Huckabee argued she did not. "You obey if it's right," the former Arkansas governor said on "This Week." "So, I go back to my question, is slavery the law of the land because Dred Scott said so? Was that a correct decision? Should the courts have been irrevocably followed on that? Should Lincoln have been put in jail? Because he ignored it. That's the fundamental question." The 1857 Dred Scott decision is widely viewed as the worst Supreme Court ruling in history. In it, the Court ruled that no one with African ancestry could be a citizen of the United States and voided prior legislation that had blocked the expansion of slavery into parts of the country. Huckabee, like some other conservatives, argued that a 19th-century ruling requiring discrimination against black people is similar to a 21st-century ruling barring discrimination against LGBT people. Davis has argued that her religious convictions bar her from allowing two people of the same sex to be legally recognized as married.
package org.phoenix.leetcode.challenges;

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;

class Problem09_LargestPlusSignTest {

    private final Problem09_LargestPlusSign test = new Problem09_LargestPlusSign();

    @Test
    void orderOfLargestPlusSign() {
        int[][] mines = new int[][]{{4, 2}};
        assertEquals(2, test.orderOfLargestPlusSign(5, mines));
    }
}
Death Lineup

The Death Lineup was a group of smaller basketball players on the Golden State Warriors of the National Basketball Association (NBA) from 2015 to 2019. Developed under head coach Steve Kerr, it began during their 2014–15 run that led to an NBA championship. Unlike typical small-ball units, this Warriors lineup was versatile enough to defend larger opponents, while also aiming to create mismatches on offense with their shooting and playmaking skills. The lineup featured the Splash Brothers, a three-point shooting backcourt consisting of two-time NBA MVP Stephen Curry and perennial two-way All-Star Klay Thompson. It also featured versatile defender Andre Iguodala on the wing alongside scorer Kevin Durant, and 2016–17 Defensive Player of the Year Draymond Green at center. Green's defensive versatility was described as the "key" that allowed the lineup to be so effective; although his natural position was power forward, he was able to play as an undersized center in lieu of a traditional center who might have been slower or lacked Green's playmaking and shooting abilities. The lineup originally included Harrison Barnes, who was replaced by former league MVP Durant in 2016–17, when the group also began to be known as the "Hamptons Five". The Death Lineup was considered indicative of a larger overall trend in the NBA towards "positionless" basketball, where traditional position assignments and roles have less importance. The Death Lineup ended after the 2018–19 season, when Durant left the Warriors for the Brooklyn Nets and Iguodala was traded to the Memphis Grizzlies.

Origins and first championship

In 2014–15, Golden State won 67 games in the regular season, led by NBA MVP Stephen Curry. The Warriors' starting lineup with the 7-foot (2.1 m) Andrew Bogut at center played 813 minutes together and outscored opponents by 19.6 points per 100 possessions. After falling behind 2–1 in the 2015 NBA Finals, Warriors coach Steve Kerr inserted Andre Iguodala into the starting lineup in place of Bogut, who had been named to the NBA All-Defensive Team in 2015. The change was first suggested by Kerr's special assistant, Nick U'Ren, who favored the lineup because it always seemed to quicken the pace of the game, which the Warriors preferred. The five-man lineup of Iguodala, Curry, Thompson, Green, and Barnes had played together for 102 minutes during the regular season and 62 minutes through the first 18 games of the playoffs. Although the tallest player was only 6 feet 8 inches (2.03 m), the unit was also strong defensively. They were all able to switch on defense, spearheaded by Green's ability to guard players taller and heavier than him. The Warriors won 103–82 in Game 4, and captured the series 4–2 to win their first championship since 1975. Iguodala was named the Finals MVP, becoming the first player to garner the award without starting every game in the series, as well as the first winner to have not started a game during the regular season.

73-win record

It was not until months after it was deployed in the Finals that the Warriors' small lineup was referred to as the Death Lineup. Despite the unit's success, the 2015–16 Warriors continued to start a traditional lineup with Bogut as their center. Golden State won an NBA-record 73 games behind Curry's league-leading 30.1 points per game and an NBA-record 402 three-pointers made in a season. He was named the MVP for the second straight season, becoming the first unanimous winner in league history.
During the season, the Death Lineup was generally reserved for finishing the first half and closing games. It was deployed in 37 games, outscoring opponents by 166 points in 172 minutes for an average advantage of 4.5 points per game and 47.0 points per 100 possessions. Despite their regular season success, the Warriors lost the 2016 NBA Finals, becoming the first team to lose a Finals series after leading 3–1.

"Hamptons Five"

During the off-season, Golden State signed former league MVP and four-time scoring champion Kevin Durant to replace Harrison Barnes, who had averaged just five points and made only 5 of 32 shots during the last three losses in the Finals. With Durant leaving the Oklahoma City Thunder, the Warriors' opponents in the 2016 Western Conference Finals, the move was seen as a disruption in the competitive balance of the NBA, and the Warriors instantly became title favorites. The move gave the Death Lineup four players who had averaged at least five assists in a season. San Francisco Bay Area journalist Tim Kawakami coined the nickname "Hamptons Five" for the new group, which included the four players who traveled with team officials to The Hamptons to recruit Durant. The Warriors won the 2017 NBA Finals 4–1, and Durant was unanimously voted the Finals MVP. Kerr had used the Death Lineup for just 16 minutes in the series until deploying it for 17 in the Game 5 clincher. Iguodala scored 20 points in 38 minutes after averaging less than 30 minutes in the first four games of the series. Kerr used him in the small-ball lineup in lieu of big men Zaza Pachulia (10 minutes) and JaVale McGee (0).

Injuries limited Golden State's use of the Death Lineup in 2017–18. Opponents had also adapted by rarely leaving their centers in the game against the Warriors' small lineup. Through 49 games, the unit had a −1.2 plus-minus rating. However, it was still considered the team's most potent lineup. In the final 17 games of the regular season, the Warriors were 7–10, with Curry missing all but one game after an ankle and later a knee injury. Durant, Thompson, Green, and Iguodala each missed four to eight games as well. In total, the group finished the regular season with 127 minutes played together over 28 games, outscoring their opponents by a modest 22 points.

In Game 4 of the conference semifinals in the 2018 playoffs, Kerr started the Hamptons Five with Durant for the first time ever, and they posted a plus-minus of +26 in 18 minutes to lead a 118–92 win over the New Orleans Pelicans, giving Golden State a 3–1 lead in the series. After eliminating the Pelicans in five games, the Warriors continued with the starting lineup in the conference finals to build a 2–1 series lead against the Houston Rockets. Golden State won the series 4–3, but Iguodala missed the last four games with a bruised leg. The Warriors entered the NBA Finals for the fourth straight year against their long-term rivals, the Cleveland Cavaliers. They swept the Cavaliers in four games, earning their third championship in four years.

Fifth consecutive finals

In 2018–19, the Warriors acquired DeMarcus Cousins, who was recovering from a ruptured left Achilles. He gave Golden State a top-flight, true center for the first time under Kerr, and they became the first team in 42 years with a starting lineup of five All-Stars from the previous season.
While the Hamptons Five played more (178 minutes in 38 games) than in the previous regular season, Kerr played the lineup of Cousins, Durant, Curry, Thompson, and Green more (268 minutes in 21 games) to integrate Cousins into his new team.

In Game 1 of the opening round of the 2019 playoffs, Kerr went to the Hamptons Five early, using them to quell a rally by the Los Angeles Clippers. The coach called the lineup "the best five-man unit in the league". Cousins finished the game with a −17 plus-minus. The Warriors eliminated the Clippers in six games, but Cousins tore his left quadriceps in Game 2, and he was initially thought to be out for the remainder of the postseason. In a sign of respect and with increased urgency, Kerr opened the following round against Houston by starting the Hamptons Five for the first time in the season. The Rockets featured two of the top one-on-one, pick-and-roll players in James Harden and Chris Paul, and the coach wanted Iguodala's defense in the starting lineup to counter them. In Game 2, each member of the lineup scored at least 15 points, a first for a Warriors starting unit under Kerr, and Golden State jumped to a 2–0 series lead. Houston countered the Warriors' small unit by playing forward P. J. Tucker at center along with four guards, forming a lineup of five shooters all 6 feet 6 inches (1.98 m) or shorter, and tied the series 2–2. In Game 5, Durant suffered a strained right calf and left with 2:05 remaining in the third quarter; he was later ruled out indefinitely. With Curry and Thompson struggling with their shooting, Durant had been their best player in the playoffs, averaging a team-leading 35.4 points entering the game. However, Curry led Golden State to a Game 5 win after scoring 16 of his 25 points after Durant exited. The Warriors captured the series on the road in Game 6, when Thompson scored 21 of his 27 points in the first half, and Curry collected all of his 33 points in the second half.

In the Western Conference finals, Golden State swept the Portland Trail Blazers 4–0, with three of the wins including comebacks of 15 points or more. After starting nine consecutive games, Iguodala missed the deciding Game 4 with a sore left calf suffered in Game 3. Curry averaged a series career-high 36.5 points, the highest average by a player in a four-game sweep in NBA history. He and Green both had triple-doubles in Game 4, becoming the first teammates in NBA playoff history to achieve the feat in the same game. The Warriors became only the second team to reach five straight NBA Finals, joining the Boston Celtics (1957–1966).

Golden State was down 3–1 in the 2019 Finals to the Toronto Raptors when Durant returned in Game 5. The Hamptons Five started the game, but Durant ruptured his right Achilles tendon in the second quarter. The Warriors lost the series in Game 6, when Thompson tore the anterior cruciate ligament (ACL) in his left knee. Both Durant and Thompson were expected to miss most, if not all, of the following season. After the season, the free agent Durant announced that he would sign with the Brooklyn Nets, while Thompson agreed to re-sign with Golden State. Eyeing a replacement for Thompson while he recovered from his injury, the Warriors traded Iguodala to the Memphis Grizzlies in order to free salary cap space to acquire All-Star guard D'Angelo Russell in a sign-and-trade package with Brooklyn for Durant.
package studio.utils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.netbeans.editor.*; import studio.ui.Util; import javax.swing.plaf.TextUI; import javax.swing.text.BadLocationException; import javax.swing.text.DefaultEditorKit; import javax.swing.text.Document; import javax.swing.text.JTextComponent; import java.awt.*; import java.awt.event.ActionEvent; public class CopyCutWithSyntaxAction extends BaseAction { private static final Logger log = LogManager.getLogger(); public enum Mode {COPY, CUT}; private Mode mode; public CopyCutWithSyntaxAction(Mode mode) { super(mode == Mode.COPY ? DefaultEditorKit.copyAction : DefaultEditorKit.cutAction, ABBREV_RESET | UNDO_MERGE_RESET | WORD_MATCH_RESET); this.mode = mode; } private String toHtml(String text) { return text.replaceAll("&", "&amp;") .replaceAll("<", "&lt;") .replaceAll(">", "&gt;"); } private void appendColor(StringBuilder builder, Color color) { builder.append("#"); String hex = Integer.toHexString( color.getRGB() & 0x00ffffff ); int countToPad = 6 - hex.length(); for (int i=0;i<countToPad; i++) { builder.append("0"); } builder.append(hex); } private void appendHtml(StringBuilder builder, String text, TokenID tokenID, Coloring coloring) { StringBuilder style = new StringBuilder(); Font font = coloring.getFont(); if (font != null) { style.append("font-family: ").append(font.getFamily()).append(", Courier;"); if (font.isBold()) { style.append("font-weight: bold;"); } if (font.isItalic()) { style.append("font-style: italic;"); } } if (coloring.getForeColor() != null) { style.append("color: "); appendColor(style, coloring.getForeColor()); style.append(";"); } if (coloring.getBackColor() != null) { style.append("background: "); appendColor(style, coloring.getBackColor()); style.append(";"); } builder.append("<span"); if (style.length()>0) { builder.append(" style=\"").append(style).append("\""); } builder.append(">").append(toHtml(text)).append("</span>"); } @Override public void actionPerformed(ActionEvent evt, JTextComponent editor) { if (editor == null) return; TextUI textUI = editor.getUI(); if (! (textUI instanceof BaseTextUI)) { editor.copy(); return; } int start = editor.getSelectionStart(); int end = editor.getSelectionEnd(); if (start == end) return; try { EditorUI editorUI = ((BaseTextUI) textUI).getEditorUI(); BaseKit baseKit = (BaseKit) textUI.getEditorKit(editor); Syntax syntax = baseKit.createSyntax(editor.getDocument()); Document document = editor.getDocument(); String text = document.getText(0, document.getLength()); syntax.load(null, text.toCharArray(), 0, text.length(), true, text.length()); StringBuilder htmlBuilder = new StringBuilder("<pre>"); StringBuilder textBuilder = new StringBuilder(); int offset = 0; while (offset < end) { TokenID token = syntax.nextToken(); if (token == null) break; int newOffset = syntax.getOffset(); int left = Math.max(start, offset); int right = Math.min(end, newOffset); if (left < right) { String tokenName = syntax.getTokenContextPath().getFullTokenName(token); Coloring coloring = editorUI.getColoring(tokenName); String tokenText = text.substring(left, right); appendHtml(htmlBuilder, tokenText, token, coloring); textBuilder.append(tokenText); } offset = newOffset; } htmlBuilder.append("</pre>"); Util.copyToClipboard(htmlBuilder.toString(), textBuilder.toString()); if (mode == Mode.CUT) { document.remove(start, end-start); } } catch (BadLocationException e) { log.error("Exception is not expected", e); } } }
Integrative Modeling and the Role of Neural Constraints Neuroscience constrains psychology, but stating these constraints with precision is not simple. Here I consider whether mechanistic analysis provides a useful way to integrate models of cognitive and neural structure. Recent evidence suggests that cognitive systems map onto overlapping, distributed networks of brain regions. These highly entangled networks often depart from stereotypical mechanistic behaviors. While this casts doubt on the prospects for classical mechanistic integration of psychology and neuroscience, I argue that it does not impugn a realistic interpretation of either type of model. Cognitive and neural models may depict different, but equally real, causal structures within the mind/brain.
1. Field The present disclosure relates to network management. More specifically, the present disclosure relates to a method and system for packet forwarding in a virtualized network with a centralized point of control. 2. Related Art The relentless growth of the Internet has brought with it an insatiable demand for bandwidth. As a result, equipment vendors race to build larger, faster, and more versatile routers to move traffic, especially in the core networks. In a traditional mode of operation, such a router has a control plane and a data plane. The router is typically controlled by local software which implements the control and data planes. The router communicates with other routers through predefined protocols. The control plane configures the forwarding information on the router and distributes control information (e.g., routing cost), and the data plane forwards data packets according to the forwarding information. However, each individual router requires tedious and complex configuration, typically performed by a network administrator. Hence, accessing and configuring such information remotely and automatically can improve the management of a router, thereby addressing one of the most challenging problems in today's networking paradigm. Remotely gaining access to a router's control plane can address some aspects of this problem. For example, OpenFlow is a protocol that allows configuring some routing policies, such as setting up a forwarding path, by software running on a separate server. However, OpenFlow only defines the flow table message for an OpenFlow switch, and does not provide sufficient infrastructure to control a router completely. Hence, while remotely configuring network paths on a router brings many desirable features to layer-3 networks, some issues remain unsolved for packet forwarding among a group of routers with a centralized point of control.
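To make the division of labor concrete, the following sketch shows how a centralized controller might install a forwarding rule in a switch's flow table over OpenFlow. This is a minimal illustration using the open-source Ryu controller framework and OpenFlow 1.3, not the system disclosed here; the match fields and output port are hypothetical examples.

# Minimal sketch: a centralized control plane pushing a data-plane rule
# to a switch over OpenFlow 1.3 via the open-source Ryu framework.
# The IPv4 destination and output port are hypothetical examples.
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, set_ev_cls
from ryu.ofproto import ofproto_v1_3


class CentralizedControl(app_manager.RyuApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def on_switch_connect(self, ev):
        dp = ev.msg.datapath
        ofp, parser = dp.ofproto, dp.ofproto_parser

        # Control-plane decision: send IPv4 traffic for 10.0.0.2 out port 2.
        match = parser.OFPMatch(eth_type=0x0800, ipv4_dst='10.0.0.2')
        actions = [parser.OFPActionOutput(2)]
        inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]

        # Data-plane effect: the switch now forwards matching packets itself.
        dp.send_msg(parser.OFPFlowMod(datapath=dp, priority=10,
                                      match=match, instructions=inst))

As the background notes, this covers flow-table configuration only; device-level settings beyond the flow table are outside what OpenFlow defines.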
A New Paradigm of Pharmaceutical Drug Delivery Systems (DDS): Challenges for Space, Time, and Shapes

Using 3D food printing with Internet of Things (IoT) technology, patients can receive diagnoses and prescriptions from their doctors while in the comfort of their homes. The patient-specific prescription has been innovated by converging 3D food printing technology with drug delivery systems (DDSs). Quantitative drug dosages can be incorporated into the composition of food and produced in any shape within a short time. Automating food and DDSs also holds promising implications for healing patients remotely. Each of these aspects, along with IoT technology, has contributed to increased health care for patients, no matter their location. The quantitative discharge of vitamin C dissolved in water, mayonnaise, ketchup, and peanut butter has been verified using the Piston Typed Extrusion (PTE) method. Designs with different curves and shapes were repeatedly printed with a head speed of 1.6×10⁻² m/s, and it was confirmed that effective control while printing the shapes was possible. The Hagen-Poiseuille (HP) formula was utilized to simulate the overall printing time. This simulation affirmed that increasing the head speed from 1.6×10⁻² m/s to 4.0×10⁻² m/s reduced the printing time consistently, but the time was not reduced further beyond 4.0×10⁻² m/s, depending on the materials' viscosities and how much curvature exists in the designs. The precision of printing was held within 5% of the theoretical value during printing, and IoT technology allowed printing of the materials within five minutes, regardless of the patient's location.

Introduction

The ancient Greek physician Hippocrates said that no medicine is needed, since there is no illness that cannot be cured with food.1 In the modern age, the advantages of food and medicine have been placed hand-in-hand for the benefit of humanity. A revolutionary drug delivery system (DDS) that accurately controls the dosages of drugs within food's composition has been studied. The development of a DDS to transmit precise amounts of drugs remotely has been a long-term interest in the medical field. DDSs have focused on continuous delivery mechanisms within the body through the use of microstructures in materials, and this field has been actively researched.2,3,4 One example of these mechanisms is the Ringer's solution. As the most preferred technique for drug transmission, a Ringer's solution is given to people who feel fatigued. All such solutions appear transparent, but there are hundreds of varieties, depending on their purpose.5 Basic solutions are prescribed to replenish the water levels of patients whose electrolytes are not balanced due to possible difficulties in drinking water or abnormalities in their heart or kidneys. Nutritional solutions are mixtures of solutions. Proteins, carbohydrates, fats, vitamins, and trace elements can be supplemented through these solutions. Special solutions are used for particular medical situations.5 In cases of blood infusions, precise dosages can be administered to a patient, depending on their age, weight, and clinical or biological conditions.6 These scenarios include administering a solution that lowers the pressure being placed on a patient's brain due to cerebral hemorrhaging and dispensing a solution to keep the amount of blood flow constant. The technique of making these solutions has contributed to the development of macro-DDSs by shining light on how versatile DDSs can become.
This versatility has been achieved with basic solutions, nutritional solutions, and special solutions, but the process of making these solutions is not easy, and many errors can occur.5 It carries the risk of underdosing or overdosing patients, and medical doctors or nurses cannot be ever-present to monitor each patient for these risk factors. The equipment involved in this method can also be cumbersome.6 When a macro-DDS is applied, the solutions can be accurately delivered to the patient from remote locations, and customized medication can be created for each patient using Internet of Things (IoT) technology.7 The new field of macro-DDSs will extend beyond the confines of micro-DDSs using IoT-based technology. This novel application has the potential to be directly involved in disaster situations or in health care to deal with daily food management for consumers without space constraints. Future DDSs are based on this recent concept, as it can be utilized to transfer precise amounts of drugs without space constraints, mix viscous substances, solidify a fluid for consumption when a patient may not be able to ingest liquids, and use equipment that dispenses quantified amounts of a liquid solution onto the surface of solidified food. Implementation of macro-DDSs will lead to a new health technique for quantitative medicinal management.

Production of quantified medication by this unique pharmaceutical system requires 3D printing technology for real practice. The applicability of 3D printing technology in the medical field has already been extensively explored. One instance is the research conducted on microneedle patches.8 Microneedle patches are dime-sized plastic strips that contain tens to hundreds of drug-filled microscopic needles. These needles are soluble in water, and little to no pain is felt when they are applied to a patient's skin.9 Another example of 3D printing in the medical field is the use of UV light to manufacture drug-containing structures by crosslinking. These structures, with different pore sizes and different drug loadings, were able to control drug release rates using 3D printing technology.10 What each of these medical devices has in common is that they are produced using 3D printing technology.

Challenges dealing with space, time, and shapes remain with these innovative DDSs. Effective drug delivery must not only be quantitatively controlled, but must also be free from space constraints, delivered in a short time, and printed in patient-preferred shapes in order to be easily accepted by patients. The authors of this study have submitted a patent on this DDS, and it is currently being utilized for pharmaceutical training and education.11 The trainees have succeeded in simulating 3D printing with the quantitative ejection of nutrients and carbohydrates into food.12 This research is encouraging since it shows that, if high-viscosity food material can be quantitatively printed, high-viscosity drug solutions can be as well. This technology has been continuously tested since 2018 in the joint degree program between The College of Engineering and The School of Pharmacy.12 Advancements in 3D printing technology have enabled custom applications that produce personalized drug treatments for individuals via the use of both Cloud and robotic technologies.13,14 For this innovative DDS to be practical, space constraints must be overcome through the IoT technology system.
To illustrate the IoT process, it can be divided into four sections: the Cloud, where information is accessed; MongoDB, the database where information is stored; the Raspberry Pi (RP), the microprocessor of the system; and Arduino, the microcontroller of the printer. The Cloud displays printing conditions, printing designs, user data, locations, and codes for available printers in a database called MongoDB.15 When a printer or a user requests any data, the stored data from MongoDB is transferred to the RP. The RP of the printer receives the data and orders the Arduino board to control the movement of the printer head according to the printing conditions and design. RP microprocessing equipment with an Arduino microcontroller has been patented.7 A patient's health information is entered through a smartphone device. Then, this data is stored and analyzed by MongoDB in the Cloud. This data can be recalled to smartphone devices at any time. MongoDB calculates food printing conditions based on the analyzed data. The user selects the design and requests for it to be printed. Next, the saved G-code file is transferred to the RP of the printer. When the RP sends orders to the Arduino board, printing begins to produce food that fits the health conditions of the patient. Because all the data is stored in the Cloud, it can be retrieved and printed from anywhere, whenever the device and printer are connected to Wi-Fi.

By finding practical ranges with real-time analysis of weight, head movement, temperature, etc., the optimal printing conditions have been thoroughly examined. First, data was sent to the Cloud from the RP. Next, the data was analyzed and sent back to the RP board from the Cloud server. These signals were transmitted from the RP to the Arduino board to initiate the printing operation.16 Applications on the Cloud can access microcontroller-based devices and collect data from them. This is what is meant by the term Internet of Things.16 The "IoT-enabled 3D Food Printer" has been successfully used by pharmacists, patients, and their family members through the food control program of the pharmacy.7,12

With IoT technology, drugs can be 3D printed onto food from a distance with accurately customized ingredients. By incorporating protein sources, healthier diet options can be created for patients who have certain restrictions on what they can consume.17 Specialists can properly prescribe what patients or the elderly need based on previously gathered data from a remote location. Specialists can also determine which foods or nutrients need to be added to patients' diets by using high-viscosity material extrusion technology. The assistants attending to the patients or elderly can use their smart devices to have the patient's food printed, or have the food or drugs delivered to the patient as soon as they are ready, without the patient having to prepare anything else. After the prescription is given to the patient and feedback is received, IoT technology can store the data and analyze the exact amount of each ingredient or drug that needs to be prescribed next time. Accumulation of prescription data and printing conditions will progressively reduce errors during printing and drug delivery over time. Removing space constraints is essential in ensuring that patients globally can receive adequate healthcare using IoT technology. IoT technology has allowed DDSs to become remote and adaptable to different situations.
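The Cloud-to-printer data path described above can be sketched in a few lines. The sketch below is illustrative only: it assumes a hypothetical print_jobs collection holding G-code produced by the MongoDB-side analysis, a Pi that can reach the database, and printer firmware that acknowledges each G-code line; none of these names come from the patented system.

# Illustrative sketch of the pipeline: the Raspberry Pi pulls a queued job
# from MongoDB in the Cloud and streams its G-code to the Arduino over a
# serial link. Host, collection, fields, and the per-line acknowledgement
# handshake are assumptions for illustration.
import serial
from pymongo import MongoClient

client = MongoClient("mongodb://cloud.example.com:27017")  # hypothetical host
jobs = client["food_printer"]["print_jobs"]

arduino = serial.Serial("/dev/ttyACM0", baudrate=115200, timeout=5)

job = jobs.find_one({"status": "queued"})  # next prescription to print
if job is not None:
    for line in job["gcode"].splitlines():
        arduino.write((line.strip() + "\n").encode("ascii"))
        arduino.readline()  # wait for the firmware's acknowledgement
    jobs.update_one({"_id": job["_id"]}, {"$set": {"status": "printed"}})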
Automating food and DDSs for disaster relief efforts is a possible implementation of this technology, since raw ingredients can be shipped to remote areas easily. This means that, as machines handle the preparation of food, medical responders can focus on treating the injured or ill. Infrastructure for monitoring consumers' medical data can also be established, allowing the assessment of changes in health conditions in real time. These features, in combination with IoT technology, will be essential in improving patients' health.

The applicability of IoT technology has previously been demonstrated through preliminary testing. Using twenty donated 3D printers in cooperation with a company based in South Korea, the research team at The University of Texas at El Paso (UTEP) succeeded in video conferencing with employees of the company while simultaneously having products 3D printed at the company using IoT-based 3D printers. Represented in Fig. 1 is the conceptual diagram of the IoT technology being employed. In this study, remote printing was successfully conducted by using a smartphone to send a design file from the Engineering building at UTEP to The School of Pharmacy located 1.61 km away.

The study aimed to improve the potential of quantitative drug delivery by 3D printing with food and varying the delivered drug amounts based on the printed design or shape. Using 3D printing techniques rather than ordinary ingestion or injection of the drug is expected to reduce the errors of drug delivery. The 3D printer's capability to control the flow rate and printing speed allows quantitative drug control for any design or shape.18 In order to confirm this hypothesis, the optimization of the time and accuracy of 3D printing drugs with food, the quantitative discharge of the material, and the potential of the new DDS in conjunction with the IoT system have been examined.

Experimental Methods

Equipment

The PTE method allowed the quantitative printing of highly viscous food material or biomaterial.16 The designs of the developed IoT-enabled equipment were patented and used for testing in this research.7,23,24 Using the printers, medication was printed into food products or onto the food's surface. The combination of the food and medical industries has allowed the customization of food with specific amounts of ingredients and drug dosages for patients in the manner previously described.

Materials

In the experiment, water, mayonnaise, ketchup, and peanut butter, which are all highly viscous victuals, were used to verify that the needed components were dissolved in a solution and that the composition was controlled. Any nutrients and their compositions can be controlled and placed in a variety of highly viscous liquids in the form of a Ringer's solution, allowing patients to receive more accurate amounts of nutrients at remote locations. Represented in Table 1 are the viscosity, density, pressure, and flux of each of the materials used.

Quantitative Control

The most important feature of IoT-enabled 3D printing for patients in the medical field is quantitative control of the material being discharged. Since even minimal changes in the amount or composition of drugs or food may worsen diseases or cause severe side effects for patients and the elderly, IoT technology will contribute significantly to easing symptoms and reducing side effects. This technology features scales that allow operators to control and verify how much of a material is discharged during printing.
To ensure the applicability of this technology in a Ringer's solution, a supply of medical salt water was discharged within the solution. Whereas the injection error range in a typical injection method is between 5 and 10 ml based on 150 ml, salt water was used experimentally to find the range of pressure signals where more accurate injection was possible. The experiment was conducted by injecting salt water into distilled water, then injecting various materials repeatedly within the solution.

Time and Accuracy with Shape of Printing

Drugs manufactured using IoT technology in the field should be delivered to patients as soon as possible. A certain amount of medicine must be printed within three minutes and accurately delivered to where it is needed. Designs with different curves and shapes were printed with a head speed of 1.6×10⁻² m/s, and a vitamin C composition of 10 wt.% was dissolved in ketchup. The experiment was repeated to effectively print the entire drug by controlling the high curvature and size of the shapes using different designs. The theoretical error range and accuracy of printing were obtained by calculating the difference in volume between each discharged print.

Printing Stability in Relation to Head Traveling Speed

The stability of the printed shape with respect to increasing head speed was observed. Since drugs produced using IoT technology must be delivered in a short time, an attempt was made to print a certain amount of a drug within 10 minutes and then accurately deliver it. For this purpose, it was necessary to know how accurately the different shapes were printed at the slowest speed of 2.0×10⁻² m/s. When printing the various shapes, ketchup and mayonnaise were used as ingredients with vitamin C incorporated into them. The HP formula was also utilized to simulate the overall printing time. This assisted in reducing the printing time by increasing the head traveling speed and in predicting the accuracy and stability of discharge of the materials.

Time and Accuracy with Shape of Printing

In the case of printing vitamin C into ketchup with a head speed of 1.6×10⁻² m/s, it was confirmed that the vitamin was evenly distributed within 1.8% of the target position, regardless of the shape used. Provided in Table 2 below are the printed shapes and the absolute amounts of vitamin C added to them, obtained by experimenting with the given shapes repeatedly at a head speed of 1.6×10⁻² m/s. The PTE method was used, along with a tip diameter of 1.6×10⁻³ m. Depending on the shape shown in the table below, it was found that 101.5 mg to 376.4 mg of the vitamin was printed, which took between 27.5 and 102 seconds.

Time Savings

The effect of increasing the head speed on the printing time and the stability of the printed shape was observed. Shown in Fig. 3 are the times taken to print each of the shapes with increasing head speed, relative to the time taken at 2.0×10⁻² m/s. Each shape's time varied depending on its curvatures and angular parts. These shapes were printed by increasing the head speed from 2.0×10⁻² m/s, at which printing takes about 30 minutes, to confirm the point of highest stability in the figure where quantitative discharge was possible. It was confirmed that any shape could be stably printed in the range of 4.0×10⁻² m/s to 4.8×10⁻² m/s. In the case of complex shapes, increasing the head speed over 6.0×10⁻² m/s did not reduce the time needed. As the curvature or bending in a shape increases, so does the amount of time needed to print it.
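To illustrate how the HP formula feeds the printing-time simulation, the sketch below computes the volumetric flow rate Q = π·ΔP·r⁴/(8·μ·L) through the tip and combines it with the head traveling speed. The numeric values are placeholders loosely modeled on the setup described here, and the model assumes laminar, Newtonian flow, which real food materials only approximate.

import math

def hagen_poiseuille_flow(delta_p, tip_radius, viscosity, tip_length):
    """Volumetric flow rate Q = pi * dP * r^4 / (8 * mu * L) through the tip,
    assuming laminar, Newtonian flow (a simplification for food materials)."""
    return math.pi * delta_p * tip_radius**4 / (8.0 * viscosity * tip_length)

def estimated_print_time(volume, path_length, head_speed, flow_rate):
    """Printing time is bounded by whichever is slower: extruding the
    required volume, or traversing the design path at the head speed."""
    return max(volume / flow_rate, path_length / head_speed)

# Placeholder values: 1.6e-3 m tip diameter, 2.3e-2 m tip length,
# a ketchup-like viscosity, and a hypothetical driving pressure.
q = hagen_poiseuille_flow(delta_p=2.0e4, tip_radius=0.8e-3,
                          viscosity=50.0, tip_length=2.3e-2)
t = estimated_print_time(volume=2.0e-6, path_length=1.2,
                         head_speed=4.0e-2, flow_rate=q)
print(f"Q = {q:.3e} m^3/s, estimated time = {t:.0f} s")

This simplified model is also consistent with why measured times flatten out at higher head speeds: stops, bends, and direction changes add time that the formula does not capture.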
Time Reduction and Accuracy by Head Traveling Speed

By incrementally increasing the head's speed by 0.8×10⁻² m/s from 1.6×10⁻² m/s, regardless of the printed shape, the time taken to complete each print was divided by the printing time at the head speed of 1.6×10⁻² m/s. This value is described as the Printing Time Ratio in the figure below. If the head speed is increased by a factor of two, the Printing Time Ratio should be halved. The reduction of time according to the HP formula is represented by a dotted line and labeled as the Ideal Case. It was confirmed through the simulation that increasing the head speed does not reduce the printing time continuously, depending on the different viscosities of the materials and how much bending is prevalent in the designs. The simulation results confirmed that, if the head speed was increased from 1.6×10⁻² m/s to 4.0×10⁻² m/s, the printing times did not deviate significantly from the theoretical value, regardless of the shape. The printing simulation below showed that the time required for actual printing was not significantly reduced, unlike the theoretical prediction. This depended on the stopping of the material, bending, changes of direction, etc.

As shown in the results in Fig. 4 and Fig. 5, the head speed was increased by intervals of 0.8×10⁻² m/s from 1.6×10⁻² m/s to 4.0×10⁻² m/s while producing a penguin design. As a result of printing the shape repeatedly, the total discharge with increasing head speed was 1.80 ± 0.05 g and the accuracy percentage was 2.96% in the case of ketchup. For the mayonnaise with vitamin C added, the total discharge was 1.49 ± 0.06 g with 4.22% accuracy. As shown in Fig. 4 and Fig. 5, when the head speed was 4.0×10⁻² m/s, the discharge force overcoming the friction between the material and the piston tip was constant. The error range was 0.68% for ketchup and 2.6% for mayonnaise, both lower than the average error range. As presented in Fig. 4 and Fig. 5, the stability of the shape was low when it was printed using a head speed of 4.0×10⁻² m/s. The surfaces of the printed materials were not smooth, showing incomplete shapes compared to other head speed conditions. The surface roughness worsened as the shape became more complicated.

Quantitative Discharge with Respect to Time

Provided in Fig. 6 below are the analyzed weights of the printed designs according to the printing time. Using the PTE method, (a) water, (b) mayonnaise, (c) ketchup, and (d) peanut butter were printed with 500 mg of the drug once it was dissolved. As displayed in the graph of Fig. 4, the proportional line of the printing time by the total weight indicated that all of the materials were quantitatively discharged for 300 seconds. In order to decrease the change in pressure (∆P) and optimize the extrusion, a tip diameter of 1.66×10⁻³ m and a tip length of 2.30×10⁻² m were used. Vitamin C at 10 wt.% was included in the food material, lowering its viscosity. This increased ∆P within the piston, increased the flux of the materials (q_w), and increased the total discharge weight for all experiments above the expected, theoretical value. As confirmed by the previous experiment, whose results are shown in Fig. 6, quantitative control of the materials during discharge was attained. The outcome of the research is promising since it demonstrates future uses in controlling the exact amount of any nutrient, from carbohydrates to fats, as well as medication, allowing for the customization of patients' diets. As displayed in Fig.
7, patients are capable of adding or removing their prescribed medications or nutrients from their preferred foods. Previous DDSs have been based on targeting and control methods that are not regularly attainable for patients unless they are present at a medical facility. Rather than reusing these standard DDS methods, controlling the composition of the food and medication, as shown below, will result in increased accessibility for patients who have difficulties with mobility.

Figure 7. Controlled printing of the nutrients or medication incorporated into different food products.

Patient-specific Drugs

For many medications, drug intake is recommended during meals, 30 minutes after meals, or immediately after meals. In the case of those living with diabetes, patients must take the drugs within 30 minutes after a meal for the drugs to be effective. However, there is the problem of the elderly or children forgetting to take their prescribed medication, which can cause side effects or symptoms to intensify. Making medication in the form of liquid solutions and pills has enabled elderly and ill patients to take drugs effectively by mixing them with ordinary food. Exhibited in Fig. 8 are printed products with various shapes and materials, showing that drugs can be printed in any form or shape on the food. Assuming that 500 to 1000 mg of metformin HCl is to be ingested, and the solution is produced by dispersing the drug in an aqueous high-viscosity material, the addition of chemicals, water, and other solids allows the drug content to be adjusted to 10 wt.% of the fluid. The patient may then take 5 to 10 g of a 10 wt.% metformin slurry in water at a time. It was confirmed that the drug can be delivered in various shapes by printing less than 10 g onto food such as semi-solid water, mayonnaise, ketchup, jam, and peanut butter.

When IoT technology is utilized, real-time data will be analyzed and used to identify the points where the shape can collapse, depending on the design or printing head speed. The material can then be discharged from the points where the shape will not collapse. This allows patients to eat 3D printed food that will not subside in a short time. By analyzing printing materials in real time, each material can be stably printed in any design. This was made possible by optimizing the printing head speed according to the type of material, its viscosity, and the number of bends in the design. As shown in Fig. 8, the prints of a design with various materials were stable every time, without collapse or spillage, even though each design was printed three times. It is noteworthy that a patient can choose any food material loaded with drugs and print the designs without any collapse or unappealing shapes. Real-time analysis using IoT technology will allow the detection of any substance lost or overflowed during printing, giving specialists the capability of precisely determining the next prescription for the patient. The potential of the IoT system for patient-specific drugs is not limited to real-time analysis of the design or printing status; it can also be utilized to monitor environmental conditions for material storage within the printer. This will allow the printer's systems to compensate for these factors and keep the material at optimal conditions for use when it is needed.
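The dosing arithmetic above can be checked directly: at 10 wt.% loading, each gram of slurry carries 100 mg of drug. A minimal sketch:

def slurry_mass_for_dose(dose_mg, loading_wt_fraction=0.10):
    """Grams of drug-loaded slurry needed to deliver dose_mg of drug.
    At 10 wt.% loading, 1 g of slurry carries 100 mg of drug."""
    return dose_mg / (loading_wt_fraction * 1000.0)

# Matches the 5-10 g intake stated for 500-1000 mg of metformin HCl.
print(slurry_mass_for_dose(500.0))   # 5.0 g
print(slurry_mass_for_dose(1000.0))  # 10.0 g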
As all medications have different prescription labels and handling guidelines, medications that cannot be stored in large quantities outside of medical facilities, or that need special containment, will still need to be stored according to specialists' instructions or held at medical facilities. The limitations observed in this experiment concerned the constraints placed on the storage of materials within the printers and the security of individuals' data. The depletion of drugs or food material would require nurses or medical assistants to replenish the material in order for patients to continue receiving adequate care within their homes. In addition, patients' sensitive health data needs to be kept confidential and protected from potentially fraudulent activity. Future studies will present solutions to these limitations by examining procedures to sustain the levels of materials within remote 3D printers and by researching methods to strengthen the security systems of the IoT database.

Conclusion

The revolutionary patient-specific prescription has been successfully verified by converging 3D food printing technology with DDSs. Quantitative dosages of drugs were incorporated into the composition of various foods. Vitamin C and other food ingredients were precisely printed within 5% of the theoretical target and within five minutes. The prints could be created using various figures, regardless of the design or the distance from where the print was ordered to where it was produced. The application of the IoT-enabled 3D printer makes it possible, given time and correct nutritional plans, to heal patients who require an effective DDS. Consumers' daily use of the IoT system is a realistic implementation that will allow them to print personalized meals. Ordinary users can customize any victual product with their choice of ingredients and shapes and have it produced in minutes. Customers can remove specific ingredients that they are allergic to, or add the ones they prefer in accurate amounts. The IoT technology will also monitor changes in mood, appetite, health, and many other factors in real time. This means both patients and day-to-day customers can video conference with a doctor who can diagnose their conditions and prescribe dosages of medication and food for them to take. Exchanging messages or gifts using additive manufacturing (AM) technology is also possible by representing special and unique shapes or images on food. Such capabilities were not available until the emergence of IoT 3D printing, and the possible applications of this technology are copious. A better understanding of how this process would work, and a step-by-step application from the medical field's perspective of DDSs, will lead the way to real innovation.
Effects of Carbon Content on Wear Property in Pearlitic Steels

To clarify the effects of carbon content on rolling contact wear in steels, the authors conducted a two-cylinder rolling contact wear test using pearlitic steels with carbon contents ranging from 0.8 to 1.0 mass% and studied the relationship between carbon content and rolling contact wear. In addition, the authors examined the dominating factor in the rolling contact wear of pearlitic steels and the work-hardening rate of the rolling contact surface. The main findings are as follows: The wear resistance of pearlitic steels improves as carbon content increases. The dominating factor in the rolling contact wear of pearlitic steels is the rolling contact surface hardness (RCSH). The improved wear resistance of pearlitic steels is attributable to an increase in RCSH due to a higher work-hardening rate of the rolling contact surface as carbon content increases. The reason the work-hardening rate of the rolling contact surface of pearlitic steel rises with carbon content is considered to be as follows: an increase in cementite density increases the amount of dislocation in the matrix ferrite and promotes grain refinement of the matrix ferrite. As a result, the matrix ferrite is strengthened through the promotion of dislocation hardening and grain refinement.
Misao Okawa, the world's oldest person, died on Wednesday at the age of 117. Members of her family told the Associated Press that Okawa died of heart failure and stopped breathing at her nursing home in Osaka, Japan. The Guinness World Records has declared Menahem Asher Silva Vargas, a 37-year-old adult lawyer, the owner of the world's largest Harry Potter memorabilia collection. His trove of Hogwarts treasures contains over 3,000 pieces of junk. Congrats. Today in dumb controversies: Yesterday's world record for "Most People Twerking Simultaneously" has been formally challenged. Big Freedia is the undisputed queen of sissy bounce. She's a New Orleans hip-hop diva who's toured around the country with a twerking team since long before the word demanded a supercut. Her signature track is "Azz Everywhere," though she's also got one called "Make Ya Booty Go," and now Big Freedia wants to reclaim her association with p-popping by setting the Guinness World Record™ for "Most People Twerking Simultaneously," a feat that will be attempted in New York City's Herald Square any minute now. Michelle Obama is gathering hundreds of children at the White House tomorrow to attempt to break the Guinness World Record for the most people doing jumping jacks at the same time. She's part of a push by National Geographic Kids to get more than 20,000 people around the world to do jumping jacks for one minute at the same time. Feeling old? Well, you're an applesauce-smeared toddler next to Rebecca Lanier, a great-great-great-great grandmother from Ohio who turned 119 today. Here's a news report out of Karachi, Pakistan that features nineteen—yes, nineteen—college girls stuffing themselves into a Smart Car in order to be inducted into the Guinness Book of World Records, because why the hell not? Watch inside. Standing in a tent on the campus of The College of New Jersey, Matty Daley and Bobby Canciello earned a place in the Guinness World Records today by kissing for 33 hours. They livestreamed video of it and just finished. A traffic jam on China's Beijing-Tibet Expressway that started nine days ago is still going strong, stretching over 60 miles. Drivers are playing cards on the road, and some have suggested holding concerts to entertain stranded motorists. 60 miles! This clip makes you wonder why someone would spend their time mastering a talent so inane and genuinely gross. Our sandwich artist holds the Guinness title (he claims), so I guess that's at least something. Tobey Maguire's Golden Globe Nomination: Bought with DVD Players? Nikki Finke is digging through the nasty negative campaigning that fills her inbox as the Oscars, Golden Globes, etc. draw near. The most salacious smear: Leonardo DiCaprio bought Tobey Maguire his Brothers Golden Globe nomination... with Blu-ray DVD players?
import datetime from copy import deepcopy from typing import Union, Optional, Type import pandas as pd from datacode.models.dtypes.base import DataType class PeriodType(DataType): name_roots = ('period',) def __init__(self, freq: str, categorical: bool = False, ordered: bool = False): super().__init__( datetime.datetime, pd_class=pd.PeriodDtype, categorical=categorical, ordered=ordered, ) self.freq = freq self.equal_attrs = deepcopy(self.equal_attrs) self.equal_attrs.append('freq') self.repr_cols = deepcopy(self.repr_cols) self.repr_cols.append('freq') @classmethod def from_str(cls, dtype: str, categorical: bool = False, ordered: bool = False): dtype = dtype.lower() freq: Optional[str] = None for name in cls.name_roots: if dtype.startswith(name): _, freq_extra = dtype.split('[') freq = freq_extra.strip(']') if freq is None: raise ValueError(f'Dtype {dtype} does not match valid names for {cls.__name__}: {cls.names}') return cls( freq, categorical=categorical, ordered=ordered ) @property def read_file_arg(self) -> Union[Type, str]: return f'period[{self.freq}]'
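A hypothetical usage sketch for the class above. It assumes the surrounding datacode package is importable, that the DataType base class implements equality over equal_attrs, and it uses standard pandas period frequency strings ('M' for monthly, 'Q' for quarterly):

# Hypothetical usage of PeriodType; __eq__ behavior is assumed to be
# driven by equal_attrs in the datacode DataType base class.
monthly = PeriodType('M')
quarterly = PeriodType.from_str('period[q]', categorical=True)

print(monthly.read_file_arg)        # 'period[M]' -> a pandas dtype string
print(quarterly.freq)               # 'q', parsed from the bracketed dtype
print(monthly == PeriodType('M'))   # True if equality compares freq as well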
Effect of SC40230, a new class I antiarrhythmic agent, on canine ventricular tachycardias

SC40230, [2-ethyl](2-chlorophenyl)-1-piperidinebutanamide, a new class I antiarrhythmic agent, was tested for efficacy against coronary ligation-induced arrhythmias and ouabain toxicity arrhythmias in dogs. Doses of 9 mg/kg i.v. and 15, 25, and 35 mg/kg p.o. significantly reduced ectopic rate in conscious dogs that had undergone ligation of the left anterior descending coronary artery 24 hr prior to testing. Plasma concentrations of SC40230 ranging from 3 to 9 µg/ml corresponded with ectopic rate reductions of 10–82% in the coronary ligation model. SC40230 was well tolerated at all doses tested in the conscious dogs. A 5 mg/kg i.v. dose of SC40230 converted ouabain-induced ventricular tachycardias to normal sinus rhythm in anesthetized dogs. The antiarrhythmic effect of SC40230 in the ouabain toxicity model was reversed by large (≤ 240 U) doses of insulin. The experiments described in this study demonstrated that SC40230 is a well-tolerated new class I antiarrhythmic agent with intravenous and oral effectiveness against ventricular arrhythmias.
Understanding NFT Price Moves through Social Media Keywords Analysis

Non-Fungible Tokens (NFTs) are evolving with the rise of the cryptocurrency market and the development of blockchain techniques, which has led to an emerging NFT market that has rapidly become prosperous. The overall process by which the NFT market rose has not been well understood. To this end, we consider that social media communities, which evolve alongside the market's growth, are worth exploring and reasoning about, as the mineable information might unveil market behaviors. We explore this process from the perspective of NFT social media communities and their impact on NFT price moves with two experiments. We perform a Granger causality test on the number-of-tweets and NFT price time series and find that the number of tweets has a positive impact on (Granger-causes) the price, or vice versa, for more than half of the 19 top authentic NFT projects but for few copycat projects. In addition, to investigate the predictability of price moves using social media features, we conduct an experiment predicting the Markov-normalized NFT price (representing the direction and magnitude of price moves) given social-media-extracted word features, and we interpret the feature importance to find insights into the NFT communities. Our results show that with social media words as the predictors, all 19 top projects achieve a testing accuracy above the random baseline. Based on the feature importance analysis, we find that both general market-related words and NFT event-related words make a markedly positive contribution to predicting price moves.

INTRODUCTION

The Non-Fungible Token (NFT), with the earliest recognized example created in 2014 by McCoy and Dash registering a video clip on a blockchain, refers to a type of blockchain token that is unique and non-replicable, so that it can designate the ownership of artwork, in-game items, domain names, assets in decentralized finance (DeFi), etc. The ownership is stored in a decentralized manner utilizing the emerging blockchain technique, which prevents any centralized authority from holding the unilateral right to revoke the ownership. After the early pioneer stage, the Ethereum NFT standard ERC721, introduced in 2017, shaped mainstream NFT projects by standardizing NFT creation, transfer, and project deployment. The standardization was followed by emerging NFT marketplaces for trading NFTs. NFT marketplaces have their users trade NFTs of art images, music, gaming cards, domain names, etc., for cryptocurrency. Opensea, the largest NFT marketplace, has reached a total record of $31 billion in volume and 1.8 million traders as of June 2022. The NFT trading volume as of May 2022 exceeds $37 billion, close to the 2021 total of $40 billion, even though since April 2022 the NFT market has been accompanied by the start of a cryptocurrency bear market.

As NFT trading volume has been rising for two years, the data generated by the market has begun to unveil what NFTs contribute in practice as an innovation. The close relation between NFTs and artwork will probably bring to mind that NFTs encourage artistic creativity by connecting small individual artists with wider collectors to earn more profit. However, recent research by Vasan et al. shows that although significantly more artists have joined the NFT digital art market, the artist clusters are driven by homophily, i.e., successful artists invite successful artists into the NFT market and create similar sales patterns.
They highlight the formation of artist-collector ties, i.e., some successful artists receive repeated investment from a small group of collectors. According to Nadini et al., as of April 2021, the top 10% of buyer-seller pairs contributed as many transactions as the remaining 90%. These findings lead us to consider that what the NFT ecosystem builds may pertain to member ties and communities. Buyers willing to buy an expensive NFT from a collection expect the NFT to be an entrance ticket to the community behind the collection. Possession of those NFTs may bring private connections to the owners as well as member benefits, such as the right to receive airdrops of new related project NFTs, or to join in making decisions on the project's funds, etc. Apart from the private connections and benefits, the project teams also build public social media communities, mostly on Twitter or Discord. Community interaction around the development of a project continues to attract not just NFT owners but any social media user to join and engage in the communities. We consider that the rise of prices would be witnessed by the communities formed on social media, so we intend to investigate the relationship between social media content and NFT price moves to understand the evolving NFT market.

In our work, we collect the historical tweets and NFT trade transactions of 19 top collections ranked by Opensea in volume and 11 corresponding copycat collections (for RQ2). We conduct research on the following research questions:

RQ1: Does the activeness of a social media community help in forecasting the prices, or vice versa?

RQ2: Will the causal relationship be stronger in authentic NFT projects than in copycat projects, since copycat projects may not sustain social media community building the way authentic projects do?

RQ3: Are social media word features good predictors of the direction and magnitude of NFT price moves? More importantly, which word features most affect the price moves?

To answer RQ1, we perform a Granger causality test on the time series of the number of tweets and the average trading price for each project. For RQ2, we compare the Granger causality test results of the authentic collections with the results of the copycat collections. For RQ3, we set up a prediction task where we first divide both tweets and transactions into segments by timeframes. We propose a word-vector extraction method based on term frequency-inverse document frequency (TF-IDF) to extract the important words for each timeframe, treating the words in each timeframe as a document. The most relevant extracted words are likely to represent events in each timeframe, as TF-IDF emphasizes words that appear not frequently across all timeframes but heavily in certain ones. The TF-IDF scores for each word are used as the features, along with a Markov-normalized average NFT trading price as the ground truth for each timeframe. We use a simple multi-layer perceptron (MLP) regression model to perform the prediction task and analyze the feature importance to understand which features contribute to the prediction, in order to find insights into the social media content.

We summarize our results and contributions: We investigate the causality relation between the activeness of a social media community and NFT price on 19 top authentic projects and 11 copycat projects.
Our results show that 11 out of 19 authentic projects exhibit marked Granger causality between the number of tweets and the NFT price, or vice versa, while only 2 copycat projects show Granger causality. The test results show that the NFT price and the social media community activeness are useful in forecasting each other for an NFT project, and they support our hypothesis that copycat projects show weaker evidence. We explore NFT price predictability given social media word features. We apply a TF-IDF-based method to retrieve the most relevant words for each timeframe as the features, and a Markov-normalized average price as the labels. Our empirical results show that all 19 authentic projects achieve a testing accuracy above the random baseline, demonstrating a degree of predictability. We analyze the feature importance of the word features and find that general market-related words and NFT event-related words account for a notable portion of the words with the most positive importance. These words contribute much more to predicting the NFT price than other words. RELATED WORK Recent research related to our work includes NFT market analysis and studies of the NFT market and social media, which are addressed in detail in the following. Other related works concern social media text analysis for stock movement prediction, for crude oil price prediction, and for global cryptocurrency price trend prediction. NFT Market Analysis The NFT market has seen tremendous growth in trading volume over the past two years. Nadini et al. characterized the market's statistical properties, such as the distribution of average price and sales per NFT, from June 2017 to April 2021. They also investigated the predictability of NFT sales given the sale history, and of NFT price given visual features. White et al. analyzed the sales data from OpenSea between January 2019 and December 2021 and found that a small group of whale NFT collectors is driving massive market growth. Franceschet proposed a rating method utilizing artist and collector trading networks, evaluated data from the SuperRare NFT market, and derived network metrics to suggest investment strategies. Besides, other research discussed potential fraudulent behaviors in the emerging NFT market, and recent studies summarized malicious behaviors in the NFT space. Das et al. performed an analysis of NFT marketplaces to discuss the security issues the NFT market is facing, one of which is counterfeit NFT creation. They performed a quantitative analysis of counterfeit NFTs by searching all NFTs in markets for counterfeit collections with similar collection names, identical image URLs, or images similar to those of authentic NFTs. We note the lack of research comparing authentic NFT projects with counterfeit NFT projects. To this end, we study the problem from the social media perspective, investigate the relation between social media activeness and NFT price, and compare the authentic NFT projects with the counterfeit NFT projects. NFT Market and Social Media Previously, academic work has been conducted on the interaction between social media and cryptocurrency. One example is the use of social media indicators for cryptocurrency price-move prediction. Besides, Phillips et al. investigated which topics discussed on social media are indicative of cryptocurrency price moves using a statistical Hawkes model, illustrating the results with the words that precede positive or negative returns. Also, Mendoza-Tello et al.
analyzed the impact of social media on increasing trust in the use of cryptocurrencies. In addition, Nizzoli et al. studied social media manipulation patterns. They detected social media bot accounts that broadcast suspicious links and summarized the deception schemes in online cryptocurrency communities. NFTs are traded on cryptocurrency marketplaces and share a tight technical relation with cryptocurrency. NFT projects also evolve alongside emerging NFT communities, similar to cryptocurrency. Social media plays an essential role in NFT community development, since platforms such as Twitter, Reddit, or Discord are where people learn about new events for the NFT projects. Recent works showed that social media features improve an NFT valuation classification task. Aside from the market, Casale-Brunet et al. analyzed the NFT communities on Twitter using social network analysis. They found that most top NFTs can be considered a single community, where most top projects are influenced by the development of the Bored Ape Yacht Club 1 collection from a social network perspective. However, we note the lack of a study investigating the relationship between the language content in these communities and NFT price growth. Therefore, in our work we extend the analysis to the impact of important words used in social media communities on NFT price moves. DATA COLLECTION We collect the NFT token trade transactions for the most successful NFT collections from the Opensea top 19 (top 20 excluding the marketplace Rarible) as of June 2022 and their corresponding fake or copycat collections, listed in Table 1. The data was collected by querying the Google BigQuery 2 bigquery-public-data.crypto_ethereum resource given the smart contract addresses of those NFT collections. The transactions contain address from, address to, token id, transaction hash, transaction value (price in ETH), and block timestamp. A transaction with a value of 0 is a transfer, while one with a positive value is a sale emitted by the smart contract. If more than one NFT was exchanged in the same transaction, we split the transaction value equally. We also collect the tweets associated (from, reply to, or @) with the Twitter accounts in Table 1, covering the period between Jan 01, 2018 and June 01, 2022, except for CryptoKitties, for which we use Sept 16, 2018 to June 01, 2022 due to the large volume. To drop noisy data, we filter out tweets with fewer than 5 likes for the original collections and fewer than 1 like for the copycat projects. 1 https://boredapeyachtclub.com 2 https://cloud.google.com/bigquery GRANGER CAUSALITY TEST ON TWEETS NUMBER AND NFT PRICE Utilizing the collected tweets and transactions, we investigate the temporal causal relationship between social network activeness and average NFT prices, then compare the results for the original projects and the copycat projects. We perform a Granger causality test given the number of tweets and the average transaction values (prices of the traded NFTs) within consecutive timeframes. Granger causality refers to the ability of a lagged time series A to help predict another time series B from the information that series A contains. Projects that do not have enough tweets to initialize the Granger causality test are removed. We set each individual timeframe to a length of 3 days. A minimal sketch of this test is given below.
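The sketch below shows one way such a test could be run, assuming pandas for the 3-day aggregation and statsmodels' grangercausalitytests; the column layout, data-frame shapes, and function names are illustrative assumptions, not the paper's actual code.

import pandas as pd
from statsmodels.tsa.stattools import grangercausalitytests

def granger_tweets_vs_price(tweets, sales, max_lag_frames=3):
    # tweets: DataFrame with a DatetimeIndex, one row per tweet.
    # sales: DataFrame with a DatetimeIndex and a "price_eth" column,
    #        transfer transactions (value 0) already filtered out.
    n_tweets = tweets.resample("3D").size()               # tweet count per 3-day frame
    avg_price = sales["price_eth"].resample("3D").mean()  # average traded price per frame
    frame = pd.concat([avg_price, n_tweets], axis=1, keys=["price", "tweets"]).dropna()
    # H0 (A): the tweet count does NOT Granger-cause the price
    # (statsmodels tests whether the second column Granger-causes the first).
    res_a = grangercausalitytests(frame[["price", "tweets"]], maxlag=max_lag_frames)
    # H0 (B): the price does NOT Granger-cause the tweet count.
    res_b = grangercausalitytests(frame[["tweets", "price"]], maxlag=max_lag_frames)
    # A lag of k frames corresponds to 3k days, i.e. the 3-, 6- and 9-day lags.
    return res_a, res_b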
Within each timeframe, the total number of tweets and the average price of the traded NFTs are calculated (with transfer transactions filtered out) and then used for the Granger causality test. We present the Granger causality test results in Table 2. Note that all projects are tested with 2 types of null hypotheses (A and B) and 3 choices of lag (3-day, 6-day, and 9-day). As for the original projects, 11 out of 19 show null hypothesis rejection, and 8 show rejection of null hypothesis A, indicating that the number of tweets contains information that helps predict the average traded price of NFTs for those projects. Another 5 original projects show rejection of null hypothesis B, meaning that the NFT price has a significant impact on the number of tweets, corresponding to scenarios where falling prices cause fewer tweets or rising prices boost more tweets. By contrast, the fake or copycat projects show weak Granger causality. Only copycat projects considered derivatives, such as Lil Baby Ape Club, show that the number of tweets has a positive impact on NFT prices. NFT PRICE MOVES PREDICTION Having seen evidence of the positive impact of the number of tweets on the NFT price, we next explore the content of the tweets and the features behind the content, which can potentially reveal the hidden factors that drive up NFT prices. Our inspiration is based on the observation that the growth of an NFT project is accompanied by a series of official project events, such as pre-mint promotion, release for sale, airdrops, derivative NFT announcements, and DeFi or GameFi connections, and by community-based events, such as interaction with influencers or celebrities, the creation of memes, and engagement in various online or offline activities. In this section, we propose a method to extract important words with importance scores from the tweets, which we use as features to investigate the relation between these features and NFT price moves. We then set up a price-moves prediction task given the features and analyze the results. Event Words Extraction on Tweets In social media communities, language consists of a wide range of expressions about events and related behaviors. A method to extract the words describing the events from the tweets, without prior knowledge of any events, helps in building the word features. We first divide all the tweets into groups, where each group contains the tweets in a timeframe of d days (d depends on the project lifetime), so that we have a list of groups of tweets ordered by date. We perform an NLP-based method, described in Algorithm 1, to extract the features from the tweets of a timeframe, represented as a vector of importance scores of relevant words. We first extract all nouns and verbs from each tweet using the tool Spacy with the English part-of-speech (POS) tagging model. After obtaining the list of nouns and verbs, we use a term frequency-inverse document frequency (TF-IDF) method to obtain a list of the retrieved important words and their importance weights for the content of that timeframe. The TF-IDF method measures the importance of words: the output is the product of TF and IDF, and a large output value for a word means the word is of high importance or relevance.
TF measures how frequently a word appears in a document, while IDF down-weights words that appear across all documents, since words that occur in every document, such as join or like, appear in almost all timeframes and are likely to be less meaningful. Here we treat the nouns and verbs extracted from the tweets in a timeframe as a document: a document that describes the events of that timeframe. We consider that the words extracted in a timeframe can likely represent the events that happened in it. For example, in some early timeframes, mint has a large TF, and mint does not occur with high frequency in all timeframes. So the timeframes where the word mint occurs with a large TF-IDF are most likely the times when mint events happened. We add a parameter p (lines 4 and 18 in our method) as the minimum frequency for a word to be considered as contained in a timeframe when calculating IDF. The minimum frequency prevents words such as mint, which are mentioned across all timeframes but frequently only within a few of them, from receiving a small IDF score. With the minimum frequency, mint is not considered to occur in most of the timeframes, so it does not end up with a small IDF score. IDF weights down the words that frequently appear in most timeframes, such as love, cool, join, etc. A small IDF leads to a small TF-IDF output. After performing the TF-IDF method, we form the top TF-IDF values of the words in a timeframe into a vector, whose dimension is the size of the union of the word sets over all timeframes. The vectors are used as input features for modeling the relation between events and NFT price moves. An example of the event-word vector extraction process is shown in Figure 1, using Cool Cats NFT as an example. The words extracted in those 3 timeframes, which we confirmed by searching news, correspond to the announcement of the mint in June 2021, the Cool Cats meme competition in August 2021, and the first time the Cool Cats floor price hit 10 ETH in September 2021. However, we do not have ground-truth labels for the events of the NFT projects we collected, so we cannot validate the method by mapping the retrieved words to individual events. Nevertheless, the words are features inherently describing the events that may be unique to a timeframe. We run Algorithm 1 on the tweets of all 19 authentic NFT collections. Table 3 shows the overall result. The tweets of most collections are split into frames of length 2 days. For collections launched within a shorter time, such as Meebits, we use a length of 1 day. Collections that have existed for a longer time, such as CryptoKitties, use a timeframe length of 4 days. The lengths are chosen so that a collection does not end up with too few timeframes (e.g., 50) or too many (e.g., 400). The number of extracted words ranges from 100 to more than 500. The union of all distinct words from all 19 projects is a set of 2401 words. We visualize the overlap of the word sets from each project in Figure 2 to see the proportions of the intersections, i.e., which words are shared or unique across those communities. The result shows that 0.07% of the words appear in all 19 projects; 3%, 4.9%, 7.7%, and 31.3% of the words appear in 15-18, 10-14, 6-9, and 2-5 projects, respectively; and 52.4% of the words appear in only one project.
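As a concrete illustration of the extraction just described, the sketch below POS-filters the tweets of each timeframe to nouns and verbs and then scores words with TF-IDF, where a word only counts toward document frequency if it is mentioned at least p times in a timeframe. The paper specifies only the POS filtering, the TF-IDF scoring, and the minimum-frequency parameter p; the spaCy en_core_web_sm model, the smoothed IDF formula, and all names below are our assumptions.

import math
from collections import Counter
from typing import Dict, List

import spacy

nlp = spacy.load("en_core_web_sm")  # assumes the small English model is installed

def extract_nouns_verbs(tweets: List[str]) -> List[str]:
    # Keep only nouns and verbs, lowercased and lemmatized.
    words = []
    for doc in nlp.pipe(tweets):
        words.extend(t.lemma_.lower() for t in doc if t.pos_ in ("NOUN", "VERB"))
    return words

def tfidf_per_timeframe(frames: List[List[str]], p: int = 3,
                        top_k: int = 30) -> List[Dict[str, float]]:
    # frames[i] is the noun/verb list of timeframe i; returns top-k TF-IDF words per frame.
    counts = [Counter(f) for f in frames]
    n = len(frames)
    # A word "occurs" in a frame only if mentioned at least p times (minimum frequency),
    # so bursty event words like "mint" keep a high IDF.
    df = Counter(w for c in counts for w, k in c.items() if k >= p)
    out = []
    for c in counts:
        total = sum(c.values()) or 1
        scores = {w: (k / total) * math.log(n / (1 + df.get(w, 0))) for w, k in c.items()}
        out.append(dict(sorted(scores.items(), key=lambda x: -x[1])[:top_k]))
    return out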
We then observe the words that appear in most of the projects: the 16 words (0.07%) that appear in all projects are check, congrat, floor, hour, keep, like, market, mint, miss, month, sale, start, team, today, use, week. These are mostly words used to describe market-related or event-related content. We find that market-related or event-related words also account for a large portion of the words appearing in 15-18 projects, such as volume, create, wallet, find, owner, list, hold, giveaway, own, eth, future, etc. The 52.4% of words that appear in one distinct project are more likely to be terms used only in that community, such as milk and cooltopia (the name of the token and the name of the community) for Cool Cats NFT, or breeding and adoption (two behaviors in the game) for CryptoKitties. Normalized Price Regression With the extracted word vectors as input features, we can further investigate the predictability of NFT price moves using a simple machine learning regression model. The prediction task provides understanding of both the predictability of the NFT price given social media information and the question of which features contribute to the prediction positively or negatively more than others. Since each feature represents a word with a weight describing events in a timeframe, we aim to discover insights into the relation between the language in the social media communities and the NFT price. We split all the timeframes of a project into 80% for training and the latest 20% for testing, ordered by date. The input features are the word vectors of one timeframe of the project, and the ground truth is the normalized average NFT price for that timeframe. The normalization we use is a Markov-assumption normalization that tackles the trend problem in time-series price data: the price may grow or decrease by several orders of magnitude, forming trends. For example, for the BAYC NFT, in the timeframe of July 1, 2021, the average trading price is 3.37 ETH while the prices on nearby dates are around 3-5 ETH, whereas in the timeframe of Feb 25, 2022, the average trading price is 90.32 ETH while the prices on nearby dates are around 80-100 ETH. The trend is created by the market behaviors of the whole NFT development; therefore, predicting the raw price given the event-word vector offers little hope of finding the relation between certain events described by words and the NFT price. Instead, we predict the price-change proportion, i.e., the ratio of the current price to the average price of several timeframes prior to the current one. Inspired by early NLP works using the Markov assumption, where the probability of a symbol depends only on its previous symbols, we use the normalization p̃_t = p_t / ((1/m) Σ_{i=1..m} p_{t-i}), where p_t is the raw average price at timeframe t and m is the Markov window length; i.e., the Markov-normalized price is the raw price divided by the average of its m previous raw prices. The first m timeframes are dropped, since they have no m previous timeframes. After the normalization, for example with m = 3, a normalized value of 1.12 means the average NFT price of the current timeframe is 12% larger than the average price of its previous 3 timeframes. Meanwhile, we use a mean absolute error (MAE) loss with a penalizer for wrong price-move predictions: L = (1/N) Σ_i w_i |y_i - ŷ_i|, where w_i = 1 if the ground truth y_i and the prediction ŷ_i are both > 1 or both < 1, and w_i = 2 otherwise.
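The following sketch implements the Markov normalization and the penalized MAE loss above, together with a small MLP regressor. The paper states only that a simple MLP is used; the PyTorch framework, the hidden size, and the training loop are our assumptions.

import numpy as np
import torch
import torch.nn as nn

def markov_normalize(prices, m=3):
    # Divide each average price by the mean of its m previous prices;
    # the first m timeframes are dropped (no history available).
    prices = np.asarray(prices, dtype=float)
    return np.array([prices[t] / prices[t - m:t].mean() for t in range(m, len(prices))])

def penalized_mae(pred, target):
    # Weight 1 when prediction and ground truth are on the same side of 1.0
    # (direction correct), weight 2 otherwise, matching the loss above.
    same_side = ((pred > 1) & (target > 1)) | ((pred < 1) & (target < 1))
    w = 1.0 + (~same_side).float()
    return (w * (pred - target).abs()).mean()

def train_mlp(X_train, y_train, epochs=200):
    # X_train: TF-IDF word vectors per timeframe; y_train: Markov-normalized prices.
    model = nn.Sequential(nn.Linear(X_train.shape[1], 64), nn.ReLU(), nn.Linear(64, 1))
    opt = torch.optim.Adam(model.parameters(), lr=1e-3)
    X = torch.tensor(X_train, dtype=torch.float32)
    y = torch.tensor(y_train, dtype=torch.float32)
    for _ in range(epochs):
        opt.zero_grad()
        loss = penalized_mae(model(X).squeeze(-1), y)
        loss.backward()
        opt.step()
    return model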
Results. We evaluate our model using both the regression metric MAE and the classification metrics accuracy and F1 score, over 3 runs. The classification metrics help in assessing the correctness of the predicted direction of movement, and our regression results can easily be reformulated as binary classification results by converting the prediction and the ground truth to 1 or 0, representing the price moving up or down (ŷ := 1 if ŷ > 1 else 0, and the same for y). The prediction results in Table 4 show price-move predictive performance better than the random baseline of 0.5 accuracy for all collections. Since we use a Markov window of length 3 or 5 to confine the normalization to a short period prior to the current price, the predictability reflects the words as predictors of a relatively quick NFT price change, within a week or two, considering the small liquidity. It is important to note that the words used in social media discussion about NFTs are inherently not decisive predictors of price moves, since price changes are accompanied by heterogeneous market behaviors. Nevertheless, the prediction task does help in investigating which words take effect in price-move prediction, which provides more insight into the events behind the language used in NFT communities and their effect on price. Analysis. We use the MDA (mean decrease in accuracy) method on the models of each collection to compute the importance of the features (words). The method calculates importance by measuring how much the validation metric degrades when one of the features is removed. As a result, we can study the patterns in the words with the highest positive, followed by the lowest negative, importance scores. Figure 3 illustrates these important features. Positive importance values indicate that the words contribute positively to the normalized-price prediction task. General market-related words occupy a notable portion of the positive words for all of the projects. These include buy, owner, floor, price, wallet, holder, market, sale, sell, money, own, offer, transaction, volume, eth, flip, earn, etc. In particular, the words owner (own) and holder appear among the positive words of 10 of the projects. We suspect that owner and holder are associated with NFT whales or celebrities who bought or have been holding NFTs mentioned in social media or the press. One historical example: in May 2021, a big whale NFT collector, "Pranksy", bought a large number of BAYCs, and the news spread rapidly on social media, leading to a speedy sell-out of the entire BAYC collection. The influence of whales or celebrities is inevitably one of the most probable reasons for NFT price rises. In addition to owner, the word floor, which corresponds to the floor price, appears in the top 3 positive words for the projects Clone X, Doodles, Cool Cats, and World of Women. Meanwhile, the word eth also appears frequently, which supports recent research on the positive correlation between the Ethereum price and NFT sales. Besides, general NFT community- and event-related words, such as mint, airdrop, avatar, pfp, derivative, roadmap, founder, member, team, project, chain, etc., also comprise a marked proportion of the positive words.
The most common NFT events, such as mints, airdrops, and new DeFi announcements, happen when the social media communities mention these words more frequently than at other times. Most of the other positive words, beyond the general market-related and NFT event-related words, are unique terms in each community representing the name of a token, a GameFi or DeFi product, a derivative, or engagement in certain activities. For example, the word mana for Decentraland and milk for Cool Cats are the projects' ERC20 tokens. The word garden for Azuki is the name of the community that owners of Azuki NFTs can access for exclusive streetwear collabs and other live events, just as the word swamp is the community name for CrypToadz. For Bored Ape Yacht Club, the word banana corresponds to the meaning of being extremely excited, and is also a double entendre representing engagement in all ape activities. The word serums for Bored Ape Kennel Club refers to the derivative NFT collection Bored Ape Chemistry Club, which consists of 10k Mutant Serums. The word nest for Moonbirds refers to the activity of earning benefits by staking Moonbirds NFTs. The word vx for Sandbox refers to another collection, CyberKongz VX, whose avatars interoperate with the Sandbox metaverse. We now look at the 20 least important words, which mostly come with negative scores. A negative feature-importance score means that the word contributes no better to the prediction than a random-noise replacement of that feature. Words pertaining to pessimistic projections, concerns, or issues, such as scammer, delist, suit, fail, lose, afford, cancel, wait, miss, raise, drop, etc., appear among the negative words of half of the projects. It is important to note that these negative words do not imply a negative correlation between their appearance and NFT prices. Instead, these words made a negative contribution to the prediction task, i.e., they disturb the predictions of the NFT price. We sometimes see market-related or event-related words, such as hit, floor, sell, founder, sale, and avatar, pop up among the negative words of some projects; these words nevertheless appear less frequently among negative words than among positive words. We also find many community-specific terms among the negative words of individual projects. The words meebit in CryptoPunks, serum in Azuki, doodle in CLONE X, and azuki in Meebits are names of other projects that people mention together, probably because of news such as one beating out another in price, or simply because people are discussing and comparing the benefits to choose one of them to purchase. Other collections that people mention, in addition to the competitive collections, are the copycat collections. For example, punk in Moonbirds may point to MoonbirdPunks, a mixture of CryptoPunks and Moonbirds. Another situation is illustrated by the word vibemas among the negative words of CrypToadz: it comes from the slogan 'Merry !vibemas', originally '!vibe', a trigger word in CrypToadz's Discord for a bot to print out some turtle stickers. The word vibemas is an example of words that may frequently pop up in a specific period but do not benefit the prediction, since market behavior may not be correlated with sudden social media hilarity around a holiday celebration.
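For reference, the sketch below shows an MDA-style permutation importance consistent with the description above: each word feature is shuffled in the validation set and the resulting drop in the metric is recorded. Using directional accuracy as the metric and shuffling (rather than another removal scheme) are our assumptions; the paper does not detail the exact MDA variant.

import numpy as np
import torch

def direction_accuracy(model, X, y):
    # Fraction of frames whose predicted side of 1.0 (up/down) matches the truth.
    with torch.no_grad():
        pred = model(torch.tensor(X, dtype=torch.float32)).squeeze(-1).numpy()
    return float(((pred > 1) == (y > 1)).mean())

def mda_importance(model, X_val, y_val, n_repeats=10, seed=0):
    # Returns one importance score per feature (positive = the word helps prediction).
    rng = np.random.default_rng(seed)
    base = direction_accuracy(model, X_val, y_val)
    scores = np.zeros(X_val.shape[1])
    for j in range(X_val.shape[1]):
        drops = []
        for _ in range(n_repeats):
            Xp = X_val.copy()
            rng.shuffle(Xp[:, j])          # destroy the j-th word feature
            drops.append(base - direction_accuracy(model, Xp, y_val))
        scores[j] = np.mean(drops)         # mean decrease in accuracy
    return scores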
After examining which words have the most positive or negative importance for the price prediction, we inspect the distribution of these top 20 positive and negative words over the timeframes. As Figure 4 shows, 13 out of 19 projects have an early period in which more positive words exist than negative words. The projects that start with a period of more negative words, such as Otherdeed for Otherside, Decentraland, Meebits, CryptoKitties, and Loot (for Adventurers), are mostly GameFi or Metaverse projects. We suspect the likely cause of this pattern is that, in the early stage of NFT project development, the emergence of certain words about the early NFT events of release, mint, and promotion on social media is more likely to contribute positively to predicting price moves, whereas GameFi or Metaverse projects are likely to have a starting stage of contrasting circumstances, such as the building of the metaverse ecosystem or guidance on gameplay; the words describing the rules probably add noise to the prediction. CONCLUSION AND FUTURE WORK This paper contributes to exploring the relation between NFT social media communities and NFT prices in terms of the number of tweets and the content of the tweets. We first present positive results of a Granger causality test between the number of tweets and the price time series for more than half of the authentic projects, compared with rare Granger causality for the copycat projects. We then propose an event-word extraction method and a regression model for predicting a Markov-normalized price given the extracted word vectors, with results showing a certain level of predictability for the normalized price. Finally, we analyze the feature importance and summarize our findings on the events behind the words and their effect on predicting the price. Future work can take several directions. We can expand the data to other platforms, such as Discord and Reddit, to obtain more text content about the events. We can also combine this with extracting other types of information from social media resources: performing network analysis on account nodes with metadata such as the number of subscribers per node and the numbers of likes and comments per tweet, to measure the influence of each node. Recent work proposed methods for learning Twitter user-node representations using graph embeddings to group and understand user types, such as petitioners or promoters. We think social network analysis is a promising direction for future work, since some user nodes within the social network might be influencers or market makers whose behaviors may bear directly on price changes. A different perspective, beyond NFT prices, is analyzing the transaction network by utilizing the social-media-extracted features to perform de-anonymization of transaction addresses. Better de-anonymization will help in detecting fraudulent activities on the NFT market, such as wash trading, to address potential security concerns.
Europe’s leaders are about to consign the Earth to the risk of dangerous climate change, a UN expert says. Prof Jim Skea, a vice-chair of the Intergovernmental Panel on Climate Change, says the EU’s plan to cut CO2 emissions 40% by 2030 is too weak. He says it will commit future governments to “extraordinary and unprecedented” emissions cuts. The Commission rejected the claim, saying the 40% target puts Europe on track for long-term climate goals. The 40% target - proposed by the European Commission - will be finalised at an EU summit this week. A spokesman for the Climate Commissioner Connie Hedegaard said: "Our 40% target is in line with science as it puts us right on track to meet our 2050 goal of cutting emissions by 80%-95%. "This is what developed countries will need to reduce by 2050 according to the IPCC to keep global warming below 2C." But Prof Skea, vice-chair of the economics working group of the IPCC, told BBC News the EU’s 40% target for 2030 would not lead to the desired cut by the middle of the century. He said the easy climate protection measures – like energy saving - had been snapped up, leaving to future leaders the job of introducing new clean technologies in every walk of life. "I don't think many people have grasped just how huge this task is," he said. "It is absolutely extraordinary and unprecedented. My guess is that 40% for 2030 is too little too late if we are really serious about our long-term targets." Step change He believes some politicians have not grasped the relative mathematics of transforming the energy economy step-by-step from 1990 baseline through to 2050. He says the Commission's current stance means that future leaders will need to make a three-fold cut in just 20 years - which Prof Skea believes is scarcely credible. Prof Skea believes governments are setting targets by what appears to be politically achievable rather than what is necessary to transform the way we make and use energy as the century unfolds. Much of the political difficulty lies in fears that Europe’s competitors will not play their part in reducing emissions, leaving EU firms and consumers saddled with high energy prices. Poland says the 40% target will damage its economy. Other nations like the UK say the target should be made more ambitious if the US and China agree strong action to protect the climate. Other negotiations around the EU’s climate and energy package centre on whether Europe should agree a mandatory energy efficiency target. Environmentalists and several industry groups argue this is the best way of cutting emissions whilst also reducing dependency on Russian gas. The UK’s Energy Secretary Ed Davey maintains that nations should be able to decide on their own strategies for cutting emissions without being bound by too many rules. Prof Skea, who is based at Imperial College London, agrees with him. The best way of cutting emissions, he says, is by Europe ratcheting up efficiency standards across all products that use energy. Some politicians recently complained that new EU efficiency standards were denying people the opportunity to buy the best vacuum cleaners. But Prof Skea maintains that industry standards take the least efficient machines off the market, which benefits consumers without excessive political pain.
/** * Copyright (C) 2010-2011 yvolk (<NAME>), http://yurivolkov.com * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.xorcode.andtweet.data; import com.xorcode.andtweet.TwitterUser; import com.xorcode.andtweet.util.MyLog; import android.content.Context; import android.content.SharedPreferences; import android.preference.PreferenceManager; import android.util.Log; /** * This is central point of accessing SharedPreferences, used by AndTweet * @author yuvolkov */ public class MyPreferences { private static final String TAG = MyPreferences.class.getSimpleName(); /** * Single context object for which we will request SharedPreferences */ private static Context context; private static String origin; public static final String KEY_OAUTH = "oauth"; /** * Is this user NOT temporal? * (TODO: refactor the key name and invert value) */ public static final String KEY_IS_NOT_TEMPORAL = "was_authenticated"; /** * Was current user ( user set in global preferences) authenticated last * time credentials were verified? CredentialsVerified.NEVER - after changes * of password/OAuth... */ public static final String KEY_CREDENTIALS_VERIFIED = "credentials_verified"; /** * This is sort of button to start verification of credentials */ public static final String KEY_VERIFY_CREDENTIALS = "verify_credentials"; /** * Process of authentication was started (by {@link #PreferencesActivity}) */ public static final String KEY_AUTHENTICATING = "authenticating"; /** * Current User */ public static final String KEY_TWITTER_USERNAME = "twitter_username"; /** * New Username typed / selected in UI * It doesn't immediately change "Current User" */ public static final String KEY_TWITTER_USERNAME_NEW = "twitter_username_new"; public static final String KEY_TWITTER_PASSWORD = "<PASSWORD>"; public static final String KEY_HISTORY_SIZE = "history_size"; public static final String KEY_HISTORY_TIME = "history_time"; public static final String KEY_FETCH_FREQUENCY = "fetch_frequency"; public static final String KEY_AUTOMATIC_UPDATES = "automatic_updates"; public static final String KEY_RINGTONE_PREFERENCE = "notification_ringtone"; public static final String KEY_CONTACT_DEVELOPER = "contact_developer"; public static final String KEY_REPORT_BUG = "report_bug"; public static final String KEY_CHANGE_LOG = "change_log"; public static final String KEY_ABOUT_APPLICATION = "about_application"; /** * System time when shared preferences were changed */ public static final String KEY_PREFERENCES_CHANGE_TIME = "preferences_change_time"; /** * Minimum logging level for the whole application (i.e. for any tag) */ public static final String KEY_MIN_LOG_LEVEL = "min_log_level"; /** * System time when shared preferences were examined and took into account * by some receiver. We use this for the Service to track time when it * recreated alarms last time... 
*/ public static final String KEY_PREFERENCES_EXAMINE_TIME = "preferences_examine_time"; private MyPreferences(){ } /** * * @param context_in * @param origin - object that initialized the class */ public static void initialize(Context context_in, java.lang.Object object ) { String origin_in = object.getClass().getSimpleName(); if (context == null) { // Maybe we should use context_in.getApplicationContext() ?? context = context_in.getApplicationContext(); origin = origin_in; TwitterUser.initialize(); MyLog.v(TAG, "Initialized by " + origin); } else { MyLog.v(TAG, "Already initialized by " + origin + " (called by: " + origin_in + ")"); } } /** * Forget everything in order to reread from the sources if it will be needed */ public static void forget() { context = null; origin = null; TwitterUser.forget(); MyLog.forget(); } public static boolean isInitialized() { return (context != null); } /** * @return DefaultSharedPreferences for this application */ public static SharedPreferences getDefaultSharedPreferences() { if (context == null) { Log.e(TAG, "Was not initialized yet"); return null; } else { return PreferenceManager.getDefaultSharedPreferences(context); } } public static void setDefaultValues(int resId, boolean readAgain) { if (context == null) { Log.e(TAG, "Was not initialized yet"); } else { PreferenceManager.setDefaultValues(context, resId, readAgain); } } public static SharedPreferences getSharedPreferences(String name, int mode) { if (context == null) { Log.e(TAG, "Was not initialized yet"); return null; } else { return context.getSharedPreferences(name, mode); } } public static Context getContext() { return context; } }
from typing import Dict, List

import numpy as np


def token_array_to_probs(agg_array: np.ndarray,
                         prefix_labels: List[str]) -> Dict[int, Dict[str, float]]:
    """Convert a (tokens x labels) probability array into a sparse mapping from
    token index to {label: probability}, skipping zero entries and the first
    column (typically the null/"O" label)."""
    token_probs: Dict[int, Dict[str, float]] = {}
    # Iterate only over the non-zero entries, ignoring column 0.
    row_indices, col_indices = np.nonzero(agg_array[:, 1:])
    for i, j in zip(row_indices, col_indices):
        # Shift j by 1 because column 0 was sliced away above.
        token_probs.setdefault(i, {})[prefix_labels[j + 1]] = agg_array[i, j + 1]
    return token_probs
<filename>betaas-taas/betaas-taas-taasresourcesmanager/src/main/java/eu/betaas/taas/taasresourcesmanager/endpointsmanager/Subscription.java
/**
Copyright 2014 ATOS SPAIN S.A.
Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors : <NAME>. Atos Research and Innovation, Atos SPAIN SA
@email <EMAIL>
**/
package eu.betaas.taas.taasresourcesmanager.endpointsmanager;

import java.util.Calendar;
import java.util.Date;

public class Subscription implements Comparable<Subscription>
{
    private String appId;
    private String appGateway;
    private String thingServiceId;
    // The period is considered in seconds
    private int period;
    private Date startDate;
    private Calendar lastReceived;
    private int lastResponseTime;

    public Subscription (String application, String appLocation, String thingService, int thePeriod)
    {
        appId = application;
        appGateway = appLocation;
        thingServiceId = thingService;
        period = thePeriod;
        lastReceived = Calendar.getInstance();
        startDate = lastReceived.getTime();
    }

    public String getApplicationId()
    {
        return appId;
    }

    public String getApplicationLocation()
    {
        return appGateway;
    }

    public String getThingServiceId()
    {
        return thingServiceId;
    }

    public float getPeriod()
    {
        return period;
    }

    public Date getStartDate()
    {
        return startDate;
    }

    public int getLastResponseTime()
    {
        return lastResponseTime;
    }

    public Date getExpectedDate()
    {
        Calendar nextDate = (Calendar)lastReceived.clone();
        nextDate.add(Calendar.SECOND, period);
        return nextDate.getTime();
    }

    public void setReceived()
    {
        // Calculate real response time in the last notification (in seconds).
        // Divide before casting, so the millisecond difference cannot overflow an int.
        Calendar now = Calendar.getInstance();
        lastResponseTime = (int) ((now.getTimeInMillis() - lastReceived.getTimeInMillis()) / 1000);
        // Set current last received time
        lastReceived = Calendar.getInstance();
    }

    public int compareTo(Subscription subs2)
    {
        return this.getExpectedDate().compareTo(subs2.getExpectedDate());
    }
}
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	hClient "github.com/hcnet/go/clients/auroraclient"
)

type prometheusWatchedTP struct {
	TradePair TradePair
	Spread    Spread
	Volume    Volume
	Slippage  Slippage
	Orderbook Orderbook
	FairValue FairValue
}

var watchedTradePairs []prometheusWatchedTP

func main() {
	cfg := loadConfig()
	c := trackerClient{hClient.DefaultPublicNetClient}
	watchedTPs := configPrometheusWatchers(cfg.TradePairs)
	trackSpreads(cfg, c, &watchedTPs)
	trackVolumes(cfg, c, &watchedTPs)
	http.Handle("/metrics", promhttp.Handler())
	// ListenAndServe always returns a non-nil error; fail loudly instead of ignoring it.
	log.Fatal(http.ListenAndServe(":2112", nil))
}
import React, { useContext, useEffect, useState } from 'react'; import { BigNumber, ethers } from 'ethers'; import { MerkleDistributor, MerkleDistributor__factory, ERC20__factory, ERC20, } from '../typechain'; import { EthersWalletContext } from '../contexts/wallet'; import ActiveClaim from './ActiveClaim'; import { ClaimInfo, getDistributorAddress, RegistrationInfo, } from '../utils/api'; import ClaimWizard from './ClaimWizard'; import ClaimingDisabled from './ClaimingDisabled'; import watchAsset from '../utils/watchAsset'; import { switchToMainnet, switchToXDai } from '../utils/switchEthereumChain'; import { mainnetChainId, xDaiChainId } from '../utils/chainIds'; interface ActiveClaimControllerProps { claim: ClaimInfo; registrationInfo: RegistrationInfo; payoutChainId: number; nextAmount: BigNumber; } export const TxStates = { Idle: 0, WaitingSignature: 1, WaitingConfirmation: 2, Confirmed: 3, Error: 4, } as const; export type TxState = typeof TxStates[keyof typeof TxStates]; interface ClaimState { txState: TxState; txHash?: string; errorMessage?: string; } const ActiveClaimController = ({ claim, registrationInfo, payoutChainId, nextAmount, }: ActiveClaimControllerProps) => { const [merkleDistributor, setMerkleDistributor] = useState< MerkleDistributor | undefined >(undefined); const [token, setToken] = useState<ERC20 | undefined>(undefined); const [isClaimed, setIsClaimed] = useState(false); const [claimState, setClaimState] = useState<ClaimState>({ txState: TxStates.Idle, }); const { wallet, network, onboardApi } = useContext(EthersWalletContext); const [showWizard, setShowWizard] = useState(false); // initialize contract instance useEffect(() => { const runEffect = async () => { setMerkleDistributor(undefined); if (!wallet?.provider) { console.log(`ClaimController: No wallet set`); return; } console.log( `Claimcontroller: Setting up ethers provider, network ${network}` ); const provider = new ethers.providers.Web3Provider( wallet.provider, 'any' ); const ethersNetwork = await provider.getNetwork(); console.log( `Claimcontroller: Got ethersNetwork with chainId ${ethersNetwork.chainId}` ); console.log(`Claimcontroller: Setting up ethers signer`); const signer = provider.getSigner(); if (ethersNetwork.chainId !== claim.chainId) { // dont try to initialize contract when provider does not match chainId of claim return; } const contractAddress = await getDistributorAddress(claim.chainId); if (!contractAddress) { return; } console.log(`MerkleDistributor address: ${contractAddress}`); try { // check if contract is deployed const code = await provider.getCode(contractAddress); if (code === '0x') { throw Error( `No contract deployed at ${contractAddress} on ${provider.network.chainId}` ); } console.log( `Initializing merkleDrop contract on chain ${provider.network.chainId} at ${contractAddress}` ); const instance = MerkleDistributor__factory.connect( contractAddress, signer ); setMerkleDistributor(instance); } catch (e) { console.log(e.message); } }; runEffect(); }, [wallet, claim, network]); // get token contract useEffect(() => { const getToken = async () => { const provider = merkleDistributor?.provider; if (!provider) { console.log(`No provider set in Merkledistributor!`); return; } const contractAddress = await merkleDistributor?.token(); if (!contractAddress) { console.log(`No token contract set in Merkledistributor!`); return; } try { // check if contract is deployed const code = await provider.getCode(contractAddress); if (code === '0x') { throw Error(`Token contract not deployed at 
${contractAddress}`); } console.log(`Initializing token contract at ${contractAddress}`); const instance = ERC20__factory.connect(contractAddress, provider); setToken(instance); //TEST let name = await instance.name(); console.log(`TokenName: ${name}`); } catch (e) { console.log(e.message); } }; if (merkleDistributor) { getToken(); } }, [merkleDistributor]); // get initial claim status useEffect(() => { const getClaimStatus = async () => { if (merkleDistributor) { // is it already claimed? setIsClaimed(await merkleDistributor.isClaimed(claim.index)); } setClaimState({ txState: TxStates.Idle }); }; getClaimStatus(); }, [merkleDistributor, claim]); // listen for claim events useEffect(() => { if (merkleDistributor && !isClaimed) { // There is some issue with typechain preventing setting the type of 'amount' to BigNumber... const handler = (claimIndex: BigNumber, account: string, amount: any) => { console.log( `Claimed: ${claimIndex.toString()}, ${account}, amount ${amount.toString()}` ); setIsClaimed(true); }; // Look for "Claimed" event for my claim const filter = merkleDistributor.filters.Claimed( claim.index, claim.address, null ); console.log( `Start listening for Claimed event for claim ${claim.index} on chain ${claim.chainId}` ); merkleDistributor.on(filter, handler); return () => { console.log( `Stop listening for Claimed event for claim ${claim.index} on chain ${claim.chainId}` ); merkleDistributor.off(filter, handler); }; } }, [claim, isClaimed, merkleDistributor]); // redeem claim const redeem = async () => { if (onboardApi) { const checkResult = await onboardApi?.walletCheck(); if (!checkResult) { console.log(`Failed walletCheck!`); return; } } if (merkleDistributor) { try { setShowWizard(true); setClaimState({ txState: TxStates.WaitingSignature }); const txResult = await merkleDistributor.claim( claim.index, claim.address, claim.amount, claim.proof ); console.log(`Result: ${txResult.hash}`); console.log(`Waiting for tx ${txResult.hash} to be mined...`); setClaimState({ txState: TxStates.WaitingConfirmation, txHash: txResult.hash, }); const receipt = await txResult.wait(); if (receipt.status === 1) { console.log( `Tx ${receipt.transactionHash} mined in block ${receipt.blockNumber}` ); setClaimState({ txState: TxStates.Confirmed, txHash: receipt.transactionHash, }); } else { console.log( `Tx ${receipt.transactionHash} reverted in block ${receipt.blockNumber}` ); setClaimState({ txState: TxStates.Error, txHash: receipt.transactionHash, errorMessage: `Transaction reverted`, }); } } catch (err) { console.log(`Error while claiming: ${err}`); // @ts-ignore const message = err?.data?.message || err.message; setClaimState({ txState: TxStates.Error, errorMessage: message, }); } } else { console.log(`no merkledistributor`); setClaimState({ txState: TxStates.Error, errorMessage: `Merkledistributor contract not found`, }); } }; const cancelRedeem = () => { setShowWizard(false); setClaimState({ txState: TxStates.Idle, }); }; const connectWallet = async () => { await onboardApi?.walletSelect(); }; const watchAssetHandler = async () => { if (token && wallet && wallet.provider) { const address = token.address; const decimals = await token.decimals(); const symbol = await token.symbol(); const image = 'https://fairdrop.brightid.org/BrightTokenIcon256.png'; await watchAsset({ address, decimals, symbol, image, provider: wallet.provider, }); } }; const connectChainHandler = async (chainId: number) => { if (wallet && wallet.provider) { switch (chainId) { case xDaiChainId: await switchToXDai({ 
provider: wallet.provider }); break; case mainnetChainId: await switchToMainnet({ provider: wallet.provider }); break; default: console.log(`Unhandled chainId ${chainId}`); } } }; const now = Date.now(); if ( registrationInfo.currentRegistrationEnd < now && registrationInfo.nextClaimStart > 0 ) { // we are in phase transition return <ClaimingDisabled registrationInfo={registrationInfo} />; } const isMetamask = wallet?.name === 'MetaMask'; return ( <> <ActiveClaim amount={claim.amount} nextAmount={nextAmount} claimed={isClaimed} claimChainId={claim.chainId} selectedChainId={payoutChainId} currentChainId={network || 0} registrationInfo={registrationInfo} connectWallet={connectWallet} claimHandler={redeem} watchAssetHandler={isMetamask ? watchAssetHandler : undefined} connectChainHandler={isMetamask ? connectChainHandler : undefined} /> <ClaimWizard chainId={payoutChainId} amount={claim.amount} open={showWizard} claimState={claimState} claimHandler={redeem} cancelHandler={cancelRedeem} /> </> ); }; export default ActiveClaimController;
Excess in the pharmaceutical industry The main point about excess in the pharmaceutical industry is how much there is of it. Here I can touch on only a few specifics about this altogether over-the-top business. Although the pharmaceutical industry claims to be a high-risk business, "Me-too" drugs The main output of the big drug companies is "me-too" drugs: minor variations of highly profitable pharmaceuticals already on the market. 5 Some me-too drugs are gimmicks to extend monopoly rights on an older blockbuster. For example, the antacid Nexium was AstraZeneca's virtually identical replacement for Prilosec when its exclusive rights on the older drug expired. Others are attempts by competitors to cash in on lucrative markets. For example, the top-selling drug in the world, Pfizer's Lipitor, is the third of 3 me-too drugs to cash in on the success of the first statin, Merck's Mevacor. All of these drugs inhibit the same rate-limiting enzyme in cholesterol synthesis. There is generally no good reason to believe that one me-too drug is better than another, since they are seldom compared head-to-head at equivalent doses in clinical trials. Instead, they are tested against placebo, and so all we know is that they are better than nothing. In fact, it's conceivable that, within me-too families, each successive drug is actually worse than the one before. Without suitable comparative testing, we'll never know. Because me-too drugs are cheaper and less risky to develop and have ready-made markets, the industry increasingly relies on them. From 1998 through 2003, 487 drugs were approved by the US Food and Drug Administration (FDA). Of those, 379 (78%) were classified by the agency as "appear to have therapeutic qualities similar to those of one or more already marketed drugs," and 333 (68%) weren't even new compounds (what the FDA calls "new molecular entities"), but instead were new formulations or combinations of old ones. Only 67 (14%) of the 487 were actually new compounds considered likely to be improvements over older drugs. 5 This state of affairs is growing worse. The industry justifies me-too drugs by arguing that they provide back-up for patients who don't respond well to already available drugs and that the competition keeps prices down. Neither argument has much merit. The claim that back-up me-too drugs are clinically useful is rarely tested in trials. Drug companies don't test their me-too drugs in people who haven't responded to another drug (or have had unacceptable side-effects). Anecdotes, of which there are plenty, are notoriously unreliable. In any case, while it may be reasonable to have 1 back-up available, it's hard to make the case for 4, 6 or 8. As for price competition, there is very little of it. Me-too drugs are almost never promoted as being cheaper than the others. Instead, companies imply that they are better in some way. Sometimes they do this by touting the results of clinical trials in which the drug was used for a slightly different indication. (These and other kinds of phase IV or post-approval studies consume about a quarter of the industry's much-vaunted R&D expenditures. 6 ) But the fact remains that in the US the prices of drugs in most me-too categories are almost never reduced over time, despite the introduction of new competitors. Instead, prices are relentlessly increased. Marketing Closely tied to excess me-too drugs are excessive marketing expenditures.
For decades, the big drug companies have spent far more on "marketing and administration" (companies have slightly different names for this budgetary item) than on anything else. Throughout the 1990s, for example, the top 10 drug companies in the world consistently spent about 35% of sales on marketing and administration, and only 11% to 14% on R&D. 7 (For that decade, they took in profits of 19% to 25% of sales.) Just looking at the top 10 US companies in 2002, expenditures for marketing and administration were 31% of sales, compared with only 14% for R&D. 1 That comes to an astonishing $67 billion of their $217 billion in sales. Where did all that money go? No one can say for sure, because the drug companies do not make that information publicly available. But one can make some reasonable estimates. First, the lion's share probably went to marketing, not administration. That assumption is supported by the fact that, according to the Pharmaceutical Research and Manufacturers of America (PhRMA), the industry's trade association, 35% of its members' personnel in 2000 were in marketing, compared with 12% in administration. 6 Marketing includes expenditures for "education of medical professionals," which is probably the biggest single chunk of it. Administration includes executive salaries (which are huge 8 ), legal costs and the overhead associated with running any large business. Most marketing is directed toward persuading doctors and patients to choose one me-too drug over another, usually without a scientific basis for doing so. For that reason, free samples are mainly newly patented, me-too drugs. It takes a lot of promotion to convince people to select one me-too drug over another. AstraZeneca was reported to have spent a half-billion dollars in a year to switch Prilosec users to Nexium. 9 In contrast, a uniquely important drug would require very little promotion. Advertising also expands the total market. Drug companies increasingly promote diseases to fit drugs, rather than the reverse. They try to persuade people in affluent countries that they are suffering from conditions that need long-term treatment. Thus, millions of normal people come to believe that they have dubious or exaggerated ailments such as "generalized anxiety disorder," "erectile dysfunction," "premenstrual dysphoric disorder" and GERD (gastroesophageal reflux disease). That, too, is expensive. The big drug companies like to say that prices have to be high to cover their R&D costs, but it would be truer to say they are high to cover their marketing costs -and their outsize profits. Influence on the medical profession The medical profession has largely abdicated its responsibility to educate medical students and doctors in the use of prescription drugs. Drug companies now support most continuing medical education, medical conferences and meetings of professional associations. 10 Although they call it education, the billions of dollars they put into it comes out of their marketing budgets. The industry also provides students, house officers and physicians in practice with meals, trips to exotic locations and many other blandishments. Although medical and industry associations have issued guidelines that would limit these gifts, codes of conduct are entirely voluntary and full of loopholes.
Although it is self-evidently absurd for medical professionals to look to an investor-owned company for an impartial, critical evaluation of its own products, there is ample evidence that marketing masquerading as education does increase the use of a drug; indeed, if it did not, heads would roll in executive suites, since these companies are not charities. And so why does the profession pretend to believe that drug companies, in contrast with all other businesses, can provide objective information about their own products? Unfortunately, the answer is because it pays -in CME credits, perks and free lunches. But ask yourselves, fellow physicians, why drug companies should be giving you any gifts at all, especially since they just tack the costs on to the price of drugs. The profession should pay for its own education, just as other professions do. Influence on government The pharmaceutical industry has the largest lobby in Washington, DC -there are more pharmaceutical lobbyists there than members of Congress -and it gives copiously to political campaigns. 11 As a result, the prescription drug legislation and policies that come out of Washington are usually made to order for the industry. Here are just a few examples: A series of laws has enabled drug companies to extend the exclusive marketing rights of brand-name drugs through a variety of manoeuvres, including suing generic companies, sometimes repeatedly, to gain additional 30-month periods of exclusivity. The fruits of publicly funded research are virtually given to drug companies, with no requirement for reasonable pricing. Americans are prohibited from importing prescription drugs from countries where they are less expensive, most notably Canada. The FDA does not require drug companies to test their new drugs against old ones for the same condition, even when several drugs of the same class are already on the market. Most stunningly, in 2003 Congress passed a Medicare prescription drug benefit that explicitly prohibits the agency from using its purchasing power to bargain for low prices or discounts. That makes prescription drugs unique in the Medicare program, which does regulate doctors' fees and hospital reimbursement. Furthermore, every other large insurer bargains with drug companies for lower prices or discounts, including the Veterans' Affairs System and the Department of Defense. I won't take space to discuss some of the other excesses, such as the growing influence of drug companies on the design and reporting of clinical trials. The specific excesses already noted should be sufficient to show why prescription drug expenditures in the US are so high and so central to the struggle to contain rising health costs. Although outpatient prescriptions accounted for only 12% of US personal health care expenditures in 2002, they were its fastest growing component, increasing at an unsustainable rate of about 15% per year. 12 The excesses of the pharmaceutical industry are perhaps the clearest example of the folly of allowing health care expenditures and policies to be driven by largely unregulated market forces and the profit-making imperatives of investor-owned businesses.
As Gorillaz release their new album, Humanz, the duo discuss Brexit, predicting dystopia, and how they made up after not speaking for three years On the top floor of his west London studio, “13”, Damon Albarn is sitting at a small wooden table that his dad made sometime in the 1970s. In front of him are a glass of nettle tea, some freshly squeezed juice in a union jack mug and a single cigarette. He’s in a good mood, and he is talking about Gorillaz, the cartoon band that he formed with Jamie Hewlett almost 20 years ago. Unfortunately, he is not being clear. I’ve just asked him if the music that he makes on the new album – soulful, urban, with contributors including Mavis Staples, Pusha T, Jehnny Beth and Benjamin Clementine – marries up at all with Hewlett’s drawings of 2D, Murdoc, Russel and Noodle, the cartoon members of the band. “Well, I don’t think that matters,” says Albarn, cheerfully. “Gorillaz isn’t about anything specific. It’s very unruly. You can go anywhere and do anything, and that’s the whole point of it. There’s no agenda. I’ll go with what’s exciting, and I’ll make it work, because I can pretty much lend my hand to anything, musically. Also, I could keep knocking out tunes for the next year or two. So, just look at this album as the nucleus of something much bigger.” I make a face. Albarn says, “You know, a lot of things don’t make sense at the moment. This is just another one of those. It’s very à la mode!” Grr. He can be a frustrating interviewee, because he doesn’t like unpicking his creative process, talking the magic away. This is what he says about his lyrics on the new Gorillaz record: “I get my chords and then I improvise the words in one take. It’s a subconscious thing, so I think of my language as somehow connected to the future or the past. Untethered to now. And with this record, I mostly left the words as they came out.” He doesn’t want to explain, because he’s not really sure what his lyrics are about. Let’s talk about his everyday life for a bit, instead. On Saturday night, Albarn and Missy, his 18-year-old daughter, went out to celebrate the 17th birthday of Jamie’s younger son, Rocky. They drank sake. Missy told Albarn she thinks she could drink as much as him; he didn’t want to disillusion her. (Her mum, his partner, Suzi Winstanley, is teetotal.) He’s on his own with Missy for a week – “She’s really interested in politics and film. One of the most heartwarming things for me is, occasionally, when it’s not trap music booming out from her room, I hear Radio 4. And I think: Ah, you are my child!” – and is vaguely wondering how much he should be parenting her. Albarn famously keeps working hours at his studio: 10am-5pm every day. He cycles there from his house (he lost his driving licence 18 years ago, and never bothered to get it back), and before he does anything, he shoves vegetables, fruit and ginger into the studio’s industrial juicer and hands the juice out to anyone who’s about. For the past three years, he’s added yoga into his routine. He started with a teacher coming to his home, but soon realised he likes the friendliness and surprise of classes. He tries to go to a class every day, no matter where in the world he finds himself: Jamaica, Japan, the US. The only place he couldn’t find one was Bamako, in Mali. He’d like, he says, and it’s only partly a joke, to write a Lonely Planet guide to the world’s best yoga classes.
He catches himself and starts laughing. “Listen to me,” he says. “International yoga guide. Who’d have thought?” Yoga, juicing and being untethered to now weren’t part of the original Gorillaz lifestyle. The band started at the end of the 1990s, when Albarn and Hewlett lived together in a gadget-packed, boy’s own first-floor flat in Notting Hill. Then, Albarn was known as the singer and songwriter of Blur; Hewlett as the co-creator and artist of the comic book Tank Girl. They were both heartbroken (Damon over his split with Justine Frischmann, Jamie over his with Jane Olliver, Justine’s friend – though he and Jane got back together for a time and had two sons) and they bonded over lost love and silly toys. Despite their upset, their flat was a fun place to be. When you visited, there was always something in the background making an unexpected noise: a robot, a watch, something digital or musical. They had a big telly and watched a lot of MTV. The original idea for Gorillaz came about because they thought so many pop groups made lame interviewees, and because Albarn wanted to make hip-hop, which he could never do with Blur. He needed to be anonymous in order to experiment: “People weren’t meant to know it was me,” he says. “Even now I think, during the gigs, I’m going to be able to go off, go backstage and make myself a drink and a hologram will take my place for a couple of songs.” (This has not yet happened.) Gorillaz was, unexpectedly, a huge success. In the US, the first album outsold the whole of Blur’s output. People loved the band. And so, over the next 13 years, Albarn and Hewlett worked and partied and made two more Gorillaz albums, plus Monkey: Journey to the West, an opera created for the Manchester international festival in 2007, which toured the world. They knocked around in the same group of mates; their kids grew up together. For a while, during the making of their last album, 2010’s Plastic Beach, Jamie had an animation studio, employing a few staff, on one of the floors of 13. Now, the two of them live far apart. Albarn is still in west London, settled in his studio, his home, his long-term relationship. Hewlett met his new wife, actor Emma de Caunes (daughter of Antoine), at a Gorillaz gig (she read his tarot cards), and moved to Paris in 2010 to live with her and her daughter, Nina, now 14. His boys, Denholm (21) and Rocky (17), live in Leeds, but are moving to London for work and will live in Jamie’s London flat. Aside from their history, Albarn and Hewlett have a lot in common: a portrait-in-the-attic youthfulness, a bovver-street way of dressing, a cheekiness (more confrontational in Albarn), a dislike of talking about their art in specifics. They both get bored very quickly. They both love to work. They both spark off each other, but they create separately. They’re like brothers, but they stopped talking in 2011 and didn’t communicate for three years. Before we get to that, though, let’s try and explore Humanz, Gorillaz’s fourth album (if you don’t count The Fall, which is more like a tour notebook). Hewlett (whom I meet separately) is telling me the story. We’re in a glass-walled side office at the film company where he works when he’s in London; he faces away from the main office, to stop him getting distracted.
As soon as the pair decided to do another album, he immediately began making visuals. He wrote little stories about each of the Gorillaz characters that moved them on from Plastic Beach and situated them in a studio called 14, next door to Damon, in west London. “I see the ‘z’ in Humanz as referring to robots, AI, programming, brainwashing, indoctrination,” he says. “And it’s a question to us: are we human or are we humanz? Have we lost the ability to think for ourselves, do we just believe what we’re told? That’s how I see it. And the reason the characters appear on the album cover as a real-life version of themselves is because I’m wondering: are these characters becoming more human than us?” That is not what Albarn says, when I ask him: “‘Pain, joy, urgency’ was the mantra of these sessions. Those words were always one of the first things that I talked about with collaborators. Also, I had this mad idea of imagining what it would be like if Donald Trump won the US election. Like, what’s the darkest, most twisted fantasy I can come up with? I liked the idea of the album taking place on the night of this massive culture shock. How would people feel? The record is not polemical, it’s quite abstract. It’s just that I wanted to imagine the near future, because this record for me was always slightly futuristic. Hence the name Humanz.” Clear now? Good. Anyhow, the resulting LP is, according to Albarn, “about America. Pre-post-Trump America.” Sessions were recorded in Chicago, Jamaica, Paris, London. Towards the end of the process, he took out many of the non-US contributors, including Graham Coxon, Héloïse Letissier from Christine and the Queens, and Rag’n’Bone Man. He also took out any English rap and all direct Trump references, because he didn’t want to give him any more promotion. Other than that, he was keen for the album “to be a record of male and female voices. Really well balanced.” None of which seems to have much bearing on Gorillaz as Hewlett’s visual entity. The band exists in its own universe, and if you want to experience it, you can do a virtual tour via the Gorillaz app. Or you could watch the latest video, for the single Saturnz Barz (Spirit House), which gives you an entire world, from scrunched-up tissue to spinning star, a world where you can move from a haunted house into outer space. On its release, it broke all records for viewings of a 3D video. On Instagram, you can see short film clips; on YouTube you’ll find a full-length interview done by MistaJam with 2D and Murdoc, where they’re sitting on a sofa in a room with him, answering questions sent in by fans. You can head to a Gorillaz festival: Demon Dayz, a one-dayer on 10 June at Margate’s Dreamland. A TV series is ready to go. Gorillaz is an art project gone supernova. When Albarn wants to work with someone, he writes them a letter. Not an email; a letter. He writes to icons (Sade, Dionne Warwick, Morrissey and Barry Gibb all said no), but also up-and-coming talent. Sometimes, these are brought to his notice by Remi Kabaka Jr, or by Anthony “the Twilite Tone” Khan, who both co-produced Humanz. If Albarn hears a young artist’s music and thinks there might be “some sort of kinship”, he writes the letter. “I love working with people. I don’t like being on my own in the studio. And the amazing thing about working with other people is that everyone’s got their own way of doing things.
I love it.” We talk about some of his collaborators on Humanz, such as Grace Jones: “Doing headstands at two o’clock in the morning, after a couple of bottles of wine and whatever else. She is slightly supernatural.” And his old rival, Noel Gallagher: “Once he stopped being totally defined by Oasis, he started to feel different about a lot of things – and that was when we started talking. He’s got a very different kind of voice from mine, it’s super-melodic, and he works really accurately.” Do you phone each other up now, then? “Noooo! Though I did call him the other day, after the game (he means Chelsea v Man City: City lost) to say, ‘Do you want to go for a drink?’ Surprisingly, he wasn’t really up for it.” God, you’re a proper wind-up, I say. He digs in his pocket and brings out a badge. It says: “Trust Me, I’m A Cunt.” He grins. His gold tooth glints. Why did you fall out with Jamie? I ask. “There’s a lot of competition between men,” he says. “I think it’s always been about that. But I was just devastated that I’d lost such a close friend. That he’d just left me.” It happens quite a bit with you, though, I say. You fall out with your close male friends. “That’s true,” he says. “With Jamie, it was horrible. It’s so horrible when it happens... I just dig my heels in and go, ‘Right, I ain’t fucking moving.’ And then slowly, you start to move a little bit. Because you’ve forgotten what it was about, and also, you always really liked them anyway. And, you know, I may fall out with a lot of people, but I’ve also managed to reconcile with a lot of people.” I ask Hewlett about the fall-out too. Even now, he says, “we fight at everything. We’d fight over a worm if we found it at the same time. It’s hilarious, but it’s stupid. We’re in a restaurant, we fight. In a taxi, we fight. In front of people, we fight. It’s just one-upmanship. School for Scoundrels. He’s an intense person to be friends with, I mean. A lot is required.” It must have been strange when the arguing stopped and they didn’t speak for three years. They can’t or won’t say what happened, but, in essence, Hewlett felt that his contribution to Gorillaz was being diminished. For the Plastic Beach album, he was meant to make four elaborate videos, which would have told the full story; but the budgets were pulled by EMI when he’d only made two. Then Gorillaz went on tour and, says Hewlett, “Damon had half the Clash on stage, and Bobby Womack and Mos Def and De La Soul, and fucking Hypnotic Brass Ensemble and Bashy and everyone else. It was the greatest band ever. And the screen on stage behind them seemed to get smaller every day. I’d say, ‘Have we got a new screen?’ and the tour manager was like, ‘No, it’s the same screen.’ Because it seemed to me like it was getting smaller.” Added to this, he says that he had lost his way a bit, partying a little too hard, “not considering myself”, and so… he met Emma and he left. He pulled out of Gorillaz and he pulled out of Dr Dee, the opera he was working on with Albarn, and he shut down his animation studio and moved to Paris. “I just didn’t want to fucking work with him. I didn’t want to see him.” They reconciled at a friend’s Christmas party in 2014.
Hewlett says: “I knew the first words that came out of his mouth would be, ‘All right, poof?’ And he came over and said, ‘All right, poof?’ And I said, ‘Hi, Damon.’” During their hiatus, Hewlett painted a lot. He had an idea for a live-action film, which he worked on but then abandoned. (“When projects don’t work, I feel pissed off for about a week and then I get over it.”) He created work for his acclaimed solo shows at London’s Saatchi Gallery. Albarn did what he does, which is to make music. In his Jamie-free years, he made albums for different projects: Kinshasa One Two, an Oxfam charity album made with musicians from the Democratic Republic of the Congo; Maison Des Jeunes with Africa Express; Rocket Juice & the Moon. He finished the Doctor Dee opera, performed in it on stage and released the album; closed the 2012 Olympics ceremony with a huge Blur gig in Hyde Park; made a solo album, Everyday Robots, and toured it. Blur also toured and made an album, which is what delayed the start of Humanz. Hewlett, meanwhile, kept working alone. “I don’t like collaborating too much,” he says. “I’m all about doing things myself because I find it hard to trust other people. Not trust, but I know exactly what I want to do and I know exactly how it’s supposed to look. And so to do it myself is fine, you know? Sometimes I involve other people, but what often happens is that I come up with an idea for a video, and because I don’t really know the advances in animation and the technology that’s available, they say, ‘We can’t do this yet.’” Hewlett films nearly all of what happens with Gorillaz, and hangs out in the studio when Damon is making the music. “An amazing thing happens when Damon and I have people coming into the studio,” he says. “There’s that initial moment when they’re a bit uncomfortable, and then they start becoming musical with each other and it’s a different story. When we had Mavis Staples in, it took a while for her to get into the song, and there was a moment with her and Damon, and Damon’s got his guitar, and she’s sitting next to him, and Damon’s taking her through it. And then, after an hour, they suddenly find this connection, and it’s really strong. You can see it on the film. It’s a breakthrough, like they discover this little love affair through music and they make a song. You can’t have that as an artist, you know?” Albarn travels a lot, working, and it informs how he thinks and feels about the world. He tells me about playing a big gig in the old citadel in Damascus, about five months before the Syrian civil war really kicked off. He has musician friends there who took him round monasteries and to meet a Bedouin prince, lent him an old motorbike so he could drive around Palmyra by moonlight. After the war started, he brought over 20 of those Syrian musicians for a tour; they played Glastonbury, Roskilde, Istanbul. “Then a lot of them went back, and some of them just disappeared into the night after the last gig,” he says. Some of them have settled here. Albarn does not have a Brexit attitude to that, or anything else. “It’s just such a mess, the Middle East,” he says, “and a lot of it stems back to Tony Blair and George Bush in 2003 [and the invasion of Iraq].
That’s why the whole idea of Brexit is so tragic, because people were hoodwinked into this idea that they were imminently going to be invaded by millions of marauding hordes. But those hordes were only moving away from their homes because of the direct influence, the direct tremor [of the Iraq war], which we had not only very strongly as a country said we were against, and has now proved to have been an utter disaster. Brexit would never have been anything other than a sort of prosaic, mechanical thing if it hadn’t been spiced up with all of that.” There should be a new referendum, he says, but knows it won’t happen. (He saw Michael Gove recently in the street and shouted at him, called him a xenophobe.) His next album with his band the Good, the Bad & the Queen is going to be a post-Brexit record, “like Parklife – stories about Britain – but less jaunty”. Albarn’s way of making music, the way he goes out to gather it, changes him, and that’s the point. “All I want to do is meet new people,” he says, “learn about them, and maybe do new shit because of that. I don’t want to threaten people’s way of life at all. There’s just so much to be learned from being sensitive to other people, you know? There are so many flavours… “If we really, truly as a country, want to leave, then, OK. If you really don’t want to go to France and you really don’t want to go to Spain, and you really don’t want to go to Portugal, and Italy, and you really don’t want to go to Greece, or Crete or Majorca… and you have no desire to go to Iceland or Denmark, or even if you really can’t bear the thought of going to Ireland, fine. Stay home. But personally, I’m going to carry on going to those places because I really like them. As well as I really love the country I live in.” He stops. Rant over. Except it isn’t, really, is it? “No. It’s all part of a long conversation. We’ll get back to it.” When he was making Humanz, during 2015 and 2016, Albarn knew, in his bones, he says, that Donald Trump was going to get in. Sometimes he thinks he can see the future, and sometimes he can’t. “When I did all that study for Dr Dee, it really changed me. I was able to articulate stuff I instinctively felt. I always had a slightly witchy element to my psyche. The role of magic is not to be underestimated.” It’s why he doesn’t want to explain; and why he wants to mix and create with other people. When he makes music with other people, he can feel the magic: “Definitely, definitely. The spirit… we all feel the spirit, you know, it exists,” he says. “It’s an energy, it’s whatever you want to call it, it exists. And the beautiful thing about it is it’s there for everyone. Whether you’re a billionaire or you’re living on the street – the spirit is there for you. “The only way you’re not going to feel it is if you close yourself off completely.” And why would anyone do that?
The predictive value of anthropometric indices for cardiometabolic risk factors in Chinese children and adolescents: A national multicenter school-based study Objectives This study aimed to assess the accuracy of body mass index (BMI) percentile, waist circumference (WC) percentile, waist-height ratio, and waist-hip ratio for identifying cardiometabolic risk factors in Chinese children and adolescents stratified by sex and BMI categories. Methods We measured anthropometric indices, fasting plasma glucose, lipid profile and blood pressure for 15698 participants aged 6-17 in a national survey between September and December 2013. The predictive accuracy of anthropometric indices for cardiometabolic risk factors was examined using receiver operating characteristic (ROC) analyses. The DeLong test and Z test were used for the comparisons of areas under ROC curves (AUCs). Results The prevalence of impaired fasting glucose, dyslipidemia, hypertension and clustered risk factors was 2.9%, 27.3%, 10.5% and 5.7% respectively. The four anthropometric indices showed poor to fair discriminatory ability for cardiometabolic risk factors, with AUCs ranging from 0.53 to 0.72. Each index showed significantly higher AUCs for dyslipidemia (0.59-0.63 vs. 0.56-0.59), hypertension (0.62-0.70 vs. 0.55-0.65) and clustered risk factors (0.70-0.73 vs. 0.60-0.64) in boys than in girls. BMI percentile showed the best accuracy for hypertension in both sexes; in boys, WC percentile had the highest AUC for dyslipidemia, and BMI percentile and waist-height ratio performed comparably with the best AUCs for clustered risk factors, while in girls, BMI percentile, WC percentile and waist-height ratio showed similar and higher AUCs for dyslipidemia and clustered risk factors; waist-hip ratio was consistently the poorest predictor regardless of sex. Though the anthropometric indices were more predictive of dyslipidemia, hypertension and clustered risk factors in the overweight/obese group compared to their normal-BMI peers, the AUCs in the overweight/obese group remained in the poor range below 0.70. Conclusions Anthropometric indices are not effective screening tools for pediatric cardiometabolic risk factors, even in overweight/obese children. Introduction Cardiometabolic risk factors among children and adolescents, including hyperglycemia, dyslipidemia, hypertension, etc, have increased with the global pandemic of childhood obesity over recent decades. Cardiometabolic risk factors in childhood are associated with earlier onset and greater risk of many chronic disorders in adults such as cardiovascular disease, metabolic syndrome and type 2 diabetes. Thus, early screening of cardiometabolic risks is believed to be crucial for the prevention of and intervention in chronic diseases. Although cardiometabolic risk factors are mostly determined by objective approaches (e.g., laboratory tests), non-invasive and simple anthropometric measurements, such as body mass index (BMI) and waist circumference (WC), have been proposed as feasible alternatives for assessing cardiometabolic risks in early stages because of the robust relationship between childhood obesity and cardiometabolic risks. However, existing studies have reported conflicting results for the predictive capabilities of anthropometric indices for cardiometabolic risk factors among children and adolescents.
Some studies have suggested that some, though not all, anthropometric indices are useful screening tools for identifying children and adolescents with elevated cardiometabolic risk; on the contrary, other studies disapproved of anthropometric indices for predicting pediatric cardiometabolic risks because of the poor accuracy observed. In these studies, the discriminatory ability of BMI, WC, and waist-height ratio for cardiometabolic risk factors has been studied extensively, while there have been few studies on waist-hip ratio, a commonly used index of central obesity in adults. Furthermore, current research has mainly focused on the general population or on overweight/obese children; there is, however, little evidence on the predictive performance of anthropometric indices for cardiometabolic risk factors among children and adolescents in different BMI categories. Therefore, further research is warranted to investigate the predictive accuracy of anthropometric indices for screening cardiometabolic risks. This study aimed to comprehensively assess the discriminatory ability of four commonly used anthropometric indices (BMI percentile, WC percentile, waist-height ratio and waist-hip ratio) for identifying cardiometabolic risk factors in Chinese children and adolescents, stratified by sex and BMI categories. The findings of this study will contribute to a better understanding of the effectiveness of these indices in predicting cardiometabolic risks and will inform future preventive practice on how to choose anthropometric measurements for cardiometabolic risk screening. Study design and participants This study was based on a national cross-sectional survey conducted between September and December 2013 in seven provinces in China: Liaoning Province (Northeast), Tianjin Municipality (North), Shanghai Municipality (East), Hunan Province (Central), Guangdong Province (Southeast), Ningxia Autonomous Region (Northwest), and Chongqing Municipality (Southwest). The protocol has been described elsewhere. Briefly, a multi-stage stratified cluster sampling method was used to recruit primary and secondary students: 4-10 primary schools, 2-6 junior high schools, and 2-6 senior high schools were selected in each province; 15-25 classes were randomly chosen from each of Grades 1-12 in the selected schools, except Grades 6, 9, and 12, to avoid disturbing students' preparation for graduation examinations. 65347 students from 94 schools in seven provinces were enrolled in the physical examination. According to the protocol, only two primary schools, one junior high school and one senior high school were randomly selected from each province because of limited funding, and the students in those selected schools were invited for blood collection. Finally, 16756 students participated in the blood examination, including 2160 from Hunan, 2471 from Ningxia, 2770 from Tianjin, 2163 from Chongqing, 2338 from Liaoning, 2316 from Shanghai and 2538 from Guangzhou. Those with missing anthropometric measurements (n = 694), blood pressures (n = 92), fasting plasma glucose (n = 10), or lipid levels (n = 3) and those with outlying values of these measurements (n = 259) were excluded from this study. The outliers were defined as measurements above Q3 plus 3 times the interquartile range or below Q1 minus 3 times the interquartile range within each sex-age group, identified using boxplots. A total of 15698 children and adolescents were included in the following analyses.
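To make the exclusion rule concrete, the sketch below applies the same boxplot-style fences (Q1 − 3 × IQR, Q3 + 3 × IQR) within sex-age groups. This is an independent illustration in Python with invented column names and simulated data; the paper's own analyses were run in SPSS and MedCalc, not with this code.

```python
# Hypothetical sketch of the outlier-exclusion rule described above:
# a value is an outlier if it falls below Q1 - 3*IQR or above Q3 + 3*IQR
# within its sex-age group. All column names and data are made up.
import numpy as np
import pandas as pd

def flag_outliers(df: pd.DataFrame, cols, by=("sex", "age")) -> pd.Series:
    """Boolean mask: True for rows with any measurement outside the fences."""
    def outside_fences(g: pd.Series) -> pd.Series:
        q1, q3 = g.quantile(0.25), g.quantile(0.75)
        iqr = q3 - q1
        return (g < q1 - 3 * iqr) | (g > q3 + 3 * iqr)

    mask = pd.Series(False, index=df.index)
    for col in cols:
        # transform() applies the fence test group-by-group, keeping alignment.
        mask |= df.groupby(list(by))[col].transform(outside_fences)
    return mask

# Simulated demonstration data.
rng = np.random.default_rng(1)
df = pd.DataFrame({
    "sex": rng.choice(["M", "F"], 1000),
    "age": rng.integers(6, 18, 1000),
    "height": rng.normal(145, 15, 1000),
    "weight": rng.normal(40, 10, 1000),
})
included = df.loc[~flag_outliers(df, ["height", "weight"])]
print(f"{len(included)} of {len(df)} simulated participants retained")
```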
The study was approved by the Ethical Committee of Peking University (NO.IRB00001052-13034). Written informed consent was obtained from each student and their parents. Anthropometric measurements Height, weight, and waist and hip circumferences of all participants were measured by experienced technicians in accordance with standard procedures. Standing height was measured to the nearest 0.1 cm using a fixed stadiometer (model RGT-140, China), and body weight was measured to the nearest 0.1 kg using a lever-type weight scale (model TZG, China). Waist and hip circumferences were also measured to the nearest 0.1 cm. Cardiometabolic measurements Blood pressures were measured by trained medical staff with mercury sphygmomanometers (model XJ11D, China), stethoscopes (model TZ-1, China), and appropriately sized cuffs. Participants were asked to sit quietly for at least 5 min prior to the first reading. Systolic blood pressure (SBP) was determined by the onset of the first Korotkoff sound and diastolic blood pressure (DBP) by the fifth Korotkoff sound. Blood pressure was measured twice with a 5-min gap between the two measurements, and the mean values were calculated. After an overnight fast of 12 h, venous blood samples (5 ml) were obtained from the antecubital vein of each participant and collected into EDTA vacuum tubes between 7 and 9 AM. Samples were centrifuged at 3000 r/min, aliquoted and stored at −80°C. Levels of fasting plasma glucose (FPG), total cholesterol (TC), low-density lipoprotein cholesterol (LDL), high-density lipoprotein cholesterol (HDL), and triglyceride (TG) were determined at a validated biomedical analysis company accredited by Peking University. The FPG level was measured by the glucose oxidase method; TC and TG levels were measured by enzymatic methods; and LDL and HDL levels were measured by the clearance method. The non-high-density lipoprotein cholesterol (nHDL) level was calculated by subtracting the HDL level from the TC level. Adiposity-related anthropometric indices Age- and sex-specific BMI percentiles were calculated based on the BMI growth charts for Chinese children and adolescents. Overweight and obesity were defined based on the age- and sex-specific BMI cut-offs equivalent to BMI ≥24 kg/m² and BMI ≥28 kg/m² at 18 years of age, respectively. Age- and sex-specific WC percentiles were calculated based on the WC growth charts for Chinese children and adolescents. The waist-height ratio was calculated by dividing waist circumference by height. The waist-hip ratio was calculated by dividing waist circumference by hip circumference. Definition of cardiometabolic risk factors Cardiometabolic risk factors were determined based on recommended definitions for children and adolescents identified in the literature. According to the 2011 Expert Panel on Integrated Guidelines for Cardiovascular Health and Risk Reduction in Children and Adolescents, abnormal lipid levels were determined as follows: TC ≥5.18 mmol/L; nHDL ≥3.76 mmol/L; LDL ≥3.37 mmol/L; TG ≥1.13 mmol/L for 0-9 years and ≥1.47 mmol/L for 10-19 years; HDL <1.04 mmol/L. Dyslipidemia was defined as the presence of one or more of the five conditions above. Impaired fasting glucose (IFG) was defined as FPG ≥5.6 mmol/L. High SBP and high DBP were defined as SBP and DBP at or above the 95th percentile for age and sex, respectively, and hypertension was determined as high SBP or high DBP.
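These decision rules translate directly into code. The following is a minimal, hypothetical Python sketch of the cut-offs just described; the function names and argument layout are my own, and the study itself used SPSS and MedCalc rather than anything like this.

```python
# Hypothetical encoding of the cut-offs described above. Lipids and glucose
# are in mmol/L; blood pressure is judged against externally supplied
# age- and sex-specific 95th-percentile reference values.

def has_dyslipidemia(tc, nhdl, ldl, tg, hdl, age_years):
    """Dyslipidemia: one or more abnormal lipid levels (2011 Expert Panel)."""
    tg_cutoff = 1.13 if age_years <= 9 else 1.47  # 0-9 y vs. 10-19 y
    return (tc >= 5.18 or nhdl >= 3.76 or ldl >= 3.37
            or tg >= tg_cutoff or hdl < 1.04)

def has_impaired_fasting_glucose(fpg):
    """IFG: fasting plasma glucose >= 5.6 mmol/L."""
    return fpg >= 5.6

def has_hypertension(sbp, dbp, sbp_p95, dbp_p95):
    """Hypertension: SBP or DBP at or above the age/sex-specific 95th percentile."""
    return sbp >= sbp_p95 or dbp >= dbp_p95

def anthropometric_ratios(waist_cm, height_cm, hip_cm):
    """Waist-height and waist-hip ratios as defined in the Methods."""
    return waist_cm / height_cm, waist_cm / hip_cm
```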
The cluster of cardiometabolic risk factors was defined as the accumulation of three or more of the risk factors above, i.e., high TC, high nHDL, high LDL, high TG, low HDL, IFG, high SBP and high DBP. Statistical analyses The normality of continuous data was examined by Lilliefors and Shapiro-Wilk tests. None of the continuous variables conformed to a normal distribution, so they were described by medians and quartiles. The Mann-Whitney U test, t test, and chi-square test were used for comparing anthropometric indices and cardiometabolic risk factors between sexes. Partial correlations were performed between cardiometabolic risk factors and anthropometric indices, adjusting for age and sex. The interactions between anthropometric indices and sex were analyzed using logistic regression models with each cardiometabolic risk factor as the dependent variable, and so were the interactions between anthropometric indices and BMI categories. Receiver operating characteristic (ROC) analyses were used to assess the predictive performance of anthropometric indices for cardiometabolic risk factors. The area under the ROC curve (AUC), which ranges from 0.5 to 1.0, provides a measure of a model's discriminatory ability. In general, AUC = 0.5 suggests no discrimination; 0.5 < AUC < 0.7 is considered poor discrimination; 0.7 ≤ AUC < 0.8 acceptable discrimination; 0.8 ≤ AUC < 0.9 excellent discrimination; and AUC ≥ 0.9 outstanding discrimination. The AUCs of the four anthropometric indices were compared with each other by the DeLong test, and comparisons of AUCs between sexes or BMI categories were performed by Z test. We did not perform weighted analyses because the aim of this study was to find associations at an individual level, not to report population estimates. ROC analyses and comparisons were conducted in MedCalc (MedCalc Software bvba, Ostend, Belgium), and other statistical analyses were conducted in the SPSS 19 statistical package (SPSS Inc, Chicago, Illinois). Basic characteristics of study participants The levels of weight, height, and waist and hip circumferences showed no significant differences between excluded and included participants in most sex-age groups among the 65347 students (S1 Table). The demographic characteristics, anthropometric indices, and cardiometabolic risk factors of the included participants are presented in Table 1. The schoolchildren were aged 6-17 years. The overweight and obesity rates were 15.7% and 11.8%, respectively, with a larger proportion of boys in the overweight/obese group relative to the normal-weight group. BMI percentile and WC percentile were significantly higher in girls than in boys, while waist-height ratio and waist-hip ratio were significantly higher in boys. Girls had higher lipid levels but lower FPG and blood pressures compared with boys. The prevalence of IFG, dyslipidemia, hypertension and clustered cardiometabolic risk factors was 2.9%, 27.3%, 10.5% and 5.7%, respectively, with no significant differences between sexes except for a higher prevalence of IFG in boys. Correlation between anthropometric indices and cardiometabolic variables As shown in Table 2, all the correlation coefficients between anthropometric indices and cardiometabolic variables were statistically significant in the total sample as well as in both sexes (p values < 0.05).
The four anthropometric indices were negatively correlated with HDL and positively correlated with the other cardiometabolic variables, except for a negative correlation between waist-hip ratio and DBP in girls. WC percentile, waist-height ratio and BMI percentile had the highest coefficients for FPG, lipid levels, and blood pressures, respectively. The discriminatory ability of anthropometric indices for cardiometabolic risk In the total sample, the AUCs of the four anthropometric indices for cardiometabolic risk factors ranged from 0.53 to 0.72. Among them, only the AUCs of BMI percentile and WC percentile for elevated SBP were 0.70 or higher (Table 3). Since the interactions between each anthropometric index and sex were statistically significant for most cardiometabolic risk factors after adjusting for sex and the corresponding anthropometric index (S2 Table), the predictive capabilities of anthropometric indices for cardiometabolic risk were further analyzed by sex. For IFG, the AUCs of the anthropometric indices ranged from 0.53 to 0.57 in boys and 0.54 to 0.59 in girls. The AUC of each index showed no significant difference between sexes. Waist-hip ratio in both sexes and waist-height ratio in girls had no discrimination for IFG. BMI percentile and WC percentile showed similar and higher AUCs in both sexes (Table 3). For dyslipidemia, the AUCs of the four anthropometric indices ranged from 0.59-0.63 in boys and 0.56-0.59 in girls. Each index showed a higher AUC for identifying dyslipidemia in boys than in girls. In boys, WC percentile showed the best AUC for dyslipidemia, while waist-hip ratio showed the poorest. In girls, similar performance was observed for BMI percentile, WC percentile, and waist-height ratio, while waist-hip ratio was still the poorest predictor (Table 3 and Fig 1). As for hypertension, the AUCs of the four anthropometric indices were 0.62-0.70 for boys and 0.55-0.65 for girls. Each index showed better discriminatory ability for hypertension in boys than in girls. The AUCs of BMI percentile, WC percentile, waist-height ratio, and waist-hip ratio for hypertension ranked in that descending order in both sexes (Table 3 and Fig 2). With regard to the cluster of cardiometabolic risk factors, the four anthropometric indices showed fair discrimination in boys, with AUCs from 0.70-0.73, but only a poor range of AUCs (0.60-0.64) in girls. The performance of each index was significantly better in boys than in girls. BMI percentile and waist-height ratio had similarly the best AUCs for the cluster of risk factors among boys. Statistically similar AUCs were observed for BMI percentile, WC percentile, and waist-height ratio in girls. Waist-hip ratio was the poorest predictor for the cluster of risk factors in both sexes (Table 3 and Fig 3). The discriminatory ability of anthropometric indices for cardiometabolic risk by BMI categories Further analyses were conducted by BMI category because the interactions between each anthropometric index and BMI categories were significant for most risk factors after adjusting for the anthropometric index and BMI categories (S3 Table). The anthropometric indices showed quite poor accuracy for cardiometabolic risk factors in the normal BMI group, with all AUCs below 0.60. The four anthropometric indices were more predictive of dyslipidemia, hypertension and clustered risk factors in the overweight/obese group compared to their normal-BMI peers.
However, the AUCs in the overweight/obese group were also in the poor range below 0.70 and showed no advantage for identifying cardiometabolic risk factors compared with those in the total sample (S4 Table). Discussion Cardiometabolic risk factors have been an increasing public concern worldwide, including in China. More than a quarter of the total sample had abnormal lipids, over one in ten participants were classified as having hypertension, and 5.7% of children and adolescents were found to have at least three clustered cardiometabolic abnormalities in our study. Such a high prevalence of pediatric cardiometabolic risk factors foreshadows an enormous future burden of chronic diseases in the Chinese population. Effective screening and intervention for cardiometabolic risk factors in children and adolescents are urgently needed. To our knowledge, this is the first national study in China to comprehensively assess the predictive capability of four adiposity-related anthropometric indices (BMI percentile, WC percentile, waist-height ratio and waist-hip ratio) in identifying cardiometabolic risk factors in children and adolescents. By analyzing a large-sample dataset, we found generally poor accuracy of all four indices in both sexes from the perspective of clinical application. Our findings were consistent with most existing studies in that anthropometric indices showed poor to fair accuracy for hyperglycemia, dyslipidemia, hypertension and clustered risk factors. In a recent meta-analysis of AUCs of BMI, WC, and waist-height ratio for pediatric cardiometabolic risk factors, the pooled AUCs for hyperglycemia, elevated TC, elevated TG, low HDL, elevated LDL, hypertension and at least three comorbidities were 0.57, 0.55-0.56, 0.67-0.73, 0.69-0.70, 0.61-0.62, 0.64-0.68 and 0.69-0.74, respectively. A plausible explanation for the unsatisfactory predictive accuracy is that many important factors other than adiposity contribute to levels of fasting glucose, serum lipids, and blood pressure, such as genetic polymorphisms and dietary patterns. Secondly, existing research has demonstrated that visceral fat content is a primary driver of metabolic disorders, and anthropometric indices are only indirect indicators of body weight or fat; their limited correlation with visceral fat content during childhood may be another possible reason for the poor accuracy. Therefore, the use of anthropometric indices for identifying cardiometabolic risk factors in children and adolescents should be considered with great caution.
Despite this, given that these four will continue to be practical indices for screening cardiometabolic risks, it is still worth comparing their performance, which may vary by cardiometabolic risk factor. Existing studies on the predictive superiority of different anthropometric indices for cardiometabolic risk have not yet reached agreement. A large multi-center survey of overweight/obese adolescents in Germany, Austria, and Switzerland revealed that the BMI standard score was more closely associated with hypertension, while the WC standard score was more closely associated with dyslipidemia. Lo et al. found that waist-height ratio, WC, and BMI performed similarly in screening most cardiometabolic risk factors among children and adolescents. In another study, WC consistently showed better predictive capability for cardiovascular risk factors compared with waist-height ratio and BMI among children in Guangzhou. And some studies have considered waist-height ratio the best screening tool for pediatric cardiometabolic risk factors. In our study, BMI percentile showed the best accuracy for hypertension in both sexes; in boys, WC percentile had the best AUC for dyslipidemia, and BMI percentile and waist-height ratio performed similarly with the best AUCs for clustered risk factors, while in girls, BMI percentile, WC percentile and waist-height ratio showed similar and higher AUCs for dyslipidemia and clustered risk factors; waist-hip ratio was consistently the poorest predictor for these cardiometabolic risk factors. The heterogeneity in the predictive superiority of anthropometric indices may be attributed to differing definitions of anthropometric indices and outcome variables, and to racial and regional differences among participants. For instance, BMI and WC can be analyzed as absolute indices or as relative indices such as percentiles or Z scores. Besides, it is likely that some adult indices of fat distribution, such as waist-hip ratio, are inappropriate for children and adolescents because of the small amount of visceral fat before adulthood and the rapid changes in fat patterning during growth and development. Previous studies have shown that the magnitude of associations between anthropometric variables and cardiometabolic risk factors was greater in the overweight and obese group compared with their normal-weight peers. Similar findings were observed in our study: anthropometric indices were more predictive of cardiometabolic risk factors among overweight/obese children. However, the AUCs in the overweight/obese group remained in the poor range below 0.70 and showed no significant advantage in accuracy over the corresponding AUCs in the total sample; in other words, combining overweight/obese BMI categories with elevated BMI percentile, WC percentile, waist-height ratio or waist-hip ratio could not produce greater insight into cardiometabolic risk in our study. This is consistent with the findings of Bauer et al. Some other studies have also found that anthropometric indices could not identify cardiometabolic risk factors well among overweight/obese children. For example, a study of obese Italian children and adolescents demonstrated that anthropometric indices (BMI, BMI Z-score, WC, and waist-height ratio) were not satisfactory predictors of metabolic comorbidities, with significant AUCs ranging from 0.55 to 0.70.
Since the vast majority of children and adolescents in the normal BMI category had low levels of WC while overweight/obese subjects were more likely to be centrally obese, and although cardiometabolic risk factors were more prevalent among overweight/obese children, the discriminatory ability of WC percentile, waist-height ratio or waist-hip ratio did not increase, because these anthropometric variables span narrower intervals within each BMI category. In this sense, other effective screening tools should be used in overweight/obese children and adolescents for cardiometabolic risk assessment, such as the regular blood tests recommended in the guidelines. Several limitations of the present study should be addressed. First, the analysis of impaired fasting glucose only partially reflected the glycometabolic status of children and adolescents; other important metabolic variables, such as the insulin resistance index, were not considered in our study. Second, our study was cross-sectional and could not obtain data on the duration of obesity or recent changes in body weight, which, besides present weight status, may also affect blood pressure, glucose and lipid metabolism. More prospective cohort studies are needed to explore the association between anthropometric indices and cardiometabolic risks before definitive conclusions can be made. Despite the limitations, this is the first study from a national school-based survey to assess the predictive value of BMI percentile, WC percentile, waist-height ratio and waist-hip ratio for cardiometabolic risk factors in Chinese children and adolescents. Our study provided large-sample evidence that adiposity-related anthropometric indices lack sufficient predictive capability for cardiometabolic risks in children and adolescents, even in the overweight/obese group. This implies that anthropometric indices should be used cautiously for early screening of cardiometabolic risk factors in children and adolescents. More effective indicators or models considering multiple determinants of cardiometabolic risk could be explored in future research. Supporting information S1 Table. The distribution of anthropometric measurements between the excluded and included participants by sex and age. (DOCX) S2 Table. The P values for the interactions between each anthropometric index and sex for cardiometabolic risk factors in logistic regression models adjusting for sex and the corresponding anthropometric index. (DOCX) S3 Table. The P values for the interactions between each anthropometric index and BMI categories for cardiometabolic risk factors in logistic regression models adjusting for BMI categories and the corresponding anthropometric index. (DOCX) S4 Table. Areas under the ROC curve (AUCs) and 95% confidence intervals of the four anthropometric indices for cardiometabolic risk factors in children and adolescents by BMI categories. (DOCX) S5 Table. Areas under the ROC curve (AUCs) and 95% confidence intervals of the four anthropometric indices for cardiometabolic risk factors among 15957 children and adolescents in the sensitivity analyses that included the outliers. (DOCX)
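For readers who want to reproduce the style of ROC analysis reported in this paper outside MedCalc, the sketch below computes AUCs for two markers measured on the same subjects and a DeLong z-test for the difference between their correlated AUCs. It is an independent reconstruction in Python, not the authors' analysis code, and the data it runs on are simulated with hypothetical effect sizes.

```python
# DeLong-style comparison of two correlated AUCs (DeLong et al., 1988).
# Independent sketch on simulated data; not the paper's analysis scripts.
import numpy as np
from scipy.stats import norm

def _placement_values(pos, neg):
    """V10 (per case) and V01 (per control) structural components."""
    psi = (pos[:, None] > neg[None, :]).astype(float)
    psi += 0.5 * (pos[:, None] == neg[None, :])  # ties count one half
    return psi.mean(axis=1), psi.mean(axis=0)

def delong_two_markers(scores_a, scores_b, labels):
    """AUCs for two markers on the same subjects, plus z and p for their difference."""
    labels = np.asarray(labels, bool)
    m, n = labels.sum(), (~labels).sum()
    v10, v01 = [], []
    for s in (np.asarray(scores_a, float), np.asarray(scores_b, float)):
        a, b = _placement_values(s[labels], s[~labels])
        v10.append(a)
        v01.append(b)
    aucs = np.array([v.mean() for v in v10])
    s10 = np.cov(np.vstack(v10))  # 2x2 covariance across cases
    s01 = np.cov(np.vstack(v01))  # 2x2 covariance across controls
    var = (s10[0, 0] + s10[1, 1] - 2 * s10[0, 1]) / m \
        + (s01[0, 0] + s01[1, 1] - 2 * s01[0, 1]) / n
    z = (aucs[0] - aucs[1]) / np.sqrt(var)
    return aucs, z, 2 * norm.sf(abs(z))

# Simulated example: ~10% prevalence (cf. hypertension), one weakly shifted index.
rng = np.random.default_rng(0)
y = rng.random(2000) < 0.10
bmi_pct = rng.normal(50, 25, 2000) + 12 * y        # hypothetical BMI percentile
whtr = 0.45 + 0.03 * rng.standard_normal(2000) + 0.005 * y
aucs, z, p = delong_two_markers(bmi_pct, whtr, y)
print(f"AUC(BMI pct)={aucs[0]:.2f}, AUC(WHtR)={aucs[1]:.2f}, z={z:.2f}, p={p:.3g}")
```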
Owning a home in Vancouver already comes at a premium. And increasingly, homeowners are facing the high cost of renovation and maintenance as tradespeople either opt out of working in the city entirely, or charge extra for having to go there. A big reason for the premium cost of hiring the trades is the city’s traffic, contractors say. Vancouver traffic is so congested, and so time-consuming, that it makes working there a losing proposition. Considering that a lot of tradespeople live outside the city, it means the options are fewer. Chilliwack, B.C.-based John Van Kammen, who owns Jovak Landscape & Design, no longer services Vancouver, citing “prohibitive” traffic and an employee base that simply doesn’t want to deal with it. Mr. Van Kammen has his theories on why Vancouver has become a no-go zone for companies such as his own. The traffic, he said, is due to the fact that people who can’t afford to live centrally have moved east, but they still work in or around the city, where the jobs are located. As well, the infrastructure has not kept up with the growth, such as Highway 1, with only two lanes from Langley to Chilliwack, even though it’s probably the busiest road in British Columbia, he said. As a result, there are daily traffic jams in that long 64-kilometre section of highway that takes Fraser Valley residents into the city. It doesn’t help that the toll was removed from the Port Mann Bridge from Surrey to Coquitlam, which has increased traffic and closed off that option to workers willing to pay the toll. And anyway, who needs congested Vancouver? So many people have moved eastward that there is plenty of construction work elsewhere in the region. On top of it all, Mr. Van Kammen’s younger workers simply don’t want to be stuck in a truck in a traffic jam half the day. “They are willing to sacrifice dollars for more time,” he said. “Everyone hates sitting in traffic, hence many of my employees who live in Abby [Abbotsford] or Chilliwack will quit if I continually send them to Vancouver. They either will not want to go to Vancouver or they will charge a hefty price for their time. “Bottom line, as younger people who typically enter the trades move east to afford homes, homeowners in Vancouver will find it exceptionally hard to find competent trades or they will have to pay much higher prices,” Mr. Van Kammen said. The housing market in Vancouver has softened greatly since a host of provincial tax measures and federal government lending measures were introduced. A flattened market often motivates people to renovate, since it’s not a good time to sell. While waiting for the market to go up, they may as well fix up the house. In a high-priced market, it also helps if there’s considerable equity to be tapped into, by way of home equity lines of credit (HELOCs). Altus Group released a report last week that said British Columbia is poised to lead the way in increased spending on home renovations. The majority of that spending will come from the 50-plus age group, because of the equity that group holds. In 2017, more than $17-billion of new borrowing in Canada was for the purpose of renovations, according to the report. Two-thirds of borrowing using HELOCs for the purpose of renovations was carried out by homeowners older than 50. Writer Jen Van Evra is constantly working on her old house on the east side of Vancouver. She said it’s getting more difficult to find trades who’ll quote on small jobs, which is making it harder to get the standard three quotes to help her determine a fair price.
She’s seeing people who get frustrated and settle for one quote, which opens the door to inflated prices and even shoddy work. “We’ve had literally dozens of tradespeople through here, and I would divide them into two camps: those who quote based on a fair or typical market price, and those who quote based on what the market will bear – basically, how much they can get away with. I think it has a lot to do with the neighbourhood, as well as the level of knowledge of the client.” Jak McCuaig, who owns a house on the east side of the city, doesn’t buy the argument that traffic is scaring off the trades. He said the bigger challenge is that a lot of tradespeople simply don’t need the small residential jobs. “I’m a consulting engineer and we work in construction. Part of my job is renovations and multiunit residential. We do lots of work for the City of Vancouver and BC Housing, so I have access to all of the best practices, and all the good contractors, all that stuff. We’re always working on several buildings in Vancouver, the whole Lower Mainland and elsewhere in B.C., so you would think I would have no problems at our own house.” Mr. McCuaig advises homeowners to get everything in writing, get referrals and make sure tradespeople have up-to-date insurance coverage. You could also consider doing the work yourself if it’s not specialized. Rod MacKay has been a realtor for 40 years and also manages about 50 properties around the city. He has lost handymen over the years owing to the high cost of commuting, and as a result is also having a tougher time finding trades. Mr. MacKay has established a relationship with a builder who’ll do the small jobs because he also gets the big jobs. For the average homeowner, however, that “carrot” is not an option. Mr. MacKay said people are renovating because those who were thinking of downsizing no longer see the value in it. The price gap between the single-family detached house they own and the price of a condo they could move into has narrowed. It means they get less value, so they stay put instead. As well, upgrading to a nicer property means extra taxes, which also means less value. So, they are fixing up their homes until it’s worthwhile to sell. “If you come onto the market with a single-family house that’s an inferior product, it will sit there,” Mr. MacKay said. Jake Fry, a builder of laneway housing, is based in Vancouver and Abbotsford, but does most of his work in Vancouver. He has made a point of hiring people who are local to keep turnover low. He recently lost a key Vancouver employee because he was commuting 15 hours a week by car. Getting to work early in the morning is easy enough; however, getting home in rush-hour traffic is exhausting. Even within the city, traffic is so congested and time-consuming that he has had to make efficiency adjustments. To retain staff, Mr. Fry now looks for office staff who are close enough to get to work by bike or transit. To keep fuel costs down, he has created a system so crews don’t have to traverse the city from one end to the other. With projects throughout the city, he has assigned work teams according to neighbourhoods. But the rising cost of materials is also having an impact, as is the fact tradespeople are in high demand and rates have gone up. Bryan Roberts is a builder who lives in the city and he said he’ll avoid jobs that require too much car time. As a homeowner, he tries to do all his maintenance and repair himself because he can’t find other trades to do small jobs.
import React, { useLayoutEffect } from 'react'
import Layout from '../../components/Layout'
import { CustomerLogosSection } from '../../components/product/CustomerLogosSection'
import { createHubSpotForm } from '../../components/HubSpot'

// "Request a demo" page: renders a HubSpot-backed lead form beside the
// customer logos section, inside the minimal site layout.
export default ((props: any) => {
    // Create the HubSpot form once, on mount, before the browser paints.
    // The empty dependency array ensures the effect never re-runs.
    useLayoutEffect(() => {
        createHubSpotForm({
            portalId: '2762526',
            formId: '310000a0-2b6b-4da2-89e9-2be930a8a298',
            targetId: 'hubspotRequestDemoForm',
        })
    }, [])
    return (
        <Layout location={props.location} minimal={true}>
            {/* HubSpot forms embed library; the form is injected into #hubspotRequestDemoForm below. */}
            <script charSet="utf-8" type="text/javascript" src="//js.hsforms.net/forms/v2.js" />
            <div className="bg-white text-dark">
                <div className="container-lg py-6 px-5">
                    <div className="row flex-wrap-reverse">
                        <div className="col-md-6">
                            <h1 className="display-4">Request a demo</h1>
                            <h3 className="font-weight-light text-sans-serif">
                                To schedule a demo with a product specialist, tell us a bit about yourself.
                            </h3>
                            <div className="form mt-5">
                                {/* Target container that createHubSpotForm() fills in. */}
                                <div id="hubspotRequestDemoForm" className="d-flex justify-center" />
                            </div>
                        </div>
                        <div className="col-md-6">
                            <CustomerLogosSection className="mt-3 mb-6" />
                        </div>
                    </div>
                </div>
            </div>
        </Layout>
    )
}) as React.FunctionComponent<any>
Public Policy and Network Governance in Ghana: Challenges for Urban and Regional Development Does network governance really work? What difference does network governance make in promoting sustainable environmental governance? How can the performance of network governance be determined? And what factors contribute to success and failure? This study evaluates the performance of network governance and its implications for contemporary and future governance arrangements in the environmental sector. The study relied primarily on small-N qualitative methods of data collection, through a series of semi-structured interviews with key stakeholders: network participants, national- and regional-level government officials involved in the selected governance networks, district-level officials and local elected politicians, non-governmental organization actors, and traditional authorities at the district and local sites. The analysis presented here demonstrates that the relative effectiveness of network governance is highly contextual and contingent on the problem involved. The findings show that governance networks are most likely to be effective where the problems to be addressed are complex, multi-faceted, and resource-demanding, such as slum upgrading, pollution, and land degradation. The study also found that governance networks are more likely to be effective where there are clear positive incentives, such as the provision of government funding and donor support, or negative incentives, such as peer pressure or regulatory defaults. Additionally, measures to reduce transaction costs and decision rules that foster consensus building are vital to the effectiveness of governance networks. In the light of the chronic deficiencies of hierarchical and market-based resource management mechanisms in Ghana, epitomized in the urban environment and land resource sectors, bringing public, private, and civil-society actors together for frank political debate on resource governance is becoming urgent. It is argued that addressing the restraints on the role of traditional authorities in resource governance will improve the social and environmental performance of land resource and mining activities in the country. Network governance is more likely to be effective in policy settings where negotiated solutions among stakeholders are seen as appropriate and necessary, and thus where purely technical solutions are not possible. It permits the different sources of competence and knowledge provided by the different stakeholders to be integrated, especially when the network arrangement fosters efficient social learning and information sharing. Consequently, there is euphoric praise of the capacity of network governance to address modern multi-faceted societal development problems. Yet despite its increased prominence and countless publications about its supposed success, the actual performance of network governance has received little attention in the literature. Concern about network governance performance has been raised in recent years, due in part to a series of highly publicized network failures. Provan and Kenis have emphasized the need to assess the actual performance of network governance, since the literature currently lacks empirical evidence to support the euphoric praise of network governance, in contrast to the volume of publications on the performance of single actors or groups of actors within networks.
Moreover, the conditions under which we can account for success or failure in network governance processes are under-researched, leaving the questions of what successful network governance is and how to evaluate failure still open. This study aims to bridge this gap in the literature by evaluating the actual performance of network governance. A qualitative case study approach was used to compare two major network governance projects in the environmental sector of Ghana.

Statement of Research Questions

The performance of network governance has been questioned in recent years. Bob Jessop, for example, has drawn attention to the fact that network governance, like other modes of governance, can fail to deliver. Researchers and policy analysts have over time overlooked the risks involved in substituting network governance for failing hierarchies or markets, and the resulting possibility of network failure. Klijn and Koppenjan have also questioned the performance of network governance. According to them, a positive picture of network governance has been extensively highlighted in the literature; the biggest limitation, however, is the lack of clearly specified conditions and mechanisms that account for the actual performance of network governance and for the conditions under which we can find 'success' or 'failure'. The problem is further compounded because networks bring to the table a diversity of stakeholder views and thus diverse perspectives on processes, goals, and outcomes. How do we assess the performance of governance networks under these conditions of negotiated processes and objectives, with complex causal pathways? These deficits and limitations in the literature make this a critical and important research topic for governments, policy analysts, and practitioners. The study sought to examine the mechanisms that affect the performance of governance networks, with the aim of identifying the conditions that account for success or failure. The central research question that arises is: what are the sources and mechanisms of network governance performance, and what factors contribute to success and failure? To address this central question, the following sub-questions were formulated: What conditions are essential for cooperation in a governance network? What kinds and levels of participation are necessary for an effective governance process? How, and to what extent, do better-resourced and well-placed participants, such as elected officials and industry players, influence the decision-making process? To what extent are the decisions and policies made in the PSUP and GRMA projects subject to accountability? How, when, and to what extent can the stakeholders gather, analyze, and learn from information on their progress in practice? These questions were addressed by means of a comparative evaluative case study of the Participatory Slum Upgrading Project (PSUP) and the Ghana Responsible Mining Alliance (GRMA) in Ghana. In addressing the research questions, the analyses were based mainly on stakeholders' perceptions of the performance and progress of the networks and of the conditions that contributed to their success or otherwise.

Study Objectives

The main purpose of this study was to assess the actual performance of network governance, with the aim of identifying the conditions that account for success or otherwise. The study uses two case studies whose main focus is on promoting good environmental governance.
Other related objectives included providing deeper insight to the government on how to organize network governance and on the limitations of relying on networks. The study also aimed to reveal to the stakeholders and network participants how the networks function and how the relative positions of the participants involved affect the overall outcomes of the alliances. Another objective was to increase our understanding of the procedural conditions that affect the process of building successful network governance; hence the need to compare the two cases to illuminate variations in the degree to which successful networks were built. In other words, what procedural and institutional reasons account for these variations? In doing so, the study intended to provide an updated review of the conditions and mechanisms that affect governance network effectiveness. The analyses were therefore substantially based on the sustainability of good processes (i.e. whether good processes have been embedded for building sustainable results) at the network level. The study was limited to assessing only the sustainability of good processes because the empirical cases under investigation were all relatively new, and the environmental outcomes of governance networks typically take a long period of time to manifest; hence it was too early to assess outcomes. The present study therefore did not cover outcome-oriented performance: such an assessment would require before-and-after data, which were lacking for the empirical cases, and such outcomes cannot easily be assessed in any event, as they are influenced by multiple factors and actors beyond the activities of the stakeholders in the PSUP and GRMA projects.

Research Approach: Evaluation Research

Evaluating the performance of networks is an important task, since networks are often set up to create certain values for society; their performance should therefore be assessed at some point to find out whether they contribute to fulfilling these ambitions. Evaluation research normally seeks to examine and judge the processes and outcomes of intervention attempts aimed at solving problems or bringing about change in society. Two main types of evaluation research can be identified in the network governance literature: 'formative' and 'summative' evaluations. The purpose of summative evaluations has always been to assess the overall performance of an intervention program, whereas formative evaluations tend to focus on formulating policy recommendations on the basis of which an intervention program can be improved. In addition, whereas summative evaluations can be generalized to future efforts and to other programs, the usefulness of formative evaluations is limited to the specific setting studied. Since the main purpose of this study was to evaluate the overall performance of network governance, it was situated within summative evaluation research. In carrying out this summative evaluation research, small-N qualitative methods of data collection were utilized. Primary data were collected through semi-structured interviews with twenty-six key stakeholders: network participants, national and regional government officials involved in the selected governance networks, district officials and locally elected politicians, non-governmental organization (NGO) actors, and traditional authorities at the district and local sites.
Published materials and policy documents were also consulted. Purposive sampling was used to select participants for the study on the basis of their knowledge of the existence, functioning, and operations of the networks. At the first official meetings with the organizations, the author was able to identify appropriate participants for inclusion in the study. All participants were approached to take part in the research voluntarily and received an explanation of its purpose. The author told them that there were no right or wrong answers to the questions, that they could withdraw from the study at any point without giving a reason, and that their names would not be used anywhere in the research unless they explicitly asked for this. The author was completely open with participants about the intention of the research, and this motivated them to provide as much information as possible.

Criteria for Selecting Cases

This study utilized a comparative case study design with embedded units of analysis. The goal was to fill the gap left by existing studies, which have mainly tended to be single-case, project-related evaluations with limited institutional analysis. Consistent with the network governance theoretical approach, the cases exhibit great variation across a range of variables, including the scale of action, their problem focus, the mix and roles of participants, their length of existence, and their policy contexts, which makes it appropriate to compare and evaluate their performance. Although other programmes could have been included in the study, given the inevitable time and budget limitations the author focused on these two programmes, with the main objective of capturing differences in, for example, the policy domain in which the problems are located, the complexity and challenges of the problem, the key stakeholders and their roles in the network, and the functional focus of the collaborative activities. The criteria for selecting the two cases are summarized in Table 1, which lists the cases against the policy domain in which the problems are located.

Theoretical Approach: Network Governance Theory (NGT)

The study was carried out through the lens of network governance theory (NGT). Proponents of the NGT approach argue that public policy-making and implementation increasingly take place within a multi-layered polity, formally organized by governments at central, regional, and local levels. At all levels, policy-making involves elected politicians as well as powerful executive administrators. However, it also involves other actors, such as lower-ranking administrators, interest organizations, private and public firms, popular movements, and citizen groups. This is what is termed governance within and through networks of interdependent but operationally autonomous actors involved in the production of public policy. Network governance describes an inter-organizational medium for interest mediation between autonomous and interdependent, but antagonistic and conflicting, actors, each of whom has a resource base of their own. Networks are mostly formed through the strategic actions of independent actors who interact in order to benefit from the resource bases of other actors (such as knowledge, innovative ideas, funding, and formal authority) in regulating and governing a particular policy sector.
According to Sørensen and Torfing, decision-making in governance networks is based on continuous interaction, compromise, and negotiation among stakeholders, and consensus becomes possible because of stakeholders' mutual resource dependencies in addressing public problems that individual actors cannot solve unless they collaborate. The theory highlights that governance networks are incremental, bottom-up processes in which politicians strategically select certain actors to participate in the policy-making and implementation process. Governance networks are believed to accommodate different antagonistic interests, mainly through internal power-struggle mechanisms, but importantly they are held and bound together by their mutual interdependence, which ultimately promotes compromise and provides a relatively stable environment for negotiation and for the effective and efficient delivery of network goals. Consequently, the theory argues that public policy-making is increasingly becoming intertwined with forms of interactive networks which, in many cases, are not prescribed by constitutions, legal frameworks, or statutes. These the proponents call governance networks: they are neither market, nor government, nor civil society instruments; they are hybrid organizational forms. This study understands network governance as the integration of a number of interdependent but autonomous actors conducting negotiations based on a common understanding of factors such as regulation, norms, and shared perceptions. On this understanding, network governance coordinates policy decisions and is, to some degree, self-regulating and self-steering. However, there will be some steering or control of the networks by central government, and such orchestration is here conceptually termed meta-governance. According to Rhodes, network governance is flexible, proactive, autonomous, self-organizing, and self-governing. For these reasons, it has been concluded that network governance, in order to succeed, requires a high level of trust among stakeholders, reputation, mutual interdependence, and reciprocity. Network governance thus provides a reflexive alternative to the procedural rationality of the market and the substantial rationality of the state. In the wake of the failure of market and state-controlled instruments, network governance rises like a phoenix from the ashes, with the capacity to build platforms for collaborative processes that can enhance the quality of public policies and the delivery of public services.

Is Network Governance a Panacea for Promoting Sustainable Environmental Governance?

In the wake of the central state's apparent failure to govern complex environmental problems, network modes of governance have been proposed in recent years. Network governance is the mode most commonly associated with the concept of governance, in which autonomous actors work together to achieve common goals (ibid.). The emergence of network governance can be characterized as an attempt to take into account the increasing importance of the private sector, NGOs, scientific networks, and international institutions in the performance of various functions of governance. Prominent examples of such networks that have been instrumental in forming successful working arrangements are the Global Environment Facility, the World Commission on Dams, and the flexible mechanisms of the Kyoto Protocol.
Another ongoing effort of governance networks in environmental governance is the United Nations Global Compact, which combines multiple actors in a trilateral construction, including representatives from governments, the private sector, and the NGO community, to address environmental challenges. Dedeurwaerdere and Haas have emphasized that one main reason for the proliferation of network approaches in environmental governance is their potential to integrate and make available different sources of knowledge and competence from different sectors, and to encourage individual and collective learning. According to Newig et al., environmental governance currently faces various challenges characterized by the complexities and uncertainties inherent in environmental and sustainability problems. In the view of Head, network governance can provide a means to address these governance problems by institutionalizing learning on facts and deliberation on value judgments. A critical example is in the realm of global chemical safety, where transnational networks have formed around initiatives by international organizations and successfully developed rules for addressing global chemical issues, many of which have been implemented in national legislation across countries. Most importantly, these transnational networks made it possible to avoid the institutional apathy that is typically found in political settings with many actors holding conflicting interests, especially at the global level. According to Dedeurwaerdere, by integrating actors from different sectors of society, governance networks are able to provide an innovative learning environment, paving the way for adaptive and effective governance. One particular characteristic of networks important to solving complex, multi-layered environmental problems is their formation of 'epistemic communities', in which actors share the same basic causal beliefs and normative values (Dedeurwaerdere). Although participation in these networks requires a stakeholder's interest in the policy problem at stake, the actors involved do not necessarily seek to promote the same interests. In general, the interests are interdependent but can also be different or sometimes conflicting, emphasizing the need for consensus building and the development of cognitive commonalities. The conclusion drawn from previous studies suggests that the advantage of network governance over traditional state command-and-control regulation or, alternatively, the use of market instruments, lies in its capacity to deal with situations of intrinsic uncertainty and decision-making under bounded rationality. This is typically the case in the field of environmental governance, where one has to deal with complex, multi-layered, and interrelated problems. In these circumstances, network governance can create a synergy between different competences and sources of knowledge from all segments of society, making it possible to deal with complex and interlinked problems.

Evaluating Network Performance: Conceptual and Theoretical Challenges

The underlying question in assessing the performance of network governance is whether governance networks make a difference to the quality of service provision and to addressing complex public policy issues. In other words, what value emerges from network-based governance? Provan and Milward identified three levels of analysis for evaluating the performance of governance networks involved in community-based health and human services.
These levels comprised: (i) the network as an organization, (ii) the community, and (iii) the organizational participants. They outlined three categories of constituents which need to be considered: "principals, who monitor and fund the network and its activities; agents, who work in the network both as administrators and service-level professionals; and clients, who actually receive the services provided by the network". According to them, the assessment criteria might include a mix of process issues (such as network survival, membership interaction, network growth, and service co-ordination) and outcome issues at different levels (such as impact on clients, range of services, and cost-effectiveness of services). In the area of human services provision for disadvantaged communities, Connell et al. and Schorr highlighted the need for rigorous research on the impact of network programmes on disadvantaged population groups, for example evidence-based programme evaluations using rigorous methods, including randomized controlled trials. However, methods such as randomized controlled trials may be misleading and insufficient, owing to problems such as the limited formal knowledge that can be derived from a network programme. In this regard, other sources of knowledge are required, because "too many programs are multidimensional, cannot or should not be standardized, evolve or adapt through time, require stakeholders' active involvement, or are heavily dependent for success on good implementation, not just good design". Schorr therefore supports the need for more flexible forms of assessment that require researchers to work jointly in pooling their knowledge. In relation to addressing complex, interconnected, multi-faceted environmental issues, network governance has persistently been argued to have the capacity to develop multiple strategies to address a number of interconnected issues at both the local and regional levels. This has led some scholars to use multiple criteria for assessing the performance of network-based projects. Leach and colleagues, for example, in their study of the US Watershed Management Initiatives, used six types of output and outcome criteria to assess the performance of the initiatives: "the perceived effects of the partnership on specific problems in the watershed; implementation of agreed restoration projects; perceived effects of the partnership on human and social capital; the extent of agreement reached among the stakeholders; collection of data for monitoring the effects of actions taken; and undertaking education and outreach projects". The limitation, however, is how to guard against premature judgments by funders and public officials, since governance networks may take some time to overcome distrust, educate stakeholders, secure funding, reach agreements, and begin implementation. Relying solely on the 'satisfaction' expressed by stakeholders, as in previous studies, also poses great limitations, because such views may be misleading and a poor guide to whether enduring benefits are being achieved for the environment. In the view of Coglianese, all of this highlights the importance of first assessing the sustainability of good process characteristics.
Only recently have public administration theorists such as Sørensen and Torfing attempted to assess the performance of governance networks in terms of their ability to effectively accommodate competing valid goals and interests in the face of continued disagreement. In their study, they evaluated the performance of governance networks in terms of their ability to deliver their purported benefits in practice; that is, to provide effective and efficient negotiated interaction between a plurality of independent public and private actors in addressing policy problems. According to Sørensen and Torfing, the success or failure of network governance cannot be prescribed beforehand. In their view, the performance of governance networks most often depends on institutional capacity, the deliberative process, and the use of management techniques. Accordingly, proponents of governance network theory have emphasized the process of facilitating interaction and cooperation among stakeholders; the inclusion and meaningful participation of all affected stakeholders; flexible deliberation; horizontal accountability; and mechanisms to enhance learning and institutional memory among the stakeholders as positively influencing the building of sustainable good processes in governance networks. The study used these five crucial, interconnected variables to assess the sustainability of good processes in the PSUP and the GRMA. Hence, the independent variables used in the study consisted of the factors and conditions that governance network theory postulates could positively or negatively affect the building of long-term multi-stakeholder commitments. The five conditions (i.e. independent variables) were: collaboration, participation, deliberation, learning, and horizontal accountability. How these variables affect and influence the building of sustainable good processes (performance) in network governance is formulated below (Figure 1).

Results

For the purpose of capturing a diversity of views to facilitate comparisons between the two networks, the respondents were primarily selected on the basis that they capture the main stakeholder types involved in or connected to each network programme. Again, owing to time and budgetary limitations, it was not feasible to select numbers of participants that precisely reflect the proportion and distribution of stakeholder types in each case under investigation. The key study findings are structured around the outcomes on the five evaluative criteria discussed above.

The process of fostering successful collaborative governance

To enhance collaboration and to sustain an environment in which all relevant and most affected actors can participate in the deliberation process, governance networks are believed to be designed in such a way that they provide in-kind incentive support, such as funding, to reduce or offset the transaction costs of participating. Like the PSUP project, the GRMA was designed and initiated through a Memorandum of Understanding (MoU) between the stakeholders. Harnessing institutional regulation, these MoUs served as an incentive to get affected and relevant participants to take part in the governance process.
While direct legal regulation remained evident in both networks, the incentives and other persuasive mechanisms adopted by the designers in both cases offered a less costly and less conflict-ridden environment in which actors could come to the collaborative and deliberative table to discuss and negotiate solutions to public problems. However, the GRMA project was able to foster a more sustained and successful collaborative network process than the PSUP. This was mainly attributed to government financial support and USAID technical support which, coupled with the small number of affected residents in the communities, reduced the transaction costs of participating and ensured that the GRMA project captured all relevant and most affected stakeholders. Having engaged all the key affected actors who should be at the collaborative and negotiating table, the GRMA project fostered easy agreement on the plan of action and the policy targets to be implemented. Furthermore, the cooperative approach and local residents' high level of trust in the lead industries helped reduce the transaction costs of negotiating feasible and implementable targets for observing responsible mining principles to improve the lives of local residents. Additionally, the high level of trust among stakeholders shortened the time it took to negotiate and set targets and to implement them. This improved relationship between the major stakeholders and local residents helped the GRMA project to achieve almost unanimous agreement on the development targets set. This substantial unanimity and consensus on targets was underpinned by shared trust among stakeholders that each would commit resources to the implementation of plans and abide by the decision rules stipulated in the MoU. In comparative terms, the PSUP focused on addressing more complex problems across larger slum areas and involved a greater number of stakeholders than the GRMA project. On the basis of findings from previous studies, the inclusion of a greater number of participants in the network should have led to less collaboration than in the GRMA project. However, the initiation and design process of the PSUP project brought about some level of trust and awareness, as well as sustained collaboration between the stakeholders, to address the problem of slum growth. The presence of regular in-kind incentives, such as funding from the Ghana government, the Shack and Slum Dwellers International (SSDI), and other international donor NGOs, encouraged and sustained other relevant stakeholders, such as the Kayayo Youth Association (KYA) and women's savings groups, in committing themselves and participating meaningfully in the deliberation process. The initial high levels of trust, cooperation, and commitment motivated participants to come together and agree on a common plan of action to reduce slum growth and improve the lives of poor slum dwellers. As one resident put it, "the continual financial support from government and its commitment towards improving the lives of slum inhabitants coupled with the support from the Shack and Slum Dwellers International (SSDI) meant that those stakeholders who are engaged in the negotiation process had greater motivation to share information and other resources to make meaningful improvements in the communities other than symbolic commitment".
This study concludes that, in the PSUP project, there was strong, need-centered, representative inclusion of local slum dwellers with the capacity to consolidate and use resources in creating new development options for safe, secure, and decent shelter conditions. The conclusion drawn from many of the interviewees was that, thanks to the inclusion and participation of relevant and affected actors and the genuine commitment of the lead NGOs, the SSDI, and the government, the PSUP project achieved substantial success in improving the living conditions of slum dwellers in the Ga Mashie and Old Fadama communities. To summarize, the initiation and design process of the PSUP network brought about a high level of trust, improved relationships among the stakeholders through the awareness the programme created, and ultimately generated effective action, owing to the presence of in-kind incentives and cooperative approaches that motivated local residents and other stakeholders to come to the deliberation and negotiation table.

The process of designing inclusive participation and representation of relevant stakeholders

The study findings reveal that non-state actors, civil society organizations, and environmental interest groups are represented in both the PSUP and GRMA projects. Nonetheless, the respondents reported a lack of genuine participation in the decision-making and negotiation processes. Although there were satisfactory levels of participation by local residents and interest groups, many respondents pointed to the under-representation, in both cases, of women's groups, other relevant environmental interest groups, and government stakeholders. For example, the Environmental Protection Agency's (EPA) input into the design and implementation of environmental sustainability programs was minimal. Although there were little reliable and detailed demographic data on the study communities in either project, the most affected and disadvantaged interests, such as women in slum communities and farmers in mining communities, were proportionally under-represented relative to men and the NGOs in the governance process. These gaps in the engagement of local environmental interest groups, women, farmers, and other local government bodies in the planning and setting of action plans affected the inclusive implementation of targets, especially in the GRMA project. A third limitation on participation and inclusive governance that respondents in both cases consistently pointed to was the inability to obtain a rough balance of all relevant and affected actors. It was observed that both network programmes lacked any significant representation beyond the already active networked stakeholders (Interview, 2014, Asutifi District). Despite these lapses in participation and representation, a positive finding from both projects was significant success in creating awareness and a platform for local residents to hold duty bearers accountable, even through symbolic inclusion and participation. For example, most slum dwellers and farmers had no previous connection to organized community groups, or influence over any decision-making regarding slum upgrading or the environmental standards and performance that mining companies should observe.
As one community resident put it, prior to the establishment of the GRMA project, "community residents had little or no knowledge at all about any EPA regulations and standards, building permits requirements, noise limits and the responsible mining principles mining companies should observe."

The process of enhancing deliberative decision-making

Policy formulation and implementation in both the PSUP and GRMA projects involved multiple private NGOs and public stakeholders, as well as community and other civil society organizations, engaging in complex and continuous negotiation, deliberation, and mediation processes to reach agreement on the most suitable and implementable policy targets. Throughout the study, it was revealed that the presence of more skilled and better-resourced NGOs and industry players meant that the few women, farmers, and local government bodies and actors had little opportunity to express their opinions and influence the decision-making process. The under-representation of the most disadvantaged women and farmers created an imbalance in skills, capacities, local knowledge, and experience, and this affected the development of comprehensive targets to enhance the living conditions of slum dwellers and of farmers in mining communities. Respondents consistently raised concerns about power imbalances in the governance process, which ultimately undermined the input of local residents and local government bodies and allowed the NGOs and industry players with resources and technical capacities to dominate the decision-making process. Although the nature of deliberative decision-making was observed by examining how often local residents, government, and non-government actors were at the negotiating table to seek solutions to complex developmental challenges based on their own experiences, resources, and knowledge, a key interest of the study was how the negotiation and decision-making processes between these actors unfolded; that is, how problematic these complex interactions and negotiations between non-governmental organizations, community stakeholders, government, and industry players have been. It was revealed that in both programmes the decision-making processes had sometimes been associated with conflict and self-aggrandizing bargaining stances, which respondents attributed to the power imbalances between the participating stakeholders. The principle of fair and genuine deliberation in network governance, which seeks to open up the process and include all diverse interests and views irrespective of power base, was questioned by many respondents. The result is that the technical knowledge, skills, and capacities, as well as the monetary resources, possessed by the lead NGOs and the mining companies, together with their over-representation in the deliberative process, often overrode and marginalized local community participants. This was most evident in the GRMA project, where respondents described the decision-making, policy-action planning, target writing, consultation, and monitoring processes as mainly the responsibility of the power wielders in the network (Interview, 2014, Asutifi District Assembly).
The conclusion to be drawn from both cases is that it is very difficult for local community stakeholders to influence policy outcomes; under-represented women's groups, unskilled farmers, and uninformed local government bodies felt they were merely symbolic participants, as they could not influence policy decisions or their implementation.

The process of enhancing horizontal accountability

The findings revealed that, although local community stakeholders and local government bodies had little influence over policy decisions and target setting, stakeholder oversight, the creation and existence of monitoring and evaluation teams, and the complementary interaction among stakeholders ensured that performance could easily be assessed and monitored. These innovative horizontal, performance-based accountability mechanisms significantly reduced the abuse of power and enabled local stakeholders to hold duty bearers accountable with regard to the appropriation of funds and resources (Interview, 2014, local resident). This ensured the attainment of some level of success in both cases. However, it was revealed that these horizontal accountability mechanisms for ensuring local ownership of development projects were more effective in the PSUP than in the GRMA. It was reported that in the PSUP project the evaluation and monitoring team performed its duties adequately and, in addition, designed reasonable management control systems to ensure that set targets were met within reasonable time limits. As reported by one respondent, the PSUP has developed its own management plans to make sure funds are directed towards planned projects. Additionally, programme evaluators monitor subsequently implemented projects to ensure that the network follows in detail the governance processes and the recommendations of the evaluation team. Unlike in the PSUP, the GRMA's monitoring and evaluation of implemented targets was very weak and infrequent. As one respondent reported, "the cost and technical skills associated with collecting credible data about plans implemented and its impacts posed a serious challenge in the GRMA programme". Mutual interaction and accountability were also reported to be limited; hence there was always a mismatch between the target plans set and the knowledge, experience, and resources needed to carry them out. In comparative terms, the consistent interaction and mutual accountability between stakeholders and the slum communities in the PSUP ensured significant success in securing decent housing facilities and potable drinking water for slum communities. Relatedly, it was observed that the existence of credible data about slum communities, gathered through enumeration, profiling, and mapping, ensured that the network set credible, achievable, and verifiable targets. Compared to the GRMA, the availability of data and of adequate knowledge, clear understanding, and experience of slum development gave the network the opportunity to set demonstrable and achievable programs to improve living conditions in slum communities.

The process of building future cooperation through learning and adaptation

The findings on this variable revealed that the PSUP project has developed better structures and capacity for devising workable solutions to slum and other environmental problems, in a more reflexive and strategic manner than the GRMA project.
As reported during the interviews, savings and group formation, peer-to-peer exchanges, enumeration, mapping, and profiling, as well as dialogue and partnership building, had developed local residents' capacity to advocate for and influence policies that affect their daily lives. In particular, respondents in the slum communities reported that the PSUP governance processes had made the network stakeholders, especially the slum communities, more sensitized to the problem of slums and the risks it poses to their social and economic well-being. This has also ensured continual learning about slum impacts and subsequent demonstrable projects to reduce further slum development. In addition, the stakeholders' long-term involvement in the governance process had reportedly improved their self-regulatory skills and their capacities in target setting, planning, and risk analysis. This had also ensured a continuous search for demonstrable and implementable projects and plans to reduce slum growth and improve the living conditions of slum dwellers, in contrast to the GRMA project. Through observation, it was discovered that this process-based approach to learning was not adequately instituted in the GRMA project. The author's observations were further confirmed by the interview data, in which respondents highlighted a general lack of stakeholder commitment, especially among the mining companies, towards environmental standards, which translated into a strong reluctance to build local capacity to advocate for and influence policy decisions.

Conclusion

It has been established that networks are more likely to be effective in policy settings where negotiated solutions among stakeholders are seen as appropriate and necessary, and thus where purely technical solutions are not possible. Additionally, Robert Agranoff and Michael McGuire have emphasized the capacity of governance networks to bring innovation to the public sector, yet they have highlighted the issue of collaborative 'effectiveness', which is fundamental but has been incompletely addressed. Sabatier et al. argue that the governance network literature tends to focus on the inclusion and participation of all affected stakeholders, such as resource users and local community actors, in natural resource management. This paper has explored the conditions and mechanisms that affect the performance of governance networks, with the aim of identifying benchmarks for evaluating the success or failure of governance networks in the land resource and mining sectors of Ghana. The discussion shows how the "process" of forming a governance network affects its performance. The conclusion drawn from this study is that, although network governance has enormous capacity as a result of the different knowledge, expertise, resources, and skills that the various stakeholders bring on board to address complex sets of interconnected environmental problems, the mechanisms for fostering and ensuring effective collaboration, participation, deliberation, accountability, and learning largely account for the relative success or failure of governance networks. Ensuring that these critical requirements are met therefore requires the willingness of the stakeholders, especially the network managers, to address the restraints that appear to paralyze trust and donor support. Civil society organizations, such as local interest groups and small-scale miners' associations, are also seen as potentially fruitful stakeholders who possess diverse local knowledge and authority.
The ability of network managers to strategically include all these stakeholders and to provide an equal platform for deliberation is found to be critical in building successful governance networks that deliver sustainable environmental results.