// Repository: s-take/knowledge-like
// HTTP handlers for "like" resources. They are expected to be registered on an
// echo router elsewhere in the repository; the routing itself is not part of this file.
package handler

import (
	"net/http"
	"strconv"

	"github.com/labstack/echo"
	"github.com/s-take/knowledge-like/model"
)

// AddLike binds the request body to a Like, validates it, attaches the
// authenticated user's ID and persists the record.
func AddLike(c echo.Context) error {
	Like := new(model.Like)
	if err := c.Bind(Like); err != nil {
		return err
	}
	if Like.KnowledgeID == 0 {
		return &echo.HTTPError{
			Code:    http.StatusBadRequest,
			Message: "invalid fields",
		}
	}
	uid := userIDFromToken(c)
	if user := model.FindUser(&model.User{ID: uid}); user.ID == 0 {
		return echo.ErrNotFound
	}
	Like.UserID = uid
	model.CreateLike(Like)
	return c.JSON(http.StatusCreated, Like)
}

// GetLikes returns every like in the store.
func GetLikes(c echo.Context) error {
	Likes := model.ListLikes()
	return c.JSON(http.StatusOK, Likes)
}

// GetLikesByUserID returns the likes created by the authenticated user.
func GetLikesByUserID(c echo.Context) error {
	uid := userIDFromToken(c)
	if user := model.FindUser(&model.User{ID: uid}); user.ID == 0 {
		return echo.ErrNotFound
	}
	Likes := model.FindLikes(&model.Like{UserID: uid})
	return c.JSON(http.StatusOK, Likes)
}

// GetLikesByKnowledgeID returns the likes attached to the knowledge entry
// identified by the ":id" path parameter.
func GetLikesByKnowledgeID(c echo.Context) error {
	KnowledgeID, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		return echo.ErrNotFound
	}
	Likes := model.FindLikes(&model.Like{KnowledgeID: KnowledgeID})
	return c.JSON(http.StatusOK, Likes)
}

// DeleteLikeByUserID removes the authenticated user's likes.
func DeleteLikeByUserID(c echo.Context) error {
	uid := userIDFromToken(c)
	if user := model.FindUser(&model.User{ID: uid}); user.ID == 0 {
		return echo.ErrNotFound
	}
	if err := model.DeleteLike(&model.Like{UserID: uid}); err != nil {
		return echo.ErrNotFound
	}
	return c.NoContent(http.StatusNoContent)
}
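For context, a minimal Python client sketch showing how the handlers above might be exercised over HTTP. The route paths, port and bearer-token header are illustrative assumptions (routes are registered elsewhere in the repository and are not shown in this file); only the handler behaviour (request body, ":id" path parameter, status codes) comes from the code above.

# Hypothetical client for the like handlers; paths, port and auth header are
# assumptions for illustration, not taken from the repository.
import requests

BASE = "http://localhost:8080"
HEADERS = {"Authorization": "Bearer <jwt>"}  # userIDFromToken(c) implies token-based auth

# AddLike expects a bound Like with a non-zero KnowledgeID (JSON field name assumed).
r = requests.post(f"{BASE}/likes", json={"knowledge_id": 1}, headers=HEADERS)
print(r.status_code, r.json())   # 201 Created with the stored Like on success

# GetLikesByKnowledgeID reads the ":id" path parameter.
r = requests.get(f"{BASE}/likes/knowledge/1")
print(r.status_code, r.json())

# DeleteLikeByUserID removes the authenticated user's likes and returns 204 No Content.
r = requests.delete(f"{BASE}/likes", headers=HEADERS)
print(r.status_code)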
Oak Grove High's Shelly Songy to attend State-Federal STEM Summit in Washington, D.C.
Shelly Songy, a teacher at Oak Grove High School, is attending the first State-Federal Science, Technology, Engineering and Math Education Summit Monday and Tuesday in Washington, D.C. The summit, hosted by the White House Office of Science and Technology Policy, has convened a diverse group of STEM leaders, including officials from governors' offices, K-20 educators, workforce and industry representatives, state policy experts and non-governmental organization executives. These attendees are participating in the development of a federal five-year STEM Education Strategic Plan. "This event is the first time an administration has asked for this level of state input when developing a federal STEM education strategy," said Jeff Weld, senior policy adviser and assistant director for STEM education at the Office of Science and Technology Policy. "Top-down approaches to STEM education can often yield wonderful ideas, but it's at the state and community level where the momentum happens. State leaders know best what kinds of programs will work in their communities and where they need the power of the federal government to help drive success in this field." Congress established the Office of Science and Technology Policy in 1976 to provide the president and others within the executive office with advice on scientific, engineering and technological aspects of the economy, national security, homeland security, health, foreign relations, environment and technological recovery and use of resources.
Symbiotic Effectivity of Dual and Tripartite Associations on Soybean (Glycine max L. Merr.) Cultivars Inoculated With Bradyrhizobium japonicum and AM Fungi
Soybean (Glycine max L. Merr.) is regarded worldwide as indisputably one of the most important crops for human food and animal feed. The presence of symbiotic bacteria and fungi is essential for soybean breeding, especially in low-input agricultural systems. Research on the cooperation between different microbial symbionts is key to understanding how the health and productivity of the plant are supported. The symbiotic effectivity of dual and tripartite symbiotic agents was investigated in two pot experiments on different soybean cultivars, with special regard to compatibility. In the Selection experiment, two out of sixteen soybean cultivars (Aliz, Emese) were chosen on the basis of their drought tolerance and used in all the other investigations. In the Compatibility experiment, the compatible coupling of symbiotic partners was selected based on the efficiency of single and co-inoculation with two Bradyrhizobium japonicum strains and two commercial arbuscular mycorrhizal fungal (AMF) products. Significant differences were found in the infectivity and effectivity of the microsymbionts. Rhizobial and AMF inoculation generally improved plant production, photosynthetic efficiency and root activity, but this effect depended on the type of symbiotic association. Despite the low infectivity of AMF, inocula containing fungi were more beneficial than those containing only rhizobia. In the Drought Stress (DS) experiment, co-inoculated and control plants were grown in chernozem soil originating from organic farms. Emese was more resistant to drought stress than Aliz and produced a bigger root system. Under DS, the growth parameters of both microbially inoculated cultivars were better than those of the controls, proving that even drought-tolerant genotypes can strengthen their endurance through inoculation with AMF and nitrogen-fixing bacteria. Root electrical capacitance (CR) showed a highly significant linear correlation with root and shoot dry mass and leaf area. The same root biomass was associated with higher CR in inoculated hosts. As the CR method detects the increase in absorptive surface due to inoculation, it may be used to check the efficiency of the microbial treatment.
INTRODUCTION
Soybean (Glycine max L. Merr.) is one of the most widely cultivated crops throughout the world under various climates. Future scenarios show that soybean production will expand by 30% over the coming decade, so optimizing the cultivation of this crop has important economic and political implications. Achieving this purpose, however, is complicated by the extreme weather conditions caused by global climate change. Crop growth and yield are impaired by both abiotic and biotic stress conditions, of which drought has been identified as the most important factor limiting the productivity of grain legumes (). Drought sensitivity coupled with high nutrient demand may seriously restrict soybean yields due to weak development and poor competitiveness against weeds. Therefore, the development of resistant cultivars and site-specific selection from the available genetic resources should be major objectives in any breeding program. The significance of nitrogen-fixing bacteria and arbuscular mycorrhizal fungal (AMF) symbionts is indubitable not only in soybean host nutrition, but also in the alleviation of plant stress caused by adverse soil conditions ().
Nitrogen (N) and phosphorus (P) are critical limiting elements for crop growth (Miller and Cramer, 2004). Legumes require nitrogen-fixing rhizobial symbiotic partners, mostly Bradyrhizobium japonicum bacteria, to achieve their maximum yield potential. Soils in areas where soybean is not native usually lack these bacteria, e.g., in Europe (), so the microbial inoculation of soybean is essential to provide adequate nitrogen supplies and maximum yields both in conventional agricultural practices and especially in organic farming. Leguminous plants are also highly dependent on mycorrhizal fungi, symbionts that can resolve the problem of phosphorus limitation. AM fungi live in mutualistic symbiosis with 80-90% of terrestrial plants and form the most ancient and prevalent type of mycorrhizae. The improved productivity of AM plants and the physiological and biochemical changes caused by AM can result in greater stress resistance in the host (Rapparini and Peñuelas, 2014). The extraradical hyphal network of AMF provides more effective water and nutrient (especially plant-unavailable phosphorus) uptake for the host plants (Marschner, 1997; Hodge and Storer, 2015). Furthermore, the host plants benefit from the special composition of the microbial community in the mycorrhizosphere (Barea, 1997). Recent studies confirm that there is a common genetic basis for plant root endosymbioses with both rhizobia and AM fungi (Denison and Kiers, 2011). AMF colonization has an influence on the development and function of rhizobial nodules and vice versa, but the multiple mutualistic effect on the host participating in the rhizobia-AM fungi-legume interaction is very fertilizer-dependent. Numerous publications reported that the co-inoculation of legumes with AMF and rhizobial strains resulted in greater benefits for the plants and symbionts alike, due to a synergistic effect (). Neutral or negative responses to co-inoculation indicated that the advantage of tripartite symbiotic associations depended greatly on the compatibility and susceptibility of the partners (). As a principal effect, AM fungi improve nodulation in the legume host by enhancing phosphorus uptake (). In addition, AMF root colonization leads to changes in nitrogen transfer, microelement uptake and phytohormone production in the plants, which play an important role in nodulation and nitrogen fixation (Behie and Bidochka, 2014). The multi-factorial evaluation of tripartite functional diversity could provide a theoretical basis for optimizing the application of selected biofertilizers for soybean production. The varying efficiency of biological nitrogen fixation and of fungal colonization both show that the compatibility of the mutualistic partners depends on their genotype. Due to intra- and interspecific variability, several possible partners compete for the formation of the symbiosis, but the actual outcome of colonization is also influenced by the environmental conditions ().
Abbreviations: AMF, arbuscular mycorrhizal fungi; ARA, acetylene reduction assay; CR, root capacitance; DSS, drought stress symptoms; Fv/Fm, maximum quantum efficiency of photosystem II photochemistry; GSI, germination stress index; LA, leaf area; NN, node number; RDW, root dry weight; RMID, relative microbial inoculation dependence; RSR, root:shoot ratio; RWC, leaf relative water content; SDW, shoot dry weight; SH, shoot height.
In general, associations of rhizobia and host plants have narrow specificity during nodule development (Fauvart and Michiels, 2008). The association between the bacteria and the soybean may be host-specific, to such a degree that some rhizobial species nodulate plants only in a certain genus (Neves and Rumjanek, 1997). The variability of bradyrhizobial effectiveness on different soybean varieties was reported by Okereke et al. In contrast, the AMF-host relationship is not strictly specific; the high intra- and interspecific variability of AM fungi creates great functional diversity (). The great functional diversity and non-host-specific association of AM fungi offer the opportunity to produce biofertilizers that can establish effective fungus-host combinations under diverse environmental conditions. Despite the increasing attention paid to AMF as an advantageous symbiotic partner, a number of difficulties have so far prevented the large-scale application of AMF inoculation (). In low-input organic systems, compatible plant-fungus-rhizobium associations may play a more prominent and critical role in the optimal nutrition of the host plants than in conventional agricultural systems (). In organic farming, choosing a suitable cultivar is essential to avoid the damaging effects of environmental stress. The metabolism of drought-tolerant and non-tolerant cultivars has been compared (), but few data are available on cultivars with similar tolerance and on the role of symbionts in endurance. It is well known that there are differences in the drought tolerance of registered soybean cultivars (Bouslama and Schapaugh, 1984). It is assumed that even drought-tolerant soybeans benefit from inoculation, but because of varying compatibility, cultivars show different responses to microbial treatments. For the rational use of commercial inocula, their efficiency should be checked before application in local soils under different environmental conditions. It is also assumed that AMF will contribute more to drought tolerance than rhizobia, considering the active surfaces involved. The main objective of the experiments was to show the significance of the compatibility of symbiotic partners in tripartite soybean inoculation and to provide the theoretical basis for the application of commercial products in soybean field production. To assess the effect of symbioses, it is practical to measure multiple growth, physiological and morphological parameters related to all the partners. In the present study, commercial products containing Bradyrhizobium japonicum and AM fungal inocula were tested for symbiotic effectivity on soybean in different growth media. The host susceptibility to inoculation and the impact of the soil biotic context (belowground interactions) were investigated to reveal their effect on the compatibility of the symbiotic partners. The functionality of the symbiotic partners was also checked in relation to genotypic differences in the soybean cultivars under drought stress.
The Sequence of the Experiments
In the Selection experiment, 16 soybean (Glycine max L. Merr.) genotypes of diverse origin (Table 1) were evaluated for drought tolerance by means of polyethylene glycol (PEG)-induced drought stress. The Compatibility pot experiment was then performed to test two bradyrhizobia and two AM fungi (alone and in pairs) by inoculating two selected soybean cultivars grown in pumice, to identify the most effective combinations of symbiotic partners.
Finally, the Drought Stress pot experiment was performed to study symbiotic efficiency under optimal water supplies and under water deficit in soil from organic farms. The test plants, methods of plant cultivation and tests for symbiotic effectiveness were the same in both pot experiments.
Selection Experiment
The seeds were selected for size homogeneity, surface-sterilized for 0.5 min in 70% (v/v) ethyl alcohol, rinsed and soaked in sterile distilled water. Seed germination was studied in distilled water (control) and in a 25% (w/w) solution of polyethylene glycol (PEG, Karbowax 6000, Fluka AG) generating an osmotic potential of −0.82 MPa (Michel and Kaufmann, 1973). For each genotype, four replicates (n = 4) of 25 seeds were placed in Petri dishes (9 cm in diameter) on filter paper covering a piece of cotton wool (4 g). The dishes were filled with 40 cm³ of distilled water or PEG solution. The seeds were incubated in a dark, temperature-controlled chamber at 25 ± 1 °C. Seeds were considered germinated when the radicle had reached at least 2 mm in length. The number of germinated seeds was counted daily. After 6 days, the germination stress index (GSI6) was calculated according to Bouslama and Schapaugh. In the Compatibility and Drought Stress experiments, the two cultivars with the highest GSI6 (Aliz and Emese, indeterminate growth habit, maturity group 0) were used as hosts for the microsymbionts.
Compatibility Experiment
The Aliz and Emese cultivars were investigated for their compatibility with AM fungal and rhizobial inoculants. Biomass production (shoot and root dry weight; SDW and RDW), LA, photochemical efficiency (Fv/Fm) and root electrical capacitance (CR), as an indicator of root system activity, were measured in the early developmental stages. Root colonization with AMF, nodulation parameters and acetylene reduction were tested to estimate the functionality of the symbiotic partners. AMF inoculation with a commercial product (either F1 or F2) and rhizobial inoculation with a commercially available soybean inoculum (R1) or a Bradyrhizobium japonicum strain (R2) were applied as microbial treatments. Control (C) plants were not inoculated. The R1 microbial treatment was carried out with a peat-based soybean inoculum of Bradyrhizobium japonicum containing 10⁶ CFU g⁻¹. A liquid culture of a B. japonicum strain from the strain collection of the Research Institute for Soil Sciences and Agricultural Chemistry was used for the R2 treatment. The R2 B. japonicum strain was incubated on yeast extract mannitol agar (YMA) for 5 days. After incubation, one loopful of the culture was suspended in 10 mL of sterile tap water. 100 mL of yeast extract mannitol broth (YMB) culture medium was inoculated with this bacterial suspension and incubated for 5 days under continuous shaking at 28 °C. In the case of rhizobial inoculation, the seedbed was inoculated with 1 mL of R1 suspension (2 g per 200 mL sterile tap water) or 1 mL of R2 liquid culture. Each planting combination was represented by four replicates (n = 4).
Drought Stress Experiment
The Aliz and Emese cultivars were tested for symbiotic effectiveness in chernozem soil treated with the previously selected microbial treatments. Besides the control, both cultivars were exposed to two treatments: co-inoculation with the R1 B. japonicum inoculum chosen in the Compatibility experiment and either the F1 or the F2 AMF inoculum. Plants grown with different water supplies were investigated in five replications each (n = 5).
Experimental Design and Growth Conditions
In the Compatibility experiment, the soybean seeds were planted in 72 plastic pots (1.25 dm³) containing 1.25 kg of a soil-analog ground pumice (porous vitroclastic perlite) medium with 0.7-1.1 mm particle size and pH(H2O) 6.5. Two seeds were planted in each pot and thinned to one after emergence. The pumice, which lacks indigenous rhizobia and infective propagules of AM fungi, was treated with AMF or rhizobial inocula, alone or in combination, except for the controls. The plants were cultivated in a random arrangement in a growth chamber for 65 days with a day/night temperature and photoperiod of 26/18 °C and 16/8 h, respectively, at a photon flux density of 600 µmol m⁻² s⁻¹ and relative humidity of 50-70%. Optimal plant nutritional status was maintained by weekly irrigation with 100 ml of modified Hoagland's solution (0.5 M KH2PO4) per pot. In the Drought Stress experiment, the cultivars were grown for 65 days with the chosen highly effective combinations of microbial partners (F1R1; F2R1) under climatic and light conditions identical to those in the Compatibility experiment. Half of the plants were watered adequately (200 ml per pot three times a week), while the others were put through two drought cycles during 23-35 and 40-60 days after planting (DAP). At the beginning of the drought cycles, water was withheld until the soil moisture decreased to near wilting point (requiring 3-6 days, depending on plant size), after which this water status was maintained by daily irrigation during the rest of the drought cycle.
Pre-harvest Investigations
The effects of non-lethal water deficit during the growth of the soybean cultivars prior to the reproductive phase were assessed by recording the number of nodes (NN) on the main stem, measuring the stem height (SH) and the relative water content (RWC) of the leaves, and by scoring DSS on the leaves (symptoms of wilting and leaf loss/shedding on a 0-3 scale). Sampling was performed every 7th day from the 14th day after planting (DAP) until the beginning of the reproductive growth stage (R1). Data representing the functional aspects of the AMF-rhizobia-soybean symbiotic systems were obtained in situ by measuring chlorophyll fluorescence induction and root electrical capacitance (CR).
Chlorophyll Fluorescence Induction
An FMM chlorophyll-a fluorometer () was used to measure the fluorescence induction parameters in situ. The internal light source was a 635 nm laser diode (QL63H5SA, Roithner Lasertechnik GmbH, Wien, Austria) with 20 mW maximum optical power. Traditional Kautsky induction kinetic curves were detected simultaneously at 690 nm (red) and 735 nm (far-red) wavelengths, where chlorophyll-a fluorescence shows two maxima in leaves. Minimal (F0) and maximal (Fm) fluorescence values were detected, after which the light-adapted, steady-state quantum efficiency of photosynthetic electron transport, Fv/Fm = (Fm − F0)/Fm, was calculated for both emission maxima. The Fv/Fm values showed a similar tendency at both wavelengths, so only the results for 690 nm are shown. The Fv/Fm data were measured on all the plants at 63 DAP.
Root Electrical Capacitance (CR) Measurement
The extension and activity of the root system (rhizosphere) were assessed in situ by CR measurement (Chloupek, 1972). When an alternating current passes through the root tissue, charge accumulation, i.e., polarization, occurs.
The amount of electric charge stored by the root system can be expressed as an electrical capacitance (in nanofarads), which is proportional to the active root surface area. The method is only valid for the comparison of plants of the same species grown in the same substrate at the same moisture level (). CR measurements were carried out on all the plants on DAP 64 (before harvest, and after the second drought stress period in the Drought Stress experiment) using a GW-8101G LCR instrument (GW Instek Co., Ltd., Taiwan) at 1 kHz frequency with 1 V terminal voltage. One terminal of the instrument was connected to the plant stem with a spring tension clamp fixed 10 mm above the substrate level, while the second was grounded by a stainless steel rod (6 mm ID, 15 cm long) inserted into the substrate. Electrocardiograph paste (Vascotasin®; Spark Promotions Co., Ltd., Budapest, Hungary) was smeared around the stem to maintain electrical contact, and the substrate was irrigated to field capacity before measurement.
Leaf Relative Water Content (RWC)
In the Drought Stress experiment, the relative water content (RWC) of the leaves was measured during drought exposure on DAP 22 and DAP 63 (González and González-Vilar, 2007).
Post-harvest Measurements
Both the inoculated and control soybeans were harvested and sampled for microbial investigations after 65 days of cultivation (R1, R2 flowering stage and R3 green pod formation) (). LA was measured using image processing. In both experiments, SDW and RDW were determined after drying the samples at 80 °C for 48 h. The susceptibility to symbiotic associations and the dependence of the soybean cultivars on microbial treatments were characterized by the RMID index. RMID is an extension of the relative field mycorrhizal dependence (RFMD) index (), taking into consideration all symbionts rather than just AMF, and is defined as RMID = 100 × (dry weight of inoculated plant − dry weight of non-inoculated plant) / dry weight of inoculated plant.
Quantification of AMF Root Colonization, Nodulation and Functionality of Rhizobia
After harvest, the soybean response to rhizobial inoculation was characterized by a number representing the density of nodules found on the primary roots and lateral root zones. The nitrogenase enzyme activity of the nodules was measured by means of ARA (). After removal from the pots, the 65-day-old plants were carefully washed, then the whole nodulated roots were placed in a 500 ml glass bottle capped with a rubber septum. After adding 50 cm³ of acetylene gas (C2H2, >99.95% purity, Lindegas) to the bottle, the roots were incubated for 30 min at room temperature (22 °C). A gas sample (500 µl) was removed using a gas-tight syringe and analyzed on a gas chromatograph (GC 8000, FISONS Instruments) with the following specifications: flame ionization detector (FID) and Porapak T column to separate ethylene from acetylene; carrier gas N2 at 175 kPa, hydrogen at 50 kPa, air at 80 kPa; injector temperature 100 °C, oven temperature 80 °C (isotherm), detector temperature 150 °C. The area under the peaks was evaluated with Chrom-Card software, and a standard of 10 ppm ethylene (C2H4) in N2 (Scotty 14, Supelco) was used for calibration. After ARA, the roots were randomly sampled for AMF root colonization measurement, performed after staining the separated root sub-samples (≤1 mm in diameter) with lactic acid-aniline blue according to Phillips and Hayman. AMF colonization was estimated using a BX51 microscope (40-200×; Olympus Corp., Tokyo, Japan).
The frequency (F%), the intensity of colonization (M%) and the arbusculum richness (A%) in the roots were calculated using a five-class system () after observing 30 fine root segments, each 1 cm in length.
Concentration of Nitrogen (N), Phosphorus (P) and Potassium (K) in Leaves
The N (%), P and K (mg kg⁻¹) concentrations of the plants were measured after harvest. The P and K macroelement concentrations of the plants were assessed after wet digestion of air-dried plant samples with concentrated HNO3 + H2O2. The nitrogen content in the leaves was determined by the Kjeldahl method after digesting the samples in concentrated sulfuric acid (H2SO4). The leaf element contents were measured with an ICP-AES instrument (Jobin-Yvon, ULTIMA2).
Statistical Analysis
Results were analyzed using two-way ANOVA, or the Kruskal-Wallis non-parametric test if the prerequisites of ANOVA were not fulfilled. Bartlett's test of homogeneity of variance and the Shapiro-Wilk normality test on model residuals were carried out before ANOVA. Post hoc tests were also carried out: LSD (least significant difference) values were calculated after ANOVA, while a pairwise t-test with the Holm method was applied after the Kruskal-Wallis test. The comparisons between means were performed at the significance level of p < 0.05. The relationships between CR and RDW or LA were evaluated using simple regression analysis (p < 0.05 for each treatment).
Selection Experiment
The negative osmotic potential significantly decreased seed germination (Table 1). The PEG solution retarded germ development, to the greatest extent for the cultivar Splendor. The GSI6 values ranged from 0.0% (Splendor) to 60.3% (Aliz). Cultivars Aliz and Emese had the highest GSI6 values, so these were assumed to have high drought stress tolerance in the early stages of plant development. There was no relationship between the 1000-grain weight or maturity group of the cultivars and the GSI levels.
Compatibility Experiment
In the Compatibility experiment, all the AMF and dual inoculations resulted in higher SDW and RDW (Figures 1A,C), stem length and number of nodes (data not shown), especially in the F1 AMF treatments. The biomass production of AM plants was significantly higher (by 50-70%) than that of the controls or of plants inoculated only with rhizobium. No significant differences in SDW and RDW were found between the soybean cultivars, though the SDW of Aliz was slightly higher than that of Emese; Aliz grew taller and spindlier. The LA of the differentially inoculated soybean plants showed a tendency similar to their shoot biomass and dry weights (Figure 1B), except that Emese had higher LA than Aliz. No AMF structures were detected in the control or rhizobium-inoculated plants (C and R1,2). Microscopic examination of the harvested roots showed a low frequency (F%) of fungal colonization in mycorrhizal plants, irrespective of the AMF product. A comparison of F1 and F2 showed that the former caused a greater increase in host growth, while the latter resulted in a higher colonization rate. AMF root colonization decreased in plants co-inoculated with both rhizobial and AMF products. No significant differences in AMF infectivity were found for the different host plant cultivars. The N content of plants treated only with rhizobium (R1; R2 in the case of Aliz) was significantly higher than that of control (C) and AMF-inoculated (F1; F2; F1R1; F1R2; F2R1; F2R2) hosts (Table 2).
The leaf N content of Aliz was significantly higher than that of Emese in the F1, R2, F1R1 and F2R2 treatments. The P concentration was generally higher in the leaves of Emese, except for the F1 treatment, where a significant increment occurred in Aliz (Table 3). Root electrical capacitance (CR) was highest in AMF-treated plants (Figure 1D) for both Emese and Aliz. Significant differences in CR were found between the mycorrhizal and non-mycorrhizal (C, R1, R2) plants. Rhizobium treatment caused a smaller, but still significant, increase in CR.
Drought Stress Experiment
Microbial inoculation caused a significant increment in SDW, LA, RDW (Figures 2A-C) and NN (except in F1R1) (Figure 2D) under both well-watered (WW) and drought stress (DS) conditions. The beneficial effect of F1 was mostly greater than that of F2 (Figure 2). The microsymbionts induced a more than two-fold increase in LA (WW: 126-128%; DS: 113-115%) (Figure 2B). The LA of Emese was generally higher than that of Aliz. RDW was enhanced by 41-58% (WW) and 24-30% (DS) by co-inoculation (Figure 2C). Greater changes were observed in shoot than in root biomass production: inoculation increased SDW by 60-69% (WW) and 54% (DS). The RSR values of microbially treated and control plants were statistically similar under WW conditions (Figure 2E). Under drought stress, shoot dry weight decreased by 41% in the control and by 41-45% in the co-inoculated plants (Figure 2A). LA was 40% smaller in the control and 44% smaller in the co-inoculated plants (Figure 2B), while RDW was reduced by 22% in the control and by 31-36% in the co-inoculated plants (Figure 2C). NN also decreased significantly under drought stress (Figure 2D). Under DS, SH was 21-24% lower in the inoculated plants. Combined inoculation with AM fungi and rhizobia was significantly more beneficial for Emese than for Aliz, as shown by differences in various plant growth parameters (LA: 10%, RSR: 15%) (Figures 2B,E). By contrast, the SDW of Aliz was higher than that of Emese. DS induced a greater decrease in SDW (35% for Emese and 45% for Aliz) than in RDW (18% for Emese and 40% for Aliz). No differences were found between the effects of the F1 and F2 AMF inocula on plant growth. The drought-induced decline in LA ranged from 36% (Emese F2R) to 50% (Aliz F2R). Water deficit increased the RSR values in all the treatments and led to significant differences between the RSR values of inoculated and control plants, which were absent under WW conditions. Cultivar Emese exhibited significantly higher RSR than Aliz (Figure 2E). The growth parameters of F1- and F2-treated plants were statistically similar under DS. There were no significant differences in RWC between microbially treated and control plants under WW conditions, irrespective of the cultivar (Figure 3A). The exposure of plants to drought led to a noticeable decrease in leaf RWC, from 90.4-93.4% to 44.7-78.2%, with significantly lower values for inoculated cultivars than for non-inoculated ones. Cultivar Aliz generally had higher RWC values in the vegetative phenophase, especially when inoculated before DS. No differences were found in the DSS of the two cultivars in any treatment, though plants inoculated with F1 exhibited pronounced wilting symptoms (Figure 3B). The number of nodules on the roots of co-inoculated plants was much higher than on non-inoculated, control plants. Nodulation was independent of both the application of mycorrhizal fungi and the cultivar under both watering regimes.
The intensity of colonization (M%) in the roots ranged from 35.2% to 60.8% in WW soil, with the lowest rate for F2 (Figure 3C). The M% of roots colonized by indigenous AMF exhibited a 9% increase in response to DS, while roots colonized by AMF species from the commercial products showed suppressed or unchanged infectivity. Indigenous AMF alone generated higher colonization in roots than was observed after inoculation with F1 or F2. The intensity of AMF colonization in the roots was slightly higher for Aliz than for Emese. The characteristics of and changes in arbusculum richness (A%) were similar to those of M%, again proving the better natural infectivity and responsiveness of Aliz. The increment in A% caused by DS was significant (data not shown). The RMID values for mycorrhizal and rhizobial treatments of the cultivars were greater under WW than under DS conditions. Under WW conditions, Emese had slightly higher susceptibility to microbial inoculation than Aliz, but DS inverted this relationship.
[Figure caption fragment: ... and (E) root:shoot ratio (RSR) of two soybean cultivars (Emese and Aliz) exposed to well-watered (WW) and drought-stressed (DS) conditions. Error bars represent LSD values; different letters indicate significant differences (for transformed data) at p < 0.05 (n = 5).]
[Figure caption fragment: ... and (E) root electrical capacitance (CR) of control (C) and co-inoculated (F1R1, F2R2) soybeans of different cultivars (Emese, Aliz) under well-watered (WW) and drought-stressed (DS) conditions. Error bars represent LSD values; different letters indicate significant differences (pairwise t-test) at p < 0.05.]
Fv/Fm values were increased by dual inoculation under both water regimes (Figure 3D). Drought did not significantly affect photosynthetic activity, and the quantum efficiency of PSII was independent of the cultivar. Microbial inoculation significantly enhanced the CR values of the cultivars, irrespective of watering, particularly in the case of Emese (Figure 3E). The effect of F1 on CR (36% for Aliz, 34% for Emese under WW; 21% for Aliz and 28% for Emese under DS) was greater than that of F2 (26% for Aliz, 32% for Emese under WW; 14% for Aliz and 20% for Emese under DS) (Figure 3D). CR decreased significantly in response to DS, to a greater extent for Aliz (31-37%, as a percentage of WW) than for Emese (27-33%). Cultivar Emese exhibited significantly higher CR than Aliz at the end of the experiment. A good linear correlation (p < 0.01) was obtained between CR and both SDW (R² = 0.829) and LA (R² = 0.716) by pooling the data of the six plant groups. Strong linear correlations were found between both CR and RDW and CR and LA (Aliz R² = 0.672; Emese R² = 0.758). Figures 4A,B illustrate an increase in CR due to inoculation and a decrease due to drought stress. Regression analysis demonstrated linear correlations between CR and RDW for each treatment. The same root biomass was associated with higher CR in inoculated treatments and with lower values in the case of drought stress (Figure 4B).
[FIGURE 4 | (A) Relationship between root electrical capacitance (CR) and root dry weight (RDW) of soybean cultivars (Emese, Aliz) and (B) RDW of control and co-inoculated (F1R1, F2R1) soybeans for different cultivars (Emese, Aliz) under well-watered (WW) and drought-stressed (DS) conditions.]
The beneficial effect of rhizobial nodulation on the leaf nitrogen concentration was detected, as in the Compatibility experiment.
The leaf N percentage of inoculated plants significantly exceeded that of the controls under both WW and DS conditions (Table 4), but no significant differences were found either between the cultivars or between the microbial treatments. The leaf P concentration was decreased by drought and by inoculation under WW conditions, although DS led to a higher leaf P concentration in inoculated plants compared to controls (Table 5). The total P content in the leaves was significantly higher in inoculated plants than in control plants. The P concentrations in Emese were greater than in Aliz.
[Table note: Means and standard errors of five replicates. Two-way analysis of variance was performed. LSD 5% values indicate the least significant difference at the p = 0.05 level. Microbial treatments: C, non-inoculated control plants; F1,2, commercial AMF inocula; R1,2, Bradyrhizobium japonicum inocula. Soybean cultivars: Aliz and Emese. WW, well-watered conditions; DS, drought stress conditions.]
DISCUSSION
For the future of mycorrhizal biotechnology and industry, it is crucial to incorporate the scientific knowledge derived from fundamental and applied research into the innovation of microbial inoculants (). Although various commercial microbial products are already in use, few data have been published on their effects on plant development, nutrition and yield under controlled environmental conditions (). Most studies have primarily focused on the investigation of qualitative and quantitative yield indicators in horticulture, fruit or ornamental production (). In the present study, symbiotic effectiveness was investigated with 20 measured parameters in each pot experiment on drought-selected soybean hosts during their vegetative and early reproductive stages, with special regard to the compatibility of dual and tripartite symbiotic agents. Most of the measured parameters confirmed the beneficial effect of inoculation with symbionts on plant development and drought tolerance (Figures 1-4). Similar to other observations (), the responses of soybean to microbial inoculation depended considerably on the rhizobial strains, on the fungal products and also on the cultivars. The present investigations showed that the benefits of symbiosis were more obvious for plants singly or co-inoculated with AMF than for those inoculated solely with rhizobium (Figure 1 and Tables 1, 2). After single inoculation with products containing AM fungal species (F1 and F2), the increase in shoot and root dry weight, LA and CR was higher than in rhizobium-only treatments (Figures 1A-D). With the exception of CR, these parameters differed only slightly between soybean varieties. Such functional differences also occurred between the AMF treatments, as reported earlier (Louis and Lim, 1988; de Varennes and Goss, 2007). Infection with Bradyrhizobium clearly enhanced the leaf nitrogen content, but AMF colonization reduced the concentrations of nitrogen and phosphorus due to the dilution effect of higher plant biomass (Tables 1, 2). In the Compatibility experiment, the results obtained for ARA and plant nitrogen content revealed clear differences in the effect of the two rhizobial inoculants on plant nutrition. Despite their lower colonization values, plants treated with the AMF inoculum F1 had a higher growth rate than those inoculated with F2 (Figures 1A-C). No AM fungal colonization was observed during the experimental period on the great majority of the roots in F1-treated pots.
The extra biomass production caused by F1 could be due to the high organic content of the biofertilizer. The higher number of species in the F2 product could lead to higher infectivity and effectiveness, so F2 could be more successful in developing a compatible relationship. Numerous field and pot trials have shown that AMF inoculation significantly increased soybean yields (by 20-50%); however, the response varied with soil type, fertilization and the origin of the symbiotic partners (Porcel and Ruiz-Lozano, 2004). It was found that inoculation with Bradyrhizobium sp. or Glomus mosseae in pot culture was able to inhibit both pathogenic infection with Cylindrocladium parasiticum and the development of the parasite in soybean roots, while co-inoculation was more effective than the use of either microsymbiont alone (). The positive interaction between the microsymbionts was indicated by the extra biomass production, the increased number of nodules and the richness of AMF root structures (Louis and Lim, 1988). However, the carbon cost of mycorrhizal fungi requires the delivery of 4-20% of the photosynthetically fixed carbon from the host plants to the symbiotic partner (). According to Minchin et al., as much as 25% of net photosynthates may be appropriated for biological nitrogen fixation. Furthermore, the cost of two independent microsymbionts in tripartite associations may be cumulative (). Although multiple inoculation may result in decreased biomass production or unfavorable microbial parameters, the present results indicated that microbial inoculation can improve photosynthetic efficiency both in well-watered soils and under drought stress, though this effect depends on the inoculants (Figure 3D). The increased photosynthetic activity and root functionality of bacteria- or AMF-treated hosts resulted in higher biomass production compared to control plants. However, co-inoculation with rhizobial and AM fungal strains did not produce significantly more biomass in pumice (Figure 1A); moreover, in the case of dual inoculation, AMF root colonization exhibited a slight decrease. Fluorescence kinetics and root electrical capacitance methods have proved to be useful tools for the in situ monitoring of the effects of several stress factors and for the selection of stress-tolerant cultivars or effective microbial strains (Cseresnyés et al., 2018), but less research has targeted the characterization of differences between genotypes of crop species (Barcsi, 2013). The rate of photosynthesis is influenced by environmental conditions (e.g., water, temperature, nutrients, light and CO2) and internal factors, such as the nutrient concentration of the tissues and the sink strength stimulated by the carbon cost of symbiotic associations (). In the present experiments, significant differences were found in both the chlorophyll fluorescence and the CR values of different microbial treatments and soybean cultivars. Of the two varieties, Aliz had higher photochemical efficiency, showing an inverse tendency compared to the LA values. In both pumice and organic soil, CR was closely correlated with root biomass (). However, the measured values were the joint result of root activity, the activity of root-associated symbionts and the soil. This means that at the same stage of development, under similar soil conditions, both rhizobial nodules and the AMF extraradical hyphal network may have a beneficial effect on the active root surface area.
The same root biomass is associated with higher CR in the case of inoculated plants and with lower values in response to drought stress (). The increased water uptake of inoculated plants is presumably due to the enhanced root-soil interface created by the external fungal hyphae and root nodulation. It was found earlier that eight soybean cultivars could be classified into different drought-tolerance groups on the basis of CR, which showed a strong correlation with cultivar-specific root growth and biomass production under both well-watered and drought conditions (). Several studies have shown that root properties such as greater depth and a larger root system with more root hairs are advantageous under water deficit (Kumagai and Sameshima, 2014). It was also found that the effects of co-inoculation were related to the root morphology of soybean genotypes (Wang et al., 2016). For example, deeply rooted hosts benefited more than shallowly rooted ones. DS impeded plant growth in all the treatments for both cultivars (Figures 2A-D) (Porcel and Ruiz-Lozano, 2004; Masuda and Goldsmith, 2009). Similar to other investigations (Porcel and Ruiz-Lozano, 2004), mycorrhizal associations seem to have played an important role in the drought stress tolerance of soybean. Strong linear CR-RDW and CR-LA relationships were revealed in the Drought Stress experiment (Figures 4A,B). As the leaves transpire the water taken up by the roots, LA should be proportional to the absorptive surface area of the root system involved in the mycorrhizosphere, which is manifested as a correlation between CR and LA. In the case of drought stress, the beneficial effect of inoculation was mostly more pronounced in Emese than in Aliz. Emese produced a bigger root system and was more resistant to drought stress than Aliz, in which the levels of RSR and CR were lower (Figure 2E). Although Emese lost more water from its leaves due to its larger biomass, it did not differ from Aliz in terms of DSS (Figure 3B). The loss of biomass caused by drought was also lower in the case of Emese, while the mycorrhizal dependence of Aliz increased under drought. These results suggest that the functional properties of the roots may significantly affect the susceptibility of cultivars to microsymbionts, which could also influence the development of cultivar-specific stress tolerance (). It has been reported that grasses having hairy and bushy fibrous roots with a large uptake surface are less mycotrophic than tap-rooted plants in mycotrophic plant families. In a tripartite symbiotic association, AM fungi respond more sensitively than rhizobia to differences in the (functional) properties of the root between species and varieties (). The establishment of symbiosis enlarges the active root surface area responsible for water and nutrient uptake, which can be detected by measuring CR. CR measurements are sufficiently sensitive to detect differences in the infectivity of AM strains (). No data on the CR of nodulated roots have yet been published. In the present experiments, plants infected with rhizobium had significantly higher CR than the controls despite similar RDW (Figure 4B). Nodules and the extraradical hyphal network add to CR by enlarging the absorbing surface. It should be noted that, when evaluating the beneficial effects of inoculation, one should allow for the carriers of the commercial products, which may contain nutrients.
The extra biomass production caused by the F1 AMF biofertilizer could be the result of its high organic material content. Multifactorial investigations are needed for the determination of host compatibility and the efficacy of microbial inocula on plant production, including the measurement of plant growth and functional parameters with both in situ and destructive methods. In the field, the symbiotic effectiveness and competitiveness of rhizobial strains or AMF inoculants can be achieved by superior microsymbionts originating from natural or managed selection. However, no single AMF or rhizobium strain is likely to be effective in all soils, under all conditions and on every plant host; therefore, no commercial fertilizer can be expected to be ideal for every field, even if it contains multiple strains or species. All products need to be tested before application, and it is possible that no commercial fertilizer will prove adequate for a specific purpose. In that case, the managed selection of strains originating from the indigenous microbial community could be the best solution. In low-input agricultural systems, research on the cooperation between different microbial symbionts is key to understanding how the health and productivity of the plant are supported (). Increasing the fitness and vitality of host plants under environmental stress by means of site-adapted, compatible symbiotic partnerships could be a new strategy for mitigating the impacts of environmental stress factors on plant production (). Small differences in the root properties of drought-tolerant cultivars may cause significant differences in the growth and physiological parameters used to describe symbiotic relationships. The endurance of even drought-tolerant cultivars can be improved by inoculation with AMF or nitrogen-fixing bacteria. Under well-watered conditions, the tripartite association did not show a synergistic effect on the plants, so the benefits produced by the commercial AMF products may have partially originated from their carriers. The efficiency of these biofertilizers should therefore be checked before large-scale application. The present results show the potential of CR measurements to monitor the effect of symbiotic factors influencing root growth and biomass production. This in situ technique provides an opportunity to follow temporal changes in root activity and to select efficient plant-microbe partnerships.
AUTHOR CONTRIBUTIONS
TT supervised the project, conducted the pot experiments, discussed the results, and wrote the paper. IC was responsible for CR measurements and data evaluation. TT, RK, IP, BK, TS-K, and AF designed and carried out the investigations on the symbiotic microorganisms and on the plants. All authors read the manuscript and approved the submission.
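As a supplement to the Methods above, a minimal Python sketch of two calculations referred to there: the RMID index and the simple linear regression between CR and RDW. The variable names and numerical values are illustrative only, not data from the study; the RMID formula follows the definition given in the Post-harvest Measurements section.

# Illustrative RMID and C_R-RDW regression calculations (made-up example values).
import numpy as np
from scipy import stats

# Dry weights (g) of an inoculated plant and its non-inoculated control.
dw_inoculated, dw_control = 2.4, 1.5
# RMID = 100 * (DW_inoculated - DW_non_inoculated) / DW_inoculated
rmid = 100 * (dw_inoculated - dw_control) / dw_inoculated
print(f"RMID = {rmid:.1f}%")

# Simple linear regression of root electrical capacitance (nF) on root dry weight (g).
rdw = np.array([0.8, 1.1, 1.4, 1.9, 2.3])   # illustrative RDW values
cr = np.array([0.9, 1.3, 1.5, 2.1, 2.6])    # illustrative C_R values
res = stats.linregress(rdw, cr)
print(f"slope = {res.slope:.2f}, R^2 = {res.rvalue ** 2:.2f}, p = {res.pvalue:.3f}")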
Compartmental surgery in tongue tumours: description of a new surgical technique. The aim of curative surgical oncology is to remove the primary tumour with a wide margin of normal tissue. What constitutes a sufficiently wide margin, particularly in oral cancer, is fundamentally unclear. The currently accepted standard is to remove the primary lesion with a 1.5-2 cm circumferential macroscopic margin. In the last ten years, anatomical considerations in the approach to primary, advanced and untreated tumours of the tongue led us to develop and improve a new surgical approach to their demolition and reconstruction. From July 1999 to July 2009, at the European Institute of Oncology in Milano, Italy, 155 patients were treated while defining and refining the concept of compartmental tongue surgery (CTS) and its main components: 1) an anatomical approach to the disease that requires removal of the primary lesion and all of the potential pathways of progression -- muscular, lymphatic and vascular; 2) identification of a distinct territory at risk of metastatic representation of the disease: the parenchymal structures between the primary tumour and the cervical lymphatic chain, which include the muscular (mylohyoid), neuro-vascular (lingual nerve and vein) and glandular (sublingual and submandibular) tissues; 3) preparation for a rational reconstruction in consideration of the functional defect resulting from this anatomical demolition.
// keyvault/data-plane/azure-keyvault/src/main/java/com/microsoft/azure/keyvault/models/KeyAttributes.java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator

package com.microsoft.azure.keyvault.models;

import com.fasterxml.jackson.annotation.JsonProperty;

/**
 * The attributes of a key managed by the key vault service.
 */
public class KeyAttributes extends Attributes {
    /**
     * Reflects the deletion recovery level currently in effect for keys in the
     * current vault. If it contains 'Purgeable' the key can be permanently
     * deleted by a privileged user; otherwise, only the system can purge the
     * key, at the end of the retention interval. Possible values include:
     * 'Purgeable', 'Recoverable+Purgeable', 'Recoverable',
     * 'Recoverable+ProtectedSubscription'.
     */
    @JsonProperty(value = "recoveryLevel", access = JsonProperty.Access.WRITE_ONLY)
    private DeletionRecoveryLevel recoveryLevel;

    /**
     * Get the recoveryLevel value.
     *
     * @return the recoveryLevel value
     */
    public DeletionRecoveryLevel recoveryLevel() {
        return this.recoveryLevel;
    }
}
<reponame>mengkai94/training_results_v0.6 import torch from seq2seq.data.config import BOS from seq2seq.data.config import EOS class SequenceGenerator: """ Generator for the autoregressive inference with beam search decoding. """ def __init__(self, model, beam_size=5, max_seq_len=100, cuda=False, len_norm_factor=0.6, len_norm_const=5, cov_penalty_factor=0.1): """ Constructor for the SequenceGenerator. Beam search decoding supports coverage penalty and length normalization. For details, refer to Section 7 of the GNMT paper (https://arxiv.org/pdf/1609.08144.pdf). :param model: model which implements generate method :param beam_size: decoder beam size :param max_seq_len: maximum decoder sequence length :param cuda: whether to use cuda :param len_norm_factor: length normalization factor :param len_norm_const: length normalization constant :param cov_penalty_factor: coverage penalty factor """ self.model = model self.cuda = cuda self.beam_size = beam_size self.max_seq_len = max_seq_len self.len_norm_factor = len_norm_factor self.len_norm_const = len_norm_const self.cov_penalty_factor = cov_penalty_factor self.batch_first = self.model.batch_first def greedy_search(self, batch_size, initial_input, initial_context=None): """ Greedy decoder. :param batch_size: decoder batch size :param initial_input: initial input, usually tensor of BOS tokens :param initial_context: initial context, usually [encoder_context, src_seq_lengths, None] returns: (translation, lengths, counter) translation: (batch_size, max_seq_len) - indices of target tokens lengths: (batch_size) - lengths of generated translations counter: number of iterations of the decoding loop """ max_seq_len = self.max_seq_len translation = torch.zeros(batch_size, max_seq_len, dtype=torch.int64) lengths = torch.ones(batch_size, dtype=torch.int64) active = torch.arange(0, batch_size, dtype=torch.int64) base_mask = torch.arange(0, batch_size, dtype=torch.int64) if self.cuda: translation = translation.cuda() lengths = lengths.cuda() active = active.cuda() base_mask = base_mask.cuda() translation[:, 0] = BOS words, context = initial_input, initial_context if self.batch_first: word_view = (-1, 1) ctx_batch_dim = 0 else: word_view = (1, -1) ctx_batch_dim = 1 counter = 0 for idx in range(1, max_seq_len): if not len(active): break counter += 1 words = words.view(word_view) output = self.model.generate(words, context, 1) words, logprobs, attn, context = output words = words.view(-1) translation[active, idx] = words lengths[active] += 1 terminating = (words == EOS) if terminating.any(): not_terminating = ~terminating mask = base_mask[:len(active)] mask = mask.masked_select(not_terminating) active = active.masked_select(not_terminating) words = words[mask] context[0] = context[0].index_select(ctx_batch_dim, mask) context[1] = context[1].index_select(0, mask) context[2] = context[2].index_select(1, mask) return translation, lengths, counter def beam_search(self, batch_size, initial_input, initial_context=None): """ Beam search decoder. 
:param batch_size: decoder batch size :param initial_input: initial input, usually tensor of BOS tokens :param initial_context: initial context, usually [encoder_context, src_seq_lengths, None] returns: (translation, lengths, counter) translation: (batch_size, max_seq_len) - indices of target tokens lengths: (batch_size) - lengths of generated translations counter: number of iterations of the decoding loop """ beam_size = self.beam_size norm_const = self.len_norm_const norm_factor = self.len_norm_factor max_seq_len = self.max_seq_len cov_penalty_factor = self.cov_penalty_factor translation = torch.zeros(batch_size * beam_size, max_seq_len, dtype=torch.int64) lengths = torch.ones(batch_size * beam_size, dtype=torch.int64) scores = torch.zeros(batch_size * beam_size, dtype=torch.float32) active = torch.arange(0, batch_size * beam_size, dtype=torch.int64) base_mask = torch.arange(0, batch_size * beam_size, dtype=torch.int64) global_offset = torch.arange(0, batch_size * beam_size, beam_size, dtype=torch.int64) eos_beam_fill = torch.tensor([0] + (beam_size - 1) * [float('-inf')]) if self.cuda: translation = translation.cuda() lengths = lengths.cuda() active = active.cuda() base_mask = base_mask.cuda() scores = scores.cuda() global_offset = global_offset.cuda() eos_beam_fill = eos_beam_fill.cuda() translation[:, 0] = BOS words, context = initial_input, initial_context if self.batch_first: word_view = (-1, 1) ctx_batch_dim = 0 attn_query_dim = 1 else: word_view = (1, -1) ctx_batch_dim = 1 attn_query_dim = 0 # replicate context if self.batch_first: # context[0] (encoder state): (batch, seq, feature) _, seq, feature = context[0].shape context[0].unsqueeze_(1) context[0] = context[0].expand(-1, beam_size, -1, -1) context[0] = context[0].contiguous().view(batch_size * beam_size, seq, feature) # context[0]: (batch * beam, seq, feature) else: # context[0] (encoder state): (seq, batch, feature) seq, _, feature = context[0].shape context[0].unsqueeze_(2) context[0] = context[0].expand(-1, -1, beam_size, -1) context[0] = context[0].contiguous().view(seq, batch_size * beam_size, feature) # context[0]: (seq, batch * beam, feature) # context[1] (encoder seq length): (batch) context[1].unsqueeze_(1) context[1] = context[1].expand(-1, beam_size) context[1] = context[1].contiguous().view(batch_size * beam_size) # context[1]: (batch * beam) accu_attn_scores = torch.zeros(batch_size * beam_size, seq) if self.cuda: accu_attn_scores = accu_attn_scores.cuda() counter = 0 for idx in range(1, self.max_seq_len): if not len(active): break counter += 1 eos_mask = (words == EOS) eos_mask = eos_mask.view(-1, beam_size) terminating, _ = eos_mask.min(dim=1) lengths[active[~eos_mask.view(-1)]] += 1 output = self.model.generate(words, context, beam_size) words, logprobs, attn, context = output attn = attn.float().squeeze(attn_query_dim) attn = attn.masked_fill(eos_mask.view(-1).unsqueeze(1), 0) accu_attn_scores[active] += attn # words: (batch, beam, k) words = words.view(-1, beam_size, beam_size) words = words.masked_fill(eos_mask.unsqueeze(2), EOS) # logprobs: (batch, beam, k) logprobs = logprobs.float().view(-1, beam_size, beam_size) if eos_mask.any(): logprobs[eos_mask] = eos_beam_fill active_scores = scores[active].view(-1, beam_size) # new_scores: (batch, beam, k) new_scores = active_scores.unsqueeze(2) + logprobs if idx == 1: new_scores[:, 1:, :].fill_(float('-inf')) new_scores = new_scores.view(-1, beam_size * beam_size) # index: (batch, beam) _, index = new_scores.topk(beam_size, dim=1) source_beam = index / beam_size 
new_scores = new_scores.view(-1, beam_size * beam_size) best_scores = torch.gather(new_scores, 1, index) scores[active] = best_scores.view(-1) words = words.view(-1, beam_size * beam_size) words = torch.gather(words, 1, index) # words: (1, batch * beam) words = words.view(word_view) offset = global_offset[:source_beam.shape[0]] source_beam += offset.unsqueeze(1) translation[active, :] = translation[active[source_beam.view(-1)], :] translation[active, idx] = words.view(-1) lengths[active] = lengths[active[source_beam.view(-1)]] context[2] = context[2].index_select(1, source_beam.view(-1)) if terminating.any(): not_terminating = ~terminating not_terminating = not_terminating.unsqueeze(1) not_terminating = not_terminating.expand(-1, beam_size).contiguous() normalization_mask = active.view(-1, beam_size)[terminating] # length normalization norm = lengths[normalization_mask].float() norm = (norm_const + norm) / (norm_const + 1.0) norm = norm ** norm_factor scores[normalization_mask] /= norm # coverage penalty penalty = accu_attn_scores[normalization_mask] penalty = penalty.clamp(0, 1) penalty = penalty.log() penalty[penalty == float('-inf')] = 0 penalty = penalty.sum(dim=-1) scores[normalization_mask] += cov_penalty_factor * penalty mask = base_mask[:len(active)] mask = mask.masked_select(not_terminating.view(-1)) words = words.index_select(ctx_batch_dim, mask) context[0] = context[0].index_select(ctx_batch_dim, mask) context[1] = context[1].index_select(0, mask) context[2] = context[2].index_select(1, mask) active = active.masked_select(not_terminating.view(-1)) scores = scores.view(batch_size, beam_size) _, idx = scores.max(dim=1) translation = translation[idx + global_offset, :] lengths = lengths[idx + global_offset] return translation, lengths, counter
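# Implementation note (not part of the original code): `source_beam = index / beam_size`
# above relies on `/` performing floor division on int64 tensors, which older PyTorch
# releases did. Newer releases return a float tensor from `/`, which would break the
# subsequent gather/index_select calls. A version-agnostic sketch of the same step:
#
#     source_beam = torch.div(index, beam_size, rounding_mode='floor')
#     # or, equivalently for non-negative indices:
#     source_beam = index // beam_size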
Preferential Fas-mediated apoptotic execution at G1 phase: the resistance of mitotic cells to the cell death Apoptosis is induced by various stresses generated from the extracellular and intracellular environments. The fidelity of the cell cycle is monitored by surveillance mechanisms that arrest its further progression if any crucial process has not been completed or damages are sustained, and then the cells with problems undergo apoptosis. Although the molecular mechanisms involved in the regulation of the cell cycle and that of apoptosis have been elucidated, the links between them are not clear, especially that between cell cycle and death receptor-mediated apoptosis. By using the HeLa.S-Fucci (fluorescent ubiquitination-based cell cycle indicator) cells, we investigated the relationship between the cell cycle progression and apoptotic execution. To monitor apoptotic execution during cell cycle progression, we observed the cells after induction of apoptosis with time-lapse fluorescent microscopy. About 70% of Fas-mediated apoptotic cells were present at G1 phase and about 20% of cells died immediately after cytokinesis, whereas more than 60% of etoposide-induced apoptotic cells were at S/G2 phases in random culture of the cells. These results were confirmed by using synchronized culture of the cells. Furthermore, mitotic cells showed the resistance to Fas-mediated apoptosis. In conclusion, these findings suggest that apoptotic execution is dependent on cell cycle phase and Fas-mediated apoptosis preferentially occurs at G1 phase. Subject Category: Experimental Medicine Apoptosis is a mechanism of cell death that is fundamental in many biological phenomena, including morphogenesis and maintenance of tissue homeostasis. Apoptosis is characterized by chromatin condensation, nuclear fragmentation, and formation of membrane-enclosed vesicles called apoptotic bodies, which are phagocytosed by other cells. Various stresses generated from the extracellular and intracellular environments induce apoptosis. Stimuli that trigger apoptosis in mammalian cells can be summarized into two major categories of apoptotic pathways: the extrinsic and intrinsic pathways. The extrinsic pathway occurs in response to external signals. This is also referred to as the death receptor pathway, as it is mediated by ligation of cell surface death receptors to their cognate ligands. Ligation of death receptors induces subsequent downstream signaling through the initiator caspase-8 and/or caspase-10. Intrinsic apoptotic signaling occurs in response to stimuli such as DNA damage, growth factor withdrawal, and exposure to certain chemotherapeutic agents, all of which result in the release of cytochrome c and other pro-death factors from the intermembrane space of the mitochondria and subsequent downstream signaling through the initiator caspase-9. Fas/CD95 is a member of the tumor necrosis factor receptor superfamily, and induces apoptosis through the extrinsic pathway. The activation of Fas/CD95 by its specific ligand, FasL/CD95L, induces apoptosis in susceptible target cells. 4 After activation of Fas/CD95, the adapter protein, Fasassociated protein with death domain (FADD), binds to the death domain of Fas/CD95 and attracts procaspase-8 via its death effector domain to the receptor complex, forming the death-inducing signaling complex (DISC). 
Upon DISC formation, procaspase-8 is autolytically cleaved and activated and, in turn, cleaves downstream caspases such as effector caspase-3 and -7, leading to cleavage of cellular proteins and DNA and to subsequence apoptotic cell death. 8 On the other hand, etoposide (VP-16) is one of the most widely used anticancer drugs, belonging to the family of DNA topoisomerase II inhibitors. Etoposide causes DNA double-strand breaks through the formation of a cleavage complex containing DNA-drug-enzyme, and induces apoptosis through the intrinsic pathway. 9 Cell cycle checkpoints restrain further cell cycle progression if a process has not been successfully completed or DNA damage has been sustained. 10 Checkpoints operate to prevent further DNA replication within S phase when the replication complexes are stalled, to prevent entry into mitosis when DNA replication is not completed, and to prevent chromosome segregation when mitotic spindle assembly has not been completed. DNA damage-induced checkpoints also inhibit entry into S phase, progression through S phase, and entry into mitosis. Recently, HeLa.S-Fucci (fluorescent ubiquitination-based cell cycle indicator) cells were established by Miyawaki group, 11 which express monomeric Kusabira-Orange 2 (mKO2) and monomeric Azami-Green 1 (mAG1) fused to the ubiquitination domains of Cdt1 and geminin, respectively, to monitor the cell cycle progression in situ. As Cdt1 and geminin are the direct substrates of SCF Skp2 and APC/C Cdh1 complexes, respectively, the level of Cdt1 is highest at G 1 phase whereas geminin is prominent during S, G 2, and M phases. Therefore, the cell nuclei of HeLa.S-Fucci cells during the cell cycle are labeled with orange of mKO2 fused to the ubiquitination domain of Cdt1 in G 1 phase and green of mAG1 fused to the ubiquitination domain of geminin in S, G 2, and M phases. The availability of the Fucci system for the analysis of tumor biology and cell biology in relation to cell cycle regulation has been reported. It has been unclear whether the apoptotic events are dependent on the cell cycle phase, because the available methods are not sufficient to monitor the apoptotic execution in situ. Common protocols such as immunoblotting and flow cytometry have been applied to examine the cell cycle progression and apoptotic events, which indirectly indicated the cell cycle phase of apoptotic cells by using marker proteins such as cyclins and Rb, and DNA contents. However, cyclin E, Rb, and cyclin-dependent kinase (CDK) inhibitors are substrates for caspases, and DNA contents gradually decrease during apoptotic execution. Furthermore, if the cells were examined after synchronization using some drugs that are toxic to cells, it is difficult to distinguish whether apoptosis is induced by apoptotic stimuli. Here we used HeLa.S-Fucci cells to directly monitor the cell cycle progression and determine the cell cycle phase of apoptotic cells induced by the extrinsic and intrinsic pathways under the time-lapse fluorescent microscopy. Our results clearly indicated that apoptotic execution is dependent on cell cycle phase and Fas-mediated apoptosis preferentially occurs at G 1 phase. Results The ratio of cell cycle phase of normal cells after apoptosis induction. The relationship between apoptosis and the cell cycle remains unclear. Therefore, we set out to analyze the cell cycle specificity of Fas-mediated and etoposide-induced apoptotic execution using HeLa.S-Fucci cells. 
First, we checked the sensitivities of HeLa.S-Fucci cells to the treatment of an agonistic anti-Fas antibody and etoposide. The viability of the cells treated with the anti-Fas antibody decreased in a time-dependent manner accompanying the activation of caspases and the cleavage of caspase substrate (Figures 1a and b). The response to etoposide had a time lag of about 12 h for the apoptosis execution and caspase activation (Figures 1c and d). To study the effects of the agonistic anti-Fas antibody and etoposide on the cell cycle progression, we observed HeLa.S-Fucci cells with immunofluorescent microscope at 24 h after treatment ( Figure 2a). Total number of living cells, which attached on the culture dish and showed spread shape, decreased as compared with that of non-treated cells. Although the ratio of the cells having red nuclei to the cells having green nuclei in non-treated and the anti-Fas antibody-treated cells seemed to be unchanged, the relative number of normal cells having green nuclei significantly increased after treatment with etoposide, suggesting that etoposide-induced apoptosis occurred in a cell cycle-dependent manner. To confirm this, cells showing normal morphology were counted and classified into each cell cycle phase according to the criteria as shown in Figure 2b. Cells treated with etoposide were significantly accumulated at S/G 2 /M phases with nuclei labeled with green color, consistent with the pharmacological action of etoposide that inhibits topoisomerase II, thereby arrests the cells at S and G 2 phases (Figure 2c). Additionally, we noticed a slight decrease of cells at G 1 phase accompanying an increase of cells at S/G 2 /M phases in the anti-Fas antibody-treated cells, suggesting that the Fas-mediated apoptosis preferentially occurs at G 1 phase. Figure 3b) were shown. Apoptotic cells with red color were marked with red arrows, apoptotic cells with green color were with green arrows, apoptotic cells with both red and green colors were with yellow arrows, and apoptotic cells without any color were with white arrows. Although most of the cells treated with etoposide had green color, suggesting that apoptotic cells were at S, G 2, and M phases (Figure 3b), the majority of Fas-mediated apoptotic cells had red color indicating that the cells were dead at G 1 phase (Figure 3a). To confirm this, we counted apoptotic cells and classified into each cell cycle phase according to the criteria as shown in Figure 4a. About 70% of apoptotic cells were present at G 1 phase at 6 h after treatment with the anti-Fas antibody, and gradually decreased the apoptotic cells at G 1 phase accompanying the increase of apoptotic cells at M/G 1 phase (Figure 4b), suggesting that Fas-mediated apoptosis preferentially occurred at G 1 phase. On the other hand, more than 60% of etoposideinduced apoptotic cells had green color at 24 h after treatment with etoposide (Figure 4c), indicating that the etoposideinduced apoptotic execution occurred at S/G 2 /M phases. suggesting that etoposide-treated cells arrested at S and/or G 2 phases. The viability of synchronized cells treated with the anti-Fas antibody decreased in a time-dependent manner, but the prominent decrease was observed between 12 and 15 h after the release to enter the cell cycle (Figure 5c), indicating that the sensitivities of the cells to the treatment with the anti-Fas antibody were transiently enhanced after M phase. 
To confirm these results, the number of apoptotic cells per unit area was counted at each time point (Figure 5d), and the increase of apoptotic cells at each period was shown by the histogram (Figure 5e). Apoptotic cells appeared at the highest level between 12 and 15 h, suggesting that the Fas-mediated apoptosis was preferentially executed at G 1 phase. In contrast, the onset of apoptotic execution of etoposide-treated cells was around 15 h after treatment, and thereafter the decrease of viability and the increase of apoptotic cells were observed in a timedependent manner (Figures 5f-h). These results suggested that apoptotic execution is dependent on cell cycle phase and Fas-mediated apoptosis preferentially occurs at G 1 phase. Resistance of mitotic cells to Fas-mediated apoptosis. As shown in Figures 5a and e, Fas-mediated apoptosis was transiently enhanced immediately after M phase. Furthermore, we noticed that there were no occurrence of apoptosis during M phase under time-lapse fluorescent microscope observation of both asynchronous and synchronous cells after induction of Fas-mediated apoptosis (Supplementary Movies S2 and S5). These results suggest that mitotic cells are resistant to Fas-mediated apoptosis. To confirm this, we counted mitotic cells on the movies and the images constructing the movies, and classified into normal and dead cells (Table 1). Cells treated with the agonistic anti-Fas antibody normally transited through M phase, which have more than 95% viability throughout the observation, even though the total cell viability was gradually decreased. These results strongly suggested that Fas-mediated apoptosis is not executed during M phase. FADD, a component of DISC, was suggested to be phosphorylated at Ser-194 in arrested cells at G 2 /M phases. 29 Therefore, we analyzed whether FADD is phosphorylated at Ser-194 during M phase ( Figure 6). After synchronization of HeLa.S-Fucci cells at the border of G 1 and S phases with a double-thymidine block following its release into the cell cycle, cells were collected at each time point and analyzed by immunoblotting. The doublet bands of FADD were detected at 8 and 10 h after release from the thymidine block. Furthermore, the band corresponding to phosphorylated FADD was detected with the antibody specifically recognized phosphorylated FADD at Ser-194 at the same periods. As synchronized HeLa.S-Fucci cells at the border of G 1 and S phases transited through M phase from 7 to 11 h after the release to enter S phase as described in Figure 5, these results suggested that FADD is phosphorylated at Ser-194 during M phase. Discussion The relationship between the apoptotic execution and the cell cycle progression had been difficult to be analyzed, because it It was suggested that the cells treated with etoposide arrested at S and/or G 2 phases, as most normal and apoptotic cells had nuclei labeled with green color and no mitotic cells were observed (Figures 2-4, and Supplementary Movies S3 and S6). As etoposide forms a complex with topoisomerase II and DNA to enhance double-strand and single-strand breaks and reversible inhibition of DNA religation, the cell cycle was arrested in S and/or G 2 phases by the mechanisms of the cell cycle checkpoints, leading to apoptotic cell death through the intrinsic pathway before the entry into M phase. Fas-mediated apoptosis was executed through the extrinsic pathway of apoptosis. 
Although several groups tried to Cell viability after treatment with the agonistic anti-Fas antibody was assessed by counting cells on the movies and the images constructing the movies a Mitotic cells were classified into normal and dead cells during each period after induction of Fas-mediated apoptosis. The data were obtained from six movies b Data were obtained from Figure 1a Cell cycle-dependent Fas-mediated apoptosis T Hashimoto et al determine the cell cycle dependency of Fas-mediated apoptosis, the results were inconsistent: more sensitive at G 1 phase, 17,21 no cell cycle dependency, 18,19 susceptible at S phase, 20,30 and dependency at G 1 and S phases. 22,23 Here we clearly indicated that Fas-mediated apoptosis occurred preferentially at G 1 phase. Furthermore, we noticed that no apoptosis occurred at M phase and the ratio of apoptotic cells which died immediately after cytokinesis increased in a timedependent manner (Figure 4b and Supplementary Movies S2 and S5). These results suggest that mitotic cells show a resistance to Fas-mediated apoptosis. How are mitotic cells resistant to Fas-mediated apoptosis? During mitosis, cells undergo dramatic changes including chromosomal condensation, nuclear envelope breakdown, and mitotic spindle formation. Phosphorylation events of multiple proteins by CDKs and other mitotic kinases are essential for the regulation of these processes. 31,32 The components of DISC were suggested to be phosphorylated during mitosis. FADD is the key adapter protein transmitting apoptotic signals mediated by the death receptors. 5,7 FADD is also implicated in cell proliferation, cell cycle progression, tumor development, inflammation, innate immunity, and autophagy. 33 Ser-194 of the FADD protein has been shown to undergo phosphorylation in cells arrested in G 2 /M phases, and three kinases able to phosphorylate FADD at the Ser-194 residue have been identified: the 37-kDa casein kinase Ia, the 130-kDa Fas/FADD-interacting serine/threonine kinase (FIST-HIPK3), and Polo-like kinase 1. 29,34-36 Furthermore, the phosphorylation at Ser-203 of FADD by mitotic kinase Aurora-A led to cooperative phosphorylation at Ser-194 of FADD. 36 In contrast to pro-apoptotic cytoplasmic FADD, the phosphorylation at Ser-194 is suggested to be essential for the nuclear localization of FADD, 37 and nuclear FADD is implicated in survival mechanisms. 38 As we detected the phosphorylation at Ser-194 of FADD during M phase (Figure 6), the molecular mechanisms of the resistance to Fas-mediated apoptosis in mitotic cells may be partially explained by the phosphorylation at Ser-194 of FADD that sequestered FADD molecule in nuclei thereby preventing DISC formation in the cytoplasm. Furthermore, phosphorylation at Ser-18 and Ser-21 of FADD during mitosis were identified by phosphoproteomics analysis. 39 It will be necessary to evaluate the possible contribution of these phosphorylation of FADD to the regulation of Fas-mediated apoptosis. On the other hand, it was recently reported that procaspase-8 was phosphorylated in mitotic cells by Cdk1/ cyclin B1 on Ser-387, which prevented Fas-mediated apoptosis. 40 However, as the phosphorylated procaspase-8 at Ser-387 was predominantly localized at centrosomes from prometaphase to anaphase, further investigation will be needed to clear the relationship between the phosphorylation of procaspase-8 at Ser-387 and the resistance of mitotic cells to Fas-mediated apoptosis. 
Additionally, several phosphorylation of the components of DISC during mitosis was reported by phosphoproteomics analysis, such as Ser-209, Ser-212, Thr-214, and Thr-219 of Fas/CD95, 41 Ser-276 and Ser-289 of procaspase-8. 39 The physiological importance of the phosphorylation of these proteins in the DISC formation needs to be clarified. Materials and Methods Cell culture, synchronization, and apoptosis induction. HeLa. S-Fucci cells 11 were cultured in DMEM supplemented with 10% fetal bovine serum (FBS). For synchronization at the border of G 1 and S phases, cells were seeded at a density of 5 10 4 cells per 35-mm dish and cultured for 24 h. After exposure to 2.5 mM thymidine for 18 h, cells were washed with phosphate-buffered saline (PBS) three times and incubated in fresh medium for 10 h, and then exposed to 2.5 mM thymidine again for 14 h. To release the cells from the arrest, medium containing thymidine was removed, and the cells were washed with PBS three times and incubated in fresh medium. For induction of apoptosis, cells were treated with 0.5 mg/ml of the agonistic anti-Fas antibody (CH-11; Medical & Biological Laboratories, Nagoya, Japan) or 50 mM etoposide (VP-16). Time-lapse fluorescence microscopy. For time-lapse fluorescence microscopy, HeLa.S-Fucci cells were plated on a 35-mm dish. The medium was replaced with MEM supplemented with 10% FBS without phenol red, and dishes were placed in a humidified chamber at 371C that was mounted on a fluorescence microscope (model BZ-8000; Keyence, Osaka, Japan) with a constant supply of mixed air containing 5% CO 2. For analyses with BZ-8000, cells were observed with a 20 X objective lens and image data were obtained automatically every 15 min from seven different fields by using BZ-H1TL software (Keyence). In the original program by the manufacturer, cells are exposed to the intense mercury lamp light intermittently and the cells were driven to apoptosis within the initial 6 h by the physical damage of the light. Therefore, we used the neutral density filters to reduce the light levels to 1.6% that allowed the cells to proliferate during 48 h observation. 42 Conflict of Interest The authors declare no conflict of interest. Figure 6 FADD phosphorylation during mitotic HeLa.S-Fucci cells. Cells were arrested at the border of G 1 and S phases with a double-thymidine block, followed by release to enter S phase. Cell lysates from cells at each time point were subjected to SDS-PAGE, followed by immunoblotting with antibodies as indicated. Cyclin B and cyclin E were used for cell cycle markers of G 2 /M and G 1 /S phases, respectively
/** * Converts metadata type string representation into {@link MetadataType} instance. * * @param value metadata * @return metadata type instance, null otherwise */ public static MetadataType fromValue(String value) { return Stream.of(values()) .filter(type -> type.name().equalsIgnoreCase(value)) .findAny() .orElse(null); }
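// Illustrative usage (not part of the original class; the lookup string and the fallback
// handling below are assumptions used only for the example):
//
//     MetadataType type = MetadataType.fromValue("artifact");  // case-insensitive match
//     if (type == null) {
//         // the string did not match any enum constant - fall back or report an error
//     }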
<filename>app/src/main/java/com/codepath/newyorktimes/util/ChromeShareProvider.java package com.codepath.newyorktimes.util; import android.app.Activity; import android.app.PendingIntent; import android.content.Intent; import android.graphics.Bitmap; import android.graphics.BitmapFactory; import com.codepath.newyorktimes.activities.R; /** * Created by sdevired on 10/23/16. * util class for showing custom share provider */ public class ChromeShareProvider { Bitmap bitmap; public PendingIntent getPendingIntent() { return pendingIntent; } public void setPendingIntent(PendingIntent pendingIntent) { this.pendingIntent = pendingIntent; } public Bitmap getBitmap() { return bitmap; } public void setBitmap(Bitmap bitmap) { this.bitmap = bitmap; } PendingIntent pendingIntent; public ChromeShareProvider(Activity activity){ //prepare intent Intent intent = new Intent(Intent.ACTION_SEND); intent.setType("text/plain"); intent.putExtra(Intent.EXTRA_TEXT, "http://www.codepath.com"); //pending intent int requestCode = 100; pendingIntent = PendingIntent.getActivity(activity, requestCode, intent, PendingIntent.FLAG_UPDATE_CURRENT); bitmap = BitmapFactory.decodeResource(activity.getResources(), R.drawable.ic_sharable); } }
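// Example wiring (sketch): a ChromeShareProvider is typically consumed when building a
// Chrome Custom Tab. CustomTabsIntent.Builder#setActionButton and launchUrl are part of
// the Custom Tabs support library; the `activity` and `articleUrl` variables below are
// assumptions used only for illustration.
//
//     ChromeShareProvider shareProvider = new ChromeShareProvider(activity);
//     CustomTabsIntent customTabsIntent = new CustomTabsIntent.Builder()
//             .setActionButton(shareProvider.getBitmap(), "Share article",
//                     shareProvider.getPendingIntent())
//             .build();
//     customTabsIntent.launchUrl(activity, Uri.parse(articleUrl));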
Nicotinamide as Additive for Microcrystalline and Defect Passivated Perovskite Solar Cells with 21.7% Efficiency. Passivation of electronic defects on the surface and at grain boundaries (GBs) of perovskite films has become one of the most effective tactics to suppress charge recombination in perovskite solar cells. It has been demonstrated that trap states can be effectively passivated by Lewis acid or base functional groups. In this work, nicotinamide (NTM, commonly known as vitamin B3 or vitamin PP), serving as a Lewis base additive, is introduced into the PbI2 and/or FAI:MABr:MACl precursor solution to obtain NTM-modified perovskite films. The NTM in the perovskite film effectively passivates surface and GB defects, controls the film morphology, and enhances crystallinity via the lone electron pair on its nitrogen. In the presence of the NTM additive, we obtained enlarged perovskite crystal grains of about 3.6 μm and a champion planar perovskite solar cell with an efficiency of 21.72% and negligible hysteresis. Our findings provide an effective route for crystal growth and defect passivation that can further improve both the efficiency and stability of perovskite solar cells.
Social skills characteristics of students with autism spectrum disorder Objectives: This study examines differences in social skills among children with autism spectrum disorder (ASD). In order to investigate these differences, social skills were associated with variables like gender, age, intellectual disability, language development, and school type. Methods: For the purposes of the study a total of 63 students with ASD attending primary and secondary special education units were selected in Northern Greece. A structured questionnaire was filled in by their teachers. Results: The results showed major differences between children with ASD and intellectual disability and those without. Likewise, verbal children obtained higher scores than non-verbal. These higher scores indicate better social skills. Age, gender, and school type differentiated the scores of the groups only in a few factors of the questionnaire. Conclusion: Intellectual disability and language are variables that clearly influence the socialization of children with ASD.
/**
 * Reads an integer from the user and re-prompts until the value is no greater than the specified maximum
 * @param max maximum allowed value to test input against (inclusive)
 * @param prompt prompt to ask user for input
 * @param errorPrompt message shown to the user if the value is invalid
 * @return the validated integer value
 */
public int intIsLessThan(int max, String prompt, String errorPrompt){
    int val = readInt(prompt);
    if(val <= max){
        return val;
    }
    else{
        prompt(errorPrompt);
        return intIsLessThan(max, prompt, errorPrompt);
    }
}
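// Design note (sketch, not from the original source): the recursive retry above grows the
// call stack by one frame per invalid entry. An equivalent iterative form using the same
// readInt/prompt helpers would be:
//
//     public int intIsLessThan(int max, String prompt, String errorPrompt){
//         int val = readInt(prompt);
//         while(val > max){
//             prompt(errorPrompt);
//             val = readInt(prompt);
//         }
//         return val;
//     }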
Jones is understood to have been furious after discovering the bloody animal remains in his locker on Friday morning and threw a brick through the side window of Glenn Whelan’s Porsche as revenge. The picture of the pig was then posted on Instagram by American winger Brek Shea to draw attention to the incident and Stoke have had no option but to initiate an inquiry.

It is believed Whelan, the Republic of Ireland international, was behind the prank and he is expected to explain his behaviour to Stoke officials before Sunday’s trip to Southampton. Whelan and Jones could face fines if they are found guilty of misconduct in line with the club’s disciplinary procedures, with chief executive Tony Scholes leading the investigation. Shea is also likely to be reminded of his responsibilities while using social media. This latest practical joke comes days after Michael Owen’s Mercedes was pelted with eggs and flour, while it is understood other pranks have been taking place all week as Stoke prepare for their final game of the season.

Stoke’s players are bemused that news of the pig’s appearance at the Clayton Wood training ground has created such a storm and a source described it as “banter that went a bit too far”. However, Jones’s grisly discovery enraged his girlfriend, Kissa Abdullah, who criticised the Stoke players on Twitter. She wrote: “Big men acting like kids a pigs head is not a prank its just disgusting!!!!! I don’t get how was that sup 2 be funny!!!! Really???” She added that the prank showed no sensitivity to Jones’s religious beliefs: “It just takes a minute to be sensitive to someone’s beliefs no one says u can’t have a sense of humour but let it be a joke! This wasn’t!”

Jones is expected to be included in Stoke’s squad for the game at St Mary’s, but Whelan is doubtful with a groin injury, with manager Tony Pulis keen to avert attention from the row and focus on a possible tenth-placed finish. Pulis is facing crunch talks with chairman Peter Coates next week to discuss preparations for next season in a scheduled meeting, with his future expected to be on the agenda. His position has been the subject of much debate after a difficult 2013, but the Welshman wants to remain in charge at the Britannia Stadium. He has mapped out the club’s pre-season tour of the United States and been making trips across Europe scouting targets. But Pulis will want to discuss various issues, including the budget for next season, the club’s recruitment policy and growing focus on the academy.

“I want to stay in charge and I’ve been doing everything as normal,” he said. “As far as I’m concerned I’ll still be here next season.” Owen will also be in the squad this weekend in what could be his final appearance before his retirement at the end of the season. The former England international has made only one start for Stoke since signing on a free transfer last summer and is set to be named among the substitutes.
“There will be a send-off for him at Southampton on Sunday for what has been a wonderful career,” said Pulis. “As to whether he’ll play, I’ll be telling him before anybody else.”
If you've followed this site for a while, you'll know that I'm a big fan of Dave Ramsey, his 7 Baby Steps To Getting Out Of Debt, as well has his 9 week class, Financial Peace University (which we helped to facilitate at hour church), and his next steps class, The Legacy Journey. Dave has several best selling books, does huge live events across the country, and has his own radio and TV shows that are enjoyed by millions. One of the things that Dave Ramsey is known for is his admonition to not use debt via credit cards or other means, and his suggestion to pay cash for everything you buy. He even suggests that you should pay cash for a home – if you can! I was just reading some posts today about Dave Ramsey's new home that he recently built, and it sounds like it is quite the showplace. The home and land are valued at over $4,900,000! So the question is… Did Dave Ramsey pay cash for his new home? Or was he a hypocrite and take out a mortgage? UPDATE: I published this article several months ago. Within the past few days Dave Ramsey actually found this post through Twitter and commented! (see below). To read Ramsey's comment without going through the entire thread (150 comments) and to get my take on it, head on over to the updated post: Dave Ramsey Comments On My Post About His New House, His Debt Philosophy And Giving. MY LATEST VIDEOS MY LATEST VIDEOS All photos copyright coolsprings.com Did Dave Ramsey Pay Cash For His New House? After another personal finance blog linked to it I checked out this real estate website talking about Dave Ramsey's home, the cost, and some of the features of the home. Most people have seen Dave Ramsey’s home in Cool Springs from the distance although they may not have known it is his. The house looks like a snow capped mountain but instead of snow, the mountain top is covered by Dave Ramsey’s home. It is fairly majestic to say the least. The land at King Richard’s Court Franklin TN 37067 was purchased for $1,552,000 by Dave Ramsey on April 2, 2008. For the tax year 2008 (before the home was constructed) annual taxes were just $4,938. For the year 2010, the land market value is $750,000 and the improvement value is $4,159,200 for a combined total market appraisal of $4,909,200. A mortgage does not appear to have been recorded for the property. That’s our Dave! The tax record shows 3 levels in Dave Ramsey‘s Cool Springs home, totaling 13,307 square feet of living area and 1,454 square feet of garage. From what we hear Dave’s home office, including the sliding library wall ladder, is made of solid mahogany. The shower in the master bathroom is rumored to have 18 shower heads and is larger than the jacuzzi tub. Cathedral ceilings throughout. The local who we spoke with felt the basement was by far the most impressive. Full bar with whiskey barrels built into the walls, media room and several bedrooms make up the broad lower basement level you see from the distance, wrapping around the tip of the mountain. So according to this real estate professional the home had no mortgage documents recorded, which means that it's pretty likely that Ramsey paid cash for the land and home! He's following his own advice! It sounds like it's quite the house too. Who wouldn't want to have a beautiful mahogany lined office like that, or a shower with 18 shower heads? He truly is now living like no one else! Of course, since Dave Ramsey has an estimated net worth of $55 million dollars, this home is well within his family budget. Want to live near Dave Ramsey? 
The house next door, owned by Lee Ann Rimes, is now for sale for only $6,499,000! What a Bargain!

More Pictures Of Dave Ramsey's House

All photos copyright coolsprings.com
A cellular implantation system using an injectable ultra-purified alginate gel for repair of osteochondral defects in a rabbit model. We developed a novel cellular implantation system using an in situ forming ultra-purified alginate gel with very low endotoxicity. The aims of this study were to determine whether the chondrogenic potential of bone marrow stromal cells (BMSCs) cultured in the purified alginate gel is superior to that in a commercial-grade gel, and to assess the reparative tissues treated with BMSCs implanted with the developed system into cartilage defects in rabbit knees. The effects of each alginate gel on cellular proliferation and chondrogenesis of rabbit BMSCs were determined by in vitro assessments. Using our purified alginate gel, a novel vehicle system for injecting BMSCs into osteochondral defects without a periosteal patch was successfully established in this animal model. The in vitro analyses demonstrated that the purification of alginate significantly enhanced the cellular proliferation and chondrogenic differentiation of BMSCs. The in vivo assessments suggested that the implantation of BMSCs with the developed system using the purified alginate gel histologically and mechanically improved the reparative tissue of osteochondral defects. This system using the purified alginate gel shows clinical potential for arthroscopically injectable implantation of BMSCs for the treatment of cartilaginous lesions.
<reponame>abhisaini/pulsar-hack // +build !ignore_autogenerated /* Copyright 2021. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by controller-gen. DO NOT EDIT. package v1alpha1 import ( runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Topic) DeepCopyInto(out *Topic) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec out.Status = in.Status } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topic. func (in *Topic) DeepCopy() *Topic { if in == nil { return nil } out := new(Topic) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *Topic) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TopicList) DeepCopyInto(out *TopicList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Topic, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicList. func (in *TopicList) DeepCopy() *TopicList { if in == nil { return nil } out := new(TopicList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *TopicList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TopicSpec) DeepCopyInto(out *TopicSpec) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicSpec. func (in *TopicSpec) DeepCopy() *TopicSpec { if in == nil { return nil } out := new(TopicSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TopicStatus) DeepCopyInto(out *TopicStatus) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicStatus. func (in *TopicStatus) DeepCopy() *TopicStatus { if in == nil { return nil } out := new(TopicStatus) in.DeepCopyInto(out) return out }
export enum OrderStatusEnum {
  production = 0,
  sending = 1,
  finished = 2,
  canceled = 3,
}
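// Illustrative usage (not from the original repository; the label strings are assumptions):
// mapping an OrderStatusEnum value to a human-readable label.
export const orderStatusLabels: Record<OrderStatusEnum, string> = {
  [OrderStatusEnum.production]: 'In production',
  [OrderStatusEnum.sending]: 'Out for delivery',
  [OrderStatusEnum.finished]: 'Finished',
  [OrderStatusEnum.canceled]: 'Canceled',
};

// Example: orderStatusLabels[OrderStatusEnum.sending] === 'Out for delivery'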
. OBJECTIVES To estimate the risk of mother-to-child transmission (MTCT) of HIV-1 and evaluate the effect of preventive methods on vertical transmission of HIV, such as: use of AZT by pregnant women during the prenatal period, labor and delivery; use of AZT in newborns; replacement of breastfeeding by formula; and indication of cesarean section. METHODS This was a cohort study. Data was collected from medical records of pregnant women and of children followed at reference health centers for HIV/AIDS patients. To estimate the risk of MTCT we divided the number of mothers whose children acquired the virus through vertical transmission by the total number of mothers included in the study; the relative risks were calculated with a CI=95%. Occurrence of transmission was regarded as a dependent variable, the other factors (maternal age, use of oral AZT and use of AZT syrup, gestational age and breastfeeding) were considered as independent variables. Mantel-Haenszel's techniques were used in this analysis to control the possible effect of some variables. RESULTS The occurrence of HIV in MTCT was identified in 144 children. Fourteen were classified as infected. Risk of transmission was of 0.097 (95% CI; 0.030-0.163). The risk of HIV vertical transmission was smaller in women under thirty years of age when compared with older ones. In the univariate analysis, the MTCT risk was significantly associated with maternal age, prenatal care, use of oral AZT and use of AZT syrup, gestational age and breastfeeding. CONCLUSION This study showed that the implementation of preventive measures is important in the prevention of mother-to-child transmission of HIV, providing a risk reduction of almost 40% when compared to that before use of these preventive measures.
# -*- coding: utf-8 -*-
"""
    pip_services3_commons.convert.IntegerConverter
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Integer conversion utilities

    :copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details.
    :license: MIT, see LICENSE for more details.
"""
from typing import Any, Optional

from ..convert.LongConverter import LongConverter


class IntegerConverter:
    """
    Converts arbitrary values into integers using extended conversion rules:
    - Strings are converted to floats, then to integers
    - DateTime: total number of milliseconds since unix epoch
    - Boolean: 1 for true and 0 for false

    Example:

    .. code-block:: python

        value1 = IntegerConverter.to_nullable_integer("ABC")                    # Result: None
        value2 = IntegerConverter.to_nullable_integer("123.456")                # Result: 123
        value3 = IntegerConverter.to_nullable_integer(True)                     # Result: 1
        value4 = IntegerConverter.to_nullable_integer(datetime.datetime.now())  # Result: current milliseconds
    """

    @staticmethod
    def to_nullable_integer(value: Any) -> Optional[int]:
        """
        Converts value into integer or returns None when conversion is not possible.

        :param value: the value to convert.
        :return: integer value or None when conversion is not supported.
        """
        return LongConverter.to_nullable_long(value)

    @staticmethod
    def to_integer(value: Any) -> int:
        """
        Converts value into integer or returns 0 when conversion is not possible.

        :param value: the value to convert.
        :return: integer value or 0 when conversion is not supported.
        """
        return LongConverter.to_long(value)

    @staticmethod
    def to_integer_with_default(value: Any, default_value: int) -> int:
        """
        Converts value into integer or returns default value when conversion is not possible.

        :param value: the value to convert.
        :param default_value: the default value.
        :return: integer value or default when conversion is not supported.
        """
        return LongConverter.to_long_with_default(value, default_value)
SPRINGFIELD -- The city Parks Department is partnering with two forestry and conservation groups on a project that is aimed at helping to restore American elm trees to the Connecticut River Valley decades after being devastated by Dutch elm disease. Approximately 130 disease-resistant elm tree saplings, now being grown inside University of Massachusetts greenhouses in Amherst, are slated to be relocated and replanted at the Forest Park nursery in September under the new elm tree restoration collaboration, officials said. Half of the trees, once mature, will be planted as urban trees on the streets of Springfield, and the other half will be planted in areas along the Connecticut River, said Christian Marks, a representative of The Nature Conservancy, one of the participating groups. It will take at least two years before some of the saplings can be moved from the Forest Park nursery, and some years longer before they would be ready for urban streets, officials said. "Like many cities throughout New England, Springfield used to have streets lined with these gorgeous American elms before the spread of Dutch elm disease," Marks said. "Now, they are mostly gone. This presents an opportunity to try to restore some of this heritage by planning the disease-tolerant elms." The city and The Nature Conservancy are joined in the collaboration by the U.S. Department of Agriculture Forest Service. The plantings at the Forest Park nursery will be done by members of the participating groups and volunteers, officials said. The Nature Conservancy will provide a fence around the nursery area to protect the elms from deer, and the plantings will be overseen and maintained by the city's forestry division. The responsibilities of each group is outlined in a memorandum of understanding. Patrick Sullivan, the city's parks director, and Alex Sherman, of the forestry division, said they are excited by the collaborative project and very optimistic about its long-term potential for success. "The city has a rich history of the American elms," Sullivan said. "The Dutch elm disease destroyed these city street trees, and we are excited about reintroducing the Liberty Elm to our urban tree inventory." The peak die-off of the American elm occurred in the 1950s and continued through the 1960s and 1970s, Marks said. The disease is a fungus spread by the elm bark beetle. Scientists have been working on the development of disease-resistant Elm trees in recent years. "For the American elm to regain its former role as a canopy tree of both urban and flood plain forests, it's essential to have trees that are disease resistant," Marks said. "In the long term, I'm confident it will succeed." The Nature Conservancy's stated mission is to conserve lands and waterways.
/** * LanguageMetrics implementation class. * * @author Yoshito Umaoka */ class LanguageMetricsImpl extends LanguageMetrics { private final EnumMap<TranslationStatus, Integer> ts; private final ReviewStatusMetrics rs; private final Map<String, Integer> ps; LanguageMetricsImpl(EnumMap<TranslationStatus, Integer> ts, ReviewStatusMetrics rs, Map<String, Integer> ps) { this.ts = ts; this.rs = rs; this.ps = ps; } @Override public EnumMap<TranslationStatus, Integer> getTranslationStatusMetrics() { if (ts == null) { return null; } return ts.clone(); } @Override public ReviewStatusMetrics getReviewStatusMetrics() { return rs; } @Override public Map<String, Integer> getPartnerStatusMetrics() { if (ps == null) { return null; } return Collections.unmodifiableMap(ps); } }
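// Note on the accessors above (illustrative): the EnumMap is cloned and the partner map is
// wrapped in Collections.unmodifiableMap, so callers receive a defensive view and cannot
// mutate the metrics instance. For example (variable names are assumptions):
//
//     Map<String, Integer> partner = metrics.getPartnerStatusMetrics();
//     partner.put("partner-x", 10);   // throws UnsupportedOperationException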
package com.airbnb.lottie.value; public class LottieFrameInfo<T> { private float endFrame; private T endValue; private float interpolatedKeyframeProgress; private float linearKeyframeProgress; private float overallProgress; private float startFrame; private T startValue; public LottieFrameInfo<T> set(float f, float f2, T t, T t2, float f3, float f4, float f5) { this.startFrame = f; this.endFrame = f2; this.startValue = t; this.endValue = t2; this.linearKeyframeProgress = f3; this.interpolatedKeyframeProgress = f4; this.overallProgress = f5; return this; } public float getStartFrame() { return this.startFrame; } public float getEndFrame() { return this.endFrame; } public T getStartValue() { return this.startValue; } public T getEndValue() { return this.endValue; } public float getLinearKeyframeProgress() { return this.linearKeyframeProgress; } public float getInterpolatedKeyframeProgress() { return this.interpolatedKeyframeProgress; } public float getOverallProgress() { return this.overallProgress; } }
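// Example usage (sketch): LottieFrameInfo is the per-frame context that lottie-android
// hands to a LottieValueCallback when a dynamic property is resolved. The animationView
// variable and the chosen property below are assumptions for illustration only.
//
//     animationView.addValueCallback(
//             new KeyPath("**"),
//             LottieProperty.TRANSFORM_OPACITY,
//             new LottieValueCallback<Integer>() {
//                 @Override
//                 public Integer getValue(LottieFrameInfo<Integer> frameInfo) {
//                     // fade in over the first half of the animation
//                     return (int) (255 * Math.min(1f, frameInfo.getOverallProgress() * 2));
//                 }
//             });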
<reponame>somanyunknowns/KFNet import numpy as np import matplotlib.pyplot as plt import argparse, random, cv2, os def read_lines(filepath): with open(filepath) as fin: lines = fin.readlines() lines = [line.strip() for line in lines] return lines def vis_flow_arrows(image_file1, image_file2, flow_file, thres=100, output_prefix=None): image1 = cv2.imread(image_file1) image2 = cv2.imread(image_file2) image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB) image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB) flow_uv = np.load(flow_file) blend_image = cv2.addWeighted(image1, 0.7, image2, 0.3, 0) height, width, _ = flow_uv.shape for h in range(2, height-2, 3): for w in range(2, width-2, 3): confidence = flow_uv[h, w, 2] if confidence < thres: continue u = flow_uv[h, w, 0] v = flow_uv[h, w, 1] x = w * 8 + 4 y = h * 8 + 4 cv2.line(blend_image, (x, y), (int(x+u*8), int(y+v*8)), (0, 255, 0), 2) if output_prefix: blend_image = cv2.cvtColor(blend_image, cv2.COLOR_RGB2BGR) cv2.imwrite(output_prefix + '.png', blend_image) else: plt.imshow(blend_image) plt.show() plt.close('all') def main(): parser = argparse.ArgumentParser() parser.add_argument('flow_file_list', type=str) parser.add_argument('image_list', type=str) parser.add_argument('output_folder', type=str) parser.add_argument('--thres', default='100', type=float, help="Confidence threshold") args = parser.parse_args() flow_files = read_lines(args.flow_file_list) image_files = read_lines(args.image_list) assert(len(image_files) > len(flow_files)) for i in range(len(flow_files)): image_file1 = image_files[i] image_file2 = image_files[i+1] flow_file = flow_files[i] flow_file_name = os.path.split(flow_file)[1] flow_file_name = os.path.splitext(flow_file_name)[0] output_prefix = os.path.join(args.output_folder, flow_file_name) vis_flow_arrows(image_file1, image_file2, flow_file, args.thres, output_prefix) print "Finish", output_prefix if __name__ == "__main__": main()
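# Example invocation (sketch; the script and list file names are placeholders, not taken
# from the repository):
#
#   python vis_flow_arrows.py flow_files.txt image_files.txt ./vis_output --thres 50
#
# flow_files.txt lists one .npy flow file per line; image_files.txt lists the matching
# images in temporal order and must contain at least one more entry than the flow list.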
def create_preset(self, gedialog):
    # Validate the dialog fields before touching the preset file.
    if self.check_valid_data(gedialog):
        name, data = self.get_valid_data(gedialog)
        update = False
        # A preset with this name already exists - ask before overwriting it.
        if name in gedialog.jsonFunction.fileContent:
            if c4d.gui.QuestionDialog("Are you sure you want to update {}?".format(name)):
                update = True
            else:
                return True
        gedialog.jsonFunction.add_data(name, data)
        gedialog.jsonFunction.save_json_file()
        gedialog.jsonFunction.load_json_file()
        # Refresh the "Metal Type" cycle so the new or updated preset shows up in the UI.
        gedialog.ud.create_cycle(Const.UI_CYCLE,
                                 gedialog.jsonFunction.get_all_name(),
                                 gedialog.ud.idGroups[Const.UI_GROUP_CURVE_METAL_TYPE],
                                 "Metal Type")
        if update:
            c4d.gui.MessageDialog("{} material successfully updated".format(name))
        else:
            c4d.gui.MessageDialog("{} material successfully created".format(name))
Microstructure, Tensile Property, and Surface Quality of Glass Fiber-Reinforced Polypropylene Parts Molded by Rapid Heat Cycle Molding

The microstructure of a molded product considerably influences its macroscopic properties. In this study, the influence of the molding process on microstructure, tensile property, and surface quality was explored for glass fiber-reinforced polypropylene (GFRPP) parts molded by rapid heat cycle molding (RHCM) and conventional injection molding (CIM). Tensile strength and surface gloss were chosen to measure the macroscopic properties of the molded parts. The microstructure, including the multilayer structure, fiber orientation, crystallinity, and fiber-matrix bonding strength, was analyzed by simulations, scanning electron microscopy, wide-angle X-ray diffraction, and dynamic mechanical analysis. The relationship between the macroscopic properties and microstructure of the RHCM samples was also discussed. The results indicate that as the mold cavity surface temperature increases, the tensile strength first increases and then decreases. The tensile strength of RHCM parts reached its maximum at a mold heating temperature of 60°C. It is also observed that the surface gloss of the sample increases as the mold cavity surface temperature rises, and the gain in surface gloss diminishes distinctly once the mold heating temperature exceeds 90°C.

Introduction

Short glass fiber-reinforced polypropylene (GFRPP) composites are recyclable materials with good mechanical properties, high chemical resistance, excellent thermal stability, and an attractive performance-cost ratio. In view of these advantages, GFRPP is widely used in structural applications in the fields of aerospace, automotive, civil, and marine engineering. It is well known that the macroscopic performance of a product is substantially dependent on its microstructure. In the past, considerable efforts have been devoted to adapting the microstructures of GFRPP products and improving their macroscopic performance. It is presumed that the mechanical properties of GFRPP products significantly depend on the combined effects of glass fiber orientation and distribution, fiber-matrix interface bonding conditions, and the state of matrix crystallization, while some surface defects, such as flow marks, floating fibers, weld lines, and jetting marks, cannot be avoided in the CIM process. The rapid heat cycle molding (RHCM) process is a relatively new technique that can not only significantly reduce or even eliminate surface defects but also significantly shorten the production cycle and reduce energy consumption and environmental pollution. The most evident difference between the RHCM and CIM processes is that the former employs dynamic mold temperature control technology whereby the mold temperature is rapidly increased to a preset temperature before the mold is filled with the melt. During the filling and packing stage, the high temperature is maintained; thereafter, the melt is quickly cooled to demold it before proceeding to the next stage of the production cycle. Thus, the RHCM process has a more complex thermomechanical history than the CIM process. To date, research on the relationship between the microstructure and macroscopic properties of products molded by the RHCM process has received increasing attention. Wang et al. investigated the effect of the mold cavity surface temperature during the filling process on the crystallization state of virgin PP in injected plastic part layers with different thicknesses.
ey also explored the relationship between the surface quality of plastic parts, crystallinity, and residual stress in the RHCM. Recently, Speranza et al. reported that the mold surface temperature markedly influenced the molecular orientation and morphology developed in injection-molded samples, and the combination of flow fields and cooling rate experienced by the virgin polymer determined the multilayer structure of samples. Under strong flow field and high temperature conditions, a tightly packed structure (called shish-kebab) aligned along the flow direction was observed, whereas the formation of -phase in cylindrite form was observed in weak flow fields. Li et al. proposed a novel method to acquire the crystallization evolution information of virgin polymers during the RHCM process. e influence of temperature and shear rate on crystallization was considered in this method, thereby the method was beneficial to the optimization of the RHCM molding process for the manufacture of high-quality products. Liparoti et al. analyzed the effect of the operative conditions of injection molding process on the morphology distribution inside molded products and found that the shear layer thickness was reduced in samples produced at high mold temperatures. It was particularly noted that the shear layer disappeared when the packing pressure and heating time were 360 bar and 20 s, respectively. Based on the foregoing investigations, literature studies are mainly focused on the virgin polymer. So far, limited research works are about short glass fiber-reinforced polymer under the RHCM process. Li et al. established a novel model describing the confluent process for the fiberreinforced melt in the RHCM and described the relationship between the mold temperature and appearance of the confluent region. Furthermore, they presented the effect of fiber orientation in the confluent region on the impact strength of resultant parts. Wang et al. used two reinforced plastics including ABS/PMMA/nano-CaCO 3 and 20% fiber-reinforced polypropylene to investigate the influence of cavity surface temperature immediately before filling on the surface appearance and texture of the molded reinforced plastic parts using the RHCM. e roughness, gloss, and morphology of the surface are characterized with white light interferometer, gloss meter, and optical microscope, respectively. ey found that the mold surface temperature immediately before filling has a significant influence on the surface appearance of the molded part. In another work, the same research group found that for fiber-reinforced plastics of PP + 20% glass fiber produced through the RHCM, the cavity surface temperature set to 118°C at the filling stage reduced the tensile strength of the part without a weld mark but slightly increased that of the part with a weld mark. e aforementioned studies indicate that the microstructure (e.g., multilayer structure, fiber orientation, state of crystallization, interfacial bonding strength, and surface morphology) and macroperformance (e.g., tensile strength and surface gloss) of GFRPP product molded by RHCM have not been established. is makes the processing and parameter adjustment of the RHCM have great blindness. Hence, it is crucial to obtain the microstructure evolution law of reinforced thermoplastics produced by the RHCM process to reveal the mapping relationship between the macroscopic properties and microstructure of RHCM products. 
In this study, an electric heating RHCM is employed to understand the different cavity surface temperature action mechanisms that regulate the tensile strength and the surface appearance quality of a reinforced polymer product without a weld line. e macroscopic properties and microstructure are tested and analyzed by scanning electronic microscope (SEM), wide-angle X-ray diffraction (WAXRD), dynamic thermomechanical analysis (DMA), Autodesk Moldflow ™, surface roughness meter, and surface gloss meter. e corelationships between the macroscopic properties, microstructure, and processing of RHCM products are explored. Materials. e testing material adopted in this study is PP with 30% short glass fibers (supplied by Suzhou Yulian Engineering Plastics Co. Ltd., China) with a density of 1.18 g/ cm 3 (ASTM D792), melt flow rate of 1.8 g/10 min (ASTM: D1238), and heat deflection temperature of 140°C (ASTM: D648). Experimental Setup and Procedure. e employed electric heating RHCM apparatus ( Figure 1 ) is mainly composed of electric heating rods, K-type thermocouple sensor, and mold temperature controller MTS-32II system (Beijing CHN-TOP Machinery Group Co. Ltd., China) to measure, regulate, and display the mold surface heating temperature, and a two-plate RHCM mold (the cavity surface is polished to achieve a glossy mirror surface with a roughness Ra of about 57 nm). Electrical heating rods are only deployed at the fixed side of the mold to heat this side rapidly before filling. e regular water-cooling tunnels are used to cool both movable and fixed sides of the mold with turbulent room temperature cooling water after the packing stage. An injection molding machine (HTFX5 series, Haitian Plastic Machinery Group Co. Ltd., China) is used. All process parameters are listed in Table 1. And only the samples produced after stabilizing the process parameters are used. For each set of molding conditions, no less than five samples are employed for the subsequent measurements and analysis. Specimen Preparation. e sample position is selected as shown in Figure 2. To facilitate description, the samples molded with cavity surface temperatures of 60, 90, and 120°C are designated as RHCM60, RHCM90, and RHCM120, respectively. All of the RHCM test samples are selected and processed on the side of the fixed half mold. e injection-molded samples are placed in liquid nitrogen and thereafter fractured as illustrated in Figure 2. Advances in Polymer Technology microstructure of the sample is observed with a scanning electron microscope (Hitachi S-4700, Japan). e scanning voltage is 15.0 kV, and a total magnification of 150X is applied. e WAXRD technique is employed to estimate the crystallinity in each specified layer across the sample thickness. e sample position is specifically selected along the thickness, as illustrated in Figure 2. Performance e WAXD is performed with an X'Pert PRO X-ray diffraction instrument (PANalytical B.V., Almelo, Netherlands) with a Cu K source and an average wavelength of 0.154056 nm. e equipment is operated at 40 kV and 40 mA in ambient temperature with a diffraction angle (2) range of 10°-40°a nd a 2°/min scan rate. e interfacial bonding strength of GFRPP composites can be characterized by the DMA test using DMAQ800 (TA instruments, America). Rectangular test samples (each 3 mm 1 mm 2 mm) are heated from −20 to 40°C at a rate of 2°C/min. e single cantilever mode is employed to test the samples at an oscillation amplitude and frequency of 10 m and 1 Hz, respectively. 
Autodesk Moldflow™ is utilized to evaluate the glass fiber orientation distribution in the sample thickness direction. In the simulation, the material properties and corresponding boundary conditions are set according to the parameters used in the actual CIM and RHCM experiments. Effect of RHCM Process on Multilayer Microstructure of GFRPP. The SEM micrographs in Figure 3 show the multilayer structures that appear in the cross sections of the CIM and RHCM60 samples. In the cross section of the samples, the CIM sample exhibits a distinct symmetric skin-shear-core structure, whereas the RHCM samples present an asymmetric multilayer structure. The layer thicknesses on the fixed mold side are considerably different from those on the movable mold side. This asymmetric structure is mainly attributed to the heating of the cavity surface in the fixed half mold with dynamic mold temperature control technology, and it well demonstrates the effect of different heating temperatures on the microstructures of RHCM samples. Figure 4 exhibits the skin layer thickness variation trends in RHCM and CIM samples produced at different cavity surface temperatures before filling. Based on the SEM images of specimen cross sections, the thickness of each layer is determined by taking the average of multiple measurements with the aid of JMICROVISION software. Figure 5(a) shows that the skin layer thickness of the sample decreases as the cavity temperature before filling increases, and such a decreasing tendency can be interpreted in terms of the temperature gradient distribution. Compared with the RHCM process, the CIM process has the largest temperature gradient between the melt and the cavity surface; hence, the entering melt rapidly cools upon contact with the cold cavity surface and immediately condenses and forms the skin layer, and accordingly, the CIM sample has the thickest skin layer. With the increase of temperature, the temperature gradient decreases. According to the foregoing, it is not difficult to sort the skin layer thickness in descending order as the cavity temperature before filling increases. In Figure 5(b), the shear layer thickness in all RHCM samples is larger than the shear layer thickness in the CIM sample. According to the previous analysis, the CIM process produces the thickest skin layer. Evidently, this skin layer provides the best heat insulation effect for the melt, weakens the heat exchange between the melt and the mold wall and thereby decreases the temperature gradient below the CIM skin layer; as a result, the shear layer thickness in the CIM sample is the thinnest. Compared with the CIM sample, as the cavity surface temperature further increases, the skin layer thermal insulation performance further weakens and the shear layer thickness of the RHCM samples presents a descending order. In Figure 5(c), it can be observed that the core layer thickness in the RHCM samples increases with the increase in the cavity surface temperature, and the core layer thickness in the CIM sample is thicker than that in the RHCM samples processed at cavity surface temperatures of 60 and 90°C. This phenomenon can be attributed to the fact that the thicker skin layer in the CIM sample has a good heat insulation effect, so the temperature gradient in the melt under the skin layer is smaller, and consequently, the core layer thickness in the CIM sample is relatively thicker.
In the RHCM process, the temperature difference between the cavity surface and the melt gradually decreases with the increase in the cavity surface temperature; this explains the gradual increase in the core layer thickness. When the cavity surface temperature rises to 120°C, the core layer reaches its maximum thickness. Effect of RHCM Process on Fiber Orientation in the GFRPP Microstructure. The model is constructed using tetrahedral elements with 11 layers meshed in the thickness direction. Figure 6 shows the meshed model established with the software. The fiber orientation distribution in different layers on the side of the fixed half mold is obtained by Moldflow™ simulation, as shown in Figure 7. In the CIM process, the shear layer has the highest fiber orientation parallel to the flow direction, followed by the skin layer. In the core layer, the fibers are observed to be oriented randomly. These results confirm those reported in the literature. Figure 7(a) shows the skin layer fiber orientation tensors obtained at different cavity surface temperatures. As the cavity surface temperature increases, the cooling and freezing rates of the skin layer are slowed down. These afford ample time for the fibers to be oriented in the flow direction as well as more time for the molecular orientation to relax. Under this condition, both the orienting effect of shearing and stretching and the relaxation of the molecular orientation are strengthened. These factors have opposite effects on the skin layer fiber orientation, and the final fiber orientation is determined by the competition between them. For RHCM60, the shearing and stretching effects are more predominant in the skin layer fiber orientation; accordingly, this sample has a relatively higher skin layer fiber orientation than the CIM sample. On the other hand, because the melt cooling rate in RHCM90 is lower than that in RHCM60, the skin layer is in a high elastic state over a longer period. The shearing and stretching effects are weaker because the velocity gradient is further reduced under this condition, and the internal fibers undergo a certain random movement based on the high-temperature relaxation of the oriented polymer macromolecular chains. All of these allow the RHCM90 sample to have a relatively lower skin layer fiber orientation than the CIM sample. In the RHCM120 process, the cooling rate is the slowest, the relaxation of the oriented molecular chains is the most complete, and the fibers in the skin layer are basically unoriented. Figure 7(b) shows the fiber orientation tensor of the shear layer in the RHCM samples. It can be observed that the RHCM60 sample has the highest shear layer fiber orientation. In the RHCM process, the melt in the shear layer is subjected to a lower shearing force compared with that experienced in the skin layer. However, the shear action time is longer, and the inner fibers have sufficient time to be oriented. Hence, the degree of fiber orientation in the shear layer is relatively higher than that in the skin layer. Moreover, notice that the fiber orientation tensor in the shear layer decreases drastically with the increase in mold temperature. Recall that as the heating temperature of the mold increases, the temperature gradient in the melt decreases; this causes the flow velocity difference among the layers to decrease. Thereafter, the shearing effect among the layers is weakened, particularly in the RHCM120 sample. Figure 7(c) shows the core layer fiber orientation tensor of the samples.
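For reference, the fiber orientation tensor reported by Moldflow-type simulations is normally the second-order (Advani-Tucker) orientation tensor; assuming the usual definition,

$a_{ij} = \oint p_i \, p_j \, \psi(\mathbf{p}) \, d\mathbf{p}, \qquad a_{11} + a_{22} + a_{33} = 1,$

where $\mathbf{p}$ is a unit vector along a fiber axis and $\psi(\mathbf{p})$ is the fiber orientation distribution function. The component along the flow direction, $a_{11}$, is the quantity plotted in Figure 7: $a_{11} \approx 1/3$ corresponds to a random three-dimensional orientation and $a_{11} \to 1$ to perfect alignment with the flow.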
In general, the core layer fiber orientation in all RHCM and CIM samples is relatively low. Compared with the skin and shear layers, the core layer has a uniform temperature distribution and a melt temperature that can be maintained higher than the glass transition temperature in both RHCM and CIM processes. Moreover, it experiences practically no shearing effect that can orient its fibers, and its polymer molecules have sufficient time to relax. Accordingly, with its fibers freely oriented, the core layer has a low fiber orientation. The crystallinity in each layer in the RHCM samples is higher than that in the CIM sample; furthermore, the crystallinity in all layers increases continuously as the cavity surface temperature increases. In the RHCM process, when the melt comes into contact with the cavity surface at a higher temperature, the melt experiences a temperature range that is suitable for crystallization. It is widely known that good fluidity results in less pressure loss in the melt flow direction and enhances the subsequent packing effects, which decreases the free volume in the melt and increases the melt density; thereafter, the orientation of the molecular chains formed in the filling stage is further strengthened during the subsequent packing stage, and all these factors facilitate crystallization. Additionally, it is found that the crystallinity in all layers increases continuously as the cavity surface temperature increases, the core layer has a higher crystallinity than the shear layer, and the skin layer has the lowest crystallinity. The heat exchange between the melt and the mold cavity surface is weakened because of the skin layer's heat insulation effect; at the same time, the temperature in the shear layer is higher than that in the skin layer, and the crystallization time is relatively longer. In addition, the intensive shear stress field in the shear layer triggers shear-induced crystallization. Thus, it can be concluded that as the heating temperature of the mold increases, both the crystallization rate and the crystallization time increase, and the crystallinity in the shear layer is higher than that in the skin layer. Additionally, both the skin and shear layers act as insulating layers to further reduce the heat exchange between the mold surface and the core layer; the core layer therefore has a relatively longer and better crystallization time and zone. Thus, the core layer in each sample has the highest crystallinity, followed by the shear layer and the skin layer in sequence. Effect of RHCM Process on Fiber-Matrix Bonding Strength of GFRPP Composites. The interfacial bonding strength determines the stress transfer between the reinforcing fiber and the matrix and is recognized as the key factor in the global mechanical performance of composites. To estimate the interfacial bonding strength of GFRPP composites under different mold cavity surface temperatures, the DMA test is employed. The damping factor (tan δ) curves indicate the interfacial bonding strength of GFRPP composites, as shown in Figure 9. As the mold cavity surface temperature increases, the peak of the mechanical loss in the sample gradually decreases, which indicates that the bonding strength between the reinforcing fiber and the PP matrix increases as the temperature increases.
It can be explained as follows: the global crystallinity in the sample increases under this condition, which causes the crystalline area to expand and the free movement volume of the molecular chains to shrink; these factors limit the movement of the polymer molecular chains. Hence, the friction between molecular chains and fibers is decreased, and the sample's mechanical loss (tan δ) peak gradually decreases. Accordingly, the fiber-matrix bonding strength increases gradually with the increase of cavity surface temperature. Figure 10 shows the tensile strength of samples produced by the different CIM and RHCM processes. The RHCM60 sample has the highest tensile strength, which reaches 57.61 MPa; this indicates that the RHCM process effectively improves the tensile property of the GFRPP composites. However, it is also found that as the mold cavity temperature before filling increases, the tensile strength of the RHCM sample gradually decreases, and the tensile strength of the RHCM120 sample is even lower than that of the CIM sample. The tensile strength difference can be explained by the microstructure of the molded samples. Figure 11 shows the microstructure comparison of the RHCM60 and CIM samples. It is observed that the thickness of the shear layer, with its highly oriented fibers, increases in RHCM60 by 53.5% compared with the CIM sample. However, the thicknesses of the skin and core layers, with their relatively lower fiber orientation tensors, are reduced by approximately 19.3% and 47.1%, respectively. Meanwhile, both the crystallinity and the fiber orientation in each layer in RHCM60 are higher than those in the CIM sample. The crystallinity in the skin, shear, and core layers increased by 19.4%, 6.5%, and 12.1%, respectively. Thus, the overall crystallinity and fiber orientation of RHCM60 in the cross section are higher than those in the CIM sample. When an external force exerts a tensile load on the sample, the increase in fiber orientation causes more fibers to bear the load transmitted by the matrix. The increase in matrix crystallinity indicates that the contact area between the crystalline and amorphous regions increases; this also aids the load transfer from the matrix to the fiber. Hence, the tensile strength of RHCM60 is higher than that of the CIM sample. Relationship between Microstructure and Tensile Property. It is evident in Figure 12 that as the cavity temperature is increased, the thicknesses of the skin and shear layers with higher fiber orientations are considerably reduced, with decrements of 10.4%, 14.5% and 14.9%, 14.3%, respectively. The thickness of the core layer with low fiber orientation is increased significantly, with increments of 58.8% and 33.2%. Hence, as the cavity surface temperature is increased, the overall fiber orientation in the thickness direction of the sample is drastically lowered. Moreover, the crystallinity in each layer increases to different degrees, ranging from 1.3% to 9.4%; the overall crystallinity in the RHCM samples increases continuously as the temperature increases. Although the high-crystallinity matrix under this condition is advantageous for sustaining more applied load, it is presumed that the effect on the tensile strength of the drastic reduction in the thickness of the shear layer with high fiber orientation is more pronounced. Relationship between Microstructure and Surface Gloss. As shown in Figure 13, the RHCM process can effectively and continuously improve the surface gloss of the sample from 37.5 to 78.8 Gs as the surface temperature rises.
The sample surface gloss is determined by the surface morphology of the product. Table 2 summarizes the surface roughness measurement result of each sample. It indicates that the surface roughness of the sample can be effectively reduced by increasing the mold surface temperature. In the RHCM process, as the mold cavity surface temperature increases, the number of exposed surface fibers decreases because the fluidity of the polymer in the skin layer is enhanced. It is easier for the polymer melt to fill the gap formed between the fiber and the cavity surface, and the fibers of the skin layer can be well-wrapped by the polymer matrix without being exposed outside the skin layer. Figure 14 shows the microsurface images of GFRPP samples molded at different cavity surface temperatures immediately before filling. With rising temperature, a smooth mold surface can be accurately reproduced; hence, the surface roughness of the product gradually decreases. The greater the surface roughness, the larger the undulation of the microscopic surface of the product and the stronger the scattering of light; this means that the gloss of the product becomes lower with increasing surface roughness. Figure 14 further shows that the surface gloss increase is not distinct after the surface temperature reaches 90°C; this phenomenon is consistent with results found by Wang et al. They attributed this high-gloss surface to the amorphous phase, which completely wrapped all crystalline phases on the surface; finally, the surface presented a pure and uniform amorphous phase. However, this conclusion differs from the observations of other researchers, who indicated that the surface of the PP sample is composed of both crystalline and amorphous phases. Another study (Wang et al.) pointed out that, for virgin PP, the surface gloss exhibited a different trend: instead of increasing, it tended to decrease as the cavity surface temperature continued to rise after reaching 110°C. To better comprehend the mechanism behind the best gloss quality, exhibited by RHCM120, WAXRD is employed to estimate the surface crystallization. The WAXRD patterns of RHCM90 and RHCM120 are shown in Figure 15. Note that the orientation parameters of these two samples are practically the same; however, the surface crystallinity of RHCM120 is relatively higher than that of RHCM90. This is because the higher mold surface temperature not only substantially increases the surface crystallization rate of the product, but also increases the crystallization time; thus, the crystallinity on the surface of RHCM120 is relatively high. The high surface crystallinity indicates that the surface crystalline area is large and the molecular chains on the surface are regularly arranged; this can effectively suppress light scattering and thereby improve surface gloss. The foregoing analysis indicates that when the mold heating temperature is below 90°C, the increase in the surface gloss of the sample is mainly due to the reduction in surface-exposed floating fibers and the improvement in the melt surface replication ability. When the cavity surface temperature is more than 90°C, the surface gloss increase is primarily attributed to the increase in surface crystallinity combined with the higher surface replication ability of the melt.
Conclusions In this study, the effect of mold cavity surface temperature on the tensile and surface gloss properties is investigated in 30% short glass fiber-reinforced PP composites without weld lines. The influence mechanisms of temperature are discussed and revealed by coupling simulation, SEM, WAXRD, DMA, a roughness meter, and a surface gloss meter. The correlation between the macroscopic properties (tensile strength and surface gloss), microstructure, and processing of RHCM samples is discussed. The main conclusions drawn are as follows. The tensile strength first increases and then decreases as the cavity surface temperature before filling increases. The RHCM60 sample has the highest tensile strength, followed by the RHCM90 and CIM samples; the RHCM120 sample has the lowest tensile strength. These differences arise mainly because the microstructure of GFRPP composites depends considerably on the preassigned cavity surface temperature. With increasing cavity surface temperature before filling, the thickness of the skin layer with a medium fiber orientation along the flow direction decreases continuously. The thickness of the shear layer with a strong fiber orientation first increases and then decreases, and all the shear layers of the RHCM samples are thicker than that of the CIM sample; the RHCM60 sample has the thickest shear layer. The thickness of the core layer with a weak fiber orientation first decreases and thereafter increases; the RHCM120 and RHCM60 samples have the thickest and thinnest core layers, respectively. The fiber orientation of each layer in all samples exhibits an increasing and thereafter decreasing trend with increasing cavity surface temperature. The fiber orientation of each layer in the RHCM60 sample is higher than that in the CIM sample. The crystallinity in each layer in all samples increases with increasing cavity surface temperature, and the same trend can be observed between the fiber-matrix interfacial bonding strength of all samples and the cavity surface temperature. The sample surface gloss increases with the increase in the surface temperature of the mold cavity; however, when the temperature exceeds 90°C, the growth trend sharply slows down. Moreover, among all samples, the surface crystallinity of the RHCM120 sample is the highest. When the mold heating temperature is lower than 90°C, the increase in surface gloss is mainly attributed to the reduction in exposed floating fibers and the enhancement of the melt surface replication ability. When the mold heating temperature is more than 90°C, the surface gloss increases mainly because of the increase in surface crystallinity coupled with the high level of mirror surface replication ability of the melt. Data Availability All data used to support the findings of this study are included with the article. Conflicts of Interest The authors declare that there are no conflicts of interest regarding the publication of this paper.
What we do with penalty rates strikes at an issue that is much more profound and affects just about all of us: preserving the weekend against the pressures of round-the-clock consumption. So if penalty rates are really up for debate, as the government wants, let's acknowledge what is also at stake here. This isn't just about the number of jobs that might (or might not) be created. It's about how we value those two days when most of us are off work – Saturday and Sunday – and compensate those who miss out. Penalty rates are perhaps the most contentious aspect of the Productivity Commission's draft report on the workplace relations framework. The commission agrees weekend penalty rates are fair in principle. But it wants them cut on Sundays for workers in cafes, bars, restaurants, entertainment, and retail. Why are these staff singled out for wage cuts? Well, the first step in the commission's argument is to say that being open on a Sunday is now par for the course in these industries. "Australian society expects to be able to shop, go to a pharmacy, and eat at cafes and restaurants on weekends. The value of supermarket shopping on Sundays now exceeds some weekdays," the report says. All true. However, the next leap in the argument is the bit we should be very wary of. "This trend will not diminish. Consequently, the workplace relations system should embrace the concept of seven-day weeks in the relevant services industries," it says. "It should provide penalty rates that take account of the types of jobs and skills needed in these industries and that are proportionate to the impacts on their employees." Hmmm. This might be heresy to some business types, but since when did the expectations of consumers justify pay cuts for the people who are providing the service? As University of Queensland economics professor John Quiggin says in a submission to the commission, the fact that people expect services are provided on the weekend "has no obvious relevance to the question of whether employees should be compensated for providing such services". He makes a good point. To embrace the concept of the seven-day working week, I'd like to see stronger evidence than: people like shopping on Sundays. Quiggin, who has provided expert evidence to the Fair Work Commission for the union United Voice, also points out a big inconsistency. The Productivity Commission says workers in essential services are also expected to work every day but their penalty rates should remain unchanged because these are in "lock-step" with community expectations. I agree. People providing essential services should of course be compensated well for giving up their weekend. But why are they more deserving of the extra pay on Sunday than others? People who wait tables, pour beers or stack the supermarket shelves are giving up the same thing – time off work, often spent with friends and family. So why should their Sunday pay be assessed differently? And that brings us to an inevitable consequence of any cuts in penalty rates: it would hurt the wages of low-paid staff more than better-paid professionals. The Productivity Commission acknowledges these concerns, saying "most existing employees would face reduced earnings". But it says we need to balance these costs for individuals against the more widely dispersed gains to society. These would include more jobs being created and gains for consumers such as better service, it argues. 
"They may get better service if employers choose to use more experienced staff and less juniors on Sundays, and face fewer queues due to potentially higher staffing ratios." So, customers may get a better deal on Sundays but it comes at the expense of the (mostly lower-paid) staff serving them. If we accept the logic of cutting penalties in the consumer-heavy part of the economy, it can become a slippery slope. The relentless expansion of consumption into the weekend is unlikely to be confined to hospitality, retail and entertainment. Other services will probably follow, all the while pushing more people to also work on weekends. Indeed, the commission says we should look into which other industries should also have Sunday penalty rates rolled back over time. Sound like the direction we want to be heading in? I don't think so. There is indeed undeniable evidence that consumption is increasing on weekends as a result of changing customer habits. However, that doesn't mean we should further entrench the process through workplace rules. What are presented as the "gains" of cutting penalty rates – job creation and shorter queues for a coffee on a Sunday – need to be weighed against the risk of accelerating the erosion of the weekend. Ross Gittins is on leave.
President George W. Bush has a case for going to war. It’s a slim case, but a case. And he keeps undermining it with dishonest remarks. During his Thursday night press conference–only the eighth news conference of his presidency (Bill Clinton had logged 30 by this point in his first term)–Bush once again tried to argue for war. He offered nothing new. And, to be fair, at this stage of the game–after months of prep work–no one should expect to hear much in the way of fresh argument. But Bush took one more shot at explaining his thinking. One could argue that while the actual danger posed by Saddam Hussein (and whatever weapons he might possess or might develop) is difficult to assess, the United States cannot risk guessing wrongly. At the news conference, Bush declared, “My job is to protect the American people.” Clearly, his expansive view of that mandate includes going to war against a tyrant whose actions may end up threatening the United States. Bush’s problem has been that a case for war based on the potential threat from Iraq is, obviously, not as compelling as a case predicated on an actual and immediate threat. If a nation faces a potential threat, it has the luxury of weighing–and debating–various aspects of going to war: the moral legitimacy of the action, the possible consequences and costs, how other governments and populations will react, the alternatives to an invade-and-occupy response. Many of these concerns, though, could be shoved aside, if the United States were confronting a clear-and-present danger. Since Bush cannot make the threat-end of his case more convincing–he seems to have stretched the available evidence as far as it can go–he has attempted to strengthen his argument by dissembling on other fronts. During the press conference, he said he was willing to stick with “diplomacy” for a little while longer. That is not so. What he is willing to do is to spend a few more days trying to wring out of the UN Security Council a resolution that would directly or indirectly approve a US-led attack against Iraq. But diplomacy entails more than winning approval for war. In most instances, it would mean resolving a conflict without resorting to the use of force. But Bush has offered no alternatives to all-out war. Sure, if Saddam fled the country, Bush might accept that as a reason to call off the invasion. But Bush and his top advisers have scoffed at inspections, which are one form of diplomacy. If Saddam Hussein is not to be trusted–and he is not–then no matter what steps Iraq takes, Washington can never have 100-percent confidence Saddam has fully complied with the Security Council resolutions and disarmed. And if 100-percent confidence is the working standard, as the Bushies seem to insist, then all talk from the administration of disarming Saddam is bunk. The only disarmament they can accept is de-Saddamization. And that, in all likelihood, can only come through war. Bush and his officials have refused to entertain the possibility of coercive inspections–that is, inspections backed by military force. (Imagine a no-fly zone across almost all of the country, or military raids against suspected WMD sites.) Not only is diplomacy not an option for Bush; neither is force short of war. 
In this vein, at the press conference, Bush said–as he has repeatedly–“the risk of doing nothing, the risk of hoping that Saddam Hussein changes his mind and becomes a gentle soul, the risk that somehow that inaction will make the world safer is a risk I’m not willing to take for the American people.” With this statement, Bush was presenting a false dichotomy: war or nothing. If that’s the choice, war may seem less avoidable. Yet the nations opposing his push for war–France, Germany, Canada–have indeed proposed other courses of action involving more aggressive and intrusive inspections. Bush is free to argue that such means cannot succeed and are not worth even attempting. Instead, he dismisses his opposition by suggesting it is naively and foolishly counting on Saddam’s transformation into a saint. This has been one of the critical distortions he has used to promote his war. Bush repeated his claim that war is necessary to preserve the relevance of the United Nations. This was the type of arrogant remark that has been fueling anti-American sentiment overseas since Bush assumed office. UN Security Council Resolution 1441 promised there would be “serious consequences” if Saddam Hussein did not comply with its disarmament orders. It did not define these consequences. What Bush has been saying is that unless the Security Council embraces his definition of “serious consequences”–war right now–it is a pointless body. “The credibility of the Security Council is at stake,” he maintained. But what if the Security Council were to decide to toughen up the inspections and conduct them for another five months? Why would that be evidence of its meaninglessness? Indeed, it is Bush who is placing the Security Council in a position of irrelevance. Should he ignore the deeply-felt sentiments of its member-nations (and the populations they represent) and launch a war unsupported by the Security Council, it will be he who is declaring–and proving–that the United Nations does not really matter. At the press conference, Bush said once more that his war against Iraq would be a war of liberation for the Iraqi people. That may well be–unintentionally. Bush’s war-for-democracy pitch is essentially window-dressing. This administration would have no interest in sacrificing American lives and assuming political risks if the goal were primarily to help out people ruled by a brute. If war does occur, let’s hope a free and democratic Iraq is an outcome. But it’s hard not to wonder what the Bush administration will do if an Iranian-backed demagogue who wants to nationalize the oil industry and supports the Palestinian uprising is freely and fairly elected in the “new” Iraq. At the moment, what Bush has to say matters little. He has no new evidence to reveal. He has no better case to make. He’s got what he’s got. Moreover, there’s no jury or judge he has to convince. It’s his decision, and it appears it has already been rendered. The only answer to this threat (real or potential) is a disarmed Saddam. The only disarmed Saddam is a dethroned Saddam. That requires war. What happens in the UN over the next days seems to have no bearing on what will transpire in Iraq. The question is merely whether Bush has to run a red-light on his way to Baghdad. His foot is already heavy on the gas. Emboldened by his own half-truths and lies, he is heading off to war.
package com.appdynamics.lambda.dal;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.List;

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBAttribute;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBAutoGeneratedKey;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBHashKey;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapper;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapperConfig;
import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBTable;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.javafaker.Faker;

// DynamoDB-mapped order entity. The annotated table name is a placeholder that is
// overridden at runtime with the ORDERS_TABLE_NAME environment variable.
@JsonIgnoreProperties(value = { "db_adapter", "client", "mapper" })
@DynamoDBTable(tableName = "PLACEHOLDER_ORDERS_TABLE_NAME")
public class CommerceOrder {

    private static final String ORDERS_TABLE_NAME = System.getenv("ORDERS_TABLE_NAME");

    private DynamoDbAdapter db_adapter;
    private final AmazonDynamoDB client;
    private final DynamoDBMapper mapper;

    private String orderId;
    private String company;
    private String contact;
    private String address;
    private String phoneNumber;
    private int numOrderItems;
    private double price;
    private long expiresAt;

    public CommerceOrder() {
        DynamoDBMapperConfig mapperConfig = DynamoDBMapperConfig.builder()
                .withTableNameOverride(new DynamoDBMapperConfig.TableNameOverride(ORDERS_TABLE_NAME))
                .build();
        this.db_adapter = DynamoDbAdapter.getInstance();
        this.client = this.db_adapter.getDbClient();
        this.mapper = this.db_adapter.createDbMapper(mapperConfig);
    }

    @DynamoDBHashKey(attributeName = "id")
    @DynamoDBAutoGeneratedKey
    public String getOrderId() { return orderId; }

    public void setOrderId(final String orderId) { this.orderId = orderId; }

    @DynamoDBAttribute(attributeName = "company")
    public String getCompany() { return company; }

    public void setCompany(final String company) { this.company = company; }

    @DynamoDBAttribute(attributeName = "contact")
    public String getContact() { return contact; }

    public void setContact(final String contact) { this.contact = contact; }

    @DynamoDBAttribute(attributeName = "address")
    public String getAddress() { return address; }

    public void setAddress(final String address) { this.address = address; }

    @DynamoDBAttribute(attributeName = "phone")
    public String getPhoneNumber() { return phoneNumber; }

    public void setPhoneNumber(final String phoneNumber) { this.phoneNumber = phoneNumber; }

    @DynamoDBAttribute(attributeName = "itemcount")
    public int getNumOrderItems() { return numOrderItems; }

    public void setNumOrderItems(final int numOrderItems) { this.numOrderItems = numOrderItems; }

    @DynamoDBAttribute(attributeName = "price")
    public double getPrice() { return price; }

    public void setPrice(final double price) { this.price = price; }

    @DynamoDBAttribute(attributeName = "expiresAt")
    public long getExpiresAt() { return expiresAt; }

    public void setExpiresAt(long expiresAt) { this.expiresAt = expiresAt; }

    // Builder that fills any unset fields with fake data (javafaker) and stamps a TTL.
    public static class Builder {

        private String company;
        private String contact;
        private String address;
        private String phoneNumber;
        private int numOrderItems;
        private double price;

        public Builder random() {
            final Faker faker = Faker.instance();
            if (this.company == null) { this.company = faker.company().name(); }
            if (this.contact == null) { this.contact = faker.name().fullName(); }
            if (this.address == null) { this.address = faker.address().fullAddress(); }
            if (this.phoneNumber == null) { this.phoneNumber = faker.phoneNumber().cellPhone(); }
            if (this.numOrderItems == 0) { this.numOrderItems = faker.number().randomDigitNotZero(); }
            if (this.price == 0.0) { this.price = faker.number().randomDouble(2, 50, 500); }
            return this;
        }

        public CommerceOrder build() {
            final CommerceOrder order = new CommerceOrder();
            order.company = this.company;
            order.contact = this.contact;
            order.address = this.address;
            order.phoneNumber = this.phoneNumber;
            order.numOrderItems = this.numOrderItems;
            order.price = this.price;
            // Set the DynamoDB TTL attribute to expire the item in 2 hours (epoch seconds).
            Date date = new Date(System.currentTimeMillis());
            Calendar calendar = Calendar.getInstance();
            calendar.setTime(date);
            calendar.add(Calendar.HOUR_OF_DAY, 2);
            order.expiresAt = calendar.getTimeInMillis() / 1000L;
            return order;
        }
    }

    // Generates ten random orders in memory; this does not query the table.
    public List<CommerceOrder> recentOrders() throws IOException {
        List<CommerceOrder> results = new ArrayList<CommerceOrder>();
        for (int i = 0; i < 10; i++) {
            results.add(new CommerceOrder.Builder().random().build());
        }
        return results;
    }

    // Persists this order via the DynamoDB mapper.
    public void save() throws IOException {
        this.mapper.save(this);
    }

    @Override
    public String toString() {
        String retval = "";
        try {
            retval = new ObjectMapper().writeValueAsString(this);
        } catch (JsonProcessingException e) {
            retval = "";
        }
        return retval;
    }
}
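A minimal usage sketch follows; the example class and main method are hypothetical, and it assumes the ORDERS_TABLE_NAME environment variable, AWS credentials and the DynamoDbAdapter singleton are configured as required by the class above.

package com.appdynamics.lambda.dal;

import java.io.IOException;

// Hypothetical example: create a randomly populated order and persist it.
public class CommerceOrderExample {
    public static void main(String[] args) throws IOException {
        CommerceOrder order = new CommerceOrder.Builder().random().build();
        order.save();                 // writes the item to the orders table
        System.out.println(order);    // JSON via the overridden toString()
    }
}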
Wafer-level fabrication of individual solid-state nanopores for sensing single DNAs For biomolecule sensing purposes, a solid-state nanopore platform based on silicon has certain advantages as compared to nanopores on other substrates such as graphene, silicon nitride, silicon oxide, etc. Capitalizing on the developed CMOS technology, nanopores on silicon are scalable without any requirement for additional processing, the devices are low cost and the process can be repeated with a high yield. One of the essential requirements in biomolecule sensing is the ability of the nanopore to interact with the analyte. In this work, we present a method for processing high aspect ratio, single nanopores in the range of 10-30 nm in diameter and approximately 700 nm in length on a silicon-on-insulator (SOI) wafer. The presented method of manufacturing the high aspect ratio individual nanopores combines optical lithography and anisotropic KOH etching with a final electrochemical etching step to form the nanopores; it is repeatable and can be processed in batches. We demonstrate electrical detection of dsDNA translocation, where the characteristic time of the process is in the millisecond range. We also analyse the translocation parameters and correlate the enhanced length of the nanopore to a longer translocation time as compared to other substrates. Introduction Solid-state nanopores are considered a promising candidate for the next-generation platform for a broad range of low-cost, high-throughput molecular diagnostics. The interest in solid-state nanopores has considerably increased since the earliest reported manufacturing of solid-state nanopores. It is widely accepted that nanopores, both as a single pore and as an array, can be used for biomolecule sensing experiments. Solid-state nanopores manufactured on silicon nitride, silicon, graphene, MoS₂, HfO₂, polyimide, PET and others have been demonstrated to detect DNA and proteins at the single-molecule level. By functionalizing the surface of the single solid-state nanopore, a similarity towards biological ion channels is demonstrated. One of the drawbacks of these kinds of nanopores has been that they were unable to demonstrate a large-scale and repeatable manufacturing process. Solid-state nanopores in silicon provide unique advantages, such as a controlled spacing, potentially low fabrication cost and high yield as compared to the very popular biological nanopore α-Haemolysin. Large-scale manufacturing of nanopores that is available at a reasonable cost may help to accelerate the practical implementation of this detection concept. A low-cost method of fabricating large-scale high aspect ratio individual nanopores with a diameter of ∼2 nm would present a unique platform related to DNA sequencing. But larger nanopores in the range 5-50 nm may also be interesting for other types of applications, such as the detection of large proteins, extracellular vesicles (where exosomes have recently attracted a large interest), viruses, immune cells, etc. There are several methods for the fabrication of nanopores: shrinking of a large nanopore processed using a focused ion beam from approx.
300 nm to a few nm by thermal heating or by electron-beam-induced heating, the formation of a small opening by controlled dielectric breakdown, wet etching, focused ion beam, track-etching and e-beam drilling, to name a few. Of the several techniques for fabricating a single nanopore, controlled processing using e-beam lithography on silicon is a well-known method. However, due to the high equipment cost and low throughput, these methods are expensive and become slow for processing large quantities. With the several control parameters that are required to process the nanopores, it is often difficult to repeat the process. In addition, for a measured translocation speed of DNA through a nanopore, typically ∼40 bases s⁻¹, a measurement instrument with high bandwidth is required. The electrical noise from the nanopore poses serious limitations in identifying different bases. Therefore, a viable solution that offers both low cost in terms of technology and low bandwidth (slow DNA translocation) would ideally contribute significantly towards the efficient detection of DNA and proteins of interest. For the DNA to be detected base by base, there needs to be a technological shift that would help in increasing the time during which the DNA stays in the nanopore. One possibility is to increase the length of the nanopore, thereby slowing the translocation so that the time each base occupies the nanopore detector is preferably ⩾1 ms. The drawback of long nanopores is that single base pair resolution cannot be achieved using the blocking current, and one has to resort to some kind of lateral electrodes. In this work, we present a wafer-level fabrication method of single or arrays of nanopores using a silicon-on-insulator (SOI) wafer, and with such chips we successfully demonstrate electrical detection of 1000 bp double-stranded (ds) DNA with a long translocation time. The controlled nanopore processing is achieved by an optical lithography step, allowing arbitrary positioning of the nanopores on the surface of a 2 μm thick silicon device layer, followed by breakdown electrochemical etching, resulting in high aspect ratio ∼10-30 nm diameter nanopores. Prior work from our group on a similar method has been presented elsewhere. In that case, however, arrays of nanopores were fabricated and detection of translocation was by optical readout using fluorophores. The nanopore diameters, as observed in the SEM images, can be controlled by varying the voltage bias profile of the electrochemical etching. This method of processing a nanopore is fast (∼8 mins), requires fewer control parameters and can be processed on a large scale. For our process, we produced twelve wafers in four batches with 26 devices per wafer. The devices were 15 mm × 15 mm in dimension and contained single nanopores as well as arrays of nanopores. The wafer-level fabrication on SOI wafers yielded about 60%-70%, with diameters in the range of 10 nm to 50 nm. Also, we present the electrical measurement of double-stranded DNA translocation through a single nanopore. DNA translocation through these high aspect ratio nanopores has been investigated using electrical methods. A long nanopore allows the DNA to stay in the nanopore for approx. 1 ms and gives a blockade current of about 680 pA. A low-cost method of fabricating large-scale high aspect ratio nanopores down to approximately 10 nm presents a unique opportunity for understanding the function and properties of biomolecules.
A schematic illustration in figure 1 shows our single nanopore platform with an inverted pyramid on the device layer of an SOI wafer. The main objective is that, when a bias is applied across the pore, the dsDNA translocates through the nanopore by electrophoresis, thus allowing the detection of single DNA translocation events. Materials and methods In this section, we describe the method used for the fabrication of a single solid-state nanopore on a silicon-on-insulator (SOI) wafer. A detailed explanation of the process flow using UV lithography and the electrochemical etching for nanopore processing is presented. Single nanopore chip processing Several approaches can be followed in developing a suspended device layer membrane. The process described below has a lower level of contamination and increases the yield of devices when the fabrication is started from the device layer and then followed by ICP etching of the handle layer. For the nanopore processing, SOI wafers with a 500 ± 25 μm thick handle layer and a 2 ± 0.3 μm thick silicon device layer are used, with a 1 μm silicon dioxide buried layer. The device layer is chosen to be n-type with <100> orientation and a resistivity in the range of 1 to 3 Ohm-cm. The process flow is shown in figure 2. The wafer is cleaned with piranha solution, a (3:1) mixture of sulphuric acid and hydrogen peroxide (H₂SO₄:H₂O₂), for 5 mins and then in 'IMEC-clean', a diluted HF solution, for 120 s. The HF clean removes the native oxide that is present at the surface. This step is followed by a 'standard clean-2', or 'RCA-2 clean', typically at 80 °C for 10 min. This consists of a (6:1:1) mixture of (H₂O:HCl:H₂O₂). These steps ensure the wafer becomes free from organic and metal contaminants. A 2 μm thick layer of PECVD oxide (Plasmalab 80Plus, Oxford systems) is deposited on both sides of the wafer as shown in figure 2(b). The oxide layer serves as a hard mask for further processing of the device and handle wafers. The wafer is subjected to hexamethyldisilazane (HMDS) vapours for a few minutes. This process allows for better adhesion of the photoresist (PR) onto the silicon dioxide surface. For opening a square window to the device layer, a photoresist (SPR 700) layer is spin-coated on the oxide layer with a thickness of 1 μm. The PR is patterned using UV stepper lithography (Nikon NSR TFHi12) to achieve an array of 2 μm × 2 μm square windows. After exposure, the wafer is heated at 90 °C for 1 min for a post-exposure bake (PEB). During the PEB, the UV-exposed region becomes soluble in the developer. The wafer is developed with CD-26 developer for 3 min. The exposed oxide layer is etched using an RIE etcher (Applied Materials Precision 5000 Mark II) for 12 min as shown in figure 2(c). Following the oxide etch, the SOI wafer is dipped in a 30% KOH solution at 80 °C for 120 s. This allows the formation of an entrance port that has an inverted pyramid shape in the device layer as shown in figure 4(a). This step is critical to achieving a pointed tip at the inverted pyramid, serving as an initiation point for the following electrochemical etching step. The wafer is rinsed and dried. To protect the device layer from further unwanted processing, a thick layer of 3 μm photoresist is coated on the surface. The wafer is hard baked at 90 °C for 20 min in the oven. For the handle layer processing (backside), the wafer is coated with an adhesion layer (HMDS) for a few minutes at 150 °C and then spin-coated with a 1 μm thick photoresist (SPR-700).
The handle layer pattern on the mask is aligned with the device layer inverted pyramid using a UV Mask aligner (MA6/BA6, Karl Suss) as shown in the schematic figure 2(e). After exposure, the wafer is heated at 90 C for 1 min for a post-exposure bake (PEB). The wafer is developed with CD-26 developer for 3 min. As a result, a square pattern of size 100 100 m is opened on the photoresist from the backside. The exposed oxide mask is etched with RIE etcher using STS ICP Multiplex Advanced Oxide Etch system for about10 min. Following this step, a deep reactive ion etching (DRIE) of the handle silicon is carried out using STS ICP DRIE as shown in the schematic figure 2(f). The DRIE-Bosch procedure is carried out in two steps. In the first step, the etching is carried out at 13.56 MHz at 15 W power for 150 min at 3 m min −1 etch rate, which etches 450 m of silicon from the handle layer side. The second step is to etch at a low frequency at 380 kHz to minimize notching with a low etch rate at 1 m min −1 for 50 min. This method protects the device layer and buried oxide layers and avoids harm with the aggressive etching by ICP. The result is shown in the SEM image of figure 4(b). Following this, the wafer is diced into small pieces to obtain 15 15 mm chips, each containing a template for single nanopore etching. Electrochemical etching of nanopores Further processing is carried out on the chip level to obtain a long and straight nanopore by electrochemical etching in the breakdown mode. A schematic of the electrochemical etching set-up is shown in figure 3(a). The 1 m thick BOX layer is removed using diluted HF at a rate of 20 nm min −1. The chip is mounted on a home-built PTFE cell that is filled with 5% HF: H 2 O: C 2 H 5 OH (1:8:3) solution on the device side of the chip and NaCl solution (9 mg ml −1 ) on the handle layer of the chip. Two O-rings contact on two faces of the chip to avoid liquid leakage. A voltage bias is applied between these two chambers using platinum electrodes which, is inert to the aggressive electrolyte. When a bias (V b ) is applied between the electrodes in the electrochemical cell, the bottom tip of the inverted pyramid experiences a stronger field strength due to the decreased membrane thickness and the high curvature at that point. With sufficient bias, the breakdown field strength can be selectively reached (3 10 5 V cm −1 ) at the bottom tip of the inverted pyramid as compared to a planar surface. Holes are generated in the breakdown process, thereby initiating electrochemical etching of silicon at pre-defined locations. In addition, the anisotropy of the etching rate plays an important role in the formation of straight pores. At the tip, since the current density equals to the critical current density due to the assumption that a tetravalent reaction happens, the current density is the highest along <100> directions of the silicon crystal thus the pores tend to grow along the direction. Figure 3(b) shows a schematic of a long nanopore in the device layer of the silicon chip. The voltage bias versus time duration that was applied to create a nanopore is shown in figure 3(c). The applied bias is generated from a DC voltage source. Two relays are used to control and switch on and off the circuit to adjust the bias voltage. An optimized voltage bias-time duration profile is required to achieve the desired nanopore diameter. A longer etching time leads to wider diameter of nanopore. 
A thorough investigation of the relation between the nanopore diameter and the rate of electrochemical breakdown etching is described elsewhere. Each bias profile has two regions: a high bias and a low bias. To initiate the etching at the tip of the inverted pyramid, a high voltage (15 V) is applied for 30 s to trigger avalanche breakdown at the tip, generating holes which are necessary for the electrochemical etching of silicon. The voltage is then lowered gradually over 30 s to 10 V and held stable for 7 mins to continue etching perpendicular to the device layer in the breakdown regime. To minimize the pore diameter and to ensure smooth sidewalls of the pore, the voltage bias is decreased to a value of 10 V. During the transition phase from high voltage to low voltage, the current first peaks at a value of 30 μA and then decreases to a value of approx. 20 μA. The steady-state low-bias region lasts for 7 min and ensures the formation of a high aspect ratio and smooth nanopore. However, careful control of the duration of the voltage bias is required to ensure a narrow pore. The control parameters, among which the high-bias duration, the ramp slope and the low-bias region are the most important, influence the diameter of these nanopores. By this method, nanopores of approximately 20 to 30 nm in diameter were obtained using optimized parameters. This method of processing a nanopore is fast (8 min) and requires fewer control parameters than the previously described etching method, which uses illumination to achieve breakdown. The SEM images in figure 4 show the nanopore chip before and after electrochemical etching. Figure 4(a) shows a top view from the device layer of the pyramidal cavity after the silicon processing and before electrochemical etching. The {111} planes of silicon intersect to form a sharp-pointed apex of the pyramid as shown in the inset image. From the handle layer side, the SEM image in figure 4(b) and the close-up view show a deep reactive ion etched (DRIE) square-formed hole with the exposed SiO₂ layer, and the formation of the scallops along the walls indicates a complete etching of the handle layer revealing the suspended membrane. After electrochemical etching, the surface of the silicon becomes rough and a nanopore is formed at the bottom of the inverted pyramid as shown by the SEM image in figure 4(c). From the substrate side looking down to the device layer, the nanopore is visible, indicating that the nanopore has been etched completely through the device layer and has a diameter in the range of 30-40 nm, as visible in figure 4(d) and the inset image. The processing of many devices has consistently yielded single or arrays of nanopores in this diameter range as defined by the initial lithography step. Nanopore DNA measurement set-up A chip containing a nanopore with a 10 nm to 30 nm diameter (figure 4) is placed in between the two chambers of a Teflon cell. The schematic of the biomolecule detection set-up is shown in figure 5. The chip is held by an O-ring and is tightly sealed to the Teflon cell. The chamber is filled with 0.1 M KCl solution on both sides. Before assembling, the chip is cleaned in oxygen plasma to improve the wettability of the chip surface. After assembling, trapped air bubbles are carefully removed from the membrane by insertion of buffer solution.
Two Ag/AgCl electrodes (Alvatek LF1-45, UK) are connected to each of the chambers to create an electric field through the nanopore under applied bias and are also used for measuring the ionic current. A low-noise and low-power voltage-clamp amplifier from Tecella (Pico-2, Tecella, US) is used to measure the ionic current through the pore. The data acquisition is set to a 40 kHz sampling rate and recorded on a computer. To minimize external noise during measurement, the entire system of the amplifier and the Teflon cell is placed inside a Faraday cage. Analysis of translocation events The information about translocation events, such as the current blockade amplitude (I_b), the dwell time (t_d,FWHM), and the relation between the dwell time and current blockade, is analysed from the recorded data using custom Matlab® routines. The dwell time is the time related to the DNA translocating through the nanopore, and it is defined as the full width at half maximum (FWHM) of each translocation event. According to Pedone et al, the FWHM value of the translocation of a DNA in a nanopore is the most representative time value, since it avoids errors due to the finite filter rise time and pulse dilation. In addition, only those events whose amplitudes exceed the 3σ value from the baseline are counted. This exclusion method is required to avoid false detection of an event. Thus, short current spikes were avoided and not counted as translocation events. The current blockade, I_b, is defined as the difference between the ionic current at the local minimum of an event and the average baseline value. The event charge deficit is given as ecd = ∫_event I(t) dt, where I(t) is the ionic current and dt is the time increment over the duration of an event, and is given in kilo electron charge (ke). Results and discussion The detection and analysis of dsDNA translocation through the silicon nanopore on SOI are presented and discussed. We demonstrate that a long, high aspect ratio nanopore typically gives a longer translocation time that can potentially be used for single-biomolecule detection with lower-bandwidth instruments. Detection of dsDNA translocation in the nanopore The measurement set-up for detection of translocation of individual dsDNA strands is described in the earlier section. The conductance of the nanopore is experimentally determined by varying the applied voltage (V_b) from 0 to ±800 mV in steps of 100 mV and calculated to be 0.02 μS at 1 M KCl. The measurement of a finite conductance also confirms the wetting of the nanopore. For the detection of dsDNA, a solution containing dsDNA with a concentration of 300 pM and 1000 bp length (Gene Technology Group, KTH, Sweden) is prepared and carefully dispersed in a 0.1 M KCl electrolyte on the negative potential side of the Teflon chamber as shown in figure 5. By varying the applied voltage bias after the dsDNA is dispersed into the solution, a threshold voltage of 300 mV is determined for the set-up. Below this voltage and at negative bias, no translocation of DNA occurs. Analysis of the DNA translocation through the nanopore is presented in figure 6. For DNA translocation trace recording, the voltage is increased to 800 mV and the ionic current is recorded for a duration of 300 s. After that, current spikes are observed as shown in figure 6(a), ascribed to the current blockade obtained from the DNA translocation. The noise in the ionic current was estimated to be about 300 pA.
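The event analysis described above was performed with custom Matlab routines that are not reproduced here; the following Java sketch only illustrates the stated criteria (a 3σ acceptance threshold, the FWHM dwell time, and the event charge deficit as the integral of the blockade current over the event). The class, method and variable names and the simple peak-walking logic are assumptions.

import java.util.ArrayList;
import java.util.List;

public final class TranslocationEventDetector {

    public static final class Event {
        public final double dwellTimeSec;   // FWHM dwell time
        public final double blockadeAmp;    // peak blockade current I_b
        public final double chargeDeficit;  // ecd = integral of the blockade current dt

        Event(double dwellTimeSec, double blockadeAmp, double chargeDeficit) {
            this.dwellTimeSec = dwellTimeSec;
            this.blockadeAmp = blockadeAmp;
            this.chargeDeficit = chargeDeficit;
        }
    }

    public static List<Event> detect(double[] current, double sampleRateHz,
                                     double baseline, double sigma) {
        List<Event> events = new ArrayList<Event>();
        double dt = 1.0 / sampleRateHz;
        double threshold = 3.0 * sigma;             // 3-sigma acceptance criterion
        int i = 0;
        while (i < current.length) {
            if (baseline - current[i] > threshold) {
                int start = i;
                double peak = 0.0;
                double charge = 0.0;
                while (i < current.length && baseline - current[i] > threshold) {
                    double blockade = baseline - current[i];
                    peak = Math.max(peak, blockade);
                    charge += blockade * dt;
                    i++;
                }
                // FWHM: samples within the event whose blockade exceeds half the peak value.
                int fwhmSamples = 0;
                for (int k = start; k < i; k++) {
                    if (baseline - current[k] >= 0.5 * peak) {
                        fwhmSamples++;
                    }
                }
                events.add(new Event(fwhmSamples * dt, peak, charge));
            } else {
                i++;
            }
        }
        return events;
    }
}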
The voltage was increased to 800 mV to obtain a better signal-to-noise ratio (approx. 2.5 times that at 300 mV) and to allow more dsDNA molecules to translocate per second. The analysis results for translocation events occurring at 800 mV bias voltage are shown in figures 6(c) and (d). For example, in one of the captured translocation events the DNA is assumed to enter the pore at one end and exit at the other, which can be inferred from the constant current value of 1.6 nA lasting approx. 1.5 ms (figure 6(a) and inset), whereas in another event the DNA is speculated to adopt a coiled conformation because of the shorter translocation time of approx. 500 µs. The histogram analysis of dwell times gives an average dwell time for the DNA in the solid-state nanopore of t_d,FWHM = 1.1 ms. Luo et al observed that the geometry of the nanopore, especially its length, plays an important role in slowing down the DNA. With our silicon technology, the DNA translocation process is observed to be slower by 2-3 orders of magnitude compared to, e.g., thin silicon nitride membranes. However, with functionalized nanopores, it is reported that the DNA can be slowed down even more. The SEM image of the nanopore after the DNA translocation experiment, which was carried out for 300 s, is shown in figure 6(b). From the figure, it is seen that the nanopore diameter has enlarged from 30-40 nm (figure 4(d)) to 60-70 nm. Thus, the nanopore has enlarged by 20-40 nm at a rate of 4-8 nm min−1. The slow etching of the silicon under the applied voltage bias caused this dynamic increase in nanopore diameter. One method to mitigate the unwanted etching of the nanopore is to deposit a thin dielectric layer around the nanopore. From the analysis, the average blockade current and the average event charge deficit are calculated to be approx. 700 pA and 13 780 kilo electron charges (ke), as shown in figures 6(c) and (d), respectively. The distribution of the dwell time and the blockade current is plotted as a scatter plot in figure 6(c). We speculate that the broad spans of dwell time and ionic current blockade might be due to the large variation in molecular shape during translocation. The dwell times span 0 to 2 ms and the blockade currents 0 to 2 nA, and there is a positive correlation between the dwell time and the current blockade. At 1200 mV (not shown), the translocation rate was calculated to be 60 translocations s−1, and at 800 mV it was approx. 15 translocations s−1. The increase in the number of events with bias confirms the single-molecule origin of the translocations: the capture volume increases at higher bias, resulting in a larger translocation rate for a given DNA concentration. The inverted pyramid could act as a micro-funnel that guides the DNA into the access region of the nanopore. Therefore, our design, combining an inverted pyramid with a long nanopore, provides an efficient and effective way of detecting DNA translocation.

Conclusion

In summary, we have demonstrated single-nanopore fabrication on the wafer scale. Using this process, we produced twelve wafers in four batches, with 26 devices per wafer.
The devices measured 15 mm × 15 mm and contained single nanopores as well as arrays of nanopores. The wafer-level fabrication on SOI gave a yield of about 60%-70%, with pore diameters in the range of 10 nm to 50 nm. The characterized single-nanopore chip had a high-aspect-ratio pore, 30-40 nm in diameter and approx. 700 nm long, processed on an SOI wafer and completed by electrochemical breakdown etching. A repeatable and stable process flow is demonstrated. The solid-state nanopores thus obtained were successfully tested for 1 kbp dsDNA translocation. The dwell time for a single dsDNA translocation is measured to be in the range of 0-2 ms, significantly slower than for a typical silicon nitride nanopore. The demonstrated concept is advantageous for biomolecule sensing, allowing better control of the analyte-nanopore interaction. We plan to further increase the length of the nanopore, to determine the translocation time for various lengths of DNA, and to functionalize the inner surface of the nanopore to slow the DNA molecule down further. We also plan to use the functionalized surface to study the binding and unbinding behaviour of biomolecules through conductance measurements.
Q: How defensive should we be? We've been running Pex over some code, and it has been showing some good things (well, bad things, but showing them before it gets to production!). However, one of the nice things about Pex is that it doesn't necessarily stop trying to find issues. One area we found is that when passing in a string, we were not checking for empty strings. So we changed: if (inputString == null) to if (string.IsNullOrEmpty(inputString)) // *** That fixed the initial issues. But then, when we ran Pex again, it decided that: inputString = "\0"; was causing problems. And then inputString = "\u0001"; What we've decided is that defaults can be used if we encounter // *** and that we are happy seeing the exception caused by any other odd input (and dealing with it). Is that enough?

A: Three questions should help you determine how defensive to be in your coding. First: what are the consequences of bad input getting through? If it is an error message on one of your developers' PCs, maybe not so critical to be defensive. Could it result in financial disruption at clients, i.e. accounting information disruption? Is it a real-time system where lives are at risk? In the life/death scenario, there should probably be more validation and error-handling code than actual feature code. Second: how many people will re-use this function or piece of code? Just you? Your department? Your company? Your customers? The wider the use of the code, the more defensive you should be. Third: what is the source of the input I am validating? If it is input from the user or from a public web site, I would be super defensive. If the input is always coming from your code, be somewhat defensive, but don't spend undue time putting in checks. It will always be possible to add in more error checking and validation in a system. The point is: will the cost of writing and maintaining this code outweigh the cost of the problems caused by errors in the code?

A: Users are evil, and anything they input should be checked with the utmost rigor. Anything generated without the benefit of user input, or from pre-sanitized data, shouldn't need to be checked at the same level. The problem here is when you forget and use these methods on bad data, because you've forgotten that the code wasn't hardened. The one thing you should always check is anything that might cause an overflow or a crash. It doesn't matter how deeply that method is buried, or how sure you are that the condition could never occur. You need to program for it anyway, just to placate Murphy.
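As a language-agnostic sketch of the kind of check being discussed (illustrative only — the original code is C#, and the exact policy is the judgment call the answers describe): reject null/empty strings and strings containing control characters such as "\0" or "\u0001", falling back to a default where that is acceptable.

def validate_input(input_string, default=None):
    """Return a usable string or `default`; raise only on clearly bad input.

    One possible policy, shown for illustration; not a general recommendation.
    """
    if not input_string:                     # None or empty -> fall back to a default
        return default
    if any(ch < ' ' and ch not in '\t\r\n' for ch in input_string):
        # control characters such as '\0' or '\u0001' are rejected loudly
        raise ValueError("input contains control characters")
    return input_string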
Image caption: Neil Thomas Douglas, right, met his doppelganger on a flight to Galway

A man has spoken of the "total weirdness" of encountering his doppelganger on a flight. Neil Thomas Douglas boarded a flight on Thursday night and came face-to-face with his lookalike. Mr Douglas, a photographer from Glasgow, said: "Everyone around us had a laugh, we took a selfie and that was it." The selfie was posted on Twitter and has been retweeted thousands of times. The photographer explained that when he boarded the Ryanair flight to Galway via London Stansted there was already someone sitting in his seat. He said: "There was a dude already on my seat. When the guy looked up, I thought: 'He looks like me'." But the pair were later to encounter a further coincidence when they checked into the same hotel in Galway. Mr Douglas added: "Later that night, I went to the pub and again, there was my twin. Total weirdness. We had a laugh and a pint."

Image caption: Neil Thomas Douglas, left, enjoyed a drink with his doppelganger after bumping into him for a second time

The selfie was posted on Twitter by Lee Beattie, director of Wire Media, who wrote: "Guy on right is the husband of my friend. Guy on left is a STRANGER he met on a flight last night!"
///////////////////////////////////////////////////////////////////////////////// // TgcViewer-cpp // // Author: <NAME> // ///////////////////////////////////////////////////////////////////////////////// #pragma once //General Includes: #include "Examples/TgcViewerIncludes.h" #include <D3DX10math.h> namespace Examples {class BorderBox;} namespace Examples { struct Ray { Vector3 origin; Vector3 dir; }; /** * Axis aligned bounding box */ struct AABB { Vector3 min; Vector3 max; AABB() { min = Vector3(0, 0, 0); max = Vector3(0, 0, 0); } AABB(Vector3 min, Vector3 max) { this->min = min; this->max = max; } /** * Test AABB-AABB */ static bool testAABBAABB(AABB a, AABB b) { // Exit with no intersection if separated along an axis if (a.max.X < b.min.X || a.min.X > b.max.X) return false; if (a.max.Y < b.min.Y || a.min.Y > b.max.Y) return false; if (a.max.Z < b.min.Z || a.min.Z > b.max.Z) return false; // Overlapping on all axes means AABBs are intersecting return true; } /** * Ray-AABB Colision */ static bool intersectRayAABB(Ray ray, AABB aabb, Vector3 &q) { bool inside = true; float max_t[] = {-1.0f, -1.0f, -1.0f}; float coord[3]; for (unsigned int i = 0; i < 3; ++i) { if (ray.origin.v[i] < aabb.min.v[i]) { inside = false; coord[i] = aabb.min.v[i]; if (ray.dir.v[i] != 0.0f) { max_t[i] = (aabb.min.v[i] - ray.origin.v[i]) / ray.dir.v[i]; } } else if (ray.origin.v[i] > aabb.max.v[i]) { inside = false; coord[i] = aabb.max.v[i]; if (ray.dir.v[i] != 0.0f) { max_t[i] = (aabb.max.v[i] - ray.origin.v[i]) / ray.dir.v[i]; } } } // If the Ray's start position is inside the Box, we can return true straight away. if (inside) { q = ray.origin; return true; } unsigned int plane = 0; if (max_t[1] > max_t[plane]) { plane = 1; } if (max_t[2] > max_t[plane]) { plane = 2; } if (max_t[plane] < 0.0f) { return false; } for (unsigned int i = 0; i < 3; ++i) { if (plane != i) { coord[i] = ray.origin.v[i] + max_t[plane] * ray.dir.v[i]; if (coord[i] < aabb.min.v[i] - 0.001f || coord[i] > aabb.max.v[i] + 0.001f) { return false; } } } q = Vector3(coord); return true; } }; struct PickingRay { Ray ray; void update() { //Crear Ray en base a coordenadas del mouse float sx = (float)GuiController::Instance->input->mouseX; float sy = (float)GuiController::Instance->input->mouseY; int w = GuiController::Instance->windowHandler->screenWidth; int h = GuiController::Instance->windowHandler->screenHeight; Matrix4 matProj = GuiController::Instance->renderer->projectionMatrix; Vector3 v; v.X = (((2.0f * sx) / w) - 1) / matProj.M11; v.Y = -(((2.0f * sy) / h) - 1) / matProj.M22; v.Z = 1.0f; //Transform the screen space pick ray into 3D space //Matrix4 m = Matrix4::SInvert(GuiController::Instance->renderer->viewMatrix); Matrix4 view = GuiController::Instance->renderer->viewMatrix; D3DXMATRIX dxView = D3DXMATRIX( view.M11, view.M12, view.M13, view.M14, view.M21, view.M22, view.M23, view.M24, view.M31, view.M32, view.M33, view.M34, view.M41, view.M42, view.M43, view.M44 ); D3DXMATRIX mOut; D3DXMatrixInverse(&mOut, NULL, &dxView); Matrix4 m = Matrix4( mOut._11, mOut._12, mOut._13, mOut._14, mOut._21, mOut._22, mOut._23, mOut._24, mOut._31, mOut._32, mOut._33, mOut._34, mOut._41, mOut._42, mOut._43, mOut._44 ); //Matrix4 m = Matrix4::SInverse(GuiController::Instance->renderer->viewMatrix); Vector3 rayDir = Vector3( v.X * m.M11 + v.Y * m.M21 + v.Z * m.M31, v.X * m.M12 + v.Y * m.M22 + v.Z * m.M32, v.X * m.M13 + v.Y * m.M23 + v.Z * m.M33 ); Vector3 rayOrig = Vector3(m.M41, m.M42, m.M43); //Picking Ray creado ray.origin = rayOrig; ray.dir = rayDir; } 
}; /** * EditorBox */ class EditorBox { public: enum BoxFace { Front = 0, Back = 1, Top = 2, Bottom = 3, Left = 4, Right = 5, None = -1 }; EditorBox(); EditorBox(const EditorBox&); ~EditorBox(); void create(); void updateValues(); void render(); void dispose(); void setExtremes(Vector3 min, Vector3 max); EditorBox::BoxFace pickFace(); AABB getFaceAABB(EditorBox::BoxFace face); private: public: static const int VERTEX_COUNT = 36; TgcVertexBuffer* vertexBuffer; TgcIndexBuffer* indexBuffer; TgcEffect* effect; Vector3 size; Vector3 position; Color commonColor; Color selectedColor; Matrix4 transform; AABB aabb; BorderBox* border; BoxFace selectedFace; bool selected; private: }; }
package helper_classes; import java.util.List; /** * This is a class that mimics a tuple in python */ public class CmdArgTuple { private final String command; private final List<String> arguments; /** * Constructor for the tuple */ public CmdArgTuple(String command, List<String> arguments) { this.command = command; this.arguments = arguments; } /** * Return first object */ public String getCommand() { return this.command; } /** * Return second object */ public List<String> getArguments() { return this.arguments; } }
//+--------------------------------------------------------------------------- // // Class: CMarshalList (ml) // // Purpose: Maintains a list of marshaled exposed objects that // represent the same storage or stream // // Interface: See below // // Notes: This class is intended to solve the "pointer identity" // problem. When an IStorage or IStream is passed as an // [in, out] RPC parameter, two marshal/unmarshalings occur, // one for the [in] side, and a reverse marshaling for // the [out] side. The previous algorithm always allocated // a new exposed object for every unmarshaling, so the // pointer going into an [in,out] call isn't the same // as the pointer returned from the call. // // This algorithm links the exposed objects together, each // link keyed by the ContextId (ProcessId). When unmarshaling, // this list is checked for a valid exposed object that // can be reused. If not, then a new exposed object // is allocated and inserted into the linked list. // Instances of this class must live in shared memory // in order for the list to traverse across processes // // History: 26-Mar-96 HenryLee Created // //---------------------------------------------------------------------------- class CMarshalList { protected: inline CMarshalList (); inline ~CMarshalList (); public: inline CMarshalList * GetNextMarshal () const; inline void SetNextMarshal (CMarshalList *pml); inline ContextId GetContextId () const; CMarshalList * FindMarshal (ContextId ctxid) const; void AddMarshal (CMarshalList *pml); void RemoveMarshal (CMarshalList *pml); private: CBasedMarshalListPtr _pmlNext; ContextId _cntxid; }
High Rates of Tuberculosis and Opportunities for Prevention among International Students in the United States. RATIONALE Foreign-born persons traveling on a student visa are not currently screened for tuberculosis on entry into the United States, despite residing in the United States for up to several years. OBJECTIVES To characterize the risk of tuberculosis in international students entering the United States and to identify strategies for early diagnosis and prevention in this population. METHODS Data were collected in 18 tuberculosis control jurisdictions in the United States. A cohort of 1,268 foreign-born patients of known visa status, diagnosed with active tuberculosis between 2004 and 2007, was used for analysis. Incidence rates were estimated on the basis of immigration data from study jurisdictions. MEASUREMENTS AND MAIN RESULTS Tuberculosis was diagnosed in 46 student residents, providing an annual estimate of 308 cases nationally. The estimated tuberculosis case rate in student residents was 48.1 cases per 100,000 person-years (95% confidence interval, 35.6-64.8), more than twice that of the general foreign-born population. Students identified by tuberculosis screening programs were more likely to be diagnosed within 6 months of U.S. arrival (75 vs. 6%; P < 0.001), and those with pulmonary disease were less likely to have a positive sputum smear for acid-fast bacilli compared with those not screened (18 vs. 63%; P = 0.05). In unscreened students, 71% were diagnosed more than 1 year after U.S. arrival and only 6% were previously treated for latent tuberculosis infection. CONCLUSIONS The tuberculosis case rate in foreign-born students is significantly higher than in other foreign-born individuals. Screening this group after arrival to the United States is an effective strategy for earlier diagnosis of active tuberculosis.
An Information System for the Analysis of Color Distributions in MovieBarcodes We present an ongoing project from the field of quantitative film studies, sometimes also referred to as Cinemetrics. Most of the related work in this area is focused on quantitative analyses of shot lengths and distributions. In this paper, we suggest color as an additional quantitative parameter for movie analysis and describe an information system that allows scholars to search for movies via their specific color distribution. As a source of condensed movie color information, we make use of the MovieBarcode database. A MovieBarcode is created by skewing each frame of a movie to be only 1 pixel wide. Lining up all these frames in a row creates a barcode-like visualization of the most dominant colors in a movie (fig. 1). Our information system makes use of the color diff library to map more than 1,500
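As a sketch of the construction just described: each frame is reduced to a single-pixel-wide column and the columns are concatenated into a barcode image. The use of OpenCV and the specific resampling choice are assumptions made for illustration; the actual MovieBarcode database images may have been generated differently.

import cv2
import numpy as np

def movie_barcode(video_path, height=256):
    """Build a barcode image with one 1-pixel-wide column per frame (illustrative sketch)."""
    cap = cv2.VideoCapture(video_path)
    columns = []
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        # squeeze the whole frame into a single column of the chosen height
        columns.append(cv2.resize(frame, (1, height), interpolation=cv2.INTER_AREA))
    cap.release()
    return np.hstack(columns) if columns else None

# Example (hypothetical file name): cv2.imwrite("barcode.png", movie_barcode("movie.mp4"))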
Internet-based video streaming is increasingly the choice of viewers who seek convenient access to video outside of conventional video distribution channels (including over-the-air broadcasts, cable TV, satellite TV, and prerecorded physical media). Using streaming technologies, viewers may access channels of live video as well as prerecorded video from libraries of video assets that are accessible over an Internet connection. In some cases, streaming video is viewable on a wide range of devices, including desktop computers and laptop computers, tablets, smartphones, wearable computers, and specialized devices such as smart televisions. The flexibility and convenience of streaming video are responsible in part for its widespread adoption. However, the distribution and delivery pipeline for streaming video is typically a complicated one, and the pipeline may break down at any of several points. A video asset or live stream may first be acquired, e.g., from a broadcaster. The video may then be processed and transformed in any of several ways (potentially including compression, encryption, and other forms of encoding) for eventual distribution to viewers. A hierarchy of servers over a wide geographical area may be used to deliver the video to many viewers in an efficient manner. The viewer may then attempt to play the video on a viewing device. If the pipeline has failed at any point, then the video may fail to play or may play in an unsatisfactory manner (e.g., choppy or in a low resolution). Diagnosing the problem may prove difficult due to the complexity of the pipeline. In addition, diagnosis may be complicated by the numerous vendors and entities who are responsible for various components of the pipeline, from the broadcaster to the vendor of the client device or its software. While embodiments are described herein by way of example for several embodiments and illustrative drawings, those skilled in the art will recognize that embodiments are not limited to the embodiments or drawings described. It should be understood, that the drawings and detailed description thereto are not intended to limit embodiments to the particular form disclosed, but on the contrary, the intention is to cover all modifications, equivalents and alternatives falling within the spirit and scope as defined by the appended claims. The headings used herein are for organizational purposes only and are not meant to be used to limit the scope of the description or the claims. As used throughout this application, the word “may” is used in a permissive sense (i.e., meaning “having the potential to”), rather than the mandatory sense (i.e., meaning “must”). Similarly, the words “include,” “including,” and “includes” mean “including, but not limited to.”
# Assumed import: the stax_nt prefix suggests the neural-tangents stax module.
from neural_tangents import stax as stax_nt

def WideResnetBlocknt(channels, strides=(1, 1), channel_mismatch=False,
                      batchnorm='std', parameterization='ntk'):
    """Wide-ResNet block built from neural-tangents stax layers.

    `_batch_norm_internal` is assumed to be defined elsewhere in the code base.
    """
    Main = stax_nt.serial(
        _batch_norm_internal(batchnorm),
        stax_nt.Relu(),
        stax_nt.Conv(channels, (3, 3), strides, padding='SAME',
                     parameterization=parameterization),
        _batch_norm_internal(batchnorm),
        stax_nt.Relu(),
        stax_nt.Conv(channels, (3, 3), padding='SAME',
                     parameterization=parameterization))
    # Shortcut branch: identity unless the channel count or stride changes.
    Shortcut = (stax_nt.Identity() if not channel_mismatch else
                stax_nt.Conv(channels, (3, 3), strides, padding='SAME',
                             parameterization=parameterization))
    return stax_nt.serial(stax_nt.FanOut(2),
                          stax_nt.parallel(Main, Shortcut),
                          stax_nt.FanInSum())
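A brief usage sketch, assuming the neural-tangents stax convention in which serial returns an (init_fn, apply_fn, kernel_fn) triple; the input shape, channel count and random key below are illustrative, and `_batch_norm_internal` must be defined for this to run.

from jax import random

init_fn, apply_fn, kernel_fn = WideResnetBlocknt(channels=16)
key = random.PRNGKey(0)
# NHWC input whose channel count matches `channels`, so the identity shortcut applies.
output_shape, params = init_fn(key, (-1, 32, 32, 16))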
Hedge Funds and their (Non)regulation

The objective of this contribution is not only to explain the position of institutional investors on global capital markets, but also to evaluate their impact on the operation of financial systems. The core of this contribution is dedicated to hedge funds, which in the period before the outbreak of the global financial crisis were not subjected to almost any regulation, except for some dishonest practices. The institutional response to the global financial crisis, however, significantly changed the regulatory-supervisory approach to hedge funds as well. The Dodd-Frank Act quite promptly introduced sensitive registration of important investment managers of hedge funds in the USA. At the same time, the new American financial legislation imposed a duty on managers of hedge funds and other private funds to keep prescribed records. On the other hand, the European directive on managers of alternative investment funds has a very restrictive nature, because it implements a standard license system and costly internal and external control mechanisms, and inadequately increases the powers of supervisory authorities.

Petr MUSÍLEK

Institutional investors count among the significant participants on global capital markets. These investors are, however, not a homogeneous group, since they not only pursue different investment policies but also operate in different regulatory and tax environments. The objective of this contribution is to analyze the impact of institutional investors on capital markets, with special attention paid to the segment of hedge funds, both in the period before the outbreak of the global financial crisis and in the post-crisis period, as exemplified by the USA and the European Union.

Institutional investors and their impact on capital markets

Hedge funds may be classified as institutional investors playing a considerable role in financial systems. What do we understand under the term "institutional investor"? According to Gitman and Joehnk (1990, p. 10), institutional investors are "... professionals paid for management of other people's money. They are employees of financial institutions such as banks, life insurance companies, mutual funds, pension funds, large non-financial corporations, and in some cases also individuals...". For the purpose of this contribution, we define the institutional investor as an institution managing investment assets of great extent while using professional investment methods. The following institutions are classified as institutional investors: open and closed funds of collective investment, banks, pension funds, insurance companies, hedge funds, and other managers of private portfolios. The ever-increasing significance of institutional investors on capital markets is driven by several factors, among which the deregulation of financial systems in the eighties and nineties of the last century and at the beginning of the new millennium, prior to the outbreak of the global financial crisis, ranks among the most important. Not only in consequence of the massive increase in competition in commercial banking, but also as a market response to the implementation of the Basel model of banking regulation, which required a considerable increase of shareholders' equity, banking institutions expanded into asset management, which is analyzed in detail by Revenda.
Also liberalization of the investment environment contributed to the boom of institutional investment, both, in the area of generating and distribution of investment and pension products and also in possibilities to allocate investment assets. Another factor of institutional investors' expansion is a demographic development that is specific by constant aging of population in developed countries, which becomes evident in huge demand for investment-insurance products that provide income to persons in retirement age. And finally, technological revolution in the area of computer and telecommunication technology opened wide opportunities to institutional investors in the liberalized investment environment. Technical progress contributed to definite drop of transaction and management costs related to professional asset management. Increased importance of institutional investors influences significantly the microstructure of capital markets. Liquidity of secondary capital markets, adequate standards of information disclosure, market oriented accounting, functional legislative environment, and healthy banking system are extremely important for activities of institutional investors. Liquidity requirements of institutional investors support probably not only consolidation of fragmented national stock exchanges into multinational stock electronic trading systems, but also contribute to creating specialized institutional markets. Institutional investors influence significantly also the structure of demand for investment instruments. As compared with individual investors, institutional investors in principle invest in long-term and foreign investment instruments. It seems that also the development of financial engineering is stimulated by institutional investors who permanently require new products to control investment risks, tax positions, and compliance or evasion of regulatory rules, which is analyzed in details by Dvok. Institutional investors also positively contribute to formation of the global capital market through international investment development. With respect to the fact that national capital markets were not positively correlated perfectly in the past, it was possible to diversify the systematic risk based on international investments. Precisely the effort to diversify the systematic risk became progressively evident by extensive multi-nationalization of portfolios of institutional investors, which was however the cause of price integration of individual national capital markets into a global market at the beginning of the new millennium. Institutional investors also positively contribute to stimulation of the offer of the risk capital. In particular American institutional investors allocate part of their portfolios in young and fast-growing companies, which was one of the factors supporting the dynamic economic growth from the second half of the eighties of the last century practically to the outbreak of global financial crisis in 2008. We must also not ignore the fact that institutional investors manage companies based on the pressure applied to the management by maximizing performance of stock instruments of publicly tradable companies. In the nineties of the last century, some institutional investors and their asset managers switched gradually from passive to active execution of ownership rights, which contributed positively not only to the growing value of stocks, but also to increased efficiency of joint-stock companies. 
Reasonable price volatility of investment instruments is a common part of the investment process. Financial economists however examine particularly the impact of excessive or even extreme price volatility of investment instruments on stability of financial systems. Are institutional investors the cause of turbulences on the global capital market, or do they on the contrary rather contribute to its higher stability by their activities? Empirical studies carried out to date (e.g. Sias, 1996, Kirkpatrick, 2009 Claessens, Ariccia, Igan, and Laeven, 2010) rather confirm the opinions that consolidation of investment assets under management of large institutional investors may generate excessive price volatility of investment instruments. Portfolio managers are namely often vulnerable to "herd" behavior, since their performance is usually judged to the market benchmark set, which then proves in imitating investment strategies of crucial investment players on capital markets. Institutional investors also sometimes respond to unexpected information influencing prices in similar manner, which in aggregate expression causes particularly significant fluctuation of net demand for insufficiently liquid investment instruments. It is evident that institutional investors use also standardized systems of risk management. The core of these systems consists in statisticalmathematical models with very similar and preset parameters, which in case of both, positive and negative price shock outbreak, leads to "herd" responses of institutional investors. In particular globally operating commercial banks started expanding to investment markets relatively on a massive scale. Banking houses at the same time developed a new banking discipline in connection with the development of mathematical financial economy and computer technology -the so-called modern risk management. In 1994, the Value-at-Risk (VaR) approach was used in banking for risk management for the first time by American group JPMorgan Chase. VaR measures what loss may be realized due to modification of monitored risks (interest rate, exchange rate, stock, or commodity) in a certain period with predefined probability. The use of VaR approach started massively prior to the outbreak of global financial crisis also in the area of asset management, not only due to its simple application, but mainly because this approach was intelligible and easily understandable to top managers and members of statutory bodies. VaR approach was not only recommended, but also even often required by regulatory and supervisory authorities. 
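As a concrete illustration of the quantity being discussed: the sketch below computes a one-day historical-simulation VaR from a series of portfolio returns. This is a generic textbook formulation, not the specific models used by the banks mentioned in the text, and the confidence level and sample data are assumptions.

import numpy as np

def historical_var(returns, confidence=0.99):
    """One-period Value-at-Risk by historical simulation.

    Returns the loss threshold that is not exceeded with the given
    probability, expressed as a positive fraction of portfolio value.
    """
    returns = np.asarray(returns)
    return -np.quantile(returns, 1.0 - confidence)

# Illustrative use: simulated daily portfolio returns (assumed data).
rng = np.random.default_rng(0)
daily_returns = rng.normal(0.0005, 0.01, size=1000)
print(f"99% one-day VaR: {historical_var(daily_returns):.2%} of portfolio value")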
This approach, however, has several fundamental (and difficult to remove) deficiencies, the most serious of which are:
- VaR does not express the highest losses that may be incurred on investment instruments or an investment portfolio, and it significantly underestimates the probability of occurrence of an extreme price-forming shock,
- VaR neglects systemic risks,
- most VaR methods are based on the presumption of a normal distribution of returns on investment instruments,
- the VaR approach supports pro-cyclical behavior of capital markets, since most VaR users operate with similar input data (indeed with historical time series of returns/losses of investment instruments or portfolios) and similar model characteristics, which necessarily brings out "herd" behavior of banks and managers of investment portfolios and escalates financial panic even further,
- VaR is in principle based on the presumption that previous developments will repeat (though with certain modifications) in the future period, which is rather a coincidence than reality, since there are no rational grounds for the repetition of unexpected, fundamentally and psychologically significant price-forming events that change the behavior of the return rates of investment instruments or portfolios,
- in the period prior to the outbreak of the global financial crisis, credibility risk and the risk of loss of liquidity were absolutely neglected in applying the VaR method.
The extensive and usually mechanical use of the VaR method (though in some cases supplemented with stress testing and the analysis of different scenarios) mainly confirmed managers of banking houses in their belief that they may invest on capital markets in volatile instruments using substantial financial leverage, since they are capable of managing risks effectively based on the so-called modern risk management approach, and no considerable threat is imminent. This hazardously oriented strategy was successful in certain banks (e.g. American, British, Dutch, or Icelandic) for a relatively long time, and some managers managed to increase profitability unprecedentedly and thus transform bank stocks from previously conservative titles into titles of the growth type (growth stocks). We however believe that this was not due to applying the so-called modern risk management, but mainly due to the unusually long (practically from the mid-eighties of the last century to the middle of 2008) favorable development on global investment markets. After the investment mood shifted, many banks did not manage to change their investment strategy in time and got into serious problems. We cannot even rule out that institutional investors may also behave in an irrational manner, since their investment strategies are not supported by adequate fundamental factors. In particular, newly formed investment markets, young companies, and investment innovations often suffer from information asymmetry. Institutional investors and asset managers then, in conceiving and realizing their investment strategies, proceed from assumptions and investment feelings rather than from true and accurate fundamental information, which results in investment mistakes that are usually positively correlated. The consequence is economically unfounded demand for certain investment instruments and a subsequent price bubble.

Special type of institutional investors: hedge funds

Traditional hedge funds are not ordinary institutions of collective investment; they have the nature of private and non-regulated investment products intended for selected clients.
For economic, tax, and regulatory-supervisory reasons, hedge funds operate in particular in the USA and off-shore centers, and relatively small number in the European Union. Clients of hedge funds consist exclusively of rich or professional investors and their number usually does not exceed several tens or hundreds, which means that hedge fund securities have the nature of private investment instruments. Managers of hedge funds use flexible approach to portfolio management, because they are not subject to any regulation and assets allocation and diversification regulations. They invest in wide range of investment instruments (e.g. stocks, bonds, financial derivatives, or commodities) using active investment techniques including leverage (encumbered) investment operations. Managers of hedge funds are remunerated using both, fixed method (manager's commission usually 1-2% p.a. of the value of managed assets, supposing however that the portfolio manager achieves positive returns) and based on performance of the managed portfolio (usually 20% of annual returns achieved). It is usually required at the same time that the portfolio manager becomes a partner of the investment structure and invests also his/her own funds in the hedge fund's portfolio. Investors cannot withdraw invested funds from the hedge fund immediately, but they must observe the notice period, which is between 30 days and 3 years. This notice period is sometimes also supplemented with considerable sanction withdrawal charges. Hedge funds may not be mixed with other types of non-regulated investment structures. Hedge funds are very often similar to private equity funds, not only by the method of capital collection, but also by the system of managers' remuneration. Great difference however consists in the investment method. Hedge funds invest mainly to liquid investment instruments, which enables investors to withdraw from fund structures. On the contrary, private equity funds invest large portion of assets in nonliquid investment instruments, which means that fund investors have practically no chance to withdraw before the predefined horizon of realized investment policy. Venture capital funds invest assets in private nontradable companies through important capital participation for a relatively long investment period. Venture capital funds search for highly speculative investment opportunities. Therefore their instruments rank among considerably hazardous and practically non-liquid investment assets. Hedge funds were originated on the American market at the end of forties of the last century. In 1949, Jones Hedge Fund was founded that used hedge transactions with stocks, based on combination of margin trading and short selling. Profits were achieved through the selection of undervalued stock titles for margin trading and overvalued instruments for short sales. In following two decades, more than 150 hedge funds were founded on the American market. Some of them however progressively abandoned basic hedge principles and used increasingly borrowed funds. Upon the drop of share prices in the second half of the sixties of the last century, most of them suffered great losses and essential part of them also terminated their business activities. The follow-up renaissance of hedge funds was experienced in the mid-nineties of the last century, not only due to liberalization of international investments, but also due to wide supply of new investment opportunities. 
Newly founded hedge funds in last two decades started using various investment strategies, which enables to differentiate macro funds (they enter into positions of changing global economic conditions that show in stock prices, interest rates, or exchange rates), global funds (they invest on selected developed and emerging stock markets), market-neutral funds (they use hedging operations consisting in combination of margin trading and short selling), sectorial funds (they invest in selected sectorial stock only), short sales funds (they realize mainly short sales and thus speculate in price fall of investment instruments), event-driven funds (they search for specific events such as acquisitions or mergers, cause significant fluctuations in market prices of stock instruments), and funds of funds (they invest in other hedge funds, using borrowed funds). Macro funds, global funds, event-driven funds, and in recent years also short sales funds became the most widespread. At the end of the nineties of the last century, more than 5 000 hedge funds existed on global investment markets, and they managed almost USD 300 bil. The volume of assets managed by hedge funds was constantly increasing in the new millennium and exceeded USD 2.5 trillion before the outbreak of global financial crisis. In the course of the global financial crisis, many hedge funds suffer great losses and withdrawal of clients; the volume of assets managed dropped approximately to one half, and hundreds of funds had to terminate their operation. With gradual overcoming of the results of global financial crisis, certain stabilization was experienced in the segment of hedge funds. Substantial characteristics of the sector of hedge funds are demonstrated by the following table. Post-crisis regulation of hedge funds It results from the review above that hedge funds did not play very great role on global investment markets at the beginning of the nineties of the last century. Their importance however increased significantly in the new millennium. In addition, hedge funds started using extensively also leverage products and some of them took a fancy of high-frequency trading, which multiplied their position on global investment markets. As already mentioned, part of hedge funds also operates from off-shore centres (in particular from Cayman Islands) where regulatory and supervisory is almost non-functional. Even before the first outbreak of global financial crisis, financial research started bringing clear evidence that hedge funds may contribute to financial instability, mainly thanks to the aggressive investment strategy used (e.g. Fung and Hsieh, 2000), "herd" behaviour (e.g. Brunnermeier and Nagel, 2004), excessive indebtedness (e.g. Garbaravacius and Dierick, 2005), or credit risk occurrence of the counterparty (e.g. Cifuentes, Ferrucci, and Shin, 2005). In addition, non-transparency of the sector of hedge funds makes almost impossible to monitor the systemic risks of this specific investment sector. Not until the outbreak of global financial crisis, after several decades of operation of hedge funds as non-regulated investment structures, the segment of hedge funds began to be considered a nontransparent market that may deepen financial instability even by regulatory authorities. 
Washington summit of G-20 countries in autumn 2008 came to the conclusion that hedge funds may contribute to financial turbulences, therefore it is appropriate to monitor reasonably their investment activities, including the volume of their leverage positions. At the next summit in London (April 2009), G-20 representatives reached an agreement that hedge funds and their managers should not only be subjected to registration and information duty, but also to the obligation to implement functional system of risk management. Following the London summit, the International Organization of Securities Commissions (IOSCO), issued a document in June 2009, with general principles for regulation and supervision of funds that invest in hedge structures. Toronto summit of G-20 in June 2010 supported prompt implementation of transparent supervision of hedge industry, having integrated international nature of non-discriminatory character. American mortgage crisis and fall of the leading investment bank Lehman Brothers opened necessarily the discussion on sweeping reform of the American financial system. Multi-factor nature of American financial crisis consists not only in deflation of the bubble of real-estate prices supported by excessive credit expansion, but also in extensive speculation of insufficiently capitalized financial institutions on non-transparent markets, inefficient operation of rating assessment market, spread of dishonest practices, and failure of the regulatory-supervisory mechanism. The Ministry of Finance of the USA therefore prepared a proposal of a sweeping financial reform that however underwent a stormy professional and law-makers' debate. The original proposal of financial reform underwent significant changes within the legislative process and transformed into a compromise version known as Dodd-Frank The Wall Street Reform and Consumer Protection Act, in short DFA. As usual in the USA, this act also carries indication of key persons in the legislative process. The key role was played this time by Ch. Dodd and B. Frank. The act became effective in July 2010, although some of its provisions have delayed legal force. The objective of DFA is to reduce progressively the risk exposure of large banks, not only by increasing the capital adequacy, implementing liquidity rules and leverage principle, but also by segregating the business department with certain derivatives from the main business operation of the bank to subsidiary companies. As already mentioned, the importance of hedge funds in particular on American capital markets increased significantly in the new millennium. After the fall of Lehman Brothers, American hedge funds were considered not only a non-transparent segment, but also potential source of systemic risks that may be the cause of financial crisis. Therefore DFA includes an act in its clause four that regulates registration of investment managers of private funds (Private fund Investment Advisers Registration Act, 2010). This act amends the Investment Advisers Act from 1940 and revokes the greater part of previous exceptions from registration duty applied for several decades by managers of hedge funds and other private funds. Post-Lehman legislative regulation implements mandatory registration of hedge funds managers who are responsible for assets exceeding USD 150 mil., at the federal Securities and Exchange Commission. Managers of smaller hedge funds and other private funds (within USD 25 to 150 mil.) 
are also subject to registration, however not on the federal but national level. The new implemented registration system for managers of hedge funds and other private funds however still enables to use several exceptions that relate in particular to foreign private managers located outside the USA (with less than 15 private clients in the USA with investment assets not exceeding USD 25 mil.), managers of risk funds, and managers of family funds. New legislation also regulated the definition of an accredited investor, who may together with professional investors invest in hedge funds. The accredited investor is considered a physical entity with the minimum income USD 200 ths. in last two years (or USD 300 ths. with a spouse), with the assumption to maintain such income in the current year, or who owns net assets exceeding USD 1 mil., without major residential estate. Not only registered but also non-registered managers of hedge funds and other private funds will have to also create information systems, in which they are obliged to keep data particularly on the volume of assets managed, amount of indebted positions, risk of counterparties, business and investment positions, types of investment assets, appraisal policy, and business practices that must be controlled by supervisory authorities on a regular basis. Supervisory authorities may also acquire other important and fundamental information from information systems of managers. Managers of hedge funds, registered at the federal Securities and Exchange Commission are obliged to create a position of a Chief Compliance Officer. This person is responsible not only for preparation of the regulation base in the "Compliance Manual" form (including most important policies of managers such as monitoring, marketing, claims, initial offerings, evidence, dishonest practices, portfolio management, appraisals, information duty or personal data protection), but also for observance of legal and internal regulations. DFA also seeks to reduce connection of banks and hedge funds, based on application of Volcker's principle. This principle reduces banks' business activities at their own account and banks' investments in hedge funds and private funds up to maximum amount of 3% of the capital defined as Tier I. It is however modified version of the original Volcker's principle only, which was to forbid banks fully to deal with and invest to risky investment instruments at their own account. After heated professional debate, significantly lighter version of this principle was adopted. If hedge funds execute transactions with derivatives, then postcrisis approach to the derivate market will apply to them. It is in particular significant tightening of over-the-counter transactions with derivatives (mainly swap contracts). We may at the same time anticipate that not only the institute of central counterparty for most over-the-counter derivatives but also the duty to execute derivate transactions either on stock markets and/or on special swap markets will be implemented for financial institutions (including hedge funds) in the foreseeable future. The postcrisis model of regulation and supervision of managers of hedge funds may still be classified as a "light" regulatory approach, based mainly on almost overall implementation of registration duty of hedge fund managers and on the effort to reduce occurrence of information asymmetry in relation to investors and supervisory authorities. 
Managers of hedge funds and other private funds had a very short time to adapt to the new legislation, i.e. 12 months only. The problem however is that DFA and other new legal regulations are relatively general legal standards, and discretional power to create regulations is mostly transferred to supervisory authorities, which are in addition obliged to process tens of specialized studies and propose or adopt other regulation rules according to them. It is therefore very difficult to estimate the definite nature not only of the whole American financial reform but also of individual regulations for managers of hedge funds and other private funds. Significant changes in the approach to the segment of hedge funds were experienced in the post-Lehman period also in the European Union, where these funds however do not play such great role in the financial system as in the USA, except for Great Britain. Global crisis uncovered some weak components also in the European financial system, and British hedge funds are just one of them. Although the operation of hedge funds was by no means the primary cause of the global (and also European) financial crisis, their non-transparent and often aggressive investment style complicated considerably not only identification of financial instability, but obstructed also in adopting effective recovery programs. Several years lasting discussion in various bodies of the EU on the regulation model of hedge-type business operation finally eventuated in accepting an extensive Alternative Investment Fund Managers Directive, that however does not cover hedge funds only, but also private equity funds, venture capital funds, trust structures, and all other funds operated in the EU that are not subject to regulation according to the Directive of the European Parliament and the Council No. 2009/65/EC, on coordination of legal and administrative regulations of collective investment in transferable securities, as yet. Adopted Directive will significantly influence not only the operation of European funds, but also funds outside the Union that are offered to investors from the European Union. The Directive implements the permission mechanism for managers of alternative funds who actively collect the capital from large number of investors and are not at the same time subjected to the regime according to the Directive UCITS. Lighter permission regime (drawn up on the national level) will apply to funds with the assets up to EUR 100 mil., or funds up to EUR 500 mil., if they do not used leverage products and the investors' right of resale is longer than 5 years. On the contrary, the new Directive will not apply to holding companies, joint ventures, pension funds, family funds, and securitization special-purpose units. Member states of the EU may permit offering of all or selected alternative investment funds even to non-professional investors, based on the appraisal whether the fund is suitable also for less experienced investor. Obtaining licenses for managers of alternative funds is subject to quite strict requirements, and the applicant must present required information not only on the manager (top managers, identification of stockholders and associates of the manager with qualified participation, business plan, remuneration system), but also on individual alternative investment funds (status, investment strategy, depository). 
The precondition for obtaining the license to for alternative funds management effective in all member states of the EU is in particular the sufficient capital adequacy (EUR 125 ths.). If the value of assets of alternative investment funds however exceeds EUR 250 mil., the fund manager must increase the capital by 0.02% of the amount that exceeds EUR 250 mil., and the total of the initial and additional capital must not exceed EUR 10 mil. Another important precondition for obtaining the license of the manager of alternative funds is the sufficient and quality personnel. Managers of alternative investment funds must act with professional care, impartially, and prevent potential conflicts of interest. They must in particular fulfill the information duty against supervisory authorities and investors (e.g. provide required information prior to execution of the investment decision, disclose annual reports, or report amounts of leverage positions). Managers of alternative investment funds are at the same time obliged to implement risk and liquidity management systems (identification and measuring of all risks including regular stress tests) that must be also functionally and hierarchically separated from operations and management activities. Managers must also define maximum leverage effect for each alternative investment fund and observe them. National supervisory authorities will even gain powers to define maximum amount of the leverage position, with the aim to reduce the systemic risks in the period of financial instability and thus minimize the risk of financial crisis occurrence. Managers of alternative investment funds will have to implement large number of external and internal control mechanisms. Each alternative investment fund will be subjected to control of an independent depository (bank or investment firm). Lighter regime will apply in case of alternative investment funds without resale right in first five years, private equity funds or risk funds, when depository's duties may be transferred to a notary public, attorney, registrar, and/or other subject. Managers must also implement the system of independent, regular, and impartial appraisal of assets in the portfolio of alternative investment funds, and such appraisal may be realized by external expert, depository, and/or own administrator, if sufficient organizational conditions providing a guarantee of fair appraisal of the assets alternative investment funds are created for this. Fundamental change in the segment of hedge funds, in the fashion of banks, will consist in implementation of rules for remuneration of managers and top employees of alternative investment fund managers. 
The remuneration system must correspond with due and efficient risk management; the most important principles are in particular:
- the remuneration principles are in compliance with the objectives of the managers of alternative investment funds and their investors, and at the same time do not incite conflicts of interest,
- independent, regular (at least annual) controls of the remuneration system,
- definition of a balanced ratio between the fixed and floating remuneration components,
- evaluation of the results achieved is carried out over a longer period and within the context of the whole investment cycle,
- a considerable share of the payment, at least 40% of the floating component, is made at a time that is reasonable with respect to the life cycle and repurchase principles of the respective alternative fund,
- final payment of the floating component is made only under the condition of a good financial situation of the manager and the respective alternative investment funds.
Although the Directive on managers of alternative investment funds became effective in August 2011, putting it into European investment practice will be quite a lengthy and complicated issue, since the transposition period for individual member states of the EU ends in August 2013 and market participants then have one more year to adapt to the requirements set. A considerable complication, however, is the fact that the Directive will be accompanied by a whole series of implementing regulations, the preparation of which is currently only in progress. An even lengthier issue will be implementing the Directive and other Union regulations in relation to hedge fund managers outside the Union, whose transition period runs practically until 2018.

Conclusion

From the long-term point of view, institutional investors have a very positive impact on capital markets, since their extensive and frequent activities not only contribute to higher liquidity of secondary markets and a reduction of transaction costs, but also incite the creation of modern trading systems. On the other hand, we cannot rule out that institutional investors may be liable under certain conditions to "herd" behavior, which may sow the seeds of future financial instability. The question, however, is whether regulatory-supervisory authorities (in particular financial stability committees) can identify the "herd" behavior of managers of institutional investors' portfolios in time and take adequate measures. A special type of institutional investors - hedge funds - operated for several decades in an almost unregulated environment. The institutional response to the global financial crisis significantly influenced the position of hedge fund managers, who are newly subjected to a regulatory-supervisory mechanism. The main objective of the new approach to the hedge fund industry is not only to create conditions for financial stability (by central monitoring of the systemic risks resulting from the portfolios of hedge funds and their leverage positions), but also to reduce information asymmetry by making the business and investment policy of hedge fund managers more transparent (keeping important records and their regular or random control). The new regulatory approach to hedge funds by no means seeks detailed regulation of their investment policies by defining acceptable investment assets or various rules of risk diversification, which is, on the contrary, usual for the regulation and supervision of collective or pension investment institutions.
The American law regulating the registration of investment managers of private funds had a very short implementation period and represents quite a sensitive approach to the hedge fund industry. We cannot, however, say the same of the European Directive on managers of alternative investment funds. We believe that this Directive is too extensive, that it incomprehensibly introduces standard licensing of alternative fund managers (similar to that of investment companies operating in the collective investment area), and that putting it into investment practice is unreasonably complicated and lengthy. We are afraid that this insensitive European model of regulation and supervision of alternative fund managers may negatively influence not only the competitive environment, but also unreasonably increase the management costs of hedge funds and other private funds.
Considerations for SAR image quantification unique to orbital systems A discussion is presented of the quantitative expressions required for the response of a synthetic aperture radar (SAR) to both point and distributed scatterers, for purposes such as calibration and polarimetry. Image gains depend on the viewing geometry, which in an orbital geometry is unlike the flat-Earth case that is often assumed. The image signal-to-noise ratio depends on the footprint velocity, whereas the mean clutter-to-noise ratio for distributed scatterers depends on the spacecraft velocity. When imagery of a distributed scene observed by an orbital SAR is to be calibrated by comparison with the impulse response of a reference point scatterer, the velocity ratio enters the expression for peak power, but does not enter when an integral over the impulse response is used. The author also examines the processing gain resulting from overlapping image pixels in azimuth through sampling at the pulse repetition frequency.
Green Synthesis of Ag NPs Using Ustilago maydis as Reducing and Stabilizing Agent Ustilago maydis (UM) is a fungus that grows naturally on Zea mays; it reduces corn yields and thus represents a huge economic loss; however, it can be used as an exotic food, and in the present work it is successfully used as a reducing and stabilizing agent for the preparation of silver nanoparticles (Ag NPs) due to its content of amino acids and biosurfactants. The effects of the concentration of the UM aqueous extract, pH, and sunlight on the particle size, surface plasmon resonance, stability, and morphology of Ag NPs obtained by green synthesis were evaluated. A green reduction was observed only in the presence of UM, and colloidal Ag NPs were obtained with or without sunlight; nevertheless, continuous sunlight exposure greatly increased the reaction rate. Ag NPs tend to increase in size from 153 nm to 1400 nm at higher pH and greater amounts of UM, and UM tends to stabilize the Ag NPs, preventing their agglomeration, according to the zeta potential measurement (−10.75 ± 0.84 mV) and SEM observation; furthermore, the surface plasmon resonances between 400 and 480 nm were more intense when a greater amount of UM was added. This study concludes that UM not only reduces AgNO3 but also acts as a stabilizer of Ag NPs. Their properties depend on their size, surface structure, and shape. Ag NPs can be obtained via photochemical and chemical reduction. The use of natural sources as reducing agents has been considered a part of green synthesis and has drawn attention because it is considered safe, nontoxic, cost-effective, and environmentally friendly. A high diversity of plant extracts, such as Eugenia jambolana, Saraca asoca, Rhynchotechum ellipticum, and Malus domestica, has been used for this purpose, with no precipitation within a year because of the amino acids, vitamins, and carbohydrates that supply reducing and capping agents. Sunlight and artificial light are photoinductors in the reduction of AgNO3 as a precursor for Ag NPs, and it is difficult to establish the exact mechanism by which the natural extracts induce the reduction and structural conformation of Ag NPs. On the other hand, Ustilago maydis (UM) is a smut fungus that belongs to the Basidiomycetes; it infects maize (Zea mays) and represents crop loss, except in Mexico, where it is called "huitlacoche" and is an exotic food with a unique appearance and flavor and a valuable nutritional source. The UM contains amino acids, fatty acids, vitamins, phenolic compounds, and biosurfactants different from those contained in maize, which together have the potential to reduce and stabilize the Ag NPs, as this study suggests. The aim of this work was to obtain and characterize Ag NPs using U. maydis as a reducing bioagent and to evaluate the effect of its concentration, pH, and sunlight on Ag NPs formation, since there is a lack of information on the use of U. maydis as a stabilizing agent and on the quantification of the sunlight effect in Ag NPs synthesis using a green reductant. Materials and Reactants. UM was purchased at a local market in Santa María del Monte, Zinacantepec, Mexico. AR-grade AgNO3 was purchased from Sigma Aldrich. The pH was adjusted with 0.1 N NaOH using a Science Med potentiometer with a glass electrode. Deionized water was used for the preparation of all solutions in this work (electrical conductivity of 0.1 µS/cm). Synthesis of Ag NPs.
UM was manually separated from corn kernels and dried at 60 °C in a laboratory oven for 24 h until a constant weight was reached; it was then manually ground and sieved before use. Extracts were prepared with 0.1, 0.5, or 0.9 g of UM powder mixed with 50 mL of deionized water and stirred at room temperature for 10 min; the mixture was then filtered and the pH was adjusted to 9, 10, or 11. The reduction was performed similarly to earlier reports; it was done by mixing 10 mL of 0.01 N AgNO3 with 10 mL of the UM aqueous extract, with or without a natural or artificial source of light. Using a lux meter, it was determined that in the absence of light (inside a dark chamber) the reaction mixture was exposed to 0-20 lux, in contrast to >10,000 lux when exposed to direct midday sunlight. As a control of the reduction reaction, 10 mL of deionized water was added to the AgNO3 solution instead of the aqueous extract of U. maydis. Sample Characterization. To monitor the rate of the reduction step, UV-Visible (UV-Vis) spectroscopy was performed (DR6000 spectrophotometer, HACH, USA) in a 0.2 cm path-length quartz cuvette, scanning over the 240-900 nm wavelength range. The quartz cuvette was cleaned constantly with acidified water (0.2% HNO3) and rinsed thoroughly with deionized water to avoid cross-interference between readings of the samples. Dynamic light scattering (DLS) was performed in duplicate on a laser particle size analyzer (Litesizer 500, Anton Paar, Switzerland) with a polyethylene cuvette of 1 cm path length and a 40 mW, 658 nm light source in automatic measurement-angle mode, using 0.159 as the refractive index and 4.3 m−1 as the absorption coefficient, assuming that the sample material was reduced silver and that the solvent was water with a refractive index of 1.33. Zeta potential at 25 °C was measured on the same particle analyzer by electrophoretic light scattering with an Omega cuvette (a polycarbonate cuvette with two gold-coated electrodes located at the ends of a U-shaped capillary tube) using the Smoluchowski approximation. Reaction conditions were adjusted to 0.5 g of U. maydis, at pH 10 and room temperature. Scanning electron microscopy (SEM) images were acquired from dried drops of the samples left on an aluminum disk sample holder that was mounted in the electron microscope (JSM-6510LV, JEOL, Japan) under high vacuum and observed at an acceleration voltage of 5-8 kV with a secondary electron detector (SED). Energy dispersive spectrometry (EDS) analysis was performed with a QUANTAX EDS X-Flash 6L30 (Bruker) detector, and elements were quantified with the ESPRIT software. Effect of Sunlight. The sunlight effect on AgNO3 under conditions of 0.5 g of UM at pH 10 was evaluated and is shown in Figures 1 and 2. The first peak of the maximum wavelength was achieved after 50 seconds of sunlight exposure, and the absorbance of the reduced AgNO3 started to increase; a redshift of the surface plasmon resonance was observed, roughly proportional to the time of sunlight exposure. The well-defined absorption in the region between 400 and 480 nm is related to Ag NPs formation, the increase in absorbance is related to the number of Ag NPs formed, while the increase in maximum wavelength, or redshift, is related to an increase in the particle size of the Ag NPs. The fact that both the maximum wavelength and the absorbance increase indicates that NP agglomeration occurs.
First, the number of Ag NPs escalates and accumulates in the reaction volume, and the NPs continue to assemble with each other into clusters, as shown in Figure 2, which indicates the increasing displacement of the maximum absorption wavelength of the nanoparticles during the synthesis (black curve) and the increase of the absorbance intensity (red curve). Furthermore, the experiment in the dark was also followed: after 24 hours of reaction, a spectrum revealing a maximum wavelength at 384 nm and an absorbance intensity of 0.159 was obtained, which can be related to smaller and less abundant Ag NPs. It was also assessed whether sunlight is required only to start the reaction or whether it is also necessary for the reaction to pass chemical equilibrium and reach completion. It was observed that, even though the reaction occurs in the dark, the changes in number and size are smaller when the reaction is kept in the dark, even with initial sunlight exposure. The vast difference between the sunlight and darkness effects gave us the insight that, to clearly study the effect of UM, the next reactions had to be carried out either in the absence of light for a long period of time or in sunlight for a short period of time. Hence, 100 seconds of initial light exposure with follow-up for up to 10 min and up to 6 days was selected to study the pH effect and the addition of UM extract to the reaction. Effect of U. maydis and pH. A control reaction without UM was also studied, and no reaction was observed. Aqueous extracts of 0.1 and 0.9 g of UM (0.1 U and 0.9 U, respectively) were used at pH 9 and 11 with 100 seconds of light exposure (Figure 3(a)), followed by 10 min (Figure 3(b)) and 6 days (Figure 3(c)) in the absence of light, to study the Ag NPs stability. In general, increasing both the amount of UM and the pH increases the number and size of Ag NPs. A 0.9 U extract at pH 11 had the highest absorbance intensity during the initial 100 seconds and the next 600 seconds in the dark. However, after 6 days of being maintained in the dark, the 0.9 U extract at pH 9 had the highest absorbance intensity around 420-450 nm and thus the highest production of dispersed Ag NPs. On the other hand, the UV-Vis spectrum of the reaction of 0.9 U at pH 11, which was the darkest of the four samples, had a wider and less defined band in the region of 600 nm, indicating that the Ag NPs were agglomerated. In Figure 4, the particle size distributions of Ag NPs are shown to analyze the effect that pH and the UM aqueous extracts had on the colloidal solutions prepared after 100 seconds of sunlight exposure. The NPs obtained at pH 11 tend to increase in size, as was observed by UV-Vis spectroscopy. With 0.1 U extracts, the particle size differs much more between pH 9 and pH 11 than when 0.9 U extracts are used at both pHs. Interestingly, in the particles obtained with the 0.1 U extract at pH 11, two distributions were found, a larger one with an average diameter of 1232 ± 389 nm and another with an average diameter of 121 ± 30 nm. No aggregation was observed at pH 9 with either 0.1 U or 0.9 U extracts, for which the average diameters of the nanoparticle populations were 153 ± 51 nm and 170 ± 67 nm, respectively. For 0.9 U at pH 11, the mean particle size increased to 256 ± 102 nm. The results are of interest because the increase of UM in the reaction had little impact on the particle size and diminished the agglomeration of the Ag NPs obtained.
DLS measurements are supported by the SEM observations, as shown in Figure 5; however, the comparison between techniques must be made carefully, as the particle size of the Ag NPs determined by DLS refers to a hydrodynamic diameter, while SEM observes a particle size in the dry state, and thus the sizes obtained by these techniques have different interpretations. In the SEM observations, the Ag NPs were observed as bright dots with quasi-spherical morphology and average diameters of approximately 145 ± 47 nm for 0.1 U at pH 9, 225 ± 45 nm for 0.9 U at pH 9, 4183 ± 1263 nm for 0.1 U at pH 11, and 178 ± 78 nm for 0.9 U at pH 11 (Table 1). We selected some patterns observed by SEM for EDS analysis to confirm that at higher pH the Ag NPs tend to increase in size and quantity and that C atoms were found in greater abundance as the amount of UM increased; for 0.1 U at pH 11 the Ag NPs tend to agglomerate, and for 0.9 U at pH 11 the Ag NPs tend to appear encapsulated by a biological structure that we infer is a combination of the different structures present in UM. As can be seen, the particle size determined by SEM for the 0.1 U sample at pH 11 was higher than expected; this can probably be related to the sample treatment prior to the micrograph, which consists of drying the sample at 50 °C and can induce agglomeration of the particles. In the micrograph, this is observed as a bright cloud that tends to agglomerate. Particle Size and Stability. In Figure 6, the zeta potential of the four conditions is shown as a box graph with the respective standard deviation. All samples presented a negative zeta potential, with a significant difference (p < 0.05) for 0.9 U at pH 9, which showed the lowest negative value of −10.75 ± 0.84 mV, an indication of the conditions for the most stable nanoparticles obtained; this points to the fact that increasing the UM extract can confer stability on the nanoparticles, so that it does not act only as a reducing agent like other green reducing agents that can be extracted from other natural sources. Further studies must be done to identify which UM extraction conditions can modify its reducing and stabilizing effects on the preparation of Ag NPs via green synthesis, as well as to assess which components of the UM are responsible for conferring stability. It must be noted that the procedure presented in this work is based on Ag NPs prepared with an aqueous extract that may contain colloidal suspended solids of UM, which may provide a surface interface for the cation reduction and thus may work as reactive sites for stable nanoparticle growth. Also, sunlight was used for some time in the reaction, and it is composed of a wide range of wavelengths and multiple directional components that may interact differently during the reducing step of the synthesis. Direct control of the light characteristics (wavelength, incident angle, intensity, and polarization) and of the exposure time may result in nanoscale-controlled particle growth and accurate assembly of the nanoparticles, as other studies have shown.
Though different maize varieties produce significant differences in UM characteristics, UM is considered to have good nutritional features, as it contains essential amino acids (including lysine and tryptophan), a fair amount of unsaturated fatty acids, glycerol, minerals, monosaccharides and polysaccharides (including glucose and fructose as the most abundant, both reducing sugars), vitamins (including vitamin C, a potent antioxidant), and abundant phenolic compounds, flavonoid compounds, and chlorophylls. Thus, regarding the reduction reaction, it may be impossible to pinpoint a single specific compound that interacts with AgNO3 and carries out the reduction to produce Ag NPs. Research studies exploring other reducing agents propose several mechanisms under alkaline conditions that can explain it, as Ag+ cations can interact with hydroxide ions to form silver oxide particles that may allow catalytic reduction of Ag+ on their surface. It has also been demonstrated that Ag+ cations can be reduced by aldehydes and alkoxides, which can be formed in organic matter containing alcohols, ketones, and aldehydes in the presence of dissolved oxygen. Even at neutral pH, it has been proposed that conversion of hydroxyl groups into aldehydes and later into carboxylic acids may be a reduction mechanism for Ag+. Moreover, dehydration of UM may have initially produced esterification of various components that were reversibly reconverted to monosaccharides, alcohols, and fatty acids in the presence of water and then again oxidized by Ag+. The stabilization of nanoparticles prevents their agglomeration and sedimentation and therefore allows particle sizes and shapes to be adjusted; for this purpose, surfactants are typically added to the reaction. It has been identified that under low nitrogen concentrations UM produces large amounts of ustilagic acid and ustilipids, two biosurfactants derived from glycolipids. Furthermore, lignin, a complex polymer present in plant support tissues, including corn, and likely also present in UM, has been used successfully as a reducing and capping agent in Ag NPs synthesis and may be part of the active compounds in UM extracts. This is relevant since not only reducing agents are of interest: amphipathic biomolecules with surface-active properties also have several advantages in green synthesis compared with synthetic surfactants, because the former are biodegradable, have low toxicity, and work effectively with controlled activity at specific pH values and temperatures. [Figure 6: Zeta potential of Ag NPs obtained with 0.1 g or 0.9 g of U. maydis at pH 9 and pH 11 after 100 s of sunlight exposure at room temperature.] Conclusions Aqueous extracts of Ustilago maydis, the natural smut of Zea mays, induce the reduction of AgNO3 to obtain Ag NPs with sizes between 100 and 5000 nm, depending on the reaction conditions. The main factors that affect the size and stability of the Ag NPs are the presence of sunlight, the pH, and the quantity of UM aqueous extract, which not only acts as a reducing agent but also confers stability on the metallic nanostructures. The analysis of this green synthesis method confirms that components in UM are effective in reducing silver nitrate and in stabilizing Ag NPs. Further studies are necessary to know which components of UM act as reducer and stabilizer. Data Availability The data used to support this study are included within the article. Disclosure The authors are part of SNI-Conacyt.
Conflicts of Interest The authors declare that they have no conflicts of interest.
The present invention relates to a material that can be either hydrophilic or hydrophobic depending on the temperature, and to a method for obtaining said material. The present invention also relates to the use of this material for the decontamination of effluents and waste waters. Substances displaying temperature-dependent hydrophilic or hydrophobic characteristics are known, such as thermo-reversible polymers. Thermo-reversible polymers can shift from an initial state to a final state by either heating or cooling, depending on the initial state of the polymer. Above a phase transition temperature called the Lower Critical Solution Temperature (LCST), thermo-reversible polymers change from a hydrophilic state to a hydrophobic state. These polymers possess a hydrophilic moiety and a hydrophobic moiety. These polymers can find various applications, particularly for effluent decontamination. Conventionally, for such applications, the thermo-reversible polymers can be adsorbed on suitable supports made of glass, plastics, or metal. This adsorption does not afford stable or permanent deposits or layers, i.e., a thermo-reversible polymer adsorbed on a support will not adhere sufficiently to that support. Thermo-reversible polymers can also be polymerized in situ on a support. For example, the surface of a support can be impregnated with a monomer solution, and this monomer can then be polymerized, as described by K. Ista et al. in Applied and Environmental Microbiology, 1999, page 1603, or in Feng U.S. Pat. No. 5,997,961. Feng discloses a method of attaching poly(N-isopropylacrylamide) to glass surfaces which comprises the steps of (1) reacting a photosensitizer attached to a trimethoxysilane with a glass surface, and (2) photopolymerizing N-isopropylacrylamide onto this glass surface in the presence of a cross-linking agent. However, this method does not guarantee the adhesion and stability of the polymer layer obtained, nor does it allow a high degree of control over the properties of the polymer itself (e.g., its molecular weight or its polydispersity, and thus its physical or mechanical properties). Besides, the use of a cross-linking agent prevents linear chains of poly(N-isopropylacrylamide) from being obtained. The use of these thermo-reversible polymers thus raises difficulties, which could be overcome by establishing a stable binding between the thermo-reversible polymers and a support, without adversely affecting the thermo-reversibility of the polymers. It would therefore be desirable to be able to fix the polymer to the support so as to overcome the difficulties stated above. The object of the present invention is a method that makes it possible to graft a thermo-reversible polymer onto a support, in particular by a reaction that generates covalent bonds between the thermo-reversible polymer and the support. A further object of the invention is a material that has temperature-dependent hydrophilic and hydrophobic characteristics and comprises a support having a layer of a thermo-reversible polymer grafted thereon, preferably by means of at least one covalent bond. The method of the invention comprises a grafting step which is performed by means of an intermediate bonding compound that comprises (i) a first function able to react with the surface OH groups of the inorganic support and form a covalent bond with them, and (ii) a second function able to react with an end-function of the thermo-reversible polymer and form a covalent bond with it.
The grafting method of the invention thus comprises a first step in which the inorganic support is allowed to react with at least one intermediate bonding compound, and a second step in which the support treated in the first step is allowed to react with an end-function of a thermo-reversible polymer, wherein the thermo-reversible polymer (i) results from the polymerization of a monomer of the formula (structural formula not reproduced here) in which X is H or CH3; Z and Y each represent hydrogen or a linear or branched, substituted or unsubstituted alkyl group of from 1 to 6 carbon atoms; Z and Y can be combined to form a substituted or unsubstituted heterocycle; and Z and Y cannot both be hydrogen; (ii) has an end-function which is capable of reacting with the intermediate bonding compound; and (iii) has a weight-average molar mass (Mw) in the range of from 500,000 to 2,000,000.
package models

import (
	"regexp"
	"time"
)

type Train struct {
	ID        uint64 `sql:"type:bigint PRIMARY KEY"`
	StepRefer uint64
	// Children is the list of operations (e.g. 1+2, 1, 50-1+3).
	Children  []TrainChild `gorm:"foreignkey:TrainRefer"`
	CreatedAt time.Time
}

type TrainChild struct {
	ID         uint64 `sql:"type:bigint PRIMARY KEY"`
	TrainRefer uint64
	Operation  string
	Result     string
	// Speed is the elapsed time in milliseconds.
	Speed     int64
	CreatedAt time.Time
}

// Created returns the creation timestamp formatted as dd/mm/yyyy hh:mm:ss.
func (t Train) Created() string {
	return t.CreatedAt.Format("02/01/2006 15:04:05")
}

// ChildrenSplit tokenizes the operation string while keeping the operators,
// e.g. "3+7" returns ["3", "+", "7"].
func (t TrainChild) ChildrenSplit() []string {
	// regexp.Split would drop the "+"/"-" delimiters, so use FindAllString
	// with a pattern that matches either a number or an operator.
	re := regexp.MustCompile(`\d+|[+-]`)
	return re.FindAllString(t.Operation, -1)
}
# dfms v_0_1 views: JSON endpoints and pages for a flood-monitoring dashboard.
# Written for Python 2 / old-style Django (print statements, urllib.urlopen,
# email.MIMEMultipart); kept in that style, with unused imports removed.
from django.shortcuts import render, render_to_response
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
from django.contrib import auth

from v_0_1.models import data

import urllib
import json
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText

# Login URL used by the @login_required decorators below.
url = '/v_0_1/'


def _rows(readings):
    # Helper: serialize data objects into tuples for JsonResponse.
    return [(str(d.date), d.cm, d.inch, d.time, d.sensorDate,
             d.firstLevel, d.secondLevel, d.thirdLevel, d.arduinoid)
            for d in readings]


def getData(request):
    # Newest-first list of all readings (authenticated users only).
    if request.user.is_authenticated():
        ad = data.objects.all().order_by('date')
        return JsonResponse(list(reversed(_rows(ad))), safe=False)


def openData(request):
    # Same as getData, but without authentication (open endpoint).
    ad = data.objects.all().order_by('date')
    return JsonResponse(list(reversed(_rows(ad))), safe=False)


def getPlotData(request):
    if request.user.is_authenticated():
        return JsonResponse(_rows(data.objects.all()), safe=False)


def getSensorData(request):
    # Poll the Arduino gateway, store the reading, and return the stored row.
    if request.user.is_authenticated():
        sensor_url = "http://qamaruzyun.local/data/get"
        response = urllib.urlopen(sensor_url)
        sensorData = json.loads(response.read())
        print sensorData
        d = data()
        d.cm = sensorData['value']['cm']
        d.inch = sensorData['value']['inches']
        d.sensorDate = sensorData['value']['date']
        d.time = sensorData['value']['date'][11:19]
        d.firstLevel = sensorData['value']['firstLevel']
        d.secondLevel = sensorData['value']['secondLevel']
        d.thirdLevel = sensorData['value']['thirdLevel']
        d.arduinoid = sensorData['value']['deviceid']
        d.save()
        obj = data.objects.latest('id')
        return JsonResponse(_rows([obj]), safe=False)


def getRealSensorData(request):
    # Return only the three alarm levels straight from the sensor.
    if request.user.is_authenticated():
        sensor_url = "http://qamaruzyun.local/data/get"
        sensorData = json.loads(urllib.urlopen(sensor_url).read())
        value = sensorData['value']
        real = [(value['firstLevel'], value['secondLevel'], value['thirdLevel'])]
        return JsonResponse(real, safe=False)


def hazardMap(request):
    # Alarm levels plus the (hard-coded) station coordinates for the map view.
    if request.user.is_authenticated():
        sensor_url = "http://qamaruzyun.local/data/get"
        sensorData = json.loads(urllib.urlopen(sensor_url).read())
        value = sensorData['value']
        longitude = '114.67232912778854'
        latitude = '4.718136002726354'
        real = [(value['firstLevel'], value['secondLevel'], value['thirdLevel'],
                 longitude, latitude)]
        return JsonResponse(real, safe=False)


def checkSensor(request):
    # Report whether the sensor gateway is reachable.
    if request.user.is_authenticated():
        sensor_url = "http://qamaruzyun.local/data/get"
        try:
            urllib.urlopen(sensor_url)
            state = 'on'
        except IOError:
            state = 'off'
        return JsonResponse(state, safe=False)


def sendEmail(request):
    # Send a warning e-mail via Gmail SMTP. The credentials should be moved to
    # settings/environment variables instead of being hard-coded here.
    if request.user.is_authenticated():
        fromaddr = "<EMAIL>"
        toaddr = "<EMAIL>"
        msg = MIMEMultipart()
        msg['From'] = fromaddr
        msg['To'] = toaddr
        msg['Subject'] = "WARNING WATER LEVEL RISING"
        msg.attach(MIMEText("WARNING WARNING WARNING", 'plain'))
        server = smtplib.SMTP('smtp.gmail.com', 587)
        server.starttls()
        server.login(fromaddr, "Brune1234")
        server.sendmail(fromaddr, toaddr, msg.as_string())
        server.quit()
        return JsonResponse('sent', safe=False)


def getWeather(request):
    # Not implemented yet: the original returned an undefined 'status' variable.
    if request.user.is_authenticated():
        return JsonResponse('not implemented', safe=False)
    return


@login_required(login_url=url)
def main(request):
    return render(request, 'v_0_1/main.html')


@login_required(login_url=url)
def sensorList(request):
    return render(request, 'v_0_1/newSensorStatus.html')


@login_required(login_url=url)
def dataLog(request):
    return render(request, 'v_0_1/dataLog.html')


@login_required(login_url=url)
def realTime(request):
    return render(request, 'v_0_1/realTime.html')


def logout(request):
    auth.logout(request)
    return render_to_response('v_0_1/logout.html')
import Document, { Html, Head, Main, NextScript } from 'next/document';

class CustomDocument extends Document {
  // Custom document shell: sets the <head> metadata shared by every page.
  render(): JSX.Element {
    return (
      <Html>
        <Head>
          <title>Bitmon Space</title>
          <meta name="viewport" content="width=device-width, initial-scale=1" />
          <meta name="author" content="BitmonSpace" />
          <meta
            name="keywords"
            content="Bitmon Space,BTMN,NFT,Play to Earn,Bitmon Space universe,Bitmon Marketplace"
          />
          <meta
            name="description"
            content="Bitmon Space is a digital metaverse where people can find digital friends that will accompany you day to day playing different kinds of minigames, collecting and battling."
          />
          <link rel="icon" href="/favicon.png" />
        </Head>
        <body>
          <Main />
          <NextScript />
        </body>
      </Html>
    );
  }
}

export default CustomDocument;
Platelet-Derived Drug Targets and Biomarkers of Ischemic Stroke: The First Dynamic Human LC-MS Proteomic Study Objective: The aim of this dynamic LC-MS (liquid chromatography and mass spectrometry) human platelet proteomic study was to identify potential protein candidates for biomarkers of acute ischemic stroke (AIS) and their changes during the acute phase of stroke, and to define potential novel drug targets. Methods: A total of 32 patients (18-80 years old) were investigated who presented symptoms of AIS lasting less than 24 h from the onset, confirmed by neurological examination and/or new cerebral ischemia visualized in CT (computed tomography) scans. The analysis of the platelet proteome was performed using LC-MS at baseline and then on the third and seventh day from the onset of symptoms. The control group was demographically matched, without any clinical signs of acute brain injury. Results: The differences in platelets between subjects at 24 h after the first symptoms of stroke and the control group included: β-amyloid A4 and amyloid-like protein 2, coactosin-like protein, thymidine phosphorylase 4 (TYMP-4), interferon regulatory factor 7 (IRF7), vitamin K-dependent protein S, histone proteins (H2A type 1 and 1-A, H2A types 2B and J, H2A-v, -z, and -x), and platelet basic protein. The dynamic changes in platelet protein concentration involved thrombospondin-1, thrombospondin-2, and filamins A, B, and C. Conclusions: This is the first human dynamic LC-MS proteomic study that differentiates the platelet proteome in the acute phase of ischemic stroke in a time series and compares the results with healthy controls. The identified proteins may be considered future markers of ischemic stroke or therapeutic drug targets. Thymidine phosphorylase 4 (TYMP-4) holds promise as an interesting drug target in the management or prevention of ischemic stroke. Introduction Cardiovascular disease (CVD) remains among the most common causes of morbidity and mortality reported worldwide annually. In Europe, almost half of fatal cases have been directly connected with CVD. Stroke is among the most common causes of death and persistent disability in adults. In 70-80% of cases, it is caused by cerebral ischemia and is increasingly referred to as "acute cerebral syndrome", indicating its similarity to acute coronary syndromes. Therefore, the detection of biochemical markers of vascular-derived brain damage should have a diagnostic and prognostic meaning similar to that in myocardial infarction and heart failure (e.g., troponin (Tn) and N-terminal natriuretic peptide type B (NT-proBNP)). Since biomarkers must be characterized by certain features, there is still a lack of substances that could be used as reliable indicators of acute central nervous system (CNS) ischemia and that would allow quick diagnosis and appropriate treatment. Platelets play a pivotal role in the pathogenesis of cardiovascular disease, including ischemic stroke, and are an important therapeutic target in the treatment and secondary prevention of cardiovascular events. Thrombocytes can be affected by numerous stress factors present in the blood, which lead to their activation and aggregation and trigger a prothrombotic cascade. During activation, they secrete numerous compounds that support the initiation or exacerbation of ischemia. Proteomics enables the search for differences in protein content and could provide information about changes in proteins during the development of a disease and its treatment.
Since a stroke is associated with thrombus formation, investigating changes in platelet protein expression in subjects in the acute phase of cerebral ischemia should provide important information regarding its pathophysiology, as well as some useful biomarkers and drug targets. Hence, in this study, we assessed the dynamic variability of the platelet proteome and peptidome in the acute phase of ischemic stroke in order to define new platelet-derived biomarkers or drug targets with neuroprotective potential. The aim of the study was to falsify the hypothesis of the absence of significant differences in the platelet proteome between patients with ischemic stroke and controls, using a liquid chromatography and mass spectrometry (LC-MS) technique. The dynamic, time-dependent relationships between the course of the disease and the levels of the identified potential candidate proteins were also analyzed. Recruitment A total of 61 subjects qualified for the study: 32 patients with newly diagnosed acute ischemic stroke (AIS), aged 18-80 years, hospitalized for this reason at the Clinical Department of Neurology, and 29 volunteers, demographically matched to the study group, hospitalized at the Clinical Department of Internal Medicine of the same hospital. Participants of the study group met all the inclusion criteria defined by the study protocol (Figure 1 shows a flow chart of the study and Figure 2 presents the experimental protocol). The exclusion criteria included: inability to obtain fully informed consent, no accurate history of past or coexisting diseases, lack of precise information on the pharmacotherapy used and on the duration of CNS ischemia symptoms, qualification for thrombolytic treatment or thrombectomy, and anemia or thrombocytopenia. Additionally, subjects were excluded with a history of vascular diseases of the nervous system, extensive head injuries, atrial fibrillation, cancer, chronic inflammatory diseases, active infections, chronic kidney disease (eGFR < 45 mL/min/1.73 m2), or the use of drugs potentially affecting the results of the study (antithrombotics, anticonvulsants, type 5 phosphodiesterase inhibitors, contraceptives, and hormone replacement therapy). LC-MS Proteomic Analysis The LC-MS analysis was performed at the Environmental Mass Spectrometry Laboratory of IBB PAS. Platelet proteins were extracted by incubation of the platelet deposit in a 1% sodium deoxycholate buffer solution in 25 mM ammonium bicarbonate, and then sonication in a water bath (10 cycles of 30 s) was performed to dissolve the proteins. The suspension was then centrifuged at 14,000× g for 15 min (Eppendorf Minispin). The obtained protein solution was reduced with 50 mM TCEP (30 min, 60 °C), alkylated with 200 mM MMTS (15 min, at room temperature), and digested with trypsin overnight (sequencing-grade modified trypsin, Promega V5111).
In the morning, the digestion was stopped by acidifying the samples with 2 L 10% trifluoroacetic acid (TFA) and the precipitated sodium deoxycholate was removed by centrifugation. Then, the concentration of the obtained peptides was estimated using direct detection (Millipore ® ). The resulting peptide mixtures were applied to an RP-18 pre-column (nano-ACQUITY Symmetry C18, Waters 186003514), as a mobile phase 0.1% TFA was applied in water. Then, the solution was applied to an RP-18 HPLC nano-column (nanoACQUITY BEH C18, Waters 186003545), using acetonitrile (5-35% AcN in 180 min) in the presence of 0.1% formic acid, with a flow rate of 250 nL/min. The column outlet was directly connected to the Velos Orbitrap ion source (Thermo Electron Corp., San Jose, CA, USA) with a change from MS (peptide mass measurement) to MS/MS (peptide fragmentation). To ensure the absence of cross-contamination by the previous samples, each time, empty tests were analyzed first. Subsequently, the data were processed by Mascot Distiller to verify hits in the Swiss-Prot database limited to the Homo sapiens sequence. Peptides with a Mascot Score above a threshold value corresponding to <1% FDR were identified as positive. The determined proteins were analyzed using the Diffprot ® software. Statistical Analysis The statistical analysis was performed using the Statistica 13.3 StatSoft ®. The presented data are expressed as an arithmetic mean with standard deviation (SD). The Mann-Whitney U-test or the Student's t-test, following the Shapiro-Wilk test and Levene's test as appropriate, were used to assess the significance of differences between the mean values and ANOVA followed by Tukey's test, or a Friedman test was used when more than two groups were investigated. The analysis of proteomic/peptidomic data were performed using the Diffprot ® software (by the laboratory performing the determination), as described in the previous section. Study Design Within 24 h after the onset of the first stroke symptoms, the patients underwent a detailed anamnesis, physical examination, and full evaluation by a neurologist under the current guidelines. The diagnosis was supported by an imaging of the central nervous system using a Siemens ® SOMATOM Definition 64-row dual-source CT scanner. Biological material (40 mL of venous blood) was non-traumatically collected (using a Sarstedt ® S-Monovette aspiration and vacuum kit) three times from subjects qualified for the study group: on the first (group A), third (group B), and seventh (group C) day of hospitalization, and from the control group once on the day of obtaining consent to participate in the project (group K) ( Figure 2). From the collected material, the aggregation tests and evaluation of platelet proteome using liquid chromatography with mass spectrometry (LC-MS) were performed. Moreover, blood serum was used to conduct biochemical tests (creatinine, eGFR, sodium, potassium, hsCRP, glucose, thyroid-stimulating hormone, bilirubin, hepatic enzymes, and lipid profile) in order to evaluate the cardiovascular risk, following the 2016 European Society of Cardiology (ESC) guidelines. Whole blood was obtained to determine the complete blood count (CBC) and plasma was used to determine coagulation parameters. The biochemical tests, coagulation, and CBC were performed in a certified hospital laboratory using the following analyzers: Sysmex ® (XT-4000i), Siemens ® (The Dimension ® EXLTM), and Thermo Fisher Scientific ® (Konelab 20 Clinical Chemistry Analyzer). 
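As a concrete illustration of the statistical workflow described in the Statistical Analysis section above (normality and variance checks deciding between parametric and non-parametric two-group tests), a minimal Python sketch is given below. It uses SciPy rather than Statistica 13.3, so it is an assumed re-implementation for illustration only, not the software actually used in the study, and the sample values are made up.

```python
# Illustrative re-implementation (assumption: SciPy instead of Statistica 13.3)
# of the two-group comparison rule described above: Shapiro-Wilk for normality
# and Levene's test for equal variances decide between Student's t-test and the
# Mann-Whitney U-test.
from scipy import stats


def compare_two_groups(a, b, alpha=0.05):
    """Return (test_name, p_value) for two independent samples a and b."""
    _, p_norm_a = stats.shapiro(a)
    _, p_norm_b = stats.shapiro(b)
    _, p_levene = stats.levene(a, b)
    if p_norm_a > alpha and p_norm_b > alpha and p_levene > alpha:
        _, p = stats.ttest_ind(a, b)
        return "Student's t-test", p
    # Otherwise fall back to the non-parametric alternative.
    _, p = stats.mannwhitneyu(a, b, alternative="two-sided")
    return "Mann-Whitney U-test", p


if __name__ == "__main__":
    # Toy example with made-up platelet protein intensities (illustrative only).
    stroke = [1.8, 2.1, 2.4, 1.9, 2.2, 2.6]
    control = [1.2, 1.5, 1.1, 1.6, 1.3, 1.4]
    print(compare_two_groups(stroke, control))
```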
Platelet Preparation Whole blood with sodium citrate was supplemented with a solution of prostacyclin (PGI2 in Tris-Cl buffer, pH 9.0, at a concentration of 1 mg/mL) to a concentration of 0.06 µg/mL of whole blood, which prevented spontaneous platelet aggregation. To obtain platelet-rich plasma (PRP), the solution was centrifuged for 20 min at 230× g. PGI2 at a concentration of 0.3 µg/mL was added to the PRP, which was centrifuged at 1000× g for 10 min at 21 °C to obtain platelet-poor plasma and the platelet sediment. The plasma was then removed, and the remaining platelets were washed three times with 1 mL of Ca2+-free Tyrode's-HEPES buffer (H-T buffer), pH 7.4. After the last rinse, the obtained precipitate was brought up to a volume of 3 mL with H-T buffer, pH 7.4. The obtained suspension was analyzed for platelet count and for contamination with white blood cells (WBCs) and red blood cells (RBCs) in a Sysmex® device (Clinical Laboratory Department, University Hospital, Wroclaw, Poland). The pure platelet (PLT) suspension was brought to a final concentration of 2.5 × 10^8/mL with Tyrode's-HEPES buffer, pH 7.4, and centrifuged at 16,000× g for 10 min at 4 °C. The material was then stored at −80 °C in Eppendorf® tubes until the proteomic determinations were performed at the Environmental Mass Spectrometry Laboratory operating at the Institute of Biochemistry and Biophysics of the Polish Academy of Sciences (IBB PAS). Bioethics Statement All procedures in the study protocol were investigated and approved by the Local Bioethical Committee (Approval number: KB-371/2018). Participants, after learning about the study procedures, signed a written informed consent which was previously verified by the bioethical committee. The project is consistent with the principles of the Declaration of Helsinki (Seventh Revision, 64th World Medical Association meeting, Fortaleza, 2013). Demographic, Clinical, and Biochemical Characteristics of the Investigated Population There were no differences in the basic demographic characteristics or co-morbidities between groups, including hypertension, coronary artery disease, and diabetes mellitus (Table 1). None of the subjects had chronic kidney disease with eGFR < 45 mL/min, as CKD was an exclusion criterion in this study. In the CBC, a significantly lower mean platelet volume (MPV) and significantly higher mean corpuscular hemoglobin (MCH), white blood cell (WBC), and neutrophil values were observed in subjects in the acute phase of ischemic stroke. The biochemical analysis in the acute stroke group (A) showed significantly higher values of TSH and glycemia, together with a significantly lower serum potassium concentration, as compared with the control group; however, all of these values were within the normal range. Proteomic Analysis During the examination of the platelet proteomes, we identified several proteins with concentrations that differed significantly among groups (Table 2). The differences between platelets 24 h after the first symptoms of stroke and the control group included: 1. Platelet basic protein. The differences in the concentrations of interferon regulatory factor 7 (IRF7) and thymidine phosphorylase TYMP-4 persisted for the next three days from the onset of stroke (a significant difference between group B and the control). The dynamic changes in platelet protein concentration in the course of stroke involved: 1. Filamin A, filamin B, filamin C (group A vs.
group B, following the first two days from the onset of the stroke) 2. Thrombospondin-1, thrombospondin-2 (group A vs. group C, following one week from the onset of symptoms). It is noteworthy for both thrombospondin types that significant changes in their concentrations were also observed between control and the stoke subjects at one week from the onset of symptoms. *-coefficient q statistically significant (q < 0.05); q-test probability; A-blood collected on day 1 of hospitalization; B-blood collected on day 3 of hospitalization C-blood collected on day 7 of hospitalization; K-blood collected from the control group; K vs. A-difference at the beginning of observation; K vs. B-difference after a three-day observation; K vs. C-difference after a seven-day observation; A vs. B-the difference between the first and third day of hospitalization; A vs. C-difference between the first and seventh day of hospitalization. The bold text highlighet in red presents the significant differences. Discussion This is the first human dynamic LC-MS proteomic study to evaluate the changes in human platelet proteome and peptidome in the acute phase of ischemic stroke using a highly reproducible LC-MS technique. Demographic and Clinical Characteristics The study groups were similar regarding age, sex, as well as occurrence of comorbidities including hypertension, coronary artery disease, and diabetes mellitus. The higher WBC and neutrophiles in the complete blood count could be attributed to the acute phase reaction in the first 24 h following the onset of stroke. The higher plasma glucose levels on admission in the stroke group could reflect a non-fasting condition or sympathetic activation on admission, as the prevalence of diabetes was similar in both groups. Nevertheless, assessment of the HbA1c in both groups, which would clarify this issue, was missing in some subjects, therefore, it was not considered in this study. Some studies have indicated that the level of thyroid stimulating hormone may be associated with the occurrence and prognosis of an acute stroke. Moreover, elevated TSH levels in subclinical hypothyroidism did not increase the risk of stroke and were likely to be associated with a better prognosis after an ischemic event, especially if the hypothyroidism appeared before the onset of the disease. Nevertheless, the serum TSH concentration in the acute ischemic stroke as a single parameter is difficult to interpret, as it is strongly dependent on a patient's age. Proteomic Analysis Platelet secretion of thromboxane A2, adenosine diphosphate (ADP), matrix metallo proteinase-9 (MMP-9)-following their activation-promotes thrombus formation in a positive feedback loop, resulting in uncontrolled secretion of preformed platelet proteins and peptides from platelet cytosol and granules. The platelet releasate contains the secreted inflammatory and vasoactive molecules, including granules or micro-vesicles; -granules are the most abundant and contain both the membrane-bound proteins as well as soluble proteins. Thrombosis and mechanisms of platelet-mediated inflammation require close interaction of platelets with endothelial and immune cells, as well as with the extracellular matrix. Activation of the endothelial cells, local hypoxia, and subsequent lactic acidosis promote thromboinflammation. Membrane proteins are expressed and comprise integrins, adhesive glycoproteins, and other granule membrane-specific receptors. 
The content of -granules secreted through special surface-connected channels, open canalicular system (OCS) and SNARE is the core of the fusion machinery. Proteomic studies have suggested that hundreds of soluble proteins are released by -granules from activated platelets and many of them are present in plasma with differences in structure or function. Moreover, antiplatelet drugs have been demonstrated to modify the content of the platelet releasate, which could determine their modulative role on the platelet paracrine function, in addition to their anti-aggregatory function. Hence, we believe that some of the identified proteins could be released from platelets in the acute phase of stroke in response to their activation and developing neuro-thromboinflammation. As far as the literature is concerned, there are few reports describing platelet proteins in the acute phase of ischemic stroke (AIS). One of them, performed by Cevik et al., analyzed the difference in PLT proteome in blood collected within 24 h after occurrence of stroke symptoms as compared with a control group of the same size, and demonstrated the presence of 83 proteins that differed significantly between the groups. In our study, the material obtained from 32 patients after the ischemic incident and 29 volunteers from the control group were analyzed. Additionally, the blood was collected not only at the first day of hospitalization, but also after three and seven days from the ischemic incident. Then, the results were compared in time intervals (first, third, and seventh days of hospitalization) within the study group. The identified proteins with concentrations that varied significantly are potential candidates for biomarkers of an acute ischemic stroke and may reflect the cascade of events related to its evolution over time. With this approach, the study provides a more accurate description of the cascade of changes in the platelet proteome as compared with those of previous studies. In the current study, we observed an elevated concentration of intraplatelet -amyloid protein (APP) in the AIS as compared with the control group. This is in line with some previous reports that showed the presence of -amyloid protein in platelet -granules, which could be a source of up to 90% of plasma circulating APP. Moreover, platelet activation has been shown to result in a three-fold increase in APP expression on their surface. It has been shown that APP, via positive feedback, can induce platelet aggregation, thus, initiating a cycle based on continuous activation and its release from the platelets. This theory was supported in a study by Ming Y. Shen et al., where -amyloid stimulated the platelet signaling pathway leading ultimately to their aggregation. The tests performed on gerbils showed the connection between increased accumulation of APP in neurons and their exposure to ischemia. Given the above, an increase in -amyloid protein plasma concentration could potentially be a useful indicator of the acute phase of ischemic stroke, but its broad role in the development of CNS degenerative diseases may significantly reduce the specificity of its determination. However, as in the case of troponin determination in the acute phase of myocardial infarction, it may be clinically relevant to prove an increase or decrease in APP concentration as an indicator of CNS ischemia. 
Analogous observations to those of APP in our study concerned the APP-like protein 2 (APLP-2), i.e., its intraplatelet concentration was elevated in the AIS as compared with the control group. Due to their common origin, they have been assigned similar functions: transport, intracellular signaling, and apoptosis. In pathophysiology, they have been associated with the induction of trophic changes in neurons and synapses. Because of the small amount of information on the family of amyloid-like proteins and their role in stroke, similarly as in the case of APP, further research is necessary to establish their exact role in this group of patients. The proteomic analysis showed that the concentration of coactosin-like protein (COTL) was lower in the control group as compared with acute CNS ischemia. Its function is currently the subject of numerous studies. A correlation between decreased coactosin-like protein concentration or its absence in platelets and impaired glycoprotein GPIb function was shown, which could be directly translated into prolonged bleeding time and corresponding protection against blood clot formation. In addition, a reduction in coactosin-like protein levels resulted in a reduction in leukotriene formation, and consequent reduction in proinflammatory activity. Supporting this hypothesis, Inge Scheller et al. showed that the lack of this protein did not affect the number and activation, but it reduced the formation of platelet aggregates on collagen and their adhesion to vWF in in vitro tests. It is possible that lowering platelet COTL concentration in patients with stroke could have a protective effect against further clot formation and aggravation of the ischemic changes. Considering the above characteristics, the possibility of using COTL as a stroke biomarker remains questionable, however, its function makes it a potential new drug target in primary and secondary AIS prevention. Interestingly, we have shown dynamic changes in the expression of the thymidine phosphorylase TYMP-4 during ischemic stroke, which was significantly higher as compared with the controls. We postulate that TYMP-4 might become a platelet therapeutic target in humans, reducing the extent of ischemic penumbra in the acute phase of ischemic stroke. Thymidine phosphorylase is also known as a platelet-derived endothelial growth factor, the presence of which has been described in human thrombocytes, monocytes, and macrophages (including astrocytes). This protein promotes angiogenesis in vivo, stimulates the growth and chemotaxis of endothelial cells in vitro, regulates platelet hemostasis, which in turn leads to the development of cardiovascular diseases, including ischemic stroke. Under physiological conditions, TYMP-4, together with VEGF, are responsible for the continuity of the blood-brain barrier. Noteworthy, there are several reports that TYMP expression is increased in post-ischemic neurons and may protect them from ischemia-reperfusion injury. Recently, a growing body of evidence points at TYMP-4 as a potential drug target, and some animal studies have shown that its inhibition may provide a novel effective and safe therapeutic strategy. 
Since the TYMP-4 inhibitor molecule tipiracil is already being used in clinical practice (in the chemotherapy of colorectal cancer as an adjuvant that inhibits the disintegration and increases the concentration of the chemotherapeutic nucleoside analogue, trifluridine), we started the future direction of our study on the usefulness of TYMP-4 by tipiracil in reducing the extent of brain ischemia reperfusion injury in an animal model. Nevertheless, to the best of our knowledge, our study is the first to demonstrate the dynamic and time-dependent upregulation of platelet TYMP-4 in the acute phase of human stroke as a "real-life" clinical scenario, which translates the experimental results from bench to the bedside. In the current study, a higher concentration of interferon regulatory factor 7, the key IFN type I transcription regulator, was observed in the platelet proteome of subjects with AIS as compared with the control. Statistically significant results were present when the control group was compared with the study group on the first and third day of hospitalization. There is only limited information available in the literature on the role of IRF-7 in ischemic stroke. Scientific reports have connected the occurrence of these incidents with the development of a proinflammatory response involving interferon regulatory factors. This response initiates locally at the site of ischemia, and then the inflammatory mediators spread throughout the entire body. Moreover, in a study conducted on mice, Stevens et al. demonstrated that the presence of IRF-3 and IRF-7 was necessary to induce TLR-dependent (Toll-like receptors) neuroprotection. On this basis, these two factors were identified as key mediators in the neuroprotective genomic program, which ultimately led to reduced damage during ischemic stroke. The reason for the higher concentration of IRF-7 in platelets in our study group still needs further explanation. This may be related to excessive use and requirement for interferon regulatory factor 7 in the acute phase of ischemia, which would be associated with its potential neuroprotective and proinflammatory function. Therefore, the elevated concentration of platelet IRF-7 may be be a biomarker that could be potentially useful for establishing a diagnosis of AIS, but also as a factor indicating better prognosis. Some data have confirmed the relationship between reduced plasma concentration of protein S, a non-enzymatic element of hemostasis that regulates the activity of protein C, and the occurrence of ischemic stroke. In studies conducted on mice, it was found that the injection of protein S had a neuroprotective function, reducing the size and extent of ischemic lesions. There is a single report that has proved that platelet -granules contained protein S, the release of which was triggered by the presence of thrombin. In addition, protein S could bind to the stimulated platelets, which consequently activated the anticoagulant activity of activated protein C on their surface. In this study, we observed an increased concentration of protein S in platelet proteome in the acute phase of ischemic stroke as compared with the control. Based on previous reports, it can be considered that the expression of protein S increases in platelets during cerebral ischemia to activate its anticoagulative and neuroprotective properties. 
Therefore, it can be concluded that an increase in protein S concentration in platelets may indicate the presence of ischemic changes, as well as the occurrence of actions aimed at reducing the neurological deficit after a stroke. Determination of platelet protein S in AIS may not be useful to confirm the diagnosis, but the finding of its low concentration may be an indication to administer exogenous protein S to reduce ischemic penumbra. We observed a decrease in the platelet concentration of filamins between the first and third day after an acute ischemic stroke. Filamins belong to the family of actin-binding proteins that serve as a scaffold for signaling proteins and connect to the cytoskeleton. Among these peptides, there are three FLN isoforms, i.e., A, B, C, which come from three different homologous genes. It has been shown that FLN mutations in platelets can lead to impaired activation of integrin IIb3 or, on the contrary, by increasing its activity, can lead to an increased risk of thrombosis. It has also been found that they are involved in both the production and activation of platelets. Due to the limited information available so far, it can only be speculated that changes in the concentration of filamins may be a potentially useful indicator of the acute phase of stroke, or that they are a consequence of the introduction of antiplatelet therapy. This issue requires further validation and clinical trials on a larger group of patients. In this study, significant increases in TSP-1 and TSP-2 concentrations were observed on the seventh day of hospitalization. Thrombospondins play a variety of functions including participating in the aggregation of platelets, affecting endothelial cells and smooth muscles, and playing a role in regulating angiogenesis ; the earliest discovered is thrombospondin-1 (TSP-1), which is mostly released from the platelet -granules following their activation. The thrombospondin group has also been assigned an important role in the development and regeneration of nervous system cells. In rat studies, the TSP was isolated from embryonic tissue. Moreover, an increase in its concentration was found after damage to the nervous tissue of the brain of these animals with cainic acid. Based on the cited study, it can be concluded that this protein contributes both to the formation of nerve cells during embryonic development and their regeneration after pathological incidents. A study by Mller et al. showed an increase in thrombospondin concentration from four to seven days after a facial nerve injury, which overlapped the peak of the maximum post-traumatic proliferation of the microglia. During an AIS, increased expressions of TSP-1 and TSP-2 at the level of mRNA and protein, which exhibited different profiles of temporal expression, were confirmed in a study by Navarro-Sobrino et al., where increased thrombospondin-2 levels on admission in patients with ischemic stroke and increased thrombospondin-1 levels two hours after treatment with tissue plasminogen were identified. In addition, an ischemic stroke is followed by intensified angiogenesis, especially in the penumbra region, which also involves proteins from the thrombospondin group. Based on the cited and our results, it can be postulated that TSPs have a protective function after the occurrence of a stroke and determination of their intraplatelet concentration may indicate the beginning of nervous tissue regeneration processes or the effectiveness of reperfusion treatment. 
In addition, the available literature also indicates that these proteins can act as biomarkers for predicting long-term prognosis. In a study by Gao et al., higher TSP-1 concentrations in stroke subjects were associated with shorter six-month survival. Nevertheless, there are reports that thrombospondins may exacerbate negative effects by reducing tissue reperfusion. Isenberg et al. demonstrated that endogenous TSP-1 limited the recovery of tissue perfusion after ischemic damage in the skin flaps of mice. In order to more precisely assess the roles of thrombospondin-1 and thrombospondin-2 and their effects on the prognoses of stroke patients, longer prospective observation following the onset of AIS is needed.

In the present study, increased concentrations of peptides belonging to the family of histone proteins were observed in the platelets of subjects with stroke: H2A type 1, H2A types 1-A, 2-B, and J, and the variants H2A.V, H2A.Z, and H2A.X. The basic role of histone proteins is the regulation of gene transcription. They associate with cellular DNA in nucleosomes, which are physiologically located in cell nuclei. Extracellular histone proteins, isolated from plasma, may originate from dying cells or be actively secreted during inflammation. Their elevated concentrations have been detected in inflammatory (including autoimmune), ischemic, and neoplastic diseases. They can play a proinflammatory and toxic role, as demonstrated by Xu et al. Moreover, it has been shown that human histone proteins cause fibrinogen binding, the release of von Willebrand factor, and increased P-selectin exposure, and exert a pro-aggregation effect; among the proteins of this group, the strongest prothrombotic functions are performed by histone H4. There are no other reports on the dynamics of changes in the concentration of histone proteins in platelets during an acute ischemic incident. Based on our data, it can be postulated that the concentrations of intraplatelet histones might be used as biomarkers of AIS, facilitating diagnosis and predicting disease processes associated with the activation of the immune system.

In our study, significantly higher concentrations of platelet basic protein (PBP) were observed in subjects with stroke, which is consistent with the results of Rex et al., who demonstrated an increased release of PBP during thrombin activation of platelets. The few reports on platelet basic protein indicate its multidirectional effects: it participates in the development of inflammation, acts as a bactericide, and contributes to angiogenesis, hemostasis, and clot formation. After posttranslational modification, PBP gives rise to, among others, connective tissue-activating peptide III (CTAP-III), neutrophil-activating peptide 2 (NAP-2), β-thromboglobulin, and thrombocidin. The available literature indicates that PBP has proinflammatory properties, while the closely related platelet factor 4 (PF4) plays a role in hemostasis and thrombosis. Based on these studies, it can be assumed that the demonstrated increase in PBP concentration in platelets may be a consequence of ongoing pathological processes, including ischemia and secondary inflammation. As this effect decreases only slightly over the first seven days after an AIS, changes in platelet PBP concentration may be a potentially useful indicator not only of the acute phase of ischemic stroke, but may also confirm a CNS ischemic incident in the more distant past.
Further studies on a larger group of patients are needed to clarify the usefulness of PBP as an indicator of previous CNS ischemia.

Conclusions

In this first human dynamic LC-MS-based study on ischemic stroke, several qualitative and quantitative differences in platelet proteomes were found as compared with the control group, as well as within the group of stroke subjects over the course of the acute phase of stroke. The identified proteins may act as potential biomarkers or indicators of a high risk of an ischemic incident. The changes in the platelet proteome observed in patients with ischemic stroke during hospitalization point, in turn, to their relationship with the disease dynamics. These proteins may serve as prognostic factors, indicating the timing of the ischemic episode and the patient's prognosis, and may also help to determine the risk of stroke occurrence. In addition, some of the proteins might be considered potential targets for future drugs with neuroprotective effects. Thymidine phosphorylase 4 (TYMP-4) holds promise as an interesting drug target in the management or prevention of ischemic stroke; however, future studies are needed to demonstrate its exact role in the pathophysiology and its pharmacological management.

Informed Consent Statement: Informed consent was obtained from all subjects involved in the study.

Data Availability Statement: The original data used to support the findings of this study are available from the corresponding author upon request.

Conflicts of Interest: The authors declare no conflict of interest.
It is known for a user to log into accounts on networks, Internet sites, software and web applications, cellular phones and the like by inputting identification details through a keypad or keyboard. For example, financial institutions provide Internet banking services where users are required to enter a username and password via a keyboard to access their account information, transfer funds, pay bills, and the like. Even automated teller machines (ATMs) require user identification and a password in the form of a PIN.

Entering usernames and passwords using a keyboard over networks, and particularly public networks such as the Internet, involves some risk to users. Third parties have devised various schemes to gain unauthorised access to usernames and passwords, for example through keyboard logging, skimming devices, password guessers and phishing. Keyboard logging is the practice of recording the keys struck on a keyboard, typically in a covert manner so that the person using the keyboard is unaware that their actions are being monitored. This is typically performed by installing software programs on a user's computer, unbeknown to the user. Skimming devices are connected to computer hardware, for example an automated teller machine, and collect information from a user as it is inputted into the keypad. For example, a skimming device connected to an automated teller machine may collect account details, usernames, passwords and PINs of an institution's customers, again unbeknown to the customer. Password guessing programs enable automated attempts at guessing a user's password, for example by running through the entirety of words in a dictionary at very high speed. Phishing is the process of attempting to acquire sensitive information, such as usernames, passwords and credit card details, by masquerading as a trustworthy entity in an electronic communication. Typically, users are sent an email which appears to be a legitimate email from a trusted institution and which asks the user to click through a link and enter their username and password. The link does not, however, take the user to the legitimate institution website but rather to a false website operated by a third party, thereby allowing the third party to obtain the username and password of the user.

Due to the inherent security risks, many customers refuse to engage in Internet or phone based transactions. This is an inconvenience not only to the customer but also to the institution, which cannot implement its entire business solely online if desired.

Existing attempts to solve the above security problems focus on preventing installation of such devices and software; however, they do not assist when such devices and software are successfully installed, nor do they assist in preventing all of the above security threats. Password guessing programs can be defeated by account lock-out mechanisms, but these are often not used on networks because of capacity and user database management constraints. Where they are used, they are typically set to low tolerances to avoid the customer being inadvertently inconvenienced. Digital certificates are used to prevent unauthorised use of usernames and passwords, but in high-end applications this does not prevent access if the device security is breached or the digital certificate is stolen. Digital certificates also require a significant degree of skill by the end user to implement, often to such an extent that the assistance of a technician is required for most users.
This renders the use of digital certificates for Internet and WAN based applications cost prohibitive, as significant help desk resources are required by the institution, as well as higher costs for the end user in obtaining onsite technical assistance. Digital certificate management is further complicated by the evolving number of operating systems with which the digital certificate must be tailored to work. This will become even more apparent with the wider use of cellular network enabled devices to access the Internet, each with their own proprietary operating systems.

One currently available system which was designed to prevent phishing attacks is known as "SiteKey". SiteKey is a web-based security authentication system which asks a series of identity questions to increase security. A user identifies him/herself to a website by entering his/her username (but not password). If the username is valid, an image and accompanying phrase is displayed which has been previously configured by the user. If the user does not recognise the image and phrase as his/her own, the user assumes the site is a phishing site and abandons the login. If the user does recognise the image and phrase, the user may consider the site authentic and proceed with the login process. However, weaknesses have been found in the SiteKey system. Most importantly, it offers no immunity against some of the most common phishing scenarios: it compromises user privacy by requiring users to disclose personal information in response to the questions, it is susceptible to man-in-the-middle attacks, and it allows bulk harvesting of usernames. It has also been found that users are prone to provide their login credentials even in the absence of the SiteKey image and phrase appearing. Accordingly, it has not been entirely successful and has in some cases led to increased incidents of identity theft, because personal information is exposed and the phisher can still elicit information from targets relatively easily.

There is accordingly a need to improve the security afforded to a user when entering login credentials to access a user account, to inhibit at least some of the above-described security threats.
import datetime
from typing import Optional

### Pipe is Meerschaum's central pipe abstraction, importable from the top-level package.
from meerschaum import Pipe


def _binary_fetch(
    pipe: Pipe,
    begin: Optional[datetime.datetime] = None,
    end: Optional[datetime.datetime] = None,
    debug: bool = False,
    **kw
):
    """
    Fetch only the date ranges that are missing from the target instance.

    Compare the remote (source) and local (target) row counts and recursively
    bisect the [begin, end) window to locate gaps, then fetch and concatenate
    just those gaps. Return a DataFrame of the missing rows, or None if the
    target is already up to date.
    """
    from meerschaum.utils.packages import import_pandas
    pd = import_pandas()

    ### Row counts over the whole requested window, remote vs. local.
    source_rowcount = pipe.connector.get_pipe_rowcount(
        pipe, remote=True, begin=begin, end=end, debug=debug,
    )
    target_rowcount = pipe.instance_connector.get_pipe_rowcount(
        pipe, begin=begin, end=end, debug=debug,
    )

    ### Nothing to do: the target already matches the source.
    if source_rowcount == target_rowcount:
        return None

    ### Empty target or an unbounded window: fall back to a plain fetch.
    if target_rowcount == 0 or begin is None or end is None:
        return pipe.fetch(begin=begin, end=end, debug=debug)

    ### Stop bisecting once an interval is shorter than this threshold.
    threshold = datetime.timedelta(hours=24)
    intervals = []

    def find_intervals(_begin, _end):
        """Recursively halve the interval and record the ranges with missing rows."""
        _target_rowcount = pipe.instance_connector.get_pipe_rowcount(
            pipe, begin=_begin, end=_end, debug=debug,
        )
        _source_rowcount = pipe.connector.get_pipe_rowcount(
            pipe, remote=True, begin=_begin, end=_end, debug=debug,
        )

        ### This sub-interval is already complete.
        if _target_rowcount == _source_rowcount:
            return

        ### The target has nothing here: fetch the whole sub-interval.
        if _target_rowcount == 0:
            intervals.append((_begin, _end))
            return

        if _begin is None or _end is None:
            return

        ### Small enough to fetch directly rather than bisect further.
        _interval = _end - _begin
        if _interval < threshold:
            intervals.append((_begin, _end))
            return

        ### Recurse into the two halves of the interval.
        find_intervals(_begin, _end - (_interval / 2))
        find_intervals(_begin + (_interval / 2), _end)

    find_intervals(begin, end)

    ### Fetch each missing interval and combine the results.
    fetched_dfs = []
    for _begin, _end in intervals:
        fetched_dfs.append(pipe.fetch(begin=_begin, end=_end))

    return pd.concat(fetched_dfs) if fetched_dfs else None
Income support for parents of children with chronic conditions and disability: where do we draw the line? A policy review

Objective The aim of this review was to identify and describe whether parents who have had to stop paid employment to care for a child with a chronic condition or disability are eligible for unemployment, family and children, and disability and carer government-provided financial benefits.

Design Policy review.

Setting Group of seven high-income countries.

Main outcome measures All policies related to unemployment, family and children, and disability and carer benefits were included. Information regarding the policy type and description, parent/carer qualification, amount of financial support payable, eligibility criteria and information source was extracted. Payment schedules were converted into 2020 US dollars using Purchasing Power Parities. Maximum monthly benefit payments were compared with standardised per capita monthly costs of living to determine payment support suitability.

Results Fifty-eight policies relevant to unemployment, family and children, and disability and carer benefit supports were identified. Germany had the highest number of welfare policies for individuals not in employment (n=11), followed by the USA (n=6). Parents or carers of children with chronic conditions or disability who were not in employment qualified for 31 of the 58 policies (53.4%). Most policies required a child to have an impaired ability to function, not just a chronic condition or disability.

Conclusions Greater support for parents and carers to continue their paid employment alongside caring responsibilities is necessary. Graded benefit schedules will also be critical to supporting the spectrum of childhood chronic conditions and disability, and the subsequent spectrum of caring responsibility. International comparisons of income support policies across seven nations show that unemployed parents or carers of children with chronic conditions or disability qualified for 31 of the 58 policies identified. Most policies required a child to have an impaired ability to function, not just a chronic condition or disability.
/** * restyleForInlineText.tsx * * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT license. * * When a ReactXP component appears as a child of an RX.Text, it needs to be styled * specially so that it appears inline with the text rather than introducing line * breaks. * * This utility restyles the component that is passed to it as inline so it flows * with the text. When a ReactXP component is a child of a text, pass the return value * of its render method to this utility. See RX.View for an example. */ import * as React from 'react'; import assert from '../../common/assert'; import { assign } from './../utils/lodashMini'; function restyleForInlineText(reactElement: React.ReactElement<any>) { const style = reactElement.props.style; assert( style && style.width !== undefined && style.height !== undefined, 'Children of an <RX.Text> must have a defined height and width' ); /* We'll use display: inline-block for inline element because inline-flex will introduce new lines into the text that is copied to the clipboard. In most our use-cases inline-block is fine and should behave same as inline-flex. Example: <flex> <inline> // InlineView <inline text node> <inline-flex> // set by this function - will introduce new line when copied <inline transparent text with emoticon text representation> <inline-block sprite clipping element> <inline-block sprite background texture> <inline text node> result into selection: "[text node][transparent text][new line][text node]" with inline-block this will properly resolve as "[text node][transparent text][text node]" */ return React.cloneElement(reactElement, { style: assign({}, style, { display: 'inline-block', // Reset the line height so the value from outside // the inlined item doesn't cascade into this element. lineHeight: 'normal' }) }); } export default restyleForInlineText;
High photodegradation performance of ZnO nanoparticles supported on porous Zeolite Na-A: Effects of ZnO loading

Zeolite Na-A supported ZnO nanocomposites (ZnO/Zeolite Na-A NCs) were synthesized at low temperature (70 °C) via the sol-gel process and characterized by X-ray diffraction (XRD), scanning electron microscopy (SEM), transmission electron microscopy (TEM), UV-visible diffuse reflectance spectroscopy (UV-vis DRS) and Fourier transform infrared (FTIR) spectroscopy for structural, morphological, optical and bonding properties. The textural properties and porosity were obtained by the Brunauer-Emmett-Teller (BET) technique. The XRD and microscopy results indicated that the obtained nanopowders were crystalline in nature and that there was no collapse of the structure of zeolite Na-A. In addition, the synthesized ZnO nanoparticles occurred mainly on the surface of the zeolite support. The zeolite-supported ZnO nanoparticles were more dispersed than pure ZnO, with improved porosity and a high surface area. The photocatalytic activity of ZnO/zeolite Na-A was greatly increased, which was attributed to the synergetic effects of combining ZnO with the zeolite aluminosilicate network, such as increased surface area (SBET), high adsorption and restrained charge recombination.

Introduction

Recycling of waste-water is one of the commonly used methods for alleviating the perennial water shortage in many parts of the world. Many of the well-known causes of water pollution are natural biological pollutants such as human and animal waste, which harbour micro-organisms, most of them protozoa and bacteria. Some agricultural practices, such as improper use and disposal of farm fertilizers and chemical sprays, may also contribute to the pollution of water. However, the greatest challenge comes from heavy industrial use of water, resulting in widespread pollution from industrial chemical wastes such as dyes, heavy oils, acids and bases. In order to remove these persistent pollutants, advanced oxidation processes (AOPs), which are driven by powerful non-selective oxidizing radicals such as •OH, have been developed. The advantage of the AOPs is that they have the ability to convert organic compounds to less toxic molecules and, in an ideal situation, may lead to complete mineralization of the pollutant molecules into CO2 and H2O. Among the AOP techniques, semiconductor heterogeneous photo-oxidation catalysis has proven to be a very efficient and cost-effective procedure for the removal of stable, harmful organic and inorganic compounds, including inorganic dyes. These heterogeneous photocatalysts trigger the formation of the active oxidizing radicals through the absorption of photon energy at ambient conditions, without generating harmful by-products. Heterogeneous photocatalysts are also advantageous in that they can be recycled, thus reducing the amount of material used and hence the cost of water treatment. Among the semiconductor heterogeneous photocatalysts, ZnO is one of the most popular for the removal of organic molecules since it is effective, inexpensive and non-toxic. Further advantages include high photon absorption in the UV. However, ZnO has inherent challenges: being a wide-bandgap material (Eg = 3.37 eV), its absorption spectrum is limited; it is relatively unstable and dissociates easily in acidic media; it has poor adsorption; and it is prone to particle agglomeration.
These aggregations and agglomerations minimise the active photocatalytic surface, reducing the performance or in some cases leading to a complete loss of photocatalytic ability. In order to circumvent such limitations, attempts have been made to immobilize the catalyst on a suitable solid support such as a zeolite network, fibre glass or carbon. In such cases, the ZnO particles are embedded in the walls or on the surface of the support material and thus maintain a suitable distribution; the net result is increased porosity of the ZnO in the ZnO/support material. In addition to the improved porosity, the catalyst recovery process is eased by the use of the zeolite support. Among the above-mentioned ZnO support materials, zeolites are the most preferred because of their extraordinary structures, uniform pores and straight channels, which lead to high adsorption capacity. Their lattice discontinuity points create uncompensated negative charges which, in conjunction with OH- and H2O, lead to a unique ion exchange property. Zeolites exist naturally or can be obtained synthetically and are classified according to their structure into various forms such as A, X, Y, ZSM-5 and clinoptilolite. Among these, single-phase zeolite-A is highly preferred because of its large cation exchange capacity and large number of acid sites arising from its 8-ring, 6-ring and 4-ring channel structure, with the largest cavity dimension measuring 0.41 nm x 0.41 nm. Besides, zeolite-A has significant adsorption and excellent water-holding capacity in its free channels.

Immobilization of ZnO on zeolite has previously been studied by various researchers. Hadjltaief et al. worked on ZnO supported on clay obtained through the sol-gel process; based on their results, the ZnO nanoparticles were well dispersed on the clay surface, which led to improved photocatalytic performance. Jagannatha et al. worked on a ZnO/zeolite nanocomposite for the photocatalytic elimination of benzophenone and caffeine, which revealed increased photodegradation of >80%, compared with that of the zeolite (about 10%) or pure ZnO. Similar results pertaining to zeo/ZnO were also reported by Marrero et al. This implies that immobilization of ZnO NPs on a zeolite support can effectively increase the surface area, leading to an increase in the exposed active sites for photodegradation. Zeolites further increase the photocatalytic activity, unlike other forms of support material, because the zeolite itself can enhance the photocatalytic efficiency by minimizing electron/hole recombination. Moreover, the •OH radicals formed on the surface of ZnO can easily be transferred onto the zeolite surface. Even though zeolite-supported ZnO has been studied previously, several aspects such as the effect of pH, annealing temperature and type of precursors have not been exhaustively investigated.

In this research work, we focus on ZnO nanoparticles synthesized via the sol-gel technique and supported on zeolite type A (zeolite Na-A). The specific objective of the study is to report on different ZnO loadings and their effects on the various properties of the ZnO/zeolite-A catalyst that contribute to increased photocatalytic performance. The ZnO/zeolite catalyst nanopowders were synthesized via a slightly modified sol-gel method and analyzed by XRD, SEM, TEM, BET and UV-vis DRS. The catalyst's pollutant-removal ability was tested by the photodegradation of methylene blue dye as a model pollutant.
Experimental details

2.1 Precursors, solvents and support material

During the synthesis process, analytical-grade zinc acetate dihydrate, 99.9% pure (Merck), was used without further purification as the precursor for ZnO, whereas 2-isopropanol was the solvent during the sol-gel process. Monoethanolamine (99% pure) was used as a stabilizing agent during the hydrolysis stage. Further materials used in this work included synthetic zeolite Na-A supplied by Sigma-Aldrich, with the chemical formula Na12[(AlO2)12(SiO2)12]·27H2O and a pore opening of 4 Å, which was used as the support framework for the zeolite-supported ZnO. Methylene blue dye of molecular formula C16H18ClN3S·nH2O, n = 2-3 (concentration = 20 mmol/L) (Merck) was used as the model pollutant. Analytical-grade reagents for the preparation and for the scavenger studies included sodium hydroxide (NaOH), potassium permanganate (KMnO4), hydrogen peroxide (H2O2) and ascorbic acid (C6H8O6), all supplied by Merck Chemicals.

Preparation of ZnO sol

A stock solution was prepared as follows: zinc acetate dihydrate, 6.29 g, was dissolved in 2-isopropanol (100 ml) by magnetic stirring for 30 minutes inside a beaker. This was followed by the drop-wise addition of 1.73 ml of monoethanolamine until a clear solution was achieved, followed by further stirring for 1 hour at 70 °C. Monoethanolamine acted as the stabilizing agent. ZnO sols of varying ZnO concentrations (0.5, 0.75, 1.0, 1.5, and 2.0%) were prepared from this stock solution.

Preparation of the ZnO/Zeolite Na-A NCs

The procedure applied for the Zn2+ ion exchange in the zeolite was adopted, with slight modification, from the literature and was as follows: 2 g of zeolite powder was added to 50 ml of distilled water to form an aqueous suspension, which was then added to a 0.5 M zinc acetate solution. The mixture was refluxed for 8 hours at a temperature of 80 °C so as to allow ion exchange into and out of the zeolite framework, and then the ZnO sol prepared in the different concentrations as per the procedure in 2.1.1 above was added. Sodium hydroxide (NaOH, 0.1 M) was added to the mixture up to a pH of 12, and the resulting slurry was stirred for 3 hours at 70 °C, followed by ageing for 48 hours and drying at 100 °C for 4 hours. In order to immobilize the ZnO on the zeolite, calcination was then done at 600 °C for 4 hours. The obtained ZnO/Zeolite Na-A catalyst samples were labelled 0.5%, 0.75%, 1.0%, 1.5% and 2.0% ZnO/Zeolite Na-A, respectively, and were compared with both bare zeolite and pure ZnO nanoparticles.

Characterization and data collection methods

2.2.1 X-ray diffraction technique

The structural characterization of the prepared zeo, ZnO and zeo/ZnO catalysts was carried out using a Bruker AXS Discover diffractometer with monochromatic CuKα radiation (λ = 1.5406 Å). The powder samples were spread on low-background Si sample holders, compacted and carefully placed on the sample stage, then scanned over the two-theta range 20° ≤ 2θ ≤ 70°.

Electron microscopy imaging

The surface morphology and the effects of ZnO on the zeolite surface were observed using a Shimadzu Superscan ZU SSX-550 electron microscope. The scan was done at various points of the sample at different magnifications with a probe size of 115 nm, a probe current of 0.02 nA and an accelerating voltage of 5.0 kV. Carbon coating was used during the SEM measurements to prevent charging. EDX spectra of the respective samples were acquired using an Oxford X-Max detector and the associated Aztec software. The beam settings were as follows: 15 kV, 5 mm working distance and 120 mm aperture.
TEM images were obtained using a Phillips 301 HRG transmission electron microscope.

UV-Vis diffuse reflectance spectra (DRS)

The UV-vis diffuse reflectance spectra (DRS) were measured with a Perkin Elmer Lambda 950 UV-Vis-NIR spectrophotometer, equipped with an IRS 240 integrating sphere attachment, over the wavelength range 250-800 nm. The reference spectrum was obtained from the reflectance of BaSO4 as the reflectance standard.

Fourier transform infra-red (FT-IR)

The characteristic functional groups were obtained with a Fourier transform infra-red (FT-IR) spectrometer for all samples in the range 4000-400 cm-1. To obtain the infrared spectra, 0.02 g of the nanocomposite powder sample was mixed with dried potassium bromide (dried overnight in an oven, 24 hours) in the ratio sample:KBr = 1:200 and pressed into pellets using a mortar, pestle and other mechanical devices. The FTIR spectrum of pure KBr was also measured and used as the reference.

Nitrogen adsorption-desorption

Nitrogen adsorption-desorption isotherms were obtained at 77 K as a function of relative pressure P/P0 over the range 0.01-0.99 using a Micromeritics Tristar II 3020 (U.S.A.) surface area analyzer. The outgassing of the samples was carried out at a temperature of 150 °C for 2 h under vacuum prior to each of the measurements. The specific surface area of the samples was calculated according to the Brunauer-Emmett-Teller (BET) method using adsorption data in the relative pressure (P/P0) range 0.05-0.2. The total pore volumes (Vt) were estimated from the amount adsorbed at a relative pressure of 0.98. The pore size distribution (PSD) was calculated using the improved Barrett-Joyner-Halenda (BJH) model evaluated from the desorption branch.

Photodegradation experiments

2.3.1 Photodegradation tests

To test the photocatalytic performance of the synthesized zeolite, ZnO and zeo/ZnO nanocomposites, photodegradation of methylene blue dye under ultraviolet light was conducted. An aqueous solution of methylene blue dye of concentration 20 mmol/L was prepared in a glass beaker as the initial solution, and 0.03 g of each of the different photocatalysts was incorporated into 60 ml of the aqueous solution, followed by magnetic stirring in darkness to obtain adsorption-desorption equilibrium of the MB molecules on the catalyst surface. Aliquots were taken every hour for 6 hours, and the intensity of the absorbance peak was determined. The suspension was then subjected to constant magnetic stirring at a temperature of 25 °C under continuous illumination with ultraviolet light. The source of the radiation was a low-pressure lamp emitting UV light at a wavelength of 365 nm, positioned about 12 cm above the photoreactor in open-air conditions to avoid heating. The intensity of the emitted radiation was 413 mW/cm2. An aliquot of 4 ml was again taken every hour and centrifuged, the catalyst was filtered off, and the absorbance spectrum of the supernatant was then measured using the UV-visible spectrometer, with the intensity of the main absorption peak recorded. The degradation efficiency of the methylene blue dye was determined using the relation:

Degradation efficiency (%) = [(Co − Ct)/Co] x 100

where Co and Ct are the initial and final concentrations of the MB dye, respectively.
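As a brief illustration of how these measurements can be reduced to the two quantities reported later in this work (the removal efficiency above and the pseudo-first-order rate constant introduced in the next subsection), the following Python sketch processes a series of hourly absorbance readings. It is a minimal sketch, not part of the original experimental workflow: the absorbance values are hypothetical placeholders, and the concentration ratio is assumed proportional to the 662 nm absorbance maximum (Beer-Lambert law).

import numpy as np

# Hypothetical hourly absorbance readings of the MB peak at 662 nm (a.u.).
# The first value corresponds to the dark adsorption-desorption equilibrium (t = 0).
time_min = np.array([0, 60, 120, 180, 240, 300, 360], dtype=float)
absorbance = np.array([1.00, 0.71, 0.50, 0.36, 0.25, 0.18, 0.13])

# Beer-Lambert: concentration is proportional to absorbance, so C/Co = A/Ao.
c_ratio = absorbance / absorbance[0]

# Degradation efficiency after the full 360 min run, as defined above.
efficiency = (1.0 - c_ratio[-1]) * 100.0
print(f"Degradation efficiency after 6 h: {efficiency:.1f} %")

# Pseudo-first-order model: ln(Co/Ct) = k * t, so k is the slope of a
# linear fit of ln(Co/Ct) against irradiation time.
y = np.log(1.0 / c_ratio)
k, intercept = np.polyfit(time_min, y, 1)

# Coefficient of determination (R^2) of the linear fit.
residuals = y - (k * time_min + intercept)
r_squared = 1.0 - np.sum(residuals**2) / np.sum((y - y.mean())**2)
print(f"Rate constant k = {k:.4f} min^-1, R^2 = {r_squared:.3f}")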
Degradation kinetics

The photodegradation kinetics of methylene blue by the catalysts were fitted with the Langmuir-Hinshelwood model; assuming that the pollutant concentration is very low (a few dozen parts per million is common), so that KC << 1, the decomposition reaction follows a pseudo-first-order kinetic model with respect to the target pollutant concentration. The Langmuir-Hinshelwood rate expression describes the dependence of the dye concentration in the aqueous phase on the time of UV irradiation:

rt = −dCt/dt = kr·K1·Ct / (1 + K1·Ct + K2·Cst)

where rt is the reaction rate, Ct is the concentration of the methylene blue dye at time t, kr is the reaction rate constant, K1 is the adsorption coefficient of the dye, K2 is the adsorption coefficient of the dissolvent (solvent) and Cst is the concentration of the solvent at time t. Compared with the reactant, the adsorption of the dissolvent is negligible, hence the expression can be reduced to:

rt = kr·K1·Ct / (1 + K1·Ct)

It can be noted that the concentration of the dye is extremely low, so K1·Ct is much lower than 1. This implies that the rate law can be further simplified to:

rt = −dCt/dt = kr·K1·Ct = k·Ct

where k, the product of kr and K1, is also a constant. The final kinetic model is obtained by integrating this expression:

ln(Co/Ct) = k·t

Here ln is the natural logarithm, Co is the initial concentration of the target pollutant (MB), Ct is the concentration after time t, and k is the reaction rate constant, which represents the efficiency of degradation of the MB dye. Since the dye degradation process is assumed to follow first-order kinetics, the rate constant can be determined by linear regression: k is obtained from the slope of the straight-line plot of ln(Co/Ct) versus t as a function of the experimental parameters.

Stability tests

Reusability is an important factor to consider in the application of a catalyst, as it greatly reduces costs, especially in the long run or in the case of large-scale use in degradation systems. Regeneration experiments were performed and the photocatalysts were recycled 5 times. The regeneration of the composite in each subsequent cycle was done by washing with distilled water (20 mL) and filtering, after which the powder was dried at 100 °C for 3 h.

Scavenger studies

In order to determine the radicals participating in the photodegradation process, scavenger studies were carried out using potassium iodide, isopropanol and ascorbic acid as hole (h+), hydroxyl radical (•OH) and superoxide (•O2−) radical scavengers, respectively. The effect of hydrogen peroxide on the rate of photodegradation by the catalyst was also studied.

Results and discussion

The diffraction peaks of the pure ZnO nanopowder were indexed to the hexagonal wurtzite structure of ZnO with the P63mc space group. The XRD patterns of the ZnO-loaded zeolites revealed a combination of sharp peaks indexed to both ZnO and the zeolite, indicating that the ZnO/Zeolite Na-A catalyst was also highly crystalline in nature. No impurity peaks were detected in the patterns, indicating the high purity of the synthesised samples. It was also clear that there was no collapse of the zeolite structure even at increased ZnO loadings. There was a shift of the ZnO/Zeolite peaks relative to the raw ZnO peaks, which is an indicator of successful loading of ZnO on the zeolite; the shift may have been caused by compression of the zeolite structure by the ZnO nanoparticle loading without changing its cubic morphology. The intensity of the Zeolite Na-A peaks after ZnO loading appeared diminished, mainly due to the shielding of the X-rays from the zeolite crystallites by the ZnO nanoparticles on the zeolite surface.
Moreover, the decreased XRD peak intensities imply that the ZnO NPs were also formed on the surface of the zeolites. This was further confirmed by the fact that an increase in ZnO loading led to a decrease in the zeolite peaks, in agreement with the obtained SEM images. Another possible cause of the decreased zeolite peaks may be reduced crystallinity caused by base leaching of Al during modification or during the annealing process. With no extra peaks, a match between the ZnO/Zeolite Na-A and the unloaded zeolite peak positions was observed, which implies that the zeolite's internal framework was unaltered during the ion exchange and also after the ZnO loading. The obtained results agree with those obtained by Ejhieh and Khorsandi.

Crystallite size calculation by the Scherrer equation

The crystallite sizes of the ZnO/Zeolite Na-A nanopowders were calculated using the Scherrer relation:

D = K·λ / (β·cos θ)

where D is the crystallite size, θ is the Bragg angle, K is the Scherrer constant (K = 0.9), λ is the X-ray wavelength (λ = 1.5406 Å) and β is the full width at half maximum (FWHM) of the integral breadth of the most intense XRD peak. The obtained crystallite sizes for ZnO ranged between 24.8 and 35.1 nm, which indicates that the synthesized ZnO/Zeolite Na-A reached the expected nanosize, confirming the successful formation of the catalyst. Compared with the Zeolite Na-A average pore size of 81.12 Å, it is not possible for the ZnO crystallites to enter the zeolite micropores. This agrees with the XRD results indicating that the synthesized ZnO nanoparticles were formed only on the zeolite surface. It was also observed that the crystallite sizes increased with the ZnO loading, which is probably caused by the higher amount of Zn2+ ions due to the increased precursor concentration.

Effects of loading on the ZnO lattice constants and the induced strain

According to Table 1, the prepared ZnO showed lattice constants slightly smaller than the standard (JCPDS) values, which could be due to the nature of the synthesis technique as opposed to effects of the support. There was a slight variation, but no significant effect, of the support on the obtained lattice constants, caused by the induced lattice micro-strain. The strain induced by loading ZnO on the zeolite support was estimated from the Williamson-Hall plots shown in figure 2(a-f). The obtained values of microstrain are depicted in the plot of strain versus ZnO loading in figure 2(g), which reveals that the strain increased with loading up to a maximum at 1.5% ZnO/Zeolite Na-A and then decreased. From the obtained values it is clear that the strain had minimal impact on the internal structural changes induced by the zeolite support.

Morphological properties

3.2.1 SEM images

The scanning electron microscope images obtained for zeo, ZnO and the ZnO/Zeolite Na-A catalyst are shown in fig. 3(a-c). It is clear that both the raw and the ion-exchanged zeolites exhibited a cubic morphology with smooth surfaces, typical of zeolite Na-A, which implies that no structural changes were induced during the ion-exchange process. On the other hand, the ZnO nanoparticles maintained a granular, spherical morphology dominated by particle agglomerations. The SEM images of the ZnO/Zeolite Na-A indicated regions of different colour intensities, with light grey representing the ZnO nanoparticles on a relatively darker zeolite background.
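Before continuing with the morphology results, the following short Python sketch illustrates, on purely hypothetical numbers, how the Scherrer crystallite size and the Williamson-Hall microstrain quoted above are typically evaluated from fitted peak positions and widths. The peak list is an assumption for illustration only, not data from this work.

import numpy as np

WAVELENGTH = 0.15406   # CuKα wavelength in nm (1.5406 Å)
K = 0.9                # Scherrer constant used in this work

# Hypothetical fitted ZnO peaks: (two-theta in degrees, FWHM in degrees).
peaks = [(31.8, 0.28), (34.4, 0.26), (36.3, 0.30)]

theta = np.radians([p[0] / 2.0 for p in peaks])   # Bragg angles (rad)
beta = np.radians([p[1] for p in peaks])          # FWHM values (rad)

# Scherrer size from the most intense peak (here taken as the first entry).
D = K * WAVELENGTH / (beta[0] * np.cos(theta[0]))
print(f"Scherrer crystallite size: {D:.1f} nm")

# Williamson-Hall: beta*cos(theta) = K*lambda/D + 4*epsilon*sin(theta),
# so the slope of beta*cos(theta) versus 4*sin(theta) gives the microstrain.
x = 4.0 * np.sin(theta)
y = beta * np.cos(theta)
strain, intercept = np.polyfit(x, y, 1)
D_wh = K * WAVELENGTH / intercept
print(f"Williamson-Hall: strain = {strain:.2e}, size = {D_wh:.1f} nm")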
In addition, the ZnO-loaded zeolite composite maintained its cubic morphology, which implies that there was no collapse of the zeolite structure due to ZnO loading, in agreement with the obtained XRD structural results. ZnO nanoparticles were observed attached to the zeolite surface. The attachment of the ZnO nanoparticles to the surface of the zeolite support is vital because it reduces the level of nanoparticle agglomeration and thus increases the surface exposed for the photodegradation process. The degree of agglomeration appeared to decrease with ZnO loading up to 1.5% ZnO, beyond which agglomeration increased again, attributed to excess ZnO. Similar SEM images were also obtained by Alswata et al., who studied zeolite-supported ZnO nanocomposites for the removal of toxic metals from water.

Elemental composition

The EDX spectra for the elemental composition of pure ZnO and the ZnO/Zeolite Na-A nanocomposites are shown in figure 4. The spectrum of pure ZnO consisted of zinc and oxygen only, and no impurity atoms were detected. In addition to the elements detected in the pure ZnO spectrum, ZnO/Zeolite Na-A revealed elements such as sodium (Na), silicon (Si) and aluminium (Al) inherent in zeolite Na-A. This implies that ZnO was successfully loaded on the zeolite. The carbon peak seen in the spectrum of figure 4(b) emanated from the carbon tape coating used during SEM imaging, as explained in section 2.2.2 above.

Figure 5(a-f) shows transmission electron microscopy (TEM) images of pure zeolite Na-A, ZnO and the ZnO/Zeolite Na-A catalyst. The zeolite revealed particle sizes of about 5 µm, whereas ZnO revealed particles of hexagonal morphology with sizes ranging between 40 and 180 nm, as indicated in figure 5(c). These images confirmed that the ZnO/Zeolite Na-A composite catalyst was highly crystalline in nature. In addition, the ZnO morphology remained unchanged even after loading onto the zeolite surface. Figure 5(f) clearly shows ZnO particles attached to the zeolite surface, in agreement with the results obtained by XRD and SEM. It was also seen that the ZnO nanoparticles on the zeolite surface were less agglomerated than in the pure ZnO. HRTEM shows particles of different morphologies that could be mapped to ZnO and zeolite. The selected area electron diffraction (SAED) pattern shown in figure 5(h) consists of concentric bright rings, which implies that the ZnO/Zeolite Na-A is polycrystalline in nature. The obtained morphological study agrees with a previous study by Sacco et al.

Nitrogen adsorption-desorption for surface area and pore size

The adsorption behaviour of pure zeolite, pure ZnO and the ZnO/Zeolite Na-A NCs is shown in Fig. 6. All three selected samples exhibited mesoporous type IV isotherm curves with hysteresis loops of type H3 according to the International Union of Pure and Applied Chemistry (IUPAC) classification. These types of isotherms are characteristic of capillary condensation in parallel plate-shaped pores. Pure zeolite revealed the least amount of adsorbed N2, while the highest adsorption was observed on the ZnO/Zeolite Na-A NC catalyst, which implies an increase in adsorption properties when ZnO is loaded on the zeolite. This is caused by increased porosity due to the presence of zeolitic micropores as well as mesopores formed by the aggregation of ZnO nanocrystallites on the zeolite surface.
The Brunauer-Emmett-Teller (BET) surface areas (SBET) for the zeolite and ZnO were 0.40 m2/g and 3.32 m2/g, respectively, whereas that of the selected ZnO/Zeolite Na-A catalyst sample was found to be 6.87 m2/g. This increase in the surface area of the prepared ZnO-loaded Zeolite Na-A catalyst indicates that the ZnO NPs may have formed a porous layer on the surface of the zeolite structure. Further, the formed ZnO NP layer, with its large surface area, provides a large number of functional groups that enhance the adsorption process. The results show an increase of the surface area, a behaviour that is common when a monovalent cation is replaced by a divalent one: when the Zn2+ exchange process occurs, one Zn2+ cation occupies the place of two Na+ cations, which leads to a surface area increment. An increase in the surface area of the catalyst leads to stronger adsorption and hence enhanced photodegradation properties. The average pore sizes for zeo, ZnO and ZnO/Zeolite Na-A were 81.12, 96.54 and 149.70 Å, respectively. The Barrett-Joyner-Halenda (BJH) pore size distribution curves are shown in the insets of fig. 6(b and c). The pore size distribution curve for ZnO shows a strong peak at 2.38 nm and a weak one at about 7.78 nm, while that of the ZnO/Zeolite Na-A sample revealed a broad peak centred at 22.6 nm, clearly confirming that the majority of the pores were mesoporous. The presence of macropores was also observed, resulting from spaces between the ZnO particles on the surface of the zeolite or probably from the zeolite structure itself. Generally, the ZnO-loaded catalyst displayed higher porosity than pure ZnO, which leads to enhanced adsorption of the pollutant molecules and thus improved photocatalytic ability. The obtained textural properties of the ZnO/Zeolite Na-A catalyst are in agreement with results from previous studies, such as that of Gayatri et al., who worked on ZnO/Zeolite Na-A and obtained an SBET, pore size and pore volume of 95.98 m2/g, 44.2 Å and 0.08 cm3/g, respectively. They attributed the obtained pore size to pore widening due to the removal of impurities after the ZnO/zeolite activation treatment. Comparable results were also obtained by Mustapha et al.

Optical properties

3.4.1 UV-Vis DRS spectroscopy

The optical properties of zeo, ZnO and ZnO/Zeolite Na-A with various ZnO loadings were obtained by running UV-visible diffuse reflectance spectroscopy on the samples in the wavelength range 200-800 nm. The reflectance of pure zeolite was high in the visible-light region with a gentle decline in the UV, implying slight absorption in the UV region. A characteristic zeolite absorbance peak in this region, at around 254 nm, was also observed. These absorption bands at short wavelengths are inherent in different types of zeolites and are likely to originate from the O2− → Al3+ charge transfer, which involves the participation of aluminium atoms at specific locations such as surfaces, corners, defects, etc. The ZnO and zeo/ZnO samples exhibited reflectance spectra quite different from that of the pure zeolite powder. A sharp rise in reflectance around a wavelength of 368 nm for the pure ZnO powder is due to the fundamental absorption from the valence band to the conduction band, associated with the O2− → Zn2+ ligand-to-metal charge transfer transition. Moreover, the spectra of ZnO/Zeolite Na-A differed from that of the pure zeolite sample, which was due to the ZnO loading.
In contrast, ZnO/Zeolite Na-A showed higher reflectance intensity in the visible region, attributed to the ZnO particles embedded on the zeolite. A slight wavelength shift associated with the ZnO nanoparticle loading on the zeolite surface was also observed, between 359 nm and 387 nm, which could be caused by variations in crystallite size.

Fig. 7: Diffuse reflectance spectra (DRS) of pure Zeolite, pure ZnO and the ZnO/Zeolite Na-A loaded catalyst.

Optical band gap determination

In order to calculate the band gap, the reflectance is converted to the equivalent absorption coefficient using the modified Kubelka-Munk function:

F(R) = (1 − R)² / (2R)

where F(R) (the Kubelka-Munk function) is proportional to the absorption coefficient α and R is the reflectance, given that α = 4πk/λ, where λ is the wavelength of the absorbed photon and k is the extinction coefficient. The band gap of the composites can then be obtained by extrapolating the linear portion (specifically where the absorption begins) of the plot of (F(R)hν)^(1/n) versus hν to the hν axis, according to the Tauc equation:

(F(R)hν)^(1/n) = A(hν − Eg)

The proportionality constant A is related to the nature of the material, h is Planck's constant (6.63 x 10^-34 J s), and n = 0.5 for direct allowed transitions and n = 2 for indirect transitions. Since ZnO is a direct band gap semiconductor, n = 0.5 was applied in evaluating the band gap Eg. The variation of the obtained band gap Eg with zeolite addition, shown in figure 7(b), revealed that the band gap of the ZnO-loaded catalyst was larger than that of pure ZnO due to the non-conducting nature of the zeolite. As the amount of ZnO increased, the band gap approached the value of pure ZnO. The reason for the decrease in band gap Eg is the increase in crystallite size. A band gap decline is useful in extending the wavelength response necessary for catalytic photodegradation.

Functional groups by FTIR

The FTIR spectra for the determination of the functional groups in the zeolite, ZnO and ZnO/Zeolite Na-A were obtained in the wavenumber range 400-4000 cm-1, as shown in figure 8. The broad band in the region 3607-3200 cm-1 and the band at 1657 cm-1 in the zeolite spectrum are assigned to the O-H stretching vibration of the zeolitic water (H2O) entrapped in the microporous structure of the zeolite framework. The FTIR peaks between 1200 cm-1 and 400 cm-1 are assigned to Si-O-Al and Si-O bending vibrations associated with the tetrahedral alumino- and silico-oxygen bridges of the zeolite aluminosilicate internal structure. Pure ZnO revealed weak O-H peaks in a similar region to those of Zeolite Na-A (3426 cm-1 and 1633 cm-1) due to adsorbed atmospheric water vapour. The weak peaks located at 1379 cm-1 are caused by -CH3 groups originating from the organic precursors (zinc acetate and 2-propanol), whereas the peak at 877 cm-1 is assigned to Zn-O stretching. This precursor peak was missing in the pure zeolite sample. The Zn-O characteristic absorption peaks are located around 450 cm-1 (not visible). All the ZnO/Zeolite Na-A samples revealed peaks in positions similar to those of the zeolite. This is because there was no peak shift due to the zeolite treatment and ZnO loading, since the zeolite structure does not change when a transition metal oxide cation leaks into the zeolite structure. This indicates the high stability of the ZnO/zeolite, in agreement with the earlier XRD and SEM results. Faint bands associated with Si-O-Zn or Zn-O-Al were detected at about 668 cm-1, which implies that the interaction of the zeolite with ZnO most likely occurred only on the surface of the zeolite.
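Returning briefly to the band-gap procedure described above (the FTIR discussion continues below), the following Python sketch converts reflectance data to the Kubelka-Munk function and estimates Eg by extrapolating the linear region of the Tauc plot for a direct transition. The reflectance values and the choice of the linear region are hypothetical placeholders; only the formulas follow the text.

import numpy as np

# Hypothetical diffuse reflectance data near the ZnO absorption edge.
wavelength_nm = np.array([350, 355, 360, 365, 370, 375, 380, 385, 390])
reflectance = np.array([0.12, 0.14, 0.18, 0.25, 0.38, 0.55, 0.70, 0.80, 0.85])

# Photon energy: E (eV) = 1239.84 / wavelength (nm).
h_nu = 1239.84 / wavelength_nm

# Kubelka-Munk function, proportional to the absorption coefficient.
f_r = (1.0 - reflectance) ** 2 / (2.0 * reflectance)

# Tauc variable for a direct allowed transition (n = 0.5, i.e. exponent 1/n = 2).
tauc = (f_r * h_nu) ** 2

# Fit the steep linear region of the Tauc plot (chosen by hand here) and
# extrapolate to the h*nu axis: Eg is the energy at which the fit crosses zero.
steep = slice(2, 6)                      # indices of the linear rise (assumed)
slope, intercept = np.polyfit(h_nu[steep], tauc[steep], 1)
e_gap = -intercept / slope
print(f"Estimated direct band gap: {e_gap:.2f} eV")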
However, the broadening of the ZnO peak at 1032 cm-1, assigned to Zn-O-Zn stretching, may be due to the overlap of the ZnO band with the original vibrational bands of the zeolite framework. The FTIR study shows that strong bonds were formed between the ZnO and the zeolite surface, which ensures that the ZnO nanoparticles adhered strongly to the zeolite surface throughout the photodegradation and recycling processes.

Photodegradation measurements

3.6.1 Photocatalytic dye removal

To test the effects of ZnO loading on photodegradation, a study was conducted on the ability of the catalyst to photodegrade MB in water as the model pollutant. The photocatalytic experiments were conducted as per the procedure explained in section 2.3.1 above, and the obtained results are shown in figure 9. From the figure it is clear that there was minimal degradation of the dye under UV light without the catalyst (photolysis). Similar curves were also obtained from the adsorption studies with the ZnO/Zeolite Na-A catalyst. When the degradation experiments were conducted in the presence of both the UV light and the catalyst, as shown in figure 9b, the dye was readily degraded, leading to the near disappearance of the main MB peak at 662 nm within 360 min. This implies that the decrease in peak intensity, which corresponds to the loss of UV-visible absorbance, was caused by the photodegradation action of the catalyst. The intensity of the MB peak is a measure of the dye concentration, from which we can conclude that the decrease in the peak is associated with the breakdown of the internal structure of MB.

Kinetics of photodegradation

To study the degradation kinetics, it was assumed that the photodegradation of the methylene blue dye follows pseudo-first-order kinetics. Best-fit plots of ln(Co/C) versus time in minutes (Fig. 10) were constructed, and their gradients were evaluated in order to determine the rate constants k and the statistical correlation coefficients R2. The obtained values are shown in Table 3. From the plots, ZnO/Zeolite Na-A revealed higher photodegradation rates than the pure ZnO nanopowder, which is probably caused by the combined surface effects of ZnO on the zeolite surface, such as high electron mobility and suppressed electron-hole recombination. The photodegradation rate increased with the ZnO loading, with 1.5% ZnO/Zeolite Na-A showing the optimum rate of photodegradation, beyond which a decline was observed. The reason for the high rate could be the synergetic effect of loading in conjunction with the optimum dispersion of the ZnO nanoparticles; such dispersion ensures the availability of exposed active sites when ZnO is loaded onto the zeolite. In addition, the accessible porous zeolite can adsorb the MB and increase its contact time with the catalyst. It was noted that excessive loading led to a decline in the photodegradation rate, for the obvious reason that agglomeration contributes to an effective decrease in the surface area.

The proposed mechanism of degradation

The pure ZnO structure has Zn-O bonds, whereas new bonds such as Zn-O-Al and Zn-O-Si may be formed when ZnO is loaded on the zeolite surface. This results in traces of ZnO being incorporated into the aluminosilicate framework of the zeolite, which has high thermal and chemical stability.
In the proposed mechanism of photodegradation, ZnO absorbs UV radiation, whereby electrons are excited from the valence band to the conduction band, simultaneously generating holes in the valence band (Fig. 11). The generated electrons interact with atmospheric oxygen, producing superoxide radicals, whereas the holes interact with water to produce •OH radicals. These active radicals interact with the adsorbed dye molecules, causing destruction of the dye's molecular structure. For pure ZnO, however, this process is strongly hindered by charge recombination, as the excited electrons fall back and recombine with the holes. When the ZnO semiconductor is loaded on the zeolite, the entire process is altered: the ZnO absorbs the UV radiation, generating electron-hole pairs, and the electrons diffuse into the zeolite framework. In such a situation, the charge recombination process is greatly slowed down, extending the reaction time with the dye molecules. In addition, the generated hydroxyl radicals and superoxide radicals are readily transferred from the ZnO to the zeolite surface, where the organic pollutant molecules are already adsorbed, maintaining prolonged contact between the pollutant and the radicals.

Mineralization

The discoloration rate of the MB dye is an indication of the disintegration of the basic structure of the dye, which is reflected in the decrease in intensity of the dye's main peaks. To ascertain whether the dye underwent complete mineralization, the amount of organic matter remaining after photodegradation was determined by chemical oxygen demand (COD) experiments. Table 4 shows the percentage decrease in COD for the respective catalyst samples. From the table it can be seen that, after 6 hours of continuous irradiation, there was a remarkable reduction in COD, implying disintegration of the organic matter of MB for most samples. However, the COD reduction of the blank sample was insignificant, indicating that the reduction can be associated with the effect of the catalysts. A reduction in COD reflects the extent of photodegradation and mineralization of organic pollutants. The decrease in the level of COD followed a trend similar to the photocatalytic discoloration obtained from the main absorption peak intensity of the methylene blue dye.

Role of the zeolite framework

In the experiments, ZnO/Zeolite Na-A revealed higher photocatalytic activity than pure zeolite or ZnO. Zeolites are photocatalytically inactive but are suitable for use as support materials, whereas in unsupported ZnO the low catalytic efficiency is associated with particle agglomeration, as noted in the SEM and TEM images. ZnO nanoparticles have a high tendency to agglomerate due to their large surface energies, a tendency that is usually weakened by loading them on the zeolite. The reason for the high photocatalytic activity of ZnO/Zeolite Na-A is attributed to the well-dispersed ZnO nanoparticles on the zeolite support framework, which ensures a high surface area. The ZnO nanoparticles are embedded at the zeolite active sites, where they exchange active ions with the zeolite. During excitation, electrons from the ZnO valence band (EVB) are promoted to its own conduction band (ECB) and then diffuse to the oxygen (O) atoms and finally to the aluminium atoms in the zeolite framework, hence preventing electron/hole recombination, which results in increased photodegradation efficiency. Besides, zeolites have a high adsorption capacity for the dye molecules, which brings them close to the surface of the catalyst, increasing their chances of being attacked by the •OH radicals.
These •OH radicals are also trapped in the framework of the zeolite, where the dye molecules are adsorbed, enhancing further degradation reactions. It is also believed that the zeolite framework can participate actively in the catalytic process by hindering charge recombination, acting either as an electron donor or as an acceptor. This means that some Zn2+ cations in ZnO/zeolite Na-A may bind to the zeolite and interact with its anionic (AlO2−) framework, triggering anion-cation pairs; this results in strong electrostatic interactions, leading to increased adsorption of the cationic MB dye and hence an increased extent of degradation of the pollutant. These results demonstrate that the active centres are composed of the ZnO on the zeolite structure.

Effects of scavengers and H2O2 on the active radicals

Further investigation was performed to determine the nature of the primary active species in the photocatalytic process. To verify whether holes, electrons or hydroxyl radicals are involved in the photocatalytic degradation of MB on the pure ZnO, zeolite and zeolite-supported ZnO nanoparticles, the photocatalytic activity was measured in the presence of different scavengers. The photodegradation procedure was repeated as described in section 2.3.4 above, using potassium iodide, isopropanol and ascorbic acid as hole (h+), hydroxyl radical (•OH) and superoxide (•O2−) radical scavengers, respectively. The contribution of the active species to MB degradation over the zeolite, ZnO and zeolite-supported ZnO follows the order •OH > holes (h+) > electrons, which shows that •OH radicals played the most important role in MB degradation (Fig. 12). The possible reactions are as follows. Charge-carrier recombination may occur after charge separation, releasing heat:

e− + h+ → heat

Without recombination, holes in the valence band (h+) attack adsorbed water and surface hydroxide ions:

H2O + h+ → H+ + •OH

OH− + h+ → •OH

On the other hand, electrons in the conduction band (e−) attack adsorbed atmospheric oxygen, producing the superoxide radical (•O2−):

O2 + e− → •O2−

With regard to the photocatalytic degradation of MB upon the addition of an oxidant, it was found that adding hydrogen peroxide (H2O2) promoted the photodegradation rate. The increase in the photodegradation rate of the MB dye is attributed to the reaction between electrons in the conduction band of ZnO in the catalyst and hydrogen peroxide:

H2O2 + e− → •OH + OH−

Hydrogen peroxide is a better electron acceptor than dissolved oxygen and can thus act as an alternative electron acceptor that hinders electron-hole recombination, as in the above equation.

Effect of catalyst dosage

To determine the optimum amount of catalyst required for a particular application, the photodegradation was repeated with different catalyst amounts, keeping the other factors constant. The catalyst dosage applied ranged from 0.1 to 1.0 g/L for each of the experiments, and the various degradation rate constants k were obtained by plotting ln(Co/C) versus time (min). To obtain the relationship between the catalyst loadings, the degradation constants obtained were plotted against the mass of the catalyst dosage (mg). From the results obtained in figure 13, the degradation constant k increased with increasing catalyst dosage. When the amount of catalyst was increased from 0.03 g, the degradation constant k increased from 5.79 x 10-3 to 7.49 x 10-3 min-1, beyond which a decline in the k value was observed.
This behavior implies that the optimum catalyst dosage lies around 0.12 g, although it is advisable to use a slightly lower amount, which gives almost the same result at lower cost. The main reasons for the decline in performance beyond 0.12 g include aggregation of the catalyst, which reduces the exposed surface area bearing active sites. Furthermore, too much catalyst may mask, reflect or scatter the light and prevent its penetration, which reduces photon absorption and charge separation and thus lowers the photodegradation efficiency. In addition, aggregation may lead to deactivation of the catalyst through collisions between excited and ground-state ZnO particles in the same vicinity, so the right amount should be added to avoid excess.

Effect of initial MB concentration

The initial concentration of MB dye may affect the photodegradation efficiency of the catalyst. To determine the effect of the substrate's initial concentration on the rate of photodegradation, several experiments were conducted with MB concentrations ranging from 20 mmol/L to 70 mmol/L at the optimum catalyst loading, using the 1.5% ZnO/zeolite Na-A sample. The rate of photodegradation increased with increasing initial MB concentration up to 50 mmol/L, after which a decline in the rate was observed, as shown in figure 14. The generated active radicals, such as hydroxyl radicals, usually have a limited lifetime in the nanosecond range, and in most cases their action is localized around the site where they are formed. This implies that when the MB concentration is increased, the number of dye molecules per unit volume also increases, which in turn increases the chances of these dye molecules reacting with hydroxyl radicals. However, when the MB concentration exceeds a certain limit, excess adsorption of the dye on the catalyst occurs, blocking the active sites. Excess dye also results in a high level of adsorbed dye, which may block the incoming photons from reaching the catalyst, limiting the catalyst's absorption and excitation and decreasing charge carrier separation. Simultaneously, a large amount of intermediates and other organic substances may adsorb on the surface of the catalyst, making it more difficult for holes to reach the solution in contact with the catalyst surface. Table 5 shows the photodegradation constants for the various MB concentrations.

Stability of the ZnO/zeolite Na-A catalyst

To test the stability of the ZnO/zeolite Na-A catalyst, five repeated tests were conducted on the 1.5% ZnO/zeolite Na-A sample, each time with fresh MB solution and with all other conditions kept constant. After each experiment the catalyst was filtered out, washed three times with distilled water and dried at 110 °C for 12 hours before being reloaded for the next experiment. In the successive runs, the catalyst showed degradation constants of 5.79, 5.34, 5.20, 5.05 and 4.75 (×10−3 min−1) in the 1st, 2nd, 3rd, 4th and 5th repeat experiments, respectively. From this result it is clear that catalyst deactivation became significant only from the 4th repeat cycle, implying that the ZnO/zeolite Na-A catalyst has high stability, which is vital for reusability. The probable cause of deactivation is blockage of the catalyst's active sites by adsorbed intermediates. Further, XRD spectra of the used and recovered catalyst were measured and a structural comparison was made, as shown in figure 15.
It can be seen that the crystalline structure of the catalyst was maintained and was only slightly weakened after several cycles. These results indicate that the zeolite-supported ZnO catalyst possesses a high level of recyclability, and it can therefore be applied in the photodegradation of waste dyes and, more generally, in water treatment.

Conclusion

In this study ZnO was successfully loaded on the zeolite Na-A surface and the resulting ZnO/zeolite Na-A catalyst was characterized by XRD, SEM, TEM, UV-Visible DRS, FTIR and nitrogen adsorption-desorption techniques. The results revealed that the ZnO nanoparticles loaded on the zeolite surface were better dispersed than pure ZnO, which contributes to an increase in surface area, as confirmed by the SBET results. An increased surface area is essential for the attachment of the dye molecules as well as the active species and thus increases the photodegradation rate. The highest degradation activity was obtained at 1.5% ZnO loading on the zeolite and was attributed to synergetic effects between the zeolite aluminosilicate network and the charge separation mechanism of the ZnO nanoparticles. Further study revealed the high stability of the catalyst that is necessary for repeated use. It can be concluded that the photodegradation activity of ZnO can be greatly increased when the ZnO is loaded on a zeolite support, and this composite is highly recommended for the degradation of dye pollutants.
1. Field of the Invention The present invention relates to apparatus for handling a web of material made up of individual tows, and more particularly to apparatus for removing a fill yarn which is alternately interlaced with the web across the width thereof so as to hold the individual tows thereof together for prior processing. 2. History of the Prior Art It is known to weave or interlace a length of fill yarn alternately back and forth across the width of a web of individual tows disposed in side-by-side relation across the width of the web so as to hold the tows in place for processing thereof. During the carbonization of filaments of PAN or similar material, for example, it is common practice to weave a length of fill yarn back and forth across the width of the web prior to carbonization so as to hold the individual tows of filaments in place as part of an integral woven structure. The resulting web tends to minimize or avoid damage of the delicate filaments as the web is advanced over rollers and other equipment during the carbonization of the filaments. Apparatus for inserting the fill yarn to form such a web is described in U.S. Pat. No. 4,173,990 of Langlois et al, issued Nov. 13, 1979 and commonly assigned with the present application. Upon completion of carbonization or other processing requiring the presence of the fill yarn, it is usually necessary that the fill yarn be removed so that other processing or ultimate utilization of the tows comprising the web can take place. This is usually accomplished using one of several techniques involving a substantial amount of manual labor. In the most common of such techniques the different lengths of fill yarn extending across the width of the web are cut at approximately their midpoints by scissors or other appropriate cutting instruments as the web is advanced through a work area. Following that, the loops of fill yarn at the opposite edges of the web are grabbed and pulled by hand to remove the cut pieces of the fill yarn. Such process is relatively slow and tedious. It typically requires the full time and attention of two people, one standing on each side of the web which is typically three to four feet or more in width. At the same time the process is relatively slow because of the time required to hand cut and hand remove the fill yarn. Optimum speed for such a process is usually about five feet per minute of web advance. Accordingly, it would be desirable to be able to provide an improved technique and improved apparatus for removing fill yarn from a web of tows. It would be especially advantageous to be able to remove the fill yarn automatically by machine so as to eliminate the need for manual labor and require only occasional operator supervision. It would furthermore be advantageous to be able to remove the fill yarn from a web of tows at speeds greatly in excess of those realizable with present manual techniques.
The homologous recombination machinery modulates the formation of RNADNA hybrids and associated chromosome instability Genome instability in yeast and mammals is caused by RNADNA hybrids that form as a result of defects in different aspects of RNA biogenesis. We report that in yeast mutants defective for transcription repression and RNA degradation, hybrid formation requires Rad51p and Rad52p. These proteins normally promote DNADNA strand exchange in homologous recombination. We suggest they also directly promote the DNARNA strand exchange necessary for hybrid formation since we observed accumulation of Rad51p at a model hybrid-forming locus. Furthermore, we provide evidence that Rad51p mediates hybridization of transcripts to homologous chromosomal loci distinct from their site of synthesis. This hybrid formation in trans amplifies the genome-destabilizing potential of RNA and broadens the exclusive co-transcriptional models that pervade the field. The deleterious hybrid-forming activity of Rad51p is counteracted by Srs2p, a known Rad51p antagonist. Thus Srs2p serves as a novel anti-hybrid mechanism in vivo. DOI: http://dx.doi.org/10.7554/eLife.00505.001 Introduction Genome instability can lead to a range of alterations in both the sequence and structure of chromosomes. While such changes may help drive evolution, more often they are associated with decreased organism fitness and increased susceptibility to disease (Aguilera and Gmez-Gonzlez, 2008). Historically, most genome instability was thought to occur as the result of errors during replication, or the failure of DNA repair pathways. However, work in Saccharomyces cerevisiae and mammalian cells has demonstrated that genome instability also arises from lesions generated from the formation of RNA-DNA hybrids (Huertas and Aguilera, 2003;Li and Manley, 2005;Kim and Jinks-Robertson, 2009;;;). Many important aspects of hybrid-mediated genome instability remain to be elucidated. Genome-wide screens in budding yeast and human cells have revealed that levels of RNA-DNA hybrids increase when RNA biogenesis is disturbed at sites of transcription initiation or repression, elongation, splicing, degradation, and export (Huertas and Aguilera, 2003;Li and Manley, 2005;;;). The co-transcriptional binding of many RNA processing and transcription factors suggests that they prevent hybrid formation by restricting the access of nascent RNA molecules to the DNA template at the site of transcription (Aguilera and Garca-Muse, 2012). Recent studies suggest that these RNA biogenesis factors are not sufficient to prevent transient hybrid formation at some loci in wild-type budding yeast; rather, hybrids form but are removed rapidly by hybrid removal factors, including two endogenous RNase H enzymes, and Sen1p, an RNA-DNA helicase (;). In RNA biogenesis mutants eLife digest Cells with an unusually large number of mutations-either in the form of changes to the DNA sequence or changes in the number or structure of chromosomes-are said to show genome instability. Although these mutations sometimes boost an organism's chances of survival and reproduction, they more often have detrimental effects, which can include cancer. Genome instability can arise as a result of mistakes occurring during the repair of damaged DNA, or due to inappropriate hybridization of RNA to its DNA template. These RNA-DNA hybrids had been thought to occur strictly during the transcription of DNA into RNA. 
During this process, the two strands of the DNA molecule separate behind the moving RNA polymerase, and this provides an opportunity for the newly formed RNA to hybridize back to its DNA template. When these RNA-DNA hybrids persist, they give rise to DNA damage that leads to genome instability. Although much is known about the factors that prevent the formation of hybrids, or promote their removal, little is known about how hybrids form in the first place. Now, Wahba et al. have identified one such mechanism in the model yeast, Saccharomyces cerevisiae. It involves a protein called Rad51p, which helps to join stretches of nucleic acids together to repair breaks in DNA. However, Wahba et al. showed that if Rad51p is not properly regulated, it can also trigger the formation of RNA-DNA hybrids; yeast cells that lack the gene for Rad51p showed significantly reduced levels of hybrid formation. Moreover, dysfunctional Rad51p causes RNA sequences to anneal to DNA throughout the genome, rather than just at the site in which the RNA was originally produced. This means that RNA sequences produced during transcription are much more of a threat to genomic stability than previously thought. The work of Wahba et al. presents a paradox in which a protein that is normally involved in repairing DNA can itself cause damage if it is not carefully regulated. It also raises the possibility that the elevated levels of Rad51p expression observed in cancer cells could be a cause, rather than a consequence, of mutations. DOI: 10.7554/eLife.00505.002 hybrids may occur in trans as well as in cis. The intriguing possibility that hybrids may occur in trans and contribute to genomic instability has not been assessed. In this work we used S. cerevisiae as the model system to test in vivo the role of Rad51p in hybrid formation. We report that the formation of RNA-DNA hybrids and associated genome instability in at least four RNA biogenesis mutants requires Rad51p and its activator, Rad52p. Furthermore, the deleterious hybrid-forming activity of Rad51p is suppressed in wild-type cells by Srs2p, a Rad51p inhibitor. Additionally, we developed a model locus system that allows us to monitor hybrid-mediated genome instability as a result of transcription. We manipulate this system to provide compelling evidence that hybrids and ensuing genome instability can occur via a trans mechanism that is dependent on Rad51p. Formation of RNA-DNA hybrids is dependent on Rad51p The conditions that drive the initial formation of RNA-DNA hybrids in vivo are not well understood. With the bacterial in vitro experiments in mind, we wondered whether hybrid formation was simply a strand exchange reaction, similar to that mediated by Rad51p during DNA repair and homologous recombination. To test this possibility, we examined the effect of deleting RAD51 on hybrid formation and the associated genome instability in RNA biogenesis mutants of budding yeast. We chose a representative set of mutants defective in elongation (leo1), repression (med12 and sin3), and degradation (kem1 and rrp6). We assayed directly for the presence of RNA-DNA hybrids in wild-type cells and these mutants by staining chromosomes in spread nuclei with S9.6 antibody (see 'Materials and methods'). Previously, we demonstrated the specificity of the S9.6 antibody for hybrids by two approaches. First, S9.6 staining in spreads of RNA biogenesis mutants is reduced to that seen in wildtype cells by post treatment of chromosome spreads with RNase H (). 
Similarly, spreads of an RNA biogenesis mutant over-expressing RNase H no longer stained with S9.6. As reported previously, less than 5% of wild-type nuclei stain with this antibody ( Figure 1A, Figure 1figure supplement 1). In contrast, 80-85% of nuclei in our representative set of RNA biogenesis mutants showed robust staining, indicating the formation of stable hybrids at many loci in most cells ( Figure 1A, Figure 1-figure supplement 1). The deletion of RAD51 (rad51) in these mutants diminished S9.6 staining in nearly all nuclei from the RNA biogenesis mutants threefold to fourfold to near background levels ( Figure 1A, Figure 1-figure supplement 2). To corroborate our cytological method, we isolated total nucleic acids from wild-type, sin3 (a representative RNA biogenesis mutant), and sin3 rad51 cells, transferred them to a solid matrix and monitored binding of S9.6. S9.6 binding to sin3 nucleic acids was elevated approximately tenfold relative to sin3 rad51 ( Figure 1-figure supplement 3). These results strongly suggest that hybrid formation in these mutants is highly dependent upon Rad51p. One prediction from the cytological results is that the suppression of hybrid formation by rad51 should also lead to the suppression of hybrid-mediated chromosome instability. To measure hybridmediated genome instability, we exploited an assay we developed previously using a yeast artificial chromosome (YAC) (;see 'Materials and methods'). The total rate of YAC instability (the sum of chromosome loss and terminal deletions) in wild-type cells was 6 10 −4 per division. Notably, rad51 alone caused no increase in YAC instability. In our subset of RNA biogenesis mutants, YAC instability increased fivefold to tenfold ( Figure 1B). The introduction of rad51 into the RNA biogenesis mutants completely suppressed the elevated YAC instability, both chromosome loss and terminal deletions, in leo1, kem1, rrp6, and sin3 mutants. In med12, YAC instability was mostly but not entirely suppressed despite the near complete suppression of hybrid formation as monitored by spreads, indicating that in the med12 rad51 strain a subset of the YAC instability was hybrid independent. Overall, the suppression of hybrid-mediated chromosome instability by rad51 corroborates its elimination of RNA-DNA hybrids and associated destabilizing lesions. To further validate the occurrence of Rad51-dependent hybrids, we sought to develop a model locus that can be used to induce hybrid formation and hybrid-mediated instability at a known region. From our previous study on RNA biogenesis mutants that induce hybrids, we noted that many of these mutants allow cryptic transcription, and likely the production of aberrant transcripts (;;). Based on this observation, we introduced a portion of the GAL1-10 promoter into the YAC (henceforth referred to as YAC-GALpr), such that the addition of galactose to the media would induce GALpr-dependent transcription of neighboring non-yeast sequences ( Figure 2A). We analyzed transcription of the human and vector sequences flanking GALpr by qRT-PCR. This analysis revealed approximately one hundredfold induction of RNA at least 1 kb on both sides of the GAL promoter ( Figure 2B). Using the model locus, we monitored the presence of transcription-induced hybrids specifically proximal to GALpr. Total nucleic acids were isolated from strains containing either the YAC or YAC-GALpr in the presence or absence of galactose. 
These samples were subjected to DNA immunoprecipitation (DIP) analysis with the S9.6 antibody that should only precipitate DNA in RNA-DNA hybrids (;see 'Materials and methods'). Using primers specific to the YAC region proximal to the GALpr, low DIP signals were observed in YAC-GALpr cultures in the absence of galactose, as well as in cultures with the YAC, with and without the addition of galactose ( Figure 2C absence of their transcription. In contrast, we observed a dramatic increase in the DIP signal for hybrids on the YAC sequences proximal to YAC-GALpr, 2 hr after induction by galactose ( Figure 2C). The specificity of this increased DIP signal was evident by the fact that no elevation in hybrid signal was detected in two regions of the YAC-GALpr distal to the GALpr (. Finally, hybrid formation at YAC-GALpr was dependent upon RAD51 (see 'Rad51p-dependent hybrid formation can occur in trans'). These data provide molecular evidence for the formation of RAD51-dependent hybrids at the YAC sequences transcribed by induction of GALpr. To determine whether the Rad51p-dependent hybrids induced by YAC-GALpr led to genome instability, we monitored the instability of YAC-GALpr upon galactose treatment. Indeed, its instability was elevated 25-fold with a distribution of chromosome loss and terminal deletions similar to that seen in wild-type cells and RNA biogenesis mutants ( Rad51p binding at the site of hybrid formation A second prediction concerning Rad51p-mediated hybrid formation is that Rad51p should be detectable near sites of hybrid formation. To test this prediction we used our YAC-GALpr model locus to assay for the presence of Rad51p binding around the site of hybrid formation. We generated cultures of strains containing the YAC or YAC-GALpr that had been grown in the presence or absence of galactose. These cultures were fixed and assayed for Rad51p binding to the YAC sequences by chromatin immunoprecipitation (ChIP) (see 'Materials and methods'). ChIP was performed using two independent antibodies, anti-HA against a C-terminal haemagglutinin (HA) tagged Rad51p and a polyclonal rabbit anti-Rad51p. No Rad51p binding was detected either on YAC-GALpr in the absence of galactose or on the YAC in the presence or absence of galactose ( Figure 3A). Thus the level of Rad51p binding to the YAC or vector sequences in the absence of transcription was very low if any. In contrast, using either antibody for ChIP, significant Rad51p binding was detected around the GAL promoter on YAC-GALpr upon the addition of galactose and induction of transcription ( Figure 3A, Figure 3figure supplement 1). Notably, Rad51p binding appears to extend further than the region of hybrid formation detected by DIP ( Figure 2C and Figure 3A). Rad51p is known to spread from regions of ssDNA into dsDNA (Zaitsev and Kowalczykowski, 2000), and it is possible that in our model locus Rad51p is spreading from the ssDNA or RNA-DNA hybrid into the neighboring dsDNA. To test further the correlation of transcription and Rad51p binding, we added dextrose to the galactose-treated YAC-GALpr cultures to repress galactose-induced YAC-GALpr transcription (see 'Materials and methods'). In these cultures, Rad51p binding disappeared ( Figure 3-figure supplement 2). Taking these findings together, we observe Rad51p binding to the region of the hybrid-forming locus on the YAC-GALpr only when transcripts from this region are induced. plasmid or an empty control vector. (E) Induced YAC instability is suppressed when RAD51 is deleted. 
Error bars represent standard deviation calculated from at least three independent colonies. DOI: 10.7554/eLife.00505.007 The following figure supplements are available for figure 2: immediately for the -Gal sample and fixed for chromatin immunoprecipitation (ChIP; see 'Materials and methods'). After 120 min, the +Gal sample was similarly fixed for ChIP. Input DNA and DNA coimmunprecipitated with -HA or --H2a.X (IP) antibody were amplified using primer sets along the yeast artificial chromosome (YAC) as annotated with black dashes on the YAC-GALpr or YAC schematic above each graph. We propose that the binding of Rad51p observed at the model locus is due to its role in hybrid formation. However, hybrids are thought to induce double-strand breaks (DSBs), and Rad51p binds at DSBs to initiate DNA repair through homologous recombination (;Figure 3figure supplement 3A). Therefore, the presence of Rad51p at the hybrid-forming locus might be due to its function in repair rather than in hybrid formation. To address this alternative explanation for Rad51p binding, we performed molecular and functional tests for the formation of DSBs 2 hr after the induction of transcription. As a molecular assay, we monitored a 20 kb region surrounding the GAL promoter for the accumulation of phosphorylated histone H2AX (-H2AX) by ChIP. This modification is one of the most dramatic and earliest markers of DSB formation, arising within minutes and spanning large regions of chromatin adjacent to the break Figure 3-figure supplement 3B). However, we did not detect a ChIP signal for -H2AX above background level in the YAC-GALpr strain even under conditions that induced Rad51p binding ( Figure 3B). Thus by this molecular assay Rad51p binding occurs at the site of hybrid formation prior to hybrid-induced DNA damage. As a functional test, we took advantage of the fact that adding dextrose after 2 hr suppressed transcription and Rad51p binding at the model hybrid locus. We reasoned that if Rad51p binding during the 2 hr prior to the addition of dextrose reflected Rad51p association with hybrid-induced DNA damage, then this damage would manifest as increased YAC instability. However, no increase in YAC instability was observed (Figure 3-figure supplement 4), indicating that binding of Rad51p to this locus during the first 2 hr was unlikely to result from DNA damage. Thus neither our molecular nor functional test supports the binding of Rad51p to the model locus prior to hybrid-induced DNA damage, pointing to a direct role of Rad51p in hybrid formation. Does the formation of all hybrids require Rad51p? Studies from our laboratory and the Aguilera laboratory suggest that hybrids not only form in RNA biogenesis mutants but also transiently in wild-type cells (;). The latter fail to persist because of their rapid removal by RNases H and Sen1 (;). To test whether these naturally occurring hybrids are also dependent on Rad51p, we monitored hybrid staining and YAC instability in rnh1rnh201 in the absence of RAD51. Neither hybrid staining nor YAC instability was suppressed ( Figure 4A,B), indicating that the transient hybrids in wild-type cells are not Rad51p dependent. Thus both Rad51p-dependent and -independent mechanisms for hybrid formation exist. Rad51p-dependent hybrid formation can occur in trans In the in vitro bacterial studies, RecA promoted hybrid formation in the absence of active transcription, suggesting that RNA-DNA hybrids can form post-transcriptionally, or in trans. 
To test whether in vivo hybrids could form in trans, we constructed a strain, LW7003, in which chromosome III contained 3.5 kb of vector and human sequences surrounding the galactose promoter of the YAC-GALpr (henceforth referred to as the YAC-GALpr module). This strain also contained the original unmodified YAC, allowing us to investigate whether transcription of the YAC-GALpr module on chromosome III could induce both hybrid formation on the YAC and YAC instability ( Figure 5A). To test directly whether hybrids can form in trans, DIP was performed on cultures of our LW7003 strain after growth in the presence or absence of galactose. One primer set that monitored hybrids from both the YAC and YAC-GALpr module generated a strong DIP signal only in the presence of galactose ( Figure 5B, primer 1). This combined hybrid signal was eliminated when the rad51 was introduced in this strain ( Figure 5B, primer 1). These results minimally corroborate our previous demonstration of hybrids forming in cis and show that hybrid formation is dependent upon Rad51p. Two other primer sets that monitored hybrids only from the YAC also revealed a strong DIP signal only in the presence of galactose ( Figure 5B, primers 2 and 3). These results demonstrated transcriptiondependent hybrid formation in trans. This trans-specific hybrid signal was eliminated when rad51 was introduced into our strain. The RAD51-dependent DIP results strongly support the formation of Rad51p-dependent hybrids in trans. We also tested for hybrid formation on the YAC in trans by monitoring YAC instability in LW7003. As expected, no increase in YAC instability was observed in this strain in the absence of galactose ( Figure 6A). However, YAC instability increased tenfold upon galactose-induced transcription of the YAC-GALpr module on chromosome III ( Figure 6A, black bars). The transcription-induced YAC instability was dependent on the homology between the YAC and the transcribed YAC sequences from the YAC-GALpr module on chromosome III, as deletion of the corresponding 1 kb of homology from the YAC completely suppressed the transcription-induced YAC instability ( Figure 6-figure supplement 1). The elevated YAC instability was blocked by RNase H over-expression, indicating the YAC instability was hybrid dependent ( Figure 6A, gray bars). YAC instability was also blocked after introduction of the rad51 in LW7003 ( Figure 6B). Thus transcription from the YAC-GALpr module on chromosome III acted in trans to cause the YAC to rearrange through a hybrid-and Rad51p-dependent mechanism. If the YAC instability induced by the YAC-GALpr module on chromosome III is mediated by hybrids formed in trans on the YAC, then these hybrids should lead to a similar distribution of YAC loss and terminal deletion as hybrids induced in cis. Indeed hybrids induced in trans and in cis both lead to a similar distribution of YAC instability events; on average 85% are HIS− URA− (chromosome loss) and 15% are HIS+ URA− (putative terminal deletions). However, the total rate of YAC instability increased only 10-fold by hybrids formed in trans (from the YAC-GALpr module on chromosome III) compared to 25-fold by hybrids formed in cis. Thus, hybrid formation in trans may be less efficient than in cis. While we have assumed that HIS+ URA− clones of LW7003 reflect terminal deletions of the YAC, these clones may have had rearrangements that occurred by an indirect mechanism as a result of hybrid-induced double strand breaks in cis. 
In this model hybrids would form in cis at the module on chromosome III and cause DSBs there. These DSBs in cis would induce recombination between the YAC sequences on the broken chromosome III and the YAC, resulting in a chromosome III;YAC translocation that has the same genetic phenotype (HIS+ URA−) as YAC terminal deletions ( indirect mechanism, we performed pulse-field gel and Southern analysis on DNA isolated from 10 independent HIS+ URA− colonies of LW7003. Amongst the 10 YAC rearrangements analyzed, nine were shorter than the existing YAC, consistent with the formation of YAC terminal deletions ( Figure 6C). Only one rearrangement was the size expected if a chromosome III;YAC translocation had occurred. Thus the structure of most rearranged YACs in LW7003 is consistent with the formation of terminal deletions through the formation of hybrids in trans. These results further support our hypothesis that hybrids can form in trans by a Rad51p mechanism, causing chromosome instability at sites distinct from the site of hybrid RNA transcription. Enhancers and repressors of Rad51p modulate hybrid formation During homologous recombination, the activity of Rad51p is regulated by a number of factors that modulate Rad51p binding to ssDNA and dsDNA (;). Because of the importance of such accessory factors for Rad51p function, we wondered whether they might also help regulate Rad51 in hybrid formation. To test this, we deleted positive and negative regulators of Rad51-DNA filament formation. Rad52p is required for the binding of Rad51p to ssDNA ( Figure 7A; Song and Sung, 2000). Deletion of RAD52 (rad52) in our panel of transcriptional mutants completely suppressed hybrid staining, as assayed by chromosome spreads (Figure 7B, Figure 7-figure supplement 1). Note that we were unable to test suppression of YAC instability in the double mutants because the rad52 alone caused substantial hybrid-independent YAC instability, an expected result given its central role in many repair pathways. Nonetheless, the suppression of hybrid staining by rad52 suggests that hybrid formation is not simply a consequence of rogue activity by Rad51p but rather occurs as part of the canonical Rad51p repair pathway. A number of inhibitors of Rad51p have been identified. SRS2 is a helicase involved in removing Rad51p filaments formed on ssDNA (), and Rad54p and Rdh54p are two translocases that promote the removal of Rad51p from double-stranded DNA (). We wondered whether these inhibitors might help suppress the rogue hybrid-forming activity of the Rad51p pathway in wild-type cells. To test this we deleted SRS2, RAD54, and RDH54 from cells and measured hybrid formation and YAC instability. Neither single nor double deletions of RAD54 and RDH54 significantly increased hybrid formation or YAC instability ( Figure 8A,B, Figure 8-figure supplement 1). In contrast, deletion of SRS2 increased both hybrid staining and YAC instability. Both of these phenotypes of the srs2 were suppressed in the srs2 rad51 mutant ( Figure 8C). Thus, Srs2p antagonizes the hybrid-forming activity of the Rad51p pathway and represents another mechanism by which cells protect their genome against hybrid formation. The hybrid staining pattern in srs2 nuclei was reminiscent of the pattern observed in sin3 cells, exhibiting an apparent enrichment of RNA-DNA hybrids at the RDN locus on chromosome XII, the site of 150 tandem rDNA copies (). We measured rDNA instability by monitoring the rate of excision of a URA3 marker inserted at the RDN locus (). 
In srs2 cells, the rate of rDNA instability is twenty threefold greater compared to wild-type cells, a marked increase in instability as compared to the fourfold increase in YAC instability ( Figure 8C,D). Together these results suggest that Srs2p has a particularly important role in protecting the highly transcribed rDNA locus against Rad51p-dependent hybrid formation and repeat instability. Discussion In this study we describe a compelling series of observations that demonstrate an in vivo role for Rad51p in promoting formation of RNA-DNA hybrids. First, cytological detection of RNA-DNA hybrids in RNA biogenesis mutants is dramatically suppressed when RAD51 is deleted. Second, Rad51p is required for the associated hybrid-mediated instability of a YAC in the RNA biogenesis mutants. Third, Rad51p is required for the hybrid formation and YAC instability that results from galactose-induced transcription of specific YAC sequences in our hybrid model locus. Fourth, cofactors canonically known to regulate Rad51p binding and function also modulate hybrid formation. Removal of Rad52p, a positive regulator of Rad51p, blocks hybrid formation. Conversely, the removal of Srs2p, a negative regulator of Rad51 filament formation, increases both hybrid formation and genome instability in a Rad51p-dependent manner. Finally, Rad51p binds to the YAC at the site of hybrid formation proximal to the galactose promoter, in a transcription-dependent manner, prior to any evidence that DSBs have formed. Taken together these results strongly suggest that Rad51p plays a direct role in RNA-DNA hybrid formation. Establishing a role for Rad51p in the formation of RNA-DNA hybrids marks the first direct in vivo evidence of a factor that facilitates hybrid formation. Indications that a strand-exchange mechanism is important for hybrid formation were suggested by in vitro work on RecA, where RecA catalyzes assimilation of complementary RNA into a homologous region in duplex DNA (; Zaitsev and Kowalczykowski, 2000). Interestingly, previous in vivo studies showed that overexpressing Rad51p can compromise genome integrity (;). We suggest that this instability may result in part from the ability of Rad51p to promote RNA-DNA hybrid formation. This deleterious activity of Rad51p is also intriguing because a number of studies have shown that RAD51 expression is up-regulated in tumor cells. The increased expression is part of a coordinated up-regulation of DNA repair proteins in response to increased damage in cancerous cells (Zhou and Elledge, 2000). These high levels of Rad51p have been interpreted as evidence for Rad51p acting as a tumor suppressor by ensuring non-faulty repair of DNA damage. However, our results are consistent with a different interpretation: high levels of Rad51p may be oncogenic, driving hybrid-mediated genomic instability that promotes carcinogenesis. How can Rad51p promote hybrid formation? In the forward reaction, the conventional mechanism for Rad51p action, it forms a filament on ssDNA, finds a homologous region of dsDNA, and then catalyzes a strand exchange. By analogy, Rad51p may form a filament on RNA and promote its invasion of dsDNA. However, the bacterial studies of RecA suggest an alternative mechanism in which Rad51 catalyzes RNA-DNA hybrid formation through an inverse strand exchange reaction. In this case, RecA first binds to ssDNA in a gap, forms a filament on adjacent dsDNA, and then promotes pairing and exchange with complementary RNA. 
Each of these mechanisms has strengths and weaknesses to explain our current findings. For example, the forward reaction but not the inverse strand exchange predicts the association of Rad51p with the hybrid locus should depend upon the induction of the hybrid forming RNA, as we observe. Conversely, the DNA-based inverse strand reaction more easily explains the role of DNA-dependent Rad51p cofactors like Rad52p. Furthermore, in vitro RecA is unable to catalyze the forward reaction with RNA. Clearly an exciting future direction will be to determine the biochemical nature of Rad51p-mediated hybrid formation by either of these mechanisms or an alternative mechanism like stabilization of the R-loop by binding to the RNA-DNA hybrid or the extruded DNA strand ( Figure 9A-C). Here we show that Rad51p allows transcripts of YAC sequences generated on chromosome III to act in trans to form hybrids and cause hybrid-mediated instability of the YAC. Thus RNA molecules, with the aid of Rad51p can invade duplex DNA to form RNA-DNA hybrids at sites distinct from the site of hybrid RNA transcription. The ability of hybrids to form in trans forces a broadening of previous in vivo models for hybrid formation that only considered co-transcriptional mechanisms. The lower level of instability observed in trans compared to in cis implies that hybrid formation occurs less efficiently in trans. A lower efficiency might be expected since the RNA concentration at a cis site of hybrid formation will invariably be higher than at a trans site of hybrid formation. Indeed, an effect for RNA concentration on hybrid formation has been documented in the in vitro reactions with RecA (Zaitsev and Kowalczykowski, 2000). While we show that Rad51p is clearly required to mediate hybrid formation in trans, it is likely also important for hybrid formation in cis. However, the fact that rad51 has no effect on hybrid formation in the rnh1rnh201 mutant implies that alternative mechanisms for hybrid formation, presumably in cis, exist. Notably, hybrid formation in trans might be a more potent promoter of genome instability than hybrid formation in cis, particularly for hybrid RNA generated from a highly repetitive element, which in trans can cause instability at a plethora of targets. The formation of hybrids in trans has potential applications beyond genome instability. Intriguingly, recent studies in mammalian cells and budding yeast have developed a system for generating targeted DNA breaks using the CRISPR system, where target specificity is determined by a guide RNA complementary to the region of interest (;). It is unknown how the guide RNA finds and hybridizes to its homologous DNA region. From our work, it is clear that these two steps could be mediated through Rad51p. Additionally, positive roles for hybrid formation in modulating transcription state have been found in both human cells and fission yeast (;). In fission yeast, there is evidence that RNA-DNA hybrids may provide a platform for RNAi-mediated heterochromatin formation, driving transcriptional silencing (). If transcripts from one locus can induce hybrid formation and modulate heterochromatin formation at a homologous locus, this could provide a rapid mechanism for the silencing of repetitive elements-including transposable elements-in a genome. Cells have developed a number of mechanisms to keep RNA-DNA hybrids in check. We provide evidence for a novel Srs2p-dependent pathway that limits the formation of Rad51p-dependent hybrids. 
We show that deletion of SRS2 causes Rad51-dependent hybrid formation and YAC instability. Srs2p interacts directly with Rad51p, dismantling inappropriate Rad51p filaments from DNA (;). By removing Rad51p from DNA, Srs2p could potentially inhibit hybrid formation by a mechanism like the inverse strand exchange. Alternatively, Srs2p might be playing a direct role in dismantling RNA-DNA hybrids. Its bacterial homolog UvrD has been shown to catalyze the unwinding of RNA-DNA hybrids in vitro. The work reported here, coupled with previous studies, has revealed that cells possess at least four lines of defense against RNA-DNA hybrids: suppressing the deleterious hybrid-forming activity of Rad51p by Srs2p; suppressing the amount of RNA in the nucleus with hybridization potential through proper RNA biogenesis; unwinding RNA-DNA hybrids by helicases such as Sen1p; and degrading RNA in RNA-DNA hybrids by RNases H. The fact that hybrids form when any one of these anti-hybrid pathways is abrogated clearly indicates that they are not completely functionally redundant. The increased propensity for hybrids to form and cause instability at the rDNA locus in srs2 cells is particularly intriguing, as it may be that the abundance of rRNA, as well as homologous rDNA loci, makes this region a particularly good substrate for Rad51p-mediated hybrid formation. When Rad51p activity is no longer limiting because of inactivation of Srs2p, hybrid formation may overwhelm the anti-hybrid activities of RNases H or Sen1p. Alternatively, the RNases H or Sen1 may be occluded from the nucleolus, making the rDNA locus more susceptible to hybrid formation by elevated Rad51p activity. It will be exciting to map where hybrids form in RNase H, sen1-1 and srs2 mutants to elucidate whether these different anti-hybrid systems are dedicated to protect different regions of the genome. Materials and methods Yeast strains, media, and reagents Full genotypes for the strains used in this study are listed in Supplementary file 1A. Strain LW6811, the YAC-GALpr strain, was made by integrating the GAL1-10 promoter along with the selectable marker CLONAT at site 323,280 kb on the YAC. The trans YAC module in LW7003 encompasses 1 kb of the YAC, along with the GALpr and CLONAT marker integrated on chromosome III in place of the BUD5 (YCR038C) open reading frame. All integrations were done using standard one-step PCR techniques. The 70mers used for integration are listed in Supplementary file 1B. The empty control and RNase H plasmids used are 2 plasmids, previously described in. Yeast strains were grown in YEP or minimal media supplemented with 2% glucose. 5-Fluoroorotic (5-FOA) was purchased from BioVectra (Charlottetown, PE). Quantitative assay for YAC instability Cells were dilution streaked out on SC-URA plates to select for the YAC terminal marker (URA3). Single colonies were then picked and resuspended in 0.5 ml of water, diluted, and 10 5 cells were plated onto 5-FOA and -HIS 5-FOA plates. Plating efficiency was monitored by plating 200 cells onto rich media plates. Plates were incubated at 30°C for 3 d after which the number of colonies formed on each plate was counted. The number of colonies that grow on 5-FOA, normalized for plating efficiencies, is a measure of the rate of events. Liquid assay for YAC instability with galactose induction Cells were picked from SC-URA plates, resuspended in SC-URA media, and grown to saturation. 
Fresh YEP or -URA media with 2% lactic acid, 3% glycerol was inoculated to an optical density (OD) of ∼0.3, and allowed to double to an OD of ∼1.0. Galactose was then added to a final concentration of 2%. Cells were shaken at 30°C and then plated onto 5-FOA 0, 2 and 5 hr after induction with galactose. Plating efficiency was monitored by plating 200 cells onto rich media plates. Dot blotting with S9.6 antibody Genomic DNA was isolated using the Qiagen Genomic DNA kit (Qiagen, Hilden, Germany). Roughly 1 g of DNA was resuspended to a final volume of 50 l in nuclease-free water, and spotted directly onto a nylon GeneScreen Plus membrane (NEF988; PerkinElmer, Waltham, MA) using a Bio-Dot Microfiltration Apparatus (Bio-Rad, Hercules, CA). The membrane was UV-crosslinked and blocked with 5% 1 PBS/0.1% Tween-20 prior to incubation with primary and secondary antibodies. A 5 g aliquot of S9.6 antibody was used for the primary, and a 25,000 dilution of goat anti-mouse HRP (Bio-Rad) was used as the secondary. The HRP signal was developed with Clarity Western ECL Substrate (Bio-Rad) and exposed to autoradiography film. Quantitative reverse transcriptase PCR Total RNA was isolated using an RNeasy Mini Kit (Qiagen). Reverse transcriptase was carried out with specified primer pairs using the OneStep RT-PCR Kit (Qiagen) and quantified using SYBR Green (Invitrogen, Carlsbad, CA) and the DNA Engine Opticon Continuous Fluorescence Detection System (CMJ Research). DNA immunoprecipitation (DIP) DIP analysis was performed as previously described (;). Briefly, 150-200 g of genomic DNA isolated using the Qiagen Genomic DNA kit was sonicated, precipitated, and resuspended in 50 l of nuclease-free water. Then 350 l of FA buffer (1% Triton X-100, 0.1% sodium deoxycholate, 0.1% SDS, 50 mM HEPES, 150 mM NaCl, 1 mM EDTA) was added to the DNA, and incubated for 90 min with 5 g of S9.6 antibody prebound to magnetic protein A beads. Beads were then washed and the DNA eluted according to standard ChIP protocols. % RNA-DNA hybrid amounts were quantified using quantitative PCRs on DNA samples from DIP and total DNA with the DyNAmo HS SYBR Green qPCR kit (Thermo Scientific, Waltham, MA). Chromatin immunoprecipitation (ChIP) Cells used for ChIP experiments were grown in YEP media with 2% lactic acid, 3% glycerol and collected either before galactose was added (−Gal) or 2 hr after the addition of galactose at a final 2% concentration (+Gal). Standard ChIP was performed as described previously (). Briefly, 5 10 8 cells were crosslinked in 1% formaldehyde for 30 min at room temperature. Chromatin was sheared 20 times for 45 s each (settings at duty cycle: 20%, intensity: 10, cycles/burst: 200) with 30 s of rest in between using a Covaris S2. Immunoprecipitation of Rad51-HA or untagged Rad51p was done with anti-HA antibody (Roche, Mannheim, Germany) or anti-Rad51p polyclonal antibody (Santa Cruz, Dallas, TX). Immunoprecipitation of -H2a.X was done with anti--H2AX (Abcam, Cambridge, UK). A no primary antibody control is also run to ensure specificity. Appropriate dilutions of input and immunoprecipitated DNA samples were used for PCR analysis to ensure linearity of the PCR signal. PCR and data analysis was carried out as described previously (). With the exception of the experiment shown in Figure 3-figure supplement 1 which was carried out once, all experiments were done at least twice and a representative data set is shown. ChIP primers are listed in Supplementary file 1B. 
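The DIP and ChIP signals described above are quantified from qPCR cycle thresholds. The paper does not give the exact formula, so the following is only a generic percent-of-input sketch with hypothetical Ct values and an assumed 1% input fraction, not the authors' actual analysis script.

import math

def percent_input(ct_input, ct_ip, input_fraction=0.01):
    # Adjust the input Ct for the fraction of material saved as input
    # (1% input -> subtract log2(100) cycles), then express the IP signal
    # relative to 100% of input. Assumes roughly 100% primer efficiency.
    adjusted_input = ct_input - math.log2(1.0 / input_fraction)
    return 100.0 * 2.0 ** (adjusted_input - ct_ip)

# Hypothetical Ct values for a primer pair proximal to the GAL promoter.
print(f"{percent_input(ct_input=24.0, ct_ip=28.5):.3f} % of input")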
Pulse-field gel electrophoresis and Southern analysis Yeast genomic DNA was prepared in 1% pulse-field grade agarose plugs (SeaPlaque 50100) and resolved as previously described (Schwartz and Cantor, 1984) with a Bio-Rad CHEF-DR III system. The following parameters were used: 6 V/cm, 120° angle, 20-50 s switch times, 17 hr at 14°C. For Southern analysis, gels were transferred onto a GeneScreen Plus membrane (PerkinElmer NEF988) and probed with a 0.5 kb fragment containing HIS3 sequence. Quantitative assay for rDNA instability Cells were dilution streaked out on SC-URA. The rate of rDNA instability was calculated from 5-FOA plates as described above for YAC instability.
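The instability rates in these assays come down to simple colony arithmetic: 5-FOA-resistant colonies per cell plated, corrected for plating efficiency. The sketch below illustrates that calculation with hypothetical colony counts; the exact normalization used by the authors is not spelled out, so this is only one reasonable reading of the protocol.

def instability_rate(foa_colonies, cells_plated, rich_colonies, rich_cells_plated=200):
    # Fraction of plated cells that formed colonies on rich medium.
    plating_efficiency = rich_colonies / rich_cells_plated
    # 5-FOA resistant events per viable cell plated.
    return foa_colonies / (cells_plated * plating_efficiency)

# Hypothetical counts: 90 5-FOA colonies from 1e5 cells, 150/200 colonies on rich medium.
print(f"{instability_rate(90, 1e5, 150):.1e} events per cell")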
Immunotherapy plus chemotherapy versus chemotherapy alone in metastatic non-small-cell lung cancer: A systematic review with meta-analysis. e20700 Background: Palliative systemic therapy is the primary approach for stage IV non-small cell lung cancer (NSCLC). For patients with NSCLC that lacks targetable mutations, immunotherapy alone or in combination with chemotherapy has become a promising alternative, with a focus on survival and quality of life. Our objectives were to review, summarize and compare the evidence on first-line immunotherapy plus chemotherapy versus chemotherapy alone in patients with metastatic NSCLC in terms of effectiveness. Methods: A systematic review of randomized controlled trials (RCTs) was planned. PubMed, Embase and LILACS were searched for trials evaluating metastatic NSCLC patients, comparing chemotherapy alone versus chemotherapy plus anti-PD-1, anti-PD-L1 or anti-CTLA-4 agents. Four investigators independently extracted characteristics and results of identified studies and performed standardized quality ratings. Meta-analyses for overall survival (OS), progression-free survival (PFS), overall response rates (ORR) and toxicities were performed. Results: Six RCTs met the inclusion criteria. One trial with anti-PD-L1 (Atezolizumab), three trials with anti-PD-1 (Pembrolizumab) and two trials with anti-CTLA-4 (Ipilimumab) were included. Three trials included non-squamous carcinomas, two trials included squamous cell carcinoma and one trial included all NSCLC. The addition of anti-PD-1 or anti-PD-L1 to chemotherapy improved OS (hazard ratio [HR] for death, 0.62; 95% confidence interval [CI], 0.49 to 0.79; p < 0.0001). This combination also improved PFS (HR for progression or death, 0.57; 95% CI, 0.51 to 0.63; p < 0.00001) and ORR (odds ratio [OR], 2.55; 95% CI, 1.80 to 3.61; p < 0.00001). The addition of anti-CTLA-4 to chemotherapy slightly improved PFS (HR 0.84; 95% CI, 0.73 to 0.96; p = 0.01), but not OS (HR 0.92; 95% CI, 0.80 to 1.05; p = 0.21) or ORR (OR 0.92; 95% CI, 0.71 to 1.19; p = 0.52). General and immune-mediated adverse events were higher in all combination groups. Conclusions: In patients with previously untreated metastatic squamous and non-squamous NSCLC without EGFR or ALK mutations, the addition of anti-PD-1 or anti-PD-L1 to standard chemotherapy resulted in significantly longer overall survival and progression-free survival than chemotherapy alone.
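For readers unfamiliar with how pooled hazard ratios such as those above are produced, the standard approach is inverse-variance weighting of the log hazard ratios. A fixed-effect version is sketched below using hypothetical per-trial HRs and confidence intervals, not the data of the six included trials.

import numpy as np

# Hypothetical per-trial hazard ratios with 95% CIs (illustration only).
hr = np.array([0.59, 0.64, 0.49])
ci_low = np.array([0.47, 0.49, 0.38])
ci_high = np.array([0.74, 0.84, 0.64])

log_hr = np.log(hr)
se = (np.log(ci_high) - np.log(ci_low)) / (2 * 1.96)  # SE from the CI width
w = 1.0 / se ** 2                                     # inverse-variance weights

pooled = np.sum(w * log_hr) / np.sum(w)
pooled_se = np.sqrt(1.0 / np.sum(w))
print("pooled HR = %.2f (95%% CI %.2f to %.2f)"
      % (np.exp(pooled), np.exp(pooled - 1.96 * pooled_se), np.exp(pooled + 1.96 * pooled_se)))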
package com.gamecodeschool.c24platformer;

interface EngineController {
    // This allows the GameState class to start a new level
    void startNewLevel();
}
import { IsNotEmpty, IsOptional } from 'class-validator';

export class AuthLoginByOauthGoogleReqDto {
  @IsNotEmpty()
  idToken: string;

  @IsOptional()
  accessToken: string;
}
Analysis of Decision-Feedback Based Broadband OFDM Systems In wireless communications, about 25% of the bandwidth is dedicated to training symbols for channel estimation. By using a semi-blind approach, the training sequence length can be reduced while improving performance. The principle is as follows: the detected symbols (hard decisions) are fed back to the channel estimator in order to re-estimate the channel more accurately. However, a semi-blind approach can significantly degrade performance if the bit error rate is high. In this paper, we propose to determine analytically the minimum signal-to-noise ratio (SNR) above which a semi-blind method starts to outperform a system based only on training sequences.
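To make the decision-feedback principle concrete, the sketch below shows one common formulation for a single OFDM symbol: a least-squares estimate from a sparse pilot grid is refined by hard-deciding the data symbols and re-estimating the channel from them. The pilot spacing, QPSK constellation, channel length and noise level are assumptions for illustration only, not the system analyzed in the paper.

import numpy as np

rng = np.random.default_rng(0)
n_sub = 64                                   # subcarriers in one OFDM symbol
taps = (rng.normal(size=4) + 1j * rng.normal(size=4)) / np.sqrt(8)  # short channel
h_true = np.fft.fft(taps, n_sub)             # smooth frequency response

qpsk = np.exp(1j * np.pi / 4 * np.array([1, 3, 5, 7]))
x = qpsk[rng.integers(0, 4, n_sub)]          # transmitted QPSK symbols
noise = 0.1 * (rng.normal(size=n_sub) + 1j * rng.normal(size=n_sub))
y = h_true * x + noise                       # received frequency-domain samples

pilots = np.arange(0, n_sub, 8)              # sparse pilot grid (assumed)
ls = y[pilots] / x[pilots]                   # LS estimate at pilot positions
k = np.arange(n_sub)
h_est = np.interp(k, pilots, ls.real) + 1j * np.interp(k, pilots, ls.imag)

# Decision feedback: hard-decide every symbol, then re-estimate the channel.
x_hat = qpsk[np.argmin(np.abs((y / h_est)[:, None] - qpsk[None, :]), axis=1)]
h_refined = y / x_hat                        # per-subcarrier re-estimate

print("MSE pilots-only:", np.mean(np.abs(h_est - h_true) ** 2))
print("MSE with feedback:", np.mean(np.abs(h_refined - h_true) ** 2))

When the initial estimate is poor enough that many hard decisions are wrong, the re-estimate degrades instead of improving, which is exactly the SNR threshold behaviour the paper sets out to characterize.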
Walter Neale Walter Neale (fl. 1618–1639) was an English military officer and an explorer and colonial administrator in the territory of New England that later became New Hampshire. Born into a family that had served Queen Elizabeth I, Neale served in military campaigns in Europe from 1618 until about 1625. In 1629 he was hired by Sir Ferdinando Gorges and John Mason. Gorges and Mason, who between them claimed most of the territory north of the mouth of the Merrimack River, formed the Laconia Company to explore the interior, and they hired Neale to do this exploration, as well as to administer Mason's "lower" plantations on the Piscataqua River (on the coastline of present-day New Hampshire). In 1630 Neale arrived at Piscataqua. His administration of the lower plantations was marred by boundary disagreements with Thomas Wiggin, who administered the upper plantations on the river. He led exploratory expeditions as far as the White Mountains in the interior of New Hampshire, but never located the "Lake of the Iroquois" that his employers believed to exist. He returned to England in 1633; the Laconia Company venture failed. In 1634 King Charles I gave him command of the artillery park in London, and he was soon after appointed muster master of the city militia. He sought but did not obtain a military post in the Colony of Virginia. His last known posting is as lieutenant governor of Plymouth in 1639.
/*
 * Allocate and free memory for module loading.  The loaded module
 * has to be placed somewhere near the current kernel binary load
 * point or the relocations will not work.
 *
 * I'm not sure why this isn't working.
 */
int
vkernel_module_memory_alloc(vm_offset_t *basep, size_t bytes)
{
#if 1
	size_t xtra;

	xtra = (PAGE_SIZE - (vm_offset_t)sbrk(0)) & PAGE_MASK;
	*basep = (vm_offset_t)sbrk(xtra + bytes) + xtra;
	bzero((void *)*basep, bytes);
#else
	*basep = (vm_offset_t)mmap((void *)0x000000000, bytes,
				   PROT_READ|PROT_WRITE|PROT_EXEC,
				   MAP_ANON|MAP_SHARED, -1, 0);
	if ((void *)*basep == MAP_FAILED)
		return ENOMEM;
#endif
	return 0;
}
// SummonEngine/Physics/Include/PhysicsPlane.h
#pragma once
#include <Plane.hpp>
#include <Vector3.hpp>

namespace physx
{
	class PxRigidStatic;
}
class PhysicsMaterial;

class PhysicsPlane
{
public:
	PhysicsPlane();
	~PhysicsPlane();

	void Init(const CU::Vector3f& aNormal, const float aDistance, PhysicsMaterial* aMaterial = nullptr);
	void Init(const CU::Vector3f& aPoint, const CU::Vector3f& aNormal, PhysicsMaterial* aMaterial = nullptr);
	void Init(const CU::Vector3f& aPoint0, const CU::Vector3f& aPoint1, const CU::Vector3f& aPoint2, PhysicsMaterial* aMaterial = nullptr);

	void AddToScene();
	void RemoveFromScene();

private:
	physx::PxRigidStatic* myPhysicsObject;
	PhysicsMaterial* myMaterial;
};
After a long wait, Cardi B has finally announced an official release date for her debut album, and it's sooner than you think. Bartier Cardi is releasing her "Invasion of Privacy" album on April 6. She took to Instagram to make the announcement, which most likely has fans raving and breathing a sigh of relief that the day has finally come. Cardi first announced that her album was on the way after her big win at the iHeartRadio awards last month, where she won Best New Artist and Best Hip-Hop Artist. The stripper-turned-rapper said that her album would be out in April but did not provide an exact date, leaving eager fans skeptical. Since the release of "Bodak Yellow," Cardi has been the most talked-about rapper in the game. She took Taylor Swift out of the number one spot with the now 5x-platinum hit song and was nominated for two Grammy Awards, neither of which she took home. The album tracklist has not been released, but the follow-up to "Bodak Yellow" was the 21 Savage-assisted track "Bartier Cardi," which recently earned the Bronx rapper a gold plaque to add to her ever-expanding resume. Cardi seemed to grow increasingly frustrated with the process of making this album, constantly posting exhausted videos from the studio on her Instagram story and seeming (at times) less outgoing in interviews. In a recent interview with Cosmopolitan, Cardi said that she feels "exhausted" and also mentioned that she felt her "spirit was happier" before her rap career took off. Still, Cardi pushed through and is ready for her Atlantic Records debut. The Grammy-nominated rapper is scheduled to perform at Coachella next month, which should give her new album some stronger legs, and is also scheduled to tour with her "Finesse" collaborator and fellow Atlantic Records artist Bruno Mars this summer on the 24K Magic Tour.
from telegram.ext import CommandHandler, run_async

from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot import LOGGER, dispatcher
from bot.helper.telegram_helper.message_utils import auto_delete_message, sendMessage
from bot.helper.telegram_helper.filters import CustomFilters
import threading
from bot.helper.telegram_helper.bot_commands import BotCommands


@run_async
def list_drive(update, context):
    message = update.message.text
    search = message.split(' ', maxsplit=1)[1]
    LOGGER.info(f"Searching: {search}")
    gdrive = GoogleDriveHelper(None)
    msg = gdrive.drive_list(search)
    if msg:
        reply_message = sendMessage(msg, context.bot, update)
    else:
        reply_message = sendMessage('No result found', context.bot, update)
    threading.Thread(target=auto_delete_message,
                     args=(context.bot, update.message, reply_message)).start()


list_handler = CommandHandler(BotCommands.ListCommand, list_drive,
                              filters=CustomFilters.authorized_chat | CustomFilters.authorized_user)
dispatcher.add_handler(list_handler)
#include<bits/stdc++.h> #define mem(a,b) memset(a,b,sizeof(a)) #define MAX 100010 #define LL __int64 using namespace std; struct NODE { LL v, c; }nodes[MAX]; int n, p; int vis[MAX]; LL max1[MAX]; LL ans[2]; int no[2]; int main() { LL a, b; while(~scanf("%d%d",&n,&p)) { for(int i = 1; i <= n; i ++) { scanf("%I64d",&nodes[i].v); } for(int i = 1; i <= n; i ++) { scanf("%I64d",&nodes[i].c); } while(p --) { scanf("%I64d%I64d",&a,&b); mem(vis,-1); int cnt = 0; ans[cnt] = 0; no[cnt ++] = 0; for(int i = 1; i <= n; i ++) { int c = nodes[i].c; LL max2 = -100000000000000000LL; if(cnt == 1) { max2 = b * nodes[i].v; max1[c] = max2; vis[c] = 1; if(ans[0] < max2) { ans[1] = ans[0]; no[1] = no[0]; ans[0] = max2; no[0] = c; } else { ans[1] = max2; no[1] = c; } cnt ++; } else { if(no[0] == c) { max2 = max(max2, ans[1] + b * nodes[i].v); max2 = max(max2, ans[0] + a * nodes[i].v); if(max2 > max1[c]) { ans[0] = max2; max1[c] = max2; } } else { max2 = max(max2, ans[0] + b * nodes[i].v); if(vis[c] != -1) { max2 = max(max2, max1[c] + a * nodes[i].v); if(max2 > max1[c]) { max1[c] = max2; } } else { max1[c] = max2; } if(max2 >= ans[0]) { ans[1] = ans[0]; no[1] = no[0]; ans[0] = max2; no[0] = c; } else if(max2 > ans[1]) { ans[1] = max2; no[1] = c; } vis[c] = 1; } } } printf("%I64d\n",ans[0]); } } return 0; }
Adenovirus infection in two calves. Two 2-week-old calves with pyrexia, diarrhea, and subsequent dehydration were treated symptomatically for 1 week, but without favorable response. At necropsy, foci of necrosis were found in the abomasum and rumen of each calf. The small and large intestines were dilated by grayish, turbid fluid. Numerous large amphophilic, intranuclear inclusions in endothelial cells of blood vessels in the abomasum and rumen, in endothelial cells of the adrenal cortical sinusoids and renal glomeruli, and in intestinal epithelial cells were identified as adenovirus particles.
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). package org.rocksdb; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.util.*; import static org.assertj.core.api.Assertions.assertThat; public class OptionsUtilTest { @ClassRule public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = new RocksNativeLibraryResource(); @Rule public TemporaryFolder dbFolder = new TemporaryFolder(); enum TestAPI { LOAD_LATEST_OPTIONS, LOAD_OPTIONS_FROM_FILE } @Test public void loadLatestOptions() throws RocksDBException { verifyOptions(TestAPI.LOAD_LATEST_OPTIONS); } @Test public void loadOptionsFromFile() throws RocksDBException { verifyOptions(TestAPI.LOAD_OPTIONS_FROM_FILE); } @Test public void getLatestOptionsFileName() throws RocksDBException { final String dbPath = dbFolder.getRoot().getAbsolutePath(); try (final Options options = new Options().setCreateIfMissing(true); final RocksDB db = RocksDB.open(options, dbPath)) { assertThat(db).isNotNull(); } String fName = OptionsUtil.getLatestOptionsFileName(dbPath, Env.getDefault()); assertThat(fName).isNotNull(); assert(fName.startsWith("OPTIONS-") == true); // System.out.println("latest options fileName: " + fName); } private void verifyOptions(TestAPI apiType) throws RocksDBException { final String dbPath = dbFolder.getRoot().getAbsolutePath(); final Options options = new Options() .setCreateIfMissing(true) .setParanoidChecks(false) .setMaxOpenFiles(478) .setDelayedWriteRate(1234567L); final ColumnFamilyOptions baseDefaultCFOpts = new ColumnFamilyOptions(); final byte[] secondCFName = "new_cf".getBytes(); final ColumnFamilyOptions baseSecondCFOpts = new ColumnFamilyOptions() .setWriteBufferSize(70 * 1024) .setMaxWriteBufferNumber(7) .setMaxBytesForLevelBase(53 * 1024 * 1024) .setLevel0FileNumCompactionTrigger(3) .setLevel0SlowdownWritesTrigger(51) .setBottommostCompressionType(CompressionType.ZSTD_COMPRESSION); // Create a database with a new column family try (final RocksDB db = RocksDB.open(options, dbPath)) { assertThat(db).isNotNull(); // create column family try (final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(new ColumnFamilyDescriptor(secondCFName, baseSecondCFOpts))) { assert(columnFamilyHandle != null); } } // Read the options back and verify DBOptions dbOptions = new DBOptions(); final List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>(); String path = dbPath; if (apiType == TestAPI.LOAD_LATEST_OPTIONS) { OptionsUtil.loadLatestOptions(path, Env.getDefault(), dbOptions, cfDescs, false); } else if (apiType == TestAPI.LOAD_OPTIONS_FROM_FILE) { path = dbPath + "/" + OptionsUtil.getLatestOptionsFileName(dbPath, Env.getDefault()); OptionsUtil.loadOptionsFromFile(path, Env.getDefault(), dbOptions, cfDescs, false); } assertThat(dbOptions.createIfMissing()).isEqualTo(options.createIfMissing()); assertThat(dbOptions.paranoidChecks()).isEqualTo(options.paranoidChecks()); assertThat(dbOptions.maxOpenFiles()).isEqualTo(options.maxOpenFiles()); assertThat(dbOptions.delayedWriteRate()).isEqualTo(options.delayedWriteRate()); assertThat(cfDescs.size()).isEqualTo(2); assertThat(cfDescs.get(0)).isNotNull(); assertThat(cfDescs.get(1)).isNotNull(); assertThat(cfDescs.get(0).getName()).isEqualTo(RocksDB.DEFAULT_COLUMN_FAMILY); 
assertThat(cfDescs.get(1).getName()).isEqualTo(secondCFName); ColumnFamilyOptions defaultCFOpts = cfDescs.get(0).getOptions(); assertThat(defaultCFOpts.writeBufferSize()).isEqualTo(baseDefaultCFOpts.writeBufferSize()); assertThat(defaultCFOpts.maxWriteBufferNumber()) .isEqualTo(baseDefaultCFOpts.maxWriteBufferNumber()); assertThat(defaultCFOpts.maxBytesForLevelBase()) .isEqualTo(baseDefaultCFOpts.maxBytesForLevelBase()); assertThat(defaultCFOpts.level0FileNumCompactionTrigger()) .isEqualTo(baseDefaultCFOpts.level0FileNumCompactionTrigger()); assertThat(defaultCFOpts.level0SlowdownWritesTrigger()) .isEqualTo(baseDefaultCFOpts.level0SlowdownWritesTrigger()); assertThat(defaultCFOpts.bottommostCompressionType()) .isEqualTo(baseDefaultCFOpts.bottommostCompressionType()); ColumnFamilyOptions secondCFOpts = cfDescs.get(1).getOptions(); assertThat(secondCFOpts.writeBufferSize()).isEqualTo(baseSecondCFOpts.writeBufferSize()); assertThat(secondCFOpts.maxWriteBufferNumber()) .isEqualTo(baseSecondCFOpts.maxWriteBufferNumber()); assertThat(secondCFOpts.maxBytesForLevelBase()) .isEqualTo(baseSecondCFOpts.maxBytesForLevelBase()); assertThat(secondCFOpts.level0FileNumCompactionTrigger()) .isEqualTo(baseSecondCFOpts.level0FileNumCompactionTrigger()); assertThat(secondCFOpts.level0SlowdownWritesTrigger()) .isEqualTo(baseSecondCFOpts.level0SlowdownWritesTrigger()); assertThat(secondCFOpts.bottommostCompressionType()) .isEqualTo(baseSecondCFOpts.bottommostCompressionType()); } }
import { modelToGroupVersionKind, PersistentVolumeClaimModel, } from '@kubevirt-ui/kubevirt-api/console'; import DataVolumeModel from '@kubevirt-ui/kubevirt-api/console/models/DataVolumeModel'; import VirtualMachineSnapshotModel from '@kubevirt-ui/kubevirt-api/console/models/VirtualMachineSnapshotModel'; import { V1beta1DataVolume } from '@kubevirt-ui/kubevirt-api/containerized-data-importer/models'; import { IoK8sApiCoreV1PersistentVolumeClaim } from '@kubevirt-ui/kubevirt-api/kubernetes/models'; import { V1alpha1VirtualMachineSnapshot, V1VirtualMachine, } from '@kubevirt-ui/kubevirt-api/kubevirt'; import { getVolumes } from '@kubevirt-utils/resources/vm'; import { useK8sWatchResource } from '@openshift-console/dynamic-plugin-sdk'; type UseDeleteVMResources = (vm: V1VirtualMachine) => { dataVolumes: V1beta1DataVolume[]; pvcs: IoK8sApiCoreV1PersistentVolumeClaim[]; snapshots: V1alpha1VirtualMachineSnapshot[]; loaded: boolean; error: any; }; const useDeleteVMResources: UseDeleteVMResources = (vm) => { const dvVolumesNames = (getVolumes(vm) || []) .filter((volume) => volume?.dataVolume) ?.map((volume) => volume?.dataVolume?.name); const pvcVolumesNames = (getVolumes(vm) || []) .filter((volume) => volume?.persistentVolumeClaim) ?.map((volume) => volume?.persistentVolumeClaim?.claimName); const namespace = vm?.metadata?.namespace; const [dataVolumes, dataVolumesLoaded, dataVolumesLoadError] = useK8sWatchResource< V1beta1DataVolume[] >({ isList: true, groupVersionKind: modelToGroupVersionKind(DataVolumeModel), namespaced: true, namespace, }); const filteredDataVolumes = dataVolumes?.filter((dv) => dvVolumesNames?.includes(dv?.metadata?.name), ); const [pvcs, pvcsLoaded, pvcsLoadError] = useK8sWatchResource< IoK8sApiCoreV1PersistentVolumeClaim[] >({ isList: true, groupVersionKind: modelToGroupVersionKind(PersistentVolumeClaimModel), namespaced: true, namespace, }); const filteredPvcs = pvcs?.filter((pvc) => pvcVolumesNames?.includes(pvc?.metadata?.name)); const [snapshots, snapshotsLoaded, snapshotsLoadError] = useK8sWatchResource< V1alpha1VirtualMachineSnapshot[] >({ isList: true, groupVersionKind: modelToGroupVersionKind(VirtualMachineSnapshotModel), namespaced: true, namespace, }); return { dataVolumes: filteredDataVolumes, pvcs: filteredPvcs, snapshots: snapshots?.filter( (snapshot) => snapshot?.metadata?.ownerReferences?.some((ref) => ref?.name === vm?.metadata?.name) || snapshot?.spec?.source?.name === vm?.metadata?.name, ), loaded: snapshotsLoaded && dataVolumesLoaded && pvcsLoaded, error: snapshotsLoadError || dataVolumesLoadError || pvcsLoadError, }; }; export default useDeleteVMResources;
Analytic energy gradients in combined second-order Møller-Plesset perturbation theory and conductorlike polarizable continuum model calculation. The analytic energy gradients in combined second-order Møller-Plesset perturbation theory and conductorlike polarizable continuum model calculations are derived and implemented for spin-restricted closed shell (RMP2), Z-averaged spin-restricted open shell (ZAPT2), and spin-unrestricted open shell (UMP2) cases. Using these methods, the geometries of the S0 ground state and the T1 state of three nucleobase pairs (guanine-cytosine, adenine-thymine, and adenine-uracil) in the gas phase and aqueous solution phase are optimized. It is found that in both the gas phase and the aqueous solution phase the hydrogen bonds in the T1 state pairs are weakened by ~1 kcal/mol as compared to those in the S0 state pairs.
// Example of a window-reshape function and a display list
#include <GL/glut.h>
#include <math.h>
#include <stdlib.h>

const double TWO_PI = 6.2831853;

GLsizei winWidth = 400, winHeight = 400;
GLuint regHex;

static void init(void) {
    GLint xhexVertex = 0, yhexVertex = 0;
    GLint xcircCtr = 0, ycircCtr = 0;
    GLdouble theta;
    GLint k;

    xcircCtr = winWidth / 2;
    ycircCtr = winHeight / 2;

    glClearColor(1.0, 1.0, 1.0, 0.0);

    // Compile a display list containing a red regular hexagon centered in the window.
    regHex = glGenLists(1);
    glNewList(regHex, GL_COMPILE);
        glColor3f(1.0, 0.0, 0.0);
        glBegin(GL_POLYGON);
        for (k = 0; k < 6; k++) {
            theta = TWO_PI * k / 6.0;
            xhexVertex = xcircCtr + 150 * cos(theta);
            yhexVertex = ycircCtr + 150 * sin(theta);
            glVertex2i(xhexVertex, yhexVertex);
        }
        glEnd();
    glEndList();
    //gluOrtho2D(0.0, (GLdouble) winWidth, 0.0, (GLdouble) winHeight);
}

void regHexagon(void) {
    glClear(GL_COLOR_BUFFER_BIT);
    glCallList(regHex);
    glFlush();
}

void winReshapeFunc(GLint newWidth, GLint newHeight) {
    glViewport(0, 0, (GLsizei) newWidth, (GLsizei) newHeight);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0.0, winWidth, 0.0, winHeight);
    // glClear(GL_COLOR_BUFFER_BIT);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}

int main(int argc, char **argv) {
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
    glutInitWindowPosition(100, 100);
    glutInitWindowSize(winWidth, winHeight);
    glutCreateWindow("0003");
    init();
    glutDisplayFunc(regHexagon);
    glutReshapeFunc(winReshapeFunc);
    glutMainLoop();
}
GEOCHEMICAL CHARACTERS OF TRACE ELEMENTS IN THE WULIPU LOESS-PALEOSOIL SECTION, SHANXI PROVINCE Studies on the trace element composition of the Wulipu loess-paleosoil section, Shanxi province, show that the concentration of these elements in the paleosoil differs from that in the loess. This difference means that these elements show different geochemical behaviours during weathering and pedogenesis. It is suggested that the elemental composition of the loess-paleosoil varied as a result of paleoclimate changes, so these elements can be used as climate proxies. Higher Pb, Cu, Zn, Cd, Cr, Ni, Co, Mn or lower Rb, Sr in the paleosoil indicates a warm and wet climate and luxuriant vegetation. Lower Pb, Cu, Zn, Cd, Cr, Ni, Co, Mn or higher Rb, Sr in the loess indicates a dry and cold climate. The Holocene climate was not continuously warm in the southern part of the Loess Plateau, and the fluctuating development of the climate is generalized as: aeolian dust deposit (L t) → palaeosoil (S 2 0) → loess (L 2 0) → palaeosoil (S 1 0) → loess (L 1 0) → formation of the modern surface soil.
Asynchronous presentation of follicle center lymphoma and reactive lymphoid hyperplasia of the ocular adnexa. Although ocular lymphoproliferative diseases may be rarely encountered by dermatopathologists, the frequency may increase particularly as more and more subspecialties rely on dermatopathology services. Emerging data suggest that there are some similarities between ocular and cutaneous lymphoproliferative diseases with respect to their clinical behavior, prognosis, and cytogenetics. In this case report, a patient with ocular follicle center lymphoma who subsequently developed ocular reactive lymphoid hyperplasia is presented with an accompanying review of the literature on the subjects. The encounter of both follicular center lymphoma and reactive lymphoid hyperplasia in the same patient provides a rare opportunity to compare and contrast the clinical, histological, and immunohistochemical findings of the respective lymphoproliferative diseases situated at opposite ends of the spectrum.
@protocol A @end @interface A @end @interface B : A<A> @end
Activity of fludarabine in previously treated Waldenström's macroglobulinemia: a report of 71 cases. Groupe Coopératif Macroglobulinémie. PURPOSE There is no consensus on the treatment of patients with Waldenström's macroglobulinemia (WM) who develop primary or secondary resistance to frontline therapies. We report our experience on the activity and toxicity of fludarabine in 71 patients with WM resistant to prior chemotherapy regimens. PATIENTS AND METHODS From January 1991 to June 1995, 71 patients were included in this retrospective study. The median age, median time from diagnosis to treatment, median immunoglobulin M (IgM) level, and median number of previous treatments were 68 years (range, 42 to 81), 5.9 years (range, 0.6 to 20), 35 g/L (range, 5 to 126), and two (range, one to four), respectively. RESULTS Seventy-one patients received a median of six courses of fludarabine. Twenty-one (30%) responded with a partial response and 50 (70%) were considered as treatment failures. Forty-six patients died: 10 in the responder group and 36 in the failure group. Twenty-five patients were alive with a median follow-up time of 34 months. The overall median survival time of all treated patients was 23 months. The time to treatment failure was 32 months. The only factor that favorably influenced the response to fludarabine was a longer interval between the first treatment and the start of fludarabine. Pretreatment factors associated with shorter survival in the entire population were hemoglobin level less than 95 g/L (P = .02) and platelet count less than 75 x 10(9)/L (P = .02). CONCLUSION The response rate in this population with a poor prognosis is close to that reported in shorter series. Patients with WM who are resistant to alkylating agents should be identified early, so that salvage therapy with nucleoside analogs can be started without delay.
#import <Foundation/Foundation.h>

@interface IntroModel : NSObject

@property (nonatomic, strong) NSString *titleText;
@property (nonatomic, strong) NSString *descriptionText;
@property (nonatomic, strong) NSString *imageName;
@property (nonatomic, strong) NSString *_type;
//@property (nonatomic, strong) UIImage *image;

- (id)initWithTitle:(NSString *)title description:(NSString *)desc image:(NSString *)imageText type:(NSString *)type;

@end
// src/main/java/resource/utils/JsonUtil.java
package resource.utils;

import com.fasterxml.jackson.databind.ObjectMapper;

import resource.AccountTransfer;

public class JsonUtil {

    private static ObjectMapper mapper = new ObjectMapper();

    public static String toJson(AccountTransfer transfer) throws Exception {
        return mapper.writeValueAsString(transfer);
    }

    public static AccountTransfer fromJson(String json) throws Exception {
        return mapper.readValue(json, AccountTransfer.class);
    }
}
LOS ANGELES (MarketWatch) -- Since Congress squeezed out two tax bills at the very end of 2007, we already know the 2008 filing season will start with some delays. Did you know that there were a total of seven tax acts passed in 2007? Add that to all the changes to the Internal Revenue Code that affect 2007 tax returns, dating back to the Economic Growth Relief and Tax Relief Reconciliation Act of 2001, and this tax season is going to be a doozy. It just started, with IRS announcing a clamp-down on the predatory practices of certain purveyors of refund anticipation loans. A little-noticed provision of the Taxpayer Protection Act of 2007 gives the IRS the authority to prohibit the payment of taxpayer refunds to any refund anticipation loan business whose business practices are predatory. But don't worry about the scarcity of RALs. If you e-file and have your refund deposited into your bank account, your money will come to you swiftly enough. What can you do now since there's no rush to file early? It's time to pull out and review some of your prior-year returns. As a result of a recent IRS ruling and legislation, you just might have refunds coming. You can use Form 1040X to file your amended returns. For instance, payments under the Department of Veterans Affairs Compensated Work Therapy program are no longer taxable and disabled veterans who paid tax on these benefits in the past three years can now claim refunds. This is the result of a U.S. Tax Court decision that IRS decided was meaningful enough to reverse a 42-year policy. To claim deductions for these fringe benefits write "Filed Pursuant to Revenue Ruling 2007-69" at the top of each amended return. Another example of a change: Shareholders of S corporations may be able to amend their past three years' returns if they didn't use the self-employed health insurance deduction as an adjustment to income. Under a recent IRS notice, if the stated conditions are met, qualifying premiums for the entire family's accident and health insurance premiums are deductible. The premiums must be included in the shareholder's wages, but they are not subject to Social Security and Medicare taxes, as under the previous policy. The policy must be an employee policy, covering all employees, not just shareholders. (Note: Often shareholders are the only employees.) The corporation must be paying the premiums on the plan directly, or it must be issuing reimbursement checks to the shareholder/employee. The shareholder/employee cannot simply pay it out of pocket without having been reimbursed. IRS had been denying these deductions if the health policies were not issued in the name of the corporation. To claim deductions for these fringe benefits write "Filed Pursuant to Notice 2008-1" at the top of each amended return. Update your mailing address with all your 2007 employers, clients and financial institutions if you've moved or changed postal boxes. You don't want to miss any W-2s and 1099s heading your way. Your current and former employers and 1099 providers must mail them out by Jan. 31. It's time to review your paycheck withholding so it reflects the income and deductions you expect to have for 2008. If you need to increase or decrease your withholding, file a new Form W-4 with your payroll department. Remember to file a copy for your state, too, if that needs to be changed. What would trigger a need to make changes? Marriage, divorce, a new home, more money going into your retirement account. 
Speaking of paying taxes, it's time for the fourth installment of your 2007 estimated tax payments. If you expect to owe less than $1,000 on April 15, you can skip it. Otherwise, be sure you pay enough to cover 90% of your 2007 tax liability or 100% of the tax shown on your 2006 tax return. Use Form 1040-ES, voucher 4. If you use the IRS site to get your forms, be careful to select the 2007 form. The 2008 forms are already up. Those calculators will help with more than your estimated taxes. Print out that information for your student loan applications, too. Most schools and colleges have February and March filing deadlines. Start now! Rather than scrambling at the last minute, sort out your finances now and start drafting up your answers. The Free Application for Federal Student Aid (FAFSA) site (http://www.fafsa.ed.gov/) will let you enter and save information as you go along. You can edit the data as you get better information. Don't worry. The information doesn't get submitted until you sign the application electronically and transmit it. Remember to get your ex-spouse's or absent parent's information or signature to complete it. For those folks who are planning to ask a tax professional for help filling it in, let them know immediately so they can set aside some time. Those folks who filed "exempt" on their 2007 W-4s need to turn in a 2008 W-4 by Feb. 15. It seems the IRS wants you to confirm that you still won't owe any taxes in 2008. This is typical for people who won't earn much during the year, or whose itemized deductions and other deductions tend to be high enough to offset all their wages. Exemption from withholding does not universally apply to all students. That's a myth. Many students are shocked to learn when they finish their return that they earn enough income to end up owing taxes. Good news, though. The IRS no longer requires your company's payroll department to rat you out if you claim more than 10 exemptions. IRS won't bother you as long as you pay your taxes in full by the time you file your tax return, within the filing deadlines, of course. Do you get to deduct your vehicle expenses, either as an employee or business owner? Great! Mark down your odometer readings for the beginning of the year. This is a good time to start tracking your business mileage for the year. Also, charitable givers, remember to get receipts for all your charitable donations. Your cancelled check will not suffice. Be sure the receipt includes the name and address of the charity, the date and amount of your donation, and the phrase "no goods or services were received" or the value of the goods or services you did receive. It wouldn't hurt if the document also included the organization's taxpayer identification number. Call them now and be a pest. Some charities and religious organizations don't yet understand their obligations on this score. Every January, we all make resolutions to get organized and keep better records, along with the promises to diet, get more exercise, stop smoking, or clean up your home or office. If you're only going to keep one of your resolutions this year, keep this one: Resolve to keep meticulous records. It will pay off so handsomely in tax savings that you'll want to do it again next year. Eva Rosenberg is the founder of TaxMama.com and an enrolled agent licensed to represent taxpayers before the IRS. She is the author of the new book, "Small Business Taxes Made Easy." Reach her at taxwatch@gmail.com. Eva Rosenberg is a tax columnist for MarketWatch. 
You can follow her on Twitter @TaxMama.
The Relationship Between a Specific IgE Level and Asthma Outcomes: Results From the 2005-2006 National Health and Nutrition Examination Survey WD Arroyave, FA Rabito, JC Carlson. J Allergy Clin Immunol Practice. 2013;1:501-508 The goal of this study was to examine the relationship between specific IgE (sIgE) to indoor allergens and asthma outcomes as defined according to emergency department (ED) visits and wheeze. Subjects included all participants in the 2005-2006 National Health and Nutrition Examination Survey who reported having current asthma. There were 351 children <17 years of age and 390 adults. Total and sIgE levels to 19 allergens were obtained on survey participants in the 2005-2006 National Health and Nutrition Examination Survey cycle,
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "reflect" "regexp" "testing" "github.com/aws/amazon-ec2-instance-selector/v2/pkg/cli" "k8s.io/kops/pkg/apis/kops" ) func TestGetFilters(t *testing.T) { commandline := cli.New("test", "test", "test", "test", nil) commandline.Flags = map[string]interface{}{ flexible: true, allowList: regexp.MustCompile(".*"), denyList: regexp.MustCompile("t3.nano"), } filters := getFilters(&commandline, "us-east-2", []string{"us-east-2a"}) if !*filters.Flexible { t.Fatalf("Flexible should be true") } if filters.AllowList == nil || filters.DenyList == nil { t.Fatalf("allowList and denyList should not be nil") } } func TestGetInstanceSelectorOpts(t *testing.T) { count := int32(1) outputStr := "json" commandline := cli.New("test", "test", "test", "test", nil) commandline.Flags = map[string]interface{}{ instanceGroupCount: count, nodeCountMax: count, nodeCountMin: count, nodeVolumeSize: &count, nodeSecurityGroups: []string{"sec1"}, output: outputStr, dryRun: true, clusterAutoscaler: true, } instanceSelectorOpts := &InstanceSelectorOptions{} setInstanceSelectorOpts(instanceSelectorOpts, &commandline) if instanceSelectorOpts.NodeCountMax != count || instanceSelectorOpts.NodeCountMin != count || *instanceSelectorOpts.NodeVolumeSize != count || len(instanceSelectorOpts.NodeSecurityGroups) != int(count) || instanceSelectorOpts.InstanceGroupCount != int(count) { t.Fatalf("node count max/min, volume size, and count of secrurity groups should return %d", count) } if instanceSelectorOpts.Output != outputStr { t.Fatalf("Output should be %s but got %s", outputStr, instanceSelectorOpts.Output) } if !instanceSelectorOpts.DryRun || !instanceSelectorOpts.ClusterAutoscaler { t.Fatalf("dryRun and clusterAutoscaler should be true, got false instead") } } func TestValidateAllPrivateOrPublicSubnets(t *testing.T) { userSubnets := []string{"utility-us-east-2a", "utility-us-east-2b", "utility-us-east-2c"} err := validateAllPrivateOrPublicSubnets(userSubnets) if err != nil { t.Fatalf("should have passed validation since all user subnets were utility- subnets") } userSubnets = []string{"us-east-2a", "us-east-2b", "us-east-2c"} err = validateAllPrivateOrPublicSubnets(userSubnets) if err != nil { t.Fatalf("should have passed validation since all user subnets were private subnets") } userSubnets = []string{"utility-us-east-2a", "utility-us-east-2b", "us-east-2c"} err = validateAllPrivateOrPublicSubnets(userSubnets) if err == nil { t.Fatalf("should have failed validation since one zone is not a utility- subnet") } } func TestValidateUserSubnetsWithClusterSubnets(t *testing.T) { clusterSubnets := []kops.ClusterSubnetSpec{ { Name: "us-east-2a", }, { Name: "us-east-2b", }, } userSubnets := []string{"us-east-2a", "us-east-2b"} err := validateUserSubnetsWithClusterSubnets(userSubnets, clusterSubnets) if err != nil { t.Fatalf("should have passed since userSubnets and clusterSubnets match") } clusterSubnets = []kops.ClusterSubnetSpec{ { Name: "us-east-2a", }, { Name: 
"us-east-2b", }, } userSubnets = []string{"us-east-2a"} err = validateUserSubnetsWithClusterSubnets(userSubnets, clusterSubnets) if err != nil { t.Fatalf("should have passed since userSubnets are a subset of clusterSubnets") } clusterSubnets = []kops.ClusterSubnetSpec{ { Name: "us-east-2a", }, { Name: "us-east-2b", }, } userSubnets = []string{"us-east-2c"} err = validateUserSubnetsWithClusterSubnets(userSubnets, clusterSubnets) if err == nil { t.Fatalf("should have failed since userSubnets are not a subset of clusterSubnets") } } func TestCreateInstanceGroup(t *testing.T) { zones := []string{"us-east-2a", "us-east-2b", "us-east-2c"} actualIG := createInstanceGroup("testGroup", "clusterTest", zones) if actualIG.Spec.Role != kops.InstanceGroupRoleNode { t.Fatalf("instance group should have the \"%s\" role but got %s", kops.InstanceGroupRoleNode, actualIG.Spec.Role) } if !reflect.DeepEqual(actualIG.Spec.Subnets, zones) { t.Fatalf("instance group should have all the zones passed in but got %s", actualIG.Spec.Subnets) } } func TestDecorateWithInstanceGroupSpecs(t *testing.T) { count := int32(1) instanceGroupOpts := &InstanceSelectorOptions{ NodeCountMax: count, NodeCountMin: count, NodeVolumeSize: &count, NodeSecurityGroups: []string{"sec-1", "sec-2"}, } actualIG := decorateWithInstanceGroupSpecs(&kops.InstanceGroup{}, instanceGroupOpts) if *actualIG.Spec.MaxSize != instanceGroupOpts.NodeCountMax { t.Fatalf("expected instance group MaxSize to be %d but got %d", instanceGroupOpts.NodeCountMax, actualIG.Spec.MaxSize) } if *actualIG.Spec.MinSize != instanceGroupOpts.NodeCountMin { t.Fatalf("expected instance group MinSize to be %d but got %d", instanceGroupOpts.NodeCountMin, actualIG.Spec.MinSize) } if *actualIG.Spec.RootVolumeSize != *instanceGroupOpts.NodeVolumeSize { t.Fatalf("expected instance group RootVolumeSize to be %d but got %d", instanceGroupOpts.NodeVolumeSize, actualIG.Spec.RootVolumeSize) } if !reflect.DeepEqual(actualIG.Spec.AdditionalSecurityGroups, instanceGroupOpts.NodeSecurityGroups) { t.Fatalf("expected instance group MaxSize to be %d but got %d", instanceGroupOpts.NodeCountMax, actualIG.Spec.MaxSize) } } func TestDecorateWithMixedInstancesPolicy(t *testing.T) { selectedInstanceTypes := []string{"m3.medium", "m4.medium", "m5.medium"} usageClasses := []string{"spot", "on-demand"} for _, usageClass := range usageClasses { actualIG, err := decorateWithMixedInstancesPolicy(&kops.InstanceGroup{}, usageClass, selectedInstanceTypes) if err != nil { t.Fatalf("decorateWithMixedInstancesPolicy returned an error: %v", err) } if actualIG.Spec.MixedInstancesPolicy == nil { t.Fatal("MixedInstancesPolicy should not be nil") } if !reflect.DeepEqual(actualIG.Spec.MixedInstancesPolicy.Instances, selectedInstanceTypes) { t.Fatalf("Instances in MixedInstancePolicy should match selectedInstanceTypes: actual: %v expected: %v", actualIG.Spec.MixedInstancesPolicy, selectedInstanceTypes) } if usageClass == "spot" && *actualIG.Spec.MixedInstancesPolicy.SpotAllocationStrategy != "capacity-optimized" { t.Fatal("Spot MixedInstancePolicy should use capacity-optimizmed allocation strategy") } } } func TestDecorateWithClusterAutoscalerLabels(t *testing.T) { initialIG := kops.InstanceGroup{} clusterName := "testClusterName" actualIG := decorateWithClusterAutoscalerLabels(&initialIG, clusterName) if _, ok := actualIG.Spec.CloudLabels["k8s.io/cluster-autoscaler/enabled"]; !ok { t.Fatalf("enabled cloudLabel for cluster autoscaler should have been added to the instance group spec") } if _, ok := 
actualIG.Spec.CloudLabels["k8s.io/cluster-autoscaler/"+clusterName]; !ok { t.Fatalf("cluster cloudLabel for cluster autoscaler should have been added to the instance group spec") } }
<reponame>flowgrammable/freeflow-legacy // Copyright (c) 2013-2014 Flowgrammable, LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an "AS IS" // BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express // or implied. See the License for the specific language governing // permissions and limitations under the License. namespace freeflow { // -------------------------------------------------------------------------- // // Error /// Construct an error value that signifies success. inline Error::Error() : code_(), data_() { } /// Construt an error value with the given code. Additiional data /// may also be associated with the error. inline Error::Error(Code c, Data d) : code_(c), data_(d) { } /// Allows contextual conversion to bool, returning true if and only /// if there is no error (i.e., the underlying code has value 0). inline Error::operator bool() const { return !code_; } /// Returns the error category. inline const Error_category& Error::category() const { return code_.category(); } /// Returns the error code. inline Error::Code Error::code() const { return code_; } /// Returns associated error data. inline Error::Data Error::data() const { return data_; } inline std::string Error::message() const { return code_.message(); } /// Returns true when two errors have the same error code. inline bool operator==(Error a, Error b) { return a.code() == b.code(); } inline bool operator!=(Error a, Error b) { return not(a == b); } // -------------------------------------------------------------------------- // // Error constructors /// Returns an error condition based on a predicate. /// /// If the condition b is true, the resulting error condition will evaluate /// to SUCCESS. Otherwise, the condition will evaluate to the code c. inline Error ok(bool b, Error err) { return b ? Error() : err; } /// Returns an error code that encapsulates the current system error. inline Error system_error() { return system_error(errno); } /// Returns an error value corresponding to the given system error. 
inline Error system_error(int n) { return make_error_code(static_cast<std::errc>(n)); } // -------------------------------------------------------------------------- // // Expected value namespace impl { template<typename T> inline Expected_value<T>::Expected_value(const T& x) : value(x) { } template<typename T> inline Expected_value<T>::Expected_value(T&& x) : value(std::move(x)) { } template<typename T> inline Expected_value<T>::Expected_value(Error e) : error(e) { } } template<typename T> inline Expected<T>::Expected(const Expected& x) : which_(x.which_) { construct(x); } template<typename T> inline Expected<T>& Expected<T>::operator=(const Expected& x) { destroy(); which_ = x.which_; init(x); return *this; } template<typename T> inline Expected<T>::Expected(Expected&& x) : which_(x.which_) { construct(std::move(x)); } template<typename T> inline Expected<T>& Expected<T>::operator=(Expected&& x) { destroy(); which_ = x.which_; init(std::move(x)); return *this; } template<typename T> inline Expected<T>::Expected(const T& x) : which_(SUCCESS), data_(x) { } template<typename T> inline Expected<T>::Expected(T&& x) : which_(SUCCESS), data_(std::move(x)) { } template<typename T> inline Expected<T>::Expected(Error e) : which_(FAILURE), data_(e) { } /// Allows contextual conversion to bool. Returns true when the expected /// value is not an error. template<typename T> inline Expected<T>::operator bool() const { return which_; } /// Get the expected value. Behavior is undefined if this object indicates /// an error. template<typename T> inline const T& Expected<T>::get() const { assert(which_); return data_.value; } /// Move the expected value from this object. Behavior is undefined if /// this object indicates an error. template<typename T> inline T&& Expected<T>::take() { assert(which_); return std::move(data_.value); } template<typename T> inline Error Expected<T>::error() const { assert(!which_); return data_.error; } template<typename T> inline void Expected<T>::construct(const Expected& x) { if (which_) new (&data_.value) T(x.data_.value); else new (&data_.error) Error(x.data_.error); } template<typename T> inline void Expected<T>::construct(Expected&& x) { if (which_) new (&data_.value) T(std::move(x.data_.value)); else new (&data_.error) Error(std::move(x.data_.error)); } template<typename T> inline void Expected<T>::destroy() { if (which_) data_.value.~T(); else data_.value.~Error(); } // -------------------------------------------------------------------------- // // Trap /// Initialzie the trap with an error. This allows implicit conversions /// to trap objects. inline Trap::Trap(Error e) : err_(e) { } /// Allows contextual conversion to bool, returning true if and only if /// the underlying error does not indicate success. inline Trap::operator bool() const { return (bool)err_.code(); } inline Trap::operator Error() const { return err_; } /// Returns the underlying error. inline Error Trap::error() const { return err_; } /// Returns the error code. inline Error::Code Trap::code() const { return err_.code(); } /// Returns the error data. inline Error::Data Trap::data() const { return err_.data(); } } // namespace freeflow
A Dynamic Cooperative Monitor Node Selection Algorithm in Wireless Mesh Networks Wireless Mesh Network (WMN) is developed as an emerging key solution for wireless broadband connectivity through a flexible and cost-effective way. However, due to the lack of a physical line of defense, the security in such a network is a big concern. Intrusion Detection System (IDS) is considered as one of the most effective security mechanisms in WMN. Traditionally, IDS is operated on the monitor nodes that cooperatively identify and assess intrusions by analyzing their collected network data. Therefore, most IDS mechanisms need to choose a large number of monitor nodes for good rapidity and accuracy, which increases overhead and degrades network performance. In this paper, we propose a dynamic cooperative monitor node selection scheme based on social network analysis to solve the above issue. In the scheme, we choose the monitor nodes according to their influence in the network, which is evaluated by the relationship among the nodes and the analysis of mutual information. Then, the nodes with similar information are divided into the same monitor area. Simulation results show that the proposed scheme can effectively reduce network cost compared with the traditional node section schemes.
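The abstract above describes choosing monitor nodes by their influence in the network and grouping nodes with similar information into monitor areas. The paper's exact influence metric and mutual-information analysis are not reproduced here; the following Python sketch only illustrates the general shape of such a scheme under stated assumptions, using betweenness centrality as a stand-in influence score and hop distance as a stand-in grouping criterion. All function names and the random topology are hypothetical.

```python
# Illustrative sketch only: rank candidate monitor nodes by a simple
# "influence" score (betweenness centrality) and group the remaining
# nodes under their hop-wise nearest monitor. This is NOT the paper's
# mutual-information-based method, just a minimal stand-in.
import networkx as nx

def select_monitors(graph: nx.Graph, num_monitors: int):
    """Pick the num_monitors most central nodes as cooperative monitors."""
    influence = nx.betweenness_centrality(graph)   # proxy for node influence
    ranked = sorted(influence, key=influence.get, reverse=True)
    return ranked[:num_monitors]

def assign_monitor_areas(graph: nx.Graph, monitors):
    """Assign every node to its hop-wise closest monitor (its monitor area)."""
    areas = {m: [] for m in monitors}
    lengths = {m: nx.single_source_shortest_path_length(graph, m) for m in monitors}
    for node in graph.nodes:
        nearest = min(monitors, key=lambda m: lengths[m].get(node, float("inf")))
        areas[nearest].append(node)
    return areas

if __name__ == "__main__":
    # Small random mesh-like topology for demonstration.
    mesh = nx.random_geometric_graph(30, radius=0.3, seed=42)
    monitors = select_monitors(mesh, num_monitors=4)
    areas = assign_monitor_areas(mesh, monitors)
    for m, members in areas.items():
        print(f"monitor {m}: covers {len(members)} nodes")
```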
Extending a parallel CLP language to support the development of multi-agent systems An extension of the parallel constraint logic programming language ElipSys is presented. This extension is directed towards the development of multi-agent systems that have to deal with large combinatorial problems that are distributed in nature. Problems of this kind, after being decomposed into subproblems, may be tackled efficiently by individual agents using ElipSys' powerful mechanisms, such as parallelism and constraint satisfaction techniques. The proposed extension supports the communication requirements of the agents so that they can cooperate and solve the original combinatorially intensive problem. The communication scheme among the agents is viewed as a three-layered model. The first layer is socket-oriented, the second realizes a blackboard architecture, and the third supports virtual point-to-point interaction among the agents.
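To make the layering concrete, here is a minimal, in-process sketch of the middle layer. This is not ElipSys code: it is a hedged Python toy showing how a blackboard layer can sit on top of a transport (a thread-safe store stands in for the socket layer) and how point-to-point messages can be expressed as tagged blackboard entries. All class and method names are invented for illustration.

```python
# Toy blackboard layer: agents post and read tuples through a shared store.
# A real system would place this on top of a socket layer (layer 1); here a
# thread-safe in-process dictionary stands in for that transport.
import threading
from collections import defaultdict

class Blackboard:
    """Layer 2: shared tuple space that agents write to and read from."""
    def __init__(self):
        self._lock = threading.Lock()
        self._entries = defaultdict(list)   # key -> list of posted values

    def post(self, key, value):
        with self._lock:
            self._entries[key].append(value)

    def read(self, key):
        with self._lock:
            return list(self._entries[key])

class Agent:
    """Layer 3: virtual point-to-point messages as tagged blackboard entries."""
    def __init__(self, name, blackboard):
        self.name = name
        self.bb = blackboard

    def send(self, recipient, message):
        self.bb.post(("msg", recipient), (self.name, message))

    def inbox(self):
        return self.bb.read(("msg", self.name))

if __name__ == "__main__":
    bb = Blackboard()
    solver_a, solver_b = Agent("A", bb), Agent("B", bb)
    solver_a.send("B", "subproblem 1 solved: x in 3..5")
    print(solver_b.inbox())   # [('A', 'subproblem 1 solved: x in 3..5')]
```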
Regionalization of neonatal intensive care in Korea In the current era of low-birth rate in Korea, it is important to improve our neonatal intensive care and to establish an integrative system including a regional care network adequate for both high-risk pregnancies and high-risk newborn infants. Therefore, official discussion for nation-wide augmentation, proper leveling, networking, and regionalization of neonatal and perinatal care is urgently needed. In this report, I describe the status of neonatal intensive care in Korea, as well as nationwide flow of transfer of high-risk newborn infants and pregnant women, and present a short review of the regionalization of neonatal and perinatal care in the Unites States and Japan. It is necessary not only to increase the number of neonatal intensive care unit (NICU) beds, medical resources and manpower, but also to create a strong network system with appropriate leveling of NICUs and regionalization. A systematic approach toward perinatal care, that includes both high-risk pregnancies and newborns with continuous support from the government, is also needed, which can be spearheaded through the establishment of an integrative advisory board to propel systematic care forward. Introduction The fertility rate in Korea has reduced to 1.149 per woman of childbearing age in 2009, which is one of the lowest in the world 1). This low birth rate is emerging as one of the nation's most serious problems in Korea. Recently, the average age of mothers giving birth has significantly increased due to social and economic changes, and the fertility rate is dropping with this increasing maternal age 2). In addition, the increased use of recently developed assisted reproductive technologies has led to a simultaneous increase in multiple and preterm births 3). Providing adequate neonatal intensive care for high-risk newborns is essential for decreasing their mortality and morbidity, which ultimately reduces the future costs for their morbidity of whole nation. and efficiently, in a timely manner. To deliver efficient perinatal and neonatal intensive care, the governments have built a network through nationwide regionalization. The neonatal intensive care unit (NICU) in Korea has been developed since the 1980s and there has been much progress until now. However, there is still a lack of a large number of NICU beds. Moreover, sufficient official investment and discussion for achieving a welldistributed nationwide network and regionalization has not been undertaken yet. Recently, the Committee for Data Collection and Statistics in the Society of Korean Neonatology has undertaken the survey for the nationwide status of NICU in 2010 with the support of the Management Center for Health Promotion 4). Here, a short review of the status of neonatal care in developed countries, including the US and Japan, and the current condition for networking and regionalization of neonatal intensive care in Korea are described. Status and regionalization of neonatal intensive care in the US In the US, more than 30 years earlier than in Korea, in 1970s, management and regionalization of not only neonatal intensive care, but also perinatal care, including high-risk pregnancies, was systemized and established 5). 
The system focused on decreasing perinatal morbidity and mortality, along with providing high quality of care for high-risk pregnancies and high-risk newborns by the most efficient use of facilities and equipment, as well as highly trained personnel, and maximizing the effect of medical resource investments 6). In 1975, the perinatal and neonatal management was classified into 3 levels under the motto "Toward Improving Outcome of Pregnancy (TIOP)" (Committee on Perinatal Health, 1975) 5 Quantitative and qualitative improvement in NICUs, as well as NICU regionalization, resulted in integrative improvement of neonatal intensive care around the nation. In 1993, TIOPII (Toward Improving Outcome of Pregnancy II, The 90s and Beyond) was launched to emphasize the importance of integrated and regionalized care system and classify the previous classification in more detail (Committee on Perinatal Health, 1993) 8,9). The levels of perinatal (Table 1) 9) and intensive neo natal care treatment recently used in US are as following (Table 2) 10). Newborn transfer system holds a great value in the regionalized me dical system. Transfer between levels I, II and III neonatal care centers is an important tool used to effectively distribute medical resources around the region. Transfer team is composed of nurses, respiratory treatment personnel, neonatal care doctors, residents or by other personnel, and ambulances, helicopters and fixed-wing aircrafts were being used as transport vehicles 11). Following the introduction of a centralized neonatal transfer service, response times improved significantly 12). Status and regionalization of neonatal intensive care in Japan Neonatal care in Japan was supported by the government early on. Medical support for premature infants and reporting system for lowbirth weight infants were introduced in 1958. In 1979, parts of obstetric and pediatric departments were combined as perinatal care system and operated regionalized perinatal medical centers based on the "Regionalization of neonatal care to improve newborn death rate" project. In 1984, Maternal Fetal Intensive Care Unit was set up as perinatal care units. In 1991, "Doctor Car" was first introduced to operate ambulances that focus on transfer of high-risk newborns between the medical centers. In 1995, Maternal and Child Health Law was revised and from the following year, one general perinatal care medical center per 1 million began to be built around the nation. These medical centers have been maintained and supported until today. A total of 242 regional perinatal care centers and 77 integrated general maternal-fetal medical centers now serve as the backbone of Japan's perinatal care. Three central (super) perinatal care units were established in the most populated regions of Tokyo to deal with the highest-risk pregnancies and neonates from the entire nation 13). A total of 80 million Yens (Japanese currency) (1 billion KRW ) for regional centers, 140 million Yens (1.8 billion KRW) for integrated general centers, and 190 million Yens (2.5 billion KRW) for central centers are being supported by the government 14). According to the Ministry of Health and Welfare in Japan, the Japanese government allocated 1.7 billion Yens (17.9 billion KRW) for the care of premature infants and 2.3 billion Yens (24.4 billion KRW) for perinatal care. The support and network for perinatal care is still increasing in Japan 15). 
The death rate of low-birth weight newborns has been decreasing for the past 30 years in Japan due to continuous and systemic efforts driven All of the regional operating NICU and NICU beds were reduced, except in the Gyeongsangdo and Ulsan areas, as shown in Table 3. Shortage of regional beds and medical resources of NICU There were 444,849 total live births in 2009, in Korea 1). Comparison of the number of regional NICU beds to the total number of regional live births is shown in Table 4. Number of NICU beds are 0.3 to 5.3 per 1,000 live births, depending on the region, with large variability. Seoul has the highest number of beds and Jollanamdo has the lowest number of beds. Large cities such as Seoul, Daejeon, Busan, and Gwangju have more than 3.2 beds per 1,000 live births. However, the number of beds is very small in regions neighboring the 4 areas Gyeonggido, Chungcheongnamdo, Gyeongsangnamdo, and Jollanamdo. When the regions are classified based on larger geographical features, or areas (Table 5), variability in the number of NICU beds are slightly decreased, to 1.8 to 3.2 per 1,000 live births, depending on the area. It can be expected that high-risk newborns are being transferred to neighboring regions. For instance, Seoul may be used to compensate for the required beds in the Gyeonggido region. On the scope of medical resources, Gyeongsangbukdo, Ulsan, and Jollanamdo, still have less than 0.3 mechanical ventilators per NICU bed (Table 5), and only 1 or 0 neonatologists (Table 6), as well as the lowest number of NICU beds per 1000 live births amongst all the national regions. Therefore, those regions do not have enough medical resources to provide adequate emergency neonatal intensive care. According to the national survey report for 2006 18) and 2010 4 18), the 2010 survey showed markedly decreased operating NICUs and NICU beds throughout the nation. Although a few NICUs and NICU beds were added or expanded due to regional demands, with or without support from the government, such as the project of "Regional NICU expansion" and "Regional Children's Hospital" of the Ministry of Health and Welfare, a considerable number of NICUs have been closed or cut in size over the last 5 years ( Table 3). The reasons behind this reduction were manifold. First, the inability of the hospital running the NICU to meet the requirement of legal NICU classification and standards for the facility and manpower enacted in 2006, thereby closing their NICU or reducing the number of beds. Secondly, a reduction in the number of obstetrical staff, which led to a decrease in deliveries, and a lack of medical staff available to treat high-risk newborns in NICU also led to the decrease in of functioning NICU beds. to 3.4 neonatal intensive care beds are needed per 1,000 live births, and 4.1 to 4.5 beds per 1,000 live births are needed, including the operation of supplementary beds. Therefore, more than 1.0 bed per 1,000 live births is needed in all of the regions, and even more in larger areas. Therefore, it is required to increase the number of beds and medical resources, including medical equipment and manpower, nationwide with proper allocation for the establishment of regionalization and network for neonatal care. Table 7 shows the flow of 929 neonatal patients in July 2010, re ported in the 2010 survey 4). When the nation is divided into 16 regions by cities and provinces, 749 newborns (80.6%) were transferred within the region and 180 newborns (19.4%) were transferred out of region. 
Therefore, approximately 20% of the high-risk newborns were transferred out of the region. If the nation was divided into 8 integral areas by grouping: Seoul, Incheon, and Gyunggido as Region 1; Gangwondo as region 2; Chungcheongbukdo, Chungcheongnamdo, and Daejeon as Region 3; Daegu and Gyeongsangbukdo as Region 4; Jollabukdo as Region 5; Gwangju and Jollanamdo as Region 6; Gyeongsangnamdo, Busan, and Ulsan as Region 7; and Jejudo as Region 8. Except the transfer to Seoul from all other in tegral regions, all of the transfers were made within each integral area. Therefore, in the future, regionalization of neonatal intensive care has to be set up by establishment of integral area NICU centers with careful consideration of proximity of neighboring regions, availability of traffic routes, and effective use of available beds. It is also evident that there are special needs for transfer of high-risk newborns, because of the need for highly-selective specialized surgery or high-end spe cialized care, or of lack of availability of regional NICU beds for superemergent cases. Like super perinatal care centers in Tokyo, Japan, we need to create national integrative care centers to serve the purpose of high-end and super-emergency neonatal care, and a final referral center. Since very large NICUs that handle more than 100 very-low birth weight newborn infants are located in Seoul, rather than to expand NICU beds in the Seoul area, it is advisable to set up or use big centers in Seoul as the final referral super center for the entire nation. Moreover, it is important to create a network with other regional and more integrative area NICUs around the nation. There is a need to classify the level of neonatal intensive care units in Korea, like in the US. Therefore, it is needed to effectively distribute the medical resources for neonatal care through proper leveling, regionalization and networking of neonatal care in Korea. Table 8 shows the transfer flow of high-risk pregnancies during 2 weeks of July 2010, reported in the 2010 survey 4). A total of 400 high-risk pregnant women were transferred to other hospitals and 83 patients (21%) were transferred out of region. Among 447 transfer contacts, 311 patients (69.6%) were actually accepted in the first hospital/ medical center to which they were requested for being transferred; however, 127 patients were denied their transfer in the first contact to transfer. The reason for denying the transfer was, 50% due to lack of NICU beds, 29.3% due to unavailability of mechanical venti lator in the NICU, 7.1% due to difficulties in surgery and special care, and 5.0% due to lack of medical personnel resources in NICU and delivery room. The transfer flow of high-risk pregnant women was very similar to transfer flow of high-risk newborns. Approximately 20% of the patients were transferred out of region and 70% of the patients were accepted on the first hospital/medical center to which Total Seoul 216 8 49 0 0 1 3 0 0 1 0 0 1 0 0 1 216 64 280 Incheon 0 0 Transfer within region 56 2 3 12 5 1 1 0 3 0 Total 151 49 17 22 41 25 34 10 50 1 317 83 400 they were requested for being transferred whereas the remaining 30% were wandering to find a hospital for their care. Since most of the denials for transfer were due to lack of medical resources in the NICU, management of high-risk pregnancies and high-risk newborns cannot be separated from one another. 
A perinatal approach, that includes both high-risk pregnancies and newborns, is needed when establishing the policies for their care. Conclusion Neonatal intensive care for high-risk newborns is a major health issue that should be supported by the government, as discussed in the cases of the US and Japan. Neonatal intensive care policies of advanced countries may be summarized as follows. First, the policies accommodate perinatal care and transport systems, including high-risk pregnant women and high-risk newborns. Second, the policies level and regionalize the high-risk newborn infant care system using expert groups and instating clear legislation. Third, there is continuous support from the government for systematically constructing highrisk neonatal and perinatal care systems. Fourth, there is an effective distribution and use of medical resources, by establishment of a clear transfer system and regionalization. In Korea, it is evident that there is a shortage of NICU beds and medical resources nationwide, with varying degrees across regions, and a lack of systemic perinatal and neonatal regional networking. In the recent analysis on transferred high-risk newborns and pregnant women, about 20% were transferred beyond their location, while most of them were transferred in the vicinity of their original location. Seoul tended to have patients transferred from all the parts of the country. As most high-risk newborn infants are transferred within the neighboring region, regional care centers should be upgraded, and there seems to be a need to bolster a few national central centers in Seoul for taking care of patients transferred from other regional medical centers, to provide challenging high-end treatments. The main cause for the transfer of high-risk pregnant women was the deficits of beds and medical resources of NICU, which were the same deficits that applied to high-risk newborn transfers. The success rate of transfers was only about 70%. Ultimately, to complete the health care system of highrisk newborns in the era of low-birth rates, an absolute increase in the number of NICU beds, along with proper equipment, facilities, and manpower, and the completion of proper transfer system through regionalization and leveling of NICUs are needed. Further, a systematic approach toward perinatal care that includes both high-risk pregnancies and newborns is needed, as well as the establishment of an integrative advisory board to drive the momentum of systematic approach. Establishment of regionalized perinatal centers, including those that can accommodate high-risk pregnancies and neonatal intensive care through a nationwide network will result in improving the outcome of high-risk newborns. Such a system will help improve the health and save the lives of thousands of newborns and provide an effective solution for low-birth rate issues in Korea.
Q: Importing feature class into ArcGIS Server feature dataset in geodatabase without stopping service? I have a feature dataset which holds data published through ArcGIS Server 10.3.1 and I need to import new datasets into the feature dataset but cannot due to locks. Do I really need to stop all services which read data from this feature dataset to import a new feature class or is there another way? A: In the 'Parameters' tab, disable the 'Lock Database Schema' setting (for each of the services that access the feature class(es) in the feature dataset). This is on by default, and of course you will need to consider if this may adversely affect anything that consumes the service(s) (some applications could get confused by schema changes). Of course, you do have to save and restart the service when you change this setting. But that's a one-off restart for each service and enables ongoing multiple schema changes without having to stop the service again. Documentation for this setting is at: http://enterprise.arcgis.com/en/server/latest/administer/windows/disabling-schema-locking-on-a-map-service.htm I always disable this setting on all of my map services. It's just too much of a headache to manage even a trivially dynamic GIS schema otherwise. The first thing I did when I started my current job was to disable this setting on all services. The only other way to avoid this problem is to avoid using feature datasets. Sometimes you need to use them (eg, for geometric networks, etc). But don't get tempted to use them just to organise feature classes into groups.
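If the same setting has to be flipped on many services, the ArcGIS Server Administrator REST API can be scripted instead of editing each service by hand. The sketch below is an assumption-laden outline using Python's requests: the admin URLs, the token workflow, and in particular the "schemaLockingEnabled" property name should be verified against your server's version and documentation before use, and the server name and credentials shown are placeholders.

```python
# Hedged outline only: bulk-disable schema locking via the ArcGIS Server
# Administrator REST API. Assumptions to verify for your installation:
# the admin endpoints, token-based authentication, and the
# "schemaLockingEnabled" service property.
import json
import requests

ADMIN = "https://gisserver.example.com:6443/arcgis/admin"   # hypothetical server

def get_token(username, password):
    # Admin token endpoint; self-signed certs are common on admin ports,
    # hence verify=False here (tighten this in production).
    resp = requests.post(f"{ADMIN}/generateToken",
                         data={"username": username, "password": password,
                               "client": "requestip", "f": "json"},
                         verify=False)
    return resp.json()["token"]

def disable_schema_lock(folder, service, token):
    svc_url = f"{ADMIN}/services/{folder}/{service}.MapServer"
    cfg = requests.post(svc_url, data={"f": "json", "token": token}, verify=False).json()
    cfg.setdefault("properties", {})["schemaLockingEnabled"] = "false"
    # Saving through /edit restarts the service once, as noted in the answer above.
    saved = requests.post(f"{svc_url}/edit",
                          data={"service": json.dumps(cfg), "f": "json", "token": token},
                          verify=False)
    return saved.json()

if __name__ == "__main__":
    tok = get_token("siteadmin", "********")          # placeholder credentials
    print(disable_schema_lock("MyFolder", "Parcels", tok))
```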
//========================================================================= // // CBaseCharCommand: CreateAndInsert // // Synopsis: Creates and inserts the specified element // //------------------------------------------------------------------------- HRESULT CBaseCharCommand::CreateAndInsert(ELEMENT_TAG_ID tagId, IMarkupPointer *pStart, IMarkupPointer *pEnd, IHTMLElement **ppElement) { HRESULT hr; SP_IHTMLElement spElement; IFR( GetMarkupServices()->CreateElement(tagId, NULL, &spElement) ); IFR( GetEditor()->InsertElement(spElement, pStart, pEnd) ); if (ppElement) { *ppElement = spElement; (*ppElement)->AddRef(); } RRETURN(hr); }
Fast joint bit-allocation between texture and depth maps for 3D video coding
This paper presents a fast joint texture/depth bit-allocation method for 3D video (3DV) coding. Compared with previous bit-allocation schemes, in which a pre-encoding pass is required to determine model parameters, the proposed scheme performs joint bit allocation in real time without any pre-encoding. As a result, scene-change detection for adapting the model parameters is also unnecessary. Simulation results show that the proposed scheme is comparable to the time-consuming full-search or model-based approaches, while the additional complexity of the bit allocation is almost negligible.
Brian Howard allowed two hits over seven innings, and Durbin Feltman pitched scoreless eighth and ninth innings as TCU won Game 3 of an NCAA Super Regional series 4-1 Sunday night at Blue Bell Park, pushing the Frogs to the College World Series for a third consecutive season.

TCU improved to 9-8 in Super Regional games and 4-1 in Super Regional Game 3s with Sunday’s 4-1 victory against Texas A&M. All-time, the Horned Frogs are 43-32 in the NCAA postseason.

TCU will open at 2 p.m. Sunday in Omaha, Neb., against Big 12 regular-season champion Texas Tech, which also won its Super Regional in three games.

The Frogs might not have guessed it at the start of the year. They had lost their infield from a year ago, started without their most experienced pitcher and center fielder/leadoff hitter because of injury, and had six newcomers in the lineup, including a freshman left fielder and designated hitter. Plus, the closer and three other starting pitchers had been drafted.

“Obviously, the standard is to go to Omaha. But for this team, going down there, with the adversity we’ve faced, we’ve done some pretty amazing things.”

TCU closed the regular season with five straight wins and has won 14 of 16 games overall, including five of six in the NCAA postseason. The win at College Station was the second Super Regional Game 3 victory for TCU, to go with a win in 2010 against Texas in Austin.

.172: Batting average against TCU pitching in the Super Regional against Texas A&M. The Horned Frogs gave up 17 hits in the three games.

Howard said he did just that.
<filename>app/src/main/java/com/example/herosorveteria/menu/ListaFornecedorActivity.java package com.example.herosorveteria.menu; import androidx.annotation.NonNull; import androidx.appcompat.app.AlertDialog; import androidx.appcompat.app.AppCompatActivity; import androidx.recyclerview.widget.ItemTouchHelper; import androidx.recyclerview.widget.LinearLayoutManager; import androidx.recyclerview.widget.RecyclerView; import android.content.DialogInterface; import android.content.Intent; import android.os.Bundle; import android.view.View; import android.widget.Toast; import com.example.herosorveteria.R; import com.example.herosorveteria.adapter.AdapterFornecedores; import com.example.herosorveteria.adapter.AdapterProduto; import com.example.herosorveteria.cadastro.CadastroFornecedorActivity; import com.example.herosorveteria.config.ConfiguracaoFireBase; import com.example.herosorveteria.helper.Base64Custom; import com.example.herosorveteria.model.Fornecedor; import com.example.herosorveteria.model.Produto; import com.google.firebase.auth.FirebaseAuth; import com.google.firebase.database.DataSnapshot; import com.google.firebase.database.DatabaseError; import com.google.firebase.database.DatabaseReference; import com.google.firebase.database.ValueEventListener; import org.jetbrains.annotations.NotNull; import java.util.ArrayList; import java.util.List; public class ListaFornecedorActivity extends AppCompatActivity { private DatabaseReference firebaseRef = ConfiguracaoFireBase.getFirebaseDatabase(); RecyclerView recyclerViewFornecedor; private ValueEventListener valueEventListener; private FirebaseAuth autenticacao = ConfiguracaoFireBase.getFireBaseAutenticacao(); private DatabaseReference fornecedorRef; private AdapterFornecedores adapterFornecedores; private List<Fornecedor> fornecedorList = new ArrayList<>(); private Fornecedor fornecedor; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_lista_fornecedor); inicializarComponentes(); swipe(); adapterFornecedores = new AdapterFornecedores(fornecedorList,this); RecyclerView.LayoutManager layoutManager = new LinearLayoutManager(this); recyclerViewFornecedor.setLayoutManager(layoutManager); recyclerViewFornecedor.setHasFixedSize(true); recyclerViewFornecedor.setAdapter(adapterFornecedores); } private void swipe() { ItemTouchHelper.Callback itemTouch = new ItemTouchHelper.Callback() { @Override public int getMovementFlags(@NonNull @NotNull RecyclerView recyclerView, @NonNull @NotNull RecyclerView.ViewHolder viewHolder) { int dragsFlags = ItemTouchHelper.ACTION_STATE_IDLE; int swiperFlags = ItemTouchHelper.START|ItemTouchHelper.END; return makeMovementFlags(dragsFlags, swiperFlags); } @Override public boolean onMove(@NonNull @NotNull RecyclerView recyclerView, @NonNull @NotNull RecyclerView.ViewHolder viewHolder, @NonNull @NotNull RecyclerView.ViewHolder target) { return false; } @Override public void onSwiped(@NonNull @NotNull RecyclerView.ViewHolder viewHolder, int direction) { excluirFornecedores(viewHolder); } }; new ItemTouchHelper(itemTouch).attachToRecyclerView(recyclerViewFornecedor); } private void excluirFornecedores(RecyclerView.ViewHolder viewHolder) { AlertDialog.Builder alertDialog = new AlertDialog.Builder(this); alertDialog.setTitle("Excluir, Fornecedor da Lista"); alertDialog.setMessage("Você tem certeza que deseja excluir esse fornecedor?"); alertDialog.setCancelable(false); alertDialog.setPositiveButton("Confirmar", new 
DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialogInterface, int i) { int position = viewHolder.getAdapterPosition(); fornecedor = fornecedorList.get(position); String emialUsuario = autenticacao.getCurrentUser().getEmail(); String idUsuario = Base64Custom.codificarBase64(emialUsuario); fornecedorRef= firebaseRef.child("produtos") .child(idUsuario); fornecedorRef.child(fornecedor.getKey()).removeValue(); adapterFornecedores.notifyItemRemoved(position); } }); alertDialog.setNegativeButton("Cancelar", new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialogInterface, int i) { Toast.makeText(ListaFornecedorActivity.this, "Cancelado", Toast.LENGTH_SHORT).show(); adapterFornecedores.notifyDataSetChanged(); } }); AlertDialog alert = alertDialog.create(); alert.show(); } private void inicializarComponentes() { recyclerViewFornecedor = findViewById(R.id.recyclerListFornecedor); } public void cadastraFornecedor(View v){ Intent i = new Intent(this, CadastroFornecedorActivity.class); startActivity(i); finish(); } private void recuperarFornecedores() { String emialUsuario = autenticacao.getCurrentUser().getEmail(); String idUsuario = Base64Custom.codificarBase64(emialUsuario); fornecedorRef = firebaseRef.child("fornecedores") .child(idUsuario); valueEventListener = fornecedorRef.addValueEventListener(new ValueEventListener() { @Override public void onDataChange(@NonNull @NotNull DataSnapshot snapshot) { fornecedorList.clear(); for (DataSnapshot dados: snapshot.getChildren()){ Fornecedor fornecedor = dados.getValue(Fornecedor.class); fornecedor.setKey(dados.getKey()); fornecedorList.add(fornecedor); } adapterFornecedores.notifyDataSetChanged(); } @Override public void onCancelled(@NonNull @NotNull DatabaseError error) { } }); } @Override protected void onStart() { super.onStart(); recuperarFornecedores(); } }
On adaptive caching in mobile databases
We consider an environment in which a collection of mobile clients accesses a stationary database server via a wireless channel. Due to the limited bandwidth of a wireless channel and the instability of the wireless network, caching of frequently accessed data items in a client's local storage becomes especially important for improving the performance and data availability of data access queries. However, the network stability and high transmission bandwidth requirements of existing caching mechanisms for conventional client-server and distributed database applications conflict with the mobile nature of the clients as well as the low bandwidth of the wireless channel. In this paper, we investigate the issues that need to be addressed in caching mechanisms for a mobile environment and propose an adaptive caching mechanism that can cope with the nature of a mobile environment as well as the low-bandwidth wireless media. The results of some preliminary exploratory experiments are also presented to demonstrate the feasibility of our mechanism.
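The abstract does not spell out the proposed adaptive mechanism, so the toy sketch below is not the paper's scheme; it only illustrates the general idea such work builds on: a client-side cache that answers reads locally and drops entries when the server broadcasts invalidation reports, so that the scarce wireless uplink is used only on misses. All class and parameter names, and the report format, are invented for illustration.

# Toy illustration (not the mechanism proposed in the paper): a mobile client
# cache that serves reads locally and is kept consistent by invalidation
# reports broadcast periodically by the server over the shared wireless channel.
import time

class MobileClientCache:
    def __init__(self, fetch_from_server):
        self.fetch = fetch_from_server   # callback doing the costly wireless round trip
        self.items = {}                  # key -> (value, time the local copy was cached)

    def apply_invalidation_report(self, report):
        """report: iterable of (key, server_update_time) pairs received downlink."""
        for key, updated_at in report:
            entry = self.items.get(key)
            if entry is not None and entry[1] < updated_at:
                del self.items[key]      # local copy is stale, drop it

    def read(self, key):
        if key in self.items:
            return self.items[key][0]    # cache hit: no uplink traffic needed
        value = self.fetch(key)          # cache miss: use the wireless channel once
        self.items[key] = (value, time.time())
        return value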
Sponsor support is very important to me here at The Pen Addict. My sidebar advertisers put their faith in me to deliver good value for them month in and month out, and I want to shine the light on the best of the best for you. If you are shopping for pens, paper, inks, and more, please check out these great companies and see what they have to offer. Some recent highlights: Anderson Pens has a fresh batch of Organics Studio inks in, including new 5th Anniversary releases. And don’t forget to visit them at the Baltimore Pen Show this weekend. JetPens is stocking one of my favorite recent releases, the Lihit Lab Smart Fit Cover Notebook in both A5 and B5 sizes. Goldspot has finished a complete website overhaul and snuck in their remaining Lamy Dark Lilac pens to celebrate. Go check it out!
import datetime import json import logging import unittest import webapp2 import webtest from api_handlers import api_routes from model import User, Organization, Project from unit_test_helper import ConsistencyTestCase, login_headers import config class TestGraphQLProject(ConsistencyTestCase): consistency_probability = 0 cookie_name = config.session_cookie_name cookie_key = config.default_session_cookie_secret_key def set_up(self): # Let ConsistencyTestCase set up the datastore testing stub. super(TestGraphQLProject, self).set_up() application = webapp2.WSGIApplication( api_routes, config={ 'webapp2_extras.sessions': { 'secret_key': self.cookie_key } }, debug=True ) self.testapp = webtest.TestApp(application) def test_get_single_project(self): user = User.create(email="<EMAIL>", user_type='super_admin') user.put() org = Organization.create(name="Org Foo") org.put() project = Project.create( organization_id=org.uid, program_label='demo-program', account_manager_id='User_001', liaison_id='User_002', priority=True, deidentification_method='total', loa_notes="Some stuff happened.", last_active=datetime.datetime.now(), ) project.put() query = ''' query GetSingleProject($uid: String!) { project(uid: $uid) { account_manager_id created deidentification_method deleted last_active liaison_id loa_notes modified organization_id organization_name organization_status priority program_description program_label program_name short_uid uid } } ''' response = self.testapp.post_json( '/api/graphql', # See http://graphql.org/learn/serving-over-http/#post-request { 'query': query, 'variables': {'uid': project.uid}, }, headers=login_headers(user.uid), ) self.assertEqual( response.body, json.dumps({'project': project.to_client_dict()}), ) def test_get_all_projects(self): user = User.create(email="<EMAIL>", user_type='super_admin') user.put() org = Organization.create(name="Org Foo") org.put() project1 = Project.create(organization_id=org.uid, program_label='demo-program') project2 = Project.create(organization_id=org.uid, program_label='demo-program') project1.put() project2.put() query = ''' query GetAllProjects { projects { uid } } ''' response = self.testapp.post_json( '/api/graphql', {'query': query}, headers=login_headers(user.uid), ) received = json.loads(response.body) # No particular order. self.assertIn({'uid': project1.uid}, received['projects']) self.assertIn({'uid': project2.uid}, received['projects'])
GaAs Schottky photodiode with 3 dB bandwidth of 20 GHz
We have developed and characterized GaAs Schottky barrier photodiodes with 3 dB bandwidths of 20 GHz, which corresponds to a Gaussian impulse response with a full width at half maximum (FWHM) of 16 picoseconds. These photodiodes, in addition to their wide bandwidths, operate at a reverse bias voltage of less than 5 volts with an associated dark leakage current of 5 picoamperes, and still attain external quantum efficiencies of 30% at 600 nm and 25% at 845 nm, which translate to responsivities of 0.15 and 0.17 amperes per watt, respectively. The photodiode is a mesa structure with a thin semi-transparent platinum film forming the photosensitive Schottky junction, 25 microns in diameter, in contact with an offset bond pad. The Schottky junction is formed on a low-doped n-type GaAs epitaxial layer grown by molecular beam epitaxy on an n+ GaAs substrate. To accurately characterize the transfer function of these high-speed photodiodes to 22 GHz, we have developed a time and frequency domain measurement system.
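The quoted figures are internally consistent, which can be checked with two textbook relations: for a Gaussian impulse response the half-power bandwidth and the FWHM satisfy f_3dB x FWHM = sqrt(2) * ln(2) / pi (about 0.31), and responsivity follows from R = eta * q * lambda / (h * c). The short check below uses only these standard formulas; the one assumption is that the authors quote the usual half-power (3 dB) bandwidth of the detector response.

# Sanity check of the numbers quoted in the abstract (illustrative, not from the paper).
import math

# Gaussian impulse response: f_3dB * FWHM = sqrt(2) * ln(2) / pi ~ 0.312
fwhm = 16e-12                                            # 16 ps
f_3db = math.sqrt(2) * math.log(2) / math.pi / fwhm
print(f"3 dB bandwidth ~ {f_3db / 1e9:.1f} GHz")         # ~19.5 GHz, i.e. ~20 GHz

# Responsivity R = eta * q * lambda / (h * c)
q, h, c = 1.602e-19, 6.626e-34, 2.998e8
for eta, lam in [(0.30, 600e-9), (0.25, 845e-9)]:
    r = eta * q * lam / (h * c)
    print(f"eta={eta:.2f} at {lam * 1e9:.0f} nm -> R = {r:.2f} A/W")   # 0.15 and 0.17 A/W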
package com.iexec.blockchain.dataset; import com.iexec.blockchain.tool.IexecHubService; import com.iexec.blockchain.tool.QueueService; import com.iexec.blockchain.tool.Status; import com.iexec.common.chain.ChainDataset; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.MockitoAnnotations; import java.util.Optional; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.*; class DatasetServiceTest { public static final String NAME = "name"; public static final String MULTI_ADDRESS = "multiAddress"; public static final String CHECKSUM = "checksum"; public static final String ID = "id"; public static final String DATASET_ADDRESS = "datasetAddress"; public static final String REQUEST_ID = "requestId"; @InjectMocks private DatasetService datasetService; @Mock private DatasetRepository datasetRepository; @Mock private IexecHubService iexecHubService; @Mock private QueueService queueService; @BeforeEach void setUp() { MockitoAnnotations.openMocks(this); } @Test void shouldCreateDataset() { Dataset dataset = mock(Dataset.class); when(dataset.getRequestId()).thenReturn(REQUEST_ID); when(datasetRepository.save(any())).thenReturn(dataset); String requestId = datasetService.createDataset(NAME, MULTI_ADDRESS, CHECKSUM); Assertions.assertEquals(dataset.getRequestId(), requestId); verify(queueService, times(1)) .addExecutionToQueue(any(), eq(false)); ArgumentCaptor<Dataset> datasetCaptor = ArgumentCaptor.forClass(Dataset.class); verify(datasetRepository, times(1)) .save(datasetCaptor.capture()); Assertions.assertEquals(Status.RECEIVED, datasetCaptor.getValue().getStatus()); Assertions.assertEquals(NAME, datasetCaptor.getValue().getName()); Assertions.assertEquals(MULTI_ADDRESS, datasetCaptor.getValue().getMultiAddress()); Assertions.assertEquals(CHECKSUM, datasetCaptor.getValue().getChecksum()); } @Test void shouldNotCreateDatasetOnChainAndStoreSinceLocallyMissing() { when(datasetRepository.findByRequestId(REQUEST_ID)) .thenReturn(Optional.empty()); datasetService.createDatasetOnChainAndStore(REQUEST_ID); verify(datasetRepository, times(0)).save(any()); } @Test void shouldCreateDatasetOnChainAndStoreSuccess() { Dataset dataset = Dataset.builder() .id(ID) .requestId(REQUEST_ID) .status(Status.RECEIVED) .name(NAME) .multiAddress(MULTI_ADDRESS) .checksum(CHECKSUM) .build(); when(datasetRepository.findByRequestId(REQUEST_ID)) .thenReturn(Optional.of(dataset)); when(datasetRepository.save(dataset)).thenReturn(dataset); when(iexecHubService.createDataset(NAME, MULTI_ADDRESS, CHECKSUM)) .thenReturn(DATASET_ADDRESS); datasetService.createDatasetOnChainAndStore(REQUEST_ID); verify(datasetRepository, times(2)) .save(dataset); assertThat(dataset.getStatus()).isEqualTo(Status.SUCCESS); } @Test void shouldTryCreateDatasetOnChainAndStoreFailure() { Dataset dataset = Dataset.builder() .id(ID) .requestId(REQUEST_ID) .status(Status.RECEIVED) .name(NAME) .multiAddress(MULTI_ADDRESS) .checksum(CHECKSUM) .build(); when(datasetRepository.findByRequestId(REQUEST_ID)) .thenReturn(Optional.of(dataset)); when(datasetRepository.save(dataset)).thenReturn(dataset); when(iexecHubService.createDataset(NAME, MULTI_ADDRESS, CHECKSUM)) .thenReturn(""); datasetService.createDatasetOnChainAndStore(REQUEST_ID); verify(datasetRepository, times(2)) .save(dataset); assertThat(dataset.getStatus()).isEqualTo(Status.FAILURE); } @Test 
void shouldGetStatusForCreateDatasetRequest() { Dataset dataset = mock(Dataset.class); when(dataset.getStatus()).thenReturn(Status.PROCESSING); when(datasetRepository.findByRequestId(REQUEST_ID)) .thenReturn(Optional.of(dataset)); Assertions.assertEquals(Optional.of(Status.PROCESSING), datasetService.getStatusForCreateDatasetRequest(REQUEST_ID)); } @Test void shouldNotGetStatusForCreateDatasetRequest() { Dataset dataset = mock(Dataset.class); when(dataset.getStatus()).thenReturn(null); when(datasetRepository.findByRequestId(REQUEST_ID)) .thenReturn(Optional.of(dataset)); Assertions.assertEquals(Optional.empty(), datasetService.getStatusForCreateDatasetRequest(REQUEST_ID)); } @Test void shouldGetDatasetAddressByRequestId() { Dataset dataset = mock(Dataset.class); when(dataset.getAddress()).thenReturn(DATASET_ADDRESS); when(datasetRepository.findByRequestId(REQUEST_ID)) .thenReturn(Optional.of(dataset)); Assertions.assertEquals(Optional.of(DATASET_ADDRESS), datasetService.getDatasetAddressForCreateDatasetRequest(REQUEST_ID)); } @Test void shouldNotGetDatasetAddressByRequestId() { Dataset dataset = mock(Dataset.class); when(datasetRepository.findByRequestId(REQUEST_ID)) .thenReturn(Optional.of(dataset)); Assertions.assertEquals(Optional.empty(), datasetService.getDatasetAddressForCreateDatasetRequest(REQUEST_ID)); } @Test void shouldGetDatasetByAddressFromCache() { Dataset dataset = mock(Dataset.class); when(dataset.getStatus()).thenReturn(Status.SUCCESS); when(datasetRepository.findByAddress(DATASET_ADDRESS)) .thenReturn(Optional.of(dataset)); Assertions.assertEquals(Optional.of(dataset), datasetService.getDatasetByAddress(DATASET_ADDRESS)); } @Test void shouldGetDatasetByAddressWithFetch() { when(datasetRepository.findByAddress(DATASET_ADDRESS)) .thenReturn(Optional.empty()); com.iexec.common.contract.generated.Dataset datasetContract = mock(com.iexec.common.contract.generated.Dataset.class); when(iexecHubService.getDatasetContract(DATASET_ADDRESS)) .thenReturn(datasetContract); ChainDataset chainDataset = ChainDataset.builder() .chainDatasetId(DATASET_ADDRESS) .name(NAME) .uri(MULTI_ADDRESS) .checksum(CHECKSUM) .build(); when(iexecHubService.getChainDataset(datasetContract)) .thenReturn(Optional.of(chainDataset)); Dataset dataset = mock(Dataset.class); when(datasetRepository.save(any())).thenReturn(dataset); Optional<Dataset> datasetByAddress = datasetService.getDatasetByAddress(DATASET_ADDRESS); Assertions.assertEquals(Optional.of(dataset), datasetByAddress); ArgumentCaptor<Dataset> datasetCaptor = ArgumentCaptor.forClass(Dataset.class); verify(datasetRepository, times(1)) .save(datasetCaptor.capture()); Assertions.assertEquals(chainDataset.getChainDatasetId(), datasetCaptor.getValue().getAddress()); Assertions.assertEquals(chainDataset.getName(), datasetCaptor.getValue().getName()); Assertions.assertEquals(chainDataset.getUri(), datasetCaptor.getValue().getMultiAddress()); Assertions.assertEquals(chainDataset.getChecksum(), datasetCaptor.getValue().getChecksum()); } @Test void shouldGetDatasetByAddressWithFailedFetchSinceNoDatasetContract() { when(datasetRepository.findByAddress(DATASET_ADDRESS)) .thenReturn(Optional.empty()); when(iexecHubService.getDatasetContract(DATASET_ADDRESS)) .thenReturn(null); Optional<Dataset> datasetByAddress = datasetService.getDatasetByAddress(DATASET_ADDRESS); Assertions.assertTrue(datasetByAddress.isEmpty()); } @Test void shouldGetDatasetByAddressWithFailedFetchSinceNoChainDataset() { when(datasetRepository.findByAddress(DATASET_ADDRESS)) 
.thenReturn(Optional.empty()); com.iexec.common.contract.generated.Dataset datasetContract = mock(com.iexec.common.contract.generated.Dataset.class); when(iexecHubService.getDatasetContract(DATASET_ADDRESS)) .thenReturn(datasetContract); when(iexecHubService.getChainDataset(datasetContract)) .thenReturn(Optional.empty()); Optional<Dataset> datasetByAddress = datasetService.getDatasetByAddress(DATASET_ADDRESS); Assertions.assertTrue(datasetByAddress.isEmpty()); } }
// src/main/java/techcompanies/amazon/MostCommonWord.java
package main.java.techcompanies.amazon;

import java.util.*;

public class MostCommonWord {

    public String mostCommonWord(String paragraph, String[] banned) {
        if (paragraph == null || paragraph.isEmpty()) return paragraph;

        // Strip punctuation, normalize case and split on whitespace.
        String[] words = paragraph.replaceAll("[^a-zA-Z ]", " ").toLowerCase().split("\\s+");
        Set<String> bannedSet = new HashSet<>(Arrays.asList(banned));

        // Count occurrences of every non-banned word.
        Map<String, Integer> counts = new HashMap<>();
        for (String word : words) {
            if (!word.isEmpty() && !bannedSet.contains(word)) {
                counts.put(word, counts.getOrDefault(word, 0) + 1);
            }
        }

        // Pick the word with the highest count. (A reverse-ordered TreeMap sorts
        // by key, not by frequency, so an explicit max over the counts is needed.)
        String result = null;
        int best = 0;
        for (Map.Entry<String, Integer> entry : counts.entrySet()) {
            if (entry.getValue() > best) {
                best = entry.getValue();
                result = entry.getKey();
            }
        }
        return result;
    }

    public static void main(String[] args) {
        MostCommonWord mostCommonWord = new MostCommonWord();
        String[] banned = {"hit"};
        // Prints "ball", the most frequent word once "hit" is banned.
        System.out.println(mostCommonWord.mostCommonWord(
                "Bob hit a ball, the hit BALL flew far after it was hit.", banned));
    }
}
// Repository: jacksonsr45/TravelAndCashControl
package domain.use_cases.bank_account_manager.accounts;

import domain.gateway.BankAccountInterface;
import domain.presenter.BankAccountPresenterInterface;

// Use case that removes a bank account through the gateway and hands the
// result to the presenter.
public class DeleteBankAccount extends DeleteBankAccountFactory {

    public DeleteBankAccount(BankAccountInterface repository, String id) {
        super(repository, id);
    }

    @Override
    public void execute(BankAccountPresenterInterface presenter) {
        presenter.present(this.repository.deleteBankAccount(this.getId()));
    }
}
The cultural appropriateness and diagnostic usefulness of standardized language assessments for Indigenous Australian children
Abstract Speech-language pathologists experience uncertainty about how to interpret standardized assessment results for Indigenous Australian children. There are risks for inappropriate diagnosis: both over- and under-diagnosis of language impairment may occur due to a convergence of linguistic features which causes difficulty in distinguishing between impairment and difference. While the literature suggests that standardized assessments are inappropriate for Indigenous Australian children, there is an absence of empirical documentation to show how Indigenous children perform on standardized tests of language ability. This study examined the performance of 19 Indigenous Australian children, aged 8;01 to 13;08, from one school on the Clinical Evaluation of Language Fundamentals, Fourth Edition, Australian Standardized Edition. Standardized scores were compared with teacher ratings of children's oral language skills. Analysis showed poor alignment between teacher ratings and language assessment, and assessment scores were negatively influenced by features of Aboriginal English. Children rated with above average language skills presented with different linguistic profiles from the children rated with average and below average language abilities. The inappropriateness of current standardized language assessments for Indigenous children and the need for further research to guide appropriate assessment are discussed.
Macroglossia and periorbital ecchymoses in a patient with systemic amyloidosis: A case report
Jamille Hemtrio Salles Martins Costa, Alosio Benvindo de Paula, Leonardo de Oliveira Campos, Rafaela Brito de Paula, Daniel Riani Gotardelo

Introduction: Amyloidoses comprise a group of rare diseases associated with the extracellular deposition of misfolded proteins, which can compromise the function of target organs and give rise to clinical disease with a broad range of manifestations. The aim of this study was to report a case of systemic amyloidosis with macroglossia and periorbital ecchymoses, two uncommon semiological findings. Case Report: A 59-year-old female presented with dyspnea, vomiting, abdominal pain and distension. The patient was admitted for diagnostic workup, during which malnutrition, infiltrative thickening of the suprapubic abdominal wall, anasarca, macroglossia, and tongue petechiae were identified. The clinical picture was compounded by hematochezia and periorbital ecchymoses during hospitalization. Biopsy of the dermis and subcutaneous tissue of the hypogastrium revealed amorphous eosinophilic extracellular depositions on Congo red staining which had green birefringence under polarized light microscopy, consistent with amyloidosis. Conclusion: Patients with amyloidosis are usually extensively investigated before a diagnosis is made because, in addition to being a rare disease with multifaceted presentation features, the signs and symptoms of amyloidosis are nonspecific. In the present report, cutaneous thickening with formation of periorbital ecchymoses accompanied by macroglossia was suggestive of amyloidosis, whose treatment and prognosis are influenced by timely diagnosis.

INTRODUCTION
Amyloidoses are a subgroup of diseases caused by the aggregation of misfolded proteins with extracellular deposition, which compromises the function of target organs and gives rise to clinical disease. Amyloidosis is a rare disease and a diagnostic challenge because of its nonspecific presenting features. Being a rare disease, the exact incidence of amyloidosis is unknown. In the United States, incidence rates seem stable at around 6-10 cases/million/year, with older adults the most commonly affected. The term "amyloid" was attributed by Rudolph Virchow in 1854, when he noted a reaction of metachromasia to iodine in necropsied tissue samples, similarly to what occurs with starch, and assumed the material was of glycidic origin.
Although Friedreich and Kekule demonstrated in 1859 that the material was in fact protein, the denomination was already incorporated into the medical vocabulary and was thus maintained. The "amyloid" deposit is necessarily composed of a fibrillar protein, glycosaminoglycans and serum amyloid P-component. Amyloid fibrils have a secondary structure in common (a beta-pleated sheet configuration) and a single ultrastructure, which determines the 30 different precursor proteins known to date. Amyloid diseases can be categorized as systemic or localized, hereditary or acquired. The current classification is based on the different types of protein in the amyloid fibrils, most often related to the distinct clinical presentations. The prognosis of localized disease is generally good with surgical treatment. If there is systemic involvement, the disease can be severe, with cardiomyopathy, nephrotic syndrome/renal failure, hepatosplenomegaly, diarrhea, intestinal pseudo-obstruction, peripheral neuropathy, autonomic neuropathy, arthropathy, carpal tunnel syndrome, bleeding, adrenal dysfunction, gout, weight loss, pulmonary problems, fatigue, and malaise. A tissue biopsy and histopathological examination are done to establish the diagnosis. Amyloid deposition is identified using Congo red histological staining and subsequent observation of green birefringence under polarized light, the established gold standard. The precursor fibril is then characterized using histochemical and biochemical testing, and genetic analysis. The correct and specific diagnosis of the amyloidosis type is essential to guide treatment.

CASE REPORT
A previously healthy 59-year-old female was admitted with mild dyspnea, vomiting, abdominal pain and distension for diagnostic workup. The patient complained of lower abdominal heaviness in addition to postprandial bloating and decreased appetite of approximately one year's duration, resulting in progressive weight loss that warranted extensive medical investigation at the time. She denied fever, inflammatory signs and changes in bowel habits. On physical examination, malnutrition, anasarca, macroglossia (Figure 1), tongue petechiae, and infiltrative thickening of the suprapubic abdominal wall were identified. During the hospital stay, the disease progressed with hemorrhagic phenomena consisting of massive hematochezia and bilateral periorbital ecchymoses, in addition to extensive left-eye conjunctival hemorrhage (Figure 2). Pericardial effusion, bilateral pleural effusion and ascites were noted on computed tomography scan. Abdominal magnetic resonance imaging showed edematous infiltration of the mesenterium and subcutaneous tissue in the hypogastrium. A barium meal revealed reduced small bowel motility. Hyperemia and ulcers were found in the colon and rectum on colonoscopy, the etiology to be determined by histopathology. Laboratory tests revealed antinuclear antibody (ANA), double-stranded deoxyribonucleic acid (dsDNA), rheumatoid factor (RF), hepatitis B surface antigen (HBsAg), anti-hepatitis C virus (anti-HCV) antibody and human immunodeficiency virus (HIV) antigen within normal limits. Mantoux testing (with 10 U tuberculin) and acid-alcohol-fast bacilli in sputum were negative. Blood glucose, urea, creatinine, sodium, potassium and bicarbonate levels were normal. Routine peripheral smear examination showed hypochromic microcytic anemia. C-reactive protein (CRP) was 48 mg/L, TSH 6.06 ng/dL (reference 0.27-4.2 ng/dL) and free T4 1.19 ng/dL (0.93-1.7 ng/dL).
Urinalysis was within normal limits except for erythrocyturia (20/HPF). Fundoscopy showed abundant diffuse opacities in the vitreous. Electrophoresis of urine and blood proteins showed a monoclonal peak in the region of alpha-2-globulins. Immunohistochemistry of the bone marrow biopsy specimen was positive for CD138 with numerous plasma cells (90% of the cell count), in addition to the presence of kappa and lambda light-chain globulins. Echocardiography showed grade III left ventricular diastolic dysfunction and pericardial effusion, in addition to parietal and visceral thickening. Biopsy of a fragment of the dermis and subcutaneous tissue of the hypogastrium revealed amorphous eosinophilic extracellular deposits on Congo red staining and green birefringence under polarized light, both consistent with amyloidosis. The possibility of the disease being associated with multiple myeloma was excluded given the clinical manifestations and evidence from ancillary tests (absence of hypercalcemia, renal failure, or osteolytic lesions consistent with myeloma). Cardiac decompensation followed, as the patient was in a condition of severe, advanced disease, while the medical team was awaiting the results of the biopsy with Congo red staining. The patient died before specific therapy could be instituted.

DISCUSSION
Macroglossia is the most frequent oral manifestation of amyloidosis and can be found as the only presenting symptom or as only one of the symptoms of the disease. Before considering the presence of amyloid protein, other more likely causes of tongue enlargement, such as malignant tumors of the tongue, vascular abnormalities, hypothyroidism and deficiency of vitamin B12 and folic acid, should be considered. Xavier et al. described a case of an older adult with macroglossia, weight loss, and dysphagia that seemed at first to be a malignant tumor of the tongue and that, after proper workup, was defined as amyloidosis. Tsourdi et al. reported a case of macroglossia as the sole manifestation of amyloidosis secondary to a monoclonal gammopathy of undetermined significance. Purpuric eruptions such as ecchymoses and hematomas can also be found in individuals with amyloidosis. They are due to coagulation factor X deficiency, likely the result of its absorption by the amyloid fibrils, in addition to amyloid infiltration of capillaries causing microvascular fragility. Few cases have been described in the literature in which the two findings (macroglossia and periorbital ecchymoses) were concurrent in patients with systemic amyloidosis; in most such cases, amyloidosis was associated with multiple myeloma. In the case reported herein, no underlying diseases were found to account for the amyloid deposition, hence the diagnosis of primary systemic amyloidosis was considered. This type of amyloidosis is known as AL, with the first letter ("A") corresponding to "amyloidosis" and the second representing the biochemical makeup of the constituent fibril, in this case the deposition of light-chain immunoglobulins ("L" for "light chain"). Three of the four diagnostic criteria to confirm AL-type systemic amyloidosis were verified in our study: presence of a syndrome related to the amyloid deposits (heart failure and macroglossia, among other signs and symptoms); evidence of amyloid deposition on Congo red staining in a tissue biopsy sample; and presence of monoclonal plasma cell proliferation. The fourth diagnostic criterion would be the confirmation of light-chain proteins in the amyloid material through immunohistochemistry or other molecular biology techniques. These tests were not performed because of the rapidly fatal outcome. The prognosis of AL amyloidosis is typically poor. Heart failure and renal failure are the main causes of death. When amyloidosis is secondary to multiple myeloma, the mean survival is five months, while the primary form of the disease is associated with a survival of 2.1 years. The treatment for AL amyloidosis is intended to reduce the amount of circulating precursor proteins produced by B-lymphocytes and plasma cells, which can be achieved with cytotoxic agents such as prednisone and melphalan.

CONCLUSION
Macroglossia, periorbital ecchymoses, and other hemorrhagic manifestations are among the multiple presenting features to be found in systemic amyloidosis, which is a severe disease with complex symptomatology requiring thorough clinical examination and early recognition by the medical team to ensure timely treatment.

Author contributions
Jamille Hemtrio Salles Martins Costa - Substantial contributions to conception and design, Acquisition of data, Analysis and interpretation of data, Drafting the article, Revising it critically for important intellectual content, Final approval of the version to be published
Alosio Benvindo de Paula - Substantial contributions to conception and design, Acquisition of data, Analysis and interpretation of data, Drafting the article, Revising it critically for important intellectual content, Final approval of the version to be published
Leonardo de Oliveira Campos - Substantial contributions to conception and design, Acquisition of data, Analysis and interpretation of data, Drafting the article, Revising it critically for important intellectual content, Final approval of the version to be published
Rafaela Brito de Paula - Substantial contributions to conception and design, Acquisition of data, Analysis and interpretation of data, Drafting the article, Revising it critically for important intellectual content, Final approval of the version to be published
package oops.polymorphism; public class Dog extends Pet{ String name = "Mike"; public void walk() { System.out.println("Dog is walking"); } }
// Write does a write and calls the callback. func (wpw *WriteProgressWrapper) Write(buffer []byte) (n int, err error) { defer func() { if state := recover(); state != nil { err = log.Wrap(state.(error)) } }() startAt := time.Now() n, err = wpw.w.Write(buffer) log.PanicIf(err) duration := time.Since(startAt) err = wpw.progressCb(n, duration, false) log.PanicIf(err) return n, nil }
Dust deposition near an eroding source field
Deposition of suspended dust near eroding source fields can have detrimental effects on vegetation, as well as on soil and water quality. This study was undertaken to quantify dust deposition within 200 m of a source field during wind erosion events. Erosion was measured with BSNE samplers on a small field of Amarillo fine sandy loam at Big Spring, TX. Suspension-sized dust discharge averaged 33 ± 5 per cent of the total sediment discharge and ranged from 180 to 1474 kg m−1 during eight selected storm events. Within 200 m of the source field boundary, dust collected in deposition samplers placed above a vegetated surface averaged 34 per cent of the initial dust discharge. Predicted deposition, according to a line source model, was 43 per cent. Actual deposition was likely near that predicted, because of lateral diffusion of the dust and some undersampling by the disk samplers. Thus, the line source model seems useful in estimating both the pattern and quantity of deposition. About 30 per cent of the suspended dust was deposited within the initial 50 m of vegetated surface, but only about 12 to 15 per cent was deposited in the initial 10 m. Published in 2006 by John Wiley & Sons, Ltd.
How to measure sustained psychic transformations in long-term treatments of chronically depressed patients: Symptomatic and structural changes in the LAC Depression Study of the outcome of cognitive-behavioural and psychoanalytic long-term treatments*
ABSTRACT Worldwide, the pressure on psychoanalysis to prove the results of its treatments according to the criteria of so-called evidence-based medicine has increased. While a large number of studies on the results of psychoanalytic short-term therapies are now available, such studies are still largely lacking on psychoanalysis and psychoanalytic long-term therapies. In a large multicentre study, the results of psychoanalytical and cognitive-behavioural long-term therapies in chronically depressed patients were compared. Both psychotherapies led to statistically highly significant changes in depressive symptoms three years after the start of the treatments. However, the focus of psychoanalytic treatments is not exclusively on reducing psychopathological symptoms, but on changes in the inner world of the patients, reminiscent of the goal of psychoanalysis that Freud characterized as developing the ability to love, work and enjoy life. In the German-speaking community, such transformations are called structural changes. This article reports results on such structural changes achieved with the help of a sophisticated measuring instrument, the Operationalized Psychodynamic Diagnostics (OPD). These so-called structural changes are compared with symptomatic changes. Three years after the start of the treatments, significantly more patients in psychoanalytical treatments showed such structural changes than patients in cognitive-behavioural treatments.
<reponame>smyhvae/VlcTest<gh_stars>1-10 /***************************************************************************** * VideoListAdapter.java ***************************************************************************** * Copyright © 2011-2012 VLC authors and VideoLAN * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. *****************************************************************************/ package org.videolan.vlc.gui.video; import java.util.Comparator; import java.util.HashMap; import java.util.Locale; import org.videolan.libvlc.Media; import org.videolan.vlc.MediaGroup; import org.videolan.vlc.R; import org.videolan.vlc.util.BitmapCache; import org.videolan.vlc.util.BitmapUtil; import org.videolan.vlc.util.Strings; import org.videolan.vlc.util.Util; import android.content.Context; import android.content.res.ColorStateList; import android.graphics.Bitmap; import android.view.LayoutInflater; import android.view.View; import android.view.View.OnClickListener; import android.view.ViewGroup; import android.widget.ArrayAdapter; import android.widget.GridView; import android.widget.ImageView; import android.widget.ProgressBar; import android.widget.TextView; public class VideoListAdapter extends ArrayAdapter<Media> implements Comparator<Media> { public final static int SORT_BY_TITLE = 0; public final static int SORT_BY_LENGTH = 1; private int mSortDirection = 1; private int mSortBy = SORT_BY_TITLE; private boolean mListMode = false; private Context mContext; private VideoGridFragment mFragment; public VideoListAdapter(Context context, VideoGridFragment fragment) { super(context, 0); mContext = context; mFragment = fragment; } public final static String TAG = "VLC/MediaLibraryAdapter"; public synchronized void update(Media item) { int position = getPosition(item); if (position != -1) { remove(item); insert(item, position); } } public void setTimes(HashMap<String, Long> times) { // update times for (int i = 0; i < getCount(); ++i) { Media media = getItem(i); Long time = times.get(media.getLocation()); if (time != null) media.setTime(time); } } public void sortBy(int sortby) { switch (sortby) { case SORT_BY_TITLE: if (mSortBy == SORT_BY_TITLE) mSortDirection *= -1; else { mSortBy = SORT_BY_TITLE; mSortDirection = 1; } break; case SORT_BY_LENGTH: if (mSortBy == SORT_BY_LENGTH) mSortDirection *= -1; else { mSortBy = SORT_BY_LENGTH; mSortDirection *= 1; } break; default: mSortBy = SORT_BY_TITLE; mSortDirection = 1; break; } sort(); } public void sort() { super.sort(this); } @Override public int compare(Media item1, Media item2) { int compare = 0; switch (mSortBy) { case SORT_BY_TITLE: compare = item1.getTitle().toUpperCase(Locale.ENGLISH).compareTo( item2.getTitle().toUpperCase(Locale.ENGLISH)); break; case SORT_BY_LENGTH: compare = ((Long) item1.getLength()).compareTo(item2.getLength()); break; } return mSortDirection * compare; } /** * Display the 
view of a file browser item. */ @Override public View getView(final int position, View convertView, ViewGroup parent) { ViewHolder holder; View v = convertView; if (v == null || (((ViewHolder)v.getTag()).listmode != mListMode)) { LayoutInflater inflater = (LayoutInflater) getContext().getSystemService(Context.LAYOUT_INFLATER_SERVICE); if (!mListMode) v = inflater.inflate(R.layout.video_grid_item, parent, false); else v = inflater.inflate(R.layout.video_list_item, parent, false); holder = new ViewHolder(); holder.layout = v.findViewById(R.id.layout_item); holder.thumbnail = (ImageView) v.findViewById(R.id.ml_item_thumbnail); holder.title = (TextView) v.findViewById(R.id.ml_item_title); holder.subtitle = (TextView) v.findViewById(R.id.ml_item_subtitle); holder.progress = (ProgressBar) v.findViewById(R.id.ml_item_progress); holder.more = (ImageView) v.findViewById(R.id.item_more); holder.listmode = mListMode; v.setTag(holder); holder.more.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { if (mFragment != null) mFragment.onContextPopupMenu(v, position); } }); /* Set the layoutParams based on the values set in the video_grid_item.xml root element */ v.setLayoutParams(new GridView.LayoutParams(v.getLayoutParams().width, v.getLayoutParams().height)); } else { holder = (ViewHolder) v.getTag(); } Media media = getItem(position); /* Thumbnail */ Bitmap thumbnail = BitmapUtil.getPictureFromCache(media); if (thumbnail == null) { // missing thumbnail thumbnail = BitmapCache.GetFromResource(v, R.drawable.icon_g); } else if (thumbnail.getWidth() == 1 && thumbnail.getHeight() == 1) { // dummy thumbnail thumbnail = BitmapCache.GetFromResource(v, R.drawable.icon); } //FIXME Warning: the thumbnails are upscaled in the grid view! holder.thumbnail.setImageBitmap(thumbnail); /* Color state */ ColorStateList titleColor = v.getResources().getColorStateList( Util.getResourceFromAttribute(mContext, R.attr.list_title)); holder.title.setTextColor(titleColor); if (media instanceof MediaGroup) fillGroupView(holder, media); else fillVideoView(holder, media); return v; } private void fillGroupView(ViewHolder holder, Media media) { MediaGroup mediaGroup = (MediaGroup) media; int size = mediaGroup.size(); String text = getContext().getResources().getQuantityString(R.plurals.videos_quantity, size, size); holder.subtitle.setText(text); holder.title.setText(media.getTitle() + "\u2026"); // ellipsis holder.more.setVisibility(View.INVISIBLE); holder.progress.setVisibility(View.GONE); } private void fillVideoView(ViewHolder holder, Media media) { /* Time / Duration */ long lastTime = media.getTime(); String text; if (lastTime > 0) { text = String.format("%s / %s", Strings.millisToText(lastTime), Strings.millisToText(media.getLength())); holder.progress.setVisibility(View.VISIBLE); holder.progress.setMax((int) (media.getLength() / 1000)); holder.progress.setProgress((int) (lastTime / 1000)); } else { text = Strings.millisToText(media.getLength()); holder.progress.setVisibility(View.GONE); } if (media.getWidth() > 0 && media.getHeight() > 0) { text += String.format(" - %dx%d", media.getWidth(), media.getHeight()); } holder.subtitle.setText(text); holder.title.setText(media.getTitle()); holder.more.setVisibility(View.VISIBLE); } static class ViewHolder { boolean listmode; View layout; ImageView thumbnail; TextView title; TextView subtitle; ImageView more; ProgressBar progress; } public void setListMode(boolean value) { mListMode = value; } public boolean isListMode() { return mListMode; } }