content
stringlengths 7
2.61M
|
---|
import numpy as np
from rubin_sim.scheduler.utils import (empty_observation, set_default_nside)
import rubin_sim.scheduler.features as features
from rubin_sim.scheduler.surveys import BaseSurvey
from rubin_sim.utils import _approx_RaDec2AltAz, _raDec2Hpid, _angularSeparation
import logging
log = logging.getLogger(__name__)
__all__ = ['Scripted_survey', 'Pairs_survey_scripted']
class Scripted_survey(BaseSurvey):
    """
    Take a set of scheduled observations and serve them up.
    """

    def __init__(self, basis_functions, reward=1e6, ignore_obs='dummy',
                 nside=None):
        """
        Parameters
        ----------
        basis_functions : list
            Basis function objects, passed through to BaseSurvey.
        reward : float (1e6)
            The reward value to report when a scripted observation is
            ready to execute.
        ignore_obs : str ('dummy')
            Ignore incoming observations whose `note` contains this string.
        nside : int (None)
            Healpix nside. Defaults to set_default_nside().
        """
        if nside is None:
            nside = set_default_nside()
        self.extra_features = {}
        self.nside = nside
        self.reward_val = reward
        # No reward until a scripted observation comes into its window.
        self.reward = -np.inf
        super(Scripted_survey, self).__init__(basis_functions=basis_functions,
                                              ignore_obs=ignore_obs, nside=nside)

    def add_observation(self, observation, indx=None, **kwargs):
        """Check if observation matches a scripted observation.

        If the executed `observation` matches a wanted observation (same
        note and filter, inside its time window, within the pointing
        tolerance), mark the wanted observation as observed and refresh
        `self.scheduled_obs`.
        """
        # From base class: skip observations we were told to ignore.
        checks = [io not in str(observation['note']) for io in self.ignore_obs]
        if all(checks):
            for feature in self.extra_features:
                self.extra_features[feature].add_observation(observation, **kwargs)
            for bf in self.basis_functions:
                bf.add_observation(observation, **kwargs)
            for detailer in self.detailers:
                detailer.add_observation(observation, **kwargs)
            self.reward_checked = False

            # was it taken in the right time window, and hasn't already been marked as observed.
            time_matches = np.where((observation['mjd'] > self.mjd_start) &
                                    (observation['mjd'] < self.obs_wanted['flush_by_mjd']) &
                                    (~self.obs_wanted['observed']) &
                                    (observation['note'] == self.obs_wanted['note']))[0]
            for match in time_matches:
                # Angular distance between the wanted and executed pointings.
                distances = _angularSeparation(self.obs_wanted[match]['RA'],
                                               self.obs_wanted[match]['dec'],
                                               observation['RA'], observation['dec'])
                if (distances < self.obs_wanted[match]['dist_tol']) & \
                   (self.obs_wanted[match]['filter'] == observation['filter']):
                    # Log it as observed.
                    self.obs_wanted['observed'][match] = True
                    # Only broadcast the still-unobserved scheduled MJDs.
                    self.scheduled_obs = self.obs_wanted['mjd'][~self.obs_wanted['observed']]
                    break

    def calc_reward_function(self, conditions):
        """If there is an observation ready to go, execute it, otherwise, -inf
        """
        observation = self._check_list(conditions)
        if observation is None:
            self.reward = -np.inf
        else:
            self.reward = self.reward_val
        return self.reward

    def _slice2obs(self, obs_row):
        """Take a slice (row) of self.obs_wanted and return a full observation object.
        """
        observation = empty_observation()
        for key in ['RA', 'dec', 'filter', 'exptime', 'nexp',
                    'note', 'rotSkyPos', 'flush_by_mjd']:
            observation[key] = obs_row[key]
        return observation

    def _check_alts_HA(self, observation, conditions):
        """Given scheduled observations, check which ones can be done in current conditions.

        Parameters
        ----------
        observation : np.array
            An array of scheduled observations. Probably generated with
            rubin_sim.scheduler.utils.scheduled_observation

        Returns
        -------
        np.array
            Indices (into `observation`) of the rows that pass the
            altitude and hour-angle checks.
        """
        # Just do a fast ra,dec to alt,az conversion.
        alt, az = _approx_RaDec2AltAz(observation['RA'], observation['dec'],
                                      conditions.site.latitude_rad, None,
                                      conditions.mjd,
                                      lmst=conditions.lmst)
        # Hour angle in hours (RA is radians: *12/pi converts to hours),
        # wrapped into [0, 24).
        HA = conditions.lmst - observation['RA']*12./np.pi
        HA[np.where(HA > 24)] -= 24
        HA[np.where(HA < 0)] += 24
        # NOTE(review): the HA condition is an OR of (HA > HA_max) | (HA < HA_min),
        # i.e. [HA_min, HA_max] acts as an exclusion band that wraps through
        # 0h -- confirm this matches the scheduled_observation convention.
        in_range = np.where((alt < observation['alt_max']) & (alt > observation['alt_min']) &
                            ((HA > observation['HA_max']) | (HA < observation['HA_min'])))[0]
        return in_range

    def _check_list(self, conditions):
        """Check to see if the current mjd is good; return one matching observation or None.
        """
        # Scheduled observations that are in the right time window and have not been executed
        in_time_window = np.where((self.mjd_start < conditions.mjd) &
                                  (self.obs_wanted['flush_by_mjd'] > conditions.mjd) &
                                  (~self.obs_wanted['observed']))[0]
        if np.size(in_time_window) > 0:
            pass_checks = self._check_alts_HA(self.obs_wanted[in_time_window], conditions)
            matches = in_time_window[pass_checks]
        else:
            matches = []
        if np.size(matches) > 0:
            # XXX--could make this a list and just send out all the things that currently match
            # rather than one at a time
            observation = self._slice2obs(self.obs_wanted[matches[0]])
        else:
            observation = None
        return observation

    def set_script(self, obs_wanted):
        """Load the array of scheduled observations to serve.

        Parameters
        ----------
        obs_wanted : np.array
            The observations that should be executed. Needs to have columns with dtype names:
            Should be from lsst.sim.scheduler.utils.scheduled_observation.
            The 'mjd' and 'mjd_tol' columns are used to compute each
            observation's earliest allowed start time.
        """
        self.obs_wanted = obs_wanted
        self.obs_wanted.sort(order='mjd')
        # Earliest MJD at which each scheduled observation may be taken.
        self.mjd_start = self.obs_wanted['mjd'] - self.obs_wanted['mjd_tol']
        # Here is the attribute that core scheduler checks to broadcast scheduled observations
        # in the conditions object.
        self.scheduled_obs = self.obs_wanted['mjd']

    def generate_observations_rough(self, conditions):
        """Return the next valid scripted observation as a one-element list.

        The single element is None when nothing is currently observable.
        """
        observation = self._check_list(conditions)
        return [observation]
class Pairs_survey_scripted(Scripted_survey):
    """Check if incoming observations will need a pair in 30 minutes. If so, add to the queue.
    """

    def __init__(self, basis_functions, filt_to_pair='griz',
                 dt=40., ttol=10., reward_val=101., note='scripted', ignore_obs='ack',
                 min_alt=30., max_alt=85., lat=-30.2444, moon_distance=30., max_slew_to_pair=15.,
                 nside=None):
        """
        Parameters
        ----------
        filt_to_pair : str (griz)
            Which filters to try and get pairs of
        dt : float (40.)
            The ideal gap between pairs (minutes)
        ttol : float (10.)
            The time tolerance when gathering a pair (minutes)
        reward_val : float (101.)
            The reward reported when a queued pair is ready to observe.
        note : str ('scripted')
            Tag used in the `note` field of generated pair observations.
        min_alt : float (30.)
            Minimum altitude at which to attempt a pair (degrees).
        max_alt : float (85.)
            Maximum altitude at which to attempt a pair (degrees).
        lat : float (-30.2444)
            Site latitude (degrees).
        moon_distance : float (30.)
            Moon avoidance distance (degrees).
        max_slew_to_pair : float (15.)
            Maximum slewtime for which a pair may be taken early (seconds).
        nside : int (None)
            Healpix nside; defaults to set_default_nside().
        """
        if nside is None:
            nside = set_default_nside()
        # BUGFIX: Scripted_survey.__init__ takes no min_alt/max_alt keyword
        # arguments, so forwarding them raised a TypeError and the
        # self.min_alt/self.max_alt attributes read by _check_alts were
        # never set. Store them here instead, converted to radians to match
        # self.lat and the (radian-convention) _approx_RaDec2AltAz output.
        super(Pairs_survey_scripted, self).__init__(basis_functions=basis_functions,
                                                    ignore_obs=ignore_obs, nside=nside)
        self.min_alt = np.radians(min_alt)
        self.max_alt = np.radians(max_alt)
        self.lat = np.radians(lat)
        self.note = note
        # Convert time tolerances from minutes to days (MJD units).
        self.ttol = ttol/60./24.
        self.dt = dt/60./24.  # To days
        self.max_slew_to_pair = max_slew_to_pair  # in seconds
        self._moon_distance = np.radians(moon_distance)
        self.extra_features = {}
        self.extra_features['Pair_map'] = features.Pair_in_night(filtername=filt_to_pair)
        self.reward_val = reward_val
        self.filt_to_pair = filt_to_pair
        # list to hold observations
        self.observing_queue = []
        # make ignore_obs a list
        if type(self.ignore_obs) is str:
            self.ignore_obs = [self.ignore_obs]

    def add_observation(self, observation, indx=None, **kwargs):
        """Add an observed observation.

        If the observation is in a pairable filter and has no pair yet,
        queue a copy of it at `mjd + self.dt` as the desired pair time.
        """
        # self.ignore_obs not in str(observation['note'])
        to_ignore = np.any([ignore in str(observation['note']) for ignore in self.ignore_obs])
        log.debug('[Pairs.add_observation]: %s: %s: %s', to_ignore, str(observation['note']), self.ignore_obs)
        log.debug('[Pairs.add_observation.queue]: %s', self.observing_queue)
        if not to_ignore:
            # Update my extra features:
            for feature in self.extra_features:
                if hasattr(self.extra_features[feature], 'add_observation'):
                    self.extra_features[feature].add_observation(observation, indx=indx)
            self.reward_checked = False

            # Check if this observation needs a pair
            # XXX--only supporting single pairs now. Just start up another scripted survey
            # to grab triples, etc? Or add two observations to queue at a time?
            # keys_to_copy = ['RA', 'dec', 'filter', 'exptime', 'nexp']
            if ((observation['filter'][0] in self.filt_to_pair) and
                    (np.max(self.extra_features['Pair_map'].feature[indx]) < 1)):
                obs_to_queue = empty_observation()
                for key in observation.dtype.names:
                    obs_to_queue[key] = observation[key]
                # Fill in the ideal time we would like this observed
                log.debug('Observation MJD: %.4f (dt=%.4f)', obs_to_queue['mjd'], self.dt)
                obs_to_queue['mjd'] += self.dt
                self.observing_queue.append(obs_to_queue)
            log.debug('[Pairs.add_observation.queue.size]: %i', len(self.observing_queue))
            for obs in self.observing_queue:
                log.debug('[Pairs.add_observation.queue]: %s', obs)

    def _purge_queue(self, conditions):
        """Remove any pair where it's too late to observe it.
        """
        # Assuming self.observing_queue is sorted by MJD.
        if len(self.observing_queue) > 0:
            stale = True
            log.debug('Purging queue')
            while stale:
                # BUGFIX: recompute in_window for the current head of the
                # queue on every iteration -- the head changes after each
                # deletion, so a value computed once before the loop went
                # stale and could purge/keep the wrong entries.
                in_window = np.abs(self.observing_queue[0]['mjd']-conditions.mjd) < self.ttol
                # If the next observation in queue is past the window, drop it
                if (self.observing_queue[0]['mjd'] < conditions.mjd) & (~in_window):
                    log.debug('Past the window: obs_mjd=%.4f (current_mjd=%.4f)',
                              self.observing_queue[0]['mjd'],
                              conditions.mjd)
                    del self.observing_queue[0]
                # If we are in the window, but masked, drop it
                elif (in_window) & (~self._check_mask(self.observing_queue[0], conditions)):
                    log.debug('Masked')
                    del self.observing_queue[0]
                # If in time window, but in alt exclusion zone
                elif (in_window) & (~self._check_alts(self.observing_queue[0], conditions)):
                    log.debug('in alt exclusion zone')
                    del self.observing_queue[0]
                else:
                    stale = False
                # If we have deleted everything, break out of where
                if len(self.observing_queue) == 0:
                    stale = False

    def _check_alts(self, observation, conditions):
        """True if the observation is inside the [min_alt, max_alt] band.
        """
        result = False
        # Just do a fast ra,dec to alt,az conversion. Can use LMST from a feature.
        alt, az = _approx_RaDec2AltAz(observation['RA'], observation['dec'],
                                      self.lat, None,
                                      conditions.mjd,
                                      lmst=conditions.lmst)
        in_range = np.where((alt < self.max_alt) & (alt > self.min_alt))[0]
        if np.size(in_range) > 0:
            result = True
        return result

    def _check_mask(self, observation, conditions):
        """Check that the proposed observation is not currently masked for some reason on the sky map.
        True if the observation is good to observe
        False if the proposed observation is masked
        """
        hpid = np.max(_raDec2Hpid(self.nside, observation['RA'], observation['dec']))
        # M5Depth <= 0 is used as the "masked" sentinel on the sky map.
        skyval = conditions.M5Depth[observation['filter'][0]][hpid]
        if skyval > 0:
            return True
        else:
            return False

    def calc_reward_function(self, conditions):
        """Return reward_val if any queued pair is currently observable, else -inf.
        """
        self._purge_queue(conditions)
        result = -np.inf
        self.reward = result
        log.debug('Pair - calc_reward_func')
        for indx in range(len(self.observing_queue)):
            check = self._check_observation(self.observing_queue[indx], conditions)
            log.debug('%s: %s', check, self.observing_queue[indx])
            if check[0]:
                result = self.reward_val
                self.reward = self.reward_val
                break
            elif not check[1]:
                # Queue is chronological: once one entry is ahead of the
                # time window, all later ones are too.
                break
        self.reward_checked = True
        return result

    def _check_observation(self, observation, conditions):
        """Evaluate whether a queued pair observation can be taken now.

        Returns
        -------
        tuple of bool
            (valid, in_time_window, infilt, in_slew_window, is_observable)
        """
        delta_t = observation['mjd'] - conditions.mjd
        log.debug('Check_observation: obs_mjd=%.4f (current_mjd=%.4f, delta=%.4f, tol=%.4f)',
                  observation['mjd'],
                  conditions.mjd,
                  delta_t,
                  self.ttol)
        obs_hp = _raDec2Hpid(self.nside, observation['RA'], observation['dec'])
        slewtime = conditions.slewtime[obs_hp[0]]
        # A short slew lets us take the pair early; once the ideal time has
        # passed (delta_t < 0) the slew restriction no longer applies.
        in_slew_window = slewtime <= self.max_slew_to_pair or delta_t < 0.
        in_time_window = np.abs(delta_t) < self.ttol
        if conditions.current_filter is None:
            infilt = True
        else:
            infilt = conditions.current_filter in self.filt_to_pair
        is_observable = self._check_mask(observation, conditions)
        valid = in_time_window & infilt & in_slew_window & is_observable
        log.debug('Pair - observation: %s ' % observation)
        log.debug('Pair - check[%s]: in_time_window[%s] infilt[%s] in_slew_window[%s] is_observable[%s]' %
                  (valid, in_time_window, infilt, in_slew_window, is_observable))
        return (valid,
                in_time_window,
                infilt,
                in_slew_window,
                is_observable)

    def generate_observations(self, conditions):
        """Pop and return the first observable queued pair (as a list), or [].
        """
        # Toss anything in the queue that is too old to pair up:
        self._purge_queue(conditions)
        # Check for something I want a pair of
        result = []
        log.debug('Pair - call')
        for indx in range(len(self.observing_queue)):
            check = self._check_observation(self.observing_queue[indx], conditions)
            if check[0]:
                result = self.observing_queue.pop(indx)
                result['note'] = 'pair(%s)' % self.note
                # Make sure we don't change filter if we don't have to.
                if conditions.current_filter is not None:
                    result['filter'] = conditions.current_filter
                result = [result]
                break
            elif not check[1]:
                # If this is not in time window and queue is chronological, none will be...
                break
        return result
|
# Demo of Python's arithmetic operators and their printed results.
print(7 / 4) # / is true division, always yields a float -> 1.75
print(7 % 4) # % is modulus (remainder of integer division) -> 3
print(50 % 5) # 50 is divisible by 5, so the remainder is 0
print(20 % 2) # n mod 2 is a good way of checking whether a number is even (0 means even)
print(2 ** 3) # ** is power (exponentiation) -> 8
print((2 + 10) * 3) # brackets override the usual operator precedence -> 36
|
Britain’s historic decision to leave the European Union after 40 years is already having major political and financial consequences. David Cameron has announced his intention to resign as prime minister, for instance, and the value of the pound has plummeted to its lowest level since 1985. And that’s after just one day. Brexit is a complicated, inherently divisive issue with seemingly innumerable facets. To those living outside the United Kingdom or having no firsthand experience with the EU, this story may be especially challenging to decipher. Who will be there to make sense of Brexit and boil it down to its very essence? The answer, as it has been for decades, is The Simpsons. There is precious little in this world that cannot be explained with images of the irradiated yellow inhabitants of Springfield. Brexit is well within their purview.
In a public Facebook group called “Woo Hoo!: Classic Simpsons Quotes,” fans have been using screenshots from the long-running Fox animated show to put Brexit in its proper perspective. A two-panel comic by user Louie Shaw, utilizing images from “The Springfield Connection,” casts Homer as Britain and Marge as the EU. Perhaps like the impetuous Homer, Britain has not thought this thing all the way through.
Another group member, Corey Pierce, has summarized his feelings about Brexit with a moment from “Lisa’s Wedding.” Hugh, Lisa’s future fiancé (but not her future husband), holds a smoldering, manure-covered British flag in his hands. Tears well up in his eyes as he contemplates what has happened to his home country.
Perhaps a humorous animated GIF will make everything better. For this, one need only turn to another Facebook page called The Content Zone. Once again, Britain has been depicted as the foresight-deficient Homer Simpson. In this case, it’s the Homer from “And Maggie Makes Three,” the one who literally burned his bridges when he quit his job at the Springfield Nuclear Power Plant. This would seem to be another slam on Britain, but in this metaphor, the EU becomes heartless plutocrat Mr. Burns. Does it matter that, by the end of “And Maggie Makes Three,” Homer literally comes crawling back to Burns on his hands and knees? |
Chapter 75
Qrow made his way down the hall, second door on the left Yang had told him, of course it was closed. He would have to purposefully intrude on a safe space to start with. Heaving a sigh, he knocked twice. "Yes?" Weiss' muffled voice snipped from within.
"It's Qrow." He replied. Met with silence, all he could do was wait.
"Come in." Ruby's voice softly replied.
Qrow felt a pang in his heart. She sounded tired, drained. It pained him to hear it, and things would probably only get worse. Slowly, he opened the door and stepped in, shutting it gently behind him. Ruby sat on Jaune's bed, Blake and Weiss flanking her, an arm wrapped around each of Ruby's, who clenched their hands like lifelines. At this point, they probably were. "Hey kiddo, Blake, Weiss." Qrow greeted.
Ruby managed a small smile, but it was just as tired as her voice. Weiss and Blake nodded, the latter indicating the desk chair across from them, which Qrow sat in and waited. Weiss seemed impassive, regarding him with an icy stare. Blake wore a small frown, her golden eyes burning. It reminded him of Raven. Ruby looked worn down, eyes red and bloodshot. That hurt even more. "What happens now?" Ruby finally asked, voice muted and strained.
"Well Ruby...I don't know." Qrow admitted. "It's really up to you. What do you want to happen?"
"What I've always wanted...I wanted to be a family again...and I still do." Ruby's voice strengthened, if only for a moment, but it was enough to make Qrow smile.
"Was this it?" Weiss interrogated. "No more secrets?"
"Nothing comes to mind." Qrow replied. "As far as I'm concerned, it's always been the one thing. You'll have to ask Summer."
"I guess so...Blake, Weiss, can I have a minute?" Ruby requested.
"Of course." Weiss agreed.
"Take your time." Blake added. "We'll keep Yang company."
"Thank you." Ruby pecked Blake on the lips with a smile, before turning to Weiss to do the same. "I love you."
"We love you too." Weiss whispered before standing. Blake followed as she left the room.
When the door shut again, Ruby clasped her hands together, staring at the door. "You were in love with mom?"
"I was." Qrow answered. "We all were. But Summer was in love with Tai."
"But she...she slept with you?" Ruby asked.
"Yeah...she...wasn't in a good place." Qrow sighed.
"You don't have to defend her Qrow." Ruby noted.
"I know, but it takes two to tango, and when push came to shove, I didn't say no." Qrow insisted. "Things seemed bad for Summer and Tai, and Summer came onto me for...whatever reason, and I just...I didn't want to say no. So I didn't."
"And here I am." Ruby said.
"And here you are." Qrow nodded.
"So I was a mistake." Ruby suggested.
"No." Qrow snapped.
"How?" Ruby pressed.
"Because I say so." Qrow replied. "Sleeping with Summer was a mistake, going along with the lie was a mistake, telling the truth the way I did was a mistake. You? You're not a mistake Ruby. You were worth all that, even if I fucked everything else up royally. Even if I was only ever uncle to you. It was all worth it because you are not a mistake. You're the best thing to ever happen to me. That's a fact, and I'll...I'll fight anyone who says a word against you." Qrow swallowed, his voice thick, tears prickling at the back of his eyes.
"What…" Ruby croaked, swallowing herself, eyes teary. "What do you want Qrow?"
"I want what you want Ruby." Qrow answered.
"No, that's not what you want to want." Ruby shook her head. "That's selfless. Be selfish. What do you want Qrow? If you could have one thing come from all this, what would it be?" Her voice wavered, but her gaze was strong, those beautiful silver eyes boring into his heart more deeply than Summer ever had before her.
"I want to hold my daughter, not my niece." Qrow barely got the words out before Ruby slammed into him, scrawny arms squeezing him tightly as she buried her face in his shoulder.
"Like father like d-daughter huh?" Ruby said thickly through tears.
Qrow laughed breathlessly, squeezing her back. "Yeah, chip off the old block." He half sobbed, half laughed, Ruby joining him.
"A-all these years and...it's gonna' be so weird not calling you Uncle Qrow anymore." Ruby grinned.
"Yeah, Daddy Qrow doesn't have the same ring to it." Qrow joked.
"Daddy, Dad, that's gonna' take a lot of getting used to." Ruby admitted.
"You're not gonna' stop calling Tai dad are you?" Qrow asked.
"No...no I won't." Ruby replied. "He's not my father, but he'll always be my dad. He...he doesn't hate me does he?"
"No, God no Ruby." Qrow insisted. "He loves you so much. He just...he couldn't deal with it. It wrecked his head to think about."
"I just wanted him to talk to me." Ruby sighed.
"So did he, but it took him so long to get back on his feet, he just couldn't handle lying to your face." Qrow explained.
"And you could?" Ruby pulled away, enough to look Qrow in the eye, but not to let go.
Qrow sighed. "I can't excuse it away Ruby. I didn't want to screw things up harder. I didn't know what would happen if I told the truth. At least with the lie, Summer couldn't force me away entirely. I could still try and be there for you. It was a shit situation, and none of us handled it right. And I'm sorry."
Ruby nodded sadly, managing a small smile. She took Qrow's hands in her own. "It hurts Qrow, being lied to, but I understand. You've always tried your best for me, even with Summer forcing you away. Even when things kept going wrong you never gave up. You were always there for me." She smiled a larger, more genuine smile. "I forgive you Qrow. I wanna' start making up for all the days we lost, all the times I called you uncle instead of dad, every day Summer made you lie to me. We'll take them back, one by one, eh?"
Qrow smiled. "There's nothing I want more in the world Ruby."
Frightened by the shouting, Zwei had hidden under the couch. Once Summer had calmed down, Winter coaxed out the still anxious animal. It seemed he was no longer in the mood to play, and his adorable puppy yawns indicated he was tired. Winter set out the dog bed Qrow had provided, and Zwei dutifully climbed in, quickly falling asleep. After that, silence reigned supreme in the Rose household. Until it did not.
"I feel sick." Summer suddenly stood, hurrying from the lounge. Winter and Pyrrha exchanged a worried glance, before quickly setting their cups down and following her. Summer fumbled with the bathroom door handle, barely twisting it open before slamming her hand over her mouth, scrambling to the toilet bowl just in time for her lunch to make an untimely reappearance.
Winter entered the bathroom to see Summer clutching the rim of the bowl desperately, hacking and coughing, gasping for breath. Pausing to shoot Pyrrha a glance, Winter noted the look of helplessness on her face and tried to counter it with a smile. "I can handle this." She promised softly, not that Summer could hear her over the retching. Pyrrha hesitated but nodded, so Winter shut the door and moved to kneel beside Summer.
"I-I'm sor-" Summer's apology was interrupted by another heave. Her stomach seemed to have emptied, as barely a trickle came from her mouth.
Winter shushed and lay a hand on Summer's back, rubbing soothingly. "It's okay. You're under a lot of stress." She made to stand, and Summer's hand shot out, latching onto her shirt, forcing her to pause.
"Please don't...don't leave, please don't leave me please…" Summer begged before another dry heave wracked her body, forcing another pained sob from her throat.
Winter's heart ached for her love, and she took the clinging hand in her own, planting a kiss on its back. "I'm not leaving you Summer." She vowed. "I promise you that." Summer seemed to deflate, some of the tension leaving her body as she whimpered and hitched. "I am however getting you a towel, and a glass of water."
Summer's hands went limp, and she bobbed her head. "Mmkay." Her nasally voice echoed from the bowl.
Standing, Winter made her way to the vanity, opening its doors to find a pile of folded hand towels, but unfortunately no cups. Still, she took one of the towels and dropped it in the sink, turning on the faucet and moving to the door. Pyrrha stood frozen mid pace, scroll in her hand, biting her thumbnail. "Could you bring a glass of water please?" Winter requested. Pyrrha nodded, hurrying off to the kitchen, and hurrying back almost as fast, a mug of water in hand. "Thank you, and Pyrrha." Pyrrha met Winter's gaze with an arched brow. "We're going to get through this, I promise."
Pyrrha managed a weak smile, which Winter returned, before closing the door once more. Shutting off the faucet and wringing out the towel, she returned to Summer's side. She had pulled herself into a more comfortable position by the toilet, hunched over it fearfully. Winter held out the towel, and Summer gratefully took it, wiping her mouth and nose, and her left hand, which had caught some of her initial outburst.
"Thank you." Summer whispered, meeting Winter's gaze with yet another weak smile.
Winter held out the mug. "You're welcome, now rinse." She commanded. Summer obeyed, taking the mug, sipping from it, and swishing it around her mouth before spitting into the bowl. She repeated this several times, before swallowing the last of the liquid and sighing. "How do you feel?"
"Better, a-a bit...it's just…" Summer slumped to the side, leaning against the wall beside Winter, pushing the mug and towel away. "I don't know what to do. I've never been good at...anything. Winter, I've just tried, and tried and tried and no matter how much I fail everything just kept working out, but I don't know what to even try to do here!" She hyperventilated, hiding her face in her hands. She felt Winter's arms wrap their way around her stomach, and a kiss planted on her shoulder.
"Breathe, slowly, in." Winter inhaled audibly, and Summer imitated her. "And out." Winter exhaled, Summer's own breath much shakier. But it seemed to have some effect.
"I don't know what's going to happen Winter." Summer continued. "I'm scared."
"I know." Winter said.
"I just...Ruby was so hurt, and Yang was so angry, and I thought she was gonna' hit me, but she didn't and she should have because I deserve it, but she didn't and she's gonna' come back and yell at me and I don't wanna' go through that." Summer rambled. "I've lost them Winter. I've lost my baby girls. They hate me and it's all my fault."
"You haven't lost them Summer." Winter encouraged. "And they don't hate you, even if they say so. They love you too much for that. Things are just going to be difficult for a while."
"Things have always been difficult." Summer sighed. "At least before I had my daughters to support me, even if it was because of my lies."
"You still have Pyrrha, and you still have me." Winter noted.
"I lied to you too." Summer said.
"And I forgive you." Winter responded.
"I don't deserve it." Summer insisted.
"I don't give a damn what you do and don't deserve." Winter countered. "I love you, and if I say I forgive you, I forgive you, and that's that." Winter shook Summer for emphasis, pulling her closer against her chest.
"I love you too Winter, with all my heart." Summer declared.
"I know sweetie." Winter kissed Summer's cheek. "I know."
They sat there for a time, basking in the brief peace afforded them, uncomfortable as it may be against the cold bathroom tiles. Eventually, Summer shifted uncomfortably. "Winter, I really need to use the toilet."
"Hmm." Winter grunted, standing alongside Summer.
"Alone." Summer insisted. Winter sighed, reluctant to leave her lover emotional and alone. "Winter...I promise, if you leave me alone, I will come out of this room healthy and unscathed...or at least unscathed." Summer managed a slightly stronger smile this time, which Winter reciprocated.
"Fine, I'll go keep Pyrrha company." Winter agreed. "And brush your teeth." She said over her shoulder as she turned and opened the door.
Summer rolled her eyes. "Pfft, yes mom."
Nora woke up with a groan. She had to go to the bathroom...again. As her pregnancy progressed, her bladder's capacity shrunk, leading to ever more frequent trips to empty it. It was worst when she was just trying to sleep. Nora was rarely able to get more than an hour or two of sleep before being forced to answer nature's call. She rolled out of bed and onto her feet, then shambled into the adjacent bathroom. As she sat on the toilet, she noticed a smell. Not a typical bathroom smell, no, it was coming from elsewhere in the apartment. There was no mistaking it.
Food. And not just any food, but fast food. Just a whiff was enough to make Nora's stomach grumble. She quickly finished up, flushed and washed her hands before rushing out into the hallway. She made her way to the living room, only to stumble upon some unexpected guests. Yang, Blake and Weiss were arrayed on the couch. The bag of food that had lured her was on the coffee table in front of them. Blake and Weiss were picking at a cup of fries while Yang chomped on a cheeseburger.
"Uh...what are you guys doing here?" Nora asked.
The trio on the couch turned their attention to the new arrival. "Oh...hey Nora." Yang greeted shakily. "What are you doing up?"
"I smelled the food." Nora replied. "I guess you guys brought it?"
"No, Qrow brought it." Yang corrected.
"What's he doing here?" Nora asked.
"After you left the party, things got...tense." Weiss started. "There was an argument and...maybe it would be best for Yang to take over."
"Fine." Yang sighed. "Nora, maybe sit down."
"Oh my God, did someone die?" Nora gasped.
"No, no, it's not quite that bad." Yang shook her head. Nora took Yang's advice and sat in a chair across from the couch. "Though that might be easier to deal with. Thing is...Qrow got into an argument with Summer and Raven and well...it turns out Qrow is Ruby's father."
"What?!" Nora exclaimed.
"That's not it." Yang continued. "It wasn't his idea to lie about it either. It was Summer's. She decided to tell Tai the kid was his so he would marry her. She took advantage of Qrow while he was vulnerable, tricked my dad, spent years lying to Ruby...she destroyed my family." Yang's volume had been rising as her anger built, but she took a moment to calm down before continuing. Nora could do nothing but stare in disbelief. "When Ruby found out...she freaked, rode her scooter through the snow to here. She's still here, talking to Qrow in Jaune's room."
"Where's everyone else?" Nora asked.
"Summer's back home with Winter and Pyrrha." Yang replied.
"Is Summer okay?" Nora pressed.
"I don't know, and frankly, I don't care." Yang growled.
"I...I uh…" Nora stammered. "I have to make a call." Nora stood and returned to her room to retrieve her scroll. She had to know if Summer was okay. Even though Summer had done something terrible, she had still been a mother to Nora, Mama Rose, and nothing would ever change that. Nora went through her list of contacts, finding Summer, but paused. She was afraid to call her directly. What if something she said made things worse? No. She slid her finger across the screen until another name appeared. She would call Pyrrha instead. Pyrrha would be able to tell her what she needed to know.
Nora tapped her scroll to call Pyrrha. "Hello?" Pyrrha answered after a pair of rings.
"Hey Pyrrha." Nora responded. She wandered out of her room, slipping into the one next door, the room filled with her stuffed animals. "I uh...how are you?"
"I'm...alright." Pyrrha lied.
"Yang told me what happened." Nora noted. She climbed onto the bed with the stuffed toys, snuggling into the softness. It was what she did when feeling nervous or down, and this certainly qualified as one of those times. "I...wanted to make sure Summer's okay."
"Oh..she's...been better." Pyrrha struggled. "She's...feeling ill."
"Is she...she's going to be alright, isn't she?" Nora pressed.
Pyrrha did not answer immediately. "I...I don't know." She admitted. "Winter and I are doing our best but...she's not...hold on." Nora could hear Winter's voice in the background, but could not make out the words.
"Did something happen?" Nora asked.
"No, no, I just have to get something." Pyrrha answered. "Summer threw up, that's all."
"Oh, okay." Nora breathed a sigh of relief. "Should I...maybe if...is there anything I can do?"
"I don't think so." Pyrrha replied. "I've never seen Summer like this." She hesitated. "But don't worry, I'm sure she'll get through it with Winter's help. Ruby and Yang will forgive her eventually, then everything will be fine."
"What if they don't?" Nora pressed.
Pyrrha had no real answer. "I'm sure they will." She finally said. "Try not to worry too much, okay?"
"I'll do my best." Nora declared. "I just...tell Mama Rose I love her."
"I will." Pyrrha promised.
"Thanks." Nora responded. "Talk to you later."
"Bye." Pyrrha hung up.
Nora tapped her scroll and let it slip from her hand. It had been such a great day, a happy day, and now it had all gone wrong. Her family was hurting, and that meant Nora was hurting too. She pressed herself into the heap of stuffed animals, hoping that immersing herself in their softness would drive away the feeling of anxiety in the pit of her stomach. It did not work. |
State health officials on Tuesday unveiled a new website they hope will help educate people about prescription opioid misuse and encourage patients to safely dispose of their unused medications so they don't end up in the wrong hands, the latest in an effort by the state attorney general's office to combat Texas' growing opioid crisis.
The website, "Dose of Reality," is available in English and Spanish at doseofreality.texas.gov. It includes free education materials on how to prevent opioid abuse, how to safely store medication and how to respond to an opioid overdose, as well as an interactive map that shows dozens of locations where people can bring in their unused prescription medication to be discarded, including drug stores and law enforcement offices.
The site's features follow a format first used in Wisconsin to share information about the opioid crisis and has also been duplicated by other states.
Health officials — including from the Texas Department of State Health Services and the Texas Health and Human Services Commission, which teamed up on the initiative — say the cost to maintain the website is minimal. They have not directed any funding toward marketing and are relying on word of mouth to let people know about it.
In 2016, more than 42,000 people died from opioid overdoses, according to the U.S. Centers for Disease Control and Prevention, including 1,375 in Texas.
"One of the main drivers of the crisis has been deceptive marketing and promotion by pharmaceutical companies," Texas Attorney General Ken Paxton said Tuesday.
Last year, Paxton's office sued national drug manufacturer Purdue Pharma for what it said were deceptive marketing practices that misrepresented the risk of addiction associated with its opioid medications.
"That effort is ongoing, and my office continues to serve in a leadership role in investigating others responsible for creating the public health crisis we now face," Paxton said. "Pursuing justice for the responsible parties in the pharmaceutical industry is essential, but it is not enough because the damage done by industry through misinformation campaigns."
Paxton said the website is an attempt to correct that problem, by providing accurate information that is easily accessible.
"It’s a truthful counterweight to the false narrative that has won the day for way too long," he said.
Although opioid medications are still being misused, doctors have been prescribing fewer of them in recent years.
Texas is among the states with the lowest opioid prescribing rates in the nation, with 53.1 prescriptions written in 2017 per 100 people, according to the CDC. That number fell significantly in the last decade, from 71.2 prescriptions written per 100 people in 2007, records show.
In Austin, opioid prescribing rates — already lower than the state and national averages — also are dropping. In 2012, 69.9 prescriptions were written per 100 people. By 2016, that number had dropped to 51.2, according to the city’s director of public health, Philip Huang. “Our physicians and our community is doing better than some places,” Huang said last year.
Still, numerous law enforcement officers, health professionals and criminal justice experts testified to the Texas House Select Committee on Opioids and Substance Use over the summer about the problems they are seeing with opioids, including prescription painkillers, and other drugs.
In some Texas counties, there are more opioid prescriptions per year than people, Paxton said.
The committee in its final report made several recommendations, including efforts to expand drug take-back programs, which several people testified had been successful in cutting down the number of medications that are diverted for illegal use. It also recommended improvements to data collection that would better show the extent of the opioid problem in Texas, as well as expanding medication-assisted treatment programs, which experts have said are vastly lacking in the state.
Health officials said Tuesday they are continuing to explore ways to combat the problem from every angle.
"This is an issue that the governor and the Legislature are focused on, as are other state agencies, local governments and private organizations across Texas," Department of State Health Services Commissioner John Hellerstedt said. "Opioid misuse and addiction are complex problems, and no single agency or group can tackle the problem alone. Success will come about as a result of committed partners." |
/* execute_CLI: implements the 6502 CLI instruction.
 * Clears the I (interrupt disable) bit of the status register P and
 * copies the mnemonic into the shared `instruction` buffer
 * (presumably for logging/disassembly output -- defined elsewhere).
 */
void execute_CLI(Cpu6502* cpu)
{
    cpu->P &= ~FLAG_I;          /* drop the interrupt-disable flag */
    strcpy(instruction, "CLI"); /* record the executed mnemonic */
}
<reponame>indylab/xdo
import json
import logging
import os
from abc import ABC, abstractmethod
from typing import Tuple
from grl.utils.common import ensure_dir
from grl.utils.strategy_spec import StrategySpec
logger = logging.getLogger(__name__)
class P2SROManagerLogger(ABC):
    """
    Logging for a P2SROManager.
    Extend this class to add additional functionality like tracking exploitability over time for small games.

    All hooks are optional no-ops; subclasses override only the events they
    care about.  Only ``__init__`` is abstract.
    """
    @abstractmethod
    def __init__(self, p2sro_manger, log_dir: str):
        # NOTE(review): parameter spelled 'p2sro_manger' (sic) throughout this
        # module; kept as-is for keyword-call compatibility.
        pass

    def on_new_active_policy(self, player: int, new_policy_num: int, new_policy_spec: StrategySpec):
        """Hook: *player* has claimed a new active policy slot."""
        pass

    def on_new_active_policy_metadata(self, player: int, policy_num: int, new_policy_spec: StrategySpec):
        """Hook: metadata for an existing active policy was updated."""
        pass

    def on_active_policy_moved_to_fixed(self, player: int, policy_num: int, fixed_policy_spec: StrategySpec):
        """Hook: an active policy was frozen as a fixed policy."""
        pass

    def on_payoff_result(self,
                         policy_specs_for_each_player: Tuple[StrategySpec],
                         payoffs_for_each_player: Tuple[float],
                         games_played: int,
                         overrode_all_previous_results: bool):
        """Hook: an empirical payoff evaluation for a strategy profile finished."""
        pass
class SimpleP2SROManagerLogger(P2SROManagerLogger):
    """
    Saves payoff table checkpoints every time an active policy is set to fixed.

    Checkpoints live under ``<log_dir>/payoff_table_checkpoints``.  Each
    checkpoint is written twice: to a numbered file that is never overwritten
    and to a ``*_latest.json`` file that is overwritten each time.  An
    append-only ``checkpoints_manifest.json`` records one JSON object per line
    describing each checkpoint.
    """
    def __init__(self, p2sro_manger, log_dir: str):
        # NOTE(review): 'p2sro_manger' (sic) matches the abstract base; kept
        # for keyword-call compatibility.
        super().__init__(p2sro_manger, log_dir)
        self._log_dir = log_dir
        self._manager = p2sro_manger
        self._payoff_table_checkpoint_dir = os.path.join(self._log_dir, "payoff_table_checkpoints")
        # Monotonic counter used to number checkpoint file pairs.
        self._payoff_table_checkpoint_count = 0
        # Paths of the most recently written numbered files (None until the
        # first checkpoint is saved).
        self._latest_numbered_payoff_table_checkpoint_path = None
        self._latest_numbered_policy_nums_path = None

    def on_new_active_policy(self, player: int, new_policy_num: int, new_policy_spec: StrategySpec):
        """Log that *player* claimed a new active policy slot."""
        logger.info(f"Player {player} active policy {new_policy_num} claimed")

    def on_new_active_policy_metadata(self, player: int, policy_num: int, new_policy_spec: StrategySpec):
        # Metadata updates are intentionally not logged by this implementation.
        pass

    def on_active_policy_moved_to_fixed(self, player: int, policy_num: int, fixed_policy_spec: StrategySpec):
        """Log the transition and persist a full checkpoint of the payoff table.

        Writes the payoff table and the per-player active/fixed policy numbers
        to both 'latest' and numbered JSON files, then appends a manifest line.
        """
        logger.info(f"Player {player} policy {policy_num} moved to fixed.")
        # save a checkpoint of the payoff table
        data = self._manager.get_copy_of_latest_data()
        latest_payoff_table, active_policy_nums_per_player, fixed_policy_nums_per_player = data
        self._latest_numbered_payoff_table_checkpoint_path = os.path.join(self._payoff_table_checkpoint_dir,
                                                                          f"payoff_table_checkpoint_{self._payoff_table_checkpoint_count}.json")
        self._latest_numbered_policy_nums_path = os.path.join(self._payoff_table_checkpoint_dir,
                                                              f"policy_nums_checkpoint_{self._payoff_table_checkpoint_count}.json")
        # Each artifact goes to a stable 'latest' path and a numbered path.
        pt_checkpoint_paths = [os.path.join(self._payoff_table_checkpoint_dir, f"payoff_table_checkpoint_latest.json"),
                               self._latest_numbered_payoff_table_checkpoint_path]
        policy_nums_paths = [os.path.join(self._payoff_table_checkpoint_dir, f"policy_nums_checkpoint_latest.json"),
                             self._latest_numbered_policy_nums_path]
        for pt_checkpoint_path, policy_nums_path in zip(pt_checkpoint_paths, policy_nums_paths):
            ensure_dir(file_path=pt_checkpoint_path)
            ensure_dir(file_path=policy_nums_path)
            latest_payoff_table.to_json_file(file_path=pt_checkpoint_path)
            print(f"\n\n\nSaved payoff table checkpoint to {pt_checkpoint_path}")
            # Per-player mapping of which policy indexes are active vs fixed.
            player_policy_nums = {}
            for player_i, (active_policy_nums, fixed_policy_nums) in enumerate(
                    zip(active_policy_nums_per_player, fixed_policy_nums_per_player)):
                player_policy_nums[player_i] = {
                    "active_policies": active_policy_nums,
                    "fixed_policies": fixed_policy_nums
                }
            with open(policy_nums_path, "w+") as policy_nums_file:
                json.dump(obj=player_policy_nums, fp=policy_nums_file)
            print(f"Saved policy nums checkpoint to {policy_nums_path}\n\n\n")
        # append checkpoint metadata to checkpoints_manifest.json
        # (append-only, one JSON object per line)
        checkpoints_manifest_path = os.path.join(self._payoff_table_checkpoint_dir, "checkpoints_manifest.json")
        ensure_dir(file_path=checkpoints_manifest_path)
        with open(checkpoints_manifest_path, "a+") as manifest_file:
            # Highest policy index fixed for *every* player, or None if some
            # player has no fixed policies yet.
            if all(len(fixed_policy_nums) > 0 for fixed_policy_nums in fixed_policy_nums_per_player):
                highest_fixed_policies_for_all_players = min(
                    max(fixed_policy_nums) for fixed_policy_nums in fixed_policy_nums_per_player)
            else:
                highest_fixed_policies_for_all_players = None
            manifest_json_line = json.dumps({"payoff_table_checkpoint_num": self._payoff_table_checkpoint_count,
                                             "highest_fixed_policies_for_all_players": highest_fixed_policies_for_all_players,
                                             "payoff_table_json_path": self._latest_numbered_payoff_table_checkpoint_path,
                                             "policy_nums_json_path": self._latest_numbered_policy_nums_path})
            manifest_file.write(f"{manifest_json_line}\n")
        self._payoff_table_checkpoint_count += 1

    def on_payoff_result(self, policy_specs_for_each_player: Tuple[StrategySpec],
                         payoffs_for_each_player: Tuple[float], games_played: int,
                         overrode_all_previous_results: bool):
        """No-op; the debug logging below is retained for reference."""
        pass
        # json_specs = [spec.to_json() for spec in policy_specs_for_each_player]
        # logger.debug(f"Payoff result for {json_specs}, payoffs: {payoffs_for_each_player}, games: {games_played},"
        #              f" overrides existing results: {overrode_all_previous_results}")
        #
        #
        # data = self._manager.get_copy_of_latest_data()
        # latest_payoff_table, active_policy_nums_per_player, fixed_policy_nums_per_player = data
        # latest_payoff_table: PayoffTable = latest_payoff_table
        # print("Player 0 matrix ---------------------------------------")
        # print(latest_payoff_table.get_payoff_matrix_for_player(0))
        # print("------------------------------------------------------")
        # print("Player 1 matrix ---------------------------------------")
        # print(latest_payoff_table.get_payoff_matrix_for_player(1))
        # print("------------------------------------------------------")

    def get_current_checkpoint_num(self):
        """Return the number the *next* checkpoint will be written under."""
        return self._payoff_table_checkpoint_count

    def get_latest_numbered_payoff_table_checkpoint_path(self):
        """Return the last numbered payoff-table path, or None if none saved."""
        return self._latest_numbered_payoff_table_checkpoint_path

    def get_latest_numbered_policy_nums_path(self):
        """Return the last numbered policy-nums path, or None if none saved."""
        return self._latest_numbered_policy_nums_path
|
Books and Devices from the Old — Their Renaissance in Computer Graphics In the following we present three of our institute's activities concerning cultural heritage. First we present a reconstruction of the Antikythera Mechanism, which is the world's oldest calculator of astronomical purpose. The implementation extensively uses engines and sensors from the OpenInventor Graphics Library. This is a particularly interesting example of a delicate object which cannot be exhibited (without the use of a virtual Computer Graphics model) since it is too valuable and delicate. Secondly we illustrate the use of Computer Graphics to make valuable medieval books available to the public. A new texture mapping approach allows the bilinear interpolation of texture coordinates on an arbitrary triangle mesh. This approach uses projective texture mapping and can therefore utilize the hardware of modern graphic workstations. Lastly we present an application of modern 3D Computer Graphics in the field of reconstructing ancient scientific instruments. The first-four-species calculator of Wilhelm Schickard is made accessible to the public in the World Wide Web using Java3D. All three examples illustrate the use of the latest technology to model ancient books or devices.
<gh_stars>1-10
// +build js,tinygo.arm avr
package runtime
// This file stubs out some external functions declared by the syscall/js
// package. They cannot be used on microcontrollers.
// js_ref mirrors syscall/js's 64-bit handle for JavaScript values.  These
// targets have no JavaScript host, so refs are placeholders only.
type js_ref uint64

// js_valueGet stubs syscall/js.valueGet (property access); it always reports
// the zero ref because JS is unavailable on microcontrollers.
//
//go:linkname js_valueGet syscall/js.valueGet
func js_valueGet(v js_ref, p string) js_ref {
	return 0
}
// js_valueNew stubs syscall/js.valueNew (JS constructor call).  Returns the
// zero ref and ok=true.
// NOTE(review): reporting success for an unsupported call looks questionable;
// confirm against the real wasm implementation's contract.
//
//go:linkname js_valueNew syscall/js.valueNew
func js_valueNew(v js_ref, args []js_ref) (js_ref, bool) {
	return 0, true
}
// js_valueCall stubs syscall/js.valueCall (JS method invocation).  Returns the
// zero ref and ok=true; see the note on js_valueNew about the ok flag.
//
//go:linkname js_valueCall syscall/js.valueCall
func js_valueCall(v js_ref, m string, args []js_ref) (js_ref, bool) {
	return 0, true
}
// js_stringVal stubs syscall/js.stringVal (Go string -> JS string); it always
// yields the zero ref because JS is unavailable on microcontrollers.
//
//go:linkname js_stringVal syscall/js.stringVal
func js_stringVal(x string) js_ref {
	return 0
}
|
package fastIO;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.Arrays;
public class Reader implements AutoCloseable {
private File inputFile;
private FileReader fr;
private final int chunkSize;
private char[] currentChunk;
private int parserAt = 0;
private boolean hasStarted = false;
private int bytesRead = -1;
private boolean isDone = false;
/**
*
* @param inputFile The file to read from.
* @throws FileNotFoundException If the file could not be found.
*/
public Reader(File inputFile) throws FileNotFoundException {
this.chunkSize = 2048;
this.inputFile = inputFile;
this.fr = new FileReader(this.inputFile);
this.currentChunk = new char[chunkSize];
}
/**
*
* @param inputFile The file to read from.
* @throws FileNotFoundException If the file could not be found.
*
*/
public Reader(String inputFile) throws FileNotFoundException {
this(new File(inputFile));
}
/**
*
* @param inputFile The file to read from.
* @param chunkSize The size of chunks that is used internally. Default is 2048. Bigger chunks will be faster for big files.
* @throws FileNotFoundException If the file could not be found.
*
*/
public Reader(File inputFile, int chunkSize) throws FileNotFoundException {
this.chunkSize = chunkSize;
this.inputFile = inputFile;
this.fr = new FileReader(this.inputFile);
this.currentChunk = new char[chunkSize];
}
/**
*
* @param inputFile The file to read from.
* @param chunkSize The size of chunks that is used internally. Default is 2048. Bigger chunks will be faster for big files.
* @throws FileNotFoundException If the file could not be found.
*
*/
public Reader(String inputFile, int chunkSize) throws FileNotFoundException {
this(new File(inputFile), chunkSize);
}
/**
*
* @return Returns the entire file as String. More efficient than readLine();
* @throws IOException If there is an error reading the File.
*
*/
public String readEntire() throws IOException {
StringBuilder sb = new StringBuilder();
currentChunk = new char[chunkSize];
int charsRead;
while((charsRead = fr.read(currentChunk)) != -1) {
if(charsRead != chunkSize) {
currentChunk = Arrays.copyOfRange(currentChunk, 0, charsRead);
}
sb.append(String.valueOf(currentChunk));
}
close();
return sb.toString();
}
/**
*
* @return Reads on line as String.
* @throws IndexOutOfBoundsException If there is no next line. Check with hasNextLine().
* @throws IOException If there is an error reading the File.
*
*/
public String readLine() throws IndexOutOfBoundsException, IOException {
if(isDone) throw new IndexOutOfBoundsException();
if(!hasStarted) {
bytesRead = readNewChunk();
hasStarted = true;
}
// if a new chunk has to be read not, do it
if(parserAt >= chunkSize-1) {
bytesRead = readNewChunk();
parserAt=0;
}
// if that chunk was exactly the first with no bytes, end reading
if(bytesRead == -1) {
isDone = true;
return "";
}
StringBuilder sb = new StringBuilder();
// read loop
while(true) {
// as long as a chunk has to be read, read it
for(; parserAt<bytesRead; parserAt++) {
char currentChar = currentChunk[parserAt];
// stop reading at each new line
if(currentChar == '\n' || currentChar == '\r') {
parserAt+= 2;
return sb.toString();
}
sb.append(currentChar);
}
// if chunk done, read new chunk
bytesRead = readNewChunk();
if(bytesRead == -1) {
isDone = true;
close();
return sb.toString();
}
parserAt=0;
}
}
// internal method, reads new chunk
private int readNewChunk() throws IOException {
return fr.read(currentChunk);
}
/**
*
* @return If the Reader has a next Line.
*
*/
public boolean hasNextLine() {
return !isDone;
}
/**
*
* Closes the Reader
*
*/
@Override
public void close() {
try {
fr.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
|
A note on the Voigt profile function A Voigt profile function emerges in several physical investigations (e.g. atmospheric radiative transfer, astrophysical spectroscopy, plasma waves and acoustics) and it turns out to be the convolution of the Gaussian and the Lorentzian densities. Its relation with a number of special functions has been widely derived in literature starting from its Fourier type integral representation. The main aim of the present paper is to introduce the Mellin-Barnes integral representation as a useful tool to obtain new analytical results. Here, starting from the Mellin-Barnes integral representation, the Voigt function is expressed in terms of the Fox H-function which includes representations in terms of the Meijer G-function and previously well-known representations with other special functions. Introduction During the recent past, representations of the Voigt profile function in terms of special functions have been discussed in this Journal. With the present paper we aim to continue those researches. In particular, we derive the representation in terms of Fox H-function, which includes previously well-known results. The Voigt profile function emerges in several physical investigations as atmospheric radiative transfer, astrophysical spectroscopy, plasma waves and acoustics and molecular spectroscopy in general. Mathematically, it turns out to be the convolution of the Gaussian and the Lorentzian densities. Here we are studying the ordinary Voigt function and not its mathematical generalizations, for example, see the papers. The computation of the Voigt profile is an old issue in literature and many efforts are directed to evaluate this function with different techniques. In fact, an analytical explicit representation in terms of elementary functions does not exist and it can be considered a special function itself. 
Moreover it is strictly related to the plasma dispersion function and to a number of special functions as, for example, the confluent hypergeometric function, the complex complementary error function, the Dawson function, the parabolic cylinder function and the Whittaker function, see e.g.. All previous representations are derived starting from the integral formula due to Reiche in 1913 that is actually a Fourier type integral. The Voigt profile function remains nowadays a mathematically and computationally interesting problem because computing profiles with high accuracy is still an expensive task. The actual interest on this topic is proven by several recent papers on mathematical and numerical aspects. A huge collection exists of published works on this topic, but instead to report it, we give the significative datum that searching in Google Scholar the strings "voigt profile function" and "voigt function" the number of files found is ∼ 24, 600 and ∼ 70, 100, respectively. The Mellin-Barnes integrals are a family of integrals in the complex plane whose integrand is given by the ratio of products of Gamma functions. Despite of the name, the Mellin-Barnes integrals were initially studied in 1888 by the Italian mathematician S. Pincherle in a couple of papers on the duality principle between linear differential equations and linear difference equations with rational coefficients. The Mellin-Barnes integrals are strongly related with the Mellin transform, in particular with the inverse transformation. As shown by O.I. Marichev, the problem to evaluate integrals can be successfully faced with a powerful method mainly based on their reduction to functions whose Mellin transform is the ratio of product of Gamma functions and then, after the inversion, the problem consists in the evaluation of Mellin-Barnes integrals. 
Moreover, they are also the essential tools for treating higher transcendental functions as Fox H-function and Meijer G-function and a useful representation to compute asymptotic behaviour of functions. The main object of the present paper is to introduce the Mellin-Barnes integral representation as a useful tool to obtain new analytical results that in the future can lead to efficient numerical algorithms for the Voigt function. A successful application of such approach has been shown in, where the parametric evolution equation of the Voigt function (and its probabilistic generalization) is derived and the scaling laws, with respect to the parameter, in the asymptotic regimes are computed using the Mellin-Barnes integral representation. After all, this work can be seen also as an interesting exercise per se of application of the Mellin-Barnes integral method. The rest of the paper is organized as follows. In section 2 the basic definition of the Voigt profile function is given and some classical and recent representations are reviewed. In section 3 the Mellin-Barnes integral representation of the Voigt function is derived and, in section 4 starting from this result, first the Voigt function is expressed in terms of the Fox H-function and later, in cascade, the representation with the Meijer G-function and other special functions are obtained. Finally in section 5 the summary and conclusions are given. The Voigt profile function 2.1 Basic definition The Gaussian G(x) and the Lorentzian L(x) profiles are defined as where G and L are the corresponding widths. The variable x is a wavenumber and then its physical dimension is a length raised up to −1. The Voigt profile V (x) is obtained by the convolution of G(x) and L(x) The comparison between the Voigtian, Gaussian and Lorentzian profile is shown in figure 1 for different values of the width ratio L / G. Let f () be the Fourier transform of f (x) so that then and Formula is the integral representation due to Reiche. 
Some classical and recent representations Let x be the dimensionless variable x = x/ G, the Voigt function can be re-arranged in the form where y = L / G and from it follows that The Voigt function does not possess an explicit representation in terms of elementary functions and several alternatives to have been given in literature, mainly with the intention to obtain a more efficient numerical computation. Combining x and y in the complex variable z = x − iy, the function K(x, y) is where W (z) is strongly related to the plasma dispersion function and some classical representations can be found, see e.g.. In fact, form the relation of W (z) with the complex complementary error function Erf c(−iz) and the Dawson function F (z) = e −z 2 z 0 e 2 d then it follows that More recent representations are, for example, those derived in 2001 by Di Rocco et al K(x, y) = ∞ n=0 (−1) n 1 (n + 1) where 1 F 1 (, ; z) is the confluent hypergeometric function, and in 2007 by Zaghloul, which "completed" a previous formula given by Roston & Obaid, claimed to have derived an exact calculation of the Voigt profile that is proportional to the product of an exponential and a cosine function. However this representation assumes negative values in contrast with the non-negativity of the Voigt function. For this reason, this result is not correct. Further representations are given in terms of special functions, see for example the one involving the confluent hypergeometric function 1 and others involving the Whittaker function W k,m, the Erf c−function and the parabolic cylinder function K(x, y) = 1 2 e (y−ix) 2 Erf c(y − ix) + e (y+ix) 2 Erf c(y + ix), The Mellin-Barnes integral representation Let us consider again dimensional variables and the Gauss, Lorentz and Voigt functions defined as in. From we have where where L denotes a loop in the complex s plane which encircles the poles of (s) (in the positive sense) with endpoints at infinity in Re(s) < 0 and with no restrictions on arg z. 
The functions $I_{\pm}(x)$ have the following Mellin-Barnes integral representations. Hence the Mellin-Barnes integral representation of the Voigt function is given accordingly. However we note that for a complex number $z = |z|e^{i\theta}$, $\theta = \arctan(\mathrm{Im}(z)/\mathrm{Re}(z))$, the following rule holds $z^n + \bar{z}^n = |z|^n e^{in\theta} + |z|^n e^{-in\theta} = |z|^n (e^{in\theta} + e^{-in\theta}) = 2|z|^n \cos(n\theta) = 2|z|^n \cos(n \arctan(\mathrm{Im}(z)/\mathrm{Re}(z)))$, and the formula becomes an integral in $ds$. Consider again, changing $s \to -s$ and taking the corresponding integration path $\mathcal{L}$ as a loop in the complex plane that encircles the poles of $\Gamma(-s)$, another Mellin-Barnes integral representation of the Voigt function equivalent to the former, also in more compact form. 4 The Fox H- and Meijer G-function representations. The Fox H-function representation. The Voigt function can be represented in terms of well-known special functions, see §2.2, but its representation in terms of the Fox H-function is still not known. The expression in terms of the Fox H-function is important because it is the most modern representation method and, actually, it is the most compact form to represent higher transcendental functions. The definition of the Fox H-function is given in the Appendix. For $\sigma_G$ and $\sigma_L$ fixed, $2(\sigma_L + ix)/\sigma_G \neq 0$ and $2(\sigma_L - ix)/\sigma_G \neq 0$, from which we have the two corresponding representations, respectively. As a consequence of the fact that this is the most comprehensive representation, in cascade, the others with less general functions can be obtained. The Meijer G-function representation. The first H-function can be rewritten in terms of the Meijer G-function. Setting $Z = 2(\sigma_L + ix)/\sigma_G$, we proceed where the change of variable $s \to 2s$ and the duplication rule for the Gamma function, $\Gamma(2z) = \Gamma(z)\Gamma(1/2 + z)\,2^{2z-1}\pi^{-1/2}$, are applied. The second H-function follows from the first with $\bar{Z} = 2(\sigma_L - ix)/\sigma_G$.
Finally, the Voigt function in terms of the Meijer G-function is given by The Meijer G-function can be reduced to other special functions and, for example, representations given in Hence, the Voigt function in terms of the Whittaker function is Moreover, from the identity and from G 21 we have Setting G = 1 in formulae are recovered, respectively. Summary and conclusions In the present paper the Mellin-Barnes integral representation of the Voigt profile function is derived. We think that this integral representation is a useful tool to have new analytical and numerical results in the subject. Starting from this, the Voigt function has been expressed in terms of the Fox H-function, which is the most comprehensive representation. In cascade, the expression in terms of the Meijer G-function is obtained and the previous well-known representations with the Whittaker, the Erf c and the Parabolic cylinder functions are recovered. |
A bill has been introduced in the South Carolina Legislature that would require Uber and Lyft drivers to display an illuminated sign on their vehicle window. Rep. Seth Rose said the bill was in response to the death of 21-year-old University of South Carolina student Samantha Josephson, who was killed after apparently getting into a car she thought was her Uber ride early Friday.
Titled the "Samantha L. Josephson Ridesharing Safety Act," the bill requires all transportation network companies (TNC) like Uber or Lyft to "possess and display certain illuminated signage at all times when the TNC driver is active."
House Speaker Jay Lucas placed the bill on the calendar for Wednesday to be debated by House members in the South Carolina Legislature.
Columbia, South Carolina, police believe Josephson mistakenly got into a car driven by 24-year-old Nathaniel David Rowland. She was traveling alone and was returning home after a night out.
"We believe ... that she simply mistakenly got into this particular car thinking it was an Uber ride," Columbia Police Chief Skip Holbrook said, noting surveillance video captured her getting into the car. "She opened the door and got into it and departed with the suspect driving."
Rowland was arrested Saturday after police conducted a traffic stop on his vehicle, which matched the description of the car Josephson was seen entering on video footage. Josephson's blood was found in the trunk and inside the black Chevy Impala, along with her cellphone and bleach, window cleaner and cleaning wipes, Holbrook said.
Rowland has been charged with murder and kidnapping. |
import yargs from 'yargs';
import getBitBucket, * as bbc from '../../bb_cloud';
import tkit from 'terminal-kit';
const terminal = tkit.terminal;
exports.command = 'show';
exports.aliases = ['$0'];
exports.desc = 'Show user info';
exports.builder = (yargs: yargs.Argv<{}>) => {};

// Resolve an authenticated client, fetch the current user, print a one-line
// summary, then terminate the process.
exports.handler = async (argv: any) => {
  const bb = await getBitBucket(argv);
  const user = await bb.getUser();
  const { username, nickname, created_on } = user;
  terminal(`${username} ${nickname} ${created_on}\n`);
  process.exit();
};
|
def histogram(self, name, variable, nbins, xmax, xmin=None, weights=''):
    """Draw *variable* from this tree into a new 1-D histogram and return it.

    Parameters
    ----------
    name : str
        Name of the histogram to book; also the key used to fetch it back
        from ROOT's current directory.
    variable : str
        Expression to draw, in TTree::Draw syntax.
    nbins, xmin, xmax
        Binning: number of bins and axis range.
    weights : str, optional
        Selection/weight expression passed to TTree::Draw ('' = unweighted).

    Returns
    -------
    The ROOT histogram, detached from the current ROOT directory and with
    sum-of-weights (per-bin error) tracking enabled.
    """
    # '>>name(...)' books a fresh histogram; 'goff' suppresses graphics output.
    # f-string replaces the fragile '...'.format(**locals()) idiom.
    self.Draw(f'{variable}>>{name}({nbins!s}, {xmin!s}, {xmax!s})', weights, 'goff')
    h = ROOT.gDirectory.Get(name)
    # Detach from the current directory so Python owns the histogram's
    # lifetime and it survives file/directory changes.
    h.SetDirectory(0)
    # Enable per-bin sum of squared weights so errors are tracked correctly.
    h.Sumw2()
    return h
<reponame>alex/optimizer-model
from .. import Operations
class OpDispatcher(object):
    """Maps operation kinds to handler functions via a flat dispatch table.

    The table is indexed by ``op.value``, so lookup is a single list access.
    """

    def __init__(self):
        self.dispatch_table = [None] * len(Operations)

    def register(self, op):
        """Decorator factory: record the decorated function as *op*'s handler."""
        def inner(func):
            self.dispatch_table[op.value] = func
            return func
        return inner

    def build_dispatcher(self, default=None):
        """Return ``dispatch(self, optimizer, operation)`` bound to this table.

        Falls back to *default* (if given) when no handler is registered for
        the operation; otherwise the operation is silently ignored.
        """
        table = self.dispatch_table

        def dispatch(self, optimizer, operation):
            handler = table[operation.op.value]
            if handler is not None:
                handler(self, optimizer, operation)
            elif default is not None:
                default(self, optimizer, operation)
        return dispatch

    def build_handler(self):
        """Return a dispatcher whose fallback delegates to ``handle_back``."""
        def fall_back(self, optimizer, operation):
            self.handle_back(optimizer, operation)
        return self.build_dispatcher(default=fall_back)
|
import { Component, OnInit } from '@angular/core';
import { CustomerService } from './customer.service';
import {Customer} from './customer.component';
@Component({
  moduleId : module.id,
  selector: 'app-customers',
  templateUrl: 'customers.component.html'
})
export class CustomersComponent implements OnInit {
  /** Customers rendered by the template; populated once the service resolves. */
  customers: Customer[];

  constructor(private _customerService: CustomerService) {}

  ngOnInit() {
    this._customerService
      .getCustomers()
      .then((fetched: Customer[]) => {
        this.customers = fetched;
      });
  }
}
|
/**
*/
package edu.kit.ipd.sdq.kamp4req.model.modificationmarks.impl;
import edu.kit.ipd.sdq.kamp4is.model.modificationmarks.impl.ISModifyEntityImpl;
import edu.kit.ipd.sdq.kamp4req.model.modificationmarks.ReqModificationmarksPackage;
import edu.kit.ipd.sdq.kamp4req.model.modificationmarks.ReqModifyEntity;
import org.eclipse.emf.ecore.EClass;
import org.palladiosimulator.pcm.core.entity.Entity;
/**
* <!-- begin-user-doc -->
* An implementation of the model object '<em><b>Req Modify Entity</b></em>'.
* <!-- end-user-doc -->
*
* @generated
*/
public class ReqModifyEntityImpl extends ISModifyEntityImpl<Entity> implements ReqModifyEntity {
	// NOTE: EMF-generated class. Members tagged @generated are overwritten on
	// regeneration; change the model / genmodel rather than editing them here.

	/**
	 * <!-- begin-user-doc -->
	 * Protected: instances are created through the package's factory.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	protected ReqModifyEntityImpl() {
		super();
	}

	/**
	 * <!-- begin-user-doc -->
	 * Identifies this object's EClass within the generated metamodel package.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	protected EClass eStaticClass() {
		return ReqModificationmarksPackage.Literals.REQ_MODIFY_ENTITY;
	}

} //ReqModifyEntityImpl
|
<gh_stars>0
package eu.kaluzinski.recipies.repositories;
import eu.kaluzinski.recipies.model.Category;
import org.springframework.data.repository.CrudRepository;
import java.util.Optional;
/**
 * Spring Data repository for {@link Category} entities.
 */
public interface CategoryRepository extends CrudRepository<Category, Long> {

    /**
     * Derived query: look up a category by its description.
     * NOTE(review): assumes descriptions are unique — confirm against the
     * Category mapping; duplicates would make the Optional lookup fail at runtime.
     */
    Optional<Category> findByDescription(String description);
}
|
import * as React from 'react';
import { withStyles, StyleRulesCallback, Theme, WithStyles } from '@material-ui/core/styles';
import { Subscription, Observable } from 'rxjs/Rx';
import { assoc, compose, sort, take, values } from 'ramda';
import * as moment from 'moment';
import Menu from '@material-ui/core/Menu';
import { events$, init } from 'src/events';
import notifications$ from 'src/notifications';
import { markEventsSeen } from 'src/services/account';
import UserNotificationButton from './UserNotificationButton';
import UserNotificationList from './UserNotificationList';
type ClassNames = 'root' | 'dropDown';

// JSS styles: `root` nudges the popover into place relative to the toggle
// button; `dropDown` constrains and scrolls the notification panel.
const styles: StyleRulesCallback<ClassNames> = (theme: Theme & Linode.Theme) => ({
  root: {
    transform: `translate(-${theme.spacing.unit * 2}px, ${theme.spacing.unit}px)`,
  },
  dropDown: {
    position: 'absolute',
    outline: 0,
    boxShadow: `0 0 5px ${theme.color.boxShadow}`,
    overflowY: 'auto',
    overflowX: 'hidden',
    minHeight: 16,
    width: 250,
    maxHeight: 300,
    // Widen the panel once the viewport allows it.
    [theme.breakpoints.up('sm')]: {
      width: 380,
    },
  },
});
// Props are passed straight through to the styled wrapper; no specific keys
// are required by this component.
interface Props {
  [index: string]: any;
}

interface State {
  anchorEl?: HTMLElement;  // menu anchor element; undefined means closed
  events: Linode.Event[];
  unseenCount?: number;
  notifications: Linode.Notification[];
}

type CombinedProps = {} & WithStyles<ClassNames>;

// Events keyed by String(event.id); used to de-duplicate the event stream.
interface EventsMap {
  [index: string]: Linode.Event;
}
/**
 * Bell-style notification menu: subscribes to the global notifications and
 * events streams, keeps a deduplicated, sorted window of recent events, and
 * marks events seen when the toggle button is clicked.
 */
class UserNotificationMenu extends React.Component<CombinedProps, State> {
  state = {
    events: [],
    notifications: [],
    anchorEl: undefined,
    unseenCount: 0,
  };

  // Subscription to the combined notifications/events stream; torn down on unmount.
  subscription: Subscription;

  // Tracks mount status so async emissions never call setState after unmount.
  mounted: boolean = false;

  static defaultProps = {
    unseenCount: 0,
  };

  componentDidMount() {
    this.mounted = true;

    this.subscription = Observable
      .combineLatest(
        notifications$,
        events$
          /** Filter the faux event used to kick off the progress bars. */
          .filter((event: Linode.Event) => event.id !== 1)
          /** Create a map of the Events using Event.ID as the key. */
          .scan((events: EventsMap, event: Linode.Event) =>
            assoc(String(event.id), event, events), {}),
      )
      /** Wait for the events to settle before calling setState. */
      .debounce(() => Observable.interval(250))
      /** Notifications are fine, but the events need to be extracted and sorted. */
      .map(([notifications, events]) => {
        return [
          notifications,
          extractAndSortByCreated(events),
        ];
      })
      .subscribe(
        ([notifications, events]: [Linode.Notification[], Linode.Event[]]) => {
          if (!this.mounted) { return; }

          this.setState({
            unseenCount: getNumUnseenEvents(events),
            events,
            notifications,
          });
        },
        () => null,
      );

    // Clicking the toggle marks everything up to the latest event id as seen,
    // then re-initializes the event polling.
    // NOTE(review): this second subscription is never unsubscribed; possible
    // leak / stray callback after unmount — confirm intended lifecycle.
    Observable
      .fromEvent(this.buttonRef, 'click')
      .withLatestFrom(
        events$
          .filter(e => e.id !== 1)
          .map(e => e.id),
      )
      .subscribe(([e, id]) => {
        markEventsSeen(id)
          .then(() => init())
          .catch(console.error);
      });
  }

  componentWillUnmount() {
    this.mounted = false;
    this.subscription.unsubscribe();
  }

  // DOM node of the toggle button; click source for the stream above.
  private buttonRef: HTMLElement;

  setRef = (element: HTMLElement) => {
    this.buttonRef = element;
  }

  render() {
    const { anchorEl, events, unseenCount, notifications } = this.state;
    const { classes } = this.props;

    return (
      <React.Fragment>
        <UserNotificationButton
          onClick={e => this.setState({ anchorEl: e.currentTarget })}
          getRef={this.setRef}
          notificationCount={unseenCount}
          disabled={notifications.length + events.length === 0}
          className={anchorEl ? 'active' : ''}
        />
        <Menu
          anchorEl={anchorEl}
          getContentAnchorEl={undefined}
          anchorOrigin={{ vertical: 'bottom', horizontal: 'right' }}
          transformOrigin={{ vertical: 'top', horizontal: 'right' }}
          open={Boolean(anchorEl)}
          onClose={() => this.setState({ anchorEl: undefined })}
          className={classes.root}
          PaperProps={{ className: classes.dropDown }}
        >
          <UserNotificationList notifications={notifications} events={events} />
        </Menu>
      </React.Fragment>
    );
  }
}
const styled = withStyles(styles, { withTheme: true });

/** Convert the events map to a list sorted newest-first, capped at 25 entries. */
const extractAndSortByCreated = compose(
  take(25),
  sort((a: Linode.Event, b: Linode.Event) => moment(b.created).diff(moment(a.created))),
  values,
);

/**
 * Count the events the user has not seen yet.
 * Replaces a manual index/while counting loop with the idiomatic filter form.
 */
const getNumUnseenEvents = (events: Linode.Event[]) =>
  events.filter((event) => !event.seen).length;

export default styled<Props>(UserNotificationMenu);
|
Stage-dependent cardiac regeneration in Xenopus is regulated by thyroid hormone availability Significance Heart failure is the leading cause of death worldwide. Cardiac regeneration studies currently focus on zebrafish, urodeles, and mammals, bypassing the amphibian Xenopus laevis, despite being considered as a leading model for regeneration research. We thus took advantage of X. laevis as a model to explore thyroid hormone (TH) influence on the cardiac regenerative process. We suggest a possible link between altered TH availability and the loss of cardiac regenerative capacity. Examining heart regeneration in Xenopus provides insight into how TH levels may contribute to the enigmatic loss of cardiac regeneration during vertebrate development, suggesting potential therapeutic leads of major biomedical and fundamental relevance for the development of future regenerative strategies, and ultimately to provide therapies for the human heart. Despite therapeutic advances, heart failure is the major cause of morbidity and mortality worldwide, but why cardiac regenerative capacity is lost in adult humans remains an enigma. Cardiac regenerative capacity widely varies across vertebrates. Zebrafish and newt hearts regenerate throughout life. In mice, this ability is lost in the first postnatal week, a period physiologically similar to thyroid hormone (TH)-regulated metamorphosis in anuran amphibians. We thus assessed heart regeneration in Xenopus laevis before, during, and after TH-dependent metamorphosis. We found that tadpoles display efficient cardiac regeneration, but this capacity is abrogated during the metamorphic larval-to-adult switch. Therefore, we examined the consequence of TH excess and deprivation on the efficiently regenerating tadpole heart. 
We found that either acute TH treatment or blocking TH production before resection significantly but differentially altered gene expression and kinetics of extracellular matrix components deposition, and negatively impacted myocardial wall closure, both resulting in an impeded regenerative process. However, neither treatment significantly influenced DNA synthesis or mitosis in cardiac tissue after amputation. Overall, our data highlight an unexplored role of TH availability in modulating the cardiac regenerative outcome, and present X. laevis as an alternative model to decipher the developmental switches underlying stage-dependent constraint on cardiac regeneration. |
Self-healing enhancement through co-deployment of automatic switches and electric vehicle PLs in an electricity distribution network Accomplishing highly reliable distribution grids is more and more important to today's modern society. Along the same line, self-healing is one of the most distinguishing features of the smart grid that could improve the reliability of the system up to a logical extent. In this study, the reliability of the electrical distribution grid has been improved by optimal allocation of automatic switches and parking lots (PLs). Thus, the optimal places of switches and PLs are determined with the purposes of minimising a combined effect of the customer-based (system average interruption duration index) and cost-based reliability indices. The total reliability cost (TRC) includes customer interruption costs, PLs and automatic switches investment costs, and the total cost of PLs incorporation in the service restoration process. The proposed approach is implemented in three different planning scenarios: switch placement, PL placement, and joint switch and PL placement. The particle swarm optimisation method is employed to solve optimisation problems. Finally, a standard reliability test system bus number four of the Roy Billinton test system (RBTS 4) is used to demonstrate the efficacy of the proposed method. |
<filename>definitions.h
/* definitions.h
Declares all functions used in the different files
This file written 2018 by <NAME>
Based on file "mipslab.h" written 2015 by <NAME>
some parts are original code written by <NAME>
For copyright and licensing, see file COPYING
*/
#ifndef DEFINITIONS_H
#define DEFINITIONS_H

#include <stdint.h>
#include <pic32mx.h>

/* Kinds of game object: the player's box, a fired shot, and an arrow. */
typedef enum {
  BOX,
  SHOT,
  ARROW
} Object_Type;

/* A single screen pixel, addressed by its x and y coordinates. */
typedef struct {
  unsigned char x;
  unsigned char y;
} Pixel;

/* A game object: ten pixels plus a tag saying what kind of object it is. */
typedef struct {
  Pixel piece[10];
  Object_Type object_type;
} Object;

/* Screen functions */
void quicksleep(int cyc);
uint8_t spi_send_recv(uint8_t data);
void display_init(void);
void display_menu(void);
void game_over(void);
void display_highscore(void);
void display_update(void);
void render(void);
void show_pixel(const Pixel *pixel);
void draw_object(const Object *object);
void display_clear(void);
void show_highscore(int highscore);
void show_number(int number, int position);

/* Helpers */
/* NOTE(review): `pow` shadows the C library's pow() from <math.h>; kept for
   compatibility with existing callers, but consider renaming (e.g. int_pow). */
unsigned int pow(unsigned const int bas, volatile unsigned int exponent);
void move_vertical(Object *object, int way);
void move_horizont(Object *object, int way);
void make_box(Object *object);
void make_shot(Object *object);
void make_arrow(Object *object);
void create_object(Object *object, int box_x);
void save_score(void);
void init(void);
int getbtns(void);
void release_press(void);

/* Dodge the bullet */
void work(void);
void main_menu(void);
void highscore(void);
void game(void);
void user_isr(void);

/* Data */
/* NOTE(review): defining a variable in a header creates a (tentative)
   definition in every translation unit that includes it; prefer `extern`
   here with a single definition in one .c file — TODO confirm no other
   file already defines textbuffer before changing. */
char textbuffer[4][16];
extern const uint8_t menu_font[512];
extern const uint8_t gameover_font[512];
extern const uint8_t font[128 * 8];
extern const uint8_t highscore_font[256];
extern const uint8_t number_font[80];

#endif /* DEFINITIONS_H */
<gh_stars>1-10
/*****************************************************************************
* *
* OpenNI 2.x Alpha *
* Copyright (C) 2012 PrimeSense Ltd. *
* *
* This file is part of OpenNI. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
*****************************************************************************/
#ifndef ILINKOUTPUTSTREAM_H
#define ILINKOUTPUTSTREAM_H
#include "XnLinkProtoLibDefs.h"
#include "XnLinkProtoUtils.h"
#include <XnPlatform.h>
#include <XnStatus.h>
namespace xn
{
class LinkOutputDataEndpoint;
/** Pure-virtual interface for a link-protocol output stream that sends
    message data through a LinkOutputDataEndpoint. */
class ILinkOutputStream
{
public:
    virtual ~ILinkOutputStream() {}

    /** Configures the stream; must succeed before the stream is used.
        @param nStreamID            ID of the stream being written.
        @param nMaxMsgSize          Maximum size of a whole message, in bytes.
        @param nMaxPacketSize       Maximum size of a single packet, in bytes.
        @param compression          Compression to apply to outgoing data.
        @param nInitialPacketID     Packet ID to start numbering from.
        @param pOutputDataEndpoint  Endpoint the stream writes to. */
    virtual XnStatus Init(XnUInt16 nStreamID,
                          XnUInt32 nMaxMsgSize,
                          XnUInt16 nMaxPacketSize,
                          XnLinkCompressionType compression,
                          XnUInt16 nInitialPacketID,
                          LinkOutputDataEndpoint* pOutputDataEndpoint) = 0;

    /** Whether the stream has been initialized (Init() completed). */
    virtual XnBool IsInitialized() const = 0;

    /** Shuts the stream down; Init() must be called again before reuse. */
    virtual void Shutdown() = 0;

    /** The compression type this stream was configured with. */
    virtual XnLinkCompressionType GetCompression() const = 0;

    /** Sends one message of nDataSize bytes.
        @param nMsgType        Protocol message type.
        @param nCID            Connection/channel ID — TODO confirm exact meaning.
        @param fragmentation   How the message may be split across packets.
        @param pData           Pointer to the payload to send.
        @param nDataSize       Payload size in bytes. */
    virtual XnStatus SendData(XnUInt16 nMsgType,
                              XnUInt16 nCID,
                              XnLinkFragmentation fragmentation,
                              const void* pData,
                              XnUInt32 nDataSize) const = 0;
};
}
#endif // ILINKOUTPUTSTREAM_H
|
Fiscal rules: problems of implementation in national practice The article examines global trends and urgent problems of the implementation of fiscal rules in the national practice. The role of fiscal rules in carrying out the macrostabilization function and ensuring the fiscal stability is highlighted. The article shows the institutional mechanisms of control over fiscal rules in the EU countries. Key prerequisites for efficient implementation of fiscal rules are revealed. The author identifies the advantages and disadvantages of various types of fiscal rules. The key problems that impede the effective functioning of fiscal rules in Ukraine are: harmonization of the statistical reporting on the basis of international standards, accuracy of fiscal forecasts and the quality of budget planning, problems of the effectiveness of the audit system, etc. The hidden risks of applying the system of numerical fiscal rules in Ukraine are: for the rules of actual deficit the problem of carefully fulfilling expenditure plans; for the rules of structural deficit the reliability of the methodology for assessing potential GDP and GDP gaps; and for the debt rules an additional assessment of the risks of debt growth due to explicit contingent liabilities associated with government derivatives. The author calculates the retrospective dynamics of the control indicators of the numerical fiscal rules of Ukraine and carries out the international comparisons of Ukraine with the EU countries over 2002-2018. The author argues that legally established numerical fiscal rules require further development of monitoring mechanisms, enforcement procedures, control and responsibility mechanisms for compliance with fiscal rules, as well as their flexibility in the event of emergencies. 
The article emphasizes that full implementation of fiscal rules in Ukraine will allow controlling the parameters of public finances in the framework of international fiscal sustainability standards and bring public finance management tools in Ukraine closer to best world practices. |
Changes of bone mineral density, quantitative ultrasound parameters and markers of bone turnover during treatment of hyperthyroidism. BACKGROUND The extent of reversibility of loss of bone mass density (BMD) in hyperthyroid patients after treatment is not clear. METHODS The bone density measured by dual X-ray absorptiometry (DXA), the parameters of quantitative ultrasound (QUS) and biochemical markers of bone turnover of 22 patients were measured before and after one year of treatment with thiamazole and levothyroxine. RESULTS The mean BMD of lumbar spine, femoral neck, Ward triangle and total hip bone density increased by 5.9, 3.8, 3.0 and 6.7%, respectively, after one year of treatment, all significant increases except the increase in Ward triangle bone mass density. There was no significant change in QUS parameters, although the increase in broadband ultrasound attenuation (BUA) of the left and right calcaneus of 5.2 and 4.2%, respectively, suggests reversibility in the long term. Urinary pyridinoline cross-links declined significantly and normalised after treatment. Bone-specific alkaline phosphatase declined after an initial rise, not (yet) reaching normal values after one year of treatment. CONCLUSION The decline in BMD in hyperthyroid patients measured by DXA seems to be reversible after treatment of hyperthyroidism, whereas a change in the QUS parameters, probably also an indicator of bone elasticity and architecture, could not be found. |
<filename>sandbox/eigen.py
# required imports.
from doit.tools import run_once
# task_eigen3: task generator used to download the eigen3 sources.
def task_eigen3():
    """doit task generator: download and unpack the Eigen 3.3.7 sources.

    Produces an ``eigen3`` directory; the ``run_once`` uptodate check keeps
    the task from re-downloading on subsequent builds.
    """
    url = 'http://bitbucket.org/eigen/eigen/get/3.3.7.tar.gz'
    actions = [
        f'wget -q {url} -O eigen3.tgz',  # fetch the release tarball
        'tar xf eigen3.tgz',             # unpack (creates an eigen-* directory)
        'mv eigen-* eigen3',             # normalize the directory name
    ]
    return {'actions': actions, 'uptodate': [run_once], 'targets': ['eigen3']}
|
Death Note isn’t just among the most popular Japanese anime out there, it’s grown beyond the bounds of its domain to become an international phenomenon. You don’t have to be an Otaku to appreciate the immense depth of this psychological thriller. A unique concept couple with memorable characters and stellar plot twists, all in a relatively grounded world make it an excellent gateway to the world of anime. It isn’t perfect by any means, with the second half of the series often pegged as a step down from the first, but the recent live-action adaptation by Netflix has fans thinking it might not have been the worst thing that happened to the franchise.
To the uninitiated, it might not seem like the worst film in the world, as it holds its own based purely on the concept. Having watched the anime series and the first three Japanese live-action movies myself, I found it hard to see past its flaws, despite my best efforts. In what follows, I explore what I felt were the film’s top shortcomings.
Spoiler Warning: Minor to major spoilers for Netflix’s 2017 live action Death Note film and the Death Note anime series ahead.
Unnecessary Love Story
At this point, it’s become almost a given that if there’s more than a single person in a Hollywood movie, the script is going to at least hint at the possibility of a love affair, especially if there’s any mention of High School. Even though it’s among the oldest Hollywood tropes, writers continue to pivot their stories around it and not just because it sells; it is also the easiest (read: laziest) way to convince the audience of a character’s motivation.
The killer notebook’s appearance almost felt like an afterthought
The very first scene of Netflix’s Death Note sees its protagonist share a semi-moment with his future love interest. While this does help move things along a little faster in the scenes to come, it does so at the detriment of the story’s focus. The writers seem to have underestimated the impact of the way the anime went about introducing its audience to the Death Note, instead opting to treat it like every other element in the story. In fact, compared to the aforementioned glance dance between Light and Mia, the killer notebook’s appearance almost felt like an afterthought.
The unnecessary focus on Light and Mia’s romance doesn’t end there, however, as the movie continues to divert focus towards their complicated relationship while Light’s battle of wits with his polar opposite and intellectual equal, the detective known only as “L,” is reduced to a single forgettable scene.
Fading Light
A run-of-the-mill, impulsive teenager instead of a dangerous, calculating sociopath with a god complex
There’s no doubt that Netflix’s Light Turner and the anime’s Light Yagami are two very different individuals. One can make peace with the fact that the studio insisted on using the character’s first name even after stripping him of his original personality. They didn’t make slight tweaks to the character, though, with the end result being far from likeable. Much focus was taken away from his hyper-intellect to the point where he mostly seems like a run-of-the-mill, impulsive teenager instead of a dangerous, calculating sociopath with a god complex.
While the aim here might have been to have the character seem more grounded, that’s not all it did. Taking the intimidating nature of Kira away certainly didn’t help him. His twisted yet unwavering sense of justice remained largely unexplored as well, which contributed to making his motives ambiguous.
Through quite a majority of the film, Light didn’t even seem like his own character, instead coming off as a derivative of his relationship with Mia. To put it simply, Netflix’s take on Kira is a combination of a fairly ordinary teenage boy with ambiguous intent and a twisted teenage girl who – major spoiler – betrays his trust for power.
“L” is For Lackluster
His sense of justice takes the backseat when the going gets tough
There are few characters as revered for their intellect as Death Note’s “L.” The movie does manage to capture the signature eccentricity and intellect of the freelance detective, but does away with calm, calculating demeanor, once again, most likely to make him seem more human. He has the sweet tooth and strange mannerisms of his anime counterpart, but unlike the latter, his sense of justice takes the backseat when the going gets tough. In fact, while it is never confirmed, the movie’s ending does imply that Kira’s hunter is capable of cold-blooded murder – the same brand of vigilante justice that he so strongly condemns at the beginning of the movie.
Most disappointing is the fact that L and his efforts to thwart Kira feel like more of a sideshow throughout the movie with the relationship between Light and Mia taking center stage. Heck, even Light’s father – major spoiler – ultimately figures out his involvement in the Kira murders at the end of the movie, mostly without the detective’s help.
Battle of Wits Not Included
I’ve mentioned this twice before, but I felt it needed a section all to itself – one of the best elements of the anime is the battle of intellects between the vengeful Light and just L. Of course, a 90-minute movie would be hard-pressed to include more than a couple examples from the anime, but Netflix’s adaptation made no effort whatsoever to go said route. There was a short, face-to-face confrontation for fans to chew on, but it hardly did justice to what defines the rivalry between the two – a complex, nail-biting match of chess driven by clashing ideologies.
Ryuk’s Unclear Motives
In the anime, Light’s shinigami companion is perceived as an oddball, even among his kin. Bored by centuries of watching life play out in the human world, Ryuk grows bored and decides to drop a Death Note to Earth to spice things up. While he seems unable to process empathy, and is clearly interested in Light’s ideology to use the Note to smite evil, if only for its entertainment value, he puts all his cards on the table, making his motive completely clear. He even warns Light about the dire repercussions of using the Note.
Ryuk’s Netflix variant, however, is more of a tenacious devil trying to tempt unwary souls into committing acts of evil than a playful death god on the hunt for a thrill. His backstory remains unexplored, with the only hint of his motives being that he is looking for a “keeper” to use the Note. Why he needs a keeper in the first place is never explained. It is made clear that he has a hidden agenda and is not to be trusted, but that’s all the viewer is ever told.
A lackluster formula entailing an impulsive Kira duo and an emotional L
Certain reviews around the web might suggest that these discrepancies were a product of the movie’s relatively short run-time, and while I do agree that an additional 30 minutes or an hour might have allowed for better character building, it won’t have affected the movie’s lackluster formula entailing an impulsive Kira duo and an emotional L being pawns in a Death God’s games.
It’s clear the studio wanted to take their adaptation in a different direction than the anime or the 2006 Japanese live-action films, though they might have been better off creating a story with all-new characters. The film does leave room for a sequel to tie its loose ends, but with a string of unflattering reviews weighing it down, the studio is likely to think twice about green-lighting a second installment.
Liked this post? Show us some love! Follow us on Facebook, Twitter and Google+ for instant social media updates from our website.
Related Posts |
Mefenamic acid anti-inflammatory drug: probing its polymorphs by vibrational (IR and Raman) and solid-state NMR spectroscopies. This work deals with the spectroscopic (supported by quantum chemistry calculations), structural, and morphological characterization of mefenamic acid (2-[(2,3-dimethylphenyl)amino]benzoic acid) polymorphs, known as forms I and II. Polymorph I was obtained by recrystallization in ethanol, while form II was reached by heating form I up to 175 °C, to promote the solid phase transition. Experimental and theoretical vibrational band assignments were performed considering the presence of centrosymmetric dimers. Besides band shifts in the 3345-3310 cm(-1) range, important vibrational modes to distinguish the polymorphs are related to out-of-phase and in-phase N-H bending at 1582 (Raman)/1577 (IR) cm(-1) and 1575 (Raman)/1568 (IR) cm(-1) for forms I and II, respectively. In IR spectra, bands assigned to N-H bending out of plane are observed at 626 and 575 cm(-1) for polymorphs I and II, respectively. Solid-state ¹³C NMR spectra pointed out distinct chemical shifts for the dimethylphenyl group: 135.8 to 127.6 ppm (carbon bonded to N) and 139.4 to 143.3 ppm (carbon bonded to methyl group) for forms I and II, respectively. |
/**
* Constructs the model from the given <code>metaModel</code>.
*
* @param metaModel the meta model.
* @return the package.
* @throws CreationFailedException
*/
private RefPackage createModel(final MofPackage metaModel)
throws CreationFailedException
{
RefPackage model = this.repository.getExtent(EXTENT_NAME);
if (model != null)
{
this.removeModel(EXTENT_NAME);
}
if (logger.isDebugEnabled())
{
logger.debug("creating the new meta model");
}
model = repository.createExtent(
EXTENT_NAME,
metaModel);
if (logger.isDebugEnabled())
{
logger.debug("created model extent");
}
return model;
} |
def add_parameter_write_table(self,table_header='',dcols=[],data=[[]],filename='isotope_yield_table_MESA_only_param_new.txt'):
    """Append extra data columns to one (M, Z) yield table and write it out.

    Finds the cached table whose header matches ``table_header`` (a string of
    the form ``(M=<mass>,Z=<metallicity>)``), re-attaches its stored column
    attributes, appends the caller-supplied columns ``dcols``/``data``, and
    writes the result with ``ascii_table.writeGCE_table_parameter`` to
    ``filename``.

    NOTE(review): the mutable defaults ``dcols=[]`` and ``data=[[]]`` are
    shared between calls — callers should always pass their own lists.
    """
    import ascii_table as ascii1
    # Cached per-table data gathered when the yield tables were read in.
    tables=self.table_mz
    yield_data=self.yield_data
    data_cols=self.data_cols
    col_attrs=self.col_attrs
    col_attrs_data1=self.col_attrs_data
    for k in range(len(tables)):
        # Only process the table whose header matches the request.
        if not tables[k]==table_header:
            continue
        # Header is '(M=<mass>,Z=<metallicity>)'; [:-1] strips the trailing ')'.
        mass=float(tables[k].split(',')[0].split('=')[1])
        metallicity=float(tables[k].split(',')[1].split('=')[1][:-1])
        col_attrs=col_attrs  # no-op; kept as-is (presumably a leftover)
        col_attrs_data=col_attrs_data1[k]
        # Rebuild the 'name: value' attribute lines for the output header,
        # skipping the first attribute (index 0).
        attr_lines=[]
        for h in range(1,len(col_attrs)):
            attr=col_attrs[h]
            idx=col_attrs.index(attr)
            # NOTE(review): col_attrs_data was already indexed with [k] above;
            # indexing [k] a second time here looks suspicious — confirm the
            # layout of self.col_attrs_data before relying on this.
            attr_data=col_attrs_data[k][idx]
            line=attr+': '+'{:.3E}'.format(attr_data)
            attr_lines.append(line)
        data_new=yield_data[k]
        dcols_new=data_cols[:]
        # Append the caller-supplied columns after the existing ones.
        for h in range(len(dcols)):
            print ('h :',h)
            data_new.append(data[h])
            dcols_new.append(dcols[h])
        # Move the second original column to the end (writer-specific ordering).
        dcols_new=[dcols_new[0]]+dcols_new[2:]+[dcols_new[1]]
        print ('dcols: ',dcols_new)
        special_header='Table: (M='+str(mass)+',Z='+str(metallicity)+')'
        headers=[special_header]+attr_lines
        ascii1.writeGCE_table_parameter(filename=filename,headers=headers,data=data_new,dcols=dcols_new)
//
// AssignmentActionFactory.cpp
// assignment-client/src/
//
// Created by <NAME> on 2015-6-19
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AssignmentActionFactory_h
#define hifi_AssignmentActionFactory_h
#include "EntityActionFactoryInterface.h"
#include "AssignmentAction.h"
/// Factory that builds EntityAction instances for the assignment-client.
class AssignmentActionFactory : public EntityActionFactoryInterface {
public:
    AssignmentActionFactory() : EntityActionFactoryInterface() { }
    virtual ~AssignmentActionFactory() { }

    /// Creates an action of the given type and id, bound to ownerEntity,
    /// configured from the supplied arguments map.
    virtual EntityActionPointer factory(EntityActionType type,
                                        const QUuid& id,
                                        EntityItemPointer ownerEntity,
                                        QVariantMap arguments);

    /// Reconstructs an action for ownerEntity from its serialized byte-array form.
    virtual EntityActionPointer factoryBA(EntityItemPointer ownerEntity, QByteArray data);
};
#endif // hifi_AssignmentActionFactory_h
|
Functional diversification of duplicated chalcone synthase genes in anthocyanin biosynthesis of Gerbera hybrida. Chalcone synthase (CHS) is the key enzyme in the first committed step of the flavonoid biosynthetic pathway and catalyzes the stepwise condensation of 4-coumaroyl-CoA and malonyl-CoA to naringenin chalcone. In plants, CHS is often encoded by a small family of genes that are temporally and spatially regulated. Our earlier studies have shown that GCHS4 is highly activated by ectopic expression of an MYB-type regulator GMYB10 in gerbera (Gerbera hybrida). The tissue- and development-specific expression patterns of three gerbera CHS genes were examined. Virus-induced gene silencing (VIGS) was used to knock down GCHS1 and GCHS4 separately in gerbera inflorescences. Our data show that GCHS4 is the only CHS encoding gene that is expressed in the cyanidin-pigmented vegetative tissues of gerbera cv Terraregina. GCHS3 expression is pronounced in the pappus bristles of the flowers. Expression of both GCHS1 and GCHS4 is high in the epidermal cells of gerbera petals, but only GCHS1 is contributing to flavonoid biosynthesis. Gerbera contains a family of three CHS encoding genes showing different spatial and temporal regulation. GCHS4 expression in gerbera petals is regulated post-transcriptionally, at the level of either translation elongation or protein stability. |
/**
* If you want a metadata item on folders for specifying if provision only policy groups
* @return
*/
public boolean isAllowPolicyGroupOverride() {
if (this.allowPolicyGroupOverride != null) {
return this.allowPolicyGroupOverride;
}
return true;
} |
Increasingly, sports managers and franchises are turning to analytics to determine the players they recruit and pay premium salaries. The question is no longer what have you done for me lately? But rather: How well will you do tomorrow? and How can we be sure?
This emphasis on paying for future potential over past performance which is now transforming sports, also holds huge implications for how managers will “incent and inspire” high achievers in their organizations, according to Michael Schrage, research fellow at the Massachusetts Institute of Technology (MIT).
In a recent blog, he also observed that this “moneyball ethos” and the economic value of forecasting methodologies are also redefining the values of loyalty and leadership.
He cites a recent conversation with Daryl Morey, general manager of the National Basketball Association’s Houston Rockets who is considered a pioneer in using statistical, quantitative analytics to pro basketball.
The trend is towards paying for future performance meaning “forecasted performance,” said Morey.
Algorithmic and biomedical advances are now providing sports coaches, managers and team owners the tools to predict which players have peaked and which ones have their full potential ahead of them, said Morey.
/**
* A class to load in image data. This is largely derived from the Macy Mae
* source code, where it work well and will create graphics for a similar
* system.
*
* @author Jared Blackburn
*/
public class ImageLoader extends AbstractLoader {
private static final ImageLoader reader = new ImageLoader();
private static final String LOC = "/assets/pics/";
private static final String INFO_LOC = LOC + "GraphicsData.txt";
/**
* One private instance exists to conveniently hold temporary
* information. This should never be instantiated elsewhere nor
* shared with other classes, but only used internally.
*/
private ImageLoader(){
super();
loc = LOC;
infoLoc = INFO_LOC;
}
/**
* The static entryway to this image loading system. It should be
* called only once during initialization. This then calls the private
* methods that have access to internal data storage.
*/
public static void initGraphics() {
reader.openInfo();
}
@Override
protected void makeResource() {
Graphic.addGraphic(name, list);
}
} |
<filename>src/main/java/info/vziks/lessons/annotation/work/two/Container.java
package info.vziks.lessons.annotation.work.two;
import info.vziks.lessons.annotation.work.two.annotation.ConfigClass;
import info.vziks.lessons.annotation.work.two.annotation.InitClass;
import info.vziks.lessons.annotation.work.two.annotation.InitProp;
import info.vziks.lessons.annotation.work.two.annotation.RunMethod;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
/**
 * A tiny reflection-based container: instantiates every class annotated with
 * {@code @ConfigClass} or {@code @InitClass}, fills {@code @ConfigClass}
 * fields from {@code config.properties}, injects created objects into
 * {@code @InitProp} fields (matched by field type), and finally invokes every
 * method annotated with {@code @RunMethod}.
 */
public class Container {

    private Set<Class> classes;
    private Properties properties = new Properties();
    private HashMap<Class, Object> objects = new HashMap<>();

    public Container(Set<Class> classes) {
        this.classes = classes;
    }

    /**
     * Runs the full container lifecycle: load properties, create the
     * annotated objects, inject fields, then invoke the run methods.
     */
    public void init() throws NoSuchMethodException, InstantiationException, IllegalAccessException, InvocationTargetException {
        loadProperties();
        createObjects();
        intProps();
        runVoids();
    }

    /** Invokes every {@code @RunMethod}-annotated method on each managed object. */
    private void runVoids() throws InvocationTargetException, IllegalAccessException {
        for (Map.Entry<Class, Object> entry : objects.entrySet()) {
            Method[] methods = entry.getKey().getDeclaredMethods();
            for (Method method : methods) {
                if (method.isAnnotationPresent(RunMethod.class)) {
                    method.setAccessible(true);
                    method.invoke(entry.getValue());
                }
            }
        }
    }

    /** Injects managed objects into {@code @InitProp} fields, matched by field type. */
    private void intProps() throws IllegalAccessException {
        for (Map.Entry<Class, Object> entry : objects.entrySet()) {
            Field[] fields = entry.getValue().getClass().getDeclaredFields();
            for (Field field : fields) {
                if (field.isAnnotationPresent(InitProp.class)) {
                    // null when no managed object of that type exists.
                    Object o = objects.get(field.getType());
                    field.setAccessible(true);
                    field.set(entry.getValue(), o);
                }
            }
        }
    }

    /**
     * Instantiates all {@code @ConfigClass}/{@code @InitClass} classes and,
     * for {@code @ConfigClass} ones, assigns each declared field the property
     * value {@code <prefix>.<fieldName>} from {@code config.properties}.
     */
    private void createObjects() throws InvocationTargetException, NoSuchMethodException, InstantiationException, IllegalAccessException {
        for (Class cls : classes) {
            if (cls.isAnnotationPresent(ConfigClass.class)
                    || cls.isAnnotationPresent(InitClass.class)) {
                Object o = createObject(cls);
                if (cls.isAnnotationPresent(ConfigClass.class)) {
                    // Loop-invariant annotation lookup hoisted out of the field loop.
                    ConfigClass configClass =
                            (ConfigClass) cls.getDeclaredAnnotation(ConfigClass.class);
                    String prefix = configClass.prefix();
                    for (Field field : cls.getDeclaredFields()) {
                        field.setAccessible(true);
                        // NOTE(review): assigns the raw String property value;
                        // assumes all @ConfigClass fields are Strings — confirm.
                        field.set(o, properties.getProperty(prefix + "." + field.getName()));
                    }
                }
                objects.put(cls, o);
            }
        }
    }

    /** Creates an instance via the class's declared no-arg constructor. */
    private Object createObject(Class cls) throws NoSuchMethodException, IllegalAccessException, InvocationTargetException, InstantiationException {
        Constructor constructor = cls.getDeclaredConstructor();
        return constructor.newInstance();
    }

    /** Loads config.properties from the classpath; logs and continues on failure. */
    private void loadProperties() {
        try (InputStream input =
                     Container.class
                             .getClassLoader()
                             .getResourceAsStream("config.properties")) {
            properties.load(input);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
|
You might have thought that you weren’t that shallow when it comes to pulling. That you go for personality, intelligence, humour. But apparently, if you’re a girl you don’t – or at least, that’s what a new survey of 2,000 British men and women found.
The research, which comes from skincare brand Remescar, reveals a surprising trend in the dating world; men are more attracted to the person that they meet (i.e. their personality and their confidence) while women are more attracted to the person’s physical attributes (i.e. their smile and their body type).
The tables have turned when it comes to dating and finding a potential new partner and now it’s British men who focus on the person and who they are, whereas women are more likely to seek out good looks.
Initially all respondents were asked ‘Do you consider yourself to be fussy when it comes to meeting someone new?’ to which 61 per cent of respondents confessed that ‘yes’ they were fussy. Of those who said they were fussy, the top reasons cited were ‘I don’t want to waste my time with someone who’s not for me’ (31 per cent) and ‘I know what I like and what I don’t like’ (22 per cent). The most common reason, however, for those who considered themselves not to be fussy was ‘I like to be open-minded in case I find someone that’s not my usual type’ (56 per cent).
All male respondents were asked what they looked for in a potential new partner. When provided with a list of possible responses and told to select all that applied, the top five responses were: good personality, someone to have a laugh with, confidence, intelligence, how she gets on with friends and family, and a good family ethic.
When women were asked the same question however, their top five was a little different. While women also rated family ethics, confidence and personality, their top value was “a nice smile”, and third from top was “body type”.
According to the poll, just 31 per cent of male respondents admitted that they would ignore or avoid someone of the opposite sex based upon their looks, compared to a massive 70 per cent of female respondents admitted that they would ignore or avoid the opposite sex because of the way they looked.
Carien Veldhuis, Marketing Manager at Remescar, said: “It’s shocking to hear that so many women, and likewise so many men, will ignore someone of the opposite sex based upon the way they look; you never know, that could be your Mr or Mrs Right right there, and you’ve snubbed them because they don’t quite marry up to the image of your dream partner that you’ve built up in your head.
“Ultimately looks aren’t everything – at the end of the day you want to be able to have an intelligent conversation with your partner when it’s just the two of you.” |
Using temporal logics for planning and control Traditionally, planning work in artificial intelligence has focused on primitive actions and instantaneous states. Innovative work has been done on composing primitive actions so as to bring about desired final states. The thesis advanced in this paper is that instead of simply focusing on primitive actions, it is also useful to use representation and reasoning tools whose primitive objects are sequences of actions (or the associated sequences of states that they generate). Temporal logics are a useful tool for representing and reasoning about action sequences. The author has examined some different applications of temporal logics to problems of planning and control. |
<filename>WindowOntheWorld/src/main/java/com/creamyfever/wow/vo/Discussion_log.java<gh_stars>0
package com.creamyfever.wow.vo;
/**
 * Value object representing a single line of a discussion log.
 * Plain mutable bean: no-arg constructor plus per-field accessors.
 */
public class Discussion_log {

	private int dis_no;       // discussion identifier
	private int dis_log_seq;  // sequence number of this line within the discussion
	private int idno;         // identifier of the member who wrote the line
	private String dis_log;   // the log text itself

	/** Creates a fully populated log entry. */
	public Discussion_log(int dis_no, int dis_log_seq, int idno, String dis_log) {
		this.dis_no = dis_no;
		this.dis_log_seq = dis_log_seq;
		this.idno = idno;
		this.dis_log = dis_log;
	}

	/** Creates an empty entry, for bean-style population via the setters. */
	public Discussion_log() {
	}

	public int getDis_no() {
		return this.dis_no;
	}

	public void setDis_no(int dis_no) {
		this.dis_no = dis_no;
	}

	public int getDis_log_seq() {
		return this.dis_log_seq;
	}

	public void setDis_log_seq(int dis_log_seq) {
		this.dis_log_seq = dis_log_seq;
	}

	public int getIdno() {
		return this.idno;
	}

	public void setIdno(int idno) {
		this.idno = idno;
	}

	public String getDis_log() {
		return this.dis_log;
	}

	public void setDis_log(String dis_log) {
		this.dis_log = dis_log;
	}

	@Override
	public String toString() {
		StringBuilder sb = new StringBuilder("Discussion_log [dis_no=");
		sb.append(dis_no).append(", dis_log_seq=").append(dis_log_seq)
		  .append(", idno=").append(idno)
		  .append(", dis_log=").append(dis_log).append("]");
		return sb.toString();
	}
}
<filename>src/app/task/Task.ts<gh_stars>0
/**
* Created by suhel on 9/18/16.
*/
/** A to-do item: a description plus its completion state. */
export class Task {
  /** Free-text description of the task. */
  public description: string;
  /** Whether the task has been completed. */
  public completed: boolean;

  constructor(description: string, completed: boolean) {
    this.description = description;
    this.completed = completed;
  }
}
|
<gh_stars>0
/* istanbul ignore file */
import { researchOutputMapSubtype } from '@asap-hub/model';
import { RestResearchOutput } from '@asap-hub/squidex';
import { Migration } from '../handlers/webhooks/webhook-run-migrations';
import { applyToAllItemsInCollection } from '../utils/migrations';
/**
 * Migration: rewrites each research output's deprecated subtype value to
 * its mapped replacement. Items whose subtype has no mapping are skipped.
 */
export default class MapResearchOutputDeprecatedSubtype extends Migration {
  up = async (): Promise<void> => {
    await applyToAllItemsInCollection<RestResearchOutput>(
      'research-outputs',
      async (output, client) => {
        const subtype = researchOutputMapSubtype(output.data?.subtype?.iv);
        if (!subtype) {
          return; // nothing to map for this item
        }
        await client.patch(output.id, {
          subtype: { iv: subtype },
        });
      },
    );
  };

  // eslint-disable-next-line @typescript-eslint/no-empty-function
  down = async (): Promise<void> => {};
}
|
Q:
Magnetic Field Transmission
I have heard from here that if you put 1 turn of wire around your house, you can hear music all around inside your house. It doesn't tell me very much on all of the components etc. to drive but I am assuming that you have to have an amplifier on the receiver and the transmitter. To simulate, before actually performing the experiment, I wrapped 1 turn of wire around a cardboard box. For the transmitter, I found a stereo system, which can produce a very loud sound. The only problem is because it acts as a short, I can only turn it up to a certain volume. For the receiver, I just hooked up a speaker to a coil, and I heard a faint sound. I know with the scale of a large house, I will need some type of amplifier. How would I set up the circuit for amplifying the low signal sound found on the coil? Would this be possible with a TL082CP op-amp and if so, how would I set up the circuit? Also does anybody know of any better way of transmitting the sound through it or is the stereo fine for the setup around my house?
Also, I came across a very strange thing when I found a head-set and put one ear-piece in the magnetic field. I heard the audio out of the other. I had nothing connected up to the jack and it seems impossible for this to happen with a stereo head-set. Why is this happening?
A:
It is called an audio induction loop and is commonly used to aid people who have suboptimal hearing.
Here is a nice example. |
Effects of implant angulation, material selection, and impression technique on impression accuracy: a preliminary laboratory study. The aim of this preliminary laboratory study was to evaluate the effects of 5- and 25-degree implant angulations in simulated clinical casts on an impression's accuracy when using different impression materials and tray selections. A convenience sample of each implant angulation group was selected for both open and closed trays in combination with one polyether and two polyvinyl siloxane impression materials. The influence of material and technique appeared to be significant for both 5- and 25-degree angulations (P <.05), and increased angulation tended to decrease impression accuracy. The open-tray technique was more accurate with highly nonaxially oriented implants for the small sample size investigated. |
Characteristic variation of pulsed-anodized NiTi surface by the adjustment of voltage-unapplied state Pulsed anodization of superelastic NiTi alloy in nitric acid electrolyte is a novel surface modification process that enables the formation of an almost Ni-free TiO2 layer on their surface. The core technology is the modulation of the voltage-unapplied state that is necessary for the chemical reaction with an electrolyte. Here, the effect of the adjustment of the voltage-unapplied state on the layer characteristics was investigated. Prolongation of the voltage-unapplied state led to the formation of a thicker TiO2 layer. This result was attributed to the elimination of Ni by nitric acid. As a result, nanometer-sized pores appearing as vestiges of Ni elimination increased and enlarged concomitantly. Furthermore, by prolonging the voltage-unapplied state, the amount of Ni ions released into a phosphate-buffered saline (PBS) solution diminished significantly. Such prolongation enabled significant suppression of the dissolution of the layer itself; thus, Ni ion release from the surface region was considered to be reduced.
The Improbable Ida B. Wells Few black women born in the nineteenth century are well known today, but the ones we do know about are larger than life. They are improbable figures, whose achievements seem to be dictated by sheer force of personality. Take, for instance, ex-slave Harriet Tubman, the legendary "black Moses," who led scores of slaves to freedom in an era when few female slaves managed to free even themselves. The intrepid Tubman also served as a guide on Union gunboats during the Civil War. Moreover, her daring exploits seem all the more unlikely given that Tubman was partially disabled by a head injury she received in her youth. (Tubman was given to fits of what her nineteenth-century biographers called "somnolency"-sudden brief episodes of deep sleep, or what would today be termed narcolepsy.) 1 Likewise, Sojourner Truth, another famous ex-slave, also had a career that defied all odds. Raised a domestic, she never learned to read and write, and yet she sustained a thirty-year career as an influential public speaker, lecturing not only on abolition, but women's rights, women's suffrage, and temperance.2 Less well known, but equally remarkable, is black abolitionist Maria Stewart, self-educated and a servant in her youth, who in 1831 somehow had the gall to become the first American woman-black or white-to give a public lecture.3 And finally, as improbable as any of them, there is the slave-born Ida B. Wells, who achieved international renown as an anti-lynching crusader in the 1890s, and whose career of relentless political activism led American military intelligence to deem her in 1918 a "'far more dangerous agitator than Marcus Garvey"' (p. 157). Orphaned at age sixteen, Wells, whose life and thought are chronicled by Patricia A. Schechter in her new book Ida B. Wells-Barnett and American Reform, 1880-1930, was hardly groomed for fame and influence by her upbringing. 
Her father was a carpenter, her mother a cook, and when they died along with one of Wells's six younger siblings during a yellow fever epidemic that struck Holly Springs, Mississippi, in 1878, the young Ida Wells was left parent to five |
/**
* It is possible for an internal node to
* designate a named-entity, not just leaf nodes.
* Plus it is just more obvious.
*/
public class Node {
final public String name;
final Node parent;
final public Map<String, Node> children;
Boolean terminal;
final public List<Indices> recognized = new ArrayList<Indices>();
public Node() {
this.name = "ROOT";
this.parent = null;
this.children = new HashMap<String, Node>();
this.terminal = Boolean.FALSE;
}
public Node(final Node parent, int ix, final String[] fullName) {
this.name = fullName[ix];
this.parent = parent;
this.children = new HashMap<String, Node>();
this.terminal = (ix >= (fullName.length - 1));
}
public Boolean hasNamed(final String name) {
return this.children.containsKey(name);
}
public Node getNamed(final String name) {
return this.children.get(name);
}
public Boolean recognized(final Indices ixs) {
recognized.add(ixs);
return Boolean.TRUE;
}
public int occurenceCount() {
return recognized.size();
}
public Boolean isRoot() {
return this.parent == null;
}
/**
* Add a child node to the current node.
* The index indicates the current nodes name.
* @param ix the index of the item in the fullname being added.
* @param fullName the full name for the entity.
* @return success?
*/
public Boolean addChild(int ix, final String[] fullName) {
/*
log.log(Level.INFO, new StringBuffer("addChild ")
.append(ix).append(" name " )
.append(Arrays.toString(fullName))
.toString());
*/
final Node child = new Node(this, ix, fullName);
children.put(fullName[ix], child);
if (child.terminal) {
return Boolean.TRUE;
}
child.addChild(ix+1, fullName);
return Boolean.TRUE;
}
public String toString() {
if (this.parent == null) {
// log.log(Level.INFO, "top");
return name;
}
return new StringBuffer()
.append(this.parent.toString())
.append(':').append(this.name)
.toString();
}
public Boolean walk(Accumulator ac) {
ac.assimilate(this);
for (Node child : this.children.values()) {
child.walk(ac);
}
return Boolean.TRUE;
}
} |
<filename>service/app/config.go
package app
import (
"fmt"
"net"
)
// Config holds the runtime configuration for the application service.
type Config struct {
	ListenAddress net.TCPAddr // Network address and port where the application should listen on
	AllowedOrigins string // CORS policy allowed origins
	PostgresURL string // URL for connecting to Postgres service
}
// Validate checks configuration for stupid values.
// Currently only PostgresURL is required; the other fields are not checked.
func (c *Config) Validate() error {
	if c.PostgresURL != "" {
		return nil
	}
	return fmt.Errorf("POSTGRES_URL should not be empty")
}
|
Measurements of nonlinear absorption in azo dye doped liquids We present a simple experimental method for the determination of several parameters characterizing the photoisomerization of an azo dye dopant in a liquid host. A combination of intensity-dependent transmission measurements and the relaxation of the transmission after illumination yields the sought-after parameters. These parameters are needed for a quantitative analysis of light-induced birefringence and dichroism within the framework of a dynamical mean-field model.
The renewable energy policy of Turkey Nowadays, countries consider energy as politics and the power of competition with an increasing rivalry. Therefore, the importance of energy is going up and the importance of sustainability in energy policy and the decreasing policy of external dependence emerges. It is a scientific truth that current energy resources will die out in parallel with increasing energy demand. Besides, it is not possible to hold world emission values within the current limits because of the increasing population and energy demands. This situation makes the use of renewable energy resources in energy production unavoidable. Basically, the sources can be listed as wind power, geothermal energy, biomass energy, and hydrogen energy. In this study, the current potential of renewable energy resources in Turkey and the extent to which this potential is used are reviewed, and recommendations are made on how to use these sources effectively.
/////////////////////////////////////////////////////////////////////////
// $Id: logical16.cc 11313 2012-08-05 13:52:40Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001-2012 The Bochs Project
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
// XOR r/m16, r16 -- memory-destination form: read-modify-write the word at
// seg:eaddr with the source register, then set OSZAPC flags from the result.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::XOR_EwGwM(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16;
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  op1_16 = read_RMW_virtual_word(i->seg(), eaddr);
  op2_16 = BX_READ_16BIT_REG(i->src());
  op1_16 ^= op2_16;
  write_RMW_virtual_word(op1_16);  // commits to the address of the RMW read
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// XOR r16, r16 -- register-to-register form; flags set from the result.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::XOR_GwEwR(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16;
  op1_16 = BX_READ_16BIT_REG(i->dst());
  op2_16 = BX_READ_16BIT_REG(i->src());
  op1_16 ^= op2_16;
  BX_WRITE_16BIT_REG(i->dst(), op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// XOR r16, r/m16 -- memory-source form: plain read (no RMW needed since the
// destination is a register); flags set from the result.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::XOR_GwEwM(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16;
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  op1_16 = BX_READ_16BIT_REG(i->dst());
  op2_16 = read_virtual_word(i->seg(), eaddr);
  op1_16 ^= op2_16;
  BX_WRITE_16BIT_REG(i->dst(), op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// XOR r/m16, imm16 -- memory destination, immediate source (read-modify-write).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::XOR_EwIwM(bxInstruction_c *i)
{
  Bit16u op1_16;
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  op1_16 = read_RMW_virtual_word(i->seg(), eaddr);
  op1_16 ^= i->Iw();
  write_RMW_virtual_word(op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// XOR r16, imm16 -- register destination, immediate source.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::XOR_EwIwR(bxInstruction_c *i)
{
  Bit16u op1_16 = BX_READ_16BIT_REG(i->dst());
  op1_16 ^= i->Iw();
  BX_WRITE_16BIT_REG(i->dst(), op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// OR r/m16, imm16 -- memory destination, immediate source (read-modify-write).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::OR_EwIwM(bxInstruction_c *i)
{
  Bit16u op1_16;
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  op1_16 = read_RMW_virtual_word(i->seg(), eaddr);
  op1_16 |= i->Iw();
  write_RMW_virtual_word(op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// OR r16, imm16 -- register destination, immediate source.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::OR_EwIwR(bxInstruction_c *i)
{
  Bit16u op1_16 = BX_READ_16BIT_REG(i->dst());
  op1_16 |= i->Iw();
  BX_WRITE_16BIT_REG(i->dst(), op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// NOT r/m16 -- one's complement of a memory word (read-modify-write).
// Note: no SET_FLAGS call here -- NOT leaves the flags unchanged.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::NOT_EwM(bxInstruction_c *i)
{
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  Bit16u op1_16 = read_RMW_virtual_word(i->seg(), eaddr);
  op1_16 = ~op1_16;
  write_RMW_virtual_word(op1_16);
  BX_NEXT_INSTR(i);
}
// NOT r16 -- one's complement of a register; flags unchanged (no SET_FLAGS).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::NOT_EwR(bxInstruction_c *i)
{
  Bit16u op1_16 = BX_READ_16BIT_REG(i->dst());
  op1_16 = ~op1_16;
  BX_WRITE_16BIT_REG(i->dst(), op1_16);
  BX_NEXT_INSTR(i);
}
// OR r/m16, r16 -- memory-destination form (read-modify-write); flags set
// from the logical result.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::OR_EwGwM(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16;
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  op1_16 = read_RMW_virtual_word(i->seg(), eaddr);
  op2_16 = BX_READ_16BIT_REG(i->src());
  op1_16 |= op2_16;
  write_RMW_virtual_word(op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// OR r16, r16 -- register-to-register form; flags set from the result.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::OR_GwEwR(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16;
  op1_16 = BX_READ_16BIT_REG(i->dst());
  op2_16 = BX_READ_16BIT_REG(i->src());
  op1_16 |= op2_16;
  BX_WRITE_16BIT_REG(i->dst(), op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// OR r16, r/m16 -- memory-source form (plain read; destination is a register).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::OR_GwEwM(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16;
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  op1_16 = BX_READ_16BIT_REG(i->dst());
  op2_16 = read_virtual_word(i->seg(), eaddr);
  op1_16 |= op2_16;
  BX_WRITE_16BIT_REG(i->dst(), op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// AND r/m16, r16 -- memory-destination form (read-modify-write); flags set
// from the logical result.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::AND_EwGwM(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16;
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  op1_16 = read_RMW_virtual_word(i->seg(), eaddr);
  op2_16 = BX_READ_16BIT_REG(i->src());
  op1_16 &= op2_16;
  write_RMW_virtual_word(op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// AND r16, r16 -- register-to-register form; flags set from the result.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::AND_GwEwR(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16;
  op1_16 = BX_READ_16BIT_REG(i->dst());
  op2_16 = BX_READ_16BIT_REG(i->src());
  op1_16 &= op2_16;
  BX_WRITE_16BIT_REG(i->dst(), op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// AND r16, r/m16 -- memory-source form (plain read; destination is a register).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::AND_GwEwM(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16;
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  op1_16 = BX_READ_16BIT_REG(i->dst());
  op2_16 = read_virtual_word(i->seg(), eaddr);
  op1_16 &= op2_16;
  BX_WRITE_16BIT_REG(i->dst(), op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// AND r/m16, imm16 -- memory destination, immediate source (read-modify-write).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::AND_EwIwM(bxInstruction_c *i)
{
  Bit16u op1_16;
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  op1_16 = read_RMW_virtual_word(i->seg(), eaddr);
  op1_16 &= i->Iw();
  write_RMW_virtual_word(op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// AND r16, imm16 -- register destination, immediate source.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::AND_EwIwR(bxInstruction_c *i)
{
  Bit16u op1_16 = BX_READ_16BIT_REG(i->dst());
  op1_16 &= i->Iw();
  BX_WRITE_16BIT_REG(i->dst(), op1_16);
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// TEST r/m16, r16 -- register form: AND that only sets the flags; the
// result itself is discarded (no register/memory write-back).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::TEST_EwGwR(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16;
  op1_16 = BX_READ_16BIT_REG(i->dst());
  op2_16 = BX_READ_16BIT_REG(i->src());
  op1_16 &= op2_16;
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// TEST r/m16, r16 -- memory form: plain read (no RMW, since nothing is
// written back); flags set from the AND of the two operands.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::TEST_EwGwM(bxInstruction_c *i)
{
  Bit16u op1_16, op2_16;
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  op1_16 = read_virtual_word(i->seg(), eaddr);
  op2_16 = BX_READ_16BIT_REG(i->src());
  op1_16 &= op2_16;
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// TEST r16, imm16 -- register form; only the flags are updated.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::TEST_EwIwR(bxInstruction_c *i)
{
  Bit16u op1_16 = BX_READ_16BIT_REG(i->dst());
  op1_16 &= i->Iw();
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
// TEST r/m16, imm16 -- memory form: plain read, flags only, no write-back.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::TEST_EwIwM(bxInstruction_c *i)
{
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  Bit16u op1_16 = read_virtual_word(i->seg(), eaddr);
  op1_16 &= i->Iw();
  SET_FLAGS_OSZAPC_LOGIC_16(op1_16);
  BX_NEXT_INSTR(i);
}
|
# -*- coding: utf-8 -*-
from deap import base, creator, tools, algorithms
import random
def eval_one_max(individual):
    """Fitness of a bit string: the number of ones, as a 1-tuple (DEAP convention)."""
    return (sum(individual),)
def one_max():
    """Run a simple genetic algorithm on the OneMax problem
    (maximize the number of 1-bits in a 32-bit individual).

    NOTE(review): this targets an old DEAP API surface --
    ``tools.cxTwoPoints``, ``tools.mean`` and ``tools.std`` were removed in
    later DEAP releases (crossover renamed ``cxTwoPoint``; Statistics now
    registers e.g. numpy callables). Confirm against the pinned DEAP
    version before upgrading the dependency.
    """
    # Single-objective maximization fitness (weight +1.0).
    creator.create('FitnessMax', base.Fitness, weights=(1.0, ))
    creator.create('Individual', list, fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    # Individuals are lists of 32 random bits.
    toolbox.register('attr_bool', random.randint, 0, 1)
    toolbox.register('individual', tools.initRepeat, creator.Individual,
                     toolbox.attr_bool, 32)
    toolbox.register('population', tools.initRepeat, list, toolbox.individual)
    toolbox.register('evaluate', eval_one_max)
    toolbox.register('mate', tools.cxTwoPoints)
    toolbox.register('mutate', tools.mutFlipBit, indpb=0.05)  # 5% per-bit flip
    toolbox.register('select', tools.selTournament, tournsize=3)
    pop = toolbox.population(n=100)
    hof = tools.HallOfFame(1)  # keep the single best individual ever seen
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register('avg', tools.mean)
    stats.register('std', tools.std)
    stats.register('min', min)
    stats.register('max', max)
    # 40 generations, 50% crossover probability, 20% mutation probability.
    algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=40,
                        stats=stats, halloffame=hof, verbose=True)
def main():
    """Script entry point: run the OneMax GA demo."""
    one_max()


if __name__ == '__main__':
    main()
|
package com.duangframework.db.utils;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
public final class ClassKit {

    /** Cache of reflected fields, keyed by (possibly lower-cased) class name. */
    private static final ConcurrentMap<String, Field[]> FIELD_MAPPING_MAP = new ConcurrentHashMap<String, Field[]>();

    private ClassKit() {
        // static utility class -- no instances
    }

    /**
     * Returns the fully qualified class name (including the package).
     *
     * @param cls         the class
     * @param isLowerCase whether to lower-case the returned name
     * @return the class name
     */
    public static String getClassName(Class<?> cls, boolean isLowerCase) {
        String name = cls.getName();
        return isLowerCase ? name.toLowerCase() : name;
    }

    /**
     * Reflects all non-static fields of the class (including inherited
     * ones), caching the result per class.
     *
     * @param cls the class to reflect
     * @return the reflected fields
     */
    public static Field[] getFields(Class<?> cls) {
        String key = getClassName(cls, ToolsKit.isFieldToLowerCase());
        // Single get() instead of containsKey()+get(): one lookup, and no
        // window between the two calls under concurrent access.
        Field[] fields = FIELD_MAPPING_MAP.get(key);
        if (fields == null) {
            fields = getAllFields(cls);
            FIELD_MAPPING_MAP.put(key, fields);
        }
        return fields;
    }

    /**
     * Collects every declared non-static field of the class and of all its
     * superclasses (up to, but excluding, Object).
     *
     * @param cls the class to reflect
     * @return Field[]
     */
    private static Field[] getAllFields(Class<?> cls) {
        List<Field> fieldList = new ArrayList<Field>();
        fieldList.addAll(filterStaticFields(cls.getDeclaredFields()));
        Class<?> parent = cls.getSuperclass();
        // Walk up the inheritance chain for inherited fields.
        while (null != parent && parent != Object.class) {
            fieldList.addAll(filterStaticFields(parent.getDeclaredFields()));
            parent = parent.getSuperclass();
        }
        return fieldList.toArray(new Field[fieldList.size()]);
    }

    /**
     * Reflects all non-static fields of the class into a map keyed by
     * field name.
     *
     * @param cls the class to reflect
     * @return map of field name to Field, or null when no fields were found
     */
    public static Map<String, Field> getFieldMap(Class<?> cls) {
        Field[] fields = getFields(cls);
        if (null == fields) {
            return null;
        }
        Map<String, Field> map = new HashMap<>(fields.length);
        for (Field field : fields) {
            if (null != field) {
                map.put(field.getName(), field);
            }
        }
        return map;
    }

    /**
     * Filters out static fields and makes the remaining fields accessible.
     *
     * @param fields the declared fields
     * @return the non-static fields, each made accessible
     */
    private static List<Field> filterStaticFields(Field[] fields) {
        List<Field> result = new ArrayList<Field>();
        for (Field field : fields) {
            if (!Modifier.isStatic(field.getModifiers())) { // skip static fields
                field.setAccessible(true); // allow access to private fields
                result.add(field);
            }
        }
        return result;
    }

    /**
     * Returns the generic type argument of a field, e.g. the E of List&lt;E&gt;
     * or the V of Map&lt;K, V&gt;.
     *
     * @param field the field to inspect
     * @return the type argument, or Object.class when it cannot be determined
     */
    public static Class<?> getGenericTypeClass(Field field) {
        Type genericType = field.getGenericType();
        // Guard: a non-parameterized field (e.g. a plain String) previously
        // caused a ClassCastException here.
        if (!(genericType instanceof ParameterizedType)) {
            return Object.class;
        }
        Type[] types = ((ParameterizedType) genericType).getActualTypeArguments();
        if (types.length == 1) {
            return (Class) types[0];
        } else if (types.length == 2) {
            return (Class) types[1];
        } else {
            return Object.class;
        }
    }
}
|
Ex vivo activation of CD4+ T-cells from donors on suppressive ART can lead to sustained production of infectious HIV-1 from a subset of infected cells The fate of HIV-infected cells after reversal of proviral latency is not well characterized. Simonetti, et al. recently showed that CD4+ T-cells containing intact proviruses can clonally expand in vivo and produce low-level infectious viremia. We hypothesized that reversal of HIV latency by activation of CD4+ T-cells can lead to the expansion of a subset of virus-producing cells rather than their elimination. We established an ex vivo cell culture system involving stimulation of CD4+ T-cells from donors on suppressive antiretroviral therapy (ART) with PMA/ionomycin (day 17), followed by rest (day 721), and then repeat stimulation (day 2128), always in the presence of high concentrations of raltegravir and efavirenz to effectively block new cycles of viral replication. HIV DNA and virion RNA in the supernatant were quantified by qPCR. Single genome sequencing (SGS) of p6-PR-RT was performed to genetically characterize proviruses and virion-associated genomic RNA. The replication-competence of the virions produced was determined by the viral outgrowth assay (VOA) and SGS of co-culture supernatants from multiple time points. Experiments were performed with purified CD4+ T-cells from five consecutively recruited donors who had been on suppressive ART for > 2 years. In all experiments, HIV RNA levels in supernatant increased following initial stimulation, decreased or remained stable during the rest period, and increased again with repeat stimulation. HIV DNA levels did not show a consistent pattern of change. SGS of proviruses revealed diverse outcomes of infected cell populations, ranging from their apparent elimination to persistence and expansion. Importantly, a subset of infected cells expanded and produced infectious virus continuously after stimulation. 
These findings underscore the complexity of eliminating reservoirs of HIV-infected cells and highlight the need for new strategies to kill HIV-infected cells before they can proliferate. Introduction The major barrier to curing HIV infection in individuals receiving suppressive antiretroviral therapy (ART) is a persistent viral reservoir consisting of infrequent cells that harbor latent, intact proviruses capable of being activated to produce infectious virus. It has been reported that the latent reservoir is primarily composed of long-lived resting CD4 + T-cells that have a half-life of~44 months. The "shock-and-kill" strategy has been proposed as a means of depleting the HIV reservoir by reversing latency and promoting death of cells with reactivated proviruses by viral-or immune-mediated cytotoxicity. Multiple latency reversing agents (LRAs) have been discovered but the most effective LRAs often induce T-cell activation. Studies in untreated HIV infection have shown that the majority of productively infected cells undergo rapid cell death with a half-life of~1 day, likely due to viral cytopathic effect (CPE) and/or immune-mediated killing. However, Shan et al. used in vitro primary cell models of proviral latency to show that infected resting CD4 + T-cells are relatively resistant to viral cytopathic effect and can persist following latency reversal with vorinostat. In addition, studies have shown that HIV-infected cells can persist and expand in vivo. In one patient, marked clonal expansion of cells with intact proviruses led to persistent viremia. Given these findings, we hypothesized that a subset of inducible HIVinfected cells can persist and expand following T-cell activation. To characterize the effects of T-cell activation on HIV-infected cells, we developed an ex vivo cell culture system involving stimulation of primary cells from chronically HIV-infected donors on suppressive ART with phorbol 12-myristate 13-acetate (PMA) and ionomycin. 
Single-genome sequencing (SGS) was used to study the dynamics of cells containing genetically distinct proviruses and the virions released into the supernatant. These experiments revealed that infected cell populations have several different outcomes following cellular activation and latency reversal. In contrast to previous findings, we observed that cells containing intact, inducible proviruses can persist and expand following cellular activation. Donor characteristics Experiments were performed using unfractionated PBMC or total CD4 + T-cells purified from peripheral blood mononuclear cells (PBMC) obtained from five chronically HIV-1 infected donors on suppressive ART who met the eligibility criterion of having plasma HIV RNA 50 copies/mL for 2 years. The donors studied were the first five volunteers who met these eligibility criteria. Table 1 shows the characteristics of the five donors studied. The median age is 56 years (range 42-59 years); the median total number of years since detection of HIV seropositivity is 25 years (range 20-27 years); and, the median number of years of plasma viral HIV RNA suppression to 50 copies/mL on ART is 15 years (range 2-18 years). Total CD4 + T-cells or PBMC were stimulated with two sequential 7-day exposures to PMA (50 ng/mL) and ionomycin (500 ng/mL) with a 14-day intervening period of non-exposure (Fig 1). Cells were incubated continuously with 300nM efavirenz and 300nM raltegravir to block viral replication (SGS analysis of virion-associated HIV RNA revealed no evidence of replication, as described below). Aliquots of cells and supernatant were taken at multiple time points for downstream analysis (Fig 1). Experiments were performed with total CD4 + T-cells for donors 1-5, with a repeat experiment also performed for donor 1. Experiments were performed with PBMC for donors 1 and 5. 
PMA and Ionomycin treatment achieves robust cellular activation The effects of PMA (50 ng/mL) and ionomycin (500 ng/mL) on expression of the activation markers CD69 and CD25 was measured on cells from weekly time points (Fig 2). The first week of PMA and ionomycin exposure induced activation in > 95% of cells. During the 14-day rest period, the frequency of activated T-cells returned to baseline levels. The second round of stimulation induced potent activation again, although in fewer cells for donor 4. Cell numbers were measured at the beginning and conclusion of each week and were normalized relative to the number of cells at the beginning of each week (Fig 3). During the first stimulation week, there was a median 2.0-fold increase in total CD4 + T-cells and a median 3.4-fold increase in PBMC. During the second stimulation week, there was a median 1.9-fold increase in total CD4 + T-cells and 1.9-fold increase in PBMC, respectively. The proliferation of cultured cells during the stimulation weeks confirms that PMA and ionomycin induced robust cell activation. A reduction in cell number was observed in two of six experiments with total CD4 + T-cells during the second stimulation, which is not unexpected from activation-induced cell death. The two experiments performed using total CD4 + T-cells from donor 1 showed similar proliferation following the first stimulation, but showed greater variation at later time points. This likely reflects variability that can be seen with primary cell cultures over extended durations of culture. Changes in HIV DNA HIV DNA was quantified in cells by using qPCR targeting pol and normalizing this value to cell number as measured by qPCR targeting CCR5 (Fig 4). Overall, no significant change in HIV DNA occurred in total CD4 + T-cells between baseline and post-stimulation time points at days 7 and 28 (Wilcoxon Signed Rank test, P > 0.05). 
When comparing baseline HIV DNA to day 7 HIV DNA in total CD4 + T-cells, HIV DNA decreased in two of six experiments (median 1.5-fold) and increased in four of six experiments (median 1.1-fold). When comparing baseline HIV DNA to day 28 HIV DNA in total CD4 + T-cells, HIV DNA decreased in four of six experiments (median 2.1-fold decrease) and increased in two of six experiments (median 1.9-fold increase). For the two experiments performed with PBMC, HIV DNA decreased by a median 1.5-fold from baseline to day 7, and by a median 3.5-fold from baseline to day 28. By using cell counts and HIV DNA measurements, the numbers of total cells and infected cells could be calculated and compared. Following the first stimulation, there was a median 2.0-fold increase in total CD4 + T-cells, 2.1-fold increase in infected CD4 + T-cells, 3.4-fold increase in total PBMC, and 2.4-fold increase in infected PBMC. Following the second stimulation, there was a median 1.9-fold increase in total CD4 + T-cells, 1.0-fold change in infected CD4 + T-cells, a 1.9-fold increase in total PBMC, and 1.7-fold change in infected PBMC. There was no significant difference between relative changes in cells numbers between total and infected cells for both CD4 + T-cells and PBMC (P > 0.5 by Wilcoxon matched-pairs signed rank test). HIV virion production HIV virion production was quantified by measuring HIV nucleic acid in cell culture supernatants using the Roche TaqMan 2.0 ( Fig 5). Control amplifications with exclusion of reverse transcriptase confirmed that quantified nucleic acid contained >95% HIV RNA. All donors had plasma HIV RNA < 20 copies/mL before entry. In preliminary experiments, primary total CD4 + T-cells that were cultured ex vivo for 24 hours without stimulation produced virions at quantities lower than the limit of detection (< 20 copies/mL). 
All experiments with total CD4 + T-cells showed virion production following the first stimulation week (median 10,261 HIV copies/mL), decreased or stable virion production was observed over the two rest weeks (median 4.4-fold decrease), and increased virion production following the second stimulation week (median 3.8-fold increase). The two experiments performed using total CD4 + Tcells from donor 1 showed similar virion production following the first stimulation, but exhibited greater variation at later time points. This variation is likely due to the complex cellular dynamics that can occur in bulk cell cultures of primary cells. The PBMC experiments showed similar trends as compared with the total CD4 + T-cell experiments, with increased virion production following the first stimulation (median 2,766 HIV copies/mL), decreased virion production over the two rest weeks (median 40-fold decrease), and increased virion production following the second stimulation week (median 17-fold increase). Reduced virion production was observed following the second stimulation as compared to the first stimulation in all donors in both total CD4 + T-cells (median 1.8-fold reduction) and PBMC (median 2.8-fold reduction), except for one of the two experiments with donor 1 using total CD4 + T-cells (8.6-fold increase with the second stimulation). Dynamics of HIV-infected cell populations Interpretation of Single-Genome Sequences (SGS). To characterize cellular dynamics and virion production from HIV-infected cells, SGS of p6 of gag, pro and the first 900 nucleotides of pol (p6-PR-RT) was performed on HIV DNA in cultured cells and virion RNA in culture supernatants. When performing SGS on virion RNA, control reactions performed in the absence of reverse transcriptase were either negative or rarely positive with HIV DNA contamination estimated to be < 0.5% of nucleic acid in the extracted sample. 
No cross-contamination of proviral or virion sequences was detected across the different donors by neighbor-joining distance tree analysis (S1 Fig). We used neighbor-joining distance analyses of single-genome sequences to investigate the expression of specific proviruses and the population dynamics of cells containing specific proviruses. An example of the neighbor-joining distance trees is shown in S2 Fig. An increased frequency of a specific proviral sequence over time suggests ex vivo proliferation of cells containing a specific provirus. Conversely, a decreased frequency of a proviral sequence over time suggests ex vivo elimination of cells containing a specific provirus. HIV sequences in virions released into the culture supernatants reveal whether a specific provirus is inducible and when it was induced. Interpretation of identical sequences. Although our targeted p6-PR-RT amplicon makes up only ~15% of the HIV proviral genome, the five donors had diverse proviral sequences with a median 1.5% average pairwise distance (APD) of all proviral sequences (S2 Table), improving the ability to distinguish between different viral sequences. An algorithm was developed to calculate the probability of identifying two identical sequences given the length of the sequence amplicon (S), the average pairwise distance between proviruses (APD), and the number of proviral sequences gathered (N) (see Materials and methods). These calculations showed that in the absence of cell proliferation there is less than a 1 in 700 chance of observing two identical proviral sequences when using the p6-PR-RT amplicon (S2 Table). The prolonged duration of untreated infection (Table 1) and the diverse proviral sequences indicate that these donors were not treated during acute infection. Therefore, identical HIV DNA sequences identified are likely due to cellular proliferation. Summary of cellular and proviral outcomes. 
The findings from donor 1 are representative of the other four donors ( Table 2). For donor 1, two phlebotomies were performed. The first phlebotomy was used to perform an experiment with total CD4 + T-cells ( Before stimulation of total CD4 + T-cells from donor 1, a median of 4.4% of unique proviral sequences were observed to be identical. Out of all virion sequences that were detected over the duration of cell culture, 77.8% of unique virion sequences were detected only after the first stimulation and not the second stimulation. Approximately 5.6% of unique virion sequences were detected only after the second stimulation. Persistent expression of virions during both stimulations was observed for 16.7% of unique virion sequences. Table 2. Proviral expression and dynamics in total CD4 + T-cells after sequential stimulation. Different proviral population outcomes are quantified for experiments with total CD4 + T-cells. Each outcome is calculated as either 1) the frequency of proviruses displaying a given outcome relative to the total number of unique proviral sequences observed over the entire duration of cell culture, or 2) the frequency of unique virion sequences displaying a given outcome relative to the total number of unique virion sequences observed over the entire duration of cell culture. IQR = interquartile range. Ex vivo clonal expansion was suggested by an increasing frequency of a proviral sequence after stimulation. We found proviral sequences that increased in frequency over time that matched virion sequences (1.1% of unique proviral sequences) and others that did not match virion sequences (1.1% of unique proviral sequences). Proviral Population Similar infected cell population outcomes were observed in the second experiment with total CD4 + T-cells from donor 1 (S3 Fig, Table 2). An experiment was also performed using donor 1's PBMC, which comprises more diverse cell types including CD8 + T-cells, NK cells, and B-cells. 
All the listed outcomes observed for total CD4 + T-cells were also observed for PBMC (S4 Fig, S1 Table). Neighbor-joining distance trees from donors 1-5 are shown in Fig 6 and S3-S9 Figs. A summary of the observed proviral population outcomes is shown in Table 2 and S1 Table. Overall, we observed identical proviral sequences in vivo for all five donors. Most unique virion sequences were detected only following the first stimulation (median 79.3% in total CD4 + T-cells; median 53.9% in PBMC). No significant correlation was found between the fold-change in HIV DNA between day 0 and day 28 and the frequency of unique virion sequences that were only expressed following the first stimulation (Spearman ρ = 0.59, P = 0.12). No changes in proviral diversity were detected over time (S3 Table), although this analysis is limited since most proviruses are defective. No correlation was observed between the number of virion particles produced and the average pairwise distances of virions produced at each time point (S4 Table) (Spearman P > 0.5), suggesting that the reduced virion production following the second stimulation was not solely due to the depletion of infected cells carrying inducible proviruses. A minority of unique virion sequences were detected only following the second stimulation (median 12.2% in total CD4 + T-cells; median 35.5% in PBMC). Persistent virion production over the non-stimulation period was observed in all experiments (Fig 5). In experiments with total CD4 + T-cells, a median of 35.1% of unique virion sequences expressed during the rest weeks were also expressed during the first stimulation. In experiments with PBMC, this fraction was 25.0%. These persistently expressed viruses were oligoclonal (0-4 unique sequences). A median 35.1% of unique virion sequences detected during the rest period were also expressed during the first stimulation, indicating persistent expression of proviruses. 
The remaining fraction of unique virion sequences detected during the rest period was not detected during the first stimulation. A minority of virion sequences were detected following both the first and second rounds of stimulation in four of five donors (median 12.3% in total CD4 + T-cells; median 10.5% in PBMC), suggesting the persistence of inducible proviral populations. Donor 5 did not show evidence of persistent virion expression in either total CD4 + T-cells or PBMC. However, this donor had a virion sequence that was detected following the first stimulation that matched a proviral sequence detected following the second stimulation, suggesting the persistence of a population of cells carrying inducible proviruses. For all donors, the persistently expressed virion sequences intermingled with the other virion sequences on the neighbor-joining trees. Evidence for ex vivo clonal expansion of non-induced proviral populations was observed in five of five donors in total CD4 + T-cells (Fig 6, S3 Expansion of cells containing intact inducible proviruses. The replication-competence of virions produced from persistent and inducible proviruses was assessed by performing the VOA using aliquots of cultured total CD4 + T-cells from donors 1-3 and sequencing the virions from p24-positive wells. In all three donors, we found inducible proviral populations that persisted throughout the 28-day cell culture and had sequence matches to VOA sequences (arrows, Fig 6 and S3-S6 Figs). Furthermore, multiple lines of evidence suggest that clonal expansion of cells with intact proviruses occurred in vivo or ex vivo in three of three donors. In the first experiment with donor 1 total CD4 + T-cells, two distinct proviral sequences increased in frequency over time, matched supernatant virion sequences, and also matched VOA sequences from the second experiment, suggesting ex vivo clonal expansion of cells infected with inducible, intact proviruses. 
Also in donor 1, two distinct virion sequences that matched VOA sequences were found in both total CD4 + T-cell experiments as well as in the PBMC experiment, indicating in vivo clonal expansion of cells infected with inducible, intact proviruses. In donors 2 and 3, sequence matches were also found between VOA sequences and persistently detected virion sequences throughout the duration of cell culture, indicating the in vivo or ex vivo clonal expansion of cells containing inducible, intact proviruses and the persistence of these cells despite robust stimulation. No evidence of viral replication in treated cell culture system Sequential stimulation experiments were performed in the presence of efavirenz (300nM) and raltegravir (300nM) to block viral replication. Proviral and virion sequences were analyzed for HIV drug resistance mutations using the HIV drug resistance databases from Stanford, the Rega Institute, and the Agence Nationale de Recherches sur le SIDA. Extensively hypermutated sequences, as determined by the online Hypermut algorithm (http://www.hiv.lanl.gov/ content/sequence/HYPERMUT/hypermut.html), were excluded from the drug resistance analysis since these genomes were unlikely to be replication-competent. Using the listed drug resistance databases, no proviral or virion sequences were predicted to be resistant to efavirenz, suggesting that HIV replication was successfully blocked by efavirenz. Drug resistance analysis for raltegravir was not performed since integrase (IN) was not included in the amplicon for SGS. It is unlikely that these donors contained raltegravir-resistant HIV strains because all donors, except for donor 4, were treatment-nave to raltegravir. Donor 4 had been suppressed for three years on a combination ART regimen containing raltegravir, abacavir, and lamivudine at the time of phlebotomy, suggesting that this patient did not harbor raltegravirresistant mutations. 
Distance analysis was used to further assess whether ongoing viral replication was occurring during experiments. The average pairwise distances (APD) between induced virion sequences, with hypermutant sequences excluded, did not change over the duration of cell culture (P > 0.05 by Pearson and Spearman correlation) (S4 Table). Taken together, these results suggest that viral replication was not occurring ex vivo. Discussion The most widely discussed approach to curing HIV infection is termed the "kick and kill" strategy, which aims to deplete the latent reservoir by reversing HIV latency and promoting the death of cells containing reactivated proviruses by viral cytopathic effect or immune-mediated killing. Multiple latency reversing agents (LRAs) have been discovered, with the most effective agents possessing the quality of inducing T-cell activation. To explore the effects of latency reversal by T-cell activation on the latent reservoir, we developed an ex vivo system involving sequential stimulation of primary total CD4 + T-cells or PBMC with PMA and ionomycin. We showed that this stimulation achieves robust cellular activation and latency reversal. We did not observe consistent changes in HIV DNA frequency across experiments. Virion production increased during the stimulation periods and decreased, but remained detectable, during the intervening period of non-exposure. Using SGS, we show that cells containing specific proviruses have diverse outcomes following stimulation, including their apparent elimination, persistence, or expansion. Importantly, a subset of inducible, replication-competent proviruses can persist and expand despite robust sequential stimulation. We found identical p6-PR-RT in vivo proviral sequences in five of five donors, suggesting in vivo clonal expansion of infected cells. These findings are consistent with previous reports of clonal expansion being a common phenomenon in HIV-infected donors on suppressive ART. 
In vivo clonal expansion of HIV proviruses can result from the proviral integration site, homeostatic mechanisms, or antigenic stimulation [28]. Multiple outcomes for infected cells were observed. Many proviruses did not produce detectable virions with either stimulation, which is consistent with prior reports and is expected given the high frequency of defective proviral genomes. Most inducible proviral populations produced virions following the first but not the second stimulation, consistent with the death of cells containing activated proviruses. Another possibility is that cells containing these proviruses became refractory to sequential stimulation due to T-cell exhaustion, as suggested by the reduced CD69 expression following the second round of stimulation as compared to the first. A minority of inducible proviral populations produced virions only following the second stimulation, similar to what has been observed previously. This outcome may be attributed to stochastic latency reversal. We also observed persistence of inducible proviral populations following sequential stimulation in five of five donors. Detectable virion production continued during the rest period in all experiments for all donors. Analyses of drug resistance mutations and genetic distances indicated that viral replication was not contributing to the persistent virion production. Virion sequences during this period were oligoclonal; a minority of these sequences arose from persistent expression of proviruses from the first stimulation whereas a majority of sequences were newly detected, likely a result of newly induced proviruses from persistent cell activation or from proviruses that were expressed during the stimulation period but were not detected by the assay at that time point. Conflicting results have been published on whether HIV-infected cells with intact proviruses can undergo clonal expansion. Simonetti et al. 
extensively characterized in vivo clonal expansion of an infectious HIV clone in a single patient. This clonal expansion likely resulted from persistent stimulation by tumor antigen, which was robust enough to cause increasing clone numbers over time despite the potential for viral cytopathic effect and immune-mediated killing. In contrast, Cohn et al. found that 75 expanded T-cell clones from eight HIV-infected individuals had defective genomes and concluded that expanded clones cannot contain intact proviruses. However, the study design had limited sensitivity to detect clonal expansion of intact proviruses. Here, we show that cells containing intact and inducible proviruses can persist following robust stimulation ex vivo, despite the potential for viral cytopathic effect and immune-mediated killing as seen in our experiments with PBMC. We found evidence suggesting the ex vivo persistence of cell populations carrying inducible proviruses in five of five donors and evidence suggesting in vivo or ex vivo clonal expansion of cells with intact proviruses in three of three donors. The ability for certain subsets of cells containing intact and inducible proviruses to persist and proliferate may be attributed to viral sequences, proviral integration sites, and cell types. The sequences of virions that were detected after both stimulations intermingled with other virion sequences on the neighbor-joining tree, suggesting that factors other than the viral sequences contributed to their persistence. Integration sites in cancer-associated genes have been shown to promote survival and clonal expansion of proviruses in vivo. In addition, HIV proviruses can be found in multiple T-cell subsets that have heterogeneous survival and proliferative potential. 
Identifying the proviral insertion sites and infected T-cell subsets that can give rise to expanded clones with intact proviruses is a high priority but is complicated both by the rarity of intact proviruses (~2% of proviruses are intact) compared to the large majority of defective provirus and the overall low frequency of infected CD4+T-cells (0.1%-0.01%). Experiments performed in the presence of effector cells (CD8 + T-cells, B-cells, NK cells) did not affect the persistence of inducible, replication-competent proviruses and did not result in appreciable decreases in the concentration of HIV-infected cells. These findings may be consistent with the well-studied exhaustion of the innate and adaptive immune systems during chronic HIV infection [31,. Potent latency reversal with T-cell activation using PMA and ionomycin did not significantly deplete the frequency of HIV-infected cells across all donors as measured by qPCR for HIV DNA. Changes in HIV DNA were inconsistent across donors (decreased in four experiments and increased in two experiments) suggesting that latency reversal with T-cell activation has variable effects on HIV-infected cells from different donors. The variable changes in HIV DNA may represent inter-donor variability in the survival of infected cells, possibly related to viral cytopathic effect, cell type, or proviral integration site. No significant change in cell proliferation was noted between total cell number and infected cells in both CD4 + T-cells and PBMC. Future studies will be needed to investigate the mechanisms behind the inter-donor variability in HIV DNA changes in response to latency reversal by T-cell activation. Following repeat stimulation with PMA and ionomycin, we observed a trend toward reduced cellular proliferation and virion production after the second stimulation compared with the first, although these trends were not statistically significant. 
It is possible that the observed differences in proliferation could be due to differences in activation state prior to stimulation. However, pre-stimulation cellular activation levels were similar between the two stimulation periods and therefore likely did not play a major role in the observed response differences. One possible explanation for a reduction in virion production is cell death of a majority of infected-cells that were actively producing virus, as suggested by SGS. Another possibility is that the cells became exhausted following the first stimulation and consequently did not respond as strongly following the second stimulation. Following prolonged stimulation, T-cells have been observed to become exhausted and with progressive impairment of effector functions, including proliferative potential. Whether the diminished virion production was from depletion of infected cells with inducible proviruses, reduced virion production from infected cells, or both would require single-cell analysis for elucidation. Some limitations of our study deserve mention. Our sample size was small (N = 5), which may limit the generalizability of our findings. However, it should be noted that the donors met broad eligibility criterion (plasma HIV RNA < 50 copies/mL for > 2 years) and were enrolled and studied consecutively, minimizing selection bias. In addition, five of five donors displayed evidence of persistence of inducible proviral populations after latency reversal from cell activation and three of three donors displayed evidence of persistence of inducible, intact proviral populations, making these observations relevant for chronically HIV-infected individuals on long-term suppressive ART. Nevertheless, our findings of persistence and expansion of specific proviruses following latency reversal with T-cell activation should be verified in vivo. Another limitation of our study is that IL-2 was added to cultures to promote cell viability. 
Preliminary experiments revealed that cell numbers precipitously declined during the inter-dose period if IL-2 was not provided, reflecting the current limitations of ex vivo cell culture with primary T-cells. Finally, we performed SGS on only a ~1.5 kb portion of the HIV genome. Consequently, we did not prove that identical sequence matches were from proviruses that were identical throughout their genomes, but Poisson and binomial statistical analyses incorporating average pairwise differences of the proviruses in the regions sequenced indicated that this event was improbable (1 in 700 chance on average). Definitive evidence of clonal expansion requires integration site analysis to confirm identical integration sites between proviruses in different cells; however, significant technological advances are needed to show linkage of identical integration sites to identical proviral sequences for the rare infected cells with intact proviruses observed in the current experiments. In summary, this study provides insight into the effects of latency reversal with T-cell activation on the latent HIV reservoir and exposes additional challenges when using these compounds toward achieving an HIV cure. We found that reversal of HIV latency by CD4 + T-cell activation results in diverse outcomes for proviral populations, ranging from their apparent elimination to expansion of proviruses capable of infectious virus production. Survival and expansion of a subset of cells containing inducible HIV proviruses occurred after T-cell activation across multiple HIV-infected individuals in the absence or presence of autologous effector cells (e.g. CD8 + T-cells, NK cells, B-cells). Even if a net depletion of the latent reservoir occurs following cellular activation, some inducible, intact proviral populations may be able to persist. To effectively target these proviral populations, compounds that kill HIV-infected cells before cellular proliferation occurs will be needed. 
Isolation of total CD4 + T-cells from HIV-infected individuals on ART Large volume phlebotomy (~180 mL) was performed on five, consecutive, HIV-infected donors who were on suppressive ART (<50 copies of HIV RNA/mL plasma) for ≥ 2 years (Table 1). All patients provided written informed consent and the blood donation protocol was approved by the University of Pittsburgh Institutional Review Board, PRO13070189 and PRO14120068. PBMC were isolated by Ficoll-Paque density gradient centrifugation. Next, total CD4 + T-cells were isolated by negative selection using the EasySep Human CD4+ T-cell Enrichment Kit (STEMCELL). Ex vivo sequential stimulation culture Isolated total CD4 + T-cells or PBMC were resuspended in RPMI medium 1640 without phenol red containing 10% (vol/vol) fetal bovine serum, 0.6% penicillin/streptomycin, 300nM efavirenz, and 300nM raltegravir. The cells were then cultured in T75 cm² flasks. Following one day of rest, the cells were stimulated for seven days with 50ng/mL PMA and 500ng/mL ionomycin. Following the seven days of stimulation, the cells were washed three times and then transferred to a new flask in fresh media to be cultured in the absence of PMA and ionomycin for seven days. After an additional seven days, the cells were washed three times, transferred to a new flask in fresh media, and cultured again in the absence of PMA and ionomycin for seven days. After the seven days, the cells were washed three times, transferred to a new flask in fresh media, and cultured in the presence of PMA and ionomycin for seven days. On each day of transferring cells to a new flask, cell numbers were counted using a hemocytometer, aliquots of cells were removed and saved in liquid nitrogen, and aliquots of cells and supernatant were removed and stored at −80 °C for downstream analysis. Three days after each seeding of cells into a new flask, the cell media was changed. 
Modified Viral Outgrowth Assay (VOA) A non-quantitative VOA was performed by co-culturing fresh aliquots of cells from the ex vivo sequential stimulation culture with allogeneic, irradiated feeder cells and CD8-depleted blasts as previously described. The VOA was performed for donors 1-3. Supernatants from Single-Genome Sequencing (SGS) SGS was performed on culture supernatants and cells. Extraction of nucleic acid from supernatant was performed as previously described, except with initial centrifugation at 5,300 × g for 10 min at 4 °C to remove debris. Nucleic acid was extracted from cells as previously described. cDNA was synthesized using the SuperScript III First-Strand Synthesis System. Each cDNA synthesis reaction was performed with 5 µL of supernatant extract, 5 µL of 10 mM deoxynucleotide triphosphates, and 5 µL of 2 µM reverse primer targeting pol (5'-CTATTAAGTATTTTGATGGGTCATAA-3'). Following denaturation at 65 °C for 10 min, each sample was quenched on ice followed by addition of 10 µL of 10X RT buffer, 20 µL of 25 mM MgCl₂, 1 µL of DTT, 17.5 µL of molecular-grade water, 1 µL of RNase-Out, and 0.5 µL of SuperScript III RT. The samples were then incubated at 25 °C for 10 min, 45 °C for 40 min, 85 °C for 10 min, and then at 4 °C. The cDNA for SGS was plated using a limiting dilution scheme and amplified using nested PCR to determine a cDNA dilution that yields ~30% positive PCR reactions. At this dilution, 80% of positive PCR reactions contain only a single copy of HIV cDNA in the reaction according to Poisson statistics. The nested PCR amplified a ~1.5 kb amplicon spanning p6 of gag, pro, and the first ~900 nucleotides of RT as previously described. Positive nested PCR product was detected using GelRed (Biotium). Analysis of HIV sequences Phylogenetic analysis was performed using the Neighbor-joining p-distance method in the MEGA 5.2 software. Hypermutant sequences were determined by the online algorithm: http://www.hiv.lanl.gov/content/sequence/HYPERMUT/hypermut.html. 
The APD between proviruses was calculated in MEGA 6. Hypermutant sequences, as determined by the online Hypermut algorithm, were excluded from analysis to avoid erroneous elevation of the APD. To calculate the expected number of identical HIV sequence pairs (L_e), the total number of sequence pair comparisons (T) is multiplied by the probability of a sequence pair comparison being identical (P): L_e = T × P. The T can be derived as follows: T = N(N − 1)/2. The probability of identical sequences, P, is obtained using the Poisson distribution: P = e^(−λ). Here, λ is the average number of sequence differences: λ = APD × S. The probability of observing two identical sequences (or one sequence pair match) by chance was calculated using the binomial distribution to calculate the cumulative probability of observing an identical sequence over T trials given the probability P of observing two identical sequences. Data availability Sequences were submitted to the GenBank database (accession numbers: KX829224-829753, KX830756-830801). Sequences were rooted to a consensus sequence of HIV subtype B. The tree was constructed using the neighbor-joining p-distance method. Hypermutant sequences are in boxes. The Viral Outgrowth Assay (VOA) was performed using day 7 and day 28 cells from Donor 1 (Experiment 2) total CD4 + T-cells. The day 7 cells were seeded into 6 wells at 1 × 10⁶ cells/well and the day 28 cells were seeded into 6 wells at 3 × 10⁵ cells/well. Sequences were rooted to a consensus sequence of HIV subtype B. The tree was constructed using the neighbor-joining p-distance method. Hypermutant sequences are in boxes. The Viral Outgrowth Assay was performed using cells from day 7, day 14, day 21, and day 28. The day 7 cells were seeded into 4 wells at 1.25 × 10⁶ cells/well; day 14 cells were seeded into 4 wells at 1.25 × 10⁶ cells/well; day 21 cells were seeded into 4 wells at 1.25 × 10⁶ cells/well; and day 28 cells were seeded into 4 wells at 1.25 × 10⁶ cells/well. 
Sequences were rooted to a consensus sequence of HIV subtype B. The tree was constructed using the neighbor-joining p-distance method. Hypermutant sequences are in boxes. (TIF) S1 Table. Proviral expression and dynamics in PBMC after sequential stimulation. Different proviral population outcomes are quantified for experiments with PBMC. Each outcome is calculated as either 1) the frequency of proviruses displaying a given outcome relative to the total number of unique proviral sequences observed over the entire duration of cell culture, or 2) the frequency of unique virion sequences displaying a given outcome relative to the total number of unique virion sequences observed over the entire duration of cell culture. (DOCX) S2 Table. Probability estimate of detecting two identical proviral sequences. The probability of detecting two identical p6-PR-RT sequences assuming no clonal expansion had occurred was calculated for each experiment using the binomial distribution based on the average pairwise distance (APD) of all obtained proviral sequences for each experiment. Hypermutant sequences were excluded from analysis. (DOCX) S3 |
The latest property trends and what they mean for small business.
If your business suffers from real estate blues brought on by plummeting prices, it may come as little comfort to know that this trend was supposed to have ended by now. When the market began its downturn in early 2006, some of the smartest economists in the country, as well as the CEOs of major home-builders and the National Association of Realtors, predicted that prices would rebound by mid-2007. Instead the experts have been humbled by the depth and breadth of the downturn - and the resulting sub-prime credit crisis has shaken financial markets around the world.
Expect tremors to keep shaking the real estate market along multiple fault lines in 2008. Here are the winners and losers in the housing, rental and commercial categories.
UP MARKETS: As a whole, the national housing market will finally hit bottom - and start bouncing back - at the end of 2008, says Celia Chen, director of housing economics at Economy.com, a subsidiary of the financial rating agency Moody's (Charts). But more than a dozen major metro areas are already ahead of the curve, and enjoying modest but significant price appreciation.
Markets such as Atlanta, Austin and Dallas didn't draw enough speculators to skew prices during the housing boom. Yet they boast sufficient employment and income growth to increase demand for housing. Mobile, Ala., surprisingly, is poised to be a top performer in this group of metros: in recent years it's seen only a trickle of new housing but is currently booming thanks to billions of dollars worth of new mega-projects.
WHAT IT MEANS: Small business owners in these regions will still be able to tap home equity loans for funds, or won't face calls on existing loans from banks because of declining values.
DOWN MARKETS: The regions that will likely lag the national recovery are Phoenix, Las Vegas, south Florida and California's Central Valley. Although publicly-traded home builders packed these areas with inventory, prices soared beyond reason thanks to easy credit and an abundance of speculators who never intended to occupy the homes they bought.
In some cases the inventory glut will take years to clear, even at heavily discounted prices. Phoenix currently offers about 55,000 listings, the highest in the Arizona capital's history, in addition to an estimated 15,000 spec houses.
"Builders have now dropped new three-bedroom, single-family homes as low as $130,000," says Frank Owens, a local real estate analyst and headhunter for the home-building industry. "That's unheard of. The lowest we'd see a year ago was $200,000."
WHAT IT MEANS: In these cities, stagnation equals opportunity for entrepreneurs: Because a big slice of the local labor force was employed in the broader housing sector, the downturn has shaken loose many workers who are desperate for a new gig and not so picky about pay.
THE RENTAL MARKET: By some estimates, the clampdown on easy credit provoked by the subprime crisis will ultimately wipe out 25% of national demand for housing. That's good news for landlords, predicts Todd Sinai, an associate professor of real estate at the University of Pennsylvania's Wharton School.
Look for two ingredients: a high concentration of sub-prime borrowers and average income levels near the national average, or lower. "One-time homebuyers will be relegated to renters because young households will have an even harder time amassing a down payment," says Sinai. Memphis and St. Louis, come on down!
WHAT IT MEANS: Commercial rents will remain stable in these areas, because the general economy is slowing and there won't be much new competition for office and retail space.
COMMERCIAL REAL ESTATE: Thanks to a white-hot tech sector and a renewed surge of VC funding for Internet start-ups, office rents in the Bay Area are testing records set during the dotcom bubble. But the trend is moving in the opposite direction in bellwether markets such as New York.
Having been the shining star of real estate for the past two years, the commercial market is due for a slump. A dramatic rise in commercial mortgage rates this year, and tougher bank lending standards have sidelined buyers. Many record-setting deals are falling apart. Prices for office buildings, hotels and shopping centers around the country may fall by double digits, commercial analysts now concede.
Perhaps the most telling indicator is legendary developer Sam Zell, once lord of the largest commercial real estate portfolio in history. A legendary market-timer, Zell sold his holdings to a private equity firm for $39 billion last February.
WHAT IT MEANS: Because of tight credit and soft prices, cash-rich businesses will hold the upper hand when negotiating to buy their own property.
Disagree with our "Next little thing" picks? Talk back here!
Who would buy real estate in this market? |
Associations between abdominal adiposity, body size and objectively measured physical activity in infants from Soweto, South Africa Objectives: Considering the importance of the early life period, in conjunction with the increasing prevalence of adiposity and insufficient physical activity already evident in early childhood, this study aimed to determine associations between abdominal adiposity, body size, and objectively measured physical activity in infancy. Methods: Infants (n=138, aged 3–24 months) from Soweto, South Africa were recruited to this cross-sectional study. Visceral (VAT) and subcutaneous abdominal fat (SAT) were measured using ultrasound. Physical activity was assessed using accelerometry and analysed at the hourly level. Multilevel linear regression analyses were run with body composition exposures adjusted for age, sex, and length; models with VAT and SAT were also adjusted for total abdominal fat. Results: Mean (SD) age was 11.8 (7.6) months; 86% were normal weight, 7% were underweight and 7% overweight. In linear models, no body composition variable was significantly associated with physical activity. Physical activity was higher with each increasing length tertile (ANOVA p<0.01); with a mean(95%CI) 29mg in the lowest tertile, 39mg in the middle tertile, and 50mg in the highest tertile. Infants with normal weight had higher mean(95%CI) physical activity (40mg) than underweight (34mg, p=0.01) or overweight infants (31mg, ANOVA p<0.01). When also adjusting for total abdominal fat, infants in the lowest SAT tertile had higher physical activity than those in the middle or highest SAT tertiles (p<0.01). Conclusions: These findings lend support for higher physical activity as a marker of healthy growth in the first two years of life. Introduction shown (;;), and it is possible that these diurnal variations may affect the association between body composition and physical activity. 
Lastly, it is likely that the relationship between body composition and physical activity is bidirectional, particularly in this age group; whereby accumulating more activity may allow for better development and growth -the relationship most often explored. Conversely, better physically developed babies may be more likely to accumulate a greater volume of activity, either due to nourishment and/or development -and this relationship remains to be examined. Considering the importance of this early life period in conjunction with the increasing prevalence of adiposity and insufficient physical activity already evident in early childhood, it is essential to determine whether there are associations between body composition and physical activity in the first two years of life. The aim of this study was therefore to determine the associations between abdominal adiposity (measured using ultrasound) and body size (BMI z-score and length) with objectively measured infant and toddler physical activity, while accounting for diurnal variation in physical activity. Participants and procedures: Infants (n = 152) aged 3-24 months were recruited for this cross-sectional study performed at the SAMRC/Wits Developmental Pathways for Health Research Unit (DPHRU) at the Chris Hani Baragwanath Academic Hospital in Soweto, South Africa. Participants were excluded if they had been diagnosed with any developmental abnormalities that may impact normal movement or development. Mothers were asked to read and sign assent documents for their infant, and were free to withdraw from the study at any time. Ethical approval for this study was provided by the University of the Witwatersrand Human Research Ethics Committee (M150632). At recruitment, mothers were asked to report their date of birth, and their infant/toddler's date of birth and gender. Anthropometry and demographics: All anthropometry measurements were taken twice and the average of the two values was used. 
Trained research staff measured length to the nearest 1 mm using an infantometer (Chasmors Ltd, UK), and weight to the nearest 0.1 kg using a becoming more sedentary and less physically active from a very young age (). Similar to international findings, the majority of South African children are not meeting physical activity recommendations, a problem which is worsening each year (). This is of concern since these behaviours track into later childhood and adolescence (;;Carson, Tremblay, & Chastin, 2017). Determining correlates of physical activity in early life is therefore essential, in order to understand how to intervene effectively before poor trajectories are established. Furthermore, decreased physical activity levels in the early years may be indicative of poor health or malnutrition (;), and a more thorough understanding of these relationships is thus important. There is limited and mixed evidence for the relationship between body composition and physical activity in children under four years of age (;Carson, Lee, et al., 2017;;;Trost, Sirard, Dowda, Pfeiffer, & Pate, 2003). Longitudinal studies have examined this relationship, showing that physical activity in early childhood is associated with lower adiposity in later childhood (Berkowitz, Agras, Korner, Kraemer, & Zeanah, 1985;Moore, et al., 2003). How infant body composition associates with physical activity is less clear, largely due to the paucity of studies assessing physical activity in the first two years of life (Carson, Lee, et al., 2017;). In addition, most studies investigating these associations in the early years have used surrogate measures of adiposity such as body mass index (BMI) (Carson, Lee, et al., 2017) or skinfold thickness (), therefore limiting the conclusions that can be drawn on body composition per se. 
Measures of not only body size, but also body composition and adiposity deposition, such as ultrasound and CT or MRI imaging (De Lucia ;Suliga, 2009) may elucidate these relationships further. It has been suggested that some studies may fail to observe associations between body composition and physical activity due to the common categorisation of physical activity data according to intensity thresholds, and the consequent focus placed on moderate to vigorous intensity physical activity, rather than examining physical activity as a continuum (Aadland, Kvalheim, Anderssen, Resaland, & Andersen, 2018;). This may be particularly important in the first two years of life, during which time activities are intermittent and sporadic, and where hourly level data provides greater power to account for within-person diurnal variation (). Furthermore, hourly variations in the accumulation of daily physical activity during childhood and infancy have been detection and wear time criteria, have been described in detail previously ;van ;White, Westgate, Wareham, & Brage, 2016). The physical activity outcome, average vector magnitude of acceleration corrected for gravitational acceleration (HPFVM, in mg), was analysed at an hourly level; hours were excluded if any non-wear was detected within that hour. Participants were excluded if they did not provide at least 3 days with at least 15 h of data per day. Statistical analysis: Data were stratified by sex and summarised as mean(SD) for parametric continuous data, n(%) for categorical data, and median(SEM) for nonparametric data. Two analytical approaches were used, whereby the exposure variables were first considered continuously, and then categorically. 
Firstly, pairwise correlations were run to determine significant correlates of average vector magnitude; then these correlates, as well as a priori determined confounders, were included in separate multilevel panel linear regression analyses with either BMI z-score or length (adjusted for age and sex), or TAT, VAT or SAT (adjusted for age, sex, and length) as exposures. Using substitution analysis, we then re-ran the VAT and SAT models also adjusting for TAT as has been suggested for this age group; this describes the hypothetical effect of shifting one unit of SAT for one unit of VAT. All regression models were then re-run restricted to daytime hours only (between 7am and 7pm). Sensitivity analyses were conducted to determine potential modification effects by age (≤ 12 months (n = 93) or > 12 months (n = 45)). Secondly, BMI z-score categories were used; while tertiles of length, SAT and VAT were created based on their residuals from models adjusted for age and sex (and for length in the case of SAT and VAT). After considering this main effect, SAT and VAT were further residualised for TAT. The hourly physical activity data were then used to plot diurnal distributions stratified by BMI categories and by tertiles of length, SAT, and VAT. Differences in physical activity between the categories were tested using one way ANOVAs with Bonferroni post-hoc tests. Lastly, linear trends across these categories were tested using multilevel panel regressions. A p-value < 0.05 was considered to indicate statistical significance. All data were analysed using Stata version 13 (Stata Corp, College Station, TX) for Mac. digital scale (Dismed, USA). Weight, length, and BMI were converted to sex and age-specific z-scores according to the 2006 World Health Organisation (WHO) growth standards (de Onis, 2006) using the WHO Anthro software ("WHO Anthro for personal computers, version 3.2.2, 2011: Software for assessing growth and development of the world's children.,"). 
Overweight was defined as a BMI z-score > 2, while underweight was defined as BMI z-score <-2. Abdominal adiposity: In order to assess abdominal adiposity depots, infants were measured while lying quietly in the supine position using an ultrasound (GE, LOGIC e) with a standard curved array abdominal probe (4 C-RS-2-5 MHz 3 C-RS). The probe was positioned where the xiphoid line intercepts the waist circumference, and all images were captured during expiration. All measurements were performed twice by a trained operator according to previously validated techniques (De Lucia ), and were remeasured by a second trained operator (AP). Visceral adipose tissue (VAT) was measured with the probe in the longitudinal plane, and the distance between the peritoneum to the corpus of the lumbar vertebra was recorded. Subcutaneous adipose tissue (SAT) was measured with the probe in the vertical plane, and the distance between the linea alba and the cutaneous boundary was recorded. Where major discrepancies existed between the two measurements (±1SD of the mean difference between each operator's measures, which equated to ±0.11 for SAT and ±0.23 for VAT), a third external operator (EDLR) re-measured the images and these values were then recorded and used. Lastly, 10% of all scans were remeasured by the external operator (EDLR) for further quality control. In all instances where discrepancies existed, the quality control measurement replaced the original measurement. If the image was deemed to be inaccurately captured or unusable, the value was rejected. If one image was usable but the second was not, the value for the usable image was used. Total abdominal thickness (TAT) was calculated as the sum of VAT and SAT. Physical activity: Physical activity was objectively measured using an accelerometer (AX3, Axivity Ltd, Newcastle-upon-Tyne, UK) worn in a specially designed fabric band (Open Lab, Newcastle, UK). 
The design and feasibility of this infant band has been described previously (). Monitors were initialised to capture triaxial acceleration data at 100 Hz with a dynamic range of ±8 g and we requested that infants wore the monitors continuously for a 7-day period. Processing of the data, including non-wear Linear associations Length, BMI z-score, TAT, VAT, and SAT were not significantly linearly associated with physical activity (in adjusted models; Table 2). Findings were similar when physical activity was restricted to daytime hours only (Table 2). Sensitivity analyses showed that findings with BMI z-score, VAT and SAT did not differ when stratifying the sample by age ≤ 12 months or > 12 months (data not shown). However, among younger infants aged ≤ 12 months, physical activity over the full 24 h was positively associated with length (β = 0.51, p < 0.01, 95% CI: 0.28-0.73) and TAT (β = 5.71, p = 0.03, 95% CI: 0.67-10.75). The association with length in this younger sub-group was even more pronounced with physical activity restricted to daytime hours Results: Of the 152 infants recruited, 138 provided sufficient physical activity data for analysis. Of the 14 participants with insufficient data, 6 lost their measurement devices and 8 did not meet the wear time criteria. Of the 138 included participants, mean (SD) age was 11.8 (7.6) months, most (86%) were normal weight, 7% were underweight and 7% were overweight. Physical activity (mg) was higher in boys compared to girls; and boys were taller than girls, but there were no other sex differences detected ( Table 1). Discussion This study has explored the associations between infant abdominal fat depots and body size, with daily objectively measured physical activity, using imaging techniques and 24-hour accelerometry to more accurately measure these exposures and outcomes. 
Interestingly, while no significant associations were found between continuous adiposity or body size with physical activity; when categorising these exposures into tertiles the relationships became significant, indicating non-linear effects according to body size and abdominal fat depots. It appears from these results that infants who were thriving -i.e.: normal body weight, taller, lower SAT and 'normal' VAT (within the middle tertile), showed higher levels of physical activity. Some of the relationships between fat deposition and physical activity exhibited a U-shaped relationship-such as BMI, and VAT tertiles when adjusted for TAT. Previous work in this age group has shown diurnal variations in physical activity intensity by age, sex, and developmental stage (). However, this is the first study we are aware of to show that diurnal variation in physical activity was differentially associated with high, middle and low infant body size and adiposity. It seems logical that bigger infants might accumulate more physical activity only (β = 0.80, p < 0.01, 95% CI: 0.43-1.16). No association was seen in older infants aged > 12 months. Fig. 3 Mean infant physical activity (adjusted for infant age, sex, and length) plotted by hour of the day, stratified by infant SAT tertiles Fig. 2 Mean infant physical activity (adjusted for infant age and sex) plotted by hour of the day, stratified by infant BMI categories under the age of five years considered stunted according to the World Bank. Therefore, the positive association between length (adjusted for age and sex) and physical activity is indicative of infants who are thriving accumulating higher levels of activity. In order to further test these relationships, we repeated the length regression correcting for maternal height in order to account for a genetic influence; and the significance remained (data not shown). 
This indicates that these relationships were purely related to growth environments, and consequently are indicative of infants' ability to thrive. Since both length, and physical activity during childhood have been related to improved outcomes (such as growth, cognitive and motor development, and cardio metabolic health) (Carson, Lee, et al., 2017;Said-Mohamed, Micklesfield, Pettifor, & Norris, 2015); it is encouraging that these two aspects of infant growth and development appear to be positively associated. Furthermore, the global focus on decreasing stunting (and improving body composition) in low to middle income countries such as South Africa may thus have a dual effect by concurrently improving infant movement behaviours through increasing physical activity. To our knowledge this is one of the first studies to examine the association between body composition and physical activity in an infant population; and these cross sectional results must therefore be interpreted in the context of the dramatic and rapid changes in adiposity that occur during this period. However, a recent study that examined relationships between longitudinal physical activity and BMI and adiposity measured using skinfold thickness similarly did not find an association with linear outcomes, and attributed this to the use of skinfold thickness to estimate adiposity (). They did however find an association between physical activity and estimated central adiposity. It is important in the context of infant physical activity to consider theories related to the growth trade-offs that occur during the early years of life. Specifically, during critical periods of growth (i.e.: infancy) there is a limited amount of energy available for both growth and maintenance (), and focus is placed on brain and body growth during this period in order to maximise fitness. 
Therefore, if during this period, infants are exposed to undernutrition -which remains prevalent in the context of this study setting -they would preferentially allocate more energy to brain growth (). In line with this, while physical activity remains beneficial for motor and cognitive development (Carson, Lee, et al., 2017), and seems to be beneficially related to body composition in the current study; excessive energy expenditure during this time may also be detrimental for growth, forcing the infant to trade body growth for brain growth (). Given the importance of this period of life in setting up future body composition due to being better developed and able to produce bigger movements. However, it is interesting that excessive body size (overweight and obese) appears to have a negative impact on physical activity. This implies that an optimal BMI allows higher physical activity levels compared to over-or under-weight infants. The present findings may provide some evidence for a beneficial relationship between optimal body composition and physical activity, even from a very young age. Similarly, when adjusted for TAT, a VAT within the middle tertile was associated with higher physical activity compared to the lowest or highest VAT tertiles. Typically, VAT has been associated with adverse metabolic and cardiovascular outcomes in children and adults (Staiano & Katzmarzyk, 2012;Suliga, 2009;Toro-Ramos, Paley, Pi-Sunyer, & Gallagher, 2015). However, there is limited research into the health consequences of VAT in infants (De Lucia ;Ferreira, da Silva Junior, Figueiroa, & Alves, 2014;). On the other hand, length and SAT exhibited linear relationships with physical activity, where length was positively associated, and SAT inversely associated, with physical activity. 
Multiple studies in older children have shown that higher physical activity was associated with lower VAT, but not SAT ; however similar to our findings a study of children in a lower income setting in South America showed that higher physical activity was associated with lower SAT (as measured by skinfolds) (Urlacher & Kramer, 2018). Failure to reach growth potential (stunting) has consistently been associated with poor health outcomes in later life (Said-Mohamed, Pettifor, & Norris, 2017). In South Africa, stunting persists with nearly a third of children Acknowledgements We would like to thank the participants who were involved in this study and Lutricia Moagi (DPHRU) for her assistance with data collection. We would also like to acknowledge Antonia Smith for her assistance with the processing of the data and Tom White (both MRC Epidemiology Unit, University of Cambridge) for provision of open-access software (Pampro) for summarising accelerometry data. Authors' contributions: AP was involved in conceptualisation of the project and the manuscript, data collection, cleaning and analysis, and writing of the manuscript. EDLR and KO were involved in the conceptualisation of the manuscript and edited the manuscript. SB was involved in conceptualisation of the manuscript, processing of the data and edited the manuscript. KW was involved in the processing of the physical activity data and edited the manuscript. LKM conceptualised the project and edited the manuscript. All authors read and approved the final manuscript. -1215-20014]. LKM acknowledges funding from the Academy of Medical Sciences-Newton Advanced Fellowship. None of the funders were involved in the design of the study, collection or analysis or interpretation of the data, or in writing of the manuscript. Availability of data and material: De-identified data used in this manuscript are available upon request subject. Code Availability Not applicable. 
Conflicts of interest The authors have no conflicts of interest to declare. Ethics approval Ethical approval for this study was provided by the University of the Witwatersrand Human Research Ethics Committee (M150632). Mothers were asked to read and sign assent documents for their infant and were free to withdraw from the study at any time. Open Access This article is licensed under a Creative Commons Attribution 4.0 International License, which permits use, sharing, adaptation, distribution and reproduction in any medium or format, as long as you give appropriate credit to the original author(s) and the source, provide a link to the Creative Commons licence, and indicate if changes were made. The images or other third party material in this article are included in the article's Creative Commons licence, unless indicated otherwise in a credit line to the material. If material is not included in the article's Creative Commons licence and your intended use is not permitted by statutory regulation or exceeds the permitted profiles (Young, Johnson, & Krebs, 2012), this improved understanding of how body size and adiposity are related to physical activity is crucial. Future research should include an assessment of nutritional status in order to account for the nutritional context of these findings, thus improving our understanding of the energy balance occurring during this period of life. Studies examining the associations between physical activity and adiposity in the first few years of life do not show consistent findings, with many of these studies using proxy measures of adiposity (Carson, Lee, et al., 2017). It is possible that by using a more accurate measure of adiposity, we have been able to better determine associations between infant fat deposition. 
Furthermore, our objective measurement of physical activity allowed us to report diurnal variation over an extended period of time and to represent the entire range of physical activity rather than categorised data, and has allowed us to examine differential diurnal relationships between physical activity and adiposity for the first time in this age group. Lastly, we have used a better measure of adiposity deposition, in conjunction with the commonly used BMI z-score, thus improving understanding of these relationships. This study has several limitations. The cross sectional nature of the data limits our ability to determine causal relationships, and longitudinal growth and physical activity data in the first two years of life would greatly improve our understanding of these relationships. Secondly, the sample size was small, which limits the conclusions that can be drawn. The lack of information on the nutritional context of these findings means that we were unable to account for energy intake or undernutrition. We were also limited in our assumptions by the lack of birth outcome data, meaning we could not account for growth restriction in utero. Due to the nature of accelerometry and the limited methodological work that has been done in this age group, we are not yet able to differentiate between infant initiated movement, and that initiated by a caregiver who may be carrying or moving the infant. Future methodological work is required to improve our ability to use accelerometry in infant populations. In conclusion, using imaging measures of abdominal adiposity and an objective assessment of physical activity, this study shows that optimal body composition profiles were related to higher physical activity in infants in South Africa. Conversely, body composition profiles indicative of under- or overnutrition were associated with lower physical activity. 
These results provide the first insight into the effect that optimal growth may have on infants' physical activity, and may be important for understanding how non-optimal growth may negatively impact health outcomes in children. |
Magnetotelluric imaging of the Mérida Andes and surrounding areas in Venezuela The Caribbean and South American tectonic plates bound the north-eastwards expulsion of the North Andean Block in western Venezuela. This complex geodynamic setting resulted in the formation of major strike-slip fault systems and sizeable mountain chains. The 100-km-wide Mérida Andes extend from the Colombian/Venezuelan border to the Caribbean coast. To the north and south, the Mérida Andes are bound by hydrocarbon-rich sedimentary basins. Knowledge of lithospheric structures, related to the formation of the Mérida Andes, is limited though, due to a lack of deep geophysical data. In this study, we present results of the first broad-band magnetotelluric profile crossing the Mérida Andes and the Maracaibo and Barinas–Apure foreland basins over a length of 240 km. Geoelectrical strike and dimensionality analysis are consistent with 1-D or 2-D subsurface structures for the sedimentary basins but also indicate a strong 3-D setting for the Mérida Andes. Using a combination of 2-D and 3-D modelling we systematically examined the influence of 3-D structures on 2-D inversions. Synthetic data sets derived from 3-D modelling allow identification and quantification of spurious off-profile features as well as smoothing artefacts due to the limited areal station coverage of data collected along a profile. The 2-D inversion models show electrically conductive basins with depths of 2–5 km for the Barinas–Apure and 2–7 km for the Maracaibo basins. A number of resistive bodies within the Maracaibo basin could be related to active deformation causing juxtaposition of older geological formations and younger basin sediments. The most important fault systems of the area, the Boconó and Valera Faults, cross-cut the Mérida Andes in NE–SW direction along its strike over a length of 400 km and N–S direction at its centre over a length of 60 km, respectively. 
Both faults are associated with subvertical zones of high electrical conductivity and sensitivity tests suggest that they reach depths of up to 12km. A sizeable conductor at 50km depth, which appears consistently in the 2-D sections, could be identified as an inversion artefact caused by a conductor east of the profile. We speculate the high conductivity associated with the off-profile conductor may be related to the detachment of the Trujillo Block. Our results partially support the floating orogen hypothesis developed to explain the geodynamic evolution of western Venezuela and they highlight the relevance of the Trujillo Block in this process. |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dev.liliwei.iceberg.tool;
import static org.junit.Assert.assertEquals;
import com.google.gson.Gson;
import com.google.gson.stream.JsonReader;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Type;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericDatumWriter;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Map;
@SuppressWarnings("deprecation")
public class TestDataFileTools {

    /** Number of integer records written to the sample Avro file. */
    static final int COUNT = 15;
    private static final String KEY_NEEDING_ESCAPES = "<KEY>";
    private static final String ESCAPED_KEY = "<KEY>";

    @ClassRule
    public static TemporaryFolder DIR = new TemporaryFolder();

    // Shared fixtures, created once in writeSampleFile().
    static File sampleFile;   // Avro data file containing COUNT int records.
    static String jsonData;   // Expected tojson output: one integer per line.
    static Schema schema;     // The int schema the sample file is written with.
    static File schemaFile;   // The same schema persisted to disk for tools that read it from a file.

    /**
     * Writes a small Avro data file of {@link #COUNT} int records plus the
     * matching schema file, and records the JSON rendering the read tools are
     * expected to produce for it.
     */
    @BeforeClass
    public static void writeSampleFile() throws IOException {
        sampleFile = new File(DIR.getRoot(), TestDataFileTools.class.getName() + ".avro");
        schema = Schema.create(Type.INT);
        schemaFile = new File(DIR.getRoot(), "schema-temp.schema");
        try (FileWriter fw = new FileWriter(schemaFile)) {
            fw.append(schema.toString());
        }
        StringBuilder builder = new StringBuilder();
        try (DataFileWriter<Object> writer = new DataFileWriter<>(new GenericDatumWriter<>(schema))) {
            writer.setMeta(KEY_NEEDING_ESCAPES, "");
            writer.create(schema, sampleFile);
            for (int i = 0; i < COUNT; ++i) {
                builder.append(i);
                builder.append('\n');
                writer.append(i);
            }
        }
        jsonData = builder.toString();
    }

    /** Runs {@code tool} with no stdin and returns everything it wrote to stdout. */
    private String run(Tool tool, String... args) throws Exception {
        return run(tool, null, args);
    }

    /**
     * Runs {@code tool}, capturing stdout as a UTF-8 string. Carriage returns
     * are stripped so the assertions are platform independent.
     */
    private String run(Tool tool, InputStream stdin, String... args) throws Exception {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        PrintStream p = new PrintStream(baos);
        tool.run(stdin, p, // stdout
                null, // stderr
                Arrays.asList(args));
        // Flush before reading the buffer: PrintStream may hold buffered output
        // internally, and the original relied on the tool flushing for it.
        p.flush();
        return baos.toString("UTF-8").replace("\r", "");
    }

    /** The read tool should dump the sample file's records as JSON, one per line. */
    @Test
    public void testRead() throws Exception {
        assertEquals(jsonData, run(new ManifestFileReadTool(), sampleFile.getPath()));
    }

    /** Passing "-" as the file argument makes the tool read the Avro data from stdin. */
    @Test
    public void testReadStdin() throws Exception {
        // try-with-resources: the original leaked this stream.
        try (FileInputStream stdin = new FileInputStream(sampleFile)) {
            assertEquals(jsonData, run(new ManifestFileReadTool(), stdin, "-"));
        }
    }

    /** --pretty must not change the rendered content for scalar records. */
    @Test
    public void testReadToJsonPretty() throws Exception {
        assertEquals(jsonData, run(new ManifestFileReadTool(), "--pretty", sampleFile.getPath()));
    }

    /** Smoke test: tojson over a checked-in manifest/metadata pair from test resources. */
    @Test
    public void testReadToJsonPretty2() throws Exception {
        ManifestFileReadTool dataFileReadTool = new ManifestFileReadTool();
        Path resourceDirectory = Paths.get("src", "test", "resources");
        String[] args = new String[] {
                "tojson",
                resourceDirectory.toString() + File.separatorChar + "1702989a-f66f-423a-aaf1-a01b9a699685-m0.avro",
                resourceDirectory.toString() + File.separatorChar + "v1.metadata.json"
        };
        dataFileReadTool.run(System.in, System.out, System.err, Arrays.asList(args).subList(1, args.length));
    }

    /**
     * parseMetaData should load the schema map from a table-metadata JSON file.
     * Reads the checked-in test resource instead of a machine-specific absolute
     * path (the original read "D:\\v1.metadata.json", which only exists on one
     * developer's Windows machine), and closes the reader when done.
     */
    @Test
    public void testReadMetaData() throws Exception {
        ManifestFileReadTool dataFileReadTool = new ManifestFileReadTool();
        File metadataFile = Paths.get("src", "test", "resources", "v1.metadata.json").toFile();
        try (BufferedReader reader = new BufferedReader(new FileReader(metadataFile))) {
            JsonReader jsonReader = new Gson().newJsonReader(reader);
            dataFileReadTool.parseMetaData(jsonReader);
        }
    }
}
|
Metabolic Syndrome Prevalence in Students Attending West Virginia University Metabolic Syndrome (MetS) contributes to the development of cardiovascular disease (CVD) and type II diabetes mellitus (T2DM). Few studies have investigated the MetS risk of young adults (1824 years old). This study aims to determine the prevalence of MetS in Appalachian and non-Appalachian students attending West Virginia University. The prevalence of MetS in this population was 15%. There was no difference in MetS prevalence between male students and female students (18.8% males and 11.1% females, p-value = 0.30), or between Appalachian students and non-Appalachian students (17.7% Appalachian and 10.0% non-Appalachian, p-value = 0.33). Identification of MetS early in life is needed in order to reduce the onset of chronic disease. Therefore, implementing a screening process to identify at-risk young adults will help tailor more effective behavioral interventions. Few studies have investigated the MetS risk of young adults. However, CHD is the second leading cause of death in this age group, suggesting the need to detect for early warning signs of this disease. Thus, screening for MetS in this population can be beneficial in identifying those at higher risk from CHD development. The prevalence of obesity has reached epidemic proportions within the Appalachian region, which extends from Northeastern Mississippi to Southern New York, with its epicenter lying entirely within the state of West Virginia (WV). The Appalachian region is characterized by unhealthy eating behaviors and inactivity, which leads to the increased incidence of chronic disease among this population. Ely et al. recently reported the increased risk of chronic disease, related to excessive weight gain and poor health behaviors, is not realistically perceived by Appalachian residents. 
Inadequate transportation, poverty, lack of access to medical care and lack of insurance are additional factors directly impacting the health disparities of individuals in this region. In 2009, Appalachians were 40% more likely to have diabetes than non-Appalachians. In 2011, the prevalence of diagnosed diabetes was 9.8% within the Appalachian region and 7.8% in the rest of the nation. In Ohio and WV, 25.6% of normal-weight adults display two or more of the following cardio-metabolic abnormalities: High blood pressure, elevated TG, decreased HDL, elevated FBG, and insulin resistance (IR). An estimated 50% of adults and adolescents residing in WV are obese by Body Mass Index (BMI) classification: Normal weight (18.5-24.9 kg/m 2 ), overweight (25.0-29.9 kg/m 2 ), and obese (≥30 kg/m 2 ). WV has reported the highest age-adjusted incidence of diabetes as well as the highest diabetes-related deaths in the nation. As many as 30% to 35% of college students are reported to be overweight or obese and obesity rates have increased most rapidly among individuals 18-29 years old with some college education [7,. First year college students gain weight up to 11 times faster than adults. This increase in weight increases the likelihood of developing MetS risk criteria. Increased weight and obesity, particularly abdominal obesity, is directly related to the development of MetS and cardiovascular risk. Screening for abnormal lipid values is recommended starting at 20 years old. Therefore, colleges and universities serve as important settings for the surveillance, prevention, and intervention of hyperlipidemia, MetS, and CHD. Targeting young adults that choose to attend post-secondary institutions could aid in the prevention of CVD, CHD, and T2DM since many young people develop a clearer sense of self and establish life-long behavior patterns during college. In the same studies, 5.7%, 7.4%, and 14.3% of college students had two components of MetS. 
In the Southern US, 43% of students presented with at least one component of MetS. When the sample was limited to students who were overweight or obese, the rate of at least one component of MetS rose to 82%. Fernandes et al. found that 3.7% of college students presented with three or more MetS risk components. The latest study, conducted by Morrell et al., reported 77.2% of males and 53.8% of females exhibited at least one component of MetS. MetS was present in 9.9% of males and 3.0% females. Not only do overweight and obese students present with MetS and more MetS risk factors than normal weight students, but obese students exhibit significantly more MetS criteria than overweight students. Overall, male students were more likely to exhibit MetS risk criteria than female students and the most prevalent MetS risk component was low HDL, then FBG, and TG in decreasing order. Due to the overall poorer health and higher risk factors in the Appalachian region discussed previously, young adults in these regions are an important, yet overlooked, demographic to study their MetS risk. This study aims to determine the prevalence of MetS in Appalachian and non-Appalachian students attending West Virginia University. Method The measures for this cross-sectional study were collected in January and February 2011. Subjects were recruited from West Virginia University. The Institutional Review Board (IRB) at West Virginia University approved the study protocol in advance of commencement. Subjects Subjects (n = 93) were recruited via posted advertisements, in-class announcements, and recruitment E-mails sent by instructors and campus administrators. 
Students were eligible if they were 18-24 years old; had a BMI >18.5 kg/m 2 ; were a first, second, or third year undergraduate; had regular access to the Internet; were free from life-threatening illnesses or other health conditions; were not pregnant; had no diet-and/or activity-related medical restrictions that prevented accurate physical assessments; they were not currently enrolled in a nutrition course; and they were not majoring in nutrition, exercise science, or health-promotion. Measures Demographics (e.g., ethnicity), health-related practices (e.g., cigarette and medication use), and Appalachian identity (from self-reported home address) were collected via questionnaire. All clinical, anthropometric, and biochemical measurements were performed by trained staff. Measurements were collected in the fasted state (>8 h) after participants had changed into light clothing and voided. All female participants were screened for pregnancy. Systolic and diastolic pressures were measured in triplicate (with 2 min rest intervals) in the left arm midpoint between the shoulder and elbow via an automated cuff (HEM-907XL, Omron; Lake Forest, IL, USA) after subjects were rested in a seated position for five minutes. Height was measured by a wall-mounted, digital stadiometer (Heightronic 235, Quick Medical; Issaquah, WA, USA) with subjects looking straight ahead and maintaining four points of contact (heels, buttocks, shoulder blades, and back of the head) with the wall. Weight was measured by a digital scale (300A, Tanita; Arlington Heights, IL, USA) which also calculated body mass index (BMI) from height input. Waist circumference (WC) was measured in duplicate at the top of the iliac crest to the nearest 0.1 cm using a non-stretchable tape measure with tensometer (Gulick, Creative Health Products; Plymouth, MI, USA). Neck circumference (NC) was measured in duplicate at the point below the larynx to the nearest 0.1 cm with the tape measure used for waist circumference. 
Biochemical measures were obtained via finger-stick using a desktop LDX Cholestech Analyzer (LDX, Cholestech; San Diego, CA, USA) following manufacturer's instructions. Analyses for high-density lipoprotein cholesterol (HDL), triglycerides (TG), fasting blood glucose (FBG), C-Reactive Protein (CRP), and low-density lipoprotein cholesterol (LDL) were conducted via direct enzymatic methods by the analyzer. The Cholestech LDX analyzer has been validated for precision and accuracy against traditional laboratory techniques and offers logistical convenience for large-scale health assessments. Hemoglobin A1C (HbA1C) was measured using DCA Vantage. Hemoglobin (Hgb) and hematocrit (Hct) weer measured using HemoPoint A2 (Boerne, TX, USA). Student t-tests were performed to identify differences in anthropometric and biochemical measures between males and females and Appalachians and non-Appalachians. Chi-square tests were used to identify associations between prevalence rates of individual components and MetS, and the following subject characteristics: Male, female, Appalachian, and non-Appalachian. The Mantel-Haenszel test was used to assess the ordinal trend between males and females as well as Appalachians and non-Appalachians. Statistical significance was defined as a p-value < 0.05. Statistical analyses were performed using SAS 9.3 statistical software (SAS Institute Inc., Cary, NC, USA). Twenty-eight percent of students presented with zero components of MetS; 33.1% with one component; and 23.7% with two components ( Table 6). The prevalence of MetS in this population was 15%. Specifically, 10.8% presented with three components, 3.2% with four components, and 1.1% with all five components of MetS (Table 6). There was no significant difference between male and female students in MetS prevalence (p-value = 0.30) or between Appalachian students and non-Appalachian students (p-value = 0.33) ( Table 6). 
There were no significant associations between the number of MetS components by sex (p = 0.28) or by Appalachian identity (p = 0.78) ( Table 6). Discussion The prevalence of MetS students at West Virginia University (15.1%) is much higher than what has been reported in previous studies (0.6% to 10%) of MetS prevalence in college students. Previous studies have reported the prevalence of MetS in young adults to be 0.6% and 1.3% in the Midwest region, 3.7% in the Northeast region and up to 10% in the Southeast region. Additionally, in the Northeast 7% ; and in the Midwest 14% and 6% of students had two MetS risk factors, while 24% had two MetS risk factors in the Appalachian region. Showing that Appalachia has a markedly higher incidence of MetS and MetS risk factors than other regions of the US. In a direct comparison of West Virginia University students to students from two Northeast universities (NEU1 and NEU2), the prevalence of MetS was significantly greater at WVU than both NEU1 (p-value < 0.05) and NEU2 (p-value < 0.05). The surprising lack of a significant difference between Appalachian and non-Appalachian student identity and the higher prevalence of MetS in WVU students suggests that an unidentified factor may be mediating the relationship with higher incidence of MetS in this population. One possible factor is BMI. Upon examining correlations between BMI and MetS components significant relationships were found. Specifically, BMI was significantly correlated with students presenting with at-risk WC (R 2 = 0.86, p < 0.05), low HDL (R 2 = −0.43, p < 0.05), elevated BP (R 2 = 0.29, p < 0.05) and increased TG (R 2 = 0.22, p < 0.05). No correlation was found between BMI and impaired FBG measures. There are several limitations to this study. First, this was a small sample size of students. While this area of study is important, having a larger sample size would have increased the impact of these results. 
Overall, there was a generally low prevalence of MetS in this sample. Future work would include a larger sample size to have more impactful results. These data identify the need for university administrators throughout the nation to implement health initiatives to measure BMI and screen for MetS components in students to prevent the development of chronic disease. Data from such screenings for college students will provide researchers, public health officials, and administrators with information to use to design, tailor, and implement effective interventions to prevent chronic disease progression across various university settings. |
<filename>gateway/cmd/gateway/main.go
package main
import (
"context"
"flag"
"net/http"
"github.com/heetch/confita"
"github.com/heetch/confita/backend/file"
bus "github.com/rafaeljesus/nsq-event-bus"
"github.com/sirupsen/logrus"
"github.com/spirosoik/go-driver-microservices/gateway"
)
// Config is the gateway configuration schema loaded by confita from the
// YAML file named by the -config flag (see main).
type Config struct {
	// Urls is populated from the top-level "urls" key of the config file;
	// each entry is a gateway.URL route definition.
	Urls []gateway.URL `config:"urls"`
}
// main wires the gateway together: it parses flags, loads the YAML
// configuration, creates an NSQ event-bus emitter, decorates the service
// with logging middleware, and serves the HTTP API until the server exits.
func main() {
	var (
		config   = flag.String("config", "config.yaml", "Yaml config file")
		httpAddr = flag.String("http.addr", ":8080", "HTTP listen address")
		nsqAddr  = flag.String("nsq.addr", ":4150", "NSQ listen address")
	)
	flag.Parse()

	logger := logrus.New()
	logger.SetLevel(logrus.DebugLevel)

	// Load configuration from the file given by -config.
	cfg := Config{}
	loader := confita.NewLoader(
		file.NewBackend(*config),
	)
	if err := loader.Load(context.Background(), &cfg); err != nil {
		logger.WithError(err).Fatal("failed to load configuration")
	}

	// Emitter publishes gateway events onto the NSQ bus at -nsq.addr.
	emitter, err := bus.NewEmitter(bus.EmitterConfig{
		Address:     *nsqAddr,
		MaxInFlight: 100,
	})
	if err != nil {
		logger.WithError(err).Fatal("failed to create BUS emitter")
	}

	// Build the service and wrap it with logging middleware.
	var s gateway.Service
	{
		s = gateway.NewService(emitter, http.DefaultClient)
		s = gateway.LoggingMiddleware(logger)(s)
	}

	// Handler setup.
	ctx := context.Background()
	h := gateway.MaketHTTPHandler(ctx, cfg.Urls, s, logger)

	// Run the HTTP server in a goroutine; its terminal error is reported
	// back through errchan so main can log it before exiting.
	errchan := make(chan error)
	go func() {
		logger.WithFields(logrus.Fields{
			"protocol": "HTTP",
			// Dereference the flag: logging httpAddr itself prints the
			// *string pointer, not the listen address (bug in the original).
			"address": *httpAddr,
		}).Info("Set Router Handler")
		errchan <- http.ListenAndServe(*httpAddr, h)
	}()
	logger.Error("Server Error :( !!!!", <-errchan)
}
|
The safety, tolerability and pharmacokinetics of levamisole alone, levamisole plus ivermectin, and levamisole plus albendazole, and their efficacy against Onchocerca volvulus Abstract Two randomized, double-blind, placebo-controlled trials, in which levamisole (2.5 mg/kg) was given alone or co-administered with ivermectin (200 g/kg) or albendazole (400 mg), were conducted. In Trial 1, safety and drugdrug interaction were explored in 42 healthy male volunteers. During Trial 2, the safety of the same treatment regimens and their efficacy against the adult worms and microfilariae of Onchocerca volvulus were investigated in 66 infected subjects of both sexes. Safety was determined from the results of detailed clinical and laboratory examinations before treatment, during hospitalization and on day 30. The pharmacokinetic parameters for levamisole alone and the combinations were determined in Trial 1 and then compared with historical data for ivermectin and albendazole, given as single agents, to determine if drugdrug interaction had occurred. The level of efficacy against the adult worms was determined by the examination of histology sections of nodules excised 6 months posttreatment and from the changes seen in the levels of microfilaridermia within a year of treatment. Microfilaricidal efficacy was estimated from the reductions in the levels of microfilaridermia between day 0 (1 day pre-treatment) and day 30. Although the regimens were generally well tolerated, there were unexpected adverse effects in both healthy volunteers and infected subjects. Clinically significant drugdrug interactions resulted in an increase in the bio-availability of ivermectin but a reduction in that of albendazole when these drugs were co-administered with levamisole. Levamisole given alone or with albendazole had little effect on O. volvulus. 
The combination of levamisole with ivermectin was neither macrofilaricidal nor more effective against the microfilariae and the adult worms than ivermectin alone. The pathogenesis of the adverse events and the drugdrug interactions are discussed. |
At least two people, both Muslims, have been lynched this past week alone in the Hindi heartland of UP and Jharkhand. Shahzad Ahmad, a 22-year old youth, an expatriate who worked in Dubai, and had just returned to celebrate Eid with his family in the outskirts of Bareilly, UP, was lynched by a huge mob, destroying his dreams of a great future, and snatching away the lone bread winner of his family.
Incidents of mob lynching across the country are on the rise. Dozens of people have lost their lives in the mindless cases of mob lynching over the last four years. Despite promises and some instances of judicial rebukes, nothing has been done on the ground to alter the situation till now.
Initially only Muslims were targets of the mob lynching. They were the main target of the organized gangs of so called cow vigilantes or gau rakshaks, who were given a free hand in much of the North India. The menace of the gau rakhshaks grew so much that they started targeting the cattle traders and even innocent Muslim men in the NCR region, thrashing them and then killing them on the spot.
That there was a law of land and punishment for criminal acts against innocent people has been completely lost on their minds due to the covert state support they have been getting. This covert and overt support has emboldened the lynch mob to such an extent that they have started believing that the police will support them in undertaking the task.
What is most surprising is that the incidents of mob lynching are taking place without any signs of slowing down in states where the Bharatiya Janata Party rules. From Rajasthan to Jharkhand, Uttar Pradesh, and the newly conquered saffron trophy in Tripura, the incidents of mob lynching have spiralled. In Tripura, where there was no case of mob lynching during the long Left rule, the arrival of the BJP government has heralded a new era of lynch mobs going berserk.
To rub salt on the wounds of the victims, in most cases of mob lynching, FIRs are lodged against the victims and the perpetrators let off. There is no denying that a conscious effort has been made to embolden gau rakshaks. Merely a few weeks ago, a Union minister grabbed headlines for getting photographs clicked with eight men, who were convicted for killing a Muslim meat trader in Jharkhand, and welcoming them at his house.
When the menace of mob lynching began, only Muslims were on the radar of the lynch mobs. But as the freedom to perpetrate the criminal acts were felt by hoodlums and the rule of law looked weaker than ever before, others too started facing the music of the gau rakshaks and the lynch mobs. Dalits are the second most common targets of the lynch mobs and the cow vigilantes.
A number of Dalits have been subjected to mob lynching over the last four years. Rama Singrahiya, 42, was beaten to death using clubs and axes when he was sowing castor seeds on a plot in the village, located 30 km away from Porbandar. He died a day later. The mob was allegedly led by village sarpanch Harbham Karavadra, who is on the run, police said. According to the police, the Mer community members claim the plot where Rama was farming is gauchar (pastoral) land, meant for cattle grazing. As per the FIR, two men hired by Rama to help him sow the seeds were also beaten up. A number of Dalits have faced the lynch mob fury over the past few years, and the cases are only on the rise.
The lynching is no longer confined against Muslims. It is becoming endemic in nature as the fear of law recedes in the minds of the lynch mobs. The rising incidents of mob lynching on mere suspicion of child lifting show that the malaise is becoming deep rooted with complete disregard for the law of land. An IndiaSpend report says this year alone at least 24 people have been killed in such mob attacks. The report goes on to add that this is more than 4.5 times rise in attacks and 1.6 times rise in deaths of this kind over 2017, when nine persons were killed in eight separate attacks.
An IndiaSpend report says, “Between January 1, 2017, and July 5, 2018, 33 persons have been killed and at least 99 injured in 69 reported cases. In the first six days of July alone, there have been nine cases of mob violence over child lifting rumours and five deaths, which amounts to more than one attack recorded every day...In all cases, the victims were assaulted on mere suspicion and no evidence of child lifting was found later. So far, police across states have arrested at least 181 persons in connection with 21 cases, according to information from the news reports”.
Instead of taking action against the perpetrators to stop the menace, the covert support is helping them become all the more aggressive across the Gangetic plains and beyond. There is no denying that the state machinery like police and investigation departments have been outright biased in providing justice to the victims of lynching and hate crimes.
Earlier this year a group of 67 prominent retired officials belonging to the country’s elite civil service cadres wrote an open letter to Prime Minister Narendra Modi asking him to take tough measure against the increasing instances of mob lynching. “We seek now and without delay a clear response from the Honourable Prime Minister and his government on these issues”, they said. The retired officials demanded “immediate and firm action against the perpetrators of such hate crimes against minorities in this country...These recent incidents undermine our Constitutional values and weaken the rule of law”.
In July this year a Supreme Court bench headed by Chief Justice Dipak Misra called the incidents of mob-lynching in the country as “horrendous acts of mobocracy” and asked the Parliament to draft new law to control the incidents of mob lynching across the country. The apex court bench headed by the Chief Justice also ordered the police to register FIRs under Section 153A of the IPC against those who are found guilty in such cases. |
Q:
Why might it be a bad idea to invest 100% of your 401(k) into a stock index fund?
Background: I'm a young 20-something, I won't be retiring for 40+ years, and the expense ratios for the funds offered in my company's 401(k) plan mostly suck. The company matches 35% of my contributions up to a limit. The plan doesn't offer a large cap fund.
I understand that one of the reasons an all-stock portfolio isn't recommended is because of the volatility, though stocks will provide some of the best long-term returns. Considering my scenario, what drawbacks would I have were I to solely invest in stock-based funds? (going by expense ratios, my only viable choices are Vanguard mid-cap at 0.24% and Vanguard small-cap at 0.30%)
This question appears to be related: If low-cost index funds are considered the best investment, why are there so many high-cost, managed funds?
A:
At your age, I don't think its a bad idea to invest entirely in stocks. The concern with stocks is their volatility, and at 40+ years from retirement, volatility does not concern you. Just remember that if you ever want to call upon your 401(k) for anything other than retirement, such as a down payment on a home (which is a qualified distribution that is not subject to early distribution penalties), then you should reconsider your retirement allocations. I would not invest 100% into stocks if I knew I were going to buy a house in five years and needed that money for a down payment.
If you're truly saving strictly for a retirement that could occur forty years in the future, first, good for you, and second, put it all in an index fund. An S&P index fund has a ridiculously low expense ratio, and with so many years until retirement, it gives you an immense amount of flexibility to choose what to do with those funds as your retirement date draws closer every year. |
<filename>oscar/defences/preprocessor/detectron2.py
#
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: BSD-3-Clause
#
import pkg_resources
import logging
logger = logging.getLogger(__name__)
import math
import torch
import numpy as np
from typing import Optional, Tuple, List
from oscar.defences.preprocessor.preprocessor_pytorch import PreprocessorPyTorch
from oscar.defences.preprocessor.gaussian_augmentation import GaussianAugmentationPyTorch
import detectron2
from detectron2.model_zoo import get_config_file
from detectron2.config import get_cfg
from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.structures import Instances, ImageList
import detectron2.data.transforms as T
from oscar.utils.utils import create_model, create_inputs
from oscar.utils.layers import Quantize
from torchvision.transforms import Resize
from torch.nn import functional as F
from armory.data.utils import maybe_download_weights_from_s3
from pathlib import Path
# Monkey patch paste_masks_in_image with our modified implememtation that supports threshold=None
from oscar.utils.detectron2.layers.mask_ops import paste_masks_in_image
detectron2.modeling.postprocessing.paste_masks_in_image = paste_masks_in_image
class Detectron2PreprocessorPyTorch(torch.nn.Module):
    """
    A base class for defense preprocessors.

    Wraps a Detectron2 instance-segmentation model as an ``nn.Module`` so it
    can be used as a preprocessing stage. ``forward`` runs standard Detectron2
    inference; ``estimate_forward`` re-implements the GeneralizedRCNN pipeline
    step by step so that gradients can flow through the predicted masks
    (``detector_postprocess`` is called with ``mask_threshold=None`` via the
    monkey-patched ``paste_masks_in_image`` above).
    """
    def __init__(self, config_path, weights_path, score_thresh=0.5, iou_thresh=None, resize=True):
        """Load a Detectron2 model from config/weights paths.

        Args:
            config_path: Detectron2 config file. Supports ``detectron2://``,
                ``oscar://`` (package model zoo) and ``armory://`` (S3 download)
                URI prefixes in addition to plain paths.
            weights_path: model weights; supports ``oscar://`` and ``armory://``
                prefixes.
            score_thresh: detection score threshold passed to ``create_model``.
            iou_thresh: NMS IoU threshold, or None to keep the config default.
            resize: if True, resize inputs with ``ResizeShortestEdge`` as in
                standard Detectron2 inference.
        """
        super().__init__()
        # Find paths to configuration and weights for Detectron2.
        if config_path.startswith('detectron2://'):
            config_path = config_path[len('detectron2://'):]
            config_path = get_config_file(config_path)
        elif config_path.startswith('oscar://'):
            config_path = config_path[len('oscar://'):]
            config_path = pkg_resources.resource_filename('oscar.model_zoo', config_path)
        elif config_path.startswith('armory://'):
            config_path = config_path[len('armory://'):]
            config_path = maybe_download_weights_from_s3(config_path)
        if weights_path.startswith('oscar://'):
            weights_path = weights_path[len('oscar://'):]
            weights_path = pkg_resources.resource_filename('oscar.model_zoo', weights_path)
        elif weights_path.startswith('armory://'):
            weights_path = weights_path[len('armory://'):]
            weights_path = maybe_download_weights_from_s3(weights_path)
        # Create Detectron2 Model CPU and rely upon torch.to to move to proper device since this is a proper nn.Module
        self.model, self.metadata = create_model(config_path, weights_path, device='cpu', score_thresh=score_thresh, iou_thresh=iou_thresh)
        logger.info(f"Detectron2 config: score_thresh={score_thresh}, iou_thresh={iou_thresh}.")
        # Get augmentation for resizing
        cfg = get_cfg()
        cfg.merge_from_file(config_path)
        self.aug = None
        if resize:
            # FIXME: We always resize to 800 in estimate_forward, so error loudly if the model expects something else
            assert cfg.INPUT.MIN_SIZE_TEST == 800
            self.aug = T.ResizeShortestEdge(
                [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
            )
    def forward(self, x):
        """Run standard (non-differentiable) Detectron2 inference.

        Args:
            x: float tensor of shape (N, H, W, C), RGB, values in [0, 1]
               (enforced by the asserts below).

        Returns:
            list of detectron2 ``Instances``, one per input image.
        """
        assert len(x.shape) == 4  # NHWC
        assert x.shape[3] == 3  # C = RGB
        assert 0. <= x.min() <= x.max() <= 1.  # C in [0, 1]
        # NHWC -> NCHW
        x = x.permute((0, 3, 1, 2))
        # Run inference on examples
        self.model.eval()
        batched_inputs = create_inputs(x, transforms=self.aug, input_format=self.model.input_format)
        outputs = self.model(batched_inputs)
        batched_instances = [output['instances'] for output in outputs]
        return batched_instances
    def estimate_forward(self, x):
        """Differentiable approximation of ``forward`` for GeneralizedRCNN models.

        Re-implements preprocessing (scaling, BGR flip, normalization, padding)
        and the backbone/RPN/ROI pipeline inline so autograd can trace it, then
        post-processes with ``mask_threshold=None`` and straight-through
        quantization so ``pred_masks`` are binary but still carry gradients.

        Args:
            x: float tensor of shape (N, H, W, C), RGB, values in [0, 1].

        Returns:
            list of detectron2 ``Instances`` with quantized ``pred_masks``.

        Raises:
            NotImplementedError: if the wrapped model is not a GeneralizedRCNN.
        """
        assert len(x.shape) == 4  # NHWC
        assert x.shape[3] == 3  # C = RGB
        assert 0. <= x.min() <= x.max() <= 1.  # C in [0, 1]
        # Make sure model is RCNN-style model
        if not isinstance(self.model, GeneralizedRCNN):
            raise NotImplementedError(f"There is no differentiable forward implementation for {self.model.__class__} currently")
        # Put into eval mode since we don't have groundtruth annotations
        self.model.eval()
        images = x
        _, orig_height, orig_width, _ = images.shape
        # Create inputs for Detectron2 model, we can't use create_inputs as above.
        images = 255*images  # [0, 1] -> [0, 255]
        if self.model.input_format == 'BGR':
            images = images.flip(3)  # RGB -> BGR
        images = images.permute((0, 3, 1, 2))  # NHWC -> NCHW
        if self.aug is not None:
            images = Resize(800)(images)  # Resize to cfg.INPUT.MIN_SIZE_TEST
        images = (images - self.model.pixel_mean) / self.model.pixel_std
        # Mostly cribbed from https://github.com/facebookresearch/detectron2/blob/61457a0178939ec8f7ce130fcb733a5a5d47df9f/detectron2/structures/image_list.py#L70
        # Pad to self.backbone.size_divisibility
        _, _, height, width = images.shape
        size_divisibility = self.model.backbone.size_divisibility
        max_size = torch.tensor([height, width])
        if size_divisibility > 1:
            stride = size_divisibility
            # the last two dims are H,W, both subject to divisibility requirement
            max_size = (max_size + (stride - 1)) // stride * stride
        # left, right, top, bottom
        padding_size = [0, max_size[-1] - width, 0, max_size[-2] - height]
        images = F.pad(images, padding_size, value=0)
        # Mostly cribbed from https://github.com/facebookresearch/detectron2/blob/d135f1d9bddf68d11804e09722f2d54c0672d96b/detectron2/modeling/meta_arch/rcnn.py#L125
        image_list = ImageList(images.contiguous(), [(height, width) for _ in images])
        features = self.model.backbone(image_list.tensor)
        proposals, _ = self.model.proposal_generator(image_list, features, None)
        list_of_instances, _ = self.model.roi_heads(image_list, features, proposals, None)
        assert len(list_of_instances) == len(x)
        # In-place post-process instances and quantize prediction masks
        for i in range(len(list_of_instances)):
            instances = list_of_instances[i]
            #if len(instances) > 0:
            # Mostly cribbed from https://github.com/facebookresearch/detectron2/blob/d135f1d9bddf68d11804e09722f2d54c0672d96b/detectron2/modeling/meta_arch/rcnn.py#L233
            # However, we monkey-patch to support mask_threshold=None to give us a gradient
            instances = detector_postprocess(instances, orig_height, orig_width, mask_threshold=None)
            # Convert pred_masks score to 0-1 according to threshold
            pred_masks = instances.pred_masks
            # XXX: This implicitly thresholds at whatever threshold `round` uses. This is fine for now since
            # mask_threshold=0.5 normally and is not externally configurable.
            pred_masks = Quantize.apply(pred_masks, 1)
            instances.pred_masks = pred_masks
            list_of_instances[i] = instances
        return list_of_instances
class CachedDetectron2Preprocessor(torch.nn.Module):
    """Serve precomputed Detectron2 predictions from an on-disk cache.

    Instead of running a model, ``forward`` locates an ``.npz`` file derived
    from the last two components of ``parent`` and reconstructs the cached
    ``Instances`` objects for the requested ``indices``.
    """
    def __init__(self, cache_dir):
        super().__init__()
        self.cache_dir = Path(cache_dir)

    def forward(self, x, parent, indices):
        # Same input contract as Detectron2PreprocessorPyTorch.forward; x is
        # only validated here, the cached predictions are what is returned.
        assert len(x.shape) == 4  # NHWC
        assert x.shape[3] == 3  # C = RGB
        assert 0. <= x.min() <= x.max() <= 1.  # values in [0, 1]
        npz_path = self.cache_dir / parent.parts[-2] / (parent.parts[-1] + '.npz')
        cached = np.load(npz_path, allow_pickle=True, mmap_mode='r')['instances'][indices]
        return [Instances(entry['image_size'], **entry['fields']) for entry in cached]

    def estimate_forward(self, x):
        # Cached predictions carry no gradient information.
        raise NotImplementedError
class GaussianDetectron2PreprocessorPyTorch(torch.nn.Module):
    """Detectron2 preprocessor whose input is perturbed by Gaussian noise.

    With ``sigma == 0`` this behaves exactly like
    ``Detectron2PreprocessorPyTorch``.
    """
    def __init__(self, sigma=0, clip_values=None, **detectron2_kwargs):
        super().__init__()
        self.noise_generator = GaussianAugmentationPyTorch(sigma=sigma, augmentation=False, clip_values=clip_values)
        self.detectron2 = Detectron2PreprocessorPyTorch(**detectron2_kwargs)
        logger.info(f"Add Gaussian noise sigma={sigma:.4f} clip_values={clip_values} to Detectron2's input.")

    def forward(self, x):
        # Perturb first, then run standard Detectron2 inference.
        return self.detectron2(self.noise_generator(x))

    def estimate_forward(self, x):
        # Differentiable path through both stages.
        return self.detectron2.estimate_forward(self.noise_generator.estimate_forward(x))
|
<reponame>KiteVX/kite<gh_stars>0
/*
* Click nbfs://nbhost/SystemFileSystem/Templates/Licenses/license-default.txt to change this license
* Click nbfs://nbhost/SystemFileSystem/Templates/GUIForms/JFrame.java to edit this template
*/
package kite;
import com.sun.mail.imap.IMAPFolder;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import javax.mail.*;
import javax.mail.search.FlagTerm;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.mail.Flags.Flag;
import javax.mail.internet.InternetAddress;
import javax.mail.internet.MimeMessage;
import javax.swing.JTextField;
/**
*
* @author Aadhitya, Balasubramanian
*/
public class Appl extends javax.swing.JFrame {
    /**
     * Creates the main Kite window and builds its Swing components.
     */
    public Appl() {
        initComponents();
    }
int portSMTP, portIMAP;
String serverSMTP, serverIMAP;
/**
* This method is called from within the constructor to initialize the form.
* WARNING: Do NOT modify this code. The content of this method is always
* regenerated by the Form Editor.
*/
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
jLabel1 = new javax.swing.JLabel();
jTabbedPane1 = new javax.swing.JTabbedPane();
jPanel1 = new javax.swing.JPanel();
jLabel2 = new javax.swing.JLabel();
jTextField1 = new javax.swing.JTextField();
jLabel3 = new javax.swing.JLabel();
jPasswordField1 = new javax.swing.JPasswordField();
jLabel4 = new javax.swing.JLabel();
jTextField2 = new javax.swing.JTextField();
jLabel5 = new javax.swing.JLabel();
jTextField3 = new javax.swing.JTextField();
jLabel6 = new javax.swing.JLabel();
jScrollPane1 = new javax.swing.JScrollPane();
jTextArea1 = new javax.swing.JTextArea();
jButton1 = new javax.swing.JButton();
jButton2 = new javax.swing.JButton();
jLabel18 = new javax.swing.JLabel();
jPanel2 = new javax.swing.JPanel();
jLabel7 = new javax.swing.JLabel();
jTextField4 = new javax.swing.JTextField();
jLabel8 = new javax.swing.JLabel();
jPasswordField2 = new javax.swing.JPasswordField();
jButton3 = new javax.swing.JButton();
jLabel9 = new javax.swing.JLabel();
jLabel10 = new javax.swing.JLabel();
jTextField5 = new javax.swing.JTextField();
jScrollPane2 = new javax.swing.JScrollPane();
jTextArea2 = new javax.swing.JTextArea();
jButton5 = new javax.swing.JButton();
jPanel3 = new javax.swing.JPanel();
jLabel11 = new javax.swing.JLabel();
jLabel12 = new javax.swing.JLabel();
jTextField6 = new javax.swing.JTextField();
jLabel13 = new javax.swing.JLabel();
jTextField7 = new javax.swing.JTextField();
jLabel14 = new javax.swing.JLabel();
jLabel15 = new javax.swing.JLabel();
jTextField8 = new javax.swing.JTextField();
jLabel16 = new javax.swing.JLabel();
jTextField9 = new javax.swing.JTextField();
jButton4 = new javax.swing.JButton();
jLabel17 = new javax.swing.JLabel();
setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);
setBackground(new java.awt.Color(0, 102, 255));
setForeground(new java.awt.Color(0, 102, 255));
jLabel1.setFont(new java.awt.Font("Segoe UI", 1, 14)); // NOI18N
jLabel1.setText("Kite");
jLabel2.setText("Your email");
jLabel3.setText("Your password ");
jLabel4.setText("Target email");
jLabel5.setText("Subject of the email");
jLabel6.setText("Body");
jTextArea1.setColumns(20);
jTextArea1.setRows(5);
jScrollPane1.setViewportView(jTextArea1);
jButton1.setText("Encrypt & Send");
jButton1.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jButton1ActionPerformed(evt);
}
});
jButton2.setText("Clear fields");
jButton2.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jButton2ActionPerformed(evt);
}
});
javax.swing.GroupLayout jPanel1Layout = new javax.swing.GroupLayout(jPanel1);
jPanel1.setLayout(jPanel1Layout);
jPanel1Layout.setHorizontalGroup(
jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addContainerGap()
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel2)
.addComponent(jLabel3)
.addComponent(jLabel4)
.addComponent(jLabel5)
.addComponent(jLabel6))
.addGap(93, 93, 93)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(jTextField1)
.addComponent(jPasswordField1)
.addComponent(jTextField2)
.addComponent(jTextField3)
.addComponent(jScrollPane1, javax.swing.GroupLayout.DEFAULT_SIZE, 296, Short.MAX_VALUE)))
.addGroup(jPanel1Layout.createSequentialGroup()
.addGap(112, 112, 112)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING, false)
.addComponent(jLabel18, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(jButton1)
.addGap(80, 80, 80)
.addComponent(jButton2)))))
.addContainerGap(118, Short.MAX_VALUE))
);
jPanel1Layout.setVerticalGroup(
jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addContainerGap()
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel2)
.addComponent(jTextField1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel3)
.addComponent(jPasswordField1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(19, 19, 19)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel4)
.addComponent(jTextField2, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(18, 18, 18)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel5)
.addComponent(jTextField3, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(18, 18, 18)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel6)
.addComponent(jScrollPane1, javax.swing.GroupLayout.PREFERRED_SIZE, 140, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(18, 18, 18)
.addComponent(jLabel18, javax.swing.GroupLayout.DEFAULT_SIZE, 15, Short.MAX_VALUE)
.addGap(17, 17, 17)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jButton1)
.addComponent(jButton2))
.addGap(31, 31, 31))
);
jTabbedPane1.addTab("Sender", jPanel1);
jLabel7.setText("Your mail");
jLabel8.setText("Your password");
jButton3.setText("Fetch recent mail");
jButton3.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jButton3ActionPerformed(evt);
}
});
jLabel9.setText("Message from app");
jLabel10.setText("Body");
jTextArea2.setColumns(20);
jTextArea2.setRows(5);
jScrollPane2.setViewportView(jTextArea2);
jButton5.setText("Clear All");
jButton5.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jButton5ActionPerformed(evt);
}
});
javax.swing.GroupLayout jPanel2Layout = new javax.swing.GroupLayout(jPanel2);
jPanel2.setLayout(jPanel2Layout);
jPanel2Layout.setHorizontalGroup(
jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel2Layout.createSequentialGroup()
.addGroup(jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addGroup(jPanel2Layout.createSequentialGroup()
.addContainerGap()
.addGroup(jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel7)
.addComponent(jLabel8))
.addGap(143, 143, 143)
.addGroup(jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(jTextField4)
.addComponent(jPasswordField2, javax.swing.GroupLayout.DEFAULT_SIZE, 236, Short.MAX_VALUE)))
.addGroup(jPanel2Layout.createSequentialGroup()
.addContainerGap()
.addGroup(jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel10)
.addComponent(jLabel9, javax.swing.GroupLayout.PREFERRED_SIZE, 103, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(121, 121, 121)
.addGroup(jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jTextField5)
.addComponent(jScrollPane2, javax.swing.GroupLayout.DEFAULT_SIZE, 236, Short.MAX_VALUE)))
.addGroup(jPanel2Layout.createSequentialGroup()
.addGap(195, 195, 195)
.addGroup(jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jButton3)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, jPanel2Layout.createSequentialGroup()
.addComponent(jButton5)
.addGap(30, 30, 30)))))
.addContainerGap(152, Short.MAX_VALUE))
);
jPanel2Layout.setVerticalGroup(
jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel2Layout.createSequentialGroup()
.addGap(20, 20, 20)
.addGroup(jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel7)
.addComponent(jTextField4, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(18, 18, 18)
.addGroup(jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel8)
.addComponent(jPasswordField2, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(18, 18, 18)
.addComponent(jButton3)
.addGap(18, 18, 18)
.addGroup(jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel9)
.addComponent(jTextField5, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGroup(jPanel2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel2Layout.createSequentialGroup()
.addGap(18, 18, 18)
.addComponent(jLabel10))
.addGroup(jPanel2Layout.createSequentialGroup()
.addGap(10, 10, 10)
.addComponent(jScrollPane2, javax.swing.GroupLayout.PREFERRED_SIZE, 140, javax.swing.GroupLayout.PREFERRED_SIZE)))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, 36, Short.MAX_VALUE)
.addComponent(jButton5)
.addGap(33, 33, 33))
);
jTabbedPane1.addTab("Receiver", jPanel2);
jLabel11.setFont(new java.awt.Font("Segoe UI", 1, 12)); // NOI18N
jLabel11.setText("SMTP ");
jLabel12.setText("SMTP Mail Server");
jLabel13.setText("Port number");
jLabel14.setFont(new java.awt.Font("Segoe UI", 1, 12)); // NOI18N
jLabel14.setText("IMAP");
jLabel15.setText("IMAP mail server");
jLabel16.setText("Port number");
jButton4.setText("Save config");
jButton4.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jButton4ActionPerformed(evt);
}
});
javax.swing.GroupLayout jPanel3Layout = new javax.swing.GroupLayout(jPanel3);
jPanel3.setLayout(jPanel3Layout);
jPanel3Layout.setHorizontalGroup(
jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel3Layout.createSequentialGroup()
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING, false)
.addGroup(javax.swing.GroupLayout.Alignment.LEADING, jPanel3Layout.createSequentialGroup()
.addContainerGap()
.addComponent(jLabel17, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel3Layout.createSequentialGroup()
.addContainerGap()
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel11)
.addComponent(jLabel14)
.addGroup(jPanel3Layout.createSequentialGroup()
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel12)
.addComponent(jLabel13)
.addComponent(jLabel15)
.addComponent(jLabel16))
.addGap(65, 65, 65)
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jTextField9, javax.swing.GroupLayout.PREFERRED_SIZE, 63, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(jTextField7, javax.swing.GroupLayout.PREFERRED_SIZE, 63, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jTextField6)
.addComponent(jTextField8, javax.swing.GroupLayout.DEFAULT_SIZE, 288, Short.MAX_VALUE))))))
.addGroup(jPanel3Layout.createSequentialGroup()
.addGap(215, 215, 215)
.addComponent(jButton4))))
.addContainerGap(168, Short.MAX_VALUE))
);
jPanel3Layout.setVerticalGroup(
jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel3Layout.createSequentialGroup()
.addContainerGap()
.addComponent(jLabel11)
.addGap(18, 18, 18)
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel12)
.addComponent(jTextField6, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(18, 18, 18)
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel13)
.addComponent(jTextField7, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(34, 34, 34)
.addComponent(jLabel14)
.addGap(18, 18, 18)
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel15)
.addComponent(jTextField8, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(18, 18, 18)
.addGroup(jPanel3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel16)
.addComponent(jTextField9, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(52, 52, 52)
.addComponent(jButton4)
.addGap(18, 18, 18)
.addComponent(jLabel17, javax.swing.GroupLayout.PREFERRED_SIZE, 27, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap(48, Short.MAX_VALUE))
);
jTabbedPane1.addTab("Settings", jPanel3);
javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane());
getContentPane().setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
.addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jLabel1)
.addGap(304, 304, 304))
.addGroup(layout.createSequentialGroup()
.addContainerGap()
.addComponent(jTabbedPane1)
.addContainerGap())
);
layout.setVerticalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addComponent(jLabel1)
.addGap(18, 18, 18)
.addComponent(jTabbedPane1)
.addContainerGap())
);
pack();
}// </editor-fold>//GEN-END:initComponents
    // Functions for encryption, decryption and values of keys.
    // SECURITY NOTE(review): a constant IV with AES/CBC makes identical
    // plaintexts produce identical ciphertexts, and a hard-coded key in source
    // defeats confidentiality. Both should be generated per message/session;
    // left unchanged here because the receiving side depends on them.
    static String IV = "AAAAAAAAAAAAAAAA";
    static String key = "<KEY>"; // AES key material (256 bits - 32 chars)

    /**
     * Encrypts content with AES/CBC/PKCS5Padding and returns it Base64-encoded.
     *
     * @param originalContent plaintext bytes to encrypt
     * @param keyText key text; must be 16/24/32 bytes when UTF-8 encoded
     * @return Base64 string of the ciphertext
     * @throws Exception on any JCE failure (bad key length, cipher error)
     */
    public static String encrypt(byte[] originalContent, String keyText) throws Exception {
        Cipher encrypt = Cipher.getInstance("AES/CBC/PKCS5Padding");
        SecretKeySpec key = new SecretKeySpec(keyText.getBytes(StandardCharsets.UTF_8), "AES");
        encrypt.init(Cipher.ENCRYPT_MODE, key, new IvParameterSpec(IV.getBytes(StandardCharsets.UTF_8)));
        byte[] encryptedBytes = encrypt.doFinal(originalContent);
        return Base64.getEncoder().encodeToString(encryptedBytes);
    }
public static String decrypt(String encryptedContent, String keyText) throws Exception {
byte[] messageInBytes = Base64.getMimeDecoder().decode(encryptedContent);
Cipher decrypt = Cipher.getInstance("AES/CBC/PKCS5Padding");
SecretKeySpec key = new SecretKeySpec(keyText.getBytes(StandardCharsets.UTF_8), "AES");
decrypt.init(Cipher.DECRYPT_MODE, key,new IvParameterSpec(IV.getBytes(StandardCharsets.UTF_8)));
byte[] decryptedBytes = decrypt.doFinal(messageInBytes);
return new String(decryptedBytes);
}
//SMTP mail code
private static Session createSession(Properties prop, String from, String password) {
Session sess = Session.getInstance(prop, new javax.mail.Authenticator() {
@Override
protected javax.mail.PasswordAuthentication getPasswordAuthentication() {
return new javax.mail.PasswordAuthentication(from, password);
}
});
return sess;
}
private static Message createMessage(Session sess,Properties prop,String from,String to,String sub,String Msgg) throws Exception{
Message msg = new MimeMessage(sess);
msg.setFrom(new InternetAddress(from));
msg.setRecipient(Message.RecipientType.TO, new InternetAddress(to));
msg.setSubject(sub);
msg.setText(Msgg);
return msg;
}
private boolean sendMail(String to, String sub, String msgg,String frm,String passk) {
try{
Properties prop = new Properties();
prop.put("mail.smtp.auth", "true");
prop.put("mail.smtp.starttls.enable", "true");
prop.put("mail.smtp.trust", serverSMTP);
prop.put("mail.smtp.host", serverSMTP);
prop.put("mail.smtp.port", portSMTP);
Session sess = createSession(prop, frm, passk);
String encriptedText = encrypt( msgg.getBytes(), key);
Message msg = createMessage(sess, prop, frm, to,sub,encriptedText);
Transport.send(msg);
} catch (Exception e) {
return false;
}
return true;
}
public static String mailFetch(String server, int port, String mail, String pass) throws Exception {
String mailServer = server;
String mailID = mail;
String passKey = pass;
IMAPFolder folder = null;
Store store = null;
String subject = null;
Flag flag = null;
Properties props = System.getProperties();
props.setProperty("mail.store.protocol", "imaps");
Session session = Session.getDefaultInstance(new Properties( ));
store = session.getStore("imaps");
store.connect(mailServer, port, mailID, passKey);
folder = (IMAPFolder) store.getFolder("inbox");
folder.open( Folder.READ_ONLY );
// Fetch unseen messages from inbox folder
Message[] messages = folder.search(new FlagTerm(new Flags(Flags.Flag.SEEN), true));
Message msg = messages[messages.length - 1]; // Fetching latest mail
System.out.println(msg.getContent().toString());
String encrypt = msg.getContent().toString();
return encrypt;
}
    // "Clear fields" on the Sender tab: reset every input to empty.
    private void jButton2ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton2ActionPerformed
        jTextField1.setText("");
        jPasswordField1.setText("");
        jTextField2.setText("");
        jTextField3.setText("");
        jTextArea1.setText("");
    }//GEN-LAST:event_jButton2ActionPerformed
    // "Clear All" on the Receiver tab: reset every input to empty.
    private void jButton5ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton5ActionPerformed
        jTextField4.setText("");
        jPasswordField2.setText("");
        jTextField5.setText("");
        jTextArea2.setText("");
    }//GEN-LAST:event_jButton5ActionPerformed
private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed
// TODO add your handling code here:
// Code for email import part
String sender = jTextField1.getText();
String passKey = <PASSWORD>.getText();
String receiver = jTextField2.getText();
String subject = jTextField3.getText();
String body = jTextArea1.getText();
if (sendMail(receiver,subject,body,sender,passKey)) {
jLabel18.setText("The email is successfully sent ");
}
else {
jLabel18.setText("Error occured while sending !!!");
}
}//GEN-LAST:event_jButton1ActionPerformed
private void jButton3ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton3ActionPerformed
// TODO add your handling code here:
// Code for receiving and decrypting mail
String mail = jTextField4.getText();
String pass = <PASSWORD>();
String body = null;
String b0dy = null;
try {
body = mailFetch(serverIMAP, portIMAP, mail, pass);
//body = "wOgB3rxBHZXgftxQxdfD1A==";
System.out.println(body);
} catch (Exception ex) {
Logger.getLogger(Appl.class.getName()).log(Level.SEVERE, null, ex);
}
jTextField5.setText("Mail fetched. Decrypting...");
// Decryption part
try {
b0dy = decrypt(body, key);
}
catch (Exception e)
{
e.printStackTrace();
}
jTextArea2.setText(b0dy);
}//GEN-LAST:event_jButton3ActionPerformed
private void jButton4ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton4ActionPerformed
// TODO add your handling code here:
portSMTP = Integer.parseInt(jTextField7.getText());
portIMAP = Integer.parseInt(jTextField9.getText());
serverSMTP = jTextField6.getText();
serverIMAP = jTextField8.getText();
}//GEN-LAST:event_jButton4ActionPerformed
/**
* @param args the command line arguments
*/
public static void main(String args[]) {
/* Set the Nimbus look and feel */
//<editor-fold defaultstate="collapsed" desc=" Look and feel setting code (optional) ">
/* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.
* For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html
*/
try {
for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {
if ("Nimbus".equals(info.getName())) {
javax.swing.UIManager.setLookAndFeel(info.getClassName());
break;
}
}
} catch (ClassNotFoundException ex) {
java.util.logging.Logger.getLogger(Appl.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
} catch (InstantiationException ex) {
java.util.logging.Logger.getLogger(Appl.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
} catch (IllegalAccessException ex) {
java.util.logging.Logger.getLogger(Appl.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
} catch (javax.swing.UnsupportedLookAndFeelException ex) {
java.util.logging.Logger.getLogger(Appl.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
}
//</editor-fold>
/* Create and display the form */
java.awt.EventQueue.invokeLater(new Runnable() {
public void run() {
new Appl().setVisible(true);
}
});
}
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JButton jButton1;
private javax.swing.JButton jButton2;
private javax.swing.JButton jButton3;
private javax.swing.JButton jButton4;
private javax.swing.JButton jButton5;
private javax.swing.JLabel jLabel1;
private javax.swing.JLabel jLabel10;
private javax.swing.JLabel jLabel11;
private javax.swing.JLabel jLabel12;
private javax.swing.JLabel jLabel13;
private javax.swing.JLabel jLabel14;
private javax.swing.JLabel jLabel15;
private javax.swing.JLabel jLabel16;
private javax.swing.JLabel jLabel17;
private javax.swing.JLabel jLabel18;
private javax.swing.JLabel jLabel2;
private javax.swing.JLabel jLabel3;
private javax.swing.JLabel jLabel4;
private javax.swing.JLabel jLabel5;
private javax.swing.JLabel jLabel6;
private javax.swing.JLabel jLabel7;
private javax.swing.JLabel jLabel8;
private javax.swing.JLabel jLabel9;
private javax.swing.JPanel jPanel1;
private javax.swing.JPanel jPanel2;
private javax.swing.JPanel jPanel3;
private javax.swing.JPasswordField jPasswordField1;
private javax.swing.JPasswordField jPasswordField2;
private javax.swing.JScrollPane jScrollPane1;
private javax.swing.JScrollPane jScrollPane2;
private javax.swing.JTabbedPane jTabbedPane1;
private javax.swing.JTextArea jTextArea1;
private javax.swing.JTextArea jTextArea2;
private javax.swing.JTextField jTextField1;
private javax.swing.JTextField jTextField2;
private javax.swing.JTextField jTextField3;
private javax.swing.JTextField jTextField4;
private javax.swing.JTextField jTextField5;
private javax.swing.JTextField jTextField6;
private javax.swing.JTextField jTextField7;
private javax.swing.JTextField jTextField8;
private javax.swing.JTextField jTextField9;
// End of variables declaration//GEN-END:variables
}
|
Recent synthetic methodologies for imidazopyridines and related heterocycles. Imidazopyridine is a significant structural component of a large number of agrochemicals and pharmaceuticals. The synthesis of imidazopyridine has been a subject of intense research for numerous decades. A large number of transformations are now available to conveniently access imidazopyridine from readily available starting materials. This review details the recent development in imidazopyridine construction involving cyclocondensation, cycloaddition, oxidative cyclization, and transannulation reactions. |
Effects of field parameters on IMRT plan quality for gynecological cancer: A case study Traditional external beam radiotherapy of gynecological cancer consists of a 3D, four-field-box technique. The radiation treatment area is a large region of normal tissue, with greater inhomogeneity over the treatment volume, which could benefit more with intensity-modulated radiation therapy (IMRT). This is a case report of IMRT planning for a patient with endometrial cancer. The planning target volume (PTV) spanned the intrapelvic and periaortic lymph nodes to a 33-cm length. Planning and treatment were accomplished using double isocenters. The IMRT plan was compared with a 3D plan, and the effects of field parameters were studied. Delineated anatomical contours included the intrapelvic nodes (PTV), bone marrow, small bowel, bladder, rectum, sigmoid colon, periaortic nodes (PTV), spinal cord, left kidney, right kidney, large bowel, liver, and tissue (excluding the PTVs). Comparisons were made between IMRT and 3D plans, 23-MV and 6-MV energies, zero and rotated collimator angles, different numbers of segments, and opposite gantry angle configurations. The plans were evaluated based on dose-volume histograms (DVHs). Compared with the 3D plan, the IMRT plan had superior dose conformity and spared the bladder and sigmoid colon embedded in the intrapelvic nodes. The higher energy (23 MV) reduced the dose to most critical organs and delivered less integral dose. Zero collimator angles resulted in a better plan than optimized collimator angles, with lower dose to most of the normal structures. The number of segments did not have much effect on isodose distribution, but a reasonable number of segments was necessary to keep treatment time from being prohibitively long. Gantry angles, when evenly spaced, had no noticeable effect on the plan. The patient tolerated the treatment well, and the initial complete blood count was favorable. 
Our results indicated that largevolume tumor sites may also benefit from precise conformal delivery of IMRT. PACS numbers: 87.53.Kn, 87.53.Tf Effects of field parameters on IMRT plan quality for gynecological cancer: A case study I. INTRODUCTION It is estimated that there are 83 000 new cases of gynecological cancer in the United States in 2004 and 29 000 deaths per year. Half the gynecological cancer cases originate at the uterine corpus. The American Cancer Society estimated that "a malignant endometrial tumor will develop in approximately 700,000 of the 48 million women in the United States aged 35 or older. For late-stage disease with metastasis to the intrapelvic and para-aortic lymph nodes, treatment often involves surgery and postoperative radiation therapy and/or chemotherapy. The five-year survival rate for lymph node positive disease ranges from 20% to 60%. The radiation therapy regimens typically consist of external beam radiation and/or intracavitary brachytherapy. Traditional external beam technique uses a 3D, four-field box with shielding by multileaf collimators (MLCs). This four-field-box-technique includes a substantial amount of normal tissue in the treated field, causing morbidity and complication. Intensity-modulated radiation therapy (IMRT) is one of the most significant technological advances in radiation therapy in the past decade. A majority of radiation clinics in the United States have implemented IMRT or plan to implement IMRT in the near future. The common treatment sites related to IMRT include head and neck, prostate, and intracranial tumors. Gynecological treatment has not been a common area for IMRT. A recent survey indicated that IMRT was being performed on 88% of head and neck patients, while only 15% of gynecological cancer radiation therapy patients were treated with IMRT. 
The conventional thinking is that IMRT is more suitable for small-volume tumors, while late-stage gynecological cancer, with a large volume of lymph nodes as target volume, is not an appropriate candidate for IMRT. While there is some truth to this, the opposite argument is equally valid. Radiation for gynecological malignancies delivers extensive dose to a large region of normal tissue, which may benefit from the technical capabilities of IMRT. There are several reports in the literature on IMRT planning for gynecological malignancies. Mundt et al. compared conventional whole pelvic radiation therapy plans to IMRT plans for 10 patients (5 cervical, 5 endometrial). They found that IMRT resulted in more conformal dose and a reduction of irradiated volume in small bowel, rectum, and bladder. Heron et al. did a similar comparison on 10 gynecological patients and arrived at the same conclusion. Adli et al. investigated the IMRT planning of 16 patients and discovered that prone positioning with a belly board decreased the small bowel even more. The published articles on gynecological IMRT have used one single isocenter for all treatment beams. This report is a case study of IMRT planning in a patient with endometrial cancer. The planning target volume (PTV) spanned the periaortic and the pelvic lymph nodes to a length of 33 cm. This was larger than the IMRT capability of MLCs in most of the LINACs. Planning and treatment were accomplished using two separate isocenters. All critical organs in the abdominal and pelvic regions were delineated, and their dose distributions were investigated. The IMRT plan was compared with a traditional 3D plan. The field parameters in IMRT planning were also studied: beam energy, collimator angles, the number of segments, and gantry angles, to observe the effect of these parameters on the final dose distribution. II. METHODS The patient is a 53-year-old female who had FIGO stage IIIC, grade 3 endometrial adenocarcinoma. 
The patient had undergone total abdominal hysterectomy with bilateral salpingo-oophorectomy and pelvic as well as periaortic lymphadenectomy and pelvic washings. One of the 8 lymph nodes from left pelvic node dissection and one of 12 from the right were positive for metastatic carcinoma. Four periaortic lymph nodes were recovered, all of which were negative for malignancy spread. It was decided to proceed with postoperative radiation therapy followed by systemic chemotherapy. The patient was simulated for radiation treatment with CT. Simulation was done in a supine posture with the arms above the head. A Vac Lok® (MED-TEC Inc., Orange City, IA ) cradle was used for immobilization, and a styroform block was placed between the taped feet. CT contrast was not used. The Pinnacle radiation planning system (Philips Medical Systems, Bothell, WA) was used for the IMRT and 3D planning, and radiation was delivered with segmental MLCs of a Siemens Primus LINAC (Siemens Medical Solution, Erlangen, Germany). The goals of radiation therapy were to treat both the pelvic and periaortic lymph nodes to a dose prescription of 45 Gy in 1.8-Gy fractions, while minimizing the risk for small bowel morbidity and sparing bone marrow. The bone marrow would be important for chemotherapy afterward. Thirteen anatomical contours in the pelvic and abdominal regions were delineated in the plan. The planning target volumes, as defined in ICRU Report 50, were intrapelvic and periaortic nodes. The tissue structure included all the tissue in the CT image series with the target volumes (intrapelvic and periaortic nodes) excluded. The dose to tissue would give a measure of the "integral dose" delivered. We studied the effect on the plan quality by various treatment parameters: energy, collimator angle, number of segments, and gantry angles. 
Table 1 lists the parameters of the "original" plan (the original parameters were used for the actual treatment of the patient), as well as the alternative parameter settings for plan comparison in this study. The whole PTV, including intrapelvic and periaortic nodes, traversed 33 cm in the superior-inferior direction and 15 cm in the lateral direction. The total target volume was 1293 cm 3. Since the target volumes were lymph nodes, it was not possible to separately distinguish the gross (GTV) or clinical (CTV) target volume; hence, the physician directly drew the PTV. Although the Primus MLCs permitted a maximum field size of 40 cm at the isocenter, only the middle 27 cm of a 1-cm leaf width could be used for IMRT delivery. Therefore, it was necessary to split the treatment field into two isocenters: one at the pelvis and the other at the abdomen, 15 cm from each other. The original IMRT plan had a total of 14 beams, that is, 7 beams centered at each isocenter, with evenly spaced gantry angles starting at 0° (anterior). The dose "objectives" used in the optimization that generated the original IMRT plan are listed in Table 2. As stated, it was possible to put in more than one objective for each region of interest (ROI). The critical organs included in the optimization objectives were all in the pelvic region, except for the bone marrow, which extended from the abdomen to the pelvis. The same objectives were used in all the IMRT plans for fair comparison. The number of iterations used in optimization was 25 because the total cost function was observed to be stabilized (i.e., not decreasing any more) well before the 25th iteration. For quality assurance, a hybrid plan composed of the treatment beams irradiating the virtual water phantom was generated, and the doses at the two isocenters were verified with ion chamber measurement. Table 1. 
Parameters of the "original" plan (the original parameters were used for the actual treatment of the patient), as well as the alternative parameter setting for plan comparison in this study Table 2. Objectives used in the optimization that generated the original IMRT plan Test 1 compared the IMRT plan with a 3D non-IMRT plan. The typical 3D plans had fewer radiation beams than the IMRT plans. Our alternative 3D plan had 4 × 2 = 8 fields, giving 4-field box distributions. The MLC apertures of the 3D portals were designed based on the beam's-eye views (BEVs) of the PTVs. The collimator settings for the 3D plan were different from those of the IMRT plan, especially at the junction between the pelvic beams and the abdominal beams, to ensure proper field matching. In Test 2, the IMRT optimization was rerun from iteration one with the same settings and objectives. The only change was the energy, which changed from 23 MV to 6 MV. Test 3 examined the effect of the collimator angle rotation. By rotating the collimator angle and making the PTVs more "diagonal" in the BEV, the abdominal beams could encompass more of the intrapelvic nodes ( Fig. 1), and the pelvic beams could encompass more of the periaortic nodes. We studied whether this would result in a better plan. We used another planning system, the Nomos Corvus system (Sewickley, PA), to automatically determine the "optimal" collimator angle for each field relative to the shape of the PTVs. For each beam, the algorithm found the collimator angle so that the MLC leaves were perpendicular to the longest dimension of the combined PTVs. These rotated collimator angles were put in the ADAC Pinnacle system for IMRT optimization. Test 4 investigated the effect of the total number of MLC segments. The number of MLC segments determined the radiation-on time, and we aimed at keeping it within 20 min. 
The original result from the Pinnacle optimization showed the intensity profile of each beam, which was then converted to MLC segments. Depending on the conversion criteria, the resulting MLC segments might give intensities slightly different than the ideal solutions, with fewer segments resulting in larger differences. All plans in the tests were reoptimized, resulting in slightly different segments. For Test 4, the alternative plan was converted to about three times the number of segments of the original plan. This would indicate how much improvement to expect from increasing the number of segments, although we were aware that the alternative plan would prohibitively take longer to deliver. Test 5 was a plan with all gantry angles inverted 180° and opposite to the original. The seven gantry angles are depicted in Fig. 2. the 3D (gantry 0) plan, and (c) test 5 IMRT plan with opposite gantry angles. The bladder (purple) and the sigmoid colon (gray) are at the center and are deeply embedded in the intrapelvic nodes (red). Both IMRT plans tailored the prescription isodose (yellow) around the bladder and sigmoid colon, excluding these two organs from the highest dose. The 3D plan, as expected, delivered a convex uniform dose for the whole area, and the doses to bladder and sigmoid colon were as high as those of the PTVs. III. RESULTS AND DISCUSSION The plans were primarily evaluated by comparing the dose volume histograms (DVHs). Figures 3 to 7 show the DVHs of tests 1 to 5, respectively. For each test, (a) contained the structures in the pelvic regions, while (b) contained those in the abdominal regions. Bone marrow was included in the pelvic DVH graph, and tissue was included in the abdominal DVH graph arbitrarily. The symbols O, 3, E, C, and G stand for the original, 3D, energy 6 MV, collimator rotated, and gantry 180° plans, respectively. 
For the sake of this study, all the plans were normalized so that the DVHs of the intrapelvic and the periaortic nodes each had 90% of the volume receiving more than the prescribed dose of 45 Gy, that is, D90 = 45 Gy. Identical normalization ensured clinically relevant comparison. Table 3 summarizes the differences of each plan in every organ delineated. Table 4 lists the D10 (dose delivered to at least 10% of the volume) for every organ in each plan. D10 was chosen as a robust indicator of the maximum dose for each anatomical structure. The ranking in Table 3 might not agree with the numbers in Table 4, since Table 3 is based on the whole DVH curves, while Table 4 essentially represents single points on the DVH curves. Table 3. Summary of the differences of each plan in every organ delineated √ = better than the original plan; 0 = similar to the original plan; x = worse than the original plan; xx = much worse than the original plan Table 4.) Figure 2 compares the isodose distributions at the pelvic level of (a) the original 23-MV IMRT plan, (b) the 3D 23-MV plan (test 1), and (c) the 6-MV IMRT plan (test 2). The bladder (purple) and the sigmoid colon (gray) are at the center and are deeply embedded in the intrapelvic nodes (red). Both IMRT plans tailored the prescription isodose (yellow) around the bladder and sigmoid colon to exclude these two organs from the highest dose. The 3D plan, as expected, delivered a convex uniform dose for the whole area, and the doses to bladder and sigmoid colon were as high as that of the PTVs. Hence, the IMRT plan was more conformal than the 3D plan. It was found that the dose uniformity of the PTVs was better in the 3D than the IMRT plan. The dose to tissue, indicating the integral dose, was in fact less with the 3D plan. This represented a workable compromise to achieve IMRT conformity. The unexpected result here was that large bowel had lower dose in the 3D plan. 
IMRT, while keeping most of the critical organs in lower doses, had to deliver the radiation somewhere. In this case, higher dose was delivered to the large bowel with IMRT. Nevertheless, the 3D plan was overall considered inferior to the IMRT plan. The 3D planning process has its own problem of field matching at the pelvic-abdominal junction. Figure 8 shows the coronal view of isodose distribution of two 3D plans with slightly different Y1 jaw settings of the abdominal fields: (a) Y1 = 8.9 cm, and (b) Y1 = 9 cm. When the field size was too small, a cold spot developed at the junction, while a one-millimeter increase in field size created a hot spot. The DVHs of the 3D plan in Fig. 3 were generated using Y1 = 8.9 cm. This demonstrates the importance of careful field matching in 3D planning. In contrast, no manual field matching was necessary for double isocenter IMRT. Dose nonuniformity at the junction was taken into account with the IMRT optimization objectives. The algorithm automatically generated a solution including field sizes and IMRT segments, and the resulting IMRT plans showed no problem at the pelvic-abdominal junction. This is one of the key advantages of double isocenter IMRT. B. Energy 6 MV Most anatomical structures received less desirable doses with 6 MV than 23 MV. The maximum doses in the intrapelvic and periaortic nodes were higher with 6 MV at the same normalization of D90 = 45 Gy, meaning worse PTV homogeneity with 6 MV. The doses to the small bowel, bladder, sigmoid colon, large bowel, and liver were also higher with 6 MV. Some critical organs, such as bone marrow, spinal cord, and left kidney, received lower doses with 6 MV. The integral dose in general, as indicated by the dose to tissue, was higher with 6 MV. Dose in the 6-MV plan tended to spread out more into the surrounding normal structures, which seemingly made the 6-MV plan inferior. However, recall that the plans only considered dose from photons and ignored the neutron dose. 
It is well known that high-energy X-rays (above 10 MV) generate neutrons from the LINAC, which may increase the possibility of secondary malignancy. In addition to the neutron dose to the patients with higher-energy photon beams, there is another health risk by the neutron-activated products : the residual radiation after the treatment of higher-energy IMRT can be considerable to therapists who enter the treatment room often. The physician should decide whether the photon dose advantage of higher energy is more significant than the neutron dose disadvantage.. C. Collimator angle rotation The rotated collimator angles as determined by the Corvus system are listed in Table 5. The plan with "optimized" collimator rotation was uniformly worse than the original plan with zero collimator angle. Bone marrow, small bowel, bladder, rectum, right kidney, and large bowel all received higher doses in the collimator rotated plan. There was not a single anatomical structure in which the collimator-rotated plan fared better. We postulated that although the pelvic-centered fields covered some of the periaortic nodes with rotated collimator, the 6.5-cm wide superior end leaf of the MLC did not facilitate precise dose delivery, and the same for the inferior end leaf of the abdominal-centered fields. D. Number of segments Optimization created different intensity profiles for each beam. A conversion from intensity profiles to MLC segments was necessary, and the converted MLC segments might not reproduce exactly the ideal intensity profiles. The Pinnacle conversion algorithm allowed criteria to discard segments that might not have a significant effect on total dose distribution. This kept the number of segments within a practical level. The original IMRT plan was generated with the following settings: error tolerance = 7%, minimum segment area = 5 cm 2, and minimum segment monitor units (MUs) = 4. That plan had a total of 165 segments for 14 fields. 
From the same optimization result, we also converted the MLC segments with error tolerance of 3%, no minimum segment area, and no minimum segment MUs. This gave a plan with 509 segments. It was found that permitting more MLC segments resulted in better uniformity of the two PTVs. However, the plan with a higher number of segments delivered a higher dose to the abdominal critical organs, such as the spinal cord, right kidney, large bowel, and liver. It also resulted in a higher integral dose as indicated by the dose to normal tissue. A plan with more segments should be able to get better optimization results than one with fewer segments in general. The abdominal organ dose increased by 509 segments may be attributed to random occurrence. For this particular patient case, it was not possible to clearly conclude whether the plan with more or fewer segments was better. Given that a larger number of segments prolonged treatment time at the LINAC, an attempt to increase the number of segments in the hope of a better plan was determined impractical. Incidentally, when the intensity profiles were different, the same conversion criteria did not guarantee exactly the same number of segments. Our test plans generated the following number of segments: original, 165; energy 6 MV, 164; collimator rotated, 150; segment test, 509; gantry 180°, 168. E. Gantry angle The test plan used gantry angles complementary to the original plan. Hence, 0° became 180°, 205° became 25°, etc. It was generally believed that as long as the fields were evenly spaced around the treatment area, the exact gantry angles did not create a significant difference in achievable dose distribution. Our results confirmed this belief. The DVH curves of both gantry configurations were essentially similar for every anatomical structure. The two plans could be considered identical. The choice of gantry angles might be based on other considerations, such as couch interference during radiation delivery. 
The above findings were based on evaluating each plan and all the DVHs as a whole. We were careful in our evaluation to not read too much into the specific DVH of each anatomical structure in this study (i.e., if bone marrow preservation is of top priority for a patient, among our plans the 6-MV plan gave the best DVH, but that did not conclude that 6 MV was the method of choice in sparing bone marrow). Individual DVH results were probably due more to random chance and not a result of the absolute merit of that treatment parameter. The ranking of each structure in Table 3 was rather subjective, particularly on whether the pairs of DVHs were similar, so it would not be surprising if some readers disagree with a few of the rankings. Another limitation of our study was that the treatment plans did not explicitly take into consideration the motion of the various organs. The magnitude of patient movement in the abdomen and the pelvis likely had a significant effect on degrading the quality of radiation delivery. Tissue heterogeneity was ignored in all plans, that is, dose calculation was performed assuming unit density in all the tissue. Nevertheless, it was not expected that the inclusion of patient motion or tissue heterogeneity would alter the relative quality of the alternative plans. We chose not to investigate the effect of optimization objectives on isodose distribution, since any result might be peculiar to the Pinnacle planning system or the geometry of this particular patient. Our results are in agreement with the findings from other investigators. Application of IMRT on a large abdominal target has been previously reported. Hong et al. studied 10 patients with whole-abdomen radiation and discovered that IMRT resulted in significant dose reduction to the bones and improved PTV coverage as compared with conventional treatment. The high-dose regions within the PTV increased slightly. Mundt et al. 
studied 10 patients with pelvic treatment and found that IMRT dose distribution (compared with 3D) was more conformal to the PTV, with better DVHs for small bowel, rectum, and bladder. They also found IMRT plans resulted in more dose inhomogeneity within the PTV. Heron et al. studied 10 patients treated with pelvic fields and concluded that the doses to small bowel, bladder, rectum were lower from IMRT versus 3D. Ahmed et al. studied 5 patients with abdominal treatment and demonstrated reduced doses to bone marrow, bowel, spinal cord, and both kidneys. Our study was unique in several ways: the treated PTV (encompassing the intrapelvic and periaortic nodes with 33 cm total length) was among the largest of IMRT planning reported in the literature. This necessitated two isocenters and a total of 14 radiation fields. Our study was also meticulous in terms of delineating all critical structures involved in the abdominal and pelvic regions. Finally, while most investigations in IMRT gynecological treatment focused on comparison with the 3D plans, we also studied the effects of various IMRT parameters on isodose distribution. The discussion in literature of large-volume IMRT treatment often included the limitation in field width of some models of LINAC (maximum 14.5 cm), and the consequent splitting of IMRT fields into twice the number of smaller subfields. The Primus LINAC had a limitation in field width of 20 cm, and we did not need to split any of our IMRT fields into smaller fields. Since IMRT was developed in the past decade, most of the treatment delivered has been for small-volume tumor sites, such as head and neck, prostate, and intracranial locations. There was an unspoken assumption within the radiation oncology community that large-volume tumor sites were not suitable candidates for IMRT. We believe that this may be misplaced dogma and that large treatment fields encompassing extensive normal tissue may also benefit from precise conformal delivery of IMRT. 
IMRT allows for higher than conventional dose delivered to the target volume, while sparing critical organs from radiation toxicity. The patient tolerated this high-dose treatment with remarkable condition, and the initial lab finding on complete blood count was favorable. It is expected that more research effort will be directed to this area, with adequate patient numbers and follow-up to evaluate the clinical outcome of IMRT in gynecological treatment. IV. CONCLUSIONS In summary, we have planned and treated a patient with endometrial carcinoma using IMRT. The PTV including the intrapelvic and periaortic lymph nodes had a length of 33 cm, and treatment was accomplished with two isocenters. In this case study, we compared the effect of various field parameters on the resultant plan. The findings were the following: the IMRT plan delivers a smaller amount of dose to critical organs compared with the 3D plan. High energies (23 MV) produce a more desirable plan because of lower total photon integral dose (ignoring neutron dose). Collimator rotation is unnecessary and, in fact, leads to a slightly inferior plan. Including more MLC segments does not improve plan quality significantly and certainly increases the treatment time. Switching the gantry angles to opposite directions does not change the isodose distribution in any meaningful way. |
Is College the New High School? Outsourcing American jobs will eventually destroy our education system. Our schools will be unrecognizable in 50 years--and not in a good way. Changes in our economy will destroy the integrity of U.S. colleges and, eventually, every public and private school beneath them. As middle-class manufacturing jobs have moved beyond U.S. borders, education has felt certain ripple effects. It's all well and good to say education's goal is to create enlightened individuals who become more complete persons by the expansion of their mental horizons. But, arguably, the most important goal of educational institutions is to prepare young people to be socially and economically self-sufficient. In short, a capitalist society needs to inculcate its paradigm through its educational institutions to ensure that future workers have the skills to hold meaningful, productive jobs. K-12 education used to rise to this challenge admirably, and plenty of jobs were available for high school graduates. As jobs disappear, we must investigate exactly what kind of an economy we are preparing young people for. Obviously, they will enter a world in which few consumer goods will be produced in this country. Retail jobs will still be here, but hundreds of thousands of manufacturing jobs, if not millions, will be lost to outsourcing. A person with only a high school education will face one of three options: move into the higher socioeconomic class, accept a career in retailing (in which a sales clerk generally makes much less than a drill-press operator), or sink into the lower socioeconomic class. I feel they will opt for the first choice. This means increased college enrollment because a move to upper socioeconomic professions usually requires an advanced degree. In short, college will soon become the new high school. 
In a few years, having a bachelor's degree will be the rough equivalent of having today's high school diploma, and a master's degree will be the new entry ticket to the "good life" formerly obtainable with a bachelor's degree. In other words, as the economic paradigm changes, the education paradigm must change with it, unless one wants a society in which scholars who can quote Shakespeare are working at check-out counters or, much worse, are on the state dole. Here's the rub. While all people are created equal, not all possess the same intellectual ability. This means that, in the near future, many more high school graduates entering community colleges won't be able to function well in an advanced curriculum. |
Visual data mining of genomic databases by immersive graph-based exploration Biologists are leading current research on genome characterization (sequencing, alignment, transcription), providing a huge quantity of raw data about many genome organisms. Extracting knowledge from this raw data is an important process for biologists, usually using data mining approaches. However, it is difficult to deal with this genomic information using current bioinformatics data mining tools, because the data are heterogeneous, huge in quantity, and geographically distributed. In this paper, we present a new approach between data mining and virtual reality visualization, called visual data mining. Indeed, Virtual Reality is becoming mature, with efficient display devices and intuitive interaction in an immersive context. Moreover, biologists are used to working with 3D representations of their molecules, but in a desktop context. We present a software solution, Genome3DExplorer, which addresses the problem of genomic data visualization, scene management, and interaction. This solution is based on a well-adapted graphical and interaction paradigm, where local and global topological characteristics of data are easily visible, in contrast to traditional genomic database browsers, which always focus on the zoom and detail level.
Decolorization of remazol brilliant blue R with laccase from Lentinus crinitus grown in agro-industrial by-products. Lentinus crinitus is a white-rot fungus that produces laccase, an enzyme used for dye decolorization. Enzyme production depends on cultivation conditions, mainly agro-industrial by-products. We aimed to produce laccase from Lentinus crinitus with agro-industrial by-products for dye decolorization. Culture medium had coffee husk (CH) or citric pulp pellet (CP) and different nitrogen sources (urea, yeast extract, ammonium sulfate and sodium nitrate) at concentrations of 0, 0.7, 1.4, 2.8, 5.6 and 11.2 g/L. Enzymatic extract was used in the decolorization of remazol brilliant blue R. CH medium promoted greater laccase production than CP in all evaluated conditions. Urea provided the greatest laccase production for CH (37280 U/L) as well as for CP (34107 U/L). In CH medium, laccase activity was suppressed when carbon-to-nitrogen ratio changed from 4.5 to 1.56, but the other nitrogen concentrations did not affect laccase activity. For CP medium, reduction in carbon-to-nitrogen ratio from 6 to 1.76 increased laccase activity in 17%. The peak of laccase activity in CH medium occurred on the 11th day (41246 U/L) and in CP medium on the 12th day (32660 U/L). The maximum decolorization within 24 h was observed with CP enzymatic extract (74%) and with CH extract (76%). |
<gh_stars>1-10
/*
* Copyright (c) 2018-2021 Karlatemp. All rights reserved.
* @author Karlatemp <<EMAIL>> <https://github.com/Karlatemp>
*
* MXLib/MXLib.mxlib-api.main/Injected.java
*
* Use of this source code is governed by the MIT license that can be found via the following link.
*
* https://github.com/Karlatemp/MxLib/blob/master/LICENSE
*/
package io.github.karlatemp.mxlib.injector;
import io.github.karlatemp.mxlib.exception.ValueInitializedException;
import io.github.karlatemp.mxlib.exception.ValueNotInitializedException;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
 * Write-once holder for a dependency-injected value.
 *
 * <p>A value is matched by its declared {@code type} and an optional
 * qualifier {@code name}. {@link #initialize(Object)} may be invoked at
 * most once; a second attempt throws {@link ValueInitializedException}.
 *
 * <p>NOTE(review): {@code initialized} is not {@code volatile}, so the
 * unsynchronized pre-check in {@code initialize} and the reads in
 * {@link Nonnull#getValue()} are not strictly safe for cross-thread
 * publication under the Java Memory Model -- confirm intended usage.
 *
 * @param <T> type of the injected value
 */
public abstract class Injected<T> {
    /** Declared type used to match the injection. */
    protected final Class<T> type;
    /** Optional qualifier name; {@code null} means match by type only. */
    protected final String name;
    /** The injected value; only meaningful once {@code initialized} is true. */
    protected T value;
    /** Write-once flag; set under {@code synchronized (this)} in initialize(). */
    protected boolean initialized;

    /**
     * Creates a holder matched by type and an optional qualifier name.
     *
     * @param type declared type of the value
     * @param name qualifier name, or {@code null} for a type-only match
     */
    public Injected(@NotNull Class<T> type, @Nullable String name) {
        this.type = type;
        this.name = name;
    }

    /** Creates a holder matched by type only (no qualifier name). */
    public Injected(@NotNull Class<T> type) {
        this(type, null);
    }

    /** Returns the declared type used to match this injection. */
    public Class<?> getType() {
        return type;
    }

    /** Returns the qualifier name, or {@code null} if none was given. */
    public String getName() {
        return name;
    }

    /** Returns the raw value without any initialization check. */
    public @Nullable T getValueDirect() {
        return value;
    }

    /** Returns the value, applying the subclass's nullability contract. */
    public abstract T getValue();

    /**
     * Stores the value; callable at most once.
     *
     * @param value the value to publish
     */
    public abstract void initialize(T value);

    /** Whether {@link #getValue()} may legally return {@code null}. */
    public abstract boolean isNullable();

    /** Whether a value has already been supplied via initialize(). */
    public boolean isInitialized() {
        return initialized;
    }

    /** Variant whose value may be {@code null}; reads are allowed before initialization. */
    public static class Nillable<T> extends Injected<T> {
        public Nillable(@NotNull Class<T> type, @Nullable String name) {
            super(type, name);
        }

        public Nillable(@NotNull Class<T> type) {
            super(type);
        }

        /** Returns the value, or {@code null} when not yet initialized. */
        @Override
        public @Nullable T getValue() {
            return value;
        }

        /** Stores the value once; the check is repeated under the monitor. */
        @Override
        public void initialize(T value) {
            if (initialized) throw new ValueInitializedException();
            synchronized (this) {
                if (initialized) throw new ValueInitializedException();
                initialized = true;
                this.value = value;
            }
        }

        @Override
        public boolean isNullable() {
            return true;
        }
    }

    /** Variant that requires initialization before reads and forbids {@code null}. */
    public static class Nonnull<T> extends Injected<T> {
        public Nonnull(@NotNull Class<T> type, @Nullable String name) {
            super(type, name);
        }

        public Nonnull(@NotNull Class<T> type) {
            super(type);
        }

        /**
         * Returns the value.
         *
         * @throws ValueNotInitializedException when no value was supplied yet
         */
        @Override
        public @NotNull T getValue() {
            if (!initialized) throw new ValueNotInitializedException();
            assert value != null;
            return value;
        }

        /** Stores the value once; the check is repeated under the monitor. */
        @Override
        public void initialize(@NotNull T value) {
            if (initialized) throw new ValueInitializedException();
            synchronized (this) {
                if (initialized) throw new ValueInitializedException();
                initialized = true;
                this.value = value;
            }
        }

        @Override
        public boolean isNullable() {
            return false;
        }
    }
}
|
#!/usr/bin/env python
#encode=utf-8
#vim: tabstop=4 shiftwidth=4 softtabstop=4
#Created on 2013-8-17
#Copyright 2013 <NAME>
import copy
import logging
import traceback
class RPCException(Exception):
    """Base class for every RPC-related error.

    Subclasses override ``message`` with a printf-style template; keyword
    arguments given at construction time are interpolated into that
    template to build the final error text.
    """

    message = "An Unknown RPC related exception occurred"

    def __init__(self, message = None, **kwargs):
        self.kwargs = kwargs
        if not message:
            try:
                message = self.message % kwargs
            except Exception:
                # Interpolation failed (unexpected or missing kwargs);
                # fall back to the raw template instead of raising here.
                message = self.message
        super(RPCException, self).__init__(message)


class RemoteError(RPCException):
    """Wraps an exception that was raised on the remote side of an RPC call."""

    message = "Remote error: %(exc_type)s %(value)s\n%(traceback)s"

    def __init__(self, exc_type = None, value = None, traceback = None):
        self.exc_type = exc_type
        self.value = value
        self.traceback = traceback
        super(RemoteError, self).__init__(exc_type = exc_type,
                                          value = value,
                                          traceback = traceback)


class Timeout(RPCException):
    """Raised when no RPC response arrives within the allotted time."""

    message = "Timeout while waiting on RPC response"


class InvalidRPCConnectionReuse(RPCException):
    """Raised when an RPC connection object is reused in an invalid way."""

    message = "Invalid reuse of an RPC Connection"
class Connection(object):
    """Abstract interface for an RPC transport connection.

    Concrete transport implementations must override every method below;
    the base class only defines the contract.
    """

    def close(self):
        """Release the underlying transport resources."""
        raise NotImplementedError()

    def create_consumer(self, topic, proxy, fanout = False):
        """Register a consumer on *topic* that dispatches calls to *proxy*."""
        raise NotImplementedError()

    def create_worker(self, topic, proxy, pool_name):
        """Register a worker in *pool_name* consuming from *topic*."""
        raise NotImplementedError()

    def consumer_in_thread(self):
        """Run the consumer loop in a background thread."""
        raise NotImplementedError()
def _sage_log(log_func, mes, msg_data):
    """Placeholder for logging a message with its payload data.

    NOTE(review): the name looks like a typo of ``_safe_log`` -- confirm
    before relying on it. Currently a no-op stub.
    """
    pass
def serialize_remote_exception(failure_info):
    """Serialize exception info for transport back to the RPC caller.

    Currently an unimplemented stub; always returns None.
    """
    pass
def deserialize_remote_exception(conf, data):
    """Rebuild a remote exception from serialized *data*.

    Currently an unimplemented stub; always returns None.
    """
    pass
|
Optical Responses on Multiple Spatial Scales for Assessing Vegetation Dynamics - A Case Study for Alpine Grasslands Vegetation growth is highly dynamic over space and time. Especially mountainous vegetation is regionally affected by climatic variability and human impacts. Tracking optical reflectance provides a unique possibility to analyze vegetation across scales from a small plot up to an extensive area. This study compares the NDVI index of grasslands across four different spatial scales during the growing period in 2017. These scales are covered by measurements from (i) ground spectrometer; (ii) station-based reflectance sensors; (iii) fixed installed Phenocam and (iv) Remote Sensing Sentinel-2 MSI images. We compared the reflectance on single points within a grassland and among different grassland sites in order to assess the strengths and weaknesses of each sensor. Secondly, we analyzed the detectability of anthropogenic management activities using the NDVI index. First results show clear differences in the optical response among scales and in the detectability of management (e.g. fertilization, harvesting) activities. |
import { QuantConnectResponse } from ".";
import { ReadProjectParams } from "./projects";
/** A single file stored within a QuantConnect project. */
export type QuantConnectFile = {
  name: string;
  content: string;
  modified: string;
};
/** Parameters for creating a new file inside a project. */
export type CreateFileParams = ReadProjectParams & {
  name: string;
  content: string;
};
/** Parameters for reading project files; omit `name` to read all files. */
export type ReadFileParams = ReadProjectParams & {
  name?: string;
};
/** Parameters for renaming an existing file. */
export type UpdateFileNameParams = ReadProjectParams & {
  oldFileName: string;
  newFileName: string;
};
/**
 * Parameters for replacing a file's contents.
 *
 * NOTE(review): unlike the other params types this one does not extend
 * ReadProjectParams, so it carries no projectId -- confirm this is
 * intended (the usage example below does pass a projectId).
 */
export type UpdateFileContentParams = {
  fileName: string;
  newFileContents: string;
};
/** Union of the two update operations: rename or replace contents. */
export type UpdateFileParams = UpdateFileNameParams | UpdateFileContentParams;
/** Parameters for deleting a file from a project. */
export type DeleteFileParams = ReadProjectParams & {
  name: string;
};
/** API response carrying the requested project files. */
export type ReadFilesResponse = QuantConnectResponse & {
  files: Array<QuantConnectFile>;
};
/**
 *
 * @description [Read project's files](https://www.quantconnect.com/docs/v2/our-platform/api-reference/file-management/read-file)
 * @example
 * ```typescript
 * const { files } = quantconnect({ userId, token })
 *
 * const {files, success} = await files.read({ projectId: 214512 });
 *
 * // To filter by file name, pass the `name`
 * const {files, success} = await files.read({ projectId: 214512, name: 'main.py' });
 * ```
 */
export type ReadFiles = (params: ReadFileParams) => Promise<ReadFilesResponse>;
/**
 *
 * @description [Create a file](https://www.quantconnect.com/docs/v2/our-platform/api-reference/file-management/create-file)
 * @example
 * ```typescript
 * const {files} = quantconnect({userId, token})
 *
 * const {files, success} = await files.create({
 *   "projectId": 0,
 *   "name": "main.py",
 *   "content": "string"
 * })
 * ```
 */
export type CreateFile = (
  params: CreateFileParams
) => Promise<ReadFilesResponse>;
/**
 *
 * @description [Update a file](https://www.quantconnect.com/docs/v2/our-platform/api-reference/file-management/update-file)
 * @example
 * ```typescript
 * const {files} = quantconnect({userId, token})
 *
 * // Update a file name
 * const {success} = await files.update({
 *   projectId: 0,
 *   oldFileName: "main.py",
 *   newFileName: "new.py",
 * })
 *
 * // Update a file content
 * const {success} = await files.update({
 *   projectId: 0,
 *   fileName: "main.py",
 *   newFileContents: "",
 * })
 * ```
 */
export type UpdateFile = (
  params: UpdateFileParams
) => Promise<QuantConnectResponse>;
/**
 *
 * @description [Delete a File](https://www.quantconnect.com/docs/v2/our-platform/api-reference/file-management/delete-file)
 * @example
 * ```typescript
 * const {files} = quantconnect({userId, token})
 *
 * const {success} = await files.delete({ projectId: 2134213, name: 'main.py' });
 * ```
 */
export type DeleteFile = (
  params: DeleteFileParams
) => Promise<QuantConnectResponse>;
|
<reponame>cyberj0g/verification-classifier
import argparse
# Command-line interface: the only argument is the path to the file that
# lists the badly processed renditions to be re-queued.
parser = argparse.ArgumentParser(description='Generate renditions')
parser.add_argument('-i', "--input", action='store', help='Input file containing the bad processed renditions',
                    type=str, required=True)
args = parser.parse_args()
# Path of the error listing consumed by get_files_to_reprocess() below.
input_file = args.input
def get_files_to_reprocess(file_with_errors):
    """Group badly processed rendition files by attack key.

    Reads *file_with_errors*, extracts (attack, file name) pairs from each
    line via clean_string(), and collects the newline-terminated file
    names under the key produced by get_key().

    Bug fix: the original created an empty set for a previously unseen key
    but never added the current entry to it, silently dropping the first
    file seen for every attack key.

    :param file_with_errors: path of the error listing to parse
    :return: dict mapping attack key -> set of newline-terminated names
    """
    attacks_and_files = {}
    with open(file_with_errors) as file_to_read:
        for raw_line in file_to_read:
            for attack, file_name in clean_string(raw_line):
                key = get_key(attack)
                # setdefault both creates the set on first sight of a key
                # and always records the current entry.
                attacks_and_files.setdefault(key, set()).add(file_name + '\n')
    return attacks_and_files
def clean_string(string_to_clean):
    """Split a concatenated error line into (attack, file name) pairs.

    The input is a run of rendition paths glued together, each ending in
    '.mp4'. Only chunks with exactly five '/'-separated components are
    kept, yielding (attack folder, file name) tuples.
    """
    extension = '.mp4'
    pairs = []
    for chunk in string_to_clean.split(extension):
        components = (chunk + extension).split('/')
        if len(components) == 5:
            pairs.append((components[3], components[4]))
    return pairs
def get_key(full_key):
    """Return the attack key: everything after the first underscore.

    Keys containing no underscore denote the original rendition and map
    to the literal key 'orig'.
    """
    _prefix, separator, suffix = full_key.partition('_')
    if not separator:
        return 'orig'
    return suffix
def write_to_file(dict_to_write):
    """Write one '<key>_reprocess' file per attack key.

    Each dict value is an iterable of newline-terminated file names which
    is dumped verbatim into the corresponding output file in the current
    working directory.
    """
    for attack_key, file_lines in dict_to_write.items():
        output_path = attack_key + '_reprocess'
        with open(output_path, 'w') as output_file:
            output_file.writelines(file_lines)
if __name__ == "__main__":
    # Group the bad renditions by attack key, then emit one
    # '<key>_reprocess' file per key for downstream re-processing.
    files_to_reprocess = get_files_to_reprocess(input_file)
    write_to_file(files_to_reprocess)
|
The present disclosure relates generally to the field of display systems. More specifically, the present disclosure relates to a method for displaying radar-estimated terrain on a flight display in an aircraft.
Terrain awareness warning systems provide aircrews with information regarding the terrain. Standard TSO-C151b is utilized by the Federal Aviation Administration (“FAA”) to specify four functions for a terrain awareness warning system. These functions are terrain display, premature descent alerting, ground proximity alerting and forward-looking terrain alerting. The terrain information is transmitted to a display, which provides alerts for the aircrew.
The terrain display allows the pilot to estimate the distance and bearing to terrain cells of interest. The display can be oriented with the aircraft positioned at the bottom of the display and the track of the aircraft in the upward direction; however, other orientations are allowed by the regulations.
The display is formatted in such a way as to ensure that the pilot can differentiate between terrain that is above the aircraft and terrain that is below the aircraft. The display also provides a means to distinguish between terrain cells that represent a potential hazard to the aircraft and non-hazardous terrain cells.
The color scheme for the terrain display can be determined through extensive prototyping and human factors studies. The following colors are commonly used in terrain displays. Blue or cyan for the sky. Red for terrain above the aircraft's altitude. Yellow or amber for terrain just below the aircraft's altitude. A neutral color for terrain that is well below the aircraft's altitude (i.e., non-hazardous terrain). The neutral color may be green, brown, tan, some other pastel color, or even a photo-realistic rendering.
A terrain awareness warning system can utilize a database to generate the display image. Terrain awareness warning system displays that use databases are subject to three basic error conditions, including position errors from the navigation system (e.g., the Global Positioning System (“GPS”)), altitude or heading errors from the inertial sensors (e.g., the Attitude Heading Reference System (“AHRS”)), and terrain elevation errors from the terrain database. These error sources can cause significant problems for aircrews.
There is a need for a method of displaying terrain utilizing an enhanced vision system and a radar system to provide and/or validate a terrain display. Therefore, there is a need for an improved method of creating a terrain image such that the pilot of the aircraft can make better-informed decisions.
It would be desirable to provide a system and/or method that provides one or more of these or other advantageous features. Other features and advantages will be made apparent from the present specification. The teachings disclosed extend to those embodiments which fall within the scope of the appended claims, regardless of whether they accomplish one or more of the aforementioned needs. |
Investigating signs of recent evolution in the pool of proviral HIV type 1 DNA during years of successful HAART. In order to shed light on the nature of the persistent reservoir of human immunodeficiency virus type 1 (HIV-1), we investigated signs of recent evolution in the pool of proviral DNA in patients on successful HAART. Pro-viral DNA, corresponding to the C2-V3-C3 region of the HIV-1 env gene, was collected from PBMCs isolated from 57 patients. Both "consensus" (57 patients) and clonal (7 patients) sequences were obtained from five time points spanning a 24-month period. The main computational strategy was to use maximum likelihood to fit a set of alternative phylogenetic models to the clonal data, and then determine the support for models that imply evolution between time points. Model fit and model-selection uncertainty was assessed using the Akaike information criterion (AIC) and Akaike weights. The consensus sequence data was also analyzed using a range of phylogenetic techniques to determine whether there were temporal trends indicating ongoing replication and evolution. In summary, it was not possible to detect definitive signs of ongoing evolution in either the bulk-sequenced or the clonal data with the methods employed here, but our results could be consistent with localized expression of archival HIV genomes in some patients. Interestingly, stop-codons were present at the same two positions in several independent clones and across patients. Simulation studies indicated that this phenomenon could be explained as the result of parallel evolution and that some sites were inherently more likely to evolve into stop codons. |
Chromosome abnormalities in neurological diseases. The current status of research into chromosomal abnormalities in neurological diseases is reviewed. The only possible association between chromosome aberration and neurological disorder is found in ataxia telangiectasia and in tumours of the nervous system. In the remaining diseases reviewed, no specific association was confirmed. This was expected to some extent, since the majority of these diseases (spinal muscular atrophies, muscular dystrophies, etc.) are due to single gene defects. |
def multiplication(a, b):
    """Return the product of *a* and *b*, coercing both operands to float."""
    return float(a) * float(b)
T-alleles in the mouse are probably not inversions. Animals heterozygous for the t/sup 6/ allele are known to exhibit reduced crossing over in the region near the allele. Testes of t/sup 6//+ males, however, showed first meiotic anaphase bridge frequencies that were not significantly different from those frequencies in T/+ or +/+ animals, nor from frequencies observed in other animals with normal chromosomal arrangements. Two other possible characteristics of inversions, frequencies of broken bridges at anaphase I and frequencies of fragments appearing alone at anaphase I, also were not significantly different between the genotypes. These data indicate that an inversion involving most of the region between T and H-2 is not a likely explanation for the suppression of crossing-over in that region attributable to the t/sup 6/ allele. (auth) |
The Effect of Prolonged Thermo-oxidative Ageing on the Mechanical Properties of Dynamically Vulcanized Poly(Vinyl Chloride)/Nitrile Butadiene Rubber Thermoplastic Elastomers ABSTRACT Plasticized poly(vinyl chloride)/nitrile butadiene rubber (PVC/NBR) thermoplastic elastomers (TPEs) were dynamically vulcanized in the melt stage with the incorporation of a semi-efficient vulcanizing system using a Brabender Plasticorder at 150°C and 50 rpm rotor speed. Curative concentration was progressively increased from 0 to 1 part per hundred (phr) NBR in order to study the effect of dynamic curing on the plasticized blend. The mechanical properties investigated include tensile strength, elongation at break (%EB), modulus at 100% elongation (M100), tear strength, and hardness. The effect of thermo-oxidative ageing (TOA) on the mechanical properties was investigated by exposing the PVC/NBR TPEs in an air oven at 100°C for 3, 7, 14, and 21 days. It was found that tensile and tear strength passed through a maximum value, whereas, hardness and M100 increased steadily with the sulfur content and ageing time. On the contrary, the elongation at break reduced gradually with ageing time until it reaches a minimum value. The increase in crosslink density as well as the steady reduction in swelling index with increasing concentration of curatives provided an excellent evidence for the significant increase in crosslink density with ageing time. The changes in physical and mechanical properties of the TPEs is believed to be closely related to some microstructural changes taking place as a result of the formation of new crosslinks due to prolonged thermo-oxidative environment. |
Contribution of dimethyl sulfide to the aroma of Syrah and Grenache Noir wines and estimation of its potential in grapes of these varieties. The contribution of dimethyl sulfide (DMS) to the aroma of Syrah and Grenache Noir wines from the Rhone Valley of France was investigated by sensory analysis, and its levels in these wines were measured. The potential DMS in the corresponding grapes and wines, susceptible to release during wine aging, was evaluated. Free DMS and potential DMS assessed by a heat-alkaline treatment were measured in grape juices and wines by SPME-GC-MS using methods previously reported and slightly modified. A relationship between potential DMS from grapes and the total DMS levels in wine was demonstrated. Furthermore, a linear regression between the ratio of free DMS levels to these total DMS levels in wine and time of storage was found. Free and potential DMS levels in grapes and wines depended on grape variety, vintage, and vine location. DMS imparted a noticeable and complex contribution to the aroma of the wines investigated, depending on the mode of sensory perception used, either before or after glass swirling. It significantly enhanced the fruity notes of the wines, and additional truffle and black olive notes. |
What’s the best and worst part about learning French online?
The best part: all of the great free resources that are available at your fingertips.
The worst part: too many free resources.
This includes blogs about learning French.
In fact there are so many that you could spend forever and a day searching for the right ones for you.
But now you don’t have to, as I’ve compiled a great list of blog sites that in their many different ways can help you learn French in no time at all.
Their approaches may well be different, but they’re all united by a common love of the French language and a common purpose to help the student as much as they possibly can. Some use video, others podcasts and one or two are text only.
So grab a cup of coffee, butter up some warm, soft croissants and settle down in front of your laptop for some quality French learning. Here’s my pick of eight of the best French language learning blog sites:
8 Great French Blogs for French Learners
French Crazy is a glorious immersion into all things Gallic with a series of blog posts that explore the country’s music, fashion, lifestyle and culture. There are also articles that point to other French resources on the internet. Some of the posts have been taken from French sites and translated into English, and videos and large photographs are used throughout.
For the adventurous intermediate or competent advanced speaker, there is a section of French texts featuring the work of such literary luminaries as George Sand and Gustave Flaubert. Language learning lessons have not been forgotten and French Crazy has a selection of tutorials covering grammar, vocabulary, pronunciation, how to start thinking in French and more.
The website is run by John Elkhoury, a French-English bilingual who teaches French and has lived and visited a clutch of French cities.
I tip my chapeau to Love Learning Languages, (previously Learn French with Jennifer), a comprehensive resource for learning the language. Jennifer Crespin taught French in the United States for more than 15 years before leaving her native country with her French husband and relocating to the south of France.
She created the blog “to reach an audience that could use a little help in learning French.” There are posts about living in France, grammar lessons and a “word of the day” series. Some of the posts feature short pre-recorded video lessons, others include podcasts followed by comprehension questions in French, and some are written in English.
The “word of the day” posts are particularly useful if you’re time pressed. A chosen word is highlighted, translated and then used in a sentence. All in all, a very well-written blog.
Talk in French has put together a great series of blogs that dive into the language to source the most essential ingredients for speaking it fluently. Each post a different lesson and all are well presented with easy to follow information and plenty of French with English translations.
One of the really cool aspects to the blogs is that in the top left hand corner of each post the reader is told how long it will take to read and whether it’s for beginners, intermediates or advanced speakers. The text is also given a grade such as “easy” or “difficult” so the student knows what they are letting themselves in for.
The posts attract a reasonable number of comments and these are worth reading sometimes as they can throw up some of the burning questions that you probably have and need answers to.
The French Blog is bursting at the seams with great content, and is compiled by aspiring fluent French speaker William Alexander who’s also an author and IT director. According to the blurb on his website this is his last best shot at becoming fluent and he wants to bring other French language students along with him.
Knowing the importance of good content, William scours the Internet for useful videos and articles to comment on as well as writing a lot of original material. There are posts on aspects of French life and culture, cooking, comments on the news and much more.
Two of his unmissable regular features are “Wordsmith Wednesday”—a weekly exploration of a different word, and “French Food Fight Friday”—recipes, restaurants news and lots of other food-related goodies.
This is a fantastic resource created by a French teacher with a Master’s degree in French literature and a bachelor’s in French language. The posts are an eclectic mix of culture, grammar, songs, news and vocabulary and are written in English, but with lots of phrases and their translations thrown in.
Some posts make use of videos and audio and others have simple cartoon graphics that help to reinforce meanings. Oui, c’est ça has dozens of posts stretching back to July 2012. Beginners, intermediates and advanced French speakers will find this blog series a useful addition to their bag of learning tools.
Be prepared to laugh—a lot. French Together says it wants students to learn French the fun way and it more than delivers on its aim. This compelling blog series puts humor and learning centre stage as many of the posts certainly have a fun slant to them.
A scan of the French Together website throws up some interesting blog post titles such as “7 French stand-up comedies that will make you laugh out loud” and “5 funny French expressions.” It’s all part of the website’s desire to get away from learning by rote. Humor is an excellent vehicle to help students on their way to fluency. The posts are a mixture of text, graphics, photographs and videos.
French Today is a site specializing in audiobooks and lessons for learners that gives you additional varied materials for learning by throwing up an exciting smorgasbord of goodies with posts about anything and everything to do with French life and culture. Many of the posts follow a simple enough format. They usually start with a short introductory paragraph under which is a series of bullet points or short, informative paragraphs.
Occasionally you will come across a few highlighted pieces of text and these link to audio files in French on subjects related to the particular blog’s theme. The posts are written by a number of authors and are filed under such categories as travel, food, French culture, learn French and humor. In addition to the audiobooks and lessons, you can sign up for private French lessons on Skype through the site.
The intent of this website is there in the title. Created by Stanley Aléong, whose academic background encompasses anthropology, linguistics and computer science, this blog series is for those who want to speak French fluently. There are three categories of blog posts: how-to articles, methods and strategies for learning French, and learning from the common mistakes in spoken French.
The posts are predominately text-based, so there are no cartoons, videos or flash graphics. But the articles flow so well that bells and whistles are not required to sugar-coat the language learning pill. Paragraphs are succinct and key words and phrases are highlighted in bold.
Stanley’s website also has a section called “real-life examples” that has a collection of links to authentic French conversations in numerous scenarios. Transcriptions, translations and technical commentaries on the conversations are available for download. Speak French Fluently is for beginners, intermediates and advanced French language speakers.
And one more thing…
If you like French blogs, then I would be remiss not to tell you about a great new resource for learning French: FluentU. FluentU makes it possible to learn French through music videos, commercials, news, and inspiring talks. Native French videos become language learning experiences.
FluentU lets you learn real French—the same way that people speak it in real life. FluentU has a diverse range of videos (eg. movie trailers, funny commercials, and web series), as you can see here:
FluentU makes it really easy to watch French videos with interactive captions. Tap on any word to see an image, definition, and useful examples.
For example, when you tap on the word “suit,” this is what you see:
And FluentU lets you learn all the vocabulary in any video with quizzes. Swipe left or right to see more examples for the word you’re learning.
All along, FluentU keeps track of the vocabulary that you’re learning. It uses that vocab to recommend you examples and videos. You have a truly personalized experience. Start using FluentU on the website or practice anytime, anywhere with the mobile app for iOS or Android devices. |
Proposal of Optimum Application Deployment Technology for Heterogeneous IaaS Cloud Recently, cloud systems composed of heterogeneous hardware have been increased to utilize progressed hardware power. However, to program applications for heterogeneous hardware to achieve high performance needs much technical skill and is difficult for users. Therefore, to achieve high performance easily, this paper proposes a PaaS which analyzes application logics and offloads computations to GPU and FPGA automatically when users deploy applications to clouds. Introduction Recently, Infrastructure as a Service (IaaS) clouds have been progressed, and users can use computer resources or service components on demand (e.g., ). Early cloud systems are composed of many PC-like servers. Hypervisors, such as Xen or kernel-based virtual machines (KVMs), virtualize these servers to achieve high computational performance using distributed processing technology, such as MapReduce. However, recent cloud systems change to make the best use of recent advances in hardware power. For example, to use a large amount of core CPU power, some providers have started to provide baremetal servers which do not virtualize physical servers. Moreover, some cloud providers use special servers with strong graphic processing units (GPUs) to process graphic applications or special servers with field programmable gate arrays (FPGAs) to accelerate specific computation logics. For example, Microsoft's search engine Bing uses FPGAs to optimize search processing. To use the recent advances in hardware power, users can benefit from high performance of their applications. However, to achieve this, users need to program appropriate applications for heterogeneous hardware and have much technical skill. Therefore, our objective is to enable users to achieve high performances easily. 
For this objective, cloud PaaS analyzes application logics and offloads computations to GPU and FPGA automatically when users deploy applications. The author previously proposed a Platform as a Service (PaaS) to select appropriate provisioning type of baremetal, container or virtual machine based on user requests. In this paper, we investigate an element technology to offload part logics of applications to GPU and FPGA. The rest of this paper is organized as follows. In Section 2, we review and clarify existing technologies problems. In Section 3, we propose a method of optimum application deployment for heterogeneous IaaS cloud. In Section 4, we conclude this paper. Problems of Existing Technologies Recently, GPU programming, such as the compute unified device architecture (CUDA), that involves GPU computational power not only for graphics processing has become popular. Furthermore, to program ISBN 978-981-11-0008-6 Proceedings of 2016 6th International Workshop on Computer Science and Engineering (WCSE 2016) Tokyo, Japan, 17-19 June, 2016, pp. 34 -37 without walls between the CPU and GPU, the heterogeneous system architecture (HSA), which allows shared memory access from the CPU and GPU and reduces communication latency between them, has been extensively discussed. For heterogeneous programming, it is general to add and specify a code line to direct specified hardware processing. PGI Accelerator Compilers with OpenACC Directives can compile C/C++/Fortran codes with OpenACC directives and deploy execution binary to run on GPU and CPU. OpenACC directives indicate parallel processing sections, then PGI compiler creates execution binary for GPU and CPU. Aparapi (A PARallel API) of Java is API to call GPGPU (General Purpose GPU) from Java. Specifying this API, Java byte code is compiled to OpenCL and run when it is executed. To control FPGA, development tools of OpenCL for FPGA are provided by Altera and Xilinx. 
For example, Altera SDK for OpenCL is composed of OpenCL C Compiler and OpenCL Runtime Library. OpenCL C Compiler compiles OpenCL C codes to FPGA bit stream and configures FPGA logic, OpenCL Runtime Library controls FPGA from applications on CPU using libraries of OpenCL API. Programmers can describe FPGA logic and control by OpenCL, then configured logic can be offloaded to specified FPGA. However, these technologies have two problems. A) General language codes of C, C++, Java need directives such as OpenACC or language extension such as Open CL C. If we would like to achieve high performance, timing to specify directives is very important and much technical knowledge is needed. B) There is no PaaS to utilize CPU/GPU/FPGA appropriately in clouds and users need to design how much GPU instances are needed. The author previously proposed a PaaS to provide services based on user requests,,. The work of can provision baremetal, container or virtual machine appropriately, thus enhancing idea, we can provide PaaS to select CPU/GPU/FPGA and can partly solve B). This paper targets to solve A) by an element technology to utilize GPU and FPGA from general language applications. Complex applications such as synchronous execution of FPGA and GPU are out of scope of this paper. Proposal of Optimum Application Deployment Technology for IAAS In this section, we propose a cloud provider PaaS with optimum application deployment technology. Our proposed technology involves a PaaS, an IaaS controller, such as OpenStack, heterogeneous cloud hardware, and a code patterns database (DB). The figures describe OpenStack as an IaaS controller, but OpenStack is not a precondition of the proposed technology. Fig. 1 shows system architecture and application deployment steps. There are 7 steps to deploy applications. 1. Users specify applications to deploy on clouds to PaaS system. Users need to upload source codes of applications to PaaS. 2. 
PaaS analyzes application source codes, compares codes to code patterns DB and detects similar code patterns. Here, code patterns DB retains codes which are offloadable to GPU and FPGA and corresponding OpenCL patterns. To detect similar codes, we use similar code detection tools such as CCFinderX. Similar code detection tools can detect specified code patterns of FFT (Fast Fourier Transformation), encryption and decryption processing, graphic processing and so on from users' application codes. In these examples, FFT, encryption and decryption processing can be offloaded to FPGA with accelerated configurations of these processing, and graphic processing can be offloaded to GPU. 3. PaaS extracts OpenCL language codes for offloadable processing to GPU and FPGA detected in step 2. OpenCL language is major language for heterogeneous programming and describes processing which run on FPGA/GPU. 4. PaaS sends a provisioning request of creating run environment for applications to OpenStack. For example, when we need GPU, containers are provisioned on GPU servers because VMs cannot sufficiently control GPUs. And when we need FPGA, servers with FPGA board are provisioned by baremetal provisioning such as Ironic. Basically PaaS selects pre-configured FPGA server for specified logics such as FFT from 35 Heterogeneous multiple FPGA servers. However, there is no desired configuration of FPGA, PaaS may provision non-configured FPGA server and customized configuration may be done before actual applications run. 5. OpenStack creates computer resources for specified applications. Note that if applications need to create not only one compute server but several resources, such as virtual routers and storage, PaaS sends templates that describe the environment structures by JavaScript Object Notation (JSON) and provisions them at once by OpenStack Heat or other orchestration/composition technology (e.g., ). 6. PaaS deploys application execution binary to provisioned servers. 
When PaaS deploys applications, existing tools of each vendor such as Altera SDK for OpenCL can be used. 7. PaaS returns application deployment results information of which servers are used for the processing and how much is the cost of server usage and so on. If users agree the deployment results, users start to use applications and usage fees are also started to charge. If users disagree the results, PaaS deletes resources by OpenStack Heat stack-delete API. After resources deletion, users may re-deploy. And if users would like to reconfigure FPGA, users specify FPGA configuration in this step 7 timing. By these processing steps, users can deploy general language codes applications to heterogeneous cloud with extracting offloadable logics and OpenCL codes automatically. There is a merit for users to achieve high performance without program knowledge of GPU and FPGA. Summary This paper proposed a PaaS to offload application logics to GPU and FPGA automatically when users deploy applications to clouds. Proposed PaaS analyzed source codes, detected offloadable logics to GPU and FPGA using similar code detection technology and predefined code patterns, created OpenCL codes and deployed them. This can enable high performance applications easily for users. In the future, we will verify the proposed technology performance and validity for general application codes. We also study how to deploy complex applications in the future. |
/**
* Copyright (c) 2017-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <sstream>
#include <glog/logging.h>
#include <google/protobuf/text_format.h>
#include "tc/proto/compcache.pb.h"
namespace tc {
// Build row-major (C-contiguous) strides for the given sizes: the last
// dimension gets stride 1 and each earlier dimension's stride is the
// product of all later sizes. Returns an empty vector for 0-dim input.
template <typename T>
std::vector<T> makeStridesFromSizes(const std::vector<T>& sizes) {
  auto ndim = sizes.size();
  if (ndim == 0) {
    return std::vector<T>();
  }
  std::vector<T> strides(ndim, 0);
  strides[ndim - 1] = 1;
  // Use a signed 64-bit index: the previous `int i = ndim - 2` underflowed
  // the unsigned `ndim` when ndim == 1 before converting to int, which is
  // implementation-defined. Casting first keeps the arithmetic signed.
  for (int64_t i = static_cast<int64_t>(ndim) - 2; i >= 0; --i) {
    strides[i] = strides[i + 1] * sizes[i + 1];
  }
  return strides;
}
// Build a DLContext describing host (CPU) memory; device_id is 0 for CPU.
inline DLContext getCPUDLContext() {
  DLContext ctx;
  ctx.device_type = DLDeviceType::kDLCPU;
  ctx.device_id = 0;
  return ctx;
}
// Build a DLContext describing memory on the GPU with the given device id.
inline DLContext getGPUDLContext(int device_id) {
  DLContext ctx;
  ctx.device_type = DLDeviceType::kDLGPU;
  ctx.device_id = device_id;
  return ctx;
}
// Deleter used by DLTensorUPtr/DLConstTensorUPtr: frees the shape and
// strides arrays allocated with new[] in detail::makeDLTensor, then the
// tensor struct itself. The `data` buffer is never owned and never freed.
template <typename DLTensorType>
void DLTensorDeleter::operator()(const DLTensorType* t) {
  // delete[] on a null pointer is a well-defined no-op, so the previous
  // null checks were redundant. Also dropped the stray ';' that followed
  // the function body (flagged by -Wextra-semi).
  delete[] t->shape;
  delete[] t->strides;
  delete t;
}
namespace detail {
// Allocate and populate a DLTensorType (DLTensor or DLConstTensor) from an
// explicit context, dtype, sizes and optional strides. When `strides` is
// empty, contiguous row-major strides are derived from `sizes`.
// `data` is stored as-is: this function never owns or copies the buffer.
// The shape/strides arrays are heap-allocated here and later released by
// DLTensorDeleter.
template <typename DLTensorType, typename T>
inline std::unique_ptr<DLTensorType, DLTensorDeleter> makeDLTensor(
    DLContext ctx,
    DLDataType dtype,
    const std::vector<T>& sizes,
    const std::vector<T>& strides = std::vector<T>(),
    decltype(DLTensorType().data) data = nullptr,
    uint64_t byteOffset = 0) {
  static_assert(
      std::is_convertible<T, int64_t>::value,
      "Template type not convertible to int64_t");
  std::unique_ptr<DLTensorType, DLTensorDeleter> res(new DLTensorType);
  res->data = data; // non-owning: the caller keeps ownership of the buffer
  res->ctx = ctx;
  auto ndim = sizes.size();
  res->ndim = ndim;
  res->dtype = dtype;
  res->shape = new int64_t[ndim];
  for (size_t i = 0; i < ndim; ++i) {
    res->shape[i] = sizes[i];
  }
  res->strides = new int64_t[ndim];
  // No strides supplied: fall back to contiguous (row-major) strides.
  std::vector<T> st(strides);
  if (st.size() == 0) {
    st = makeStridesFromSizes(sizes);
  }
  for (size_t i = 0; i < ndim; ++i) {
    res->strides[i] = st[i];
  }
  res->byte_offset = byteOffset;
  return res;
}
// Deep-copy the metadata (shape, strides, dtype, ctx, byte offset) of an
// existing DLTensor into a freshly allocated DLTensorType; the data pointer
// is shared, not copied.
// NOTE(review): DLPack permits `strides == nullptr` for compact tensors;
// the std::copy below would dereference it — confirm callers always pass
// tensors with explicit strides.
template <typename DLTensorType>
inline std::unique_ptr<DLTensorType, DLTensorDeleter> makeDLTensorHelper(
    const DLTensor* ptr) {
  std::vector<int64_t> sizes(ptr->ndim, 0);
  std::copy(ptr->shape, ptr->shape + ptr->ndim, sizes.begin());
  std::vector<int64_t> strides(ptr->ndim, 0);
  std::copy(ptr->strides, ptr->strides + ptr->ndim, strides.begin());
  return makeDLTensor<DLTensorType>(
      ptr->ctx, ptr->dtype, sizes, strides, ptr->data, ptr->byte_offset);
}
} // namespace detail
// Owning copy of a DLTensor's metadata (shape/strides arrays are
// reallocated); the data pointer is shared, not copied.
inline DLTensorUPtr makeDLTensor(const DLTensor* ptr) {
  return detail::makeDLTensorHelper<DLTensor>(ptr);
}
// Build a CPU DLTensor with a null data pointer from TensorInfo metadata.
// NOTE(review): `tensor.alignment` is passed as the byteOffset argument of
// detail::makeDLTensor — confirm this is intentional and not a mix-up.
inline DLTensorUPtr makeDLTensor(const TensorInfo& tensor) {
  return detail::makeDLTensor<DLTensor>(
      DLContext{kDLCPU, 0},
      tensor.dtype,
      tensor.shape,
      tensor.strides,
      nullptr,
      tensor.alignment);
}
// Public wrapper over detail::makeDLTensor for mutable tensors; see the
// detail overload for parameter semantics (empty strides => contiguous).
template <typename T>
inline DLTensorUPtr makeDLTensor(
    DLContext ctx,
    DLDataType dtype,
    const std::vector<T>& sizes,
    const std::vector<T>& strides,
    void* data,
    uint64_t byteOffset) {
  return detail::makeDLTensor<DLTensor>(
      ctx, dtype, sizes, strides, data, byteOffset);
}
// Owning DLConstTensor copy of any DLTensor-like struct's metadata; the
// data pointer is shared, not copied.
template <typename DLTensorType>
inline DLConstTensorUPtr makeDLConstTensor(const DLTensorType* ptr) {
  return detail::makeDLTensorHelper<DLConstTensor>(ptr);
}
// Build a CPU DLConstTensor with a null data pointer from TensorInfo
// metadata.
// NOTE(review): as in makeDLTensor(const TensorInfo&), `tensor.alignment`
// is passed as the byteOffset argument — confirm intended.
inline DLConstTensorUPtr makeDLConstTensor(const TensorInfo& tensor) {
  return detail::makeDLTensor<DLConstTensor>(
      DLContext{kDLCPU, 0},
      tensor.dtype,
      tensor.shape,
      tensor.strides,
      nullptr,
      tensor.alignment);
}
// Public wrapper over detail::makeDLTensor for read-only tensors; see the
// detail overload for parameter semantics (empty strides => contiguous).
template <typename T>
inline DLConstTensorUPtr makeDLConstTensor(
    DLContext ctx,
    DLDataType dtype,
    const std::vector<T>& sizes,
    const std::vector<T>& strides,
    const void* data,
    uint64_t byteOffset) {
  return detail::makeDLTensor<DLConstTensor>(
      ctx, dtype, sizes, strides, data, byteOffset);
}
// Specializes for const DLTensor*, const DLConstTensor* and TensorInfo
template <typename T>
std::vector<DLTensorUPtr> makeDLTensorVector(const std::vector<T>& ptrs) {
std::vector<DLTensorUPtr> res;
for (auto p : ptrs) {
res.push_back(makeDLTensor(p));
}
return res;
}
template <typename T>
std::vector<DLConstTensorUPtr> makeDLConstTensorVector(
const std::vector<T>& ptrs) {
std::vector<DLConstTensorUPtr> res;
res.reserve(ptrs.size());
for (auto p : ptrs) {
res.push_back(makeDLConstTensor(p));
}
return res;
}
template <typename DLTensorPtrType>
std::vector<TensorInfo> makeTensorInfoVector(
const std::vector<DLTensorPtrType>& ts) {
std::vector<TensorInfo> res;
res.reserve(ts.size());
std::transform(
ts.begin(), ts.end(), std::back_inserter(res), [](DLTensorPtrType t) {
return TensorInfo(t);
});
return res;
}
inline std::vector<const DLTensor*> extractRawPtrs(
const std::vector<DLTensorUPtr>& uptrs) {
std::vector<const DLTensor*> res(uptrs.size(), nullptr);
for (size_t i = 0; i < uptrs.size(); ++i) {
res[i] = uptrs[i].get();
}
return res;
}
inline std::vector<const DLConstTensor*> extractRawPtrs(
const std::vector<DLConstTensorUPtr>& uptrs) {
std::vector<const DLConstTensor*> res(uptrs.size(), nullptr);
for (size_t i = 0; i < uptrs.size(); ++i) {
res[i] = uptrs[i].get();
}
return res;
}
// Map a DLDataType to a C/C++ scalar type name (presumably used when
// emitting generated code — confirm against callers). Only single-lane
// types are supported; any unsupported code/bits combination aborts via
// CHECK(false).
inline std::string toString(const DLDataType& t) {
  if (t.lanes != 1) {
    CHECK(false) << "NYI: toString for >1 lanes";
  }
  switch (t.code) {
    case DLDataTypeCode::kDLFloat:
      switch (t.bits) {
        case 16:
          return "Half"; // capitalized: a distinct half-precision type name
        case 32:
          return "float";
        case 64:
          return "double";
      }
      break;
    case DLDataTypeCode::kDLInt:
      switch (t.bits) {
        case 8:
          return "int8_t";
        case 16:
          return "int16_t";
        case 32:
          return "int";
        case 64:
          return "int64_t";
      }
      break;
    case DLDataTypeCode::kDLUInt:
      switch (t.bits) {
        case 8:
          return "uint8_t";
      }
      break;
  }
  CHECK(false) << "NYI: toString for type: " << t.code << ", bits: " << t.bits;
  // Unreachable (CHECK(false) aborts); present to satisfy the compiler.
  return "";
}
// Human-readable description of a DLTensor: its data pointer followed by
// the protobuf text-format rendering of its TensorInfo metadata.
inline std::string toString(const DLTensor& t) {
  std::stringstream ss;
  ss << "DLTensor@" << t.data << ":\n";
  std::string res;
  google::protobuf::TextFormat::PrintToString(
      TensorInfo(&t).toProtobuf(), &res);
  ss << res;
  return ss.str();
}
} // namespace tc
|
<reponame>taptalk-io/meettalk-ios
//
// TAPMyAccountView.h
// TapTalk
//
// Created by <NAME> on 04/05/19.
// Copyright © 2019 Moselo. All rights reserved.
//
#import "TAPBaseView.h"
#import "TAPCustomTextFieldView.h"
#import "TAPCustomButtonView.h"
#import "TAPImageView.h"
#import "TAPCustomLabelView.h"
#import "TAPCustomGrowingTextView.h"
NS_ASSUME_NONNULL_BEGIN
// Loading-state variants used by setAsLoadingState:withType:.
// NOTE(review): "Profil" and "Upadating" are misspellings of
// "Profile"/"Updating", but these constants are public API — renaming them
// would break existing callers, so they are left as-is.
typedef NS_ENUM(NSInteger, TAPMyAccountLoadingType) {
TAPMyAccountLoadingTypeSetProfilPicture,
TAPMyAccountLoadingTypeUpadating,
TAPMyAccountLoadingTypeSaveImage,
};
// View for the "My Account" screen: displays and edits the user's profile
// (picture, name, username, mobile number, email, bio) and exposes
// loading/logout state helpers for its view controller.
@interface TAPMyAccountView : TAPBaseView
// --- Navigation bar area ---
@property (strong, nonatomic) UIView *navigationHeaderView;
@property (strong, nonatomic) UIView *shadowView;
@property (strong, nonatomic) UIImageView *cancelImageView;
@property (strong, nonatomic) UIButton *cancelButton;
@property (strong, nonatomic) UILabel *navigationHeaderLabel;
@property (strong, nonatomic) UIView *navigationSeparatorView;
// --- Scrollable form fields ---
@property (strong, nonatomic) UIView *additionalWhiteBounceView;
@property (strong, nonatomic) UIScrollView *scrollView;
@property (strong, nonatomic) TAPCustomTextFieldView *fullNameTextField;
@property (strong, nonatomic) TAPCustomTextFieldView *usernameTextField;
@property (strong, nonatomic) TAPCustomTextFieldView *mobileNumberTextField;
@property (strong, nonatomic) TAPCustomTextFieldView *emailTextField;
@property (strong, nonatomic) UIView *logoutView;
@property (strong, nonatomic) UIButton *logoutButton;
@property (strong, nonatomic) TAPCustomButtonView *continueButtonView;
// --- Profile picture area ---
@property (strong, nonatomic) UIView *initialNameView;
@property (strong, nonatomic) UILabel *initialNameLabel;
@property (strong, nonatomic) TAPImageView *profileImageView;
@property (strong, nonatomic) UIButton *removeProfilePictureButton;
@property (strong, nonatomic) UIButton *changeProfilePictureButton;
// --- Bio and read-only label fields ---
@property (strong, nonatomic) TAPCustomGrowingTextView *bioTextView;
@property (strong, nonatomic) TAPCustomLabelView *bioLabelField;
@property (strong, nonatomic) TAPCustomLabelView *usernameLabelField;
@property (strong, nonatomic) TAPCustomLabelView *mobileNumberLabelField;
@property (strong, nonatomic) TAPCustomLabelView *emailLabelField;
@property (strong, nonatomic) UILabel *bioWordCounterLabel;
// NOTE(review): "profil" is a misspelling of "profile"; kept because the
// property is public API.
@property (strong, nonatomic) UICollectionView *profilImageCollectionView;
@property (strong, nonatomic) UICollectionView *pageIndicatorCollectionView;
@property (strong, nonatomic) UIView *editViewContainer;
- (void)refreshViewPosition;
- (void)setContinueButtonEnabled:(BOOL)enable;
- (void)setContentEditable:(BOOL)editable;
- (void)setAsLoading:(BOOL)loading;
- (void)animateProgressUploadingImageWithProgress:(CGFloat)progress total:(CGFloat)total;
- (void)showLogoutLoadingView:(BOOL)isShow;
- (void)animateLogoutLoading:(BOOL)isAnimate;
// userFullName is used to render the user's initials when the image is nil
// or cannot be found.
- (void)setProfilePictureWithImage:(UIImage *)image userFullName:(NSString *)userFullName;
- (void)setProfilePictureWithImageURL:(NSString *)imageURL userFullName:(NSString *)userFullName;
- (void)showAccountDetailView;
- (void)showEditAccountView;
- (void)updateGrowingTextViewPosition:(CGFloat)textViewHeight;
- (void)showMultipleProfilePicture;
- (void)setAsLoadingState:(BOOL)isLoading withType:(TAPMyAccountLoadingType)type;
- (void)showLoadingView:(BOOL)isShow;
- (void)setCurrentWordCountWithCurrentCharCount:(NSInteger)charCount;
// NOTE(review): "Porfil" is a misspelling of "Profile"; kept because the
// selector is public API.
- (void)setEditPorfilPictureButtonVisible:(BOOL) isVisible;
@end
NS_ASSUME_NONNULL_END
|
//
// Levenstein.h
// SiemonaDemo
//
// Created by <NAME> on 24.04.17.
// Copyright © 2017 <NAME>. All rights reserved.
//
#import <Foundation/Foundation.h>
@interface NSString (Levenstein)
// Returns the Levenshtein (edit) distance from the receiver to
// `comparisonString`.
// NOTE(review): the implementation is not visible here — the float return
// type suggests the distance may be normalized rather than a raw edit
// count; confirm before relying on exact values.
-(float)levensteinDistanceTo:(NSString *)comparisonString;
@end
|
<filename>src/fixtures/normal/contentScripts/index.ts
// Content script entry point: pulls in the stylesheet applied to Baidu
// pages.
import './baidu/style.less';
// Log message (Chinese): "change the Baidu button color".
console.log(`修改百度按钮颜色`);
|
IN THE UNITED STATES DISTRICT COURT FOR THE EASTERN DISTRICT OF VIRGINIA Alexandria Division UNITED STATES OF AMERICA, IN THE UNITED STATES DISTRICT COURT FOR THE EASTERN DISTRICT OF VIRGINIA Alexandria Division IN RE THE MILLS CORPORATION ) SECURITIES LITIGATION ) ) This Document relates to: ) Civil Action No. 1:06-cv-00077 1:06-cv-00247-LO-TRJ ) 1:06-cv-00265-LO-TRJ ) 1:06-cv-00304-LO-TRJ ) 1:07-cv-00296-LO-TRJ ) 1:06-cv-01446-LO-TRJ ) MEMORANDUM OPINION This is a class action arising out of allegations of widespread accounting fraud perpetrated by the Mills Corporation and related entities, resulting in massive losses incurred by individual and institutional investors alike. For the reasons that follow, the Court hereby APPROVES the Mills, Ernst & Young, and KanAm Settlements as fair, adequate, and reasonable under FED. R. CIV. P. 23(e). Further, the Court APPROVES the Plan of Allocation as fair, adequate, and reasonable. The Court awards reasonable attorneys' fees in the amount of $36,495,000, or 18% of the total Settlement Fund and awards reasonable costs in the amount of $3,094,764.86 pursuant to FED. R. CIV. P. 23(h). Finally, the Court CERTIFIES as a Class for purposes of each of the Settlements those persons who purchased or acquired Mills common and preferred stock from February 27, 2001 through August 10, 2006, and retained securities through October 31, 2005, excluding certain persons and entities to be named in the Judgment and Order. I. Background and Procedural History The Mills Corporation and related entities (collectively "Mills") operated as a Real Estate Investment Trust (REIT) that owned and developed shopping centers
Female Marine recruits ready for pugil stick training earlier this year at Parris Island, S.C.
Earlier this year, the Pentagon lifted the ban on women serving in U.S. combat units – including elite special-operations units like the Navy’s SEALs – if they can clear the physical and mental hurdles. While official Washington has saluted and moved on to other matters, there remains a rumble of opposition, especially evident when chatting with soldiers and Marines. Some argue that the existing standards – which already have kept several women from passing the Marines’ grueling infantry officers course – will basically act as a bar to women in the more demanding kinds of combat.
But Robert Maginnis, a retired Army lieutenant colonel and West Point graduate, fears that won’t happen. He spells out what he sees as the dangers of opening combat billets to women in his new book, Deadly Consequences: How Cowards Are Pushing Women into Combat. His key concern is that, under political pressure, the military will ease its standards, resulting in a less-capable force. Battleland recently conducted this email chat with him.
What’s the key thing you learned in writing Deadly Consequences: How Cowards Are Pushing Women into Combat?
Pentagon brass are kowtowing to their political masters and radical feminists to remove exemptions for women in ground combat in defiance of overwhelming scientific evidence and combat experience.
This craven behavior is terribly dangerous for our armed forces, our national security, and especially the young women who will be placed in harm’s way.
Pentagon officials insist they won’t lower standards to enable more women in combat units. Do you believe them?
I don’t believe them, and neither should the American people.
The Obama Administration and the Pentagon say they will maintain high standards “to ensure that the mission is met with the best-qualified and most capable people, regardless of gender,” in the words of former Secretary of Defense Leon Panetta.
Personnel policy, however, is driven by the “diversity metrics” outlined in the 2011 Report of the Military Leadership Diversity Commission.
Diversity, not military readiness, is the highest priority.
The answer right now is yes.
There is no shortage of able-bodied male volunteers who meet the existing, battle-tested standards for ground combat positions.
So why ask the services to consider changing the standards? Because this has become more about politics than fielding the most capable fighting force.
What do you see as the three biggest risks to letting women serve in the combat arms?
— First, standards will be lowered. As a practical matter, there has to be a certain minimum number of women in combat units for the policy to succeed. That can be accomplished only by “gender norming” the standards for combat service. Lower standards will inevitably degrade combat effectiveness, and the nation will be less secure. There is also good evidence that the policy will harm military recruitment and retention.
— Second, women who serve as ground combatants, whether by choice or under compulsion, will suffer disproportionate physical and psychological harm.
— Third, the already serious problem of sexual assault in the military will get worse. Notwithstanding the Administration’s wishful thinking, this prediction is borne out by the statistics.
What do you think will happen, given the push to let women serve in combat, if the nation ever needs to reinstitute the draft?
Lifting all combat exclusions for women virtually guarantees that the Supreme Court will declare male-only conscription unconstitutional.
And a return to the draft is far more likely than most people realize. The unsustainably high cost of the all-volunteer force, especially with $17 trillion in national debt, and the expected requirements of future military operations will probably lead to a resumption of the draft, however politically unpopular it might be.
When that happens, women will be drafted and forced into ground combat roles.
The Joint Chiefs of Staff endorse the idea of women serving in combat. Are they the “cowards” you refer to in your subtitle?
They demonstrate a cowardice of silence because they know better. The scientific evidence and the lessons of combat experience are utterly one-sided: women are unsuited for ground combat service.
Congress has the constitutional responsibility to set the rules and regulations governing the armed forces (Article I, Section 8).
Unfortunately, Congress is as cowardly as the Joint Chiefs.
Putting women in combat is as historic a change of military policy as anything I can think of, yet neither house has held full hearings on the question in over 20 years.
The politicians are running scared.
You said letting openly gay men and women serve in uniform would be a disaster, and likely lead to problems with recruiting and retention. None of that has come to pass. So why should we pay attention to your arguments about women in combat?
It is much too early to assess the effects of open homosexuality in the military.
The Pentagon has not released any external or internal surveys on recruiting and retention since “don’t ask, don’t tell” was repealed. The Pentagon survey conducted prior to the repeal demonstrated substantial opposition within the ranks, which continues today.
What we do have is the Pentagon-sponsored 2013 Sexual Assault Prevention & Response Office survey, which found a giant increase in unwanted male-on-male sexual contact since the repeal.
According to the New York Times, 13,900 active-duty men and 12,100 active duty women said they had experienced unwanted sexual contact in 2012, the first full year after repeal of the homosexual ban.
The proportion of female victims is much higher, of course, but the Pentagon obviously has a serious problem with male-on-male sexual assaults.
Is there cause and effect here or merely correlation?
It is too early to say, but there is certainly no basis for declaring the new policy on homosexuality a success. |
Steroid injections in the era of COVID-19 low haemoglobin at the time of biopsy. Splenic lesions were targeted using either an intercostal or subcostal approach. Coaxial technique with single capsular puncture using an 18-gauge TruCut needle was performed under aseptic conditions to obtain multiple cores. The biopsy tract was plugged using Gelfoam pledgets (two to four) delivered through the coaxial needle and the specimens fixed in formalin. Results: All four biopsies led to satisfactory specimen retrieval for adequate histological analysis. Diagnosis of one diffuse large B-cell lymphoma, two Hodgkin's lymphomas and one sarcoidosis was made. Ultrasound assessment post-biopsy revealed no significant haematoma. Retrospective review of medical records showed no biopsy-related complications. One patient reported mild pain — defined as four out of ten on a Likert scale. Plugged ultrasound-guided spleen biopsy appears to be a safe and effective alternative for obtaining histological diagnosis, sparing the need for splenectomy. Lymphoma samples were further classified as diagnostic (no further sampling required), partially diagnostic (diagnostic for lymphoma but exact type could not be confirmed) or non-diagnostic. Results: 48 patients were included in the analysis, 46 of whom received a conclusive diagnosis. 73% received a diagnosis of malignancy, including 18 cases of lymphoma. 16 of the lymphoma biopsies were diagnostic. Two were partially diagnostic, needing excision for subtyping. There were no non-diagnostic cases or procedural complications. Conclusion: Our study demonstrates a high diagnostic yield from US-CNB: 96% overall and 89% for lymphoma. Current lymphoma guidance recommends CNB only where excision is impractical or too risky. However, we believe that US-CNB is a safe, minimally invasive procedure with high diagnostic yield that should be considered a first-line method for sampling superficial lymph nodes. low haemoglobin at the time of biopsy.
Splenic lesions were targeted using either an intercostal or subcostal approach. Coaxial technique with single capsular puncture using an 18-gauge TruCut needle was performed under aseptic conditions to obtain multiple cores. The biopsy tract was plugged using Gelfoam pledgets (two to four) delivered through the coaxial needle and the specimens fixed in formalin. Results: All four biopsies led to satisfactory specimen retrieval for adequate histological analysis. Diagnosis of one diffuse large B-cell lymphoma, two Hodgkin's lymphomas and one sarcoidosis was made. Ultrasound assessment post-biopsy revealed no significant haematoma. Retrospective review of medical records showed no biopsy-related complications. One patient reported mild pain e defined as four out of ten on a Likert scale. Conclusion: The study is limited by the small number of cases that demonstrate achievement of adequate tissue sampling without complication. Splenectomy carries a significant risk of morbidity and long-term infection. Plugged ultrasound-guided spleen biopsy appears to be a safe and effective alternative for obtaining histological diagnosis, sparing the need for splenectomy. Analysis of ultrasound-guided superficial lymph node biopsies Authors: Jawaad Farrukh, Bonnie Dhas, Cherian George Category: Interventional radiology Purpose: Histological analysis of abnormal lymph nodes is essential to diagnosis and management. Consequently, obtaining an adequate specimen is crucial. Surgical excision biopsy is the traditional method of tissue sampling. However, this introduces added risk and cost compared with less invasive alternatives such as image-guided core needle biopsy (CNB). The aim of this study was to review our ultrasound-guided superficial lymph node biopsy (US-CNB) service with an emphasis on its diagnostic yield. Methods and materials: We searched our records for US-CNB procedures performed over 12 months at our trust. 
Only inguinal, axillary and supraclavicular node CNBs were included. Patient demographics, procedural notes and histological results were reviewed. Specimens were categorised by histological diagnosis. Lymphoma samples were further classified as diagnostic (no further sampling required), partially diagnostic (diagnostic for lymphoma but exact type could not be confirmed) or non-diagnostic. Results: 48 patients were included in the analysis, 46 of whom received a conclusive diagnosis. 73% received a diagnosis of malignancy, including 18 cases of lymphoma. 16 of the lymphoma biopsies were diagnostic. Two were partially diagnostic, needing excision for subtyping. There were no non-diagnostic cases or procedural complications. Conclusion: Our study demonstrates a high diagnostic yield from US-CNB: 96% overall and 89% for lymphoma. Current lymphoma guidance recommends CNB only where excision is impractical or too risky. However, we believe that US-CNB is a safe, minimally invasive procedure with high diagnostic yield that should be considered a first-line method for sampling superficial lymph nodes. Steroid injections in the era of COVID-19 Authors: Sina Motahariasl, Rajeev Gupta, Nima Motahariasl Category: Musculoskeletal Purpose: Intra-and peri-articular steroid injections are commonly used to treat soft tissue and joint pain. Though safe, the systemic absorption of these injections warrants attention to potential side effects like immunosuppression. Since the start of the COVID-19 pandemic, due to the potential threat of immunosuppression and COVID-19 infection, regulatory bodies have advised these injections are done cautiously. Our aim is to assess the safety of intra-and peri-articular steroid injections during the COVID-19 pandemic. 
Methods and materials: Prospective follow-up of patients receiving intra-or peri-articular steroid injections (either 80 mg or 40 mg of Depo-Medrone) by one radiology consultant from September 2020 to February 2021 at a large London hospital. The patients were aged between 20 and 90 and no patient was excluded on the basis of comorbidities. The patients were telephoned at least two weeks post-injection to assess for symptom relief and complications. Patients were specifically asked about new breathlessness, fever, cough, anosmia and loss of taste. Results: 70 patients with 75 injections were included. Average follow-up time was 28 days and 77% of joints had reduced pain. Eleven patients showed symptoms of COVID-19. Two patients required hospitalisation for COVID but were subsequently discharged home. All patients were alive at followup to our knowledge. Conclusion: These results illustrate the safety of performing intra-and peri-articular injections for musculoskeletal pain during the recent COVID pandemic. In our experience, patients should not be excluded based on age or comorbidities. Significance of abnormal bone marrow signal on spine MRI Authors: Muhammad Butt, Ahmed Musa, Serena Virdi, Maya Jafari Category: Musculoskeletal Purpose: Increasing use of magnetic resonance imaging (MRI) has led to more patients being identified as having abnormal bone marrow signal (BMS). Our aim is to evaluate the incidence and significance of abnormal BMS identified on MRI of the spine in our institution. Methods and materials: This is a retrospective observational study of patients aged 18 and above undergoing MRI of the spine from January to March 2018. The radiology reports were reviewed. Follow-up imaging of up to two years and laboratory work-up for patients with indeterminate BMS were reviewed. Results: A total of 1,883 patients underwent MRI of the spine. MRI was abandoned in 26 patients. No comments were made about the BMS in 1,041 patients. 
The appearance of the BMS was reported in 816 patients e 642 normal and 174 abnormal. The three most common causes of abnormal BMS were metastases from a known primary (35e20%), haemangiomas (28e16%) and Modic changes (27e16%). Fourteen patients had indeterminate BMS. Following further assessment, significant oncological diagnoses were identified in four patients (2.3%) e two monoclonal gammopathy of uncertain significance, one non-Hodgkin's lymphoma and one breast cancer. Conclusion: Abnormal BMS is a common finding on MRI of the spine. In most cases, a cause will be identified from the history and/or the appearance on imaging. A significant oncological diagnosis needs to be considered in patients with indeterminate BMS on MRI. Early limited MRI in the management of suspected scaphoid fracture: A single-centre experience Authors: Ammaarah Said, Anika Choraria, Kannan Rajesparan Category: MSK Purpose: Blood supply to the scaphoid bone puts it at risk of avascular necrosis when fractured with resultant potential morbid sequelae. To avoid the complications of missed scaphoid fracture, radio-occult suspected scaphoid fractures have traditionally been managed as fractures with immobilisation, repeat radiographs and multiple clinical follow-ups. This management strategy has drawbacks e prolonged immobilisation leading to muscle pain, stiffness and loss of function requiring hand therapy; and Abstract / Clinical Radiology 77 e20ee30 e23 |
from defs import *
from .utils import *
from .entity import *
# Transcrypt compiler directives: stop Transcrypt from aliasing these
# Python names in the generated JavaScript so they keep their native JS
# meaning (these identifiers collide with Transcrypt's default aliases).
__pragma__('noalias', 'name')
__pragma__('noalias', 'undefined')
__pragma__('noalias', 'Infinity')
__pragma__('noalias', 'keys')
__pragma__('noalias', 'get')
__pragma__('noalias', 'set')
__pragma__('noalias', 'type')
__pragma__('noalias', 'update')
def register_commands(commands):
    """Register the ``constructionSite`` command in the given command table.

    Mutates *commands* in place, mapping the command name to a spec dict
    with its required body parts (none) and its per-tick ``loop`` handler.
    """
    # Stray trailing semicolon removed; valid but non-idiomatic Python.
    commands["constructionSite"] = {
        "required_body_parts": [],
        "loop": cmd_constructionSite,
    }
def cmd_constructionSite(entity, command_stack, data_stack):
    # Per-tick handler for the "constructionSite" command.
    # NOTE(review): this looks like a stub — job_cmd_stack/job_data_stack
    # are created but never used, and the function always returns False
    # (presumably meaning "command not finished"); confirm intended.
    if entity.type == STRUCTURE_TOWER:
        # create job posting for this
        job_cmd_stack = []
        job_data_stack = []
    return False
|
<gh_stars>1-10
// Dependencies
import { unlinkSync } from 'fs'
// Best-effort synchronous file removal: attempt to unlink `path` and
// swallow any error (e.g. the file does not exist).
export function tryDeletingFile(path) {
  try {
    unlinkSync(path)
  } catch {
    // ignored: deletion is best-effort
  }
}
|
<gh_stars>1-10
#pragma once
#include <cstdint>
#include <vector>
#include <deque>
#include <list>
#include <set>
#include <string>
#include <sstream>
#include <limits>
#include <algorithm>
#include "UtilityCommon.hpp"
#include "Random.h"
BEG_NAMESPACE_CBTEK_UTILITY
template <typename T>
class CBTEK_UTILITY_DLL ObjectList : public std::vector<T>
{
    // NOTE(review): publicly inherits std::vector<T>, which has no virtual
    // destructor; never destroy an ObjectList through a std::vector<T>*.
public:
    /**
     * @brief Constructs an empty list.
     */
    ObjectList();
    /**
     * @brief Stream-style append of a single value.
     * @param value Value to append.
     * @return Reference to this list for chained << calls.
     */
    ObjectList & operator<<(const T & value);
    /**
     * @brief Stream-style append of all elements of another list.
     * @param value List whose elements are appended.
     * @return Reference to this list for chained << calls.
     */
    ObjectList & operator<<(const ObjectList<T> & value);
    /**
     * @brief Tests whether an element equal to @p value exists (const).
     * @param value Value to search for.
     * @return true if a matching element is found.
     */
    bool contains(const T & value) const;
    /**
     * @brief Non-const overload of contains().
     * @param value Value to search for.
     * @return true if a matching element is found.
     */
    bool contains(const T & value);
    /**
     * @brief Index of the last element.
     * @return Presumably size() - 1; behavior on an empty list is not
     *         visible here (size_t underflow is possible) — confirm.
     */
    size_t getLastIndex() const;
    /**
     * @brief Removes the element at @p index.
     * @param index Position of the element to remove.
     */
    void removeAt(size_t index);
    /**
     * @brief Removes the first element.
     * @return Presumably false when the list is empty — confirm.
     */
    bool removeFirst();
    /**
     * @brief Removes the last element.
     * @return Presumably false when the list is empty — confirm.
     */
    bool removeLast();
    /**
     * @brief Inserts @p value at the front of the list.
     * @param value Value to prepend.
     */
    void pushFront(const T & value);
    /**
     * @brief Removes the element at @p index and returns it.
     * @param index Position of the element to take.
     * @return The removed element.
     */
    T takeAt(size_t index);
    /**
     * @brief Appends @p value to the list.
     * @param value Value to append.
     * @return Presumably the index of the newly added element — confirm.
     */
    size_t add(const T & value);
    /**
     * @brief Appends every element of @p values to this list.
     * @param values Elements to append.
     */
    void addValues(const ObjectList<T> & values);
    /**
     * @brief Finds the index of the first element equal to @p value.
     * @param value Value to search for.
     * @return Matching index; presumably npos when not found — confirm.
     */
    size_t indexOf(const T & value);
    /**
     * @brief Inserts @p valueToBeInserted after the first occurrence of
     *        @p insertAfterThisValue.
     * @return Presumably false when the anchor value is absent — confirm.
     */
    bool insertAfterValue(const T & insertAfterThisValue, const T & valueToBeInserted);
    /**
     * @brief Inserts @p valueToBeInserted before the first occurrence of
     *        @p insertBeforeThisValue.
     * @return Presumably false when the anchor value is absent — confirm.
     */
    bool insertBeforeValue(const T & insertBeforeThisValue, const T & valueToBeInserted);
    /**
     * @brief Inserts @p valueToBeInserted after position @p index.
     * @return Presumably false when @p index is out of range — confirm.
     */
    bool insertAfterIndex(size_t index, const T & valueToBeInserted);
    /**
     * @brief Inserts @p valueToBeInserted before position @p index.
     * @return Presumably false when @p index is out of range — confirm.
     */
    bool insertBeforeIndex(size_t index, const T & valueToBeInserted);
    /**
     * @brief Returns a reference to an element selected with the shared
     *        ms_RANDOM generator.
     */
    const T & getRandomValue() const;
    /**
     * @brief Sorts the elements (presumably ascending — confirm).
     */
    void sort();
    /**
     * @brief Sorts the elements in reverse order (presumably descending —
     *        confirm).
     */
    void reverseSort();
    /**
     * @brief Returns the next element tracked by the internal m_next
     *        cursor; wrap-around/end semantics are not visible here —
     *        confirm in the implementation.
     */
    T getNext();
    /**
     * @brief Copies the contents into a plain std::vector.
     */
    std::vector<T> toStdVector() const;
    /**
     * @brief Copies the contents into a std::list.
     */
    std::list<T> toStdList() const {return std::list<T>(this->begin(),this->end());}
    /**
     * @brief Copies the contents into a std::set (duplicates collapse and
     *        elements become ordered).
     */
    std::set<T> toStdSet() const {return std::set<T>(this->begin(),this->end());}
    /**
     * @brief Copies the contents into a std::deque.
     */
    std::deque<T> toStdDeque() const {return std::deque<T>(this->begin(),this->end());}
private:
    // Shared random source used by getRandomValue().
    static Random ms_RANDOM;
    // Cursor used by getNext().
    size_t m_next;
    // Sentinel index (presumably "not found", mirroring std::string::npos).
    static size_t npos;
};
// Appends every element of 'values' and returns *this for chaining.
//
// Fixes two defects in the previous version:
//  * the loop condition used the comma operator ("a1,values.size()"),
//    which evaluates to values.size() -- a1 was never compared, so the
//    loop never terminated for a non-empty argument;
//  * the function returned a reference but had no return statement
//    (undefined behaviour).
// NOTE: appending a list to itself would still never terminate, since
// each add() grows the source being iterated.
template <typename T>
ObjectList<T> & ObjectList<T>::operator<<(const ObjectList<T> & values)
{
    for (size_t a1 = 0; a1 < values.size(); ++a1)
    {
        this->add(values[a1]);
    }
    return (*this);
}
// Shared random-number generator backing getRandomValue().
template <typename T>
Random ObjectList<T>::ms_RANDOM = Random();
// Creates an empty list; the getNext() round-robin cursor starts at 0.
template <typename T>
ObjectList<T>::ObjectList()
    : m_next(0)
{
}
// Returns a randomly selected element.
template <typename T>
const T & ObjectList<T>::getRandomValue() const
{
    // NOTE(review): on an empty list size()-1 wraps to SIZE_MAX and the
    // subsequent operator[] is undefined behaviour -- confirm callers
    // never invoke this on an empty list.
    // NOTE(review): assumes Random::next(n) yields a value in [0, n].
    size_t index = ms_RANDOM.next(this->size()-1);
    return this->operator [](index);
}
// Sorts elements ascending using operator< (std::sort, not stable).
template <typename T>
void ObjectList<T>::sort()
{
    std::sort(this->begin(),this->end());
}
// Erases the element at 'index'; out-of-range indices are ignored.
template <typename T>
void ObjectList<T>::removeAt(size_t index)
{
    if (index >= this->size())
    {
        return;
    }
    this->erase(this->begin() + index);
}
// Removes and returns the element at 'index'.
template <typename T>
T ObjectList<T>::takeAt(size_t index)
{
    // NOTE(review): T must be default-constructible; for an out-of-range
    // index the default-initialised t is returned, which is indeterminate
    // for trivial types such as int.
    T t;
    if (index<this->size())
    {
        t= this->operator [](index);
        this->erase(this->begin()+index);
    }
    return t;
}
// Sorts elements descending using operator<.
template <typename T>
void ObjectList<T>::reverseSort()
{
    // Sorting the reversed view yields descending order in one pass.
    std::sort(this->rbegin(), this->rend());
}
// Round-robin accessor: returns elements in order, wrapping back to the
// front after the last one.
//
// Fix: previously an empty list fell through to operator[](0), which is
// undefined behaviour; a default-constructed T is now returned instead
// (T must be default-constructible, as takeAt() already requires).
template <typename T>
T ObjectList<T>::getNext()
{
    if (this->size() == 0)
    {
        return T();
    }
    if (m_next >= this->size())
    {
        m_next = 0;
    }
    T value = this->operator [](m_next);
    m_next++;
    return value;
}
// "Not found" sentinel returned by indexOf() and getLastIndex().
template <typename T>
size_t ObjectList<T>::npos = std::numeric_limits<size_t>::max();
// Stream-style append; delegates to add() and allows chaining.
template <typename T>
ObjectList<T> &ObjectList<T>::operator <<(const T &value)
{
    this->add(value);
    return (*this);
}
// Returns the index of the first element equal to 'value', or npos.
template <typename T>
size_t ObjectList<T>::indexOf(const T & value)
{
    typename ObjectList<T>::iterator it = std::find(this->begin(), this->end(), value);
    if (it == this->end())
    {
        return npos;
    }
    return static_cast<size_t>(std::distance(this->begin(), it));
}
// True when at least one element compares equal (==) to 'value'.
template <typename T>
bool ObjectList<T>::contains(const T &value)
{
    return std::find(this->begin(), this->end(), value) != this->end();
}
// True when at least one element compares equal (==) to 'value' (const overload).
template <typename T>
bool ObjectList<T>::contains(const T &value) const
{
    return std::find(this->begin(), this->end(), value) != this->end();
}
// Index of the final element, or npos for an empty list.
template<typename T>
size_t ObjectList<T>::getLastIndex() const
{
    if (this->empty())
    {
        return ObjectList::npos;
    }
    return this->size() - 1;
}
// Drops the final element; reports whether anything was removed.
template<typename T>
bool ObjectList<T>::removeLast()
{
    if (this->empty())
    {
        return false;
    }
    this->pop_back();
    return true;
}
// Drops the first element; reports whether anything was removed.
template<typename T>
bool ObjectList<T>::removeFirst()
{
    if (this->empty())
    {
        return false;
    }
    this->erase(this->begin());
    return true;
}
// Prepends 'value' (O(n) element shift, as for any vector front insert).
template<typename T>
void ObjectList<T>::pushFront(const T & value)
{
    this->insert(this->begin(), value);
}
// Appends 'value' and returns the index it was stored at.
template<typename T>
size_t ObjectList<T>::add(const T & value)
{
    this->push_back(value);
    return this->size()-1;
}
// Appends every element of 'values'.
//
// Fix: the previous loop called unqualified push_back(...), which is not
// looked up in the dependent base std::vector<T> under two-phase name
// lookup and fails to compile on conforming compilers (it requires
// this->push_back). A ranged insert fixes this and also reserves the
// target size in one step instead of growing element by element.
template<typename T>
void ObjectList<T>::addValues(const ObjectList<T> & values)
{
    this->insert(this->end(), values.begin(), values.end());
}
// Inserts 'valueToBeInserted' directly after the first occurrence of
// 'insertAfterThisValue'; returns false when that value is absent.
template<typename T>
bool ObjectList<T>::insertAfterValue(const T & insertAfterThisValue, const T & valueToBeInserted)
{
    // indexOf() returns npos (>= size()) when the value is absent, so the
    // range check below doubles as a found-check.
    size_t index = indexOf(insertAfterThisValue);
    if (index>=this->size())return false;
    return insertAfterIndex(index,valueToBeInserted);
}
// Inserts 'valueToBeInserted' before the first occurrence of
// 'insertBeforeThisValue'; returns false when that value is absent.
template<typename T>
bool ObjectList<T>::insertBeforeValue(const T & insertBeforeThisValue, const T & valueToBeInserted)
{
    // indexOf() returns npos (>= size()) when the value is absent, so the
    // range check below doubles as a found-check.
    size_t index = indexOf(insertBeforeThisValue);
    if (index>=this->size())return false;
    return insertBeforeIndex(index,valueToBeInserted);
}
// Inserts 'valueToBeInserted' after position 'index'; returns false when
// the position is out of range.
template<typename T>
bool ObjectList<T>::insertAfterIndex(size_t index, const T & valueToBeInserted)
{
    // Inserting in the middle and appending after the last element are the
    // same vector::insert call: position size() means "append".
    size_t position = index + 1;
    if (position > this->size())
    {
        return false;
    }
    this->insert(this->begin() + position, valueToBeInserted);
    return true;
}
// Inserts 'valueToBeInserted' directly before the element at 'index';
// returns false when 'index' is out of range.
//
// Fix: the previous implementation inserted at (index - 1) -- one
// position too early, i.e. before the *predecessor* of the target
// element -- and rejected index 0 outright, so nothing could ever be
// inserted before the first element. vector::insert at begin()+index
// places the new value immediately before the element at 'index'.
template<typename T>
bool ObjectList<T>::insertBeforeIndex(size_t index, const T & valueToBeInserted)
{
    if (index < this->size())
    {
        this->insert(this->begin() + index, valueToBeInserted);
        return true;
    }
    return false;
}
//typedefs
// Convenience aliases for ObjectList instantiations over the common
// fixed-width types, plus iterator shorthands.
// NOTE(review): Float64List is declared in the middle of the integer
// aliases and has no matching iterator typedefs below -- possibly an
// oversight in the original header.
typedef ObjectList<std::uint8_t> UInt8List;
typedef ObjectList<std::uint16_t> UInt16List;
typedef ObjectList<std::uint32_t> UInt32List;
typedef ObjectList<std::uint64_t> UInt64List;
typedef ObjectList<std::int8_t> Int8List;
typedef ObjectList<std::int16_t> Int16List;
typedef ObjectList<double> Float64List;
typedef ObjectList<std::int32_t> Int32List;
typedef ObjectList<std::int64_t> Int64List;
typedef ObjectList<size_t> SizeTList;
typedef UInt8List::iterator UInt8ListIter;
typedef UInt16List::iterator UInt16ListIter;
typedef UInt32List::iterator UInt32ListIter;
typedef UInt64List::iterator UInt64ListIter;
typedef UInt8List::const_iterator UInt8ListConstIter;
typedef UInt16List::const_iterator UInt16ListConstIter;
typedef UInt32List::const_iterator UInt32ListConstIter;
typedef UInt64List::const_iterator UInt64ListConstIter;
typedef Int8List::iterator Int8ListIter;
typedef Int16List::iterator Int16ListIter;
typedef Int32List::iterator Int32ListIter;
typedef Int64List::iterator Int64ListIter;
typedef SizeTList::iterator SizeTListIter;
typedef Int8List::const_iterator Int8ListConstIter;
typedef Int16List::const_iterator Int16ListConstIter;
typedef Int32List::const_iterator Int32ListConstIter;
typedef Int64List::const_iterator Int64ListConstIter;
typedef SizeTList::const_iterator SizeTListConstIter;
END_NAMESPACE_CBTEK_UTILITY
|
#ifndef ALEPH_TOPOLOGY_SPINE_HH__
#define ALEPH_TOPOLOGY_SPINE_HH__
#include <aleph/topology/Intersections.hh>
#include <algorithm>
#include <unordered_map>
#include <unordered_set>
#include <set>
#include <vector>
namespace aleph
{
namespace topology
{
// Contains the 'dumbest' implementation for calculating the spine, i.e.
// without any optimizations or skips. This will be used as the baseline
// for comparisons, but also to check the correctness of the approach.
namespace dumb
{
/**
  Checks whether a simplex in a simplicial complex is principal, i.e.
  whether it is not a proper face of any other simplex in K.
*/

template <class SimplicialComplex, class Simplex> bool isPrincipal( const Simplex& s, const SimplicialComplex& K )
{
  // Individual vertices have no free face, so they are never treated as
  // principal here.
  if( s.dimension() == 0 )
    return false;

  // Scan one dimension above s: if any such simplex contains every vertex
  // of s, then s is a proper face and hence not principal. As in the
  // original formulation, comparing the *size* of the intersection is
  // sufficient for the face test (the complex is assumed valid).
  auto cofaceRange = K.range( s.dimension() + 1 );
  for( auto it = cofaceRange.first; it != cofaceRange.second; ++it )
  {
    if( sizeOfIntersection( s, *it ) == s.size() )
      return false;
  }

  return true;
}
/**
  Checks whether a simplex in a simplicial complex is admissible, i.e.
  the simplex is *principal* and has at least one free face.

  NOTE(review): despite the boolean-sounding name, this returns the free
  face itself (or an empty simplex when s is not admissible).
*/

template <class SimplicialComplex, class Simplex> Simplex isAdmissible( const Simplex& s, const SimplicialComplex& K )
{
  if( !isPrincipal(s,K) )
    return Simplex();

  // Check whether a free face exists ----------------------------------
  //
  // This involves iterating over all simplices that have the *same*
  // dimension as s, because we are interested in checking whether a
  // simplex shares a face of s.

  std::vector<Simplex> faces( s.begin_boundary(), s.end_boundary() );
  std::vector<bool> admissible( faces.size(), true );

  std::size_t i = 0;

  // Candidate cofaces of each boundary face: all simplices of the same
  // dimension as s, i.e. "one dimension up" from the faces themselves.
  auto itPair = K.range( s.dimension() );

  for( auto&& face : faces )
  {
    for( auto it = itPair.first; it != itPair.second; ++it )
    {
      auto&& t = *it;

      // We do not have to check for intersections with the original
      // simplex from which we started---we already know that we are
      // a face.
      if( t != s )
      {
        if( sizeOfIntersection(face,t) == face.size() )
        {
          // The face is shared with another simplex, hence not free.
          admissible[i] = false;
          break;
        }
      }
    }

    ++i;
  }

  // Return the free face if possible; as usual, an empty return value
  // indicates that we did not find such a face. Note that the call to
  // the `find()` function prefers the lexicographically smallest one,
  // which ensures consistency.

  auto pos = std::find( admissible.begin(), admissible.end(), true );

  if( pos == admissible.end() )
    return Simplex();
  else
    return faces.at( std::distance( admissible.begin(), pos ) );
}
/**
  Calculates all principal faces of a given simplicial complex and
  returns them.

  The result maps every admissible simplex (principal *and* possessing a
  free face) to one such free face.
*/

template <class SimplicialComplex> std::unordered_map<typename SimplicialComplex::value_type, typename SimplicialComplex::value_type> principalFaces( const SimplicialComplex& K )
{
  using Simplex = typename SimplicialComplex::value_type;

  // NOTE(review): L is only read below; the deep copy of K looks
  // unnecessary -- confirm before removing.
  auto L = K;

  std::unordered_map<Simplex, Simplex> admissible;

  // Step 1: determine free faces --------------------------------------
  //
  // This first checks which simplices have at least one free face,
  // meaning that they may be potentially admissible.

  for( auto it = L.begin(); it != L.end(); ++it )
  {
    // Vertices have no boundary, hence no free face.
    if( it->dimension() == 0 )
      continue;

    // The range of the complex M is sufficient because we have
    // already encountered all lower-dimensional simplices that
    // precede the current one given by `it`.
    //
    // This complex will be used for testing free faces.
    SimplicialComplex M( L.begin(), it );

    // FIXME:
    //
    // In case of equal data values, the assignment from above does
    // *not* work and will result in incorrect candidates.
    // NOTE(review): this assignment makes the partial complex M above a
    // dead value -- the free-face test effectively runs against all of L.
    M = L;

    bool hasFreeFace = false;
    Simplex freeFace = Simplex();

    for( auto itFace = it->begin_boundary(); itFace != it->end_boundary(); ++itFace )
    {
      bool isFace = false;
      for( auto&& simplex : M )
      {
        // Only direct cofaces (one dimension up) other than the current
        // simplex itself can make this boundary face non-free.
        if( itFace->dimension() + 1 == simplex.dimension() && simplex != *it )
        {
          // The current face must *not* be a face of another simplex in
          // the simplicial complex.
          if( intersect( *itFace, simplex ) == *itFace )
          {
            isFace = true;
            break;
          }
        }
      }

      hasFreeFace = !isFace;
      if( hasFreeFace )
      {
        freeFace = *itFace;
        break;
      }
    }

    if( hasFreeFace )
      admissible.insert( std::make_pair( *it, freeFace ) );
  }

  // Step 2: determine principality ------------------------------------
  //
  // All simplices that are faces of higher-dimensional simplices are
  // now removed from the map of admissible simplices.

  for( auto&& s : L )
  {
    for( auto itFace = s.begin_boundary(); itFace != s.end_boundary(); ++itFace )
      admissible.erase( *itFace );
  }

  return admissible;
}
/**
  Performs an iterated elementary simplicial collapse until *all* of the
  admissible simplices have been collapsed. This leads to the *spine* of
  the simplicial complex.

  Notice that this is the *dumbest* possible implementation, as no state
  will be stored, and the search for new principal faces starts fresh in
  every iteration.

  This implementation is useful to check improved algorithms.

  @see <NAME>, "Algorithmic Topology and Classification of 3-Manifolds"
*/

template <class SimplicialComplex> SimplicialComplex spine( const SimplicialComplex& K )
{
  auto L = K;

  auto admissible = principalFaces( L );

  // Each iteration performs one elementary collapse: remove a principal
  // simplex together with its free face, then recompute the admissible
  // set from scratch (no incremental bookkeeping).
  while( !admissible.empty() )
  {
    auto s = admissible.begin()->first;
    auto t = admissible.begin()->second;

    L.remove_without_validation( s );
    L.remove_without_validation( t );

    admissible = principalFaces( L );
  }

  return L;
}
} // namespace dumb
// ---------------------------------------------------------------------
// From this point on, only 'smart' implementations of the spine
// calculation will be given. Various optimizations will be used
// in order to improve the run-time.
// ---------------------------------------------------------------------
/**
  Stores coface relationships in a simplicial complex. Given a simplex
  \f$\sigma\f$, the map contains all of its cofaces. Note that the map
  will be updated upon every elementary collapse.
*/

template <class Simplex> using CofaceMap = std::unordered_map<Simplex, std::unordered_set<Simplex> >;

// Builds the coface map of K: for every simplex, the set of simplices
// that have it as a boundary face (direct cofaces only).
// NOTE(review): this uses SimplicialComplex::ValueType whereas the
// functions in namespace dumb use ::value_type -- confirm the complex
// type really exposes both spellings.
template <class SimplicialComplex> CofaceMap<typename SimplicialComplex::ValueType> buildCofaceMap( const SimplicialComplex& K )
{
  using Simplex = typename SimplicialComplex::ValueType;
  using CofaceMap = CofaceMap<Simplex>;

  CofaceMap cofaces;

  for( auto&& s : K )
  {
    // Adding an *empty* list of cofaces (so far) for this simplex
    // simplifies the rest of the code because there is no need to
    // check for the existence of a simplex.
    if( cofaces.find(s) == cofaces.end() )
      cofaces[s] = {};

    for( auto itFace = s.begin_boundary(); itFace != s.end_boundary(); ++itFace )
      cofaces[ *itFace ].insert( s );
  }

  return cofaces;
}
/**
  Checks whether a given simplex is *principal* with respect to its
  coface relations. A principal simplex is not the proper face of a
  simplex in the complex. Hence, it has no cofaces.
*/

template <class Simplex> bool isPrincipal( const CofaceMap<Simplex>& cofaces, const Simplex& s )
{
  // NOTE(review): at() throws std::out_of_range if s is unknown to the
  // map, i.e. was never registered by buildCofaceMap().
  return cofaces.at( s ).empty();
}
/**
  Given a *principal* simplex, i.e. a simplex that is not a proper face
  of another simplex in the complex, returns the first free face of the
  simplex, i.e. a face that only has the given simplex as a coface.

  If no such face is found, the empty simplex is returned.
*/

template <class Simplex> Simplex getFreeFace( const CofaceMap<Simplex>& cofaces, const Simplex& s )
{
  // Non-principal simplices cannot contribute a free face by definition.
  if( !isPrincipal( cofaces, s ) )
    return Simplex();

  // Check whether a free face exists ----------------------------------
  for( auto itFace = s.begin_boundary(); itFace != s.end_boundary(); ++itFace )
  {
    // A face is free iff its only coface is s itself.
    auto&& allCofaces = cofaces.at( *itFace );
    if( allCofaces.size() == 1 && allCofaces.find( s ) != allCofaces.end() )
      return *itFace;
  }

  return Simplex();
}
/**
  Gets *all* principal simplices along with their free faces and stores them in
  a map. The map contains the principal simplex as its key, and the *free face*
  as its value.
*/

template <class SimplicialComplex> std::unordered_map<typename SimplicialComplex::value_type, typename SimplicialComplex::value_type> getPrincipalFaces( const CofaceMap<typename SimplicialComplex::ValueType>& cofaces, const SimplicialComplex& K )
{
  using Simplex = typename SimplicialComplex::value_type;

  std::unordered_map<Simplex, Simplex> admissible;

  // Fix: the previous version deep-copied the whole complex
  // ("auto L = K;") without ever using the copy -- pure wasted work.
  //
  // A simplex is admissible iff it is principal (no cofaces) and has at
  // least one free face; getFreeFace() reports that face, or an empty
  // simplex when s is not admissible.
  for( auto&& s : K )
  {
    auto freeFace = getFreeFace( cofaces, s );
    if( freeFace )
      admissible.insert( std::make_pair( s, freeFace ) );
  }

  return admissible;
}
/**
  Performs an iterated elementary simplicial collapse until *all* of the
  admissible simplices have been collapsed. This leads to the *spine* of
  the simplicial complex.

  Unlike the baseline in namespace dumb, this version keeps the coface
  relation up to date across collapses instead of recomputing everything.

  @see <NAME>, "Algorithmic Topology and Classification of 3-Manifolds"
*/

template <class SimplicialComplex> SimplicialComplex spine( const SimplicialComplex& K )
{
  auto L = K;
  auto cofaces = buildCofaceMap( L );
  auto admissible = getPrincipalFaces( cofaces, L );

  while( !admissible.empty() )
  {
    // One elementary collapse: s is a principal simplex, t its free face.
    auto s = admissible.begin()->first;
    auto t = admissible.begin()->second;

    L.remove_without_validation( s );
    L.remove_without_validation( t );

    admissible.erase( s );

    // Predicate for removing s and t, the principal simplex with its
    // free face, from the coface data structure. This is required in
    // order to keep the coface relation up-to-date.

    using Simplex = typename SimplicialComplex::ValueType;

    auto removeSimplices = [&cofaces,&s,&t] ( const Simplex& sigma )
    {
      cofaces.at(sigma).erase(s);
      cofaces.at(sigma).erase(t);
    };

    std::for_each( s.begin_boundary(), s.end_boundary(), removeSimplices );
    std::for_each( t.begin_boundary(), t.end_boundary(), removeSimplices );

    // Both s and t do not have to be stored any more because they
    // should not be queried again.
    cofaces.erase(s);
    cofaces.erase(t);

    // New simplices ---------------------------------------------------
    //
    // Add new admissible simplices that may potentially have been
    // spawned by the removal of s.

    // 1. Add all faces of the principal simplex, as they may
    //    potentially become admissible again.

    std::vector<Simplex> faces( s.begin_boundary(), s.end_boundary() );

    std::for_each( faces.begin(), faces.end(),
      [&t, &admissible, &cofaces] ( const Simplex& s )
      {
        // t was just removed, so it must not be re-inserted.
        if( t != s )
        {
          auto face = getFreeFace( cofaces, s );
          if( face )
            admissible.insert( std::make_pair( s, face ) );
        }
      }
    );

    // 2. Add all faces of the free face, as they may now themselves
    //    become admissible.

    faces.assign( t.begin_boundary(), t.end_boundary() );

    std::for_each( faces.begin(), faces.end(),
      [&admissible, &cofaces] ( const Simplex& s )
      {
        auto face = getFreeFace( cofaces, s );
        if( face )
          admissible.insert( std::make_pair( s, face ) );
      }
    );

    // The heuristic above is incapable of detecting *all* principal
    // faces of the complex because this may involve searching *all*
    // co-faces. Instead, it is easier to fill up the admissible set
    // here.
    if( admissible.empty() )
      admissible = getPrincipalFaces( cofaces, L );
  }

  return L;
}
} // namespace topology
} // namespace aleph
#endif
|
<filename>src/onos_grpc_demo/api/__init__.py<gh_stars>0
""" Application commands common to all interfaces.
"""
from .device import main as device
from .link import main as link
from .group import main as group
from .application import main as application
# Public API of this package; PEP 8 recommends __all__ be a list of strings
# (the previous bare tuple worked but is unconventional).
__all__ = ["device", "link", "group", "application"]
|
def on_load(self, **kwargs: Any) -> None:
    """Migrate a legacy 'component-registry' instance to its new schema."""
    # The legacy 'location_type' field determines the replacement schema;
    # anything other than URL/Directory falls back to a local file catalog.
    schema_by_location = {
        "URL": "url-catalog",
        "Directory": "local-directory-catalog",
    }
    location_type = self.metadata.pop('location_type')
    self.schema_name = schema_by_location.get(location_type, "local-file-catalog")
    self.version = 1
    getLogger('ServerApp').info(f"Migrating 'component-registry' instance '{self.name}' "
                                f"to schema '{self.schema_name}'...")
    MetadataManager(schemaspace="component-registries").update(self.name, self, for_migration=True)
#include<bits/stdc++.h>
using namespace std;
int n, a, b;

// Reads n, a, b from stdin and prints min(b + 1, n - a): the answer is
// capped both by the allowance b + 1 and by the n - a remaining slots.
// Fix: removed the unused global `string k` left over from a template.
int main()
{
    cin >> n >> a >> b;
    cout << min(b + 1, n - a);
    return 0;
}
|
Turkish Publications in Science Citation Index and Citation Index-Expanded Indexed Journals in the Field of Anaesthesiology: A Bibliographic Analysis. OBJECTIVE Our study aimed to assess Turkish publications in Science Citation Index (SCI) and Science Citation Index Expanded (SCI-E) indexed journals in the field of 'anaesthesiology'. METHODS Journals related to 'anaesthesiology' in the Science Citation Index-Expanded database of 'Thomson Reuter Web of Science' were searched. The search engine of Institute for Scientific Information (ISI) Web of Science (WoS) was used in the advanced mode by typing 'IS=ISSN number' to identify publications in the journal. By typing 'IS=ISSN number AND CU=Turkey', Turkish papers on anaesthesiology were found. If Turkish and non-Turkish authors had collaborated, the article was included in the search when the corresponding author had provided a Turkey-based address. The catalogue information and statistics were used to determine Turkish publications as the percentage of total publications and the annual mean number of Turkish publications. In WoS, 'SU=anesthesiology' was used to determine the number, country, year and topic distributions of publications from 1975 to date and within the last 10 years. The citation numbers and h-indices were determined based on the country for publications within the last 10 years. RESULTS From 1975 to the early 2000s Turkey was 20th in the list of countries with highest number of publications on anaesthesiology, however in the last 10 years Turkey moved up to 18th place. Its mean citation number has been 4.64, and it remains the 2nd lowest country pertaining to citations among the 22 countries with the most number of publications. According to the percentage of publications in the field of anaesthesiology, the journals with highest rate of Turkish publications were Revista Brasileira de Anestesiologia, European Journal of Anaesthesiology and Journal of Anesthesia. 
CONCLUSION In the field of anaesthesiology, the highest number of articles from Turkey was published in Revista Brasileira de Anestesiologia, European Journal of Anaesthesiology and Journal of Anesthesia. The mean citation number from these publications was 4.64. |
Emotional reactivity and expressivity in young children with sex chromosome trisomies: evidence from psychophysiological and observational data. Although sex chromosomal trisomies (SCT) in children are highly prevalent and associated with an increased risk for neurodevelopmental difficulties including socio-emotional problems, little is known about underlying mechanisms that could drive this risk. Studying emotional reactivity and expressivity of young children with SCT in early childhood could identify deviations in early emotional development and potentially serve as risk markers to guide clinical care in developing interventions. Participants in the current study were 90 SCT children and 97 population-based controls, aged 1 to 7 years, who experienced a stress-inducing event in which physiological (heart rate) and observational data (expression of negative emotions) were collected. Results showed early disturbances in the emotion system of young children with SCT, in terms of blunted but prolonged emotional reactivity and a reduced emotional expressivity in response to stress. Further, the concordance between emotional reactivity (arousal response) and expressivity was significantly lower in SCT, compared to controls. Given the significant impact of emotions on adaptive day-to-day functioning, deviations in processing emotions could be an important underlying mechanism in explaining the heterogeneity and variability in developmental outcomes often described in individuals with SCT.
# Program to find the maximum length of consecutive 0's in a binary string.
def max_consecutive_0(input_str):
    """Return the length of the longest run of characters between '1's.

    Equivalent to max(len(seg) for seg in input_str.split('1')): any
    character other than '1' extends the current run.
    """
    best = 0
    run = 0
    for ch in input_str:
        if ch == '1':
            run = 0
        else:
            run += 1
            if run > best:
                best = run
    return best
# Demo run 1: longest zero-run is '0000' (length 4).
str1 = '111000010000110'
print("Original string:" + str1)
print("Maximum length of consecutive 0’s:")
print(max_consecutive_0(str1))
# Demo run 2: longest zero-run is '000' (length 3).
str1 = '111000111'
print("Original string:" + str1)
print("Maximum length of consecutive 0’s:")
print(max_consecutive_0(str1))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.