hexsha
stringlengths 40
40
| size
int64 3
1.05M
| ext
stringclasses 163
values | lang
stringclasses 53
values | max_stars_repo_path
stringlengths 3
945
| max_stars_repo_name
stringlengths 4
112
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
float64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
945
| max_issues_repo_name
stringlengths 4
113
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
float64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
945
| max_forks_repo_name
stringlengths 4
113
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
float64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 3
1.05M
| avg_line_length
float64 1
966k
| max_line_length
int64 1
977k
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ed8dd64dac2ba7f345d73718aabfbff29ff695a5 | 1,175 | pl | Perl | pythia_agent_loc/GaussianKernelEstimate/test.pl | kvbp2k/pythia | b0b07b88699113df8f3e48ed257365d1fe882952 | [
"Apache-2.0"
] | null | null | null | pythia_agent_loc/GaussianKernelEstimate/test.pl | kvbp2k/pythia | b0b07b88699113df8f3e48ed257365d1fe882952 | [
"Apache-2.0"
] | null | null | null | pythia_agent_loc/GaussianKernelEstimate/test.pl | kvbp2k/pythia | b0b07b88699113df8f3e48ed257365d1fe882952 | [
"Apache-2.0"
] | null | null | null | #!perl -w
#
# Copyright 2012 Georgia Institute of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
use strict;
use ExtUtils::testlib;
use GaussianKernelEstimate;
# Build a large sample array so the XS implementation gets a non-trivial input.
my @array = (1 .. 2000000);
my $n = @array;
# Kernel density estimate at x = 100.5 with bandwidth h = 10.12 over all $n samples.
my $s = GaussianKernelEstimate::getKernelEstimate(100.5, \@array, 10.12, $n);
# NOTE(review): the pure-Perl cross-check getsum() is disabled (presumably too
# slow for 2M points), so $t is always 0 here and the second printed number is 0.
my $t = 0; #getsum(100.5, \@array, 10.12, $n);
print "$s $t\n"; # prints the XS estimate followed by the (disabled) Perl reference value
# Pure-Perl reference implementation of the Gaussian kernel density estimate,
# used to sanity-check GaussianKernelEstimate::getKernelEstimate().
#
# Args:
#   $x   - point at which to evaluate the estimate
#   $ref - arrayref of sample values
#   $h   - kernel bandwidth
#   $n   - number of samples from @$ref to use
#
# Returns the kernel density estimate at $x.
sub getsum {
    my ( $x, $ref, $h, $n ) = @_;

    my $est = 0;

    # Sum the Gaussian kernel contribution of each sample.  Index the arrayref
    # directly instead of copying it into a lexical array (the caller passes a
    # 2,000,000 element array) and use the exp() builtin rather than raising a
    # truncated literal for e to a power -- faster and more precise.
    for my $i ( 0 .. $n - 1 ) {
        my $diff = $x - $ref->[$i];
        $est += exp( -( $diff ** 2 ) / ( 2 * $h * $h ) );
    }

    # Normalization: 1 / (n * h * sqrt(2 * pi)).  The truncated pi constant is
    # kept from the original so results stay comparable with earlier runs.
    $est /= sqrt( 2 * 3.14159265 );
    $est /= ( $n * $h );

    return $est;
}
| 23.979592 | 77 | 0.651064 |
ed8dbf9ee54dc3c8f1f247d804ff55083a1c96cf | 112 | pl | Perl | old_logen/filter_prop/ciao_test.pl | leuschel/logen | 0ea806f54628162615e25177c3ed98f6b2c27935 | [
"Apache-2.0"
] | 14 | 2015-10-16T11:35:30.000Z | 2021-05-12T15:31:16.000Z | old_logen/filter_prop/ciao_test.pl | leuschel/logen | 0ea806f54628162615e25177c3ed98f6b2c27935 | [
"Apache-2.0"
] | null | null | null | old_logen/filter_prop/ciao_test.pl | leuschel/logen | 0ea806f54628162615e25177c3ed98f6b2c27935 | [
"Apache-2.0"
] | 5 | 2015-10-16T12:44:41.000Z | 2019-10-02T02:45:38.000Z |
myfile :- yay,
atom,
concat.
/*%%%CIAO
this_is_ciao(A).
*/%%%CIAO
%%%SICS/*
this_is_sics(A)
%%%SICS*/ | 6.222222 | 16 | 0.544643 |
ed60878b875be1e72ac05cca77aeac13a53aa8d0 | 1,310 | pm | Perl | storage/nimble/snmp/plugin.pm | xdrive05/centreon-plugins | 8227ba680fdfd2bb0d8a806ea61ec1611c2779dc | [
"Apache-2.0"
] | 1 | 2021-03-16T22:20:32.000Z | 2021-03-16T22:20:32.000Z | storage/nimble/snmp/plugin.pm | xdrive05/centreon-plugins | 8227ba680fdfd2bb0d8a806ea61ec1611c2779dc | [
"Apache-2.0"
] | null | null | null | storage/nimble/snmp/plugin.pm | xdrive05/centreon-plugins | 8227ba680fdfd2bb0d8a806ea61ec1611c2779dc | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2020 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::nimble::snmp::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_snmp);
# Constructor: builds on the centreon SNMP script base class and registers
# the modes this plugin provides.
sub new {
    my ( $class, %options ) = @_;

    my $self = $class->SUPER::new( package => __PACKAGE__, %options );
    bless $self, $class;

    $self->{version} = '1.0';

    # mode name => implementing package
    my %modes = (
        'global-stats' => 'storage::nimble::snmp::mode::globalstats',
        'volume-usage' => 'storage::nimble::snmp::mode::volumeusage',
    );
    %{ $self->{modes} } = %modes;

    return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check nimble storage in SNMP.
=cut
| 26.2 | 74 | 0.70229 |
73e904d3712d0276c8ba6ef48e105af4a3004406 | 2,251 | pm | Perl | auto-lib/Azure/Migrate/AssessedMachine.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | null | null | null | auto-lib/Azure/Migrate/AssessedMachine.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | null | null | null | auto-lib/Azure/Migrate/AssessedMachine.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | 1 | 2021-04-08T15:26:39.000Z | 2021-04-08T15:26:39.000Z | package Azure::Migrate::AssessedMachine;
use Moose;
has 'eTag' => (is => 'ro', isa => 'Str' );
has 'id' => (is => 'ro', isa => 'Str' );
has 'name' => (is => 'ro', isa => 'Str' );
has 'type' => (is => 'ro', isa => 'Str' );
has 'bootType' => (is => 'ro', isa => 'Str' );
has 'createdTimestamp' => (is => 'ro', isa => 'Str' );
has 'datacenterContainer' => (is => 'ro', isa => 'Str' );
has 'datacenterMachineId' => (is => 'ro', isa => 'Str' );
has 'datacenterManagementServer' => (is => 'ro', isa => 'Str' );
has 'datacenterManagementServerId' => (is => 'ro', isa => 'Str' );
has 'description' => (is => 'ro', isa => 'Str' );
has 'discoveredTimestamp' => (is => 'ro', isa => 'Str' );
has 'disks' => (is => 'ro', isa => 'HashRef[Azure::Migrate::AssessedDisk]' );
has 'displayName' => (is => 'ro', isa => 'Str' );
has 'groups' => (is => 'ro', isa => 'ArrayRef[Str]' );
has 'megabytesOfMemory' => (is => 'ro', isa => 'Num' );
has 'megabytesOfMemoryForRecommendedSize' => (is => 'ro', isa => 'Num' );
has 'monthlyBandwidthCost' => (is => 'ro', isa => 'Num' );
has 'monthlyComputeCostForRecommendedSize' => (is => 'ro', isa => 'Num' );
has 'monthlyStorageCost' => (is => 'ro', isa => 'Num' );
has 'networkAdapters' => (is => 'ro', isa => 'HashRef[Azure::Migrate::AssessedNetworkAdapter]' );
has 'numberOfCores' => (is => 'ro', isa => 'Int' );
has 'numberOfCoresForRecommendedSize' => (is => 'ro', isa => 'Int' );
has 'operatingSystem' => (is => 'ro', isa => 'Str' );
has 'percentageCoresUtilization' => (is => 'ro', isa => 'Num' );
has 'percentageCoresUtilizationDataPointsExpected' => (is => 'ro', isa => 'Int' );
has 'percentageCoresUtilizationDataPointsReceived' => (is => 'ro', isa => 'Int' );
has 'percentageMemoryUtilization' => (is => 'ro', isa => 'Num' );
has 'percentageMemoryUtilizationDataPointsExpected' => (is => 'ro', isa => 'Int' );
has 'percentageMemoryUtilizationDataPointsReceived' => (is => 'ro', isa => 'Int' );
has 'recommendedSize' => (is => 'ro', isa => 'Str' );
has 'suitability' => (is => 'ro', isa => 'Str' );
has 'suitabilityExplanation' => (is => 'ro', isa => 'Str' );
has 'updatedTimestamp' => (is => 'ro', isa => 'Str' );
1;
| 57.717949 | 100 | 0.549089 |
73db1000a04437942683c87a200e1cbd9e095de9 | 1,415 | t | Perl | testdist/Hash-MultiValue-1.200/t/hash.t | LTD-Beget/cpanminus | 1f48ea8b93915a2771c29a48a7de720e795f3ed7 | [
"Artistic-1.0"
] | 4 | 2015-11-05T07:18:36.000Z | 2019-06-27T08:07:22.000Z | testdist/Hash-MultiValue-1.200/t/hash.t | LTD-Beget/cpanminus | 1f48ea8b93915a2771c29a48a7de720e795f3ed7 | [
"Artistic-1.0"
] | 2 | 2015-11-11T12:55:39.000Z | 2016-02-21T19:50:16.000Z | testdist/Hash-MultiValue-1.200/t/hash.t | LTD-Beget/cpanminus | 1f48ea8b93915a2771c29a48a7de720e795f3ed7 | [
"Artistic-1.0"
] | 1 | 2015-10-13T15:10:02.000Z | 2015-10-13T15:10:02.000Z | use strict;
use Test::More;
use Hash::MultiValue;
my $hash = Hash::MultiValue->new(
foo => 'a',
foo => 'b',
bar => 'baz',
baz => 33,
);
{
my $foo = $hash->as_hashref;
is ref $foo, 'HASH';
is scalar keys %$foo, 3;
is ref $foo->{foo}, '';
is $foo->{foo}, 'b';
is $foo->{bar}, 'baz';
$foo->{x} = 'y';
isnt $hash->{x}, 'y';
}
{
my $foo = $hash->as_hashref_mixed;
is ref $foo, 'HASH';
is scalar keys %$foo, 3;
is ref $foo->{foo}, 'ARRAY';
is_deeply $foo->{foo}, [ 'a', 'b' ];
is $foo->{bar}, 'baz';
}
{
my $foo = $hash->as_hashref_multi;
is ref $foo, 'HASH';
is scalar keys %$foo, 3;
is ref $foo->{foo}, 'ARRAY';
is_deeply $foo->{foo}, [ 'a', 'b' ];
is_deeply $foo->{bar}, [ 'baz' ];
}
{
my @output;
$hash->each(sub { push @output, [ $_, @_ ] });
is_deeply \@output,
[
[ 0, 'foo', 'a' ],
[ 1, 'foo', 'b' ],
[ 2, 'bar', 'baz' ],
[ 3, 'baz', 33 ],
];
}
{
# Test for this even though we want people not to do it
$hash->each(sub { $_[1]++ });
is_deeply [ $hash->flatten ],
[ foo => 'b', foo => 'c', bar => 'bba', baz => 34 ];
is_deeply $hash,
{ foo => 'b', bar => "baz", baz => 33 };
is_deeply [ $hash->keys ], [ qw(foo foo bar baz) ];
is_deeply [ $hash->values ], [ qw(b c bba 34) ];
}
done_testing;
| 21.119403 | 60 | 0.448763 |
ed763447adf704f31e99d66f7e951d72087a2f0d | 57,609 | pm | Perl | lib/GRNOC/TSDS/Writer/Worker.pm | GlobalNOC/tsds-services | 1d1c8f2c77074bcf3d470a75c4a04d0a3d34faac | [
"Apache-2.0"
] | 9 | 2015-12-04T16:30:04.000Z | 2021-09-15T15:01:35.000Z | lib/GRNOC/TSDS/Writer/Worker.pm | GlobalNOC/tsds-services | 1d1c8f2c77074bcf3d470a75c4a04d0a3d34faac | [
"Apache-2.0"
] | 5 | 2015-11-10T14:51:52.000Z | 2019-02-19T17:52:49.000Z | lib/GRNOC/TSDS/Writer/Worker.pm | GlobalNOC/tsds-services | 1d1c8f2c77074bcf3d470a75c4a04d0a3d34faac | [
"Apache-2.0"
] | 19 | 2015-10-30T14:14:20.000Z | 2020-10-30T08:48:41.000Z | package GRNOC::TSDS::Writer::Worker;
use Moo;
use Types::Standard qw( Str Int HashRef Object Maybe );
use GRNOC::TSDS::DataType;
use GRNOC::TSDS::Constants;
use GRNOC::TSDS::AggregateDocument;
use GRNOC::TSDS::DataDocument;
use GRNOC::TSDS::Writer::AggregateMessage;
use GRNOC::TSDS::Writer::DataMessage;
use GRNOC::TSDS::RedisLock;
use GRNOC::TSDS::DataService::MetaData;
use MongoDB;
use Net::AMQP::RabbitMQ;
use Cache::Memcached::Fast;
use Tie::IxHash;
use JSON::XS;
use Math::Round qw( nlowmult );
use Time::HiRes qw( time );
use Try::Tiny;
use Data::Dumper;
### constants ###
# Cache TTLs, in seconds (aggregate entries are kept much longer).
use constant DATA_CACHE_EXPIRATION => 60 * 60;
use constant AGGREGATE_CACHE_EXPIRATION => 60 * 60 * 48;
use constant MEASUREMENT_CACHE_EXPIRATION => 60 * 60;
# Rabbit consumer tuning: prefetch window, and the per-recv() timeout
# (10 * 1000 -- presumably milliseconds; confirm against Net::AMQP::RabbitMQ).
use constant QUEUE_PREFETCH_COUNT => 5;
use constant QUEUE_FETCH_TIMEOUT => 10 * 1000;
# Used by the rabbit reconnect logic (not shown here) -- presumably seconds
# to wait between reconnect attempts.
use constant RECONNECT_TIMEOUT => 10;
### required attributes ###
# GRNOC config object, logger, and the name of the rabbit queue to consume.
has config => ( is => 'ro',
required => 1 );
has logger => ( is => 'ro',
required => 1 );
has queue => ( is => 'ro',
required => 1 );
### internal attributes ###
# Set to 1 while the consume loop should keep running; cleared by stop().
has is_running => ( is => 'rwp',
default => 0 );
# Cache of GRNOC::TSDS::DataType objects keyed by data type name,
# (re)populated via _fetch_data_types().
has data_types => ( is => 'rwp',
default => sub { {} } );
# Connection/handle objects populated during start().
has mongo_rw => ( is => 'rwp' );
has rabbit => ( is => 'rwp' );
has redislock => ( is => 'rwp' );
has memcache => ( is => 'rwp' );
has locker => ( is => 'rwp' );
has json => ( is => 'rwp' );
has metadata_ds => ( is => 'rwp' );
### public methods ###
# start
#
# Worker entry point: installs signal handlers, connects to MongoDB
# (readwrite), redis, memcached and rabbit, builds the metadata dataservice,
# then blocks in _consume_loop() until stop() is called.  Dies if the
# MongoDB connection fails; other connection helpers are defined elsewhere
# in this class and their failure behavior is not visible here.
sub start {
my ( $self ) = @_;
my $queue = $self->queue;
$self->logger->debug( "Starting." );
# flag that we're running
$self->_set_is_running( 1 );
# change our process name (visible in ps output)
$0 = "tsds_writer ($queue) [worker]";
# setup signal handlers
$SIG{'TERM'} = sub {
$self->logger->info( "Received SIG TERM." );
$self->stop();
};
# HUP is logged but otherwise ignored
$SIG{'HUP'} = sub {
$self->logger->info( "Received SIG HUP." );
};
# create JSON object (used by _consume_loop to decode message bodies)
my $json = JSON::XS->new();
$self->_set_json( $json );
# connect to mongo using the readwrite credentials from the config
my $mongo_host = $self->config->get( '/config/mongo/@host' );
my $mongo_port = $self->config->get( '/config/mongo/@port' );
my $rw_user = $self->config->get( "/config/mongo/readwrite" );
$self->logger->debug( "Connecting to MongoDB as readwrite on $mongo_host:$mongo_port." );
my $mongo;
try {
$mongo = MongoDB::MongoClient->new( host => "$mongo_host:$mongo_port",
username => $rw_user->{'user'},
password => $rw_user->{'password'} );
}
catch {
# a worker without mongo is useless -- log and die
$self->logger->error( "Error connecting to MongoDB: $_" );
die( "Error connecting to MongoDB: $_" );
};
$self->_set_mongo_rw( $mongo );
$self->_redis_connect();
# connect to memcache (single server, weight 1)
my $memcache_host = $self->config->get( '/config/memcache/@host' );
my $memcache_port = $self->config->get( '/config/memcache/@port' );
$self->logger->debug( "Connecting to memcached $memcache_host:$memcache_port." );
my $memcache = Cache::Memcached::Fast->new( {'servers' => [{'address' => "$memcache_host:$memcache_port", 'weight' => 1}]} );
$self->_set_memcache( $memcache );
# connect to rabbit
$self->_rabbit_connect();
# set up metadata_ds object, will handle metadata messages
my $metadata_ds = GRNOC::TSDS::DataService::MetaData->new(config_file => $self->config->{'config_file'});
$self->_set_metadata_ds( $metadata_ds );
$self->logger->debug( 'Starting RabbitMQ consume loop.' );
# continually consume messages from rabbit queue, making sure we have to acknowledge them
return $self->_consume_loop();
}
# Request a graceful shutdown of the worker.
sub stop {
    my $self = shift;

    $self->logger->debug( 'Stopping.' );

    # Clearing the running flag makes _consume_loop() exit at the top of its
    # next iteration; no connections are torn down here.
    $self->_set_is_running( 0 );
}
### private methods ###
# _consume_loop
#
# Blocking consume loop: repeatedly recv()s from rabbit until stop() clears
# is_running.  Malformed deliveries (undecodable JSON or a non-array body)
# are rejected WITHOUT requeue; deliveries whose processing fails are
# rejected WITH requeue for a later retry; successful ones are ack'd.
# Any rabbit error triggers a reconnect.  Returns 0 when told to stop.
sub _consume_loop {
my ( $self ) = @_;
while ( 1 ) {
# have we been told to stop?
if ( !$self->is_running ) {
$self->logger->debug( 'Exiting consume loop.' );
return 0;
}
# receive the next rabbit message
my $rabbit_message;
try {
$rabbit_message = $self->rabbit->recv( QUEUE_FETCH_TIMEOUT );
}
catch {
$self->logger->error( "Error receiving rabbit message: $_" );
# reconnect to rabbit since we had a failure
$self->_rabbit_connect();
};
# didn't get a message? (recv timed out or errored)
if ( !$rabbit_message ) {
$self->logger->debug( 'No message received.' );
# re-enter loop to retrieve the next message
next;
}
# try to JSON decode the messages
my $messages;
try {
$messages = $self->json->decode( $rabbit_message->{'body'} );
}
catch {
$self->logger->error( "Unable to JSON decode message: $_" );
};
if ( !$messages ) {
try {
# reject the message and do NOT requeue it since its malformed JSON
$self->rabbit->reject( 1, $rabbit_message->{'delivery_tag'}, 0 );
}
catch {
$self->logger->error( "Unable to reject rabbit message: $_" );
# reconnect to rabbit since we had a failure
$self->_rabbit_connect();
};
}
# retrieve the next message from rabbit if we couldn't decode this one
next if ( !$messages );
# make sure its an array (ref) of messages
if ( ref( $messages ) ne 'ARRAY' ) {
$self->logger->error( "Message body must be an array." );
try {
# reject the message and do NOT requeue since its not properly formed
$self->rabbit->reject( 1, $rabbit_message->{'delivery_tag'}, 0 );
}
catch {
$self->logger->error( "Unable to reject rabbit message: $_" );
# reconnect to rabbit since we had a failure
$self->_rabbit_connect();
};
next;
}
# process the batch, timing it for the debug log
my $num_messages = @$messages;
$self->logger->debug( "Processing message containing $num_messages updates." );
my $t1 = time();
my $success = $self->_consume_messages( $messages );
my $t2 = time();
my $delta = $t2 - $t1;
$self->logger->debug( "Processed $num_messages updates in $delta seconds." );
# didn't successfully consume the messages, so reject but requeue the entire message to try again
if ( !$success ) {
$self->logger->debug( "Rejecting rabbit message, requeueing." );
try {
$self->rabbit->reject( 1, $rabbit_message->{'delivery_tag'}, 1 );
}
catch {
$self->logger->error( "Unable to reject rabbit message: $_" );
# reconnect to rabbit since we had a failure
$self->_rabbit_connect();
};
}
# successfully consumed message, acknowledge it to rabbit
else {
$self->logger->debug( "Acknowledging successful message." );
try {
$self->rabbit->ack( 1, $rabbit_message->{'delivery_tag'} );
}
catch {
$self->logger->error( "Unable to acknowledge rabbit message: $_" );
# reconnect to rabbit since we had a failure
$self->_rabbit_connect();
};
}
}
}
# _consume_messages
#
# Validates and dispatches the decoded updates from a single rabbit delivery.
# Three kinds of updates are recognized by the 'type' field:
#   "<type>"           - high-resolution data update
#   "<type>.aggregate" - pre-aggregated data update
#   "<type>.metadata"  - measurement metadata update
# Data/aggregate updates are batched into bulk mongo operations which are
# executed at the end; metadata updates are handed to the metadata
# dataservice last, after all locks are released (it does its own locking).
# Returns 1 on success, 0 if the entire delivery should be requeued (data
# type cache refresh failed, or processing raised an exception).
sub _consume_messages {
my ( $self, $messages ) = @_;
# gather all messages to process
my $data_to_process = [];
my $aggregates_to_process = [];
my $meta_to_process = [];
# keep track and build up all of the bulk operations we'll want to do at the end
my $bulk_creates = {};
my $bulk_updates = {};
my $acquired_locks = [];
# handle every TSDS message that came within the rabbit message
foreach my $message ( @$messages ) {
# make sure message is an object/hash (ref)
if ( ref( $message ) ne 'HASH' ) {
$self->logger->error( "Messages must be an object/hash of data, skipping." );
next;
}
my $type = $message->{'type'};
my $time = $message->{'time'};
my $interval = $message->{'interval'};
my $values = $message->{'values'};
my $meta = $message->{'meta'};
# NOTE(review): $affected, $text and $identifier are extracted here but
# never used anywhere in this sub.
my $affected = $message->{'affected'};
my $text = $message->{'text'};
my $start = $message->{'start'};
my $end = $message->{'end'};
my $identifier = $message->{'identifier'};
# make sure a type was specified
if ( !defined( $type ) ) {
$self->logger->error( "No type specified, skipping message." );
next;
}
# does it appear to be an aggregate or metadata message?
if ( $type =~ /^(.+)\.(aggregate|metadata)$/ ) {
my $data_type_name = $1;
my $message_type = $2;
my $data_type = $self->data_types->{$data_type_name};
# we haven't seen this data type before, re-fetch them
if ( !$data_type ) {
my $success = 1;
# this involves communicating to mongodb which may fail
try {
$self->_fetch_data_types();
}
# requeue the message to try again later if mongo communication fails
catch {
$self->logger->error( "Unable to fetch data types from MongoDB." );
$success = 0;
};
# dont bother handling any more of the messages in this rabbit message
return 0 if !$success;
$data_type = $self->data_types->{$data_type_name};
}
# detect unknown data type, ignore it
if ( !$data_type ) {
$self->logger->warn( "Unknown data type '$data_type_name', skipping." );
next;
}
# was it an aggregate?
if ( $message_type eq "aggregate" ) {
my $aggregate_message;
# constructor may throw on invalid fields; invalid messages are dropped
try {
$aggregate_message = GRNOC::TSDS::Writer::AggregateMessage->new( data_type => $data_type,
time => $time,
interval => $interval,
values => $values,
meta => $meta );
}
catch {
$self->logger->error( $_ );
};
# include this to our list of aggregates to process if it was valid
push( @$aggregates_to_process, $aggregate_message ) if $aggregate_message;
}
elsif ( $message_type eq 'metadata' ) {
# flatten the metadata fields alongside type/start/end for the
# metadata dataservice ('time' becomes 'start' here)
my $meta_update = {
"tsds_type" => $data_type_name,
"start" => $time,
"end" => $end
};
foreach my $meta_field (keys %$meta){
$meta_update->{$meta_field} = $meta->{$meta_field};
}
push(@$meta_to_process, $meta_update);
}
}
# must be a data message
else {
my $data_type = $self->data_types->{$type};
# we haven't seen this data type before, re-fetch them
if ( !$data_type ) {
my $success = 1;
# this involves communicating to mongodb, which may fail
try {
$self->_fetch_data_types();
}
# requeue the message to try again later if mongo communication fails
catch {
$self->logger->error( "Unable to fetch data types from MongoDB." );
$success = 0;
};
# dont bother handling any more of the messages in this rabbit message
return 0 if !$success;
$data_type = $self->data_types->{$type};
}
# detected unknown data type, ignore it
if ( !$data_type ) {
$self->logger->warn( "Unknown data type '$type', skipping." );
next;
}
my $data_message;
try {
$data_message = GRNOC::TSDS::Writer::DataMessage->new( data_type => $data_type,
time => $time,
interval => $interval,
values => $values,
meta => $meta );
}
catch {
$self->logger->error( $_ );
# release any outstanding locks
$self->_release_locks( $acquired_locks );
};
# include this to our list of data to process if it was valid
push( @$data_to_process, $data_message ) if $data_message;
}
}
# process all of the data points within this message
my $success = 1;
try {
# at least one aggregate to process
if ( @$aggregates_to_process > 0 ) {
$self->logger->debug( "Processing " . @$aggregates_to_process . " aggregate messages." );
$self->_process_aggregate_messages( messages => $aggregates_to_process,
bulk_creates => $bulk_creates,
bulk_updates => $bulk_updates,
acquired_locks => $acquired_locks );
}
# at least one high res data to process
if ( @$data_to_process > 0 ) {
$self->logger->debug( "Processing " . @$data_to_process . " data messages." );
$self->_process_data_messages( messages => $data_to_process,
bulk_creates => $bulk_creates,
bulk_updates => $bulk_updates,
acquired_locks => $acquired_locks );
}
# perform all (most, except for data type changes..) create and update operations in bulk
$self->_process_bulks( $bulk_creates );
$self->_process_bulks( $bulk_updates );
# release all the locks we're acquired for the docs we're changing
$self->_release_locks( $acquired_locks );
# This does it's own locking, so we'll do that here after we release any locks above.
if ( @$meta_to_process > 0 ) {
$self->metadata_ds()->update_measurement_metadata(values => $meta_to_process, type_field => "tsds_type", fatal => 1);
}
}
catch {
$self->logger->error( "Error processing messages: $_" );
# reconnect redis in case the failure left the connection in a bad state
$self->_redis_connect();
# release any outstanding locks
$self->_release_locks( $acquired_locks );
$success = 0;
};
return $success;
}
# Release every redis lock in the given arrayref (acquired while processing
# the current rabbit delivery).
sub _release_locks {
    my $self = shift;
    my $locks = shift;

    $self->redislock->unlock( $_ ) for @$locks;
}
# Execute every pending MongoDB bulk operation.
#
# $bulks is keyed by database name, then collection name, with the bulk
# operation object as the value.  Any write error or write-concern error
# raises an exception so the whole rabbit message gets requeued.
sub _process_bulks {
    my ( $self, $bulks ) = @_;

    foreach my $db_name ( keys %$bulks ) {
        foreach my $col_name ( keys %{ $bulks->{$db_name} } ) {
            my $bulk_op = $bulks->{$db_name}{$col_name};

            $self->logger->debug( "Executing bulk query for $db_name - $col_name." );

            my $result = $bulk_op->execute();

            # a single failure aborts processing so the delivery is retried
            my $error_count = $result->count_writeErrors() + $result->count_writeConcernErrors();
            die( "bulk update failed: " . $result->last_errmsg() ) if $error_count > 0;
        }
    }
}
# _process_data_messages
#
# Turns the validated DataMessage objects from one rabbit delivery into
# pending MongoDB operations.  Named args:
#
#   messages       - arrayref of GRNOC::TSDS::Writer::DataMessage objects
#   bulk_creates   - hashref of pending bulk insert ops (db -> collection)
#   bulk_updates   - hashref of pending bulk update ops (db -> collection)
#   acquired_locks - arrayref accumulating every redis lock taken
#
# Works by side effect only: it batches data points into per-document
# objects, creates any measurement documents not found in memcache, updates
# metadata value types, and hands each data document to
# _process_data_document().
sub _process_data_messages {
    my ( $self, %args ) = @_;

    my $messages = $args{'messages'};
    my $bulk_creates = $args{'bulk_creates'};
    my $bulk_updates = $args{'bulk_updates'};
    my $acquired_locks = $args{'acquired_locks'};

    # all unique value types we're handling per each data type
    my $unique_data_types = {};
    my $unique_value_types = {};

    # all unique measurements we're handling
    my $unique_measurements = {};

    # all unique documents we're handling (and their corresponding data points)
    my $unique_documents = {};

    # handle every message sent, ordered by their timestamp in ascending order
    foreach my $message ( sort { $a->time <=> $b->time } @$messages ) {

        my $data_type = $message->data_type;
        my $measurement_identifier = $message->measurement_identifier;
        my $interval = $message->interval;

        # data_points is built lazily, so accessing it may fail validation
        my $data_points;

        try {
            $data_points = $message->data_points;
        }
        catch {
            $self->logger->error( "Error building data points for message: $_" );
        };

        next if ( !defined $data_points );

        my $time = $message->time;
        my $meta = $message->meta;

        # mark this data type as being found
        $unique_data_types->{$data_type->name} = $data_type;

        # have we handled this measurement already?
        my $unique_measurement = $unique_measurements->{$data_type->name}{$measurement_identifier};

        if ( $unique_measurement ) {
            # keep the older start time, just update its meta data with the latest
            $unique_measurements->{$data_type->name}{$measurement_identifier}{'meta'} = $meta;
        }
        else {
            # never seen this measurement before; remember meta, start time and interval
            $unique_measurements->{$data_type->name}{$measurement_identifier} = { 'meta' => $meta,
                                                                                  'start' => $time,
                                                                                  'interval' => $interval };
        }

        # determine proper start and end time of the document this point lives in
        my $doc_length = $interval * HIGH_RESOLUTION_DOCUMENT_SIZE;
        my $start = nlowmult( $doc_length, $time );
        my $end = $start + $doc_length;

        # determine the document that this message would belong within
        my $document = GRNOC::TSDS::DataDocument->new( data_type => $data_type,
                                                       measurement_identifier => $measurement_identifier,
                                                       interval => $interval,
                                                       start => $start,
                                                       end => $end );

        # mark the document for this data point if one hasn't been set already
        my $unique_doc = $unique_documents->{$data_type->name}{$measurement_identifier}{$document->start}{$document->end};

        if ( !$unique_doc ) {
            # we've never handled a data point for this document before
            $unique_documents->{$data_type->name}{$measurement_identifier}{$document->start}{$document->end} = $document;
            $unique_doc = $unique_documents->{$data_type->name}{$measurement_identifier}{$document->start}{$document->end};
        }

        # attach every data point in this message to the document
        foreach my $data_point ( @$data_points ) {
            my $value_type = $data_point->value_type;

            # add this as another data point to update/set in the document
            $unique_doc->add_data_point( $data_point );

            # mark this value type as being found
            $unique_value_types->{$data_type->name}{$value_type} = 1;
        }
    }

    # Build the memcache lookup ids for all unique measurements.
    #
    # BUG FIX: this pass and the creation pass below must walk measurements in
    # the SAME order, because the second pass shift()s ids off
    # @measurement_cache_ids positionally.  The original code iterated
    # unsorted keys() here but sorted keys below, so with more than one
    # measurement a cache id could be paired with the wrong measurement and
    # the wrong cache entry consulted.  Both passes now iterate sorted keys.
    my @measurement_cache_ids;

    foreach my $data_type ( sort keys %$unique_measurements ) {
        foreach my $measurement_identifier ( sort keys %{$unique_measurements->{$data_type}} ) {
            my $cache_id = $self->redislock->get_cache_id( type => $data_type,
                                                           collection => 'measurements',
                                                           identifier => $measurement_identifier );
            push( @measurement_cache_ids, $cache_id );
        }
    }

    if ( @measurement_cache_ids ) {
        # grab measurements from our cache
        my $measurement_cache_results = $self->memcache->get_multi( @measurement_cache_ids );

        # potentially create new measurement entries that we've never seen before
        foreach my $data_type ( sort keys %$unique_measurements ) {
            foreach my $measurement_identifier ( sort keys %{$unique_measurements->{$data_type}} ) {
                my $cache_id = shift( @measurement_cache_ids );

                # this measurement exists in our cache, dont bother creating it
                next if ( $measurement_cache_results->{$cache_id} );

                # potentially create a new entry unless someone else beats us to it
                my $meta = $unique_measurements->{$data_type}{$measurement_identifier}{'meta'};
                my $start = $unique_measurements->{$data_type}{$measurement_identifier}{'start'};
                my $interval = $unique_measurements->{$data_type}{$measurement_identifier}{'interval'};

                $self->_create_measurement_document( identifier => $measurement_identifier,
                                                     data_type => $unique_data_types->{$data_type},
                                                     meta => $meta,
                                                     start => $start,
                                                     interval => $interval,
                                                     bulk_creates => $bulk_creates,
                                                     acquired_locks => $acquired_locks );
            }
        }
    }

    # potentially update the metadata value types for every distinct one found
    foreach my $data_type ( keys %$unique_value_types ) {
        my @value_types = keys( %{$unique_value_types->{$data_type}} );

        $self->_update_metadata_value_types( data_type => $unique_data_types->{$data_type},
                                             value_types => \@value_types );
    }

    # handle every distinct document that we'll need to update
    foreach my $data_type ( sort keys %$unique_documents ) {
        foreach my $measurement_identifier ( sort keys %{$unique_documents->{$data_type}} ) {
            my @starts = keys( %{$unique_documents->{$data_type}{$measurement_identifier}} );

            foreach my $start ( sort { $a <=> $b } @starts ) {
                my @ends = keys( %{$unique_documents->{$data_type}{$measurement_identifier}{$start}} );

                foreach my $end ( sort { $a <=> $b } @ends ) {
                    my $document = $unique_documents->{$data_type}{$measurement_identifier}{$start}{$end};

                    # process this data document, including all data points contained within it
                    $self->_process_data_document( document => $document,
                                                   bulk_creates => $bulk_creates,
                                                   bulk_updates => $bulk_updates,
                                                   acquired_locks => $acquired_locks );

                    # all done with this document, remove it so we don't hold onto its memory
                    delete( $unique_documents->{$data_type}{$measurement_identifier}{$start}{$end} );
                }
            }
        }
    }
}
# _process_aggregate_messages( messages => \@msgs, bulk_creates => \%ops,
#                              bulk_updates => \%ops, acquired_locks => \@locks )
#
# Buckets every aggregate point carried by the given messages into the unique
# aggregate document (data type / measurement / doc start / doc end) it belongs
# to, then processes each distinct document exactly once via
# _process_aggregate_document().  Bulk mongo operations are accumulated into
# the supplied hashrefs; any locks taken further down the call chain are pushed
# onto @$acquired_locks for the caller to release.
sub _process_aggregate_messages {
    my ( $self, %args ) = @_;
    my $messages = $args{'messages'};
    my $bulk_creates = $args{'bulk_creates'};
    my $bulk_updates = $args{'bulk_updates'};
    my $acquired_locks = $args{'acquired_locks'};
    # all unique documents we're handling (and their corresponding data points)
    my $unique_documents = {};
    # handle every message sent, ordered by their timestamp in ascending order
    foreach my $message ( sort { $a->time <=> $b->time } @$messages ) {
        my $data_type = $message->data_type;
        my $measurement_identifier = $message->measurement_identifier;
        my $interval = $message->interval;
        my $time = $message->time;
        my $meta = $message->meta;
        # This is lazily built so it might actually fail type validation
        # when we invoke it for the first time
        my $aggregate_points;
        try {
            $aggregate_points = $message->aggregate_points;
        }
        catch {
            $self->logger->error( "Error processing aggregate update - bad data format: $_" );
        };
        # malformed message: log above and skip it rather than dying
        next if (! defined $aggregate_points);
        # determine proper start and end time of document
        # (doc boundaries are fixed multiples of interval * AGGREGATE_DOCUMENT_SIZE;
        # nlowmult rounds $time down to the nearest such multiple)
        my $doc_length = $interval * AGGREGATE_DOCUMENT_SIZE;
        my $start = nlowmult( $doc_length, $time );
        my $end = $start + $doc_length;
        # determine the document that this message would belong within
        my $document = GRNOC::TSDS::AggregateDocument->new( data_type => $data_type,
                                                            measurement_identifier => $measurement_identifier,
                                                            interval => $interval,
                                                            start => $start,
                                                            end => $end );
        # mark the document for this data point if one hasn't been set already
        my $unique_doc = $unique_documents->{$data_type->name}{$measurement_identifier}{$document->start}{$document->end};
        # we've never handled a data point for this document before
        if ( !$unique_doc ) {
            # mark it as being a new unique document we need to handle
            $unique_documents->{$data_type->name}{$measurement_identifier}{$document->start}{$document->end} = $document;
            $unique_doc = $unique_documents->{$data_type->name}{$measurement_identifier}{$document->start}{$document->end};
        }
        # handle every aggregate point that was included in this message
        foreach my $aggregate_point ( @$aggregate_points ) {
            my $value_type = $aggregate_point->value_type;
            # add this as another data point to update/set in the document
            $unique_doc->add_aggregate_point( $aggregate_point );
        }
    }
    # handle every distinct document that we'll need to update
    # (sorted iteration keeps lock acquisition order deterministic)
    my @data_types = keys( %$unique_documents );
    foreach my $data_type ( sort @data_types ) {
        my @measurement_identifiers = keys( %{$unique_documents->{$data_type}} );
        foreach my $measurement_identifier ( sort @measurement_identifiers ) {
            my @starts = keys( %{$unique_documents->{$data_type}{$measurement_identifier}} );
            foreach my $start ( sort { $a <=> $b } @starts ) {
                my @ends = keys( %{$unique_documents->{$data_type}{$measurement_identifier}{$start}} );
                foreach my $end ( sort { $a <=> $b } @ends ) {
                    my $document = $unique_documents->{$data_type}{$measurement_identifier}{$start}{$end};
                    # process this aggregate document, including all aggregate points contained within it
                    $self->_process_aggregate_document( document => $document,
                                                        bulk_creates => $bulk_creates,
                                                        bulk_updates => $bulk_updates,
                                                        acquired_locks => $acquired_locks );
                    # all done with this document, remove it so we don't hold onto its memory
                    delete( $unique_documents->{$data_type}{$measurement_identifier}{$start}{$end} );
                }
            }
        }
    }
}
# _process_data_document( document => $doc, bulk_creates => \%ops,
#                         bulk_updates => \%ops, acquired_locks => \@locks )
#
# Persists one high-resolution data document.  Takes a redis lock on the
# document (pushed to @$acquired_locks for the caller to release), then
# decides between update and create using a two-level lookup: first the
# memcache entry for the doc, then mongo itself.  The memcache entry stores
# the document's known value types; it is invalidated whenever an update adds
# a value type so the next pass re-verifies against the database.
sub _process_data_document {
    my ( $self, %args ) = @_;
    my $document = $args{'document'};
    my $bulk_creates = $args{'bulk_creates'};
    my $bulk_updates = $args{'bulk_updates'};
    my $acquired_locks = $args{'acquired_locks'};
    my $data_type = $document->data_type->name;
    my $measurement_identifier = $document->measurement_identifier;
    my $start = $document->start;
    my $end = $document->end;
    my %new_value_types = %{$document->value_types};
    $self->logger->debug( "Processing data document $data_type / $measurement_identifier / $start / $end." );
    # get lock for this data document
    my $lock = $self->redislock->lock( type => $data_type,
                                       collection => 'data',
                                       identifier => $measurement_identifier,
                                       start => $start,
                                       end => $end ) or die "Can't lock data document for $data_type / $measurement_identifier / $start / $end";
    push( @$acquired_locks, $lock );
    my $cache_id = $self->redislock->get_cache_id( type => $data_type,
                                                   collection => 'data',
                                                   identifier => $measurement_identifier,
                                                   start => $start,
                                                   end => $end );
    # its already in our cache, seen it before
    if ( my $cached = $self->memcache->get( $cache_id ) ) {
        $self->logger->debug( 'Found document in cache, updating.' );
        my $old_value_types = $cached->{'value_types'};
        # update existing document along with its new data points
        ( $document, my $added_value_types ) = $self->_update_data_document( document => $document,
                                                                             old_value_types => $old_value_types,
                                                                             new_value_types => \%new_value_types,
                                                                             bulk_updates => $bulk_updates,
                                                                             acquired_locks => $acquired_locks );
        # will this update add a new value type?
        if ( @$added_value_types > 0 ) {
            # invalidate the cache entry so we fetch it from the db later and verify they were properly added during the bulk op
            $self->memcache->delete( $cache_id );
        }
        # maintain/update existing cache entry
        else {
            $self->memcache->set( $cache_id,
                                  $cached, # This originally set the same value_types as below, but made it so that partial updates didn't work. Keep whatever was already in the cache instead.
                                  DATA_CACHE_EXPIRATION );
        }
    }
    # not in cache, we'll have to query mongo to see if its there
    else {
        $self->logger->debug( 'Document not found in cache.' );
        # retrieve the full updated doc from mongo
        my $live_doc = $document->fetch();
        # document exists in mongo, so we'll need to update it
        if ( $live_doc ) {
            # update our cache with the doc info we found in the db
            $self->memcache->set( $cache_id,
                                  {'value_types' => $live_doc->value_types},
                                  DATA_CACHE_EXPIRATION );
            $self->logger->debug( 'Document exists in mongo, updating.' );
            # update existing document along with its new data points
            ( $document, my $added_value_types ) = $self->_update_data_document( document => $document,
                                                                                 old_value_types => $live_doc->value_types,
                                                                                 new_value_types => \%new_value_types,
                                                                                 bulk_updates => $bulk_updates,
                                                                                 acquired_locks => $acquired_locks );
            # will this update add a new value type?
            if ( @$added_value_types > 0 ) {
                # invalidate the cache entry so we fetch it from the db again later and verify they were properly added during the bulk op
                $self->memcache->delete( $cache_id );
            }
        }
        # doesn't exist in mongo, we'll need to create it along with the data points provided, and
        # make sure there are no overlaps with other docs due to interval change, etc.
        else {
            $self->logger->debug( 'Document does not exist in mongo, creating.' );
            $document = $self->_create_data_document( document => $document,
                                                      bulk_creates => $bulk_creates,
                                                      acquired_locks => $acquired_locks );
        }
    }
    $self->logger->debug( "Finished processing document $data_type / $measurement_identifier / $start / $end." );
}
# _process_aggregate_document( document => $doc, bulk_creates => \%ops,
#                              bulk_updates => \%ops, acquired_locks => \@locks )
#
# Aggregate-collection counterpart of _process_data_document().  Locks the
# per-interval aggregate document ("data_<interval>" collection), then either
# updates an existing document (found via memcache or mongo) or bulk-creates
# a new one.  The memcache entry caches the document's known value types and
# is invalidated whenever an update introduces a new value type.
sub _process_aggregate_document {
    my ( $self, %args ) = @_;
    my $document = $args{'document'};
    my $bulk_creates = $args{'bulk_creates'};
    my $bulk_updates = $args{'bulk_updates'};
    my $acquired_locks = $args{'acquired_locks'};
    my $data_type = $document->data_type;
    my $data_type_name = $data_type->name;
    my $measurement_identifier = $document->measurement_identifier;
    my $start = $document->start;
    my $end = $document->end;
    my $interval = $document->interval;
    my %new_value_types = %{$document->value_types};
    $self->logger->debug( "Processing aggregate document $data_type_name - $interval / $measurement_identifier / $start / $end." );
    # get lock for this aggregate document
    my $lock = $self->redislock->lock( type => $data_type_name,
                                       collection => "data_$interval",
                                       identifier => $measurement_identifier,
                                       start => $start,
                                       end => $end ) or die "Can't lock aggregate data doc for $data_type_name - $interval / $measurement_identifier / $start / $end.";
    push( @$acquired_locks, $lock );
    my $cache_id = $self->redislock->get_cache_id( type => $data_type_name,
                                                   collection => "data_$interval",
                                                   identifier => $measurement_identifier,
                                                   start => $start,
                                                   end => $end );
    # its already in our cache, seen it before
    if ( my $cached = $self->memcache->get( $cache_id ) ) {
        $self->logger->debug( 'Found document in cache, updating.' );
        my $old_value_types = $cached->{'value_types'};
        # update existing document along with its new data points
        ( $document, my $added_value_types ) = $self->_update_aggregate_document( document => $document,
                                                                                  old_value_types => $old_value_types,
                                                                                  new_value_types => \%new_value_types,
                                                                                  bulk_updates => $bulk_updates,
                                                                                  acquired_locks => $acquired_locks );
        # will this update add a new value type?
        if ( @$added_value_types > 0 ) {
            # invalidate the cache entry so we fetch it from the db later and verify they were properly added during the bulk op
            $self->memcache->delete( $cache_id );
        }
        # maintain/update existing cache entry
        else {
            # re-set the cached entry as-is to refresh its expiration
            $self->memcache->set( $cache_id,
                                  $cached,
                                  AGGREGATE_CACHE_EXPIRATION );
        }
    }
    # not in cache, we'll have to query mongo to see if its there
    else {
        $self->logger->debug( 'Document not found in cache.' );
        # retrieve the full updated doc from mongo
        my $live_doc = $document->fetch();
        # document exists in mongo, so we'll need to update it
        if ( $live_doc ) {
            # update our cache with the doc info we found in the db
            $self->memcache->set( $cache_id,
                                  {'value_types' => $live_doc->value_types},
                                  AGGREGATE_CACHE_EXPIRATION );
            $self->logger->debug( 'Document exists in mongo, updating.' );
            # update existing document along with its new data points
            ( $document, my $added_value_types ) = $self->_update_aggregate_document( document => $document,
                                                                                      old_value_types => $live_doc->value_types,
                                                                                      new_value_types => \%new_value_types,
                                                                                      bulk_updates => $bulk_updates,
                                                                                      acquired_locks => $acquired_locks );
            # will this update add a new value type?
            if ( @$added_value_types > 0 ) {
                # invalidate the cache entry so we fetch it from the db again later and verify they were properly added during the bulk op
                $self->memcache->delete( $cache_id );
            }
        }
        # doesn't exist in mongo, we'll need to create it along with the aggregate points provided
        else {
            $self->logger->debug( 'Document does not exist in mongo, creating.' );
            my $bulk = $bulk_creates->{$data_type_name}{'data_' . $document->interval};
            # haven't initialized a bulk op for this data type + collection yet
            if ( !defined( $bulk ) ) {
                my $collection = $data_type->database->get_collection( 'data_' . $document->interval );
                $bulk = $collection->initialize_unordered_bulk_op();
                $bulk_creates->{$data_type_name}{'data_' . $document->interval} = $bulk;
            }
            $document = $document->create( bulk => $bulk );
        }
    }
    $self->logger->debug( "Finished processing aggregate document $data_type_name - $interval / $measurement_identifier / $start / $end." );
}
# _create_data_document( document => $doc, bulk_creates => \%ops,
#                        acquired_locks => \@locks )
#
# Creates a brand-new high-resolution data document, handling the tricky
# case where existing documents overlap the new one's [start, end) range
# (this happens when the measurement's interval changed, shifting document
# boundaries).  Every overlapping document is locked, its data points are
# re-bucketed at the new interval into the appropriate replacement
# documents, all resulting documents are bulk-created, and the superseded
# documents are deleted from both mongo and memcache.  Returns the original
# $document argument.
sub _create_data_document {
    my ( $self, %args ) = @_;
    my $document = $args{'document'};
    my $bulk_creates = $args{'bulk_creates'};
    my $acquired_locks = $args{'acquired_locks'};
    # before we insert this new document, we will want to check for existing documents which
    # may have overlapping data with this new one. this can happen if there was an interval
    # change, since that affects the start .. end range of the document
    my $data_type = $document->data_type;
    my $identifier = $document->measurement_identifier;
    my $start = $document->start;
    my $end = $document->end;
    my $interval = $document->interval;
    $self->logger->debug( "Creating new data document $identifier / $start / $end." );
    # help from http://eli.thegreenplace.net/2008/08/15/intersection-of-1d-segments
    # two ranges [a,b) and [c,d) overlap iff a < d and b > c
    my $query = Tie::IxHash->new( 'identifier' => $identifier,
                                  'start' => {'$lt' => $end},
                                  'end' => {'$gt' => $start} );
    # get this document's data collection
    my $data_collection = $data_type->database->get_collection( 'data' );
    $self->logger->debug( 'Finding existing overlapping data documents before creation.' );
    # the ids of the overlaps we found
    my @overlap_ids;
    # the cache ids of the overlaps we found
    my @overlap_cache_ids;
    # unique documents that the data points, after altering their interval, will belong in
    my $unique_documents = {};
    # add this new document as one of the unique documents that will need to get created
    $unique_documents->{$identifier}{$start}{$end} = $document;
    # specify index hint to address occasional performance problems executing this query
    my $overlaps = $data_collection->find( $query )->hint( 'identifier_1_start_1_end_1' )->fields( {'interval' => 1,
                                                                                                    'start' => 1,
                                                                                                    'end' => 1} );
    # handle every existing overlapping doc, if any
    while ( my $overlap = $overlaps->next ) {
        my $id = $overlap->{'_id'};
        my $overlap_interval = $overlap->{'interval'};
        my $overlap_start = $overlap->{'start'};
        my $overlap_end = $overlap->{'end'};
        # keep this as one of the docs that will need removed later
        push( @overlap_ids, $id );
        # determine cache id for this doc
        my $cache_id = $self->redislock->get_cache_id( type => $data_type->name,
                                                       collection => 'data',
                                                       identifier => $identifier,
                                                       start => $overlap_start,
                                                       end => $overlap_end );
        push( @overlap_cache_ids, $cache_id );
        # grab lock for this doc
        my $lock = $self->redislock->lock( type => $data_type->name,
                                           collection => 'data',
                                           identifier => $identifier,
                                           start => $overlap_start,
                                           end => $overlap_end ) or die "Can't lock overlapping data doc for $identifier";
        push( @$acquired_locks, $lock );
        $self->logger->debug( "Found overlapping data document with interval: $overlap_interval start: $overlap_start end: $overlap_end." );
        # create object representation of this duplicate doc
        my $overlap_doc = GRNOC::TSDS::DataDocument->new( data_type => $data_type,
                                                          measurement_identifier => $identifier,
                                                          interval => $overlap_interval,
                                                          start => $overlap_start,
                                                          end => $overlap_end );
        # fetch entire doc to grab its data points
        $overlap_doc->fetch( data => 1 );
        # handle every data point in this overlapping doc
        my $data_points = $overlap_doc->data_points;
        foreach my $data_point ( @$data_points ) {
            # set the *new* interval we'll be using for this data point
            $data_point->interval( $interval );
            # determine proper start and end time of *new* document
            # (integer truncation buckets the point's time into the new doc boundary)
            my $doc_length = $interval * HIGH_RESOLUTION_DOCUMENT_SIZE;
            my $new_start = int($data_point->time / $doc_length) * $doc_length;
            my $new_end = $new_start + $doc_length;
            # mark the document for this data point if one hasn't been set already
            my $unique_doc = $unique_documents->{$identifier}{$new_start}{$new_end};
            # we've never handled a data point for this document before
            if ( !$unique_doc ) {
                # determine the *new* document that this message would belong within
                my $new_document = GRNOC::TSDS::DataDocument->new( data_type => $data_type,
                                                                   measurement_identifier => $identifier,
                                                                   interval => $interval,
                                                                   start => $new_start,
                                                                   end => $new_end );
                # mark it as being a new unique document we need to handle
                $unique_documents->{$identifier}{$new_start}{$new_end} = $new_document;
                $unique_doc = $unique_documents->{$identifier}{$new_start}{$new_end};
            }
            # add this as another data point to update/set in the document, if needed
            $unique_doc->add_data_point( $data_point ) if ( defined $data_point->value );
        }
    }
    # process all new documents that get created as a result of splitting the old document up
    my @measurement_identifiers = keys( %$unique_documents );
    foreach my $measurement_identifier ( @measurement_identifiers ) {
        my @starts = keys( %{$unique_documents->{$measurement_identifier}} );
        foreach my $start ( @starts ) {
            my @ends = keys( %{$unique_documents->{$measurement_identifier}{$start}} );
            foreach my $end ( @ends ) {
                my $unique_document = $unique_documents->{$measurement_identifier}{$start}{$end};
                my $bulk = $bulk_creates->{$data_type->name}{'data'};
                # haven't initialized a bulk op for this data type + collection yet
                if ( !defined( $bulk ) ) {
                    $bulk = $data_collection->initialize_unordered_bulk_op();
                    $bulk_creates->{$data_type->name}{'data'} = $bulk;
                }
                $self->logger->debug( "Creating new data document $measurement_identifier / $start / $end." );
                $unique_document->create( bulk => $bulk );
            }
        }
    }
    # remove all old documents that are getting replaced with new docs
    if ( @overlap_ids > 0 ) {
        # first remove from mongo
        $data_collection->delete_many( {'_id' => {'$in' => \@overlap_ids}} );
        # also must remove them from our cache since they should no longer exist
        $self->memcache->delete_multi( @overlap_cache_ids );
    }
    return $document;
}
# _update_data_document( document => $doc, old_value_types => \%old,
#                        new_value_types => \%new, bulk_updates => \%ops,
#                        acquired_locks => \@locks )
#
# Queues a bulk update for an existing high-resolution data document.  Any
# value types present in the incoming update but absent from the stored
# document are added first.  Returns the document and an arrayref of the
# value types that were added (empty if none).
sub _update_data_document {
    my ( $self, %args ) = @_;

    my $document        = $args{'document'};
    my $old_value_types = $args{'old_value_types'};
    my $new_value_types = $args{'new_value_types'};
    my $bulk_updates    = $args{'bulk_updates'};
    my $acquired_locks  = $args{'acquired_locks'};

    # every incoming value type the stored document doesn't already know about
    my @value_types_to_add = grep { !$old_value_types->{$_} } keys %$new_value_types;

    if ( @value_types_to_add ) {
        $self->logger->debug( "Adding new value types " . join( ',', @value_types_to_add ) . " to document." );
        $document->add_value_types( \@value_types_to_add );
    }

    my $data_type       = $document->data_type;
    my $collection_name = 'data';

    # reuse (or lazily initialize) the unordered bulk op for this data type + collection
    my $bulk = $bulk_updates->{$data_type->name}{$collection_name};
    if ( !defined $bulk ) {
        $bulk = $data_type->database->get_collection( $collection_name )->initialize_unordered_bulk_op();
        $bulk_updates->{$data_type->name}{$collection_name} = $bulk;
    }

    $document->update( bulk => $bulk );

    return ( $document, \@value_types_to_add );
}
# _update_aggregate_document( document => $doc, old_value_types => \%old,
#                             bulk_updates => \%ops, acquired_locks => \@locks )
#
# Queues a bulk update for an existing aggregate document in its per-interval
# "data_<interval>" collection.  Value types carried by the document but
# missing from the stored copy are added first.  Returns the document and an
# arrayref of the value types that were added (empty if none).
sub _update_aggregate_document {
    my ( $self, %args ) = @_;

    my $document        = $args{'document'};
    my $old_value_types = $args{'old_value_types'};
    my $bulk_updates    = $args{'bulk_updates'};
    my $acquired_locks  = $args{'acquired_locks'};

    # value types present on this document that the stored copy lacks
    my @value_types_to_add = grep { !$old_value_types->{$_} } keys %{$document->value_types};

    if ( @value_types_to_add ) {
        $self->logger->debug( "Adding new value types " . join( ',', @value_types_to_add ) . " to document." );
        $document->add_value_types( \@value_types_to_add );
    }

    my $data_type       = $document->data_type;
    my $collection_name = 'data_' . $document->interval;

    # reuse (or lazily initialize) the unordered bulk op for this data type + collection
    my $bulk = $bulk_updates->{$data_type->name}{$collection_name};
    if ( !defined $bulk ) {
        $bulk = $data_type->database->get_collection( $collection_name )->initialize_unordered_bulk_op();
        $bulk_updates->{$data_type->name}{$collection_name} = $bulk;
    }

    $document->update( bulk => $bulk );

    return ( $document, \@value_types_to_add );
}
# _update_metadata_value_types( data_type => $dt, value_types => \@names )
#
# Ensures every given value type is registered in the data type's metadata
# document.  A memcache entry per value type short-circuits the common case;
# only when at least one value type has never been seen does this take the
# metadata lock, diff against the live metadata document, and $set any
# missing entries.  New value types get placeholder description/units equal
# to their own name.
sub _update_metadata_value_types {
    my ( $self, %args ) = @_;
    my $data_type = $args{'data_type'};
    my $new_value_types = $args{'value_types'};
    # determine all the cache ids for all these metadata value types
    my @cache_ids;
    foreach my $new_value_type ( @$new_value_types ) {
        # include this value type in its data type entry
        # (in-memory registration so later lookups in this process see it)
        $self->data_types->{$data_type->name}->value_types->{$new_value_type} = {'description' => $new_value_type,
                                                                                 'units' => $new_value_type};
        my $cache_id = $self->redislock->get_cache_id( type => $data_type->name,
                                                       collection => 'metadata',
                                                       identifier => $new_value_type );
        push( @cache_ids, $cache_id );
    }
    # consult our cache to see if any of them dont exists
    my $cache_results = $self->memcache->get_multi( @cache_ids );
    my $found_missing = 0;
    foreach my $cache_id ( @cache_ids ) {
        # cache hit
        next if ( $cache_results->{$cache_id} );
        # found a value type we've never seen before
        $found_missing = 1;
        last;
    }
    # no new value types found to update
    return if ( !$found_missing );
    # get metadata collection for this data type
    my $metadata_collection = $data_type->database->get_collection( 'metadata' );
    # get lock for this metadata document
    my $lock = $self->redislock->lock( type => $data_type->name,
                                       collection => 'metadata' ) or die "Can't lock metadata for " . $data_type->name;
    # grab the current metadata document
    my $doc = $metadata_collection->find_one( {}, {'values' => 1} );
    # error if there is none present
    if ( !$doc ) {
        # release the lock before dying so it isn't left held
        $self->redislock->unlock( $lock );
        die( 'No metadata document found for database ' . $data_type->name . '.' );
    }
    my $updates = {};
    # find any new value types
    foreach my $new_value_type ( @$new_value_types ) {
        # skip it if it already exists
        next if ( exists( $doc->{'values'}{$new_value_type} ) );
        $self->logger->debug( "Adding new value type $new_value_type to database " . $data_type->name . "." );
        # found a new one that needs to be added
        $updates->{"values.$new_value_type"} = {'description' => $new_value_type,
                                                'units' => $new_value_type};
    }
    # is there at least one update to perform?
    if ( keys( %$updates ) > 0 ) {
        # update the single metadata document with all new value types found
        $metadata_collection->update_one( {},
                                          {'$set' => $updates} );
    }
    # mark all value types in our cache
    my @multi = map { [$_ => 1] } @cache_ids;
    $self->memcache->set_multi( @multi );
    # all done, release our lock on this metadata document
    $self->redislock->unlock( $lock );
}
# _create_measurement_document( identifier => $id, data_type => $dt,
#                               meta => \%meta, start => $epoch,
#                               interval => $secs, bulk_creates => \%ops,
#                               acquired_locks => \@locks )
#
# Ensures an *active* measurement entry (end => undef) exists for the given
# identifier, inserting one with the required metadata fields when missing,
# then marks the identifier in memcache so subsequent messages skip this
# path.  The redis lock taken here is pushed onto @$acquired_locks for the
# caller to release.
sub _create_measurement_document {
    my ( $self, %args ) = @_;
    my $identifier = $args{'identifier'};
    my $data_type = $args{'data_type'};
    my $meta = $args{'meta'};
    my $start = $args{'start'};
    my $interval = $args{'interval'};
    my $bulk_creates = $args{'bulk_creates'};
    my $acquired_locks = $args{'acquired_locks'};
    $self->logger->debug( "Measurement $identifier in database " . $data_type->name . " not found in cache." );
    # get lock for this measurement identifier
    my $lock = $self->redislock->lock( type => $data_type->name,
                                       collection => 'measurements',
                                       identifier => $identifier ) or die "Can't lock measurements for $identifier";
    push( @$acquired_locks, $lock );
    # get measurement collection for this data type
    my $measurement_collection = $data_type->database->get_collection( 'measurements' );
    # see if it exists in the database (and is active, i.e. its end is unset)
    my $query = Tie::IxHash->new( identifier => $identifier,
                                  end => undef );
    my $exists = $measurement_collection->count( $query );
    # doesn't exist yet
    if ( !$exists ) {
        $self->logger->debug( "Active measurement $identifier not found in database " . $data_type->name . ", adding." );
        my $metadata_fields = $data_type->metadata_fields;
        # "+ 0" forces numeric context so the timestamps are stored as numbers
        my $fields = Tie::IxHash->new( identifier => $identifier,
                                       start => $start + 0,
                                       end => undef,
                                       last_updated => $start + 0 );
        while ( my ( $field, $value ) = each( %$meta ) ) {
            # skip it if its not a required meta field for this data type, the writer should only ever set those
            next if ( !$metadata_fields->{$field}{'required'} );
            $fields->Push( $field => $value );
        }
        # create it
        $measurement_collection->insert_one( $fields );
    }
    # mark it in our known cache so no one ever tries to add it again
    my $cache_id = $self->redislock->get_cache_id( type => $data_type->name,
                                                   collection => 'measurements',
                                                   identifier => $identifier );
    my $cache_duration = MEASUREMENT_CACHE_EXPIRATION;
    # use longer cache duration for measurements not submitted often
    $cache_duration = $interval * 2 if ( $interval * 2 > $cache_duration );
    # bug fix: previously passed "$interval * 2" directly here, which ignored
    # the MEASUREMENT_CACHE_EXPIRATION floor computed above and gave
    # frequently-submitted (short interval) measurements a too-short TTL
    $self->memcache->set( $cache_id, 1, $cache_duration );
}
# _fetch_data_types()
#
# Rebuilds the in-memory map of known data types: enumerates every mongo
# database, skips databases listed under /config/ignore-databases/database
# or whose name starts with an underscore, and wraps each remaining one in a
# GRNOC::TSDS::DataType.  Databases whose DataType construction fails are
# logged and skipped.  The result replaces the object's data_types attribute.
sub _fetch_data_types {
    my ( $self ) = @_;
    $self->logger->debug( 'Getting data types.' );
    my $data_types = {};
    # determine databases to ignore
    my $ignore_databases = {};
    # temporarily force array context so a single configured entry still
    # comes back as a list; restored immediately after the get()
    $self->config->{'force_array'} = 1;
    my @ignore_databases = $self->config->get( '/config/ignore-databases/database' );
    $self->config->{'force_array'} = 0;
    foreach my $database ( @ignore_databases ) {
        $database = $database->[0];
        $self->logger->debug( "Ignoring database '$database'." );
        $ignore_databases->{$database} = 1;
    }
    # grab all database names in mongo
    my @database_names = $self->mongo_rw->database_names();
    foreach my $database ( @database_names ) {
        # skip it if its marked to be ignored
        next if ( $ignore_databases->{$database} || $database =~ /^_/ );
        $self->logger->debug( "Storing data type for database $database." );
        my $data_type;
        try {
            $data_type = GRNOC::TSDS::DataType->new( name => $database,
                                                     database => $self->mongo_rw->get_database( $database ) );
        }
        catch {
            # construction failed (e.g. malformed metadata); warn and move on
            $self->logger->warn( $_ );
        };
        next if !$data_type;
        # store this as one of our known data types
        $data_types->{$database} = $data_type;
    }
    # update the list of known data types
    $self->_set_data_types( $data_types );
}
# _redis_connect()
#
# Blocks until a GRNOC::TSDS::RedisLock can be constructed from the current
# config, retrying every RECONNECT_TIMEOUT seconds on failure.
sub _redis_connect {
    my ( $self ) = @_;

    my $connected = 0;
    until ( $connected ) {
        try {
            $self->_set_redislock( GRNOC::TSDS::RedisLock->new( config => $self->config ) );
            $connected = 1;
        }
        catch {
            $self->logger->error( "Error connecting to Redis: $_" );
        };

        # back off before the next attempt
        sleep( RECONNECT_TIMEOUT ) if !$connected;
    }
}
# _rabbit_connect()
#
# Blocks until a RabbitMQ connection is established: opens channel 1,
# declares the configured queue, sets the prefetch QoS and starts consuming
# with explicit acks.  On failure it logs, waits RECONNECT_TIMEOUT seconds
# and retries forever.
sub _rabbit_connect {
    my ( $self ) = @_;

    my $host  = $self->config->get( '/config/rabbit/@host' );
    my $port  = $self->config->get( '/config/rabbit/@port' );
    my $queue = $self->queue;

    my $connected = 0;
    until ( $connected ) {
        $self->logger->info( "Connecting to RabbitMQ $host:$port." );

        try {
            my $rabbit = Net::AMQP::RabbitMQ->new();
            $rabbit->connect( $host, {'port' => $port} );
            $rabbit->channel_open( 1 );
            $rabbit->queue_declare( 1, $queue, {'auto_delete' => 0} );
            $rabbit->basic_qos( 1, { prefetch_count => QUEUE_PREFETCH_COUNT } );
            # no_ack => 0: messages must be explicitly acknowledged
            $rabbit->consume( 1, $queue, {'no_ack' => 0} );
            $self->_set_rabbit( $rabbit );
            $connected = 1;
        }
        catch {
            $self->logger->error( "Error connecting to RabbitMQ: $_" );
        };

        if ( !$connected ) {
            $self->logger->info( "Reconnecting after " . RECONNECT_TIMEOUT . " seconds..." );
            sleep( RECONNECT_TIMEOUT );
        }
    }
}
1;
| 34.788043 | 192 | 0.542103 |
73eb53318c3b5891347ffe1a44c6c40946ab3179 | 793 | pl | Perl | oeis/interlace/diffs.pl | gfis/fasces | 0134b5d0a7213789a33b2134be7d6d6cb5159ab8 | [
"Apache-2.0"
] | null | null | null | oeis/interlace/diffs.pl | gfis/fasces | 0134b5d0a7213789a33b2134be7d6d6cb5159ab8 | [
"Apache-2.0"
] | null | null | null | oeis/interlace/diffs.pl | gfis/fasces | 0134b5d0a7213789a33b2134be7d6d6cb5159ab8 | [
"Apache-2.0"
] | null | null | null | #!perl
# show iterative differences of a sequence
# @(#) $Id$
# 2018-03-27, Georg Fischer
#------------------------------------------------------
# usage:
#   ./armleg $(N) 1 | grep -vE "^#" | cut -d " " -f $$(($(N)+2)) \
#   | perl diffs.pl
#--------------------------------------------------------
use strict;

# read one value per line, stripping all whitespace
my @values = ();
while ( my $line = <> ) {
    $line =~ s/\s//g;
    push @values, $line;
}
print join( " ", @values ) . "\n";

# repeatedly replace the sequence with its absolute first differences,
# printing each (one element shorter) row, until a single value remains
while ( scalar( @values ) > 1 ) {
    my $prev = shift @values;
    for my $index ( 0 .. $#values ) {
        my $current = $values[$index];
        $values[$index] = abs( $current - $prev );
        $prev = $current;
    }
    print join( " ", @values ) . "\n";
}
__DATA__
./armleg 4 1 | grep -vE "^#" | cut -d " " -f $((4+2))
160
110
68
160
264
110
110
264
160
68
110
160
| 17.622222 | 64 | 0.433796 |
ed05a37f7ae09357022cf496c9f95951b7594b59 | 727 | pm | Perl | lib/March/Component/Move.pm | dnmfarrell/March | 8cf833554a3332c20e9789ec00ce1311f3fc15e4 | [
"BSD-2-Clause"
] | null | null | null | lib/March/Component/Move.pm | dnmfarrell/March | 8cf833554a3332c20e9789ec00ce1311f3fc15e4 | [
"BSD-2-Clause"
] | null | null | null | lib/March/Component/Move.pm | dnmfarrell/March | 8cf833554a3332c20e9789ec00ce1311f3fc15e4 | [
"BSD-2-Clause"
] | null | null | null | package March::Component::Move;
use 5.020;
use Role::Tiny;
use Math::Shape::Vector;
use March::Msg;
use March::Game;
use feature 'signatures';
no warnings 'experimental';
use Carp;
with qw/March::Component::Id
March::Component::Position/;
=head2 move
Moves the actor to a new vector requires a L<Math::Shape::Vector> object as an argument. Returns the new vector.
=cut
sub move ($self, $end_vector)
{
croak 'perform requires a Math::Shape::Vector object'
unless $end_vector->isa('Math::Shape::Vector');
croak 'move location not within game map limits' unless March::Game->instance->map->within_bounds($end_vector);
# update position
$self->position($end_vector);
$end_vector;
}
1;
| 20.771429 | 115 | 0.698762 |
73d13f3e8235bafb9501294edc2fde84a09577a0 | 6,346 | pm | Perl | apps/vmware/connector/mode/swapvm.pm | petneli/centreon-plugins | d131e60a1859fdd0e959623de56e6e7512c669af | [
"Apache-2.0"
] | 316 | 2015-01-18T20:37:21.000Z | 2022-03-27T00:20:35.000Z | apps/vmware/connector/mode/swapvm.pm | petneli/centreon-plugins | d131e60a1859fdd0e959623de56e6e7512c669af | [
"Apache-2.0"
] | 2,333 | 2015-04-26T19:10:19.000Z | 2022-03-31T15:35:21.000Z | apps/vmware/connector/mode/swapvm.pm | petneli/centreon-plugins | d131e60a1859fdd0e959623de56e6e7512c669af | [
"Apache-2.0"
] | 371 | 2015-01-18T20:37:23.000Z | 2022-03-22T10:10:16.000Z | #
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::vmware::connector::mode::swapvm;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use centreon::plugins::templates::catalog_functions qw(catalog_status_threshold_ng);
# Render the per-VM status line shown in the plugin output, e.g.
# "[connection state connected][power state poweredOn]".
sub custom_status_output {
    my ($self, %options) = @_;

    return sprintf(
        '[connection state %s][power state %s]',
        $self->{result_values}->{connection_state},
        $self->{result_values}->{power_state}
    );
}
# Declare the counter layout consumed by the counter-template base class:
# one per-VM instance group with a status check and two swap-rate counters.
sub set_counters {
    my ($self, %options) = @_;
    # type => 1: one entry per virtual machine, each prefixed by
    # prefix_vm_output(); skipped_code -10 tolerates missing metrics.
    $self->{maps_counters_type} = [
        { name => 'vm', type => 1, cb_prefix_output => 'prefix_vm_output', message_multiple => 'All virtual machines are ok', skipped_code => { -10 => 1 } }
    ];
    $self->{maps_counters}->{vm} = [
        {
            # Status counter (type => 2): threshold expressions evaluated
            # against connection/power state; default flags anything that is
            # not connected + poweredOn as UNKNOWN.
            label => 'status', type => 2, unknown_default => '%{connection_state} !~ /^connected$/i or %{power_state} !~ /^poweredOn$/i',
            set => {
                key_values => [ { name => 'connection_state' }, { name => 'power_state' } ],
                closure_custom_output => $self->can('custom_status_output'),
                # Status has no numeric value, so emit no perfdata.
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold_ng
            }
        },
        # Swap-in rate, in bytes per second, with per-instance perfdata.
        { label => 'swap-in', nlabel => 'vm.swap.in.usage.bytespersecond', set => {
                key_values => [ { name => 'swap_in' }, { name => 'display' } ],
                output_template => 'Swap In: %s %s/s',
                output_change_bytes => 1,
                perfdatas => [
                    { label => 'swap_in', template => '%s',
                      unit => 'B/s', min => 0, label_extra_instance => 1 }
                ],
            }
        },
        # Swap-out rate, same shape as swap-in.
        { label => 'swap-out', nlabel => 'vm.swap.out.usage.bytespersecond', set => {
                key_values => [ { name => 'swap_out' }, { name => 'display' } ],
                output_template => 'Swap Out: %s %s/s',
                output_change_bytes => 1,
                perfdatas => [
                    { label => 'swap_out', template => '%s',
                      unit => 'B/s', min => 0, label_extra_instance => 1 }
                ]
            }
        }
    ];
}
# Build the output prefix for one VM instance, optionally including its
# annotation when --display-description was requested upstream.
sub prefix_vm_output {
    my ($self, %options) = @_;

    my $vm = $options{instance_value};
    my $prefix = "Virtual machine '" . $vm->{display} . "'";
    $prefix .= " [annotation: $vm->{config_annotation}]"
        if defined $vm->{config_annotation};
    return $prefix . ' : ';
}
# Constructor: delegate to the plugin-template base class, then register
# the command-line options this mode accepts.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    my %arguments = (
        'vm-hostname:s'        => { name => 'vm_hostname' },
        'filter'               => { name => 'filter' },
        'scope-datacenter:s'   => { name => 'scope_datacenter' },
        'scope-cluster:s'      => { name => 'scope_cluster' },
        'scope-host:s'         => { name => 'scope_host' },
        'filter-description:s' => { name => 'filter_description' },
        'filter-os:s'          => { name => 'filter_os' },
        'filter-uuid:s'        => { name => 'filter_uuid' },
        'display-description'  => { name => 'display_description' },
    );
    $options{options}->add_options(arguments => \%arguments);

    return $self;
}
# Fetch swap statistics from the connector and populate $self->{vm},
# one entry per virtual machine keyed by its name.
sub manage_selection {
    my ($self, %options) = @_;

    # Reset any previous results before querying the connector.
    $self->{vm} = {};
    my $response = $options{custom}->execute(
        params => $self->{option_results},
        command => 'swapvm'
    );

    my $data = $response->{data};
    foreach my $vm_id (keys %$data) {
        my $entry = $data->{$vm_id};
        my $vm_name = $entry->{name};

        $self->{vm}->{$vm_name} = {
            display          => $vm_name,
            connection_state => $entry->{connection_state},
            power_state      => $entry->{power_state},
            swap_in          => $entry->{'mem.swapinRate.average'},
            swap_out         => $entry->{'mem.swapoutRate.average'}
        };

        # Only fetch/strip the annotation when the user asked for it.
        if (defined($self->{option_results}->{display_description})) {
            $self->{vm}->{$vm_name}->{config_annotation} =
                $options{custom}->strip_cr(value => $entry->{'config.annotation'});
        }
    }
}
1;
__END__
=head1 MODE
Check virtual machine swap rate usage.
=over 8
=item B<--vm-hostname>
VM hostname to check.
If not set, we check all VMs.
=item B<--filter>
VM hostname is a regexp.
=item B<--filter-description>
Filter also virtual machines description (can be a regexp).
=item B<--filter-os>
Filter also virtual machines OS name (can be a regexp).
=item B<--scope-datacenter>
Search in following datacenter(s) (can be a regexp).
=item B<--scope-cluster>
Search in following cluster(s) (can be a regexp).
=item B<--scope-host>
Search in following host(s) (can be a regexp).
=item B<--display-description>
Display virtual machine description.
=item B<--unknown-status>
Set unknown threshold for status (Default: '%{connection_state} !~ /^connected$/i or %{power_state} !~ /^poweredOn$/i').
Can be used with special variables like: %{connection_state}, %{power_state}
=item B<--warning-status>
Set warning threshold for status (Default: '').
Can be used with special variables like: %{connection_state}, %{power_state}
=item B<--critical-status>
Set critical threshold for status (Default: '').
Can be used with special variables like: %{connection_state}, %{power_state}
=item B<--warning-*>
Threshold warning.
Can be: 'swap-in', 'swap-out'.
=item B<--critical-*>
Threshold critical.
Can be: 'swap-in', 'swap-out'.
=back
=cut
| 31.107843 | 157 | 0.585881 |
ed700d58fbbb01f9ec7f4ea86aa6df3569b0598c | 1,632 | t | Perl | t/08_busy.t | git-the-cpan/DBD-SQLite-Amalgamation | b9cd321286e1b1aa26c5490c95e81eea417c7b1c | [
"Artistic-1.0"
] | null | null | null | t/08_busy.t | git-the-cpan/DBD-SQLite-Amalgamation | b9cd321286e1b1aa26c5490c95e81eea417c7b1c | [
"Artistic-1.0"
] | null | null | null | t/08_busy.t | git-the-cpan/DBD-SQLite-Amalgamation | b9cd321286e1b1aa26c5490c95e81eea417c7b1c | [
"Artistic-1.0"
] | null | null | null | #!/usr/bin/perl
# Test that two processes can write at once, assuming we commit timely.
use strict;
BEGIN {
$| = 1;
$^W = 1;
}
use t::lib::Test;
use Test::More;
BEGIN {
plan skip_all => 'requires DBI v1.608' if $DBI::VERSION < 1.608;
}
use Test::NoWarnings;
plan tests => 12;
my $dbh = connect_ok(
RaiseError => 1,
PrintError => 0,
AutoCommit => 0,
);
my $dbh2 = connect_ok(
RaiseError => 1,
PrintError => 0,
AutoCommit => 0,
);
ok($dbh2->sqlite_busy_timeout(3000));
ok($dbh->do("CREATE TABLE Blah ( id INTEGER, val VARCHAR )"));
ok($dbh->commit);
ok($dbh->do("INSERT INTO Blah VALUES ( 1, 'Test1' )"));
my $start = time;
eval {
$dbh2->do("INSERT INTO Blah VALUES ( 2, 'Test2' )");
};
ok($@);
if ($@) {
print "# insert failed : $@";
$dbh2->rollback;
}
$dbh->commit;
ok($dbh2->do("INSERT INTO Blah VALUES ( 2, 'Test2' )"));
$dbh2->commit;
$dbh2->disconnect;
undef($dbh2);
pipe(READER, WRITER);
my $pid = fork;
if (!defined($pid)) {
# fork failed
skip("No fork here", 1);
skip("No fork here", 1);
} elsif (!$pid) {
# child
my $dbh2 = DBI->connect('dbi:SQLite:foo', '', '',
{
RaiseError => 1,
PrintError => 0,
AutoCommit => 0,
});
$dbh2->do("INSERT INTO Blah VALUES ( 3, 'Test3' )");
select WRITER; $| = 1; select STDOUT;
print WRITER "Ready\n";
sleep(5);
$dbh2->commit;
} else {
# parent
close WRITER;
my $line = <READER>;
chomp($line);
ok($line, "Ready");
ok($dbh->sqlite_busy_timeout(10000));
ok($dbh->do("INSERT INTO Blah VALUES (4, 'Test4' )"));
$dbh->commit;
wait;
}
| 18.976744 | 71 | 0.5625 |
ed8e8efd1d82c134e0efa04764863388f8d25d7d | 18,045 | pl | Perl | gnu/usr.bin/perl/Porting/pod_lib.pl | ArrogantWombatics/openbsd-src | 75721e1d44322953075b7c4b89337b163a395291 | [
"BSD-3-Clause"
] | 1 | 2019-02-16T13:29:23.000Z | 2019-02-16T13:29:23.000Z | gnu/usr.bin/perl/Porting/pod_lib.pl | ArrogantWombatics/openbsd-src | 75721e1d44322953075b7c4b89337b163a395291 | [
"BSD-3-Clause"
] | 1 | 2018-08-21T03:56:33.000Z | 2018-08-21T03:56:33.000Z | gnu/usr.bin/perl/Porting/pod_lib.pl | ArrogantWombaticus/openbsd-src | 75721e1d44322953075b7c4b89337b163a395291 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/perl -w
use strict;
use File::Find;
=head1 NAME
Porting/pod_lib.pl - functions for building and installing POD
=head1 SYNOPSIS
require './Porting/pod_lib.pl';
=cut
=head1 DESCRIPTION
This program, when C<require>d into other programs in the Perl 5 core
distribution, provides functions useful during building and, secondarily,
testing.
As of this writing, the functions in this program are used in these other
programs:
installman
installperl
pod/buildtoc
pod/perl.pod
Porting/new-perldelta.pl
Porting/pod_rules.pl
Note: Since these functions are used during the Perl build process, they must
work with F<miniperl>. That necessarily implies that these functions must not
rely on XS modules, either directly or indirectly (e.g., C<autodie>).
=head1 SUBROUTINES
=head2 C<my_die()>
=over 4
=item * Purpose
Exit from a process with an error code and a message.
=item * Arguments
List of arguments to be passed with the error message. Example:
close $fh or my_die("close 'utils.lst': $!");
=item * Return Value
Exit code C<255>.
=item * Comment
Prints C<ABORTED> to STDERR.
=back
=cut
# In some situations, eg cross-compiling, we get run with miniperl, so we can't use Digest::MD5
my $has_md5;
BEGIN {
use Carp;
$has_md5 = eval { require Digest::MD5; Digest::MD5->import('md5'); 1; };
}
# make it clearer when we haven't run to completion, as we can be quite
# noisy when things are working ok
# Print an error message (same argument list as print) prefixed with the
# program name to STDERR, follow it with an ABORTED banner, and exit the
# whole process with status 255.
sub my_die {
    my @message = @_;
    print STDERR "$0: ", @message;
    # Make sure the banner starts on its own line.
    print STDERR "\n" if $message[-1] !~ /\n\z/;
    print STDERR "ABORTED\n";
    exit 255;
}
=head2 C<open_or_die()>
=over 4
=item * Purpose
Opens a file or fails if it cannot.
=item * Arguments
String holding filename to be opened. Example:
$fh = open_or_die('utils.lst');
=item * Return Value
Handle to opened file.
=back
=cut
# Open $filename for reading and return the lexical filehandle; abort the
# build via my_die() if the open fails.
sub open_or_die {
    my ($filename) = @_;
    open my $fh, '<', $filename
        or my_die("Can't open $filename: $!");
    return $fh;
}
=head2 C<slurp_or_die()>
=over 4
=item * Purpose
Read the contents of a file into memory as a single string.
=item * Arguments
String holding name of file to be read into memory.
$olddelta = slurp_or_die('pod/perldelta.pod');
=item * Return Value
String holding contents of file.
=back
=cut
# Read the whole of $filename into a single scalar (binmode, so raw bytes
# with no newline translation) and return it. Dies if the file cannot be
# opened, read, or closed.
sub slurp_or_die {
    my ($filename) = @_;
    my $fh = open_or_die($filename);
    binmode $fh;
    my $contents = do { local $/; <$fh> };
    die "Can't read $filename: $!" unless defined $contents and close $fh;
    return $contents;
}
=head2 C<write_or_die()>
=over 4
=item * Purpose
Write out a string to a file.
=item * Arguments
List of two arguments: (i) String holding name of file to be written to; (ii)
String holding contents to be written.
write_or_die($olddeltaname, $olddelta);
=item * Return Value
Implicitly returns true value upon success.
=back
=cut
# Write $contents to $filename (truncating, binmode), dying on any open,
# write, or close failure — close is checked because buffered write errors
# only surface there.
sub write_or_die {
    my ($filename, $contents) = @_;
    open my $fh, '>', $filename or die "Can't open $filename for writing: $!";
    binmode $fh;
    print {$fh} $contents or die "Can't write to $filename: $!";
    close $fh or die "Can't close $filename: $!";
}
=head2 C<verify_contiguous()>
=over 4
=item * Purpose
Verify that a file contains exactly one contiguous run of lines which matches
the passed in pattern. C<croak()>s if the pattern is not found, or found in
more than one place.
=item * Arguments
=over 4
=item * Name of file
=item * Contents of file
=item * Pattern of interest
=item * Name to report on error
=back
=item * Return Value
The contents of the file, with C<qr/\0+/> substituted for the pattern.
=back
=cut
# Check that $content contains exactly one contiguous run of lines
# matching $re. Croaks ($what names the section in the message) if the
# pattern is absent or appears in more than one place. Returns $content
# with each match replaced by a NUL byte, so callers can splice new text
# in at the "\0+" placeholder.
sub verify_contiguous {
    my ($name, $content, $re, $what) = @_;
    require Carp;

    $content =~ s/$re/\0/g;
    # Count the contiguous runs of NUL markers left by the substitution.
    my @runs = $content =~ m/(\0+)/g;
    my $sections = scalar @runs;
    Carp::croak("$0: $name contains no $what") unless $sections >= 1;
    Carp::croak("$0: $name contains discontiguous $what") unless $sections <= 1;
    return $content;
}
=head2 C<process()>
=over 4
=item * Purpose
Read a file from disk, pass the contents to the callback, and either update
the file on disk (if changed) or generate TAP output to confirm that the
version on disk is up to date. C<die>s if the file contains any C<NUL> bytes.
This permits the callback routine to use C<NUL> bytes as placeholders while
manipulating the file's contents.
=item * Arguments
=over 4
=item * Description for use in error messages
=item * Name of file
=item * Callback
Passed description and file contents, should return updated file contents.
=item * Test number
If defined, generate TAP output to C<STDOUT>. If defined and false, generate
an unnumbered test. Otherwise this is the test number in the I<ok> line.
=item * Verbose flag
If true, generate verbose output.
=back
=item * Return Value
Does not return anything.
=back
=cut
# Run $callback over the contents of $filename. With a defined $test,
# emit a TAP line reporting whether the file is already up to date;
# otherwise rewrite the file in place (keeping a .old backup and the
# original permission bits) when the callback changed anything.
sub process {
    my ($desc, $filename, $callback, $test, $verbose) = @_;

    print "Now processing $filename\n" if $verbose;

    my $orig = slurp_or_die($filename);
    # NUL bytes are reserved as placeholders by the callbacks.
    my_die("$filename contains NUL bytes") if $orig =~ /\0/;

    my $new = $callback->($desc, $orig);

    if (defined $test) {
        # TAP mode: report, never modify the file. A false-but-defined
        # $test produces an unnumbered "ok"/"not ok" line.
        my $status = $new eq $orig ? 'ok' : 'not ok';
        $status .= " $test" if $test;
        print "$status # $filename is up to date\n";
        return;
    }
    if ($new eq $orig) {
        print "Was not modified\n" if $verbose;
        return;
    }

    # Preserve the original permissions across the rewrite.
    my $mode = (stat $filename)[2];
    my_die("Can't stat $filename: $!") unless defined $mode;
    rename $filename, "$filename.old"
        or my_die("Can't rename $filename to $filename.old: $!");
    write_or_die($filename, $new);
    chmod $mode & 0777, $filename or my_die("can't chmod $mode $filename: $!");
}
=head2 C<pods_to_install()>
=over 4
=item * Purpose
Create a lookup table holding information about PODs to be installed.
=item * Arguments
None.
=item * Return Value
Reference to a hash with a structure like this:
$found = {
'MODULE' => {
'CPAN::Bundle' => 'lib/CPAN/Bundle.pm',
'Locale::Codes::Script_Retired' =>
'lib/Locale/Codes/Script_Retired.pm',
'Pod::Simple::DumpAsText' =>
'lib/Pod/Simple/DumpAsText.pm',
# ...
'Locale::Codes::LangVar' =>
'lib/Locale/Codes/LangVar.pod'
},
'PRAGMA' => {
'fields' => 'lib/fields.pm',
'subs' => 'lib/subs.pm',
# ...
},
=item * Comment
Broadly speaking, the function assembles a list of all F<.pm> and F<.pod>
files in the distribution and then excludes certain files from installation.
=back
=cut
# Walk lib/ and build the { PRAGMA => {...}, MODULE => {...} } lookup of
# POD files to install, skipping t/ directories, explicit exclusions and
# duplicates of dual-life perl*.pod files. Dies on duplicate module names.
sub pods_to_install {
    # manpages not to be installed
    my %do_not_install = map { ($_ => 1) }
        qw(Pod::Functions XS::APItest XS::Typemap);
    # %done maps module name -> path seen so far (duplicate detection);
    # %found is the returned PRAGMA/MODULE structure.
    my (%done, %found);
    File::Find::find({no_chdir=>1,
                      wanted => sub {
                          if (m!/t\z!) {
                              ++$File::Find::prune;
                              return;
                          }
                          # $_ is $File::Find::name when using no_chdir
                          return unless m!\.p(?:m|od)\z! && -f $_;
                          return if m!lib/Net/FTP/.+\.pm\z!; # Hi, Graham! :-)
                          # Skip .pm files that have corresponding .pod files
                          return if s!\.pm\z!.pod! && -e $_;
                          # Strip the extension and lib/ prefix, then turn
                          # the path into a Module::Name.
                          s!\.pod\z!!;
                          s!\Alib/!!;
                          s!/!::!g;
                          my_die("Duplicate files for $_, '$done{$_}' and '$File::Find::name'")
                              if exists $done{$_};
                          $done{$_} = $File::Find::name;
                          return if $do_not_install{$_};
                          return if is_duplicate_pod($File::Find::name);
                          # Lowercase leading letter => pragma, else module.
                          $found{/\A[a-z]/ ? 'PRAGMA' : 'MODULE'}{$_}
                              = $File::Find::name;
                      }}, 'lib');
    return \%found;
}
my %state = (
# Don't copy these top level READMEs
ignore => {
micro => 1,
# vms => 1,
},
);
# is_duplicate_pod($file): true if $file under lib/ is a copy of one of
# the dual-life perl*.pod files. %Lengths/%MD5s cache the sizes and MD5
# digests of the possible originals; they persist between calls (the sub
# closes over them) and are filled lazily on the first call.
{
    my (%Lengths, %MD5s);

    sub is_duplicate_pod {
        my $file = shift;
        local $_;

        # Without Digest::MD5 (e.g. under miniperl) we cannot compare
        # contents, so treat nothing as a duplicate.
        return if !$has_md5;

        # Initialise the list of possible source files on the first call.
        unless (%Lengths) {
            __prime_state() unless $state{master};
            foreach (@{$state{master}}) {
                next unless $_->[2]{dual};
                # This is a dual-life perl*.pod file, which will have been copied
                # to lib/ by the build process, and hence also found there.
                # These are the only pod files that might become duplicated.
                ++$Lengths{-s $_->[1]};
                ++$MD5s{md5(slurp_or_die($_->[1]))};
            }
        }

        # We are a file in lib. Are we a duplicate?
        # Don't bother calculating the MD5 if there's no interesting file of
        # this length.
        return $Lengths{-s $file} && $MD5s{md5(slurp_or_die($file))};
    }
}
# Populate the file-scoped %state hash by parsing pod/perldelta.pod (for
# the current version) and the "=begin buildtoc" control section plus the
# pod listing inside pod/perl.pod. Called lazily, once, by the public
# entry points.
sub __prime_state {
    my $source = 'perldelta.pod';
    my $filename = "pod/$source";
    my $contents = slurp_or_die($filename);
    # Extract (5, minor, patch) from the perldelta NAME section.
    my @want =
        $contents =~ /perldelta - what is new for perl v(5)\.(\d+)\.(\d+)\n/;
    die "Can't extract version from $filename" unless @want;
    # e.g. "perl5123delta" for v5.12.3.
    my $delta_leaf = join '', 'perl', @want, 'delta';
    $state{delta_target} = "$delta_leaf.pod";
    $state{delta_version} = \@want;

    # This way round so that keys can act as a MANIFEST skip list
    # Targets will always be in the pod directory. Currently we can only cope
    # with sources being in the same directory.
    $state{copies}{$state{delta_target}} = $source;

    # The default flags if none explicitly set for the current file.
    my $current_flags = '';
    my (%flag_set, @paths);

    my $master = open_or_die('pod/perl.pod');

    # Skip ahead to the buildtoc control section.
    while (<$master>) {
        last if /^=begin buildtoc$/;
    }
    die "Can't find '=begin buildtoc':" if eof $master;

    # Parse the control commands: flag / path / aux.
    while (<$master>) {
        next if /^$/ or /^#/;
        last if /^=end buildtoc/;
        my ($command, @args) = split ' ';
        if ($command eq 'flag') {
            # For the named pods, use these flags, instead of $current_flags
            my $flags = shift @args;
            my_die("Malformed flag $flags")
                unless $flags =~ /\A=([a-z]*)\z/;
            $flag_set{$_} = $1 foreach @args;
        } elsif ($command eq 'path') {
            # If the pod's name matches the regex, prepend the given path.
            my_die("Malformed path for /$args[0]/")
                unless @args == 2;
            push @paths, [qr/\A$args[0]\z/, $args[1]];
        } elsif ($command eq 'aux') {
            # The contents of perltoc.pod's "AUXILIARY DOCUMENTATION" section
            $state{aux} = [sort @args];
        } else {
            my_die("Unknown buildtoc command '$command'");
        }
    }

    # Now parse the pod listing itself, honouring "=for buildtoc flag"
    # lines that toggle the default flags for subsequent entries.
    foreach (<$master>) {
        next if /^$/ or /^#/;
        next if /^=head2/;
        last if /^=for buildtoc __END__$/;

        if (my ($action, $flags) = /^=for buildtoc flag ([-+])([a-z]+)/) {
            if ($action eq '+') {
                $current_flags .= $flags;
            } else {
                my_die("Attempt to unset [$flags] failed - flags are '$current_flags")
                    unless $current_flags =~ s/[\Q$flags\E]//g;
            }
        } elsif (my ($leafname, $desc) = /^\s+(\S+)\s+(.*)/) {
            my $podname = $leafname;
            my $filename = "pod/$podname.pod";
            # Apply the first matching 'path' rewrite, if any.
            foreach (@paths) {
                my ($re, $path) = @$_;
                if ($leafname =~ $re) {
                    $podname = $path . $leafname;
                    $filename = "$podname.pod";
                    last;
                }
            }

            # Keep this compatible with pre-5.10
            my $flags = delete $flag_set{$leafname};
            $flags = $current_flags unless defined $flags;

            # tr///d both tests for and consumes each known flag letter;
            # anything left over at the end is an error.
            my %flags;
            $flags{toc_omit} = 1 if $flags =~ tr/o//d;
            $flags{dual} = $podname ne $leafname;

            $state{generated}{"$podname.pod"}++ if $flags =~ tr/g//d;

            if ($flags =~ tr/r//d) {
                my $readme = $podname;
                $readme =~ s/^perl//;
                $state{readmes}{$readme} = $desc;
                $flags{readme} = 1;
            } else {
                $state{pods}{$podname} = $desc;
            }
            my_die "Unknown flag found in section line: $_" if length $flags;

            push @{$state{master}},
                [$leafname, $filename, \%flags];

            # perldelta also gets a versioned alias (perlNNNdelta).
            if ($podname eq 'perldelta') {
                local $" = '.';
                push @{$state{master}},
                    [$delta_leaf, "pod/$state{delta_target}"];
                $state{pods}{$delta_leaf} = "Perl changes in version @want";
            }

        } else {
            my_die("Malformed line: $_");
        }
    }
    close $master or my_die("close pod/perl.pod: $!");

    # This has to be special-cased somewhere. Turns out this is cleanest:
    push @{$state{master}}, ['a2p', 'x2p/a2p.pod', {toc_omit => 1}];

    # Any 'flag' entries not consumed above name pods that don't exist.
    my_die("perl.pod sets flags for unknown pods: "
           . join ' ', sort keys %flag_set)
        if keys %flag_set;
}
=head2 C<get_pod_metadata()>
=over 4
=item * Purpose

Gather metadata about the POD files shipped with the distribution and,
when a callback is supplied, report inconsistencies between the F<pod/>
directory, F<MANIFEST> and the list of files being built.
=item * Arguments
List of one or more arguments.
=over 4
=item * Boolean true or false
=item * Reference to a subroutine.
=item * Various other arguments.
=back
Example:
$state = get_pod_metadata(
0, sub { warn @_ if @_ }, 'pod/perltoc.pod');
get_pod_metadata(
1, sub { warn @_ if @_ }, values %Build);
=item * Return Value
Hash reference; each element provides either a list or a lookup table for
information about various types of POD files.
'aux' => [ # utility programs like
'h2xs' and 'perlbug' ]
'generated' => { # lookup table for generated POD files
like 'perlapi.pod' }
'ignore' => { # lookup table for files to be ignored }
'pods' => { # lookup table in "name" =>
"short description" format }
'readmes' => { # lookup table for OS-specific
and other READMEs }
'delta_version' => [ # major version number, minor no.,
patch no. ]
'delta_target' => 'perl<Mmmpp>delta.pod',
'master' => [ # list holding entries for files callable
by 'perldoc' ]
'copies' => { # patch version perldelta =>
minor version perldelta }
=back
=cut
# Return the %state metadata hashref describing the distribution's pods.
# When a $callback is given, additionally cross-check pod/ on disk against
# MANIFEST and the caller-supplied list of files being built, passing any
# inconsistency messages to the callback.
sub get_pod_metadata {
    # Do we expect to find generated pods on disk?
    my $permit_missing_generated = shift;
    # Do they want a consistency report?
    my $callback = shift;
    local $_;

    __prime_state() unless $state{master};
    return \%state unless $callback;

    # Leaf names of the files the caller is (re)building; their absence
    # on disk is not an error.
    my %BuildFiles;
    foreach my $path (@_) {
        $path =~ m!([^/]+)$!;
        ++$BuildFiles{$1};
    }

    # Sanity cross check

    my (%disk_pods, %manipods, %manireadmes);
    my (%cpanpods, %cpanpods_leaf);
    my (%our_pods);

    # There are files that we don't want to list in perl.pod.
    # Maybe the various stub manpages should be listed there.
    my %ignoredpods = map { ( "$_.pod" => 1 ) } qw( );

    # Convert these to a list of filenames.
    ++$our_pods{"$_.pod"} foreach keys %{$state{pods}};
    foreach (@{$state{master}}) {
        ++$our_pods{"$_->[0].pod"}
            if $_->[2]{readme};
    }

    # What actually exists in pod/ right now.
    opendir my $dh, 'pod';
    while (defined ($_ = readdir $dh)) {
        next unless /\.pod\z/;
        ++$disk_pods{$_};
    }

    # Things we copy from won't be in perl.pod
    # Things we copy to won't be in MANIFEST

    my $mani = open_or_die('MANIFEST');
    while (<$mani>) {
        chomp;
        s/\s+.*$//;
        if (m!^pod/([^.]+\.pod)!i) {
            ++$manipods{$1};
        } elsif (m!^README\.(\S+)!i) {
            next if $state{ignore}{$1};
            ++$manireadmes{"perl$1.pod"};
        } elsif (exists $our_pods{$_}) {
            # A pod shipped by a CPAN (dual-life) distribution.
            ++$cpanpods{$_};
            m!([^/]+)$!;
            ++$cpanpods_leaf{$1};
            $disk_pods{$_}++
                if -e $_;
        }
    }
    close $mani or my_die "close MANIFEST: $!\n";

    # Are we running before known generated files have been generated?
    # (eg in a clean checkout)
    my %not_yet_there;
    if ($permit_missing_generated) {
        # If so, don't complain if these files aren't yet in place
        %not_yet_there = (%manireadmes, %{$state{generated}}, %{$state{copies}})
    }

    # Collect human-readable inconsistency messages for the callback.
    my @inconsistent;
    foreach my $i (sort keys %disk_pods) {
        push @inconsistent, "$0: $i exists but is unknown by buildtoc\n"
            unless $our_pods{$i} || $ignoredpods{$i};
        push @inconsistent, "$0: $i exists but is unknown by MANIFEST\n"
            if !$BuildFiles{'MANIFEST'} # Ignore if we're rebuilding MANIFEST
                && !$manipods{$i} && !$manireadmes{$i} && !$state{copies}{$i}
                && !$state{generated}{$i} && !$cpanpods{$i};
    }
    foreach my $i (sort keys %our_pods) {
        push @inconsistent, "$0: $i is known by buildtoc but does not exist\n"
            unless $disk_pods{$i} or $BuildFiles{$i} or $not_yet_there{$i};
    }
    unless ($BuildFiles{'MANIFEST'}) {
        # Again, ignore these if we're about to rebuild MANIFEST
        foreach my $i (sort keys %manipods) {
            push @inconsistent, "$0: $i is known by MANIFEST but does not exist\n"
                unless $disk_pods{$i};
            push @inconsistent, "$0: $i is known by MANIFEST but is marked as generated\n"
                if $state{generated}{$i};
        }
    }
    &$callback(@inconsistent);

    return \%state;
}
1;
# Local variables:
# cperl-indent-level: 4
# indent-tabs-mode: nil
# End:
#
# ex: set ts=8 sts=4 sw=4 et:
| 26.693787 | 95 | 0.553228 |
ed67603a3199b91d39091055bae1c0cc024f0bcb | 1,894 | pm | Perl | auto-lib/Paws/AutoScaling/AttachLoadBalancers.pm | agimenez/aws-sdk-perl | 9c4dff7d1af2ff0210c28ca44fb9e92bc625712b | [
"Apache-2.0"
] | null | null | null | auto-lib/Paws/AutoScaling/AttachLoadBalancers.pm | agimenez/aws-sdk-perl | 9c4dff7d1af2ff0210c28ca44fb9e92bc625712b | [
"Apache-2.0"
] | null | null | null | auto-lib/Paws/AutoScaling/AttachLoadBalancers.pm | agimenez/aws-sdk-perl | 9c4dff7d1af2ff0210c28ca44fb9e92bc625712b | [
"Apache-2.0"
] | null | null | null |
# Auto-generated Paws SDK class (see the POD below) modelling the request
# parameters of the Auto Scaling AttachLoadBalancers API call.
package Paws::AutoScaling::AttachLoadBalancers;
  use Moose;
  # Name of the Auto Scaling group (required).
  has AutoScalingGroupName => (is => 'ro', isa => 'Str', required => 1);
  # One or more load balancer names (required).
  has LoadBalancerNames => (is => 'ro', isa => 'ArrayRef[Str|Undef]', required => 1);

  use MooseX::ClassAttribute;

  # Internal Paws wiring: the API operation name, the class of the result
  # object, and the response key the result is extracted from.
  class_has _api_call => (isa => 'Str', is => 'ro', default => 'AttachLoadBalancers');
  class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::AutoScaling::AttachLoadBalancersResultType');
  class_has _result_key => (isa => 'Str', is => 'ro', default => 'AttachLoadBalancersResult');
1;
### main pod documentation begin ###
=head1 NAME
Paws::AutoScaling::AttachLoadBalancers - Arguments for method AttachLoadBalancers on Paws::AutoScaling
=head1 DESCRIPTION
This class represents the parameters used for calling the method AttachLoadBalancers on the
Auto Scaling service. Use the attributes of this class
as arguments to method AttachLoadBalancers.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to AttachLoadBalancers.
As an example:
$service_obj->AttachLoadBalancers(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instance the underlying object.
=head1 ATTRIBUTES
=head2 B<REQUIRED> AutoScalingGroupName => Str
The name of the group.
=head2 B<REQUIRED> LoadBalancerNames => ArrayRef[Str|Undef]
One or more load balancer names.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method AttachLoadBalancers in L<Paws::AutoScaling>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| 30.548387 | 249 | 0.740232 |
ed727c2a737750331eb739f5d7590f8f688426c0 | 1,653 | t | Perl | t/plugin/http-logger-json.t | xdshivani/apisix | cba60de99971abfa4befe7c2d259064415064990 | [
"Apache-2.0"
] | 5,886 | 2020-08-01T04:53:49.000Z | 2022-03-31T16:52:51.000Z | t/plugin/http-logger-json.t | xdshivani/apisix | cba60de99971abfa4befe7c2d259064415064990 | [
"Apache-2.0"
] | 3,324 | 2020-07-31T16:39:11.000Z | 2022-03-31T17:23:27.000Z | t/plugin/http-logger-json.t | xdshivani/apisix | cba60de99971abfa4befe7c2d259064415064990 | [
"Apache-2.0"
] | 1,246 | 2020-08-03T06:27:27.000Z | 2022-03-31T22:34:24.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
use t::APISIX 'no_plan';
log_level('info');
repeat_each(1);
no_long_string();
no_root_location();
# Applied to every test block before it runs: supply a default standalone
# yaml_config (local yaml config centre, admin API off) and a default
# no_error_log check, unless the block sets its own.
add_block_preprocessor(sub {
    my ($block) = @_;

    # Heredoc body must stay flush left — it is literal YAML.
    my $yaml_config = $block->yaml_config // <<_EOC_;
apisix:
    node_listen: 1984
    config_center: yaml
    enable_admin: false
_EOC_

    $block->set_value("yaml_config", $yaml_config);

    # By default assert that nothing matching "[error]" was logged.
    if (!$block->no_error_log) {
        $block->set_value("no_error_log", "[error]");
    }
});
run_tests;
__DATA__
=== TEST 1: json body
--- apisix_yaml
routes:
-
uri: /hello
upstream:
nodes:
"127.0.0.1:1980": 1
type: roundrobin
plugins:
http-logger:
batch_max_size: 1
uri: http://127.0.0.1:1980/log
include_req_body: true
#END
--- request
POST /hello
{"sample_payload":"hello"}
--- error_log
"body":"{\"sample_payload\":\"hello\"}"
| 25.430769 | 74 | 0.681791 |
ed8e401b6f6ffc347be24e051ec84e9daff1be0a | 2,713 | pm | Perl | auto-lib/Paws/SecurityHub/AwsDynamoDbTableGlobalSecondaryIndex.pm | 0leksii/aws-sdk-perl | b2132fe3c79a06fd15b6137e8a0eb628de722e0f | [
"Apache-2.0"
] | 164 | 2015-01-08T14:58:53.000Z | 2022-02-20T19:16:24.000Z | auto-lib/Paws/SecurityHub/AwsDynamoDbTableGlobalSecondaryIndex.pm | 0leksii/aws-sdk-perl | b2132fe3c79a06fd15b6137e8a0eb628de722e0f | [
"Apache-2.0"
] | 348 | 2015-01-07T22:08:38.000Z | 2022-01-27T14:34:44.000Z | auto-lib/Paws/SecurityHub/AwsDynamoDbTableGlobalSecondaryIndex.pm | 0leksii/aws-sdk-perl | b2132fe3c79a06fd15b6137e8a0eb628de722e0f | [
"Apache-2.0"
] | 87 | 2015-04-22T06:29:47.000Z | 2021-09-29T14:45:55.000Z | # Generated by default/object.tt
package Paws::SecurityHub::AwsDynamoDbTableGlobalSecondaryIndex;
use Moose;
has Backfilling => (is => 'ro', isa => 'Bool');
has IndexArn => (is => 'ro', isa => 'Str');
has IndexName => (is => 'ro', isa => 'Str');
has IndexSizeBytes => (is => 'ro', isa => 'Int');
has IndexStatus => (is => 'ro', isa => 'Str');
has ItemCount => (is => 'ro', isa => 'Int');
has KeySchema => (is => 'ro', isa => 'ArrayRef[Paws::SecurityHub::AwsDynamoDbTableKeySchema]');
has Projection => (is => 'ro', isa => 'Paws::SecurityHub::AwsDynamoDbTableProjection');
has ProvisionedThroughput => (is => 'ro', isa => 'Paws::SecurityHub::AwsDynamoDbTableProvisionedThroughput');
1;
### main pod documentation begin ###
=head1 NAME
Paws::SecurityHub::AwsDynamoDbTableGlobalSecondaryIndex
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::SecurityHub::AwsDynamoDbTableGlobalSecondaryIndex object:
$service_obj->Method(Att1 => { Backfilling => $value, ..., ProvisionedThroughput => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::SecurityHub::AwsDynamoDbTableGlobalSecondaryIndex object:
$result = $service_obj->Method(...);
$result->Att1->Backfilling
=head1 DESCRIPTION
Information abut a global secondary index for the table.
=head1 ATTRIBUTES
=head2 Backfilling => Bool
Whether the index is currently backfilling.
=head2 IndexArn => Str
The ARN of the index.
=head2 IndexName => Str
The name of the index.
=head2 IndexSizeBytes => Int
The total size in bytes of the index.
=head2 IndexStatus => Str
The current status of the index.
=head2 ItemCount => Int
The number of items in the index.
=head2 KeySchema => ArrayRef[L<Paws::SecurityHub::AwsDynamoDbTableKeySchema>]
The key schema for the index.
=head2 Projection => L<Paws::SecurityHub::AwsDynamoDbTableProjection>
Attributes that are copied from the table into an index.
=head2 ProvisionedThroughput => L<Paws::SecurityHub::AwsDynamoDbTableProvisionedThroughput>
Information about the provisioned throughput settings for the indexes.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::SecurityHub>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: L<https://github.com/pplu/aws-sdk-perl>
Please report bugs to: L<https://github.com/pplu/aws-sdk-perl/issues>
=cut
| 25.35514 | 126 | 0.730188 |
ed8c8889ca9583984bbfdeff197ceb766361dce9 | 565 | t | Perl | test/blackbox-tests/test-cases/github644/run.t | Khady/dune | 9afd3d215759f65d52c74def6992b2771def3363 | [
"MIT"
] | 1 | 2018-09-14T10:47:11.000Z | 2018-09-14T10:47:11.000Z | test/blackbox-tests/test-cases/github644/run.t | nandor/shmap | 6a730de81a1e8568a61ac0f53af3aebabde8e58b | [
"MIT"
] | null | null | null | test/blackbox-tests/test-cases/github644/run.t | nandor/shmap | 6a730de81a1e8568a61ac0f53af3aebabde8e58b | [
"MIT"
] | null | null | null | $ dune runtest
File "jbuild", line 4, characters 20-42:
Error: Library "ppx_that_doesn't_exist" not found.
Hint: try: dune external-lib-deps --missing @runtest
[1]
These should print something:
$ dune external-lib-deps @runtest
These are the external library dependencies in the default context:
- ocaml-migrate-parsetree
- ppx_that_doesn't_exist
$ dune external-lib-deps --missing @runtest
Error: The following libraries are missing in the default context:
- ppx_that_doesn't_exist
Hint: try: opam install ppx_that_doesn't_exist
[1]
| 29.736842 | 69 | 0.745133 |
ed6f1f4e4d3162c87077245b6c7398253bb433e7 | 7,156 | pm | Perl | modules/Bio/EnsEMBL/Compara/PipeConfig/BaseAge_conf.pm | MatBarba/ensembl-compara | e7b0ac16adca6849934b15bc37e58603be3690ff | [
"Apache-2.0"
] | null | null | null | modules/Bio/EnsEMBL/Compara/PipeConfig/BaseAge_conf.pm | MatBarba/ensembl-compara | e7b0ac16adca6849934b15bc37e58603be3690ff | [
"Apache-2.0"
] | null | null | null | modules/Bio/EnsEMBL/Compara/PipeConfig/BaseAge_conf.pm | MatBarba/ensembl-compara | e7b0ac16adca6849934b15bc37e58603be3690ff | [
"Apache-2.0"
] | null | null | null | =head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 NAME
Bio::EnsEMBL::Compara::PipeConfig::BaseAge_conf
=head1 SYNOPSIS
init_pipeline.pl Bio::EnsEMBL::Compara::PipeConfig::BaseAge_conf -password <your_password>
=head1 DESCRIPTION
Calculate the age of a base
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
package Bio::EnsEMBL::Compara::PipeConfig::BaseAge_conf;
use strict;
use warnings;
use Bio::EnsEMBL::Hive::Version 2.4;
use base ('Bio::EnsEMBL::Compara::PipeConfig::ComparaGeneric_conf');
sub default_options {
my ($self) = @_;
return {
%{$self->SUPER::default_options},
'pipeline_name' => $self->o('ref_species').'_base_age_'.$self->o('rel_with_suffix'), # name used by the beekeeper to prefix job names on the farm
#Write either the node name or node_id in "name" field of the bed file
# 'name' => "node_id",
'name' => "name",
#Location url of database to get EPO GenomicAlignTree objects from
#'compara_url' => 'mysql://ensro@compara3:3306/cc21_mammals_epo_pt3_86',
#Location url of database to get snps from
#'variation_url' => 'mysql://ensro@ens-staging1:3306/homo_sapiens_variation_86_38?group=variation',
# executable locations:
'populate_new_database_exe' => $self->o('ensembl_cvs_root_dir')."/ensembl-compara/scripts/pipeline/populate_new_database.pl",
#'big_bed_exe' => '/software/ensembl/funcgen/bedToBigBed',
'baseage_autosql' => $self->o('ensembl_cvs_root_dir')."/ensembl-compara/scripts/pipeline/baseage_autosql.as",
#Locations to write output files
#'bed_dir' => sprintf('/lustre/scratch109/ensembl/%s/%s', $ENV{USER}, $self->o('pipeline_name')),
'chr_sizes_file' => 'chrom.sizes',
'big_bed_file' => 'base_age'.$self->o('ensembl_release').'.bb',
};
}
sub no_compara_schema {} # Tell the base class not to create the Compara tables in the database
sub pipeline_create_commands {
my ($self) = @_;
return [
@{$self->SUPER::pipeline_create_commands}, # inheriting database and hive tables' creation
'mkdir -p '.$self->o('bed_dir'), #Make bed_dir directory
];
}
sub resource_classes {
my ($self) = @_;
return {
%{$self->SUPER::resource_classes}, # inherit 'default' from the parent class
'100Mb' => { 'LSF' => '-C0 -M100 -R"select[mem>100] rusage[mem=100]"' },
'1Gb' => { 'LSF' => '-C0 -M1000 -R"select[mem>1000] rusage[mem=1000]"' },
'1.8Gb' => { 'LSF' => '-C0 -M1800 -R"select[mem>1800] rusage[mem=1800]"' },
'3.6Gb' => { 'LSF' => '-C0 -M3600 -R"select[mem>3600] rusage[mem=3600]"' },
};
}
sub pipeline_analyses {
my ($self) = @_;
return [
{ -logic_name => 'chrom_sizes',
-module => 'Bio::EnsEMBL::Hive::RunnableDB::DbCmd',
-parameters => {
'db_conn' => $self->o('compara_url'),
'bed_dir' => $self->o('bed_dir'),
'append' => [qw(-N -q)],
'input_query' => "SELECT concat('chr',dnafrag.name), length FROM dnafrag JOIN genome_db USING (genome_db_id) WHERE genome_db.name = '" . $self->o('ref_species') . "'" . " AND is_reference = 1 AND coord_system_name = 'chromosome'",
'chr_sizes_file' => $self->o('chr_sizes_file'),
'output_file' => "#bed_dir#/#chr_sizes_file#",
},
-input_ids => [{}],
-flow_into => {
'1' => [ 'base_age_factory' ],
},
},
{ -logic_name => 'base_age_factory',
-module => 'Bio::EnsEMBL::Hive::RunnableDB::JobFactory',
-parameters => {
'db_conn' => $self->o('compara_url'),
'ref_species' => $self->o('ref_species'),
'inputquery' => "SELECT dnafrag.name as seq_region FROM dnafrag JOIN genome_db USING (genome_db_id) WHERE genome_db.name = '" . $self->o('ref_species') . "'" . " AND is_reference = 1 AND coord_system_name = 'chromosome'",
},
-flow_into => {
'2->A' => [ 'base_age' ],
'A->1' => [ 'big_bed' ],
},
-rc_name => '100Mb',
},
{ -logic_name => 'base_age',
-module => 'Bio::EnsEMBL::Compara::RunnableDB::BaseAge::BaseAge',
-parameters => {
'compara_db' => $self->o('compara_url'),
'variation_url' => $self->o('variation_url'),
'species_set_name' => $self->o('species_set_name'),
'species' => $self->o('ref_species'),
'bed_dir' => $self->o('bed_dir'),
'name' => $self->o('name'),
'clade_taxon_id' => $self->o('clade_taxon_id'),
},
-batch_size => 1,
-hive_capacity => $self->o('base_age_capacity'),
-rc_name => '3.6Gb',
-flow_into => {
2 => '?accu_name=bed_files&accu_address={seq_region}',
},
},
{ -logic_name => 'big_bed',
-module => 'Bio::EnsEMBL::Compara::RunnableDB::BaseAge::BigBed',
-parameters => {
'program' => $self->o('big_bed_exe'),
'baseage_autosql' => $self->o('baseage_autosql'),
'big_bed_file' => '#bed_dir#/'.$self->o('big_bed_file'),
'bed_dir' => $self->o('bed_dir'),
'chr_sizes_file' => $self->o('chr_sizes_file'),
'chr_sizes' => '#bed_dir#/#chr_sizes_file#',
},
-rc_name => '1.8Gb',
},
];
}
1;
| 40.891429 | 260 | 0.529067 |
ed247f58afedff2978b5af9c1778af5ce48f123b | 1,603 | t | Perl | t/114-Report-get_result_details-vs-tap.t | dallaylaen/assert-contract-perl | 3271521525e08d82d5dc4c3175650293c2636881 | [
"Artistic-2.0",
"Unlicense"
] | null | null | null | t/114-Report-get_result_details-vs-tap.t | dallaylaen/assert-contract-perl | 3271521525e08d82d5dc4c3175650293c2636881 | [
"Artistic-2.0",
"Unlicense"
] | null | null | null | t/114-Report-get_result_details-vs-tap.t | dallaylaen/assert-contract-perl | 3271521525e08d82d5dc4c3175650293c2636881 | [
"Artistic-2.0",
"Unlicense"
] | null | null | null | #!/usr/bin/env perl
use strict;
use warnings;
BEGIN{ delete @ENV{qw(NDEBUG PERL_NDEBUG)} };
use Assert::Refute::Report;
use Test::More;
my $report = Assert::Refute::Report->new;
$report->diag( "premature message" );
$report->refute( 0, undef );
$report->refute( 0, 0 );
$report->refute( 0, "passing test" );
$report->refute( 1, "failing test" );
$report->refute( "reason", "test with reason" );
$report->refute( [ {foo => 42}, "bar"], "multiline reason" );
$report->done_testing;
is $report->get_sign, "t3NNNd", "Report is consistent";
note "<report>\n", $report->get_tap, "</report>";
my @diag_from_tap = grep { /^#/ } split /\n/, $report->get_tap;
my @diag_by_hand = map { "# $_" } map { @$_ }
map { $report->get_result_details( $_ )->{diag} } 0 .. $report->get_count, -1;
subtest "diag is the same both ways" => sub {
foreach ( 1 .. @diag_from_tap ) {
is $diag_from_tap[$_], $diag_by_hand[$_], "Line $_ matches";
};
is scalar @diag_by_hand, scalar @diag_from_tap, "Number or lines equal";
};
my @names_from_tap = grep { !/^#/ } split /\n/, $report->get_tap;
my @names_by_hand = map {
($_->{ok} ? "ok " : "not ok ")
. $_->{number}
. ($_->{name} ? " - $_->{name}" : "" )
} map {
$report->get_result_details( $_ );
} 1 .. $report->get_count;
push @names_by_hand, "1..".$report->get_count;
subtest "test names & numbers" => sub {
foreach ( 1 .. @names_from_tap ) {
is $names_from_tap[$_], $names_by_hand[$_], "Line $_ matches";
};
is scalar @names_by_hand, scalar @names_from_tap, "Number or lines equal";
};
done_testing;
| 29.685185 | 82 | 0.605115 |
ed55efcd549151f04aad6324531dcc6b97ccd500 | 1,299 | pm | Perl | auto-lib/Azure/StorSimple/CancelJobs.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | null | null | null | auto-lib/Azure/StorSimple/CancelJobs.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | null | null | null | auto-lib/Azure/StorSimple/CancelJobs.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | 1 | 2021-04-08T15:26:39.000Z | 2021-04-08T15:26:39.000Z | package Azure::StorSimple::CancelJobs;
use Moose;
use MooseX::ClassAttribute;
has 'api_version' => (is => 'ro', required => 1, isa => 'Str', default => '2017-06-01',
traits => [ 'Azure::ParamInQuery', 'Azure::LocationInResponse' ], location => 'api-version',
);
has 'deviceName' => (is => 'ro', required => 1, isa => 'Str',
traits => [ 'Azure::ParamInPath' ],
);
has 'jobName' => (is => 'ro', required => 1, isa => 'Str',
traits => [ 'Azure::ParamInPath' ],
);
has 'managerName' => (is => 'ro', required => 1, isa => 'Str',
traits => [ 'Azure::ParamInPath' ],
);
has 'resourceGroupName' => (is => 'ro', required => 1, isa => 'Str',
traits => [ 'Azure::ParamInPath' ],
);
has 'subscriptionId' => (is => 'ro', required => 1, isa => 'Str',
traits => [ 'Azure::ParamInPath' ],
);
class_has _api_uri => (is => 'ro', default => '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorSimple/managers/{managerName}/devices/{deviceName}/jobs/{jobName}/cancel');
class_has _returns => (is => 'ro', isa => 'HashRef', default => sub { {
202 => undef,
204 => undef,
} });
class_has _is_async => (is => 'ro', default => 1);
class_has _api_method => (is => 'ro', default => 'POST');
1;
| 37.114286 | 215 | 0.572748 |
ed371320b085205645a4f7847f8c35e1c0212704 | 2,408 | t | Perl | t/04_flat.t | samebchase/P6-Data-Dump-Tree | ca1a376d97e5032ac8b028dfb264e0823c8fced7 | [
"Artistic-2.0"
] | 22 | 2016-03-11T17:37:05.000Z | 2022-02-11T07:48:23.000Z | t/04_flat.t | samebchase/P6-Data-Dump-Tree | ca1a376d97e5032ac8b028dfb264e0823c8fced7 | [
"Artistic-2.0"
] | 51 | 2015-12-19T13:38:53.000Z | 2020-11-14T14:56:57.000Z | t/04_flat.t | samebchase/P6-Data-Dump-Tree | ca1a376d97e5032ac8b028dfb264e0823c8fced7 | [
"Artistic-2.0"
] | 17 | 2016-01-02T02:20:32.000Z | 2020-11-14T14:31:13.000Z | #!/usr/bin/env perl6
use Data::Dump::Tree ;
use Test ;
plan 23 ;
my @a = [4..5] ;
my $d = [[[[1..2],[3..4],],]] ;
my %h1 = <c 3> ;
my %h2 = <a 1 b 2> ;
my $s = ([1..3], %h1, %h2, @a) ;
my %h3 = <a 1 b 2 c 3 d 4> ;
my @a2 = [1..10] ;
my $d2 = ([1..10], [|(1..10), @a2 ], %h3) ;
my $d3 = ([1..10], [|(1..10), [|(1..22), %h1, %h2,%h2, |(23..30), [1..6], |(1..4)] ], {some => {a => 1, b => [|(1..5), %h1]}, thing => $s}) ;
for
(
(13, :title<test 10, string>, $s, :flat(10, <hello>)),
(13, :title<test [1..3]>, $s, :flat([1..3],)),
(14, :title<test Hash>, $s, :flat(Hash,)),
(6, :title<test flat:>, $s, :flat),
(6, :title<test 0>, $s, :flat(0)),
(13, :title<test 1>, $s, :flat(1)),
(11, :title<test 2>, ($d, [3..5]), :flat(2)),
(14, :title<test 3>, ($d, [3..5], $d), :flat(3)),
(13, :title<<test %(a => 1, b => 2)>>, $s, :flat(%(a => 1, b => 2),)),
(14, :title<test %h1>, $s, :flat(%h1,)),
(13, :title<test @a>, $s, :flat(@a,)),
(14, :title<test sub: Hash>, $s, :flat({$_ ~~ Hash})),
(12, :title<test sub Array $s.first: 3>, $s, :flat({$_ ~~ Array && $_.first: 3})),
(14, :title<test sub: $s == %h1>, $s, :flat({$_ === %h1})),
# columns
(39, :title<flat()>, $d2, :flat()),
(38, :title<flat((H, 2))>, $d2, :flat((Hash, 2),)),
(23, :title<flat((sA, 2))>, $d2, :flat(({$_ ~~ Array && $*d == 1}, 2), )),
(26, :title<flat((sA, L1, *5) 2)>, $d2, :flat(({$_ ~~ Array && $*d == 1, 5}, 2), )),
(35, :title<flat((sA, L2, *5) 2)>, $d2, :flat(({$_ ~~ Array && $*d == 2, 5}, 2), )),
(35, :title<flat((s@a2, L2, *5) 2)>, $d2, :flat(({$_ === @a2 && $*d == 2, 5}, 2), )),
(35, :title<flat((sA, L2, *5) 2)>, $d2, :flat({$_ ~~ Array && $*d == 2, 5}, )),
(22, :title<flat((sA, *5) 2)>, $d2, :flat(({$_ ~~ Array, 5}, 2), )),
# hash flatten if more than two keys, if less only if keys are non final
# array guess number of columns based on the number of elements and left space and rendering, which we know nothing about :)
(48, :title<d3, flat(H, sA-5)>, $d3, :flat({$_ ~~ Hash && $_.keys > 1}, {$_ ~~ Array && $_.elems > 5, 5} )),
)
{
my ($lines, $title, $ds, $flat) = | $_ ;
my Capture $c = \(|$title, $ds, |$flat) ;
my $r = ddt :get_lines_integrated, |$c, :width(80), :!color ;
is($r.elems, $lines) or do
{
diag ddt :get, |$title, $ds ;
#diag ddt :get, |$c, :width(80) ;
diag $r.join("\n") ;
}
}
| 37.625 | 141 | 0.44186 |
ed26f605b4e8aad8edfb197de0df9d416d517407 | 994 | pm | Perl | perl/vendor/lib/Moose/Meta/Method/Accessor/Native/Array/grep.pm | luiscarlosg27/xampp | c295dbdd435c9c62fbd4cc6fc42097bea7a900a0 | [
"Apache-2.0"
] | 2 | 2021-07-24T12:46:49.000Z | 2021-08-02T08:37:53.000Z | perl/vendor/lib/Moose/Meta/Method/Accessor/Native/Array/grep.pm | luiscarlosg27/xampp | c295dbdd435c9c62fbd4cc6fc42097bea7a900a0 | [
"Apache-2.0"
] | null | null | null | perl/vendor/lib/Moose/Meta/Method/Accessor/Native/Array/grep.pm | luiscarlosg27/xampp | c295dbdd435c9c62fbd4cc6fc42097bea7a900a0 | [
"Apache-2.0"
] | null | null | null | package Moose::Meta::Method::Accessor::Native::Array::grep;
our $VERSION = '2.2013';
use strict;
use warnings;
use Params::Util ();
use Moose::Role;
with 'Moose::Meta::Method::Accessor::Native::Reader';
sub _minimum_arguments { 1 }
sub _maximum_arguments { 1 }
sub _inline_check_arguments {
my $self = shift;
return (
'if (!Params::Util::_CODELIKE($_[0])) {',
$self->_inline_throw_exception( InvalidArgumentToMethod =>
'argument => $_[0],'.
'method_name => "grep",'.
'type_of_argument => "code reference",'.
'type => "CodeRef",',
) . ';',
'}',
);
}
sub _return_value {
my $self = shift;
my ($slot_access) = @_;
return 'grep { $_[0]->() } @{ (' . $slot_access . ') }';
}
no Moose::Role;
1;
| 23.666667 | 91 | 0.447686 |
ed501c5b7a7c973cc408a66dd51aeb57282511f4 | 248 | pm | Perl | auto-lib/Azure/CognitiveServices/CognitiveServicesAccountProperties.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | null | null | null | auto-lib/Azure/CognitiveServices/CognitiveServicesAccountProperties.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | null | null | null | auto-lib/Azure/CognitiveServices/CognitiveServicesAccountProperties.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | 1 | 2021-04-08T15:26:39.000Z | 2021-04-08T15:26:39.000Z | package Azure::CognitiveServices::CognitiveServicesAccountProperties;
use Moose;
has 'endpoint' => (is => 'ro', isa => 'Str' );
has 'internalId' => (is => 'ro', isa => 'Str' );
has 'provisioningState' => (is => 'ro', isa => 'Str' );
1;
| 31 | 69 | 0.58871 |
ed56e943843020ba743c39fcdad4f9d0b47d3d76 | 703 | pm | Perl | auto-lib/Paws/SageMaker/ListAutoMLJobsResponse.pm | shogo82148/aws-sdk-perl | a87555a9d30dd1415235ebacd2715b2f7e5163c7 | [
"Apache-2.0"
] | null | null | null | auto-lib/Paws/SageMaker/ListAutoMLJobsResponse.pm | shogo82148/aws-sdk-perl | a87555a9d30dd1415235ebacd2715b2f7e5163c7 | [
"Apache-2.0"
] | null | null | null | auto-lib/Paws/SageMaker/ListAutoMLJobsResponse.pm | shogo82148/aws-sdk-perl | a87555a9d30dd1415235ebacd2715b2f7e5163c7 | [
"Apache-2.0"
] | null | null | null |
package Paws::SageMaker::ListAutoMLJobsResponse;
use Moose;
has AutoMLJobSummaries => (is => 'ro', isa => 'ArrayRef[Paws::SageMaker::AutoMLJobSummary]', required => 1);
has NextToken => (is => 'ro', isa => 'Str');
has _request_id => (is => 'ro', isa => 'Str');
### main pod documentation begin ###
=head1 NAME
Paws::SageMaker::ListAutoMLJobsResponse
=head1 ATTRIBUTES
=head2 B<REQUIRED> AutoMLJobSummaries => ArrayRef[L<Paws::SageMaker::AutoMLJobSummary>]
Returns a summary list of jobs.
=head2 NextToken => Str
If the previous response was truncated, you will receive this token.
Use it in your next request to receive the next set of results.
=head2 _request_id => Str
=cut
1; | 20.676471 | 110 | 0.704125 |
ed981788163a0b6e348284224f0869177b6399cb | 472 | pm | Perl | auto-lib/Paws/Quicksight/DeleteGroupResponse.pm | 0leksii/aws-sdk-perl | b2132fe3c79a06fd15b6137e8a0eb628de722e0f | [
"Apache-2.0"
] | 164 | 2015-01-08T14:58:53.000Z | 2022-02-20T19:16:24.000Z | auto-lib/Paws/Quicksight/DeleteGroupResponse.pm | 0leksii/aws-sdk-perl | b2132fe3c79a06fd15b6137e8a0eb628de722e0f | [
"Apache-2.0"
] | 348 | 2015-01-07T22:08:38.000Z | 2022-01-27T14:34:44.000Z | auto-lib/Paws/Quicksight/DeleteGroupResponse.pm | 0leksii/aws-sdk-perl | b2132fe3c79a06fd15b6137e8a0eb628de722e0f | [
"Apache-2.0"
] | 87 | 2015-04-22T06:29:47.000Z | 2021-09-29T14:45:55.000Z |
package Paws::Quicksight::DeleteGroupResponse;
use Moose;
has RequestId => (is => 'ro', isa => 'Str');
has Status => (is => 'ro', isa => 'Int');
has _request_id => (is => 'ro', isa => 'Str');
1;
### main pod documentation begin ###
=head1 NAME
Paws::Quicksight::DeleteGroupResponse
=head1 ATTRIBUTES
=head2 RequestId => Str
The AWS request ID for this operation.
=head2 Status => Int
The HTTP status of the request.
=head2 _request_id => Str
=cut
| 13.882353 | 48 | 0.646186 |
ed2b579aeaf250996e70ad8c37552bdb186df291 | 6,613 | t | Perl | t/zoneinfo_tz_env.t | david-dick/time-zone-olson | acff4f39ed36e39118acc2020172b5d66530fd58 | [
"Artistic-1.0"
] | null | null | null | t/zoneinfo_tz_env.t | david-dick/time-zone-olson | acff4f39ed36e39118acc2020172b5d66530fd58 | [
"Artistic-1.0"
] | null | null | null | t/zoneinfo_tz_env.t | david-dick/time-zone-olson | acff4f39ed36e39118acc2020172b5d66530fd58 | [
"Artistic-1.0"
] | null | null | null | #!perl -T
use 5.006;
use strict;
use warnings FATAL => 'all';
use Test::More;
use Time::Local();
use Time::Zone::Olson();
use POSIX();
use Config;
use Time::Local();
use File::Find();
use Digest::SHA();
$ENV{PATH} = '/bin:/usr/bin:/usr/sbin:/sbin';
delete @ENV{'IFS', 'CDPATH', 'ENV', 'BASH_ENV'};
my $test_gnu_date = `TZ="Australia/Melbourne" date -d "2015/02/28 11:00:00" +"%Y/%m/%d %H:%M:%S" 2>&1`;
if (defined $test_gnu_date) {
chomp $test_gnu_date;
ok(1, "Checking date is '$test_gnu_date', should be '2015/02/28 11:00:00'");
} else {
diag(q['date -d "2015/02/28 11:00:00" +"%Y/%m/%d %H:%M:%S" 2>&1'] . " does not function correctly on '$^O'");
ok(1, q['date -d "2015/02/28 11:00:00" +"%Y/%m/%d %H:%M:%S" 2>&1'] . " does not function correctly on '$^O'");
}
my %dates;
my $max_number_of_years = 0;
if ($ENV{RELEASE_TESTING}) {
$max_number_of_years = 20;
}
foreach my $tz (
($] >= 5.010 ? '<+0330>-3:30<+0430>,79/24,263/24' : ()),
'<+0330>-3:30<+0430>,J79/24,J263/24',
'WET0WEST,M3.5.0,M10.5.0/3',
'EST5EDT,M3.2.0,M11.1.0',
'<-04>4<-03>,M11.1.0/0,M2.3.0/0',
'AEST-10AEDT,M10.1.0,M4.1.0/3',
'NZST-12NZDT,M9.5.0,M4.1.0/3',
)
{
foreach my $number_of_years ( ( 0 .. $max_number_of_years )) {
next unless (($test_gnu_date) && ($test_gnu_date eq '2015/02/28 11:00:00'));
my $time = Time::Zone::Olson->new( timezone => $tz )->time_local(0,0,0,1,0,118 + $number_of_years);
%dates = ();
$time -= ($time % 3600);
DAY: foreach my $day ( 0 .. 365 ) {
foreach my $hour ( 0 .. 24 ) {
$time += 3600;
check_time($tz, $time - 1);
check_time($tz, $time);
check_time($tz, $time + 1);
}
}
my $doubles = 0;
foreach my $date (sort { $a cmp $b } keys %dates) {
if ($dates{$date} > 1) {
$doubles += 1;
}
}
ok($doubles == 3, "Found $doubles doubles");
my $zone = Time::Zone::Olson->new( timezone => $tz );
ok($zone->equiv( $tz ), "'$tz' is equivalent to '$tz'");
ok(!$zone->equiv( 'GMT0BST,M3.5.0/1,M10.5.0' ), "'GMT0BST,M3.5.0/1,M10.5.0' is NOT equivalent to '$tz'");
}
}
foreach my $tz (
'<GMT+10>+10',
'<+07>-7',
($] >= 5.010 ? ('UTC', 'Etc/GMT-0') : ()),
) {
next if ($^O eq 'MSWin32');
my $zone = Time::Zone::Olson->new(timezone => $tz);
ok($zone->timezone() eq $tz, "Allowed to specify an odd timezone such as '$tz'");
}
Test::More::done_testing();
sub _LOCALTIME_MINUTE_INDEX { return 1 }
sub _LOCALTIME_HOUR_INDEX { return 2 }
sub _LOCALTIME_DAY_INDEX { return 3 }
sub _LOCALTIME_MONTH_INDEX { return 4 }
sub _LOCALTIME_DAY_OF_WEEK_INDEX { return 6 }
sub _LOCALTIME_DAY_OF_YEAR_INDEX { return 7 }
sub check_time {
my ($tz, $time) = @_;
my @time_local = Time::Zone::Olson->new(timezone => $tz)->local_time($time);
my $match;
my $ok;
while ($tz =~ /M(\d+)[.](\d+)[.](\d+)(?:\/(\d+))?/smxg) {
my ($month, $week, $day, $hour) = ($1, $2, $3, $4);
$hour = defined $hour ? $hour : 2;
$ok = 1;
if ($hour == 0) {
return if (($time_local[_LOCALTIME_HOUR_INDEX()] > $hour + 1) && ($time_local[_LOCALTIME_HOUR_INDEX()] < 23));
} else {
return if (($time_local[_LOCALTIME_HOUR_INDEX()] > $hour + 1) || ($time_local[_LOCALTIME_HOUR_INDEX()] < $hour - 1));
}
if ($time_local[_LOCALTIME_HOUR_INDEX()] == $hour) {
return unless (($time_local[_LOCALTIME_MINUTE_INDEX()] == 59) || ($time_local[_LOCALTIME_MINUTE_INDEX()] == 0) || ($time_local[_LOCALTIME_MINUTE_INDEX()] == 1));
}
if ($month == ($time_local[_LOCALTIME_MONTH_INDEX()] + 1)) {
if ($week == 1) {
if (($time_local[_LOCALTIME_DAY_INDEX()]) < 8) {
if ($day == 0) {
if ((($time_local[_LOCALTIME_DAY_OF_WEEK_INDEX()]) == 0) || ($time_local[_LOCALTIME_DAY_OF_WEEK_INDEX()] == 7)) {
$match = 1;
} elsif (($hour == 0) && ($time_local[_LOCALTIME_HOUR_INDEX()] == 23) && ($time_local[_LOCALTIME_DAY_OF_WEEK_INDEX()]) == 6) {
$match = 1;
}
}
}
} elsif ($week == 2) {
if ((($time_local[_LOCALTIME_DAY_INDEX()]) >= 7) && ($time_local[_LOCALTIME_DAY_INDEX()] < 15)) {
if ($day == 0) {
if ((($time_local[_LOCALTIME_DAY_OF_WEEK_INDEX()]) == 0) || ($time_local[_LOCALTIME_DAY_OF_WEEK_INDEX()] == 7)) {
$match = 1;
} elsif (($hour == 0) && ($time_local[_LOCALTIME_HOUR_INDEX()] == 23) && ($time_local[_LOCALTIME_DAY_OF_WEEK_INDEX()]) == 6) {
$match = 1;
}
}
}
} elsif ($week == 3) {
if ((($time_local[_LOCALTIME_DAY_INDEX()]) >= 14) && ($time_local[_LOCALTIME_DAY_INDEX()] < 22)) {
if ($day == 0) {
if ((($time_local[_LOCALTIME_DAY_OF_WEEK_INDEX()]) == 0) || ($time_local[_LOCALTIME_DAY_OF_WEEK_INDEX()] == 7)) {
$match = 1;
} elsif (($hour == 0) && ($time_local[_LOCALTIME_HOUR_INDEX()] == 23) && ($time_local[_LOCALTIME_DAY_OF_WEEK_INDEX()]) == 6) {
$match = 1;
}
}
}
} elsif ($week == 5) {
if (($time_local[_LOCALTIME_DAY_INDEX()]) > 20) {
if ($day == 0) {
if ((($time_local[_LOCALTIME_DAY_OF_WEEK_INDEX()]) == 0) || ($time_local[_LOCALTIME_DAY_OF_WEEK_INDEX()] == 7)) {
$match = 1;
} elsif (($hour == 0) && ($time_local[_LOCALTIME_HOUR_INDEX()] == 23) && ($time_local[_LOCALTIME_DAY_OF_WEEK_INDEX()]) == 6) {
$match = 1;
}
}
}
} else {
die "Unknown TZ format for week";
}
}
}
while ($tz =~ /[J,](\d+)(?:\/(\d+))?/smxg) {
my ($day, $hour) = ($1, $2);
$hour = defined $hour ? $hour : 2;
$ok = 1;
if ($hour == 0) {
return if (($time_local[_LOCALTIME_HOUR_INDEX()] > $hour + 1) && ($time_local[_LOCALTIME_HOUR_INDEX()] < 23));
} else {
return if (($time_local[_LOCALTIME_HOUR_INDEX()] > $hour + 1) || ($time_local[_LOCALTIME_HOUR_INDEX()] < $hour - 1));
}
if (($time_local[_LOCALTIME_DAY_OF_YEAR_INDEX()] >= $day - 2) && ($time_local[_LOCALTIME_DAY_OF_YEAR_INDEX()] <= $day + 2)) {
$match = 1;
}
}
if (!$ok) {
die "Weird TZ format";
}
return unless ($match);
my $time_zone_olson = POSIX::strftime('%Y/%m/%d %H:%M:%S', @time_local);
$dates{$time_zone_olson} += 1;
my $date = `TZ='$tz' date -d "\@$time" +"%Y/%m/%d %H:%M:%S"`;
chomp $date;
ok($time_zone_olson eq $date, "$time_zone_olson eq $date for the local_time time $time in $tz");
{
local $ENV{TZ} = $tz;
my $revert_time = Time::Zone::Olson->new()->time_local(@time_local);
ok($revert_time <= $time, "$revert_time (localtime " . (scalar localtime($revert_time)) . ") is returned for $time (localtime " . (scalar localtime($time)) . ") in time_local in $tz (offset of " . ($revert_time - $time) . " seconds)");
ok($tz eq Time::Zone::Olson->new()->timezone(), "$tz is the timezone from the environment variable TZ");
}
}
| 37.573864 | 237 | 0.577953 |
ed57f63b59391a63b06a76859c116f6fd74d4005 | 27,154 | pl | Perl | crypto/camellia/asm/cmll-x86_64.pl | incshaun/openssl | bc007767b24a385719d70d99751c736c914d3cf5 | [
"Apache-2.0"
] | null | null | null | crypto/camellia/asm/cmll-x86_64.pl | incshaun/openssl | bc007767b24a385719d70d99751c736c914d3cf5 | [
"Apache-2.0"
] | null | null | null | crypto/camellia/asm/cmll-x86_64.pl | incshaun/openssl | bc007767b24a385719d70d99751c736c914d3cf5 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env perl
# Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
# ====================================================================
# Copyright (c) 2008 Andy Polyakov <appro@openssl.org>
#
# This module may be used under the terms of either the GNU General
# Public License version 2 or later, the GNU Lesser General Public
# License version 2.1 or later, the Mozilla Public License version
# 1.1 or the BSD License. The exact terms of either license are
# distributed along with this module. For further details see
# http://www.openssl.org/~appro/camellia/.
# ====================================================================
# Performance in cycles per processed byte (less is better) in
# 'openssl speed ...' benchmark:
#
# AMD64 Core2 EM64T
# -evp camellia-128-ecb 16.7 21.0 22.7
# + over gcc 3.4.6 +25% +5% 0%
#
# camellia-128-cbc 15.7 20.4 21.1
#
# 128-bit key setup 128 216 205 cycles/key
# + over gcc 3.4.6 +54% +39% +15%
#
# Numbers in "+" rows represent performance improvement over compiler
# generated code. Key setup timings are impressive on AMD and Core2
# thanks to 64-bit operations being covertly deployed. Improvement on
# EM64T, pre-Core2 Intel x86_64 CPU, is not as impressive, because it
# apparently emulates some of 64-bit operations in [32-bit] microcode.
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;
sub hi() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1h/; $r; }
sub lo() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/;
$r =~ s/%[er]([sd]i)/%\1l/;
$r =~ s/%(r[0-9]+)[d]?/%\1b/; $r; }
$t0="%eax";$t1="%ebx";$t2="%ecx";$t3="%edx";
@S=("%r8d","%r9d","%r10d","%r11d");
$i0="%esi";
$i1="%edi";
$Tbl="%rbp"; # size optimization
$inp="%r12";
$out="%r13";
$key="%r14";
$keyend="%r15";
$arg0d=$win64?"%ecx":"%edi";
# const unsigned int Camellia_SBOX[4][256];
# Well, sort of... Camellia_SBOX[0][] is interleaved with [1][],
# and [2][] - with [3][]. This is done to minimize code size.
$SBOX1_1110=0; # Camellia_SBOX[0]
$SBOX4_4404=4; # Camellia_SBOX[1]
$SBOX2_0222=2048; # Camellia_SBOX[2]
$SBOX3_3033=2052; # Camellia_SBOX[3]
sub Camellia_Feistel {
my $i=@_[0];
my $seed=defined(@_[1])?@_[1]:0;
my $scale=$seed<0?-8:8;
my $j=($i&1)*2;
my ($s0,$s1,$s2,$s3)=(@S[($j)%4],@S[($j+1)%4],@S[($j+2)%4],@S[($j+3)%4]);
$code.=<<___;
xor $s0,$t0 # t0^=key[0]
xor $s1,$t1 # t1^=key[1]
movz `&hi("$t0")`,$i0 # (t0>>8)&0xff
movz `&lo("$t1")`,$i1 # (t1>>0)&0xff
mov $SBOX3_3033($Tbl,$i0,8),$t3 # t3=SBOX3_3033[0]
mov $SBOX1_1110($Tbl,$i1,8),$t2 # t2=SBOX1_1110[1]
movz `&lo("$t0")`,$i0 # (t0>>0)&0xff
shr \$16,$t0
movz `&hi("$t1")`,$i1 # (t1>>8)&0xff
xor $SBOX4_4404($Tbl,$i0,8),$t3 # t3^=SBOX4_4404[0]
shr \$16,$t1
xor $SBOX4_4404($Tbl,$i1,8),$t2 # t2^=SBOX4_4404[1]
movz `&hi("$t0")`,$i0 # (t0>>24)&0xff
movz `&lo("$t1")`,$i1 # (t1>>16)&0xff
xor $SBOX1_1110($Tbl,$i0,8),$t3 # t3^=SBOX1_1110[0]
xor $SBOX3_3033($Tbl,$i1,8),$t2 # t2^=SBOX3_3033[1]
movz `&lo("$t0")`,$i0 # (t0>>16)&0xff
movz `&hi("$t1")`,$i1 # (t1>>24)&0xff
xor $SBOX2_0222($Tbl,$i0,8),$t3 # t3^=SBOX2_0222[0]
xor $SBOX2_0222($Tbl,$i1,8),$t2 # t2^=SBOX2_0222[1]
mov `$seed+($i+1)*$scale`($key),$t1 # prefetch key[i+1]
mov `$seed+($i+1)*$scale+4`($key),$t0
xor $t3,$t2 # t2^=t3
ror \$8,$t3 # t3=RightRotate(t3,8)
xor $t2,$s2
xor $t2,$s3
xor $t3,$s3
___
}
# void VR_Camellia_EncryptBlock_Rounds(
# int grandRounds,
# const Byte plaintext[],
# const KEY_TABLE_TYPE keyTable,
# Byte ciphertext[])
$code=<<___;
.text
# V1.x API
.globl VR_Camellia_EncryptBlock
.type VR_Camellia_EncryptBlock,\@abi-omnipotent
.align 16
VR_Camellia_EncryptBlock:
movl \$128,%eax
subl $arg0d,%eax
movl \$3,$arg0d
adcl \$0,$arg0d # keyBitLength==128?3:4
jmp .Lenc_rounds
.size VR_Camellia_EncryptBlock,.-VR_Camellia_EncryptBlock
# V2
.globl VR_Camellia_EncryptBlock_Rounds
.type VR_Camellia_EncryptBlock_Rounds,\@function,4
.align 16
.Lenc_rounds:
VR_Camellia_EncryptBlock_Rounds:
.cfi_startproc
push %rbx
.cfi_push %rbx
push %rbp
.cfi_push %rbp
push %r13
.cfi_push %r13
push %r14
.cfi_push %r14
push %r15
.cfi_push %r15
.Lenc_prologue:
#mov %rsi,$inp # put away arguments
mov %rcx,$out
mov %rdx,$key
shl \$6,%edi # process grandRounds
lea .LCamellia_SBOX(%rip),$Tbl
lea ($key,%rdi),$keyend
mov 0(%rsi),@S[0] # load plaintext
mov 4(%rsi),@S[1]
mov 8(%rsi),@S[2]
bswap @S[0]
mov 12(%rsi),@S[3]
bswap @S[1]
bswap @S[2]
bswap @S[3]
call _x86_64_Camellia_encrypt
bswap @S[0]
bswap @S[1]
bswap @S[2]
mov @S[0],0($out)
bswap @S[3]
mov @S[1],4($out)
mov @S[2],8($out)
mov @S[3],12($out)
mov 0(%rsp),%r15
.cfi_restore %r15
mov 8(%rsp),%r14
.cfi_restore %r14
mov 16(%rsp),%r13
.cfi_restore %r13
mov 24(%rsp),%rbp
.cfi_restore %rbp
mov 32(%rsp),%rbx
.cfi_restore %rbx
lea 40(%rsp),%rsp
.cfi_adjust_cfa_offset -40
.Lenc_epilogue:
ret
.cfi_endproc
.size VR_Camellia_EncryptBlock_Rounds,.-VR_Camellia_EncryptBlock_Rounds
.type _x86_64_Camellia_encrypt,\@abi-omnipotent
.align 16
_x86_64_Camellia_encrypt:
xor 0($key),@S[1]
xor 4($key),@S[0] # ^=key[0-3]
xor 8($key),@S[3]
xor 12($key),@S[2]
.align 16
.Leloop:
mov 16($key),$t1 # prefetch key[4-5]
mov 20($key),$t0
___
for ($i=0;$i<6;$i++) { Camellia_Feistel($i,16); }
$code.=<<___;
lea 16*4($key),$key
cmp $keyend,$key
mov 8($key),$t3 # prefetch key[2-3]
mov 12($key),$t2
je .Ledone
and @S[0],$t0
or @S[3],$t3
rol \$1,$t0
xor $t3,@S[2] # s2^=s3|key[3];
xor $t0,@S[1] # s1^=LeftRotate(s0&key[0],1);
and @S[2],$t2
or @S[1],$t1
rol \$1,$t2
xor $t1,@S[0] # s0^=s1|key[1];
xor $t2,@S[3] # s3^=LeftRotate(s2&key[2],1);
jmp .Leloop
.align 16
.Ledone:
xor @S[2],$t0 # SwapHalf
xor @S[3],$t1
xor @S[0],$t2
xor @S[1],$t3
mov $t0,@S[0]
mov $t1,@S[1]
mov $t2,@S[2]
mov $t3,@S[3]
.byte 0xf3,0xc3 # rep ret
.size _x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt
# V1.x API
.globl VR_Camellia_DecryptBlock
.type VR_Camellia_DecryptBlock,\@abi-omnipotent
.align 16
VR_Camellia_DecryptBlock:
movl \$128,%eax
subl $arg0d,%eax
movl \$3,$arg0d
adcl \$0,$arg0d # keyBitLength==128?3:4
jmp .Ldec_rounds
.size VR_Camellia_DecryptBlock,.-VR_Camellia_DecryptBlock
# V2
.globl VR_Camellia_DecryptBlock_Rounds
.type VR_Camellia_DecryptBlock_Rounds,\@function,4
.align 16
.Ldec_rounds:
VR_Camellia_DecryptBlock_Rounds:
.cfi_startproc
push %rbx
.cfi_push %rbx
push %rbp
.cfi_push %rbp
push %r13
.cfi_push %r13
push %r14
.cfi_push %r14
push %r15
.cfi_push %r15
.Ldec_prologue:
#mov %rsi,$inp # put away arguments
mov %rcx,$out
mov %rdx,$keyend
shl \$6,%edi # process grandRounds
lea .LCamellia_SBOX(%rip),$Tbl
lea ($keyend,%rdi),$key
mov 0(%rsi),@S[0] # load plaintext
mov 4(%rsi),@S[1]
mov 8(%rsi),@S[2]
bswap @S[0]
mov 12(%rsi),@S[3]
bswap @S[1]
bswap @S[2]
bswap @S[3]
call _x86_64_Camellia_decrypt
bswap @S[0]
bswap @S[1]
bswap @S[2]
mov @S[0],0($out)
bswap @S[3]
mov @S[1],4($out)
mov @S[2],8($out)
mov @S[3],12($out)
mov 0(%rsp),%r15
.cfi_restore %r15
mov 8(%rsp),%r14
.cfi_restore %r14
mov 16(%rsp),%r13
.cfi_restore %r13
mov 24(%rsp),%rbp
.cfi_restore %rbp
mov 32(%rsp),%rbx
.cfi_restore %rbx
lea 40(%rsp),%rsp
.cfi_adjust_cfa_offset -40
.Ldec_epilogue:
ret
.cfi_endproc
.size VR_Camellia_DecryptBlock_Rounds,.-VR_Camellia_DecryptBlock_Rounds
.type _x86_64_Camellia_decrypt,\@abi-omnipotent
.align 16
_x86_64_Camellia_decrypt:
xor 0($key),@S[1]
xor 4($key),@S[0] # ^=key[0-3]
xor 8($key),@S[3]
xor 12($key),@S[2]
.align 16
.Ldloop:
mov -8($key),$t1 # prefetch key[4-5]
mov -4($key),$t0
___
for ($i=0;$i<6;$i++) { Camellia_Feistel($i,-8); }
$code.=<<___;
lea -16*4($key),$key
cmp $keyend,$key
mov 0($key),$t3 # prefetch key[2-3]
mov 4($key),$t2
je .Lddone
and @S[0],$t0
or @S[3],$t3
rol \$1,$t0
xor $t3,@S[2] # s2^=s3|key[3];
xor $t0,@S[1] # s1^=LeftRotate(s0&key[0],1);
and @S[2],$t2
or @S[1],$t1
rol \$1,$t2
xor $t1,@S[0] # s0^=s1|key[1];
xor $t2,@S[3] # s3^=LeftRotate(s2&key[2],1);
jmp .Ldloop
.align 16
.Lddone:
xor @S[2],$t2
xor @S[3],$t3
xor @S[0],$t0
xor @S[1],$t1
mov $t2,@S[0] # SwapHalf
mov $t3,@S[1]
mov $t0,@S[2]
mov $t1,@S[3]
.byte 0xf3,0xc3 # rep ret
.size _x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt
___
# Emit asm storing round-key words at offset $rnd*8 (+ optional $bias)
# from $key.  Args: ($rnd, $key, [$bias,] @regs) where @regs holds either
# four 32-bit registers (stored pairwise swapped, matching the
# byte-swapped key layout) or one/two 64-bit registers.
sub _saveround {
my ($rnd,$key,@T)=@_;
# A numeric (truthy under int()) leading arg is a displacement bias,
# not a register name: int("%rax") == 0, int(-128) == -128.
my $bias=int(@T[0])?shift(@T):0;
if ($#T==3) {	# four 32-bit halves: hi/lo of each pair swapped
$code.=<<___;
mov @T[1],`$bias+$rnd*8+0`($key)
mov @T[0],`$bias+$rnd*8+4`($key)
mov @T[3],`$bias+$rnd*8+8`($key)
mov @T[2],`$bias+$rnd*8+12`($key)
___
} else {	# one or two 64-bit registers, 8 bytes apart
$code.=" mov @T[0],`$bias+$rnd*8+0`($key)\n";
$code.=" mov @T[1],`$bias+$rnd*8+8`($key)\n" if ($#T>=1);
}
}
# Emit asm loading one or two 64-bit round-key words from offset $rnd*8
# (+ optional $bias) at $key into the given registers.  Mirror image of
# _saveround; same leading-numeric-bias convention.
sub _loadround {
my ($rnd,$key,@T)=@_;
my $bias=int(@T[0])?shift(@T):0;	# numeric leading arg == bias
$code.=" mov `$bias+$rnd*8+0`($key),@T[0]\n";
$code.=" mov `$bias+$rnd*8+8`($key),@T[1]\n" if ($#T>=1);
}
# shld is very slow on Intel EM64T family. Even on AMD it limits
# instruction decode rate [because it's VectorPath] and consequently
# performance...
# Emit a 128-bit left rotate of the register pair ($i0,$i1) by $rot bits
# using shld.  Kept for reference only -- see the note above: shld is
# slow on EM64T/AMD, so _rotl128 below is what the generator uses.
# No code is emitted for a zero rotation.  Clobbers %r11.
sub __rotl128 {
my ($i0,$i1,$rot)=@_;
if ($rot) {
$code.=<<___;
mov $i0,%r11
shld \$$rot,$i1,$i0
shld \$$rot,%r11,$i1
___
}
}
# ... Implementing 128-bit rotate without shld gives 80% better
# performance EM64T, +15% on AMD64 and only ~7% degradation on
# Core2. This is therefore preferred.
# Emit a 128-bit left rotate of the register pair ($i0,$i1) by $rot bits
# without shld: each half is shifted left and OR-ed with the top bits of
# the other half.  Emits nothing for a zero rotation.  Clobbers %r9 and
# %r11.  NOTE(review): assumes 0 < $rot < 64 (shift counts are $rot and
# 64-$rot) -- all call sites in this file satisfy that.
sub _rotl128 {
my ($i0,$i1,$rot)=@_;
if ($rot) {
$code.=<<___;
mov $i0,%r11
shl \$$rot,$i0
mov $i1,%r9
shr \$`64-$rot`,%r9
shr \$`64-$rot`,%r11
or %r9,$i0
shl \$$rot,$i1
or %r11,$i1
___
}
}
{ my $step=0;
$code.=<<___;
.globl VR_Camellia_Ekeygen
.type VR_Camellia_Ekeygen,\@function,3
.align 16
VR_Camellia_Ekeygen:
.cfi_startproc
push %rbx
.cfi_push %rbx
push %rbp
.cfi_push %rbp
push %r13
.cfi_push %r13
push %r14
.cfi_push %r14
push %r15
.cfi_push %r15
.Lkey_prologue:
mov %edi,${keyend}d # put away arguments, keyBitLength
mov %rdx,$out # keyTable
mov 0(%rsi),@S[0] # load 0-127 bits
mov 4(%rsi),@S[1]
mov 8(%rsi),@S[2]
mov 12(%rsi),@S[3]
bswap @S[0]
bswap @S[1]
bswap @S[2]
bswap @S[3]
___
&_saveround (0,$out,@S); # KL<<<0
$code.=<<___;
cmp \$128,$keyend # check keyBitLength
je .L1st128
mov 16(%rsi),@S[0] # load 128-191 bits
mov 20(%rsi),@S[1]
cmp \$192,$keyend
je .L1st192
mov 24(%rsi),@S[2] # load 192-255 bits
mov 28(%rsi),@S[3]
jmp .L1st256
.L1st192:
mov @S[0],@S[2]
mov @S[1],@S[3]
not @S[2]
not @S[3]
.L1st256:
bswap @S[0]
bswap @S[1]
bswap @S[2]
bswap @S[3]
___
&_saveround (4,$out,@S); # temp storage for KR!
$code.=<<___;
xor 0($out),@S[1] # KR^KL
xor 4($out),@S[0]
xor 8($out),@S[3]
xor 12($out),@S[2]
.L1st128:
lea .LCamellia_SIGMA(%rip),$key
lea .LCamellia_SBOX(%rip),$Tbl
mov 0($key),$t1
mov 4($key),$t0
___
&Camellia_Feistel($step++);
&Camellia_Feistel($step++);
$code.=<<___;
xor 0($out),@S[1] # ^KL
xor 4($out),@S[0]
xor 8($out),@S[3]
xor 12($out),@S[2]
___
&Camellia_Feistel($step++);
&Camellia_Feistel($step++);
$code.=<<___;
cmp \$128,$keyend
jne .L2nd256
lea 128($out),$out # size optimization
shl \$32,%r8 # @S[0]||
shl \$32,%r10 # @S[2]||
or %r9,%r8 # ||@S[1]
or %r11,%r10 # ||@S[3]
___
&_loadround (0,$out,-128,"%rax","%rbx"); # KL
&_saveround (2,$out,-128,"%r8","%r10"); # KA<<<0
&_rotl128 ("%rax","%rbx",15);
&_saveround (4,$out,-128,"%rax","%rbx"); # KL<<<15
&_rotl128 ("%r8","%r10",15);
&_saveround (6,$out,-128,"%r8","%r10"); # KA<<<15
&_rotl128 ("%r8","%r10",15); # 15+15=30
&_saveround (8,$out,-128,"%r8","%r10"); # KA<<<30
&_rotl128 ("%rax","%rbx",30); # 15+30=45
&_saveround (10,$out,-128,"%rax","%rbx"); # KL<<<45
&_rotl128 ("%r8","%r10",15); # 30+15=45
&_saveround (12,$out,-128,"%r8"); # KA<<<45
&_rotl128 ("%rax","%rbx",15); # 45+15=60
&_saveround (13,$out,-128,"%rbx"); # KL<<<60
&_rotl128 ("%r8","%r10",15); # 45+15=60
&_saveround (14,$out,-128,"%r8","%r10"); # KA<<<60
&_rotl128 ("%rax","%rbx",17); # 60+17=77
&_saveround (16,$out,-128,"%rax","%rbx"); # KL<<<77
&_rotl128 ("%rax","%rbx",17); # 77+17=94
&_saveround (18,$out,-128,"%rax","%rbx"); # KL<<<94
&_rotl128 ("%r8","%r10",34); # 60+34=94
&_saveround (20,$out,-128,"%r8","%r10"); # KA<<<94
&_rotl128 ("%rax","%rbx",17); # 94+17=111
&_saveround (22,$out,-128,"%rax","%rbx"); # KL<<<111
&_rotl128 ("%r8","%r10",17); # 94+17=111
&_saveround (24,$out,-128,"%r8","%r10"); # KA<<<111
$code.=<<___;
mov \$3,%eax
jmp .Ldone
.align 16
.L2nd256:
___
&_saveround (6,$out,@S); # temp storage for KA!
$code.=<<___;
xor `4*8+0`($out),@S[1] # KA^KR
xor `4*8+4`($out),@S[0]
xor `5*8+0`($out),@S[3]
xor `5*8+4`($out),@S[2]
___
&Camellia_Feistel($step++);
&Camellia_Feistel($step++);
&_loadround (0,$out,"%rax","%rbx"); # KL
&_loadround (4,$out,"%rcx","%rdx"); # KR
&_loadround (6,$out,"%r14","%r15"); # KA
$code.=<<___;
lea 128($out),$out # size optimization
shl \$32,%r8 # @S[0]||
shl \$32,%r10 # @S[2]||
or %r9,%r8 # ||@S[1]
or %r11,%r10 # ||@S[3]
___
# 192/256-bit key schedule: store KL/KR/KA/KB rotated by the amounts the
# Camellia spec prescribes.  Rotations are cumulative per register pair,
# hence the running "a+b=c" totals in the trailing comments.
&_saveround (2,$out,-128,"%r8","%r10"); # KB<<<0
&_rotl128 ("%rcx","%rdx",15);
&_saveround (4,$out,-128,"%rcx","%rdx"); # KR<<<15
&_rotl128 ("%r14","%r15",15);
&_saveround (6,$out,-128,"%r14","%r15"); # KA<<<15
&_rotl128 ("%rcx","%rdx",15); # 15+15=30
&_saveround (8,$out,-128,"%rcx","%rdx"); # KR<<<30
&_rotl128 ("%r8","%r10",30);
&_saveround (10,$out,-128,"%r8","%r10"); # KB<<<30
&_rotl128 ("%rax","%rbx",45);
&_saveround (12,$out,-128,"%rax","%rbx"); # KL<<<45
&_rotl128 ("%r14","%r15",30); # 15+30=45
&_saveround (14,$out,-128,"%r14","%r15"); # KA<<<45
&_rotl128 ("%rax","%rbx",15); # 45+15=60
&_saveround (16,$out,-128,"%rax","%rbx"); # KL<<<60
&_rotl128 ("%rcx","%rdx",30); # 30+30=60
&_saveround (18,$out,-128,"%rcx","%rdx"); # KR<<<60
&_rotl128 ("%r8","%r10",30); # 30+30=60
&_saveround (20,$out,-128,"%r8","%r10"); # KB<<<60
&_rotl128 ("%rax","%rbx",17); # 60+17=77
&_saveround (22,$out,-128,"%rax","%rbx"); # KL<<<77
&_rotl128 ("%r14","%r15",32); # 45+32=77
&_saveround (24,$out,-128,"%r14","%r15"); # KA<<<77
&_rotl128 ("%rcx","%rdx",34); # 60+34=94
&_saveround (26,$out,-128,"%rcx","%rdx"); # KR<<<94
&_rotl128 ("%r14","%r15",17); # 77+17=94
&_saveround (28,$out,-128,"%r14","%r15"); # KA<<<94
&_rotl128 ("%rax","%rbx",34); # 77+34=111
&_saveround (30,$out,-128,"%rax","%rbx"); # KL<<<111
&_rotl128 ("%r8","%r10",51); # 60+51=111
&_saveround (32,$out,-128,"%r8","%r10"); # KB<<<111
$code.=<<___;
mov \$4,%eax
.Ldone:
mov 0(%rsp),%r15
.cfi_restore %r15
mov 8(%rsp),%r14
.cfi_restore %r14
mov 16(%rsp),%r13
.cfi_restore %r13
mov 24(%rsp),%rbp
.cfi_restore %rbp
mov 32(%rsp),%rbx
.cfi_restore %rbx
lea 40(%rsp),%rsp
.cfi_adjust_cfa_offset -40
.Lkey_epilogue:
ret
.cfi_endproc
.size VR_Camellia_Ekeygen,.-VR_Camellia_Ekeygen
___
}
@SBOX=(
112,130, 44,236,179, 39,192,229,228,133, 87, 53,234, 12,174, 65,
35,239,107,147, 69, 25,165, 33,237, 14, 79, 78, 29,101,146,189,
134,184,175,143,124,235, 31,206, 62, 48,220, 95, 94,197, 11, 26,
166,225, 57,202,213, 71, 93, 61,217, 1, 90,214, 81, 86,108, 77,
139, 13,154,102,251,204,176, 45,116, 18, 43, 32,240,177,132,153,
223, 76,203,194, 52,126,118, 5,109,183,169, 49,209, 23, 4,215,
20, 88, 58, 97,222, 27, 17, 28, 50, 15,156, 22, 83, 24,242, 34,
254, 68,207,178,195,181,122,145, 36, 8,232,168, 96,252,105, 80,
170,208,160,125,161,137, 98,151, 84, 91, 30,149,224,255,100,210,
16,196, 0, 72,163,247,117,219,138, 3,230,218, 9, 63,221,148,
135, 92,131, 2,205, 74,144, 51,115,103,246,243,157,127,191,226,
82,155,216, 38,200, 55,198, 59,129,150,111, 75, 19,190, 99, 46,
233,121,167,140,159,110,188,142, 41,245,249,182, 47,253,180, 89,
120,152, 6,106,231, 70,113,186,212, 37,171, 66,136,162,141,250,
114, 7,185, 85,248,238,172, 10, 54, 73, 42,104, 60, 56,241,164,
64, 40,211,123,187,201, 67,193, 21,227,173,244,119,199,128,158);
# S-box value replicated into bytes 3,2,1 of a 32-bit word (byte 0 zero),
# formatted as a hex literal for the interleaved lookup tables.
sub S1110 {
    my $v = $SBOX[ shift() ];
    return sprintf( "0x%08x", ( $v << 24 ) | ( $v << 16 ) | ( $v << 8 ) );
}
# Index rotated left by 1 (mod 256) before the S-box lookup; result
# replicated into bytes 3,2,0 of the 32-bit word.
sub S4404 {
    my $idx = ( ( $_[0] << 1 ) | ( $_[0] >> 7 ) ) & 0xff;
    my $v   = $SBOX[$idx];
    return sprintf( "0x%08x", ( $v << 24 ) | ( $v << 16 ) | $v );
}
# S-box value rotated left by 1 (mod 256) after lookup; result
# replicated into bytes 2,1,0 of the 32-bit word (byte 3 zero).
sub S0222 {
    my $v = $SBOX[ shift() ];
    $v = ( ( $v << 1 ) | ( $v >> 7 ) ) & 0xff;
    return sprintf( "0x%08x", ( $v << 16 ) | ( $v << 8 ) | $v );
}
# S-box value rotated right by 1 (mod 256) after lookup; result
# replicated into bytes 3,1,0 of the 32-bit word (byte 2 zero).
sub S3033 {
    my $v = $SBOX[ shift() ];
    $v = ( ( $v >> 1 ) | ( $v << 7 ) ) & 0xff;
    return sprintf( "0x%08x", ( $v << 24 ) | ( $v << 8 ) | $v );
}
$code.=<<___;
.align 64
.LCamellia_SIGMA:
.long 0x3bcc908b, 0xa09e667f, 0x4caa73b2, 0xb67ae858
.long 0xe94f82be, 0xc6ef372f, 0xf1d36f1c, 0x54ff53a5
.long 0xde682d1d, 0x10e527fa, 0xb3e6c1fd, 0xb05688c2
.long 0, 0, 0, 0
.LCamellia_SBOX:
___
# tables are interleaved, remember?
# Append a single .long directive carrying all supplied values to the
# accumulated assembly text.
sub data_word {
    my @vals = @_;
    $code .= sprintf ".long\t%s\n", join ',', @vals;
}
for ($i=0;$i<256;$i++) { &data_word(&S1110($i),&S4404($i)); }
for ($i=0;$i<256;$i++) { &data_word(&S0222($i),&S3033($i)); }
# void VR_Camellia_cbc_encrypt (const unsigned char *inp, unsigned char *out,
# size_t length, const CAMELLIA_KEY *key,
# unsigned char *ivp,const int enc);
{
$_key="0(%rsp)";
$_end="8(%rsp)"; # inp+len&~15
$_res="16(%rsp)"; # len&15
$ivec="24(%rsp)";
$_ivp="40(%rsp)";
$_rsp="48(%rsp)";
$code.=<<___;
.globl VR_Camellia_cbc_encrypt
.type VR_Camellia_cbc_encrypt,\@function,6
.align 16
VR_Camellia_cbc_encrypt:
.cfi_startproc
cmp \$0,%rdx
je .Lcbc_abort
push %rbx
.cfi_push %rbx
push %rbp
.cfi_push %rbp
push %r12
.cfi_push %r12
push %r13
.cfi_push %r13
push %r14
.cfi_push %r14
push %r15
.cfi_push %r15
.Lcbc_prologue:
mov %rsp,%rbp
.cfi_def_cfa_register %rbp
sub \$64,%rsp
and \$-64,%rsp
# place stack frame just "above mod 1024" the key schedule,
# this ensures that cache associativity suffices
lea -64-63(%rcx),%r10
sub %rsp,%r10
neg %r10
and \$0x3C0,%r10
sub %r10,%rsp
#add \$8,%rsp # 8 is reserved for callee's ra
mov %rdi,$inp # inp argument
mov %rsi,$out # out argument
mov %r8,%rbx # ivp argument
mov %rcx,$key # key argument
mov 272(%rcx),${keyend}d # grandRounds
mov %r8,$_ivp
mov %rbp,$_rsp
.cfi_cfa_expression $_rsp,deref,+56
.Lcbc_body:
lea .LCamellia_SBOX(%rip),$Tbl
mov \$32,%ecx
.align 4
.Lcbc_prefetch_sbox:
mov 0($Tbl),%rax
mov 32($Tbl),%rsi
mov 64($Tbl),%rdi
mov 96($Tbl),%r11
lea 128($Tbl),$Tbl
loop .Lcbc_prefetch_sbox
sub \$4096,$Tbl
shl \$6,$keyend
mov %rdx,%rcx # len argument
lea ($key,$keyend),$keyend
cmp \$0,%r9d # enc argument
je .LCBC_DECRYPT
and \$-16,%rdx
and \$15,%rcx # length residue
lea ($inp,%rdx),%rdx
mov $key,$_key
mov %rdx,$_end
mov %rcx,$_res
cmp $inp,%rdx
mov 0(%rbx),@S[0] # load IV
mov 4(%rbx),@S[1]
mov 8(%rbx),@S[2]
mov 12(%rbx),@S[3]
je .Lcbc_enc_tail
jmp .Lcbc_eloop
.align 16
.Lcbc_eloop:
xor 0($inp),@S[0]
xor 4($inp),@S[1]
xor 8($inp),@S[2]
bswap @S[0]
xor 12($inp),@S[3]
bswap @S[1]
bswap @S[2]
bswap @S[3]
call _x86_64_Camellia_encrypt
mov $_key,$key # "rewind" the key
bswap @S[0]
mov $_end,%rdx
bswap @S[1]
mov $_res,%rcx
bswap @S[2]
mov @S[0],0($out)
bswap @S[3]
mov @S[1],4($out)
mov @S[2],8($out)
lea 16($inp),$inp
mov @S[3],12($out)
cmp %rdx,$inp
lea 16($out),$out
jne .Lcbc_eloop
cmp \$0,%rcx
jne .Lcbc_enc_tail
mov $_ivp,$out
mov @S[0],0($out) # write out IV residue
mov @S[1],4($out)
mov @S[2],8($out)
mov @S[3],12($out)
jmp .Lcbc_done
.align 16
.Lcbc_enc_tail:
xor %rax,%rax
mov %rax,0+$ivec
mov %rax,8+$ivec
mov %rax,$_res
.Lcbc_enc_pushf:
pushfq
cld
mov $inp,%rsi
lea 8+$ivec,%rdi
.long 0x9066A4F3 # rep movsb
popfq
.Lcbc_enc_popf:
lea $ivec,$inp
lea 16+$ivec,%rax
mov %rax,$_end
jmp .Lcbc_eloop # one more time
.align 16
.LCBC_DECRYPT:
xchg $key,$keyend
add \$15,%rdx
and \$15,%rcx # length residue
and \$-16,%rdx
mov $key,$_key
lea ($inp,%rdx),%rdx
mov %rdx,$_end
mov %rcx,$_res
mov (%rbx),%rax # load IV
mov 8(%rbx),%rbx
jmp .Lcbc_dloop
.align 16
.Lcbc_dloop:
mov 0($inp),@S[0]
mov 4($inp),@S[1]
mov 8($inp),@S[2]
bswap @S[0]
mov 12($inp),@S[3]
bswap @S[1]
mov %rax,0+$ivec # save IV to temporary storage
bswap @S[2]
mov %rbx,8+$ivec
bswap @S[3]
call _x86_64_Camellia_decrypt
mov $_key,$key # "rewind" the key
mov $_end,%rdx
mov $_res,%rcx
bswap @S[0]
mov ($inp),%rax # load IV for next iteration
bswap @S[1]
mov 8($inp),%rbx
bswap @S[2]
xor 0+$ivec,@S[0]
bswap @S[3]
xor 4+$ivec,@S[1]
xor 8+$ivec,@S[2]
lea 16($inp),$inp
xor 12+$ivec,@S[3]
cmp %rdx,$inp
je .Lcbc_ddone
mov @S[0],0($out)
mov @S[1],4($out)
mov @S[2],8($out)
mov @S[3],12($out)
lea 16($out),$out
jmp .Lcbc_dloop
.align 16
.Lcbc_ddone:
mov $_ivp,%rdx
cmp \$0,%rcx
jne .Lcbc_dec_tail
mov @S[0],0($out)
mov @S[1],4($out)
mov @S[2],8($out)
mov @S[3],12($out)
mov %rax,(%rdx) # write out IV residue
mov %rbx,8(%rdx)
jmp .Lcbc_done
.align 16
.Lcbc_dec_tail:
mov @S[0],0+$ivec
mov @S[1],4+$ivec
mov @S[2],8+$ivec
mov @S[3],12+$ivec
.Lcbc_dec_pushf:
pushfq
cld
lea 8+$ivec,%rsi
lea ($out),%rdi
.long 0x9066A4F3 # rep movsb
popfq
.Lcbc_dec_popf:
mov %rax,(%rdx) # write out IV residue
mov %rbx,8(%rdx)
jmp .Lcbc_done
.align 16
.Lcbc_done:
mov $_rsp,%rcx
.cfi_def_cfa %rcx,56
mov 0(%rcx),%r15
.cfi_restore %r15
mov 8(%rcx),%r14
.cfi_restore %r14
mov 16(%rcx),%r13
.cfi_restore %r13
mov 24(%rcx),%r12
.cfi_restore %r12
mov 32(%rcx),%rbp
.cfi_restore %rbp
mov 40(%rcx),%rbx
.cfi_restore %rbx
lea 48(%rcx),%rsp
.cfi_def_cfa %rsp,8
.Lcbc_abort:
ret
.cfi_endproc
.size VR_Camellia_cbc_encrypt,.-VR_Camellia_cbc_encrypt
.asciz "Camellia for x86_64 by <appro\@openssl.org>"
___
}
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";
$code.=<<___;
.extern __imp_RtlVirtualUnwind
.type common_se_handler,\@abi-omnipotent
.align 16
common_se_handler:
push %rsi
push %rdi
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
pushfq
lea -64(%rsp),%rsp
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
mov 8($disp),%rsi # disp->ImageBase
mov 56($disp),%r11 # disp->HandlerData
mov 0(%r11),%r10d # HandlerData[0]
lea (%rsi,%r10),%r10 # prologue label
cmp %r10,%rbx # context->Rip<prologue label
jb .Lin_prologue
mov 152($context),%rax # pull context->Rsp
mov 4(%r11),%r10d # HandlerData[1]
lea (%rsi,%r10),%r10 # epilogue label
cmp %r10,%rbx # context->Rip>=epilogue label
jae .Lin_prologue
lea 40(%rax),%rax
mov -8(%rax),%rbx
mov -16(%rax),%rbp
mov -24(%rax),%r13
mov -32(%rax),%r14
mov -40(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
.Lin_prologue:
mov 8(%rax),%rdi
mov 16(%rax),%rsi
mov %rax,152($context) # restore context->Rsp
mov %rsi,168($context) # restore context->Rsi
mov %rdi,176($context) # restore context->Rdi
jmp .Lcommon_seh_exit
.size common_se_handler,.-common_se_handler
.type cbc_se_handler,\@abi-omnipotent
.align 16
cbc_se_handler:
push %rsi
push %rdi
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
pushfq
lea -64(%rsp),%rsp
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
lea .Lcbc_prologue(%rip),%r10
cmp %r10,%rbx # context->Rip<.Lcbc_prologue
jb .Lin_cbc_prologue
lea .Lcbc_body(%rip),%r10
cmp %r10,%rbx # context->Rip<.Lcbc_body
jb .Lin_cbc_frame_setup
mov 152($context),%rax # pull context->Rsp
lea .Lcbc_abort(%rip),%r10
cmp %r10,%rbx # context->Rip>=.Lcbc_abort
jae .Lin_cbc_prologue
# handle pushf/popf in VR_Camellia_cbc_encrypt
lea .Lcbc_enc_pushf(%rip),%r10
cmp %r10,%rbx # context->Rip<=.Lcbc_enc_pushf
jbe .Lin_cbc_no_flag
lea 8(%rax),%rax
lea .Lcbc_enc_popf(%rip),%r10
cmp %r10,%rbx # context->Rip<.Lcbc_enc_popf
jb .Lin_cbc_no_flag
lea -8(%rax),%rax
lea .Lcbc_dec_pushf(%rip),%r10
cmp %r10,%rbx # context->Rip<=.Lcbc_dec_pushf
jbe .Lin_cbc_no_flag
lea 8(%rax),%rax
lea .Lcbc_dec_popf(%rip),%r10
cmp %r10,%rbx # context->Rip<.Lcbc_dec_popf
jb .Lin_cbc_no_flag
lea -8(%rax),%rax
.Lin_cbc_no_flag:
mov 48(%rax),%rax # $_rsp
lea 48(%rax),%rax
.Lin_cbc_frame_setup:
mov -8(%rax),%rbx
mov -16(%rax),%rbp
mov -24(%rax),%r12
mov -32(%rax),%r13
mov -40(%rax),%r14
mov -48(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r12,216($context) # restore context->R12
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
.Lin_cbc_prologue:
mov 8(%rax),%rdi
mov 16(%rax),%rsi
mov %rax,152($context) # restore context->Rsp
mov %rsi,168($context) # restore context->Rsi
mov %rdi,176($context) # restore context->Rdi
.align 4
.Lcommon_seh_exit:
mov 40($disp),%rdi # disp->ContextRecord
mov $context,%rsi # context
mov \$`1232/8`,%ecx # sizeof(CONTEXT)
.long 0xa548f3fc # cld; rep movsq
mov $disp,%rsi
xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
mov 8(%rsi),%rdx # arg2, disp->ImageBase
mov 0(%rsi),%r8 # arg3, disp->ControlPc
mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
mov 40(%rsi),%r10 # disp->ContextRecord
lea 56(%rsi),%r11 # &disp->HandlerData
lea 24(%rsi),%r12 # &disp->EstablisherFrame
mov %r10,32(%rsp) # arg5
mov %r11,40(%rsp) # arg6
mov %r12,48(%rsp) # arg7
mov %rcx,56(%rsp) # arg8, (NULL)
call *__imp_RtlVirtualUnwind(%rip)
mov \$1,%eax # ExceptionContinueSearch
lea 64(%rsp),%rsp
popfq
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
pop %rbx
pop %rdi
pop %rsi
ret
.size cbc_se_handler,.-cbc_se_handler
.section .pdata
.align 4
.rva .LSEH_begin_VR_Camellia_EncryptBlock_Rounds
.rva .LSEH_end_VR_Camellia_EncryptBlock_Rounds
.rva .LSEH_info_VR_Camellia_EncryptBlock_Rounds
.rva .LSEH_begin_VR_Camellia_DecryptBlock_Rounds
.rva .LSEH_end_VR_Camellia_DecryptBlock_Rounds
.rva .LSEH_info_VR_Camellia_DecryptBlock_Rounds
.rva .LSEH_begin_VR_Camellia_Ekeygen
.rva .LSEH_end_VR_Camellia_Ekeygen
.rva .LSEH_info_VR_Camellia_Ekeygen
.rva .LSEH_begin_VR_Camellia_cbc_encrypt
.rva .LSEH_end_VR_Camellia_cbc_encrypt
.rva .LSEH_info_VR_Camellia_cbc_encrypt
.section .xdata
.align 8
.LSEH_info_VR_Camellia_EncryptBlock_Rounds:
.byte 9,0,0,0
.rva common_se_handler
.rva .Lenc_prologue,.Lenc_epilogue # HandlerData[]
.LSEH_info_VR_Camellia_DecryptBlock_Rounds:
.byte 9,0,0,0
.rva common_se_handler
.rva .Ldec_prologue,.Ldec_epilogue # HandlerData[]
.LSEH_info_VR_Camellia_Ekeygen:
.byte 9,0,0,0
.rva common_se_handler
.rva .Lkey_prologue,.Lkey_epilogue # HandlerData[]
.LSEH_info_VR_Camellia_cbc_encrypt:
.byte 9,0,0,0
.rva cbc_se_handler
___
}
# Resolve every `...` placeholder in the generated assembly by evaluating
# it as a Perl expression (the /e modifier), then emit the result.
$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
# Check close on the write handle: buffered write errors (full disk,
# broken pipe) only surface reliably at close time.
close STDOUT or die "error closing STDOUT: $!";
| 23.69459 | 106 | 0.625359 |
ed8091ce8bf5cd951b4fe843fc1b73359bee126b | 215 | t | Perl | t/only.t | tokuhirom/test-base-pm | 252f93c077bcee9f08ca786cac2978e0d7fd77b0 | [
"Net-SNMP",
"Xnet"
] | 1 | 2015-11-09T01:23:13.000Z | 2015-11-09T01:23:13.000Z | t/only.t | tokuhirom/test-base-pm | 252f93c077bcee9f08ca786cac2978e0d7fd77b0 | [
"Net-SNMP",
"Xnet"
] | null | null | null | t/only.t | tokuhirom/test-base-pm | 252f93c077bcee9f08ca786cac2978e0d7fd77b0 | [
"Net-SNMP",
"Xnet"
] | null | null | null | use Test::Base tests => 3;
# Suppress the diagnostic Test::Base normally prints when ONLY markers
# restrict the block list.
no_diag_on_only;
# Execute the (trivial) per-block test over the selected blocks.
run { pass };
# With ONLY markers present only one data block should remain selected...
is scalar(blocks), 1;
# ...and per the assertion it is the block whose foo section is "2".
is first_block->foo, "2";
__DATA__
=== One
--- foo: 1
=== Two
--- ONLY
--- foo: 2
=== Three
--- foo: 3
--- ONLY
=== Four
--- foo: 4
ed3a28924a6aaff6054cdd2ea753e9d6ffdb77ed | 1,413 | pm | Perl | t/lib/AuditTest2/Schema/Result/User.pm | git-the-cpan/DBIx-Class-AuditLog | ff1d64ff9435751039bc0fe7b4f1da80ac33cef4 | [
"Artistic-1.0"
] | null | null | null | t/lib/AuditTest2/Schema/Result/User.pm | git-the-cpan/DBIx-Class-AuditLog | ff1d64ff9435751039bc0fe7b4f1da80ac33cef4 | [
"Artistic-1.0"
] | null | null | null | t/lib/AuditTest2/Schema/Result/User.pm | git-the-cpan/DBIx-Class-AuditLog | ff1d64ff9435751039bc0fe7b4f1da80ac33cef4 | [
"Artistic-1.0"
] | null | null | null | use utf8;
package AuditTest2::Schema::Result::User;
# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE
=head1 NAME
AuditTest2::Schema::Result::User
=cut
use strict;
use warnings;
use base 'DBIx::Class::Core';
=head1 TABLE: C<audit_test.user>
=cut
__PACKAGE__->table("user");
=head1 ACCESSORS
=head2 id
data_type: 'integer'
is_auto_increment: 1
is_nullable: 0
=head2 name
data_type: 'varchar'
is_nullable: 1
size: 100
=head2 phone
data_type: 'varchar'
is_nullable: 1
size: 30
=head2 email
data_type: 'varchar'
is_nullable: 1
size: 100
=cut
__PACKAGE__->add_columns(
"id",
{ data_type => "integer", is_auto_increment => 1, is_nullable => 0 },
"name",
{ data_type => "varchar", is_nullable => 1, size => 100 },
"phone",
{ data_type => "varchar", is_nullable => 1, size => 30 },
"email",
{ data_type => "varchar", is_nullable => 1, size => 100 },
);
=head1 PRIMARY KEY
=over 4
=item * L</id>
=back
=cut
__PACKAGE__->set_primary_key("id");
# Created by DBIx::Class::Schema::Loader v0.07015 @ 2012-02-13 15:52:04
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:I6IyqTkjKebY+VQoOcIYqA
# You can replace this text with custom code or comments, and it will be preserved on regeneration
# Enable row-change auditing for this result class (DBIx::Class::AuditLog).
__PACKAGE__->load_components(qw/ AuditLog /);
# Override the generated 'email' column with audit_log_column => 0 --
# presumably opts the column out of audit logging (e.g. to keep addresses
# out of the audit trail); confirm against DBIx::Class::AuditLog docs.
__PACKAGE__->add_columns( "+email", { audit_log_column => 0, } );
# Modules must end with a true value.
1;
| 16.430233 | 98 | 0.672328 |
73f8701c1467dbc71939f6574b1c138272405bd7 | 9,592 | pm | Perl | perl/vendor/lib/DateTime/Locale/pa_PK.pm | mnikolop/Thesis_project_CyberDoc | 9a37fdd5a31de24cb902ee31ef19eb992faa1665 | [
"Apache-2.0"
] | 4 | 2018-04-20T07:27:13.000Z | 2021-12-21T05:19:24.000Z | perl/vendor/lib/DateTime/Locale/pa_PK.pm | mnikolop/Thesis_project_CyberDoc | 9a37fdd5a31de24cb902ee31ef19eb992faa1665 | [
"Apache-2.0"
] | 4 | 2021-03-10T19:10:00.000Z | 2021-05-11T14:58:19.000Z | perl/vendor/lib/DateTime/Locale/pa_PK.pm | mnikolop/Thesis_project_CyberDoc | 9a37fdd5a31de24cb902ee31ef19eb992faa1665 | [
"Apache-2.0"
] | 1 | 2019-11-12T02:29:26.000Z | 2019-11-12T02:29:26.000Z | ###########################################################################
#
# This file is auto-generated by the Perl DateTime Suite locale
# generator (0.05). This code generator comes with the
# DateTime::Locale distribution in the tools/ directory, and is called
# generate-from-cldr.
#
# This file as generated from the CLDR XML locale data. See the
# LICENSE.cldr file included in this distribution for license details.
#
# This file was generated from the source file pa_PK.xml
# The source file version number was 1.15, generated on
# 2009/05/05 23:06:39.
#
# Do not edit this file directly.
#
###########################################################################
# Auto-generated CLDR locale data module for Punjabi (Pakistan).
package DateTime::Locale::pa_PK;
use strict;
use warnings;
use utf8;
# All calendar data is inherited from the Arabic-script Punjabi locale;
# only the values below are overridden here.
use base 'DateTime::Locale::pa_Arab_PK';
# CLDR release this data was generated from.
sub cldr_version { return "1\.7\.1" }
{
# CLDR first-day-of-week index; "7" -- presumably Sunday under this
# numbering, confirm against DateTime::Locale conventions.
my $first_day_of_week = "7";
sub first_day_of_week { return $first_day_of_week }
}
{
# glibc strftime(3)-style date format for this locale.
my $glibc_date_format = "\%d\/\%m\/\%Y";
sub glibc_date_format { return $glibc_date_format }
}
{
# Full date-time format (contains Arabic-script literal text).
my $glibc_datetime_format = "و\ \%H\:\%M\:\%S\ \%Z\ ت\ \%d\ \%B\ \%Y";
sub glibc_datetime_format { return $glibc_datetime_format }
}
{
# 24-hour time format.
my $glibc_time_format = "\%H\:\%M\:\%S";
sub glibc_time_format { return $glibc_time_format }
}
{
# 12-hour time format with day-period marker (%P).
my $glibc_time_12_format = "\%P\ \%I\:\%M\:\%S";
sub glibc_time_12_format { return $glibc_time_12_format }
}
1;
__END__
=pod
=encoding utf8
=head1 NAME
DateTime::Locale::pa_PK
=head1 SYNOPSIS
use DateTime;
my $dt = DateTime->now( locale => 'pa_PK' );
print $dt->month_name();
=head1 DESCRIPTION
This is the DateTime locale package for Punjabi Pakistan.
=head1 DATA
This locale inherits from the L<DateTime::Locale::pa_Arab_PK> locale.
It contains the following data.
=head2 Days
=head3 Wide (format)
پیر
منگل
بُدھ
جمعرات
جمعہ
ہفتہ
اتوار
=head3 Abbreviated (format)
پیر
منگل
بُدھ
جمعرات
جمعہ
ہفتہ
اتوار
=head3 Narrow (format)
ਸੋ
ਮੰ
ਬੁੱ
ਵੀ
ਸ਼ੁੱ
ਸ਼
ਐ
=head3 Wide (stand-alone)
پیر
منگل
بُدھ
جمعرات
جمعہ
ہفتہ
اتوار
=head3 Abbreviated (stand-alone)
پیر
منگل
بُدھ
جمعرات
جمعہ
ہفتہ
اتوار
=head3 Narrow (stand-alone)
ਸੋ
ਮੰ
ਬੁੱ
ਵੀ
ਸ਼ੁੱ
ਸ਼
ਐ
=head2 Months
=head3 Wide (format)
جنوری
فروری
مارچ
اپریل
مئ
جون
جولائی
اگست
ستمبر
اکتوبر
نومبر
دسمبر
=head3 Abbreviated (format)
جنوری
فروری
مارچ
اپریل
مئ
جون
جولائی
اگست
ستمبر
اکتوبر
نومبر
دسمبر
=head3 Narrow (format)
ਜ
ਫ
ਮਾ
ਅ
ਮ
ਜੂ
ਜੁ
ਅ
ਸ
ਅ
ਨ
ਦ
=head3 Wide (stand-alone)
جنوری
فروری
مارچ
اپریل
مئ
جون
جولائی
اگست
ستمبر
اکتوبر
نومبر
دسمبر
=head3 Abbreviated (stand-alone)
1
2
3
4
5
6
7
8
9
10
11
12
=head3 Narrow (stand-alone)
ਜ
ਫ
ਮਾ
ਅ
ਮ
ਜੂ
ਜੁ
ਅ
ਸ
ਅ
ਨ
ਦ
=head2 Quarters
=head3 Wide (format)
چوتھاي پہلاں
چوتھاي دوجا
چوتھاي تيجا
چوتھاي چوتھا
=head3 Abbreviated (format)
چوتھاي پہلاں
چوتھاي دوجا
چوتھاي تيجا
چوتھاي چوتھا
=head3 Narrow (format)
1
2
3
4
=head3 Wide (stand-alone)
چوتھاي پہلاں
چوتھاي دوجا
چوتھاي تيجا
چوتھاي چوتھا
=head3 Abbreviated (stand-alone)
چوتھاي پہلاں
چوتھاي دوجا
چوتھاي تيجا
چوتھاي چوتھا
=head3 Narrow (stand-alone)
1
2
3
4
=head2 Eras
=head3 Wide
ايساپورو
سں
=head3 Abbreviated
ايساپورو
سں
=head3 Narrow
ايساپورو
سں
=head2 Date Formats
=head3 Full
2008-02-05T18:30:30 = منگل, 05 فروری 2008
1995-12-22T09:05:02 = جمعہ, 22 دسمبر 1995
-0010-09-15T04:44:23 = ہفتہ, 15 ستمبر -10
=head3 Long
2008-02-05T18:30:30 = 5 فروری 2008
1995-12-22T09:05:02 = 22 دسمبر 1995
-0010-09-15T04:44:23 = 15 ستمبر -10
=head3 Medium
2008-02-05T18:30:30 = 5 فروری 2008
1995-12-22T09:05:02 = 22 دسمبر 1995
-0010-09-15T04:44:23 = 15 ستمبر -10
=head3 Short
2008-02-05T18:30:30 = 05/02/2008
1995-12-22T09:05:02 = 22/12/1995
-0010-09-15T04:44:23 = 15/09/-010
=head3 Default
2008-02-05T18:30:30 = 5 فروری 2008
1995-12-22T09:05:02 = 22 دسمبر 1995
-0010-09-15T04:44:23 = 15 ستمبر -10
=head2 Time Formats
=head3 Full
2008-02-05T18:30:30 = 6:30:30 ਸ਼ਾਮ UTC
1995-12-22T09:05:02 = 9:05:02 ਸਵੇਰੇ UTC
-0010-09-15T04:44:23 = 4:44:23 ਸਵੇਰੇ UTC
=head3 Long
2008-02-05T18:30:30 = 6:30:30 ਸ਼ਾਮ UTC
1995-12-22T09:05:02 = 9:05:02 ਸਵੇਰੇ UTC
-0010-09-15T04:44:23 = 4:44:23 ਸਵੇਰੇ UTC
=head3 Medium
2008-02-05T18:30:30 = 6:30:30 ਸ਼ਾਮ
1995-12-22T09:05:02 = 9:05:02 ਸਵੇਰੇ
-0010-09-15T04:44:23 = 4:44:23 ਸਵੇਰੇ
=head3 Short
2008-02-05T18:30:30 = 6:30 ਸ਼ਾਮ
1995-12-22T09:05:02 = 9:05 ਸਵੇਰੇ
-0010-09-15T04:44:23 = 4:44 ਸਵੇਰੇ
=head3 Default
2008-02-05T18:30:30 = 6:30:30 ਸ਼ਾਮ
1995-12-22T09:05:02 = 9:05:02 ਸਵੇਰੇ
-0010-09-15T04:44:23 = 4:44:23 ਸਵੇਰੇ
=head2 Datetime Formats
=head3 Full
2008-02-05T18:30:30 = منگل, 05 فروری 2008 6:30:30 ਸ਼ਾਮ UTC
1995-12-22T09:05:02 = جمعہ, 22 دسمبر 1995 9:05:02 ਸਵੇਰੇ UTC
-0010-09-15T04:44:23 = ہفتہ, 15 ستمبر -10 4:44:23 ਸਵੇਰੇ UTC
=head3 Long
2008-02-05T18:30:30 = 5 فروری 2008 6:30:30 ਸ਼ਾਮ UTC
1995-12-22T09:05:02 = 22 دسمبر 1995 9:05:02 ਸਵੇਰੇ UTC
-0010-09-15T04:44:23 = 15 ستمبر -10 4:44:23 ਸਵੇਰੇ UTC
=head3 Medium
2008-02-05T18:30:30 = 5 فروری 2008 6:30:30 ਸ਼ਾਮ
1995-12-22T09:05:02 = 22 دسمبر 1995 9:05:02 ਸਵੇਰੇ
-0010-09-15T04:44:23 = 15 ستمبر -10 4:44:23 ਸਵੇਰੇ
=head3 Short
2008-02-05T18:30:30 = 05/02/2008 6:30 ਸ਼ਾਮ
1995-12-22T09:05:02 = 22/12/1995 9:05 ਸਵੇਰੇ
-0010-09-15T04:44:23 = 15/09/-010 4:44 ਸਵੇਰੇ
=head3 Default
2008-02-05T18:30:30 = 5 فروری 2008 6:30:30 ਸ਼ਾਮ
1995-12-22T09:05:02 = 22 دسمبر 1995 9:05:02 ਸਵੇਰੇ
-0010-09-15T04:44:23 = 15 ستمبر -10 4:44:23 ਸਵੇਰੇ
=head2 Available Formats
=head3 d (d)
2008-02-05T18:30:30 = 5
1995-12-22T09:05:02 = 22
-0010-09-15T04:44:23 = 15
=head3 EEEd (d EEE)
2008-02-05T18:30:30 = 5 منگل
1995-12-22T09:05:02 = 22 جمعہ
-0010-09-15T04:44:23 = 15 ہفتہ
=head3 HHmmss (HH:mm:ss)
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head3 Hm (H:mm)
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 9:05
-0010-09-15T04:44:23 = 4:44
=head3 hm (h:mm a)
2008-02-05T18:30:30 = 6:30 ਸ਼ਾਮ
1995-12-22T09:05:02 = 9:05 ਸਵੇਰੇ
-0010-09-15T04:44:23 = 4:44 ਸਵੇਰੇ
=head3 Hms (H:mm:ss)
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 9:05:02
-0010-09-15T04:44:23 = 4:44:23
=head3 hms (h:mm:ss a)
2008-02-05T18:30:30 = 6:30:30 ਸ਼ਾਮ
1995-12-22T09:05:02 = 9:05:02 ਸਵੇਰੇ
-0010-09-15T04:44:23 = 4:44:23 ਸਵੇਰੇ
=head3 M (L)
2008-02-05T18:30:30 = 2
1995-12-22T09:05:02 = 12
-0010-09-15T04:44:23 = 9
=head3 Md (d/M)
2008-02-05T18:30:30 = 5/2
1995-12-22T09:05:02 = 22/12
-0010-09-15T04:44:23 = 15/9
=head3 MEd (E, M-d)
2008-02-05T18:30:30 = منگل, 2-5
1995-12-22T09:05:02 = جمعہ, 12-22
-0010-09-15T04:44:23 = ہفتہ, 9-15
=head3 MMM (LLL)
2008-02-05T18:30:30 = 2
1995-12-22T09:05:02 = 12
-0010-09-15T04:44:23 = 9
=head3 MMMd (MMM d)
2008-02-05T18:30:30 = فروری 5
1995-12-22T09:05:02 = دسمبر 22
-0010-09-15T04:44:23 = ستمبر 15
=head3 MMMEd (E MMM d)
2008-02-05T18:30:30 = منگل فروری 5
1995-12-22T09:05:02 = جمعہ دسمبر 22
-0010-09-15T04:44:23 = ہفتہ ستمبر 15
=head3 MMMMd (MMMM d)
2008-02-05T18:30:30 = فروری 5
1995-12-22T09:05:02 = دسمبر 22
-0010-09-15T04:44:23 = ستمبر 15
=head3 MMMMEd (E MMMM d)
2008-02-05T18:30:30 = منگل فروری 5
1995-12-22T09:05:02 = جمعہ دسمبر 22
-0010-09-15T04:44:23 = ہفتہ ستمبر 15
=head3 mmss (mm:ss)
2008-02-05T18:30:30 = 30:30
1995-12-22T09:05:02 = 05:02
-0010-09-15T04:44:23 = 44:23
=head3 ms (mm:ss)
2008-02-05T18:30:30 = 30:30
1995-12-22T09:05:02 = 05:02
-0010-09-15T04:44:23 = 44:23
=head3 y (y)
2008-02-05T18:30:30 = 2008
1995-12-22T09:05:02 = 1995
-0010-09-15T04:44:23 = -10
=head3 yM (y-M)
2008-02-05T18:30:30 = 2008-2
1995-12-22T09:05:02 = 1995-12
-0010-09-15T04:44:23 = -10-9
=head3 yMEd (EEE, y-M-d)
2008-02-05T18:30:30 = منگل, 2008-2-5
1995-12-22T09:05:02 = جمعہ, 1995-12-22
-0010-09-15T04:44:23 = ہفتہ, -10-9-15
=head3 yMMM (y MMM)
2008-02-05T18:30:30 = 2008 فروری
1995-12-22T09:05:02 = 1995 دسمبر
-0010-09-15T04:44:23 = -10 ستمبر
=head3 yMMMEd (EEE, y MMM d)
2008-02-05T18:30:30 = منگل, 2008 فروری 5
1995-12-22T09:05:02 = جمعہ, 1995 دسمبر 22
-0010-09-15T04:44:23 = ہفتہ, -10 ستمبر 15
=head3 yMMMM (y MMMM)
2008-02-05T18:30:30 = 2008 فروری
1995-12-22T09:05:02 = 1995 دسمبر
-0010-09-15T04:44:23 = -10 ستمبر
=head3 yQ (y Q)
2008-02-05T18:30:30 = 2008 1
1995-12-22T09:05:02 = 1995 4
-0010-09-15T04:44:23 = -10 3
=head3 yQQQ (y QQQ)
2008-02-05T18:30:30 = 2008 چوتھاي پہلاں
1995-12-22T09:05:02 = 1995 چوتھاي چوتھا
-0010-09-15T04:44:23 = -10 چوتھاي تيجا
=head3 yyMMM (MMM yy)
2008-02-05T18:30:30 = فروری 08
1995-12-22T09:05:02 = دسمبر 95
-0010-09-15T04:44:23 = ستمبر -10
=head3 yyQ (Q yy)
2008-02-05T18:30:30 = 1 08
1995-12-22T09:05:02 = 4 95
-0010-09-15T04:44:23 = 3 -10
=head2 Miscellaneous
=head3 Prefers 24 hour time?
No
=head3 Local first day of the week
اتوار
=head1 SUPPORT
See L<DateTime::Locale>.
=head1 AUTHOR
Dave Rolsky <autarch@urth.org>
=head1 COPYRIGHT
Copyright (c) 2008 David Rolsky. All rights reserved. This program is
free software; you can redistribute it and/or modify it under the same
terms as Perl itself.
This module was generated from data provided by the CLDR project, see
the LICENSE.cldr in this distribution for details on the CLDR data's
license.
=cut
| 16.285229 | 75 | 0.628857 |
73ec19eb4ec9837e21a7678c473d4e0561f8faa0 | 432 | pm | Perl | lib/VMOMI/ArrayOfGuestAuthAliasInfo.pm | stumpr/p5-vmomi | e2571d72a1f552ddd0258ad289ec229d8d12a147 | [
"Apache-2.0"
] | 1 | 2017-06-22T21:26:24.000Z | 2017-06-22T21:26:24.000Z | lib/VMOMI/ArrayOfGuestAuthAliasInfo.pm | restump/p5-vmomi | e2571d72a1f552ddd0258ad289ec229d8d12a147 | [
"Apache-2.0"
] | null | null | null | lib/VMOMI/ArrayOfGuestAuthAliasInfo.pm | restump/p5-vmomi | e2571d72a1f552ddd0258ad289ec229d8d12a147 | [
"Apache-2.0"
] | 1 | 2016-07-19T19:56:09.000Z | 2016-07-19T19:56:09.000Z | package VMOMI::ArrayOfGuestAuthAliasInfo;
# Generated VMOMI complex type: a container wrapping a list of
# GuestAuthAliasInfo objects for the vSphere SOAP API.
use parent 'VMOMI::ComplexType';
use strict;
use warnings;

# No ancestors beyond ComplexType for this generated wrapper type.
our @class_ancestors = ( );

# Member spec rows; the four columns look like
# [accessor_name, vmomi_type, is_array, is_optional] — presumably the layout
# expected by the VMOMI::ComplexType serializer; TODO confirm there.
our @class_members = (
    ['GuestAuthAliasInfo', 'GuestAuthAliasInfo', 1, 1],
);
# Class-level accessor for the ancestor-type list of this generated type
# (empty here). Accepts and ignores the invocant, mirroring
# get_class_members below.
sub get_class_ancestors {
    my $class = shift;
    return @class_ancestors;
}
# Return the member specs inherited from the superclass followed by this
# class's own. Called as a class method; relies on SUPER:: resolution via
# the 'use parent' declaration above.
sub get_class_members {
    my $class     = shift;
    my @inherited = $class->SUPER::get_class_members();
    return (@inherited, @class_members);
}
1;
| 18 | 59 | 0.703704 |
73da33ddc4ec1a7e6f52e0b7dabdc7c8b438d83a | 366 | t | Perl | t/17-propagate-subname-colons.t | perlpunk/Test-Warnings | a885cba65f7a47b690b3080bccbf47198b03ef0f | [
"Artistic-1.0"
] | null | null | null | t/17-propagate-subname-colons.t | perlpunk/Test-Warnings | a885cba65f7a47b690b3080bccbf47198b03ef0f | [
"Artistic-1.0"
] | null | null | null | t/17-propagate-subname-colons.t | perlpunk/Test-Warnings | a885cba65f7a47b690b3080bccbf47198b03ef0f | [
"Artistic-1.0"
] | null | null | null | use strict;
use warnings;
# checks handling of a warning handler named '::warning_capturer':
# re-runs t/14-propagate-subname.t with the $SIG{__WARN__} handler name
# rewritten to its fully-qualified (leading-colons) form.
my $code = do {
    # Three-arg open: the filename can never be misparsed as an open mode
    # (the original two-arg form is a well-known pitfall).
    open(my $fh, '<', 't/14-propagate-subname.t')
        or die "cannot open t/14-propagate-subname.t for reading: $!";
    local $/;
    <$fh>
};
$code =~ s/\$SIG\{__WARN__\} = 'warning_capturer'/\$SIG\{__WARN__\} = '::warning_capturer'/;
# String eval is intentional here: the whole sibling test is executed.
eval $code;
die $@ if $@;
| 22.875 | 107 | 0.63388 |
ed7b85439c65822d61c453b66efb9430a606128f | 4,471 | pl | Perl | scripts/display_ftp_data/display_ftp_files.pl | avikdatta/mojolicious_scripts | 6e3d7736e56f519978756fe20d2d6a3784e72943 | [
"Apache-2.0"
] | null | null | null | scripts/display_ftp_data/display_ftp_files.pl | avikdatta/mojolicious_scripts | 6e3d7736e56f519978756fe20d2d6a3784e72943 | [
"Apache-2.0"
] | null | null | null | scripts/display_ftp_data/display_ftp_files.pl | avikdatta/mojolicious_scripts | 6e3d7736e56f519978756fe20d2d6a3784e72943 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env perl
use strict;
use warnings;
use DBI;
use Mojolicious::Lite;
use Getopt::Long;
# Command-line options; dbhost has a default, the other three are required.
my $dbhost = 'localhost';
my $dbuser = undef;
my $dbpass = undef;
my $dbname = undef;

GetOptions(
    'dbhost=s' => \$dbhost,
    'dbuser=s' => \$dbuser,
    'dbpass=s' => \$dbpass,
    'dbname=s' => \$dbname,
) or die "Error parsing command line options\n";

my %dbparams = (
    dbhost => $dbhost,
    dbuser => $dbuser,
    dbpass => $dbpass,
    dbname => $dbname,
);

# Fail early with a usage message instead of a bare "Died at ..." that gave
# the user no hint about which option was missing.
die "Usage: $0 --dbuser <user> --dbpass <pass> --dbname <db> [--dbhost <host>]\n"
    if !$dbhost || !$dbuser || !$dbpass || !$dbname;

# One shared handle and the three pre-computed summaries served by the routes.
my $dbh              = get_db_con(\%dbparams);
my $exp_type_summary = get_exp_type($dbh);
my $lib_strategy     = get_lib_strategy($dbh);
my $histone_counts   = get_histone_counts($dbh);
## index page
# GET / — landing page; the absolute request URL (minus any trailing slash)
# is handed to the template for building subpage links.
get '/' => sub {
  my $self = shift;
  my $url = $self->req->url->to_abs->to_string;
  $url =~ s/\/$//;
  $self->render( template => 'index', title => '', url => $url );
};
## summary page
# GET /summary — serves the three pre-computed summaries either as JSON or
# as an HTML page, chosen by Mojolicious content negotiation (respond_to).
get 'summary' => sub {
  my $self = shift;
  $self->respond_to(
    json => sub{
      $self->render(
        json => {
          experiment_type => $exp_type_summary,
          library_strategy => $lib_strategy,
          histone_count => $histone_counts,
        }
      );
    },
    html => sub{
      $self->stash( experiment_type => $exp_type_summary,
                    library_strategy => $lib_strategy,
                    histone_count => $histone_counts
                  );
      $self->render( template => 'summary', title => 'Summary');
    },
  );
};
# Hand control to Mojolicious::Lite (processes the remaining command line).
app->start;
# Count indexed files per experiment type.
# Returns an arrayref of { experiment_type => ..., counts => ... } hashrefs,
# one per distinct experiment_type in ftp_index_file.
sub get_exp_type {
    my ($dbh) = @_;

    my $sth = $dbh->prepare(
        'select experiment_type, count(filename) as `counts` from ftp_index_file group by experiment_type'
    );
    $sth->execute();

    my @rows;
    while ( my $record = $sth->fetchrow_hashref() ) {
        push @rows, $record;
    }
    $sth->finish();

    return \@rows;
}
# Count distinct samples per library strategy.
# Returns an arrayref of { library_strategy => ..., sample => ... } hashrefs.
sub get_lib_strategy {
    my ($dbh) = @_;

    my $sql = 'select library_strategy, count(distinct sample_name) as sample
from ftp_index_file
group by library_strategy';
    my $sth = $dbh->prepare($sql);
    $sth->execute();

    my @rows;
    while ( my $record = $sth->fetchrow_hashref() ) {
        push @rows, $record;
    }
    $sth->finish();

    return \@rows;
}
# Count distinct ChIP-Seq samples per experiment type (histone mark).
# Returns an arrayref of { experiment_type => ..., sample => ... } hashrefs.
sub get_histone_counts {
    my ($dbh) = @_;

    # Inner derived table restricts the count to ChIP-Seq rows only.
    my $sql = 'select t.experiment_type as experiment_type,
count(distinct t.sample_name) as sample from
(select experiment_type , sample_name from ftp_index_file
where library_strategy = \'ChIP-Seq\')as t
group by t.experiment_type';
    my $sth = $dbh->prepare($sql);
    $sth->execute();

    my @rows;
    while ( my $record = $sth->fetchrow_hashref() ) {
        push @rows, $record;
    }
    $sth->finish();

    return \@rows;
}
# Open a MySQL connection from a hashref with dbhost/dbuser/dbpass/dbname
# keys and return the DBI handle.
sub get_db_con {
    my ($dbparams) = @_;
    my $database = $dbparams->{dbname};
    my $hostname = $dbparams->{dbhost};
    my $user     = $dbparams->{dbuser};
    my $password = $dbparams->{dbpass};
    my $dsn = "DBI:mysql:database=$database;host=$hostname";
    # DBI->connect returns undef on failure; previously that undef was
    # returned to the caller and caused a confusing crash at first use.
    my $dbh = DBI->connect($dsn, $user, $password)
        or die "Cannot connect to $dsn: $DBI::errstr\n";
    return $dbh;
}
__DATA__
@@ index.html.ep
<html>
<head>
<title>FTP files information</title>
</head>
<body>
<h1>FTP files information</h1>
<h2>Subpages</h2>
<dl class="dl-horizontal">
<dt><a href="<%= $url %>/summary">/summary</a></dt>
<dd>Report summary stats</dd>
</dl>
</body>
@@ summary.html.ep
<html>
<head>
<title><%= $title %></title>
</head>
<body>
<h2>Experiment type summary</h2>
<table class="table table-hover table-condensed table-striped">
<thead>
<tr>
<th>Experiment type</th>
<th>File counts</th>
</tr>
</thead>
<tbody>
% for my $row ( @$experiment_type) {
<tr>
<td><%= $$row{'experiment_type'}%></td>
<td><%= $$row{'counts'}%></td>
</tr>
% }
</table>
<p/>
<p/>
<h2>Library strategy summary</h2>
<table class="table table-hover table-condensed table-striped">
<thead>
<tr>
<th>Library strategy</th>
<th>Sample counts</th>
</tr>
</thead>
<tbody>
% for my $row ( @$library_strategy){
<tr>
<td><%= $$row{'library_strategy'}%></td>
<td><%= $$row{'sample'}%></td>
</tr>
% }
</table>
<p/>
<p/>
<h2>ChIP-Seq Histone summary</h2>
<table class="table table-hover table-condensed table-striped">
<thead>
<tr>
<th>Experiment type</th>
<th>Sample count</th>
</tr>
</thead>
<tbody>
% for my $row ( @$histone_count ){
<tr>
<td><%= $$row{'experiment_type'}%></td>
<td><%= $$row{'sample'}%></td>
</tr>
% }
</table>
<p/>
</body>
| 22.133663 | 110 | 0.573697 |
73ff59181d4e794cd24a5151012b4f4946dfda7e | 219 | pm | Perl | lib/Webtoo/Template.pm | supernovus/webtoo | 7b8a68a913bddda7e2a4935654ea98c3409f3de2 | [
"Artistic-2.0"
] | null | null | null | lib/Webtoo/Template.pm | supernovus/webtoo | 7b8a68a913bddda7e2a4935654ea98c3409f3de2 | [
"Artistic-2.0"
] | null | null | null | lib/Webtoo/Template.pm | supernovus/webtoo | 7b8a68a913bddda7e2a4935654ea98c3409f3de2 | [
"Artistic-2.0"
] | null | null | null | package Webtoo::Template;
use v5.12;
use Moo::Role;
# Consuming classes must provide the actual rendering implementation.
requires qw(render);
# Engine-specific options passed through to the backend (empty by default).
has opts => (is => 'ro', default => sub { {} });
# Directory searched for template files.
has path => (is => 'rw', default => sub { './views' });
# Underlying template engine object, built on demand via a _build_engine
# builder per Moo's 'lazy' convention (the consumer supplies the builder).
has engine => (is => 'lazy');
1;
| 16.846154 | 57 | 0.534247 |
ed30bf71910c8bfece96f00e206ac4cf3172ab1d | 1,286 | perl | Perl | BlobN2Off.perl | k----n/lookup | d89367e6ff3e43947dbc902c14c4fe2b015b98fa | [
"RSA-MD"
] | null | null | null | BlobN2Off.perl | k----n/lookup | d89367e6ff3e43947dbc902c14c4fe2b015b98fa | [
"RSA-MD"
] | null | null | null | BlobN2Off.perl | k----n/lookup | d89367e6ff3e43947dbc902c14c4fe2b015b98fa | [
"RSA-MD"
] | null | null | null | #!/usr/bin/perl
use lib ("$ENV{HOME}/lookup", "$ENV{HOME}/lib64/perl5", "/home/audris/lib64/perl5","$ENV{HOME}/lib/perl5", "$ENV{HOME}/lib/x86_64-linux-gnu/perl", "$ENV{HOME}/share/perl5");
use strict;
use warnings;
use Error qw(:try);
use TokyoCabinet;
use Compress::LZF;
# Convert a raw byte string to its lowercase hexadecimal representation.
sub toHex {
    my ($bytes) = @_;
    return unpack('H*', $bytes);
}
# Convert a hexadecimal string back into the raw bytes it encodes.
sub fromHex {
    my ($hex) = @_;
    return pack('H*', $hex);
}
# Import (offset, length) records for one blob section: maps a blob's binary
# sha1 to its location inside /data/All.blobs/blob_<sec>, stored in a
# TokyoCabinet hash on fast storage.
my $sections = 128;
my $fbaseo="All.sha1o/sha1.blob_";
my $fbasei ="/data/All.blobs/blob_";
my (%fhoso);
#for my $sec (0 .. ($sections-1)){
# Section number to process comes from the command line.
my $sec = $ARGV[0];
my $pre = "/fast";
# NOTE(review): TokyoCabinet::TDB::TLARGE is a TDB constant passed while
# tying an HDB — presumably HDB::TLARGE was intended; confirm the options.
tie %{$fhoso{$sec}}, "TokyoCabinet::HDB", "$pre/${fbaseo}$sec.tch", TokyoCabinet::HDB::OWRITER | TokyoCabinet::HDB::OCREAT,
    16777213, -1, -1, TokyoCabinet::TDB::TLARGE, 100000
    or die "cant open $pre/$fbaseo$sec.tch\n";
my $nn = 0;
if ( -f "$fbasei$sec.idx"){
  # Read the index newest-first (tac) so the loop can stop at the first sha1
  # that is already stored: everything older was imported by a previous run.
  # NOTE(review): two-arg pipe open with an interpolated path — fine for
  # trusted local paths, but list-form open '-|' would be safer.
  open A, "tac $fbasei$sec.idx|" or die ($!);
  while (<A>){
    chop ();
    # Semicolon-separated fields; -1 keeps trailing empty fields.
    my @x = split (/\;/, $_, -1);
    my $of = $x[1];
    my $len = $x[2];
    my $hash = $x[3];
    # Rows with extra columns carry the hash in field 4 instead — presumably
    # a newer index format; TODO confirm against the index writer.
    if ($#x > 4){
      $hash = $x[4];
    }
    my $h = fromHex ($hash);
    # Already present: report how many new records were added and stop.
    if (defined $fhoso{$sec}{$h}){
      print "done/updated $nn\n";
      last;
    }
    $nn ++;
    # "w w" = two BER-compressed unsigned ints, keeps records compact.
    $fhoso{$sec}{$h} = pack ("w w", $of, $len);
  }
}else{
  die "no $fbasei$sec.idx\n";
}
# Flush and close the TokyoCabinet store.
untie %{$fhoso{$sec}}
| 22.561404 | 173 | 0.543546 |
ed0142e10fded0ecb20d4c584c9dbfc1aa8758de | 1,357 | pm | Perl | BonnLogger.pm | deapplegate/wtgpipeline | 9693e8562022cc97bf5a96427e22965e1a5e8497 | [
"MIT"
] | 1 | 2019-03-15T04:01:19.000Z | 2019-03-15T04:01:19.000Z | BonnLogger.pm | deapplegate/wtgpipeline | 9693e8562022cc97bf5a96427e22965e1a5e8497 | [
"MIT"
] | 5 | 2017-12-11T00:11:39.000Z | 2021-07-09T17:05:16.000Z | BonnLogger.pm | deapplegate/wtgpipeline | 9693e8562022cc97bf5a96427e22965e1a5e8497 | [
"MIT"
] | 2 | 2017-08-15T21:19:11.000Z | 2017-10-12T00:36:35.000Z | #########################
# @file BonnLogger.pm
#
# @brief Provides some convenient utils for operating DS9 through XPA
#########################
#$CVSID = "$Id: BonnLogger.pm,v 1.2 2008-07-10 00:19:11 dapple Exp $";
use Exporter 'import';
# Public interface. Note: log_status_and_exit (defined below) is not in
# this list, so it must be called fully qualified — possibly an oversight.
@EXPORT = qw(log_start log_force_start log_status);
#############################################################################
# Id of the current BonnLogger entry; '-1' until log_start/log_force_start.
$bonn_log_id = '-1';
# Register the start of a maskImages.pl run via BonnLogger.py and remember
# the id of the new log entry in the package-global $bonn_log_id.
sub log_start {
    # Lexical instead of the implicit package global used previously.
    my $raw_bonn_log_id = `./BonnLogger.py log maskImages.pl @ARGV`;
    # Fail with context; the previous bare die only reported "Died at ...".
    die "BonnLogger.py log failed (exit status $?)\n" unless $? == 0;
    $bonn_log_id = $raw_bonn_log_id;
    chomp($bonn_log_id);
}
#############################################################################
# Like log_start, but uses BonnLogger.py's "forceLog" action to create the
# entry unconditionally. Stores the new id in the package-global $bonn_log_id.
sub log_force_start {
    # Lexical instead of the implicit package global used previously.
    my $raw_bonn_log_id = `./BonnLogger.py forceLog maskImages.pl @ARGV`;
    # Fail with context; the previous bare die only reported "Died at ...".
    die "BonnLogger.py forceLog failed (exit status $?)\n" unless $? == 0;
    $bonn_log_id = $raw_bonn_log_id;
    chomp($bonn_log_id);
}
#############################################################################
# Record an exit status (plus optional comment strings) for the current log
# entry; $bonn_log_id must have been set by log_start()/log_force_start().
sub log_status {
    my($exitCode, @comments) = @_;
    # NOTE(review): system()'s return value is not checked, so a failed
    # update goes unnoticed — presumably best-effort logging by design.
    system("./BonnLogger.py update $bonn_log_id $exitCode @comments");
}
#############################################################################
# Record an exit status like log_status(), then terminate the whole process
# with that same exit code.
sub log_status_and_exit {
    my($exitCode, @comments) = @_;
    system("./BonnLogger.py update $bonn_log_id $exitCode @comments");
    exit $exitCode;
}
#############################################################################
1;
| 23 | 77 | 0.448047 |
ed908bed4ec2d3d14a105a6a3a77c058ae482cca | 3,448 | pm | Perl | auto-lib/Paws/EC2/DescribeCoipPools.pm | 0leksii/aws-sdk-perl | b2132fe3c79a06fd15b6137e8a0eb628de722e0f | [
"Apache-2.0"
] | 164 | 2015-01-08T14:58:53.000Z | 2022-02-20T19:16:24.000Z | auto-lib/Paws/EC2/DescribeCoipPools.pm | 0leksii/aws-sdk-perl | b2132fe3c79a06fd15b6137e8a0eb628de722e0f | [
"Apache-2.0"
] | 348 | 2015-01-07T22:08:38.000Z | 2022-01-27T14:34:44.000Z | auto-lib/Paws/EC2/DescribeCoipPools.pm | 0leksii/aws-sdk-perl | b2132fe3c79a06fd15b6137e8a0eb628de722e0f | [
"Apache-2.0"
] | 87 | 2015-04-22T06:29:47.000Z | 2021-09-29T14:45:55.000Z |
package Paws::EC2::DescribeCoipPools;
  # Auto-generated request class for the EC2 DescribeCoipPools call; each
  # 'has' below is one request parameter (documented in the POD further down).
  use Moose;
  has DryRun => (is => 'ro', isa => 'Bool');
  # Serialized as repeated 'Filter' entries in the EC2 query protocol.
  has Filters => (is => 'ro', isa => 'ArrayRef[Paws::EC2::Filter]', traits => ['NameInRequest'], request_name => 'Filter' );
  has MaxResults => (is => 'ro', isa => 'Int');
  has NextToken => (is => 'ro', isa => 'Str');
  # Serialized as repeated 'PoolId' entries.
  has PoolIds => (is => 'ro', isa => 'ArrayRef[Str|Undef]', traits => ['NameInRequest'], request_name => 'PoolId' );
  use MooseX::ClassAttribute;
  # Class-level call metadata consumed by the Paws caller machinery.
  class_has _api_call => (isa => 'Str', is => 'ro', default => 'DescribeCoipPools');
  class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::EC2::DescribeCoipPoolsResult');
  class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::EC2::DescribeCoipPools - Arguments for method DescribeCoipPools on L<Paws::EC2>
=head1 DESCRIPTION
This class represents the parameters used for calling the method DescribeCoipPools on the
L<Amazon Elastic Compute Cloud|Paws::EC2> service. Use the attributes of this class
as arguments to method DescribeCoipPools.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to DescribeCoipPools.
=head1 SYNOPSIS
my $ec2 = Paws->service('EC2');
my $DescribeCoipPoolsResult = $ec2->DescribeCoipPools(
DryRun => 1, # OPTIONAL
Filters => [
{
Name => 'MyString', # OPTIONAL
Values => [
'MyString', ... # OPTIONAL
], # OPTIONAL
},
...
], # OPTIONAL
MaxResults => 1, # OPTIONAL
NextToken => 'MyString', # OPTIONAL
PoolIds => [ 'MyCoipPoolId', ... ], # OPTIONAL
);
# Results:
my $CoipPools = $DescribeCoipPoolsResult->CoipPools;
my $NextToken = $DescribeCoipPoolsResult->NextToken;
# Returns a L<Paws::EC2::DescribeCoipPoolsResult> object.
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instantiate the underlying object.
For the AWS API documentation, see L<https://docs.aws.amazon.com/goto/WebAPI/ec2/DescribeCoipPools>
=head1 ATTRIBUTES
=head2 DryRun => Bool
Checks whether you have the required permissions for the action,
without actually making the request, and provides an error response. If
you have the required permissions, the error response is
C<DryRunOperation>. Otherwise, it is C<UnauthorizedOperation>.
=head2 Filters => ArrayRef[L<Paws::EC2::Filter>]
The filters. The following are the possible values:
=over
=item *
C<coip-pool.pool-id>
=back
=over
=item *
C<coip-pool.local-gateway-route-table-id>
=back
=head2 MaxResults => Int
The maximum number of results to return with a single call. To retrieve
the remaining results, make another call with the returned C<nextToken>
value.
=head2 NextToken => Str
The token for the next page of results.
=head2 PoolIds => ArrayRef[Str|Undef]
The IDs of the address pools.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method DescribeCoipPools in L<Paws::EC2>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: L<https://github.com/pplu/aws-sdk-perl>
Please report bugs to: L<https://github.com/pplu/aws-sdk-perl/issues>
=cut
| 27.149606 | 249 | 0.672854 |
ed754ec1da00a4d947add353410343fd72b0142c | 13,090 | pm | Perl | local/lib/perl5/Mojolicious/Plugin/DefaultHelpers.pm | jkb78/extrajnm | 6890e38e15f85ea9c09a141aa14affad0b8e91e7 | [
"MIT"
] | null | null | null | local/lib/perl5/Mojolicious/Plugin/DefaultHelpers.pm | jkb78/extrajnm | 6890e38e15f85ea9c09a141aa14affad0b8e91e7 | [
"MIT"
] | null | null | null | local/lib/perl5/Mojolicious/Plugin/DefaultHelpers.pm | jkb78/extrajnm | 6890e38e15f85ea9c09a141aa14affad0b8e91e7 | [
"MIT"
] | null | null | null | package Mojolicious::Plugin::DefaultHelpers;
use Mojo::Base 'Mojolicious::Plugin';
use Mojo::ByteStream;
use Mojo::Collection;
use Mojo::Exception;
use Mojo::IOLoop;
use Mojo::Util qw(dumper hmac_sha1_sum steady_time);
# Install every default helper into the application. Called once by
# Mojolicious when this plugin is loaded.
sub register {
  my ($self, $app) = @_;
  # Controller alias helpers
  # (each simply forwards to the controller method of the same name)
  for my $name (qw(app flash param stash session url_for validation)) {
    $app->helper($name => sub { shift->$name(@_) });
  }
  # Stash key shortcuts (should not generate log messages)
  for my $name (qw(extends layout title)) {
    $app->helper($name => sub { shift->stash($name, @_) });
  }
  $app->helper(accepts => sub { $_[0]->app->renderer->accepts(@_) });
  # b/c wrap their arguments in Mojo::ByteStream / Mojo::Collection objects.
  $app->helper(b => sub { shift; Mojo::ByteStream->new(@_) });
  $app->helper(c => sub { shift; Mojo::Collection->new(@_) });
  $app->helper(config => sub { shift->app->config(@_) });
  # The three content helpers share _content; the flags are (append, replace).
  $app->helper(content => sub { _content(0, 0, @_) });
  $app->helper(content_for => sub { _content(1, 0, @_) });
  $app->helper(content_with => sub { _content(0, 1, @_) });
  # Helpers implemented as private subs in this file.
  $app->helper($_ => $self->can("_$_"))
    for qw(csrf_token current_route delay inactivity_timeout is_fresh url_with);
  $app->helper(dumper => sub { shift; dumper @_ });
  $app->helper(include => sub { shift->render_to_string(@_) });
  # Nested "reply.*" helpers.
  $app->helper("reply.$_" => $self->can("_$_")) for qw(asset static);
  $app->helper('reply.exception' => sub { _development('exception', @_) });
  $app->helper('reply.not_found' => sub { _development('not_found', @_) });
  $app->helper(ua => sub { shift->app->ua });
}
# Serve a Mojo::Asset through Mojolicious::Static (which handles ranges and
# conditional headers) and mark the response as rendered.
sub _asset {
  my ($c, @rest) = @_;
  $c->app->static->serve_asset($c, @rest);
  $c->rendered;
}
sub _block { ref $_[0] eq 'CODE' ? $_[0]() : $_[0] }
# Shared backend for the content/content_for/content_with helpers.
# $append appends to the named buffer, $replace overwrites it; with both
# flags false, new content only fills a buffer that is still empty — which
# is how layouts avoid clobbering content set by inner templates.
sub _content {
  my ($append, $replace, $c, $name, $content) = @_;
  $name ||= 'content';
  # Per-request store of named content buffers.
  my $hash = $c->stash->{'mojo.content'} ||= {};
  if (defined $content) {
    if ($append) { $hash->{$name} .= _block($content) }
    # Note: this if/else is independent of the append branch above; after an
    # append the buffer is defined, so the //= default becomes a no-op.
    if ($replace) { $hash->{$name} = _block($content) }
    else { $hash->{$name} //= _block($content) }
  }
  # Always a ByteStream, so templates treat the value as already rendered.
  return Mojo::ByteStream->new($hash->{$name} // '');
}
# Return the session's CSRF token, generating and caching one on first use.
# The token is an HMAC-SHA1 of pid, monotonic time and a random number,
# keyed with the application's first secret.
sub _csrf_token {
  my $c = shift;
  return $c->session->{csrf_token}
    ||= hmac_sha1_sum($$ . steady_time . rand 999, $c->app->secrets->[0]);
}
# With an argument: true if the current route has that name.
# Without: the current route's name, or '' when nothing has matched yet.
sub _current_route {
  my $c = shift;
  my $endpoint = $c->match->endpoint;
  return '' unless $endpoint;
  return $endpoint->name eq shift if @_;
  return $endpoint->name;
}
# Backend for the delay helper: run non-blocking steps via Mojo::IOLoop
# while protecting against an early connection close.
sub _delay {
  my $c = shift;
  # Disable automatic rendering and keep a strong reference to the
  # transaction so it survives until the steps have finished.
  my $tx = $c->render_later->tx;
  my $delay = Mojo::IOLoop->delay(@_);
  # An exception in any step renders the error page; dropping $tx then
  # releases the transaction reference.
  $delay->catch(sub { $c->helpers->reply->exception(pop) and undef $tx })->wait;
}
# Render an "exception" or "not_found" error page ($page selects which),
# logging the exception first. Backend for reply->exception/reply->not_found.
sub _development {
  my ($page, $c, $e) = @_;
  my $app = $c->app;
  $app->log->error($e = Mojo::Exception->new($e)) if $page eq 'exception';
  # Filtered stash snapshot
  # (internal "mojo.*" keys and undefined values are hidden from templates)
  my $stash = $c->stash;
  my %snapshot = map { $_ => $stash->{$_} }
    grep { !/^mojo\./ and defined $stash->{$_} } keys %$stash;
  # Render with fallbacks
  my $mode = $app->mode;
  my $renderer = $app->renderer;
  my $options = {
    exception => $page eq 'exception' ? $e : undef,
    format => $stash->{format} || $renderer->default_format,
    handler => undef,
    snapshot => \%snapshot,
    status => $page eq 'exception' ? 500 : 404,
    template => "$page.$mode"
  };
  # Bundled fallback templates ship with Mojolicious; development mode uses
  # the verbose "debug" page.
  my $bundled = 'mojo/' . ($mode eq 'development' ? 'debug' : $page);
  return $c if _fallbacks($c, $options, $page, $bundled);
  # Last resort: try the whole cascade again with HTML format.
  _fallbacks($c, {%$options, format => 'html'}, $page, $bundled);
  return $c;
}
# Try increasingly generic templates for an error page; returns true once
# one of them rendered, undef when nothing was suitable.
sub _fallbacks {
  my ($c, $options, $template, $bundled) = @_;
  # Mode specific template
  return 1 if $c->render_maybe(%$options);
  # Normal template
  return 1 if $c->render_maybe(%$options, template => $template);
  # Inline template
  # (HTML responses only; extends/layout are dropped so the bundled page
  # renders standalone)
  my $stash = $c->stash;
  return undef unless $stash->{format} eq 'html';
  delete @$stash{qw(extends layout)};
  return $c->render_maybe($bundled, %$options, handler => 'ep');
}
# Raise the inactivity timeout of the stream behind the current connection;
# quietly does nothing when the connection has no stream.
sub _inactivity_timeout {
  my ($c, $timeout) = @_;
  my $id = $c->tx->connection // '';
  if (my $stream = Mojo::IOLoop->stream($id)) { $stream->timeout($timeout) }
  return $c;
}
# Check request freshness (If-None-Match / If-Modified-Since) against the
# given etag/last_modified options via Mojolicious::Static.
sub _is_fresh {
  my $c = shift;
  my %options = @_;
  return $c->app->static->is_fresh($c, \%options);
}
# Serve a file from the static directories or DATA sections.
# Returns true on success; otherwise logs, renders a 404 and returns false.
sub _static {
  my ($c, $file) = @_;
  return !!$c->rendered if $c->app->static->serve($c, $file);
  $c->app->log->debug(qq{Static file "$file" not found});
  # reply->not_found returns the controller (true), so this is always false.
  return !$c->helpers->reply->not_found;
}
# Like url_for, but carry over the query parameters of the current request.
sub _url_with {
  my $c = shift;
  my $url = $c->url_for(@_);
  return $url->query($c->req->url->query->clone);
}
1;
=encoding utf8
=head1 NAME
Mojolicious::Plugin::DefaultHelpers - Default helpers plugin
=head1 SYNOPSIS
# Mojolicious
$app->plugin('DefaultHelpers');
# Mojolicious::Lite
plugin 'DefaultHelpers';
=head1 DESCRIPTION
L<Mojolicious::Plugin::DefaultHelpers> is a collection of helpers for
L<Mojolicious>.
This is a core plugin, which means it is always enabled, and its code is a
good example for learning how to build new plugins; you're welcome to fork it.
See L<Mojolicious::Plugins/"PLUGINS"> for a list of plugins that are available
by default.
=head1 HELPERS
L<Mojolicious::Plugin::DefaultHelpers> implements the following helpers.
=head2 accepts
my $formats = $c->accepts;
my $format = $c->accepts('html', 'json', 'txt');
Select best possible representation for resource from C<Accept> request header,
C<format> stash value or C<format> C<GET>/C<POST> parameter with
L<Mojolicious::Renderer/"accepts">, defaults to returning the first extension
if no preference could be detected.
# Check if JSON is acceptable
$c->render(json => {hello => 'world'}) if $c->accepts('json');
# Check if JSON was specifically requested
$c->render(json => {hello => 'world'}) if $c->accepts('', 'json');
# Unsupported representation
$c->render(data => '', status => 204)
unless my $format = $c->accepts('html', 'json');
# Detected representations to select from
my @formats = @{$c->accepts};
=head2 app
%= app->secrets->[0]
Alias for L<Mojolicious::Controller/"app">.
=head2 b
%= b('test 123')->b64_encode
Turn string into a L<Mojo::ByteStream> object.
=head2 c
%= c('a', 'b', 'c')->shuffle->join
Turn list into a L<Mojo::Collection> object.
=head2 config
%= config 'something'
Alias for L<Mojo/"config">.
=head2 content
%= content foo => begin
test
% end
%= content bar => 'Hello World!'
%= content 'foo'
%= content 'bar'
%= content
Store partial rendered content in a named buffer and retrieve it later,
defaults to retrieving the named buffer C<content>, which is commonly used for
the renderers C<layout> and C<extends> features. New content will be ignored if
the named buffer is already in use.
=head2 content_for
% content_for foo => begin
test
% end
%= content_for 'foo'
Same as L</"content">, but appends content to named buffers if they are already
in use.
% content_for message => begin
Hello
% end
% content_for message => begin
world!
% end
%= content 'message'
=head2 content_with
% content_with foo => begin
test
% end
%= content_with 'foo'
Same as L</"content">, but replaces content of named buffers if they are
already in use.
% content message => begin
world!
% end
% content_with message => begin
Hello <%= content 'message' %>
% end
%= content 'message'
=head2 csrf_token
%= csrf_token
Get CSRF token from L</"session">, and generate one if none exists.
=head2 current_route
% if (current_route 'login') {
Welcome to Mojolicious!
% }
%= current_route
Check or get name of current route.
=head2 delay
$c->delay(sub {...}, sub {...});
Disable automatic rendering and use L<Mojo::IOLoop/"delay"> to manage callbacks
and control the flow of events, which can help you avoid deep nested closures
that often result from continuation-passing style. Also keeps a reference to
L<Mojolicious::Controller/"tx"> in case the underlying connection gets closed
early, and calls L</"reply-E<gt>exception"> if an exception gets thrown in one
of the steps, breaking the chain.
# Longer version
$c->render_later;
my $tx = $c->tx;
my $delay = Mojo::IOLoop->delay(sub {...}, sub {...});
$delay->catch(sub { $c->reply->exception(pop) and undef $tx })->wait;
# Non-blocking request
$c->delay(
sub {
my $delay = shift;
$c->ua->get('http://mojolicious.org' => $delay->begin);
},
sub {
my ($delay, $tx) = @_;
$c->render(json => {title => $tx->res->dom->at('title')->text});
}
);
=head2 dumper
%= dumper {some => 'data'}
Dump a Perl data structure with L<Mojo::Util/"dumper">, very useful for
debugging.
=head2 extends
% extends 'blue';
% extends 'blue', title => 'Blue!';
Set C<extends> stash value, all additional key/value pairs get merged into the
L</"stash">.
=head2 flash
%= flash 'foo'
Alias for L<Mojolicious::Controller/"flash">.
=head2 inactivity_timeout
$c = $c->inactivity_timeout(3600);
Use L<Mojo::IOLoop/"stream"> to find the current connection and increase
timeout if possible.
# Longer version
Mojo::IOLoop->stream($c->tx->connection)->timeout(3600);
=head2 include
%= include 'menubar'
%= include 'menubar', format => 'txt'
Alias for L<Mojolicious::Controller/"render_to_string">.
=head2 is_fresh
my $bool = $c->is_fresh;
my $bool = $c->is_fresh(etag => 'abc');
my $bool = $c->is_fresh(last_modified => $epoch);
Check freshness of request by comparing the C<If-None-Match> and
C<If-Modified-Since> request headers to the C<ETag> and C<Last-Modified>
response headers with L<Mojolicious::Static/"is_fresh">.
# Add ETag/Last-Modified headers and check freshness before rendering
$c->is_fresh(etag => 'abc', last_modified => 1424985708)
? $c->rendered(304)
: $c->render(text => 'I ♥ Mojolicious!');
=head2 layout
% layout 'green';
% layout 'green', title => 'Green!';
Set C<layout> stash value, all additional key/value pairs get merged into the
L</"stash">.
=head2 param
%= param 'foo'
Alias for L<Mojolicious::Controller/"param">.
=head2 reply->asset
$c->reply->asset(Mojo::Asset::File->new);
Reply with a L<Mojo::Asset::File> or L<Mojo::Asset::Memory> object using
L<Mojolicious::Static/"serve_asset">, and perform content negotiation with
C<Range>, C<If-Modified-Since> and C<If-None-Match> headers.
# Serve asset with custom modification time
my $asset = Mojo::Asset::Memory->new;
$asset->add_chunk('Hello World!')->mtime(784111777);
$c->res->headers->content_type('text/plain');
$c->reply->asset($asset);
# Serve static file if it exists
if (my $asset = $c->app->static->file('images/logo.png')) {
$c->res->headers->content_type('image/png');
$c->reply->asset($asset);
}
=head2 reply->exception
$c = $c->reply->exception('Oops!');
$c = $c->reply->exception(Mojo::Exception->new('Oops!'));
Render the exception template C<exception.$mode.$format.*> or
C<exception.$format.*> and set the response status code to C<500>. Also sets
the stash values C<exception> to a L<Mojo::Exception> object and C<snapshot> to
a copy of the L</"stash"> for use in the templates.
=head2 reply->not_found
$c = $c->reply->not_found;
Render the not found template C<not_found.$mode.$format.*> or
C<not_found.$format.*> and set the response status code to C<404>. Also sets
the stash value C<snapshot> to a copy of the L</"stash"> for use in the
templates.
=head2 reply->static
my $bool = $c->reply->static('images/logo.png');
my $bool = $c->reply->static('../lib/MyApp.pm');
Reply with a static file using L<Mojolicious/"static">, usually from the
C<public> directories or C<DATA> sections of your application. Note that this
helper does not protect from traversing to parent directories.
# Serve file with a custom content type
$c->res->headers->content_type('application/myapp');
$c->reply->static('foo.txt');
=head2 session
%= session 'foo'
Alias for L<Mojolicious::Controller/"session">.
=head2 stash
%= stash 'foo'
% stash foo => 'bar';
Alias for L<Mojolicious::Controller/"stash">.
%= stash('name') // 'Somebody'
=head2 title
%= title
% title 'Welcome!';
% title 'Welcome!', foo => 'bar';
Get or set C<title> stash value, all additional key/value pairs get merged into
the L</"stash">.
=head2 ua
%= ua->get('mojolicious.org')->res->dom->at('title')->text
Alias for L<Mojo/"ua">.
=head2 url_for
%= url_for 'named', controller => 'bar', action => 'baz'
Alias for L<Mojolicious::Controller/"url_for">.
%= url_for('/index.html')->query(foo => 'bar')
=head2 url_with
%= url_with 'named', controller => 'bar', action => 'baz'
Does the same as L</"url_for">, but inherits query parameters from the current
request.
%= url_with->query([page => 2])
=head2 validation
%= validation->param('foo')
Alias for L<Mojolicious::Controller/"validation">.
=head1 METHODS
L<Mojolicious::Plugin::DefaultHelpers> inherits all methods from
L<Mojolicious::Plugin> and implements the following new ones.
=head2 register
$plugin->register(Mojolicious->new);
Register helpers in L<Mojolicious> application.
=head1 SEE ALSO
L<Mojolicious>, L<Mojolicious::Guides>, L<http://mojolicious.org>.
=cut
| 25.417476 | 80 | 0.651948 |
73ec9da654be69c6a473186df17969a5dcb13ca2 | 2,826 | pm | Perl | perl/vendor/lib/Moose/Meta/Attribute/Native/Trait/Counter.pm | ifleeyo180/VspriteMoodleWebsite | 38baa924829c83808d2c87d44740ff365927a646 | [
"Apache-2.0"
] | 2 | 2021-11-19T22:37:28.000Z | 2021-11-22T18:04:55.000Z | perl/vendor/lib/Moose/Meta/Attribute/Native/Trait/Counter.pm | ifleeyo180/VspriteMoodleWebsite | 38baa924829c83808d2c87d44740ff365927a646 | [
"Apache-2.0"
] | 6 | 2021-11-18T00:39:48.000Z | 2021-11-20T00:31:40.000Z | perl/vendor/lib/Moose/Meta/Attribute/Native/Trait/Counter.pm | ifleeyo180/VspriteMoodleWebsite | 38baa924829c83808d2c87d44740ff365927a646 | [
"Apache-2.0"
] | null | null | null | package Moose::Meta::Attribute::Native::Trait::Counter;
# Distribution version (kept in sync by the Moose release tooling).
our $VERSION = '2.2014';
use Moose::Role;
# Shared plumbing for all native-trait helper roles; this role only
# overrides the two type hooks below.
with 'Moose::Meta::Attribute::Native::Trait';
sub _helper_type { 'Num' }
sub _root_types { 'Num', 'Int' }
no Moose::Role;
1;
# ABSTRACT: Helper trait for Int attributes which represent counters
__END__
=pod
=encoding UTF-8
=head1 NAME
Moose::Meta::Attribute::Native::Trait::Counter - Helper trait for Int attributes which represent counters
=head1 VERSION
version 2.2014
=head1 SYNOPSIS
package MyHomePage;
use Moose;
has 'counter' => (
traits => ['Counter'],
is => 'ro',
isa => 'Num',
default => 0,
handles => {
inc_counter => 'inc',
dec_counter => 'dec',
reset_counter => 'reset',
},
);
my $page = MyHomePage->new();
$page->inc_counter; # same as $page->counter( $page->counter + 1 );
$page->dec_counter; # same as $page->counter( $page->counter - 1 );
my $count_by_twos = 2;
$page->inc_counter($count_by_twos);
=head1 DESCRIPTION
This trait provides native delegation methods for counters. A counter can be
any sort of number (integer or not). The delegation methods allow you to
increment, decrement, or reset the value.
=head1 DEFAULT TYPE
If you don't provide an C<isa> value for your attribute, it will default to
C<Num>.
=head1 PROVIDED METHODS
=over 4
=item * B<set($value)>
Sets the counter to the specified value and returns the new value.
This method requires a single argument.
=item * B<inc>
=item * B<inc($arg)>
Increases the attribute value by the amount of the argument, or by 1 if no
argument is given. This method returns the new value.
This method accepts a single argument.
=item * B<dec>
=item * B<dec($arg)>
Decreases the attribute value by the amount of the argument, or by 1 if no
argument is given. This method returns the new value.
This method accepts a single argument.
=item * B<reset>
Resets the value stored in this slot to its default value, and returns the new
value.
=back
=head1 BUGS
See L<Moose/BUGS> for details on reporting bugs.
=head1 AUTHORS
=over 4
=item *
Stevan Little <stevan@cpan.org>
=item *
Dave Rolsky <autarch@urth.org>
=item *
Jesse Luehrs <doy@cpan.org>
=item *
Shawn M Moore <sartak@cpan.org>
=item *
יובל קוג'מן (Yuval Kogman) <nothingmuch@woobling.org>
=item *
Karen Etheridge <ether@cpan.org>
=item *
Florian Ragwitz <rafl@debian.org>
=item *
Hans Dieter Pearcey <hdp@cpan.org>
=item *
Chris Prather <chris@prather.org>
=item *
Matt S Trout <mstrout@cpan.org>
=back
=head1 COPYRIGHT AND LICENSE
This software is copyright (c) 2006 by Infinity Interactive, Inc.
This is free software; you can redistribute it and/or modify it under
the same terms as the Perl 5 programming language system itself.
=cut
| 17.886076 | 105 | 0.692498 |
73f8a6d4374b4f14b3df69171274fc0f6119c23d | 1,959 | pm | Perl | lib/Mojolicious/Command/Generate/LiteApp.pm | rsp/mojo | bca70181288d4922a4d7806071f588005396e354 | [
"Artistic-2.0"
] | 1 | 2019-06-27T08:04:23.000Z | 2019-06-27T08:04:23.000Z | lib/Mojolicious/Command/Generate/LiteApp.pm | rsp/mojo | bca70181288d4922a4d7806071f588005396e354 | [
"Artistic-2.0"
] | null | null | null | lib/Mojolicious/Command/Generate/LiteApp.pm | rsp/mojo | bca70181288d4922a4d7806071f588005396e354 | [
"Artistic-2.0"
] | null | null | null | # Copyright (C) 2008-2009, Sebastian Riedel.
package Mojolicious::Command::Generate::LiteApp;
use strict;
use warnings;
use base 'Mojo::Command';
# One-line description shown in the generator's command list.
__PACKAGE__->attr(description => <<'EOF');
Generate a minimalistic web application.
EOF
# Usage message (interpolated heredoc: $0 is the program name).
__PACKAGE__->attr(usage => <<"EOF");
usage: $0 generate lite_app [NAME]
EOF
# If for any reason you're not completely satisfied, I hate you.
# Write the lite-app skeleton to $file (default 'myapp.pl') and make it
# executable.
sub run {
    my ($self, $file) = @_;
    $file ||= 'myapp.pl';

    # Use '%%'-style delimiters while generating so the generator's own tags
    # cannot clash with the Mojolicious::Lite tags inside the template.
    $self->renderer->tag_start('<%%');
    $self->renderer->tag_end('%%>');
    $self->renderer->line_start('%%');

    $self->render_to_rel_file('liteapp', $file);
    $self->chmod_file($file, 0744);
}
1;
__DATA__
@@ liteapp
%% my $class = shift;
#!/usr/bin/env perl
use Mojolicious::Lite;
get '/' => 'index';
get '/:groovy' => sub {
my $self = shift;
$self->render_text($self->stash('groovy'));
};
shagadelic;
<%%= '__DATA__' %%>
<%%= '@@ index.html.ep' %%>
% layout 'funky';
Yea baby!
<%%= '@@ layouts/funky.html.ep' %%>
<!doctype html><html>
<head><title>Funky!</title></head>
<body><%== content %></body>
</html>
__END__
=head1 NAME
Mojolicious::Command::Generate::LiteApp - Lite App Generator Command
=head1 SYNOPSIS
use Mojolicious::Command::Generate::LiteApp;
my $app = Mojolicious::Command::Generate::LiteApp->new;
$app->run(@ARGV);
=head1 DESCRIPTION
L<Mojolicious::Command::Generate::LiteApp> is a application generator.
=head1 ATTRIBUTES
L<Mojolicious::Command::Generate::LiteApp> inherits all attributes from
L<Mojo::Command> and implements the following new ones.
=head2 C<description>
my $description = $app->description;
$app = $app->description('Foo!');
=head2 C<usage>
my $usage = $app->usage;
$app = $app->usage('Foo!');
=head1 METHODS
L<Mojolicious::Command::Generate::LiteApp> inherits all methods from
L<Mojo::Command> and implements the following new ones.
=head2 C<run>
$app->run(@ARGV);
=cut
| 19.989796 | 71 | 0.64829 |
73e057fef9ec9dfd9d58d50832be643ff51fa737 | 1,969 | pm | Perl | perl/vendor/lib/DateTime/TimeZone/Africa/Gaborone.pm | DDMoReFoundation/PortableNonmem | 7e40b30887537f24fed12421935b58325ba2e5c3 | [
"BSD-3-Clause-Clear"
] | null | null | null | perl/vendor/lib/DateTime/TimeZone/Africa/Gaborone.pm | DDMoReFoundation/PortableNonmem | 7e40b30887537f24fed12421935b58325ba2e5c3 | [
"BSD-3-Clause-Clear"
] | null | null | null | perl/vendor/lib/DateTime/TimeZone/Africa/Gaborone.pm | DDMoReFoundation/PortableNonmem | 7e40b30887537f24fed12421935b58325ba2e5c3 | [
"BSD-3-Clause-Clear"
] | null | null | null | # This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from /tmp/_mzyzyR3wa/africa. Olson data version 2014g
#
# Do not edit this file directly.
#
# Generated time zone class for Africa/Gaborone (Olson database data).
package DateTime::TimeZone::Africa::Gaborone;
$DateTime::TimeZone::Africa::Gaborone::VERSION = '1.74';
use strict;
use Class::Singleton 1.03;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;
# Class::Singleton first so its instance() caching takes precedence.
@DateTime::TimeZone::Africa::Gaborone::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );
my $spans =
[
[
DateTime::TimeZone::NEG_INFINITY, # utc_start
59453388980, # utc_end 1884-12-31 22:16:20 (Wed)
DateTime::TimeZone::NEG_INFINITY, # local_start
59453395200, # local_end 1885-01-01 00:00:00 (Thu)
6220,
0,
'LMT',
],
[
59453388980, # utc_start 1884-12-31 22:16:20 (Wed)
60026394600, # utc_end 1903-02-28 22:30:00 (Sat)
59453394380, # local_start 1884-12-31 23:46:20 (Wed)
60026400000, # local_end 1903-03-01 00:00:00 (Sun)
5400,
0,
'SAST',
],
[
60026394600, # utc_start 1903-02-28 22:30:00 (Sat)
61306156800, # utc_end 1943-09-19 00:00:00 (Sun)
60026401800, # local_start 1903-03-01 00:30:00 (Sun)
61306164000, # local_end 1943-09-19 02:00:00 (Sun)
7200,
0,
'CAT',
],
[
61306156800, # utc_start 1943-09-19 00:00:00 (Sun)
61321878000, # utc_end 1944-03-18 23:00:00 (Sat)
61306167600, # local_start 1943-09-19 03:00:00 (Sun)
61321888800, # local_end 1944-03-19 02:00:00 (Sun)
10800,
1,
'CAST',
],
[
61321878000, # utc_start 1944-03-18 23:00:00 (Sat)
DateTime::TimeZone::INFINITY, # utc_end
61321885200, # local_start 1944-03-19 01:00:00 (Sun)
DateTime::TimeZone::INFINITY, # local_end
7200,
0,
'CAT',
],
];
# --- Metadata accessors for the generated zone data above ---

sub olson_version   { return '2014g' }
sub has_dst_changes { return 1 }
sub _max_year       { return 2024 }

# Build the singleton instance, handing the span table to the base class.
sub _new_instance {
    my $class = shift;
    return $class->_init( @_, spans => $spans );
}

1;
| 23.440476 | 90 | 0.67293 |
ed5462a09b0fc977fb60fa66d36de87c5bcafe4e | 812 | pm | Perl | lib/Wing/Instrument.pm | plainblack/Wing | cbc6221a0cc961855d6d7ffabaa7cad7d005a344 | [
"Artistic-2.0"
] | 13 | 2015-05-13T13:44:55.000Z | 2020-04-22T20:23:42.000Z | lib/Wing/Instrument.pm | plainblack/Wing | cbc6221a0cc961855d6d7ffabaa7cad7d005a344 | [
"Artistic-2.0"
] | 19 | 2015-09-19T21:23:05.000Z | 2020-11-14T04:34:38.000Z | lib/Wing/Instrument.pm | plainblack/Wing | cbc6221a0cc961855d6d7ffabaa7cad7d005a344 | [
"Artistic-2.0"
# Wing::Instrument - tiny stopwatch used to flag slow code paths.
package Wing::Instrument;
use Wing::Perl;
use Wing;
use Moo;
use Time::HiRes;
use JSON;
# Correlation id included in the log line; defaults to the epoch second
# at construction time.
has id => (
is => 'rw',
default => sub { time() },
);
# Minimum elapsed seconds before log() emits anything.
has sensitivity => (
is => 'rw',
default => 1,
);
# Accumulated { label => elapsed_seconds } snapshots, in call order.
has recordings => (
is => 'rw',
default => sub { [] },
);
# High-resolution [seconds, microseconds] timestamp taken at build time.
has start => (
is => 'ro',
default => sub { [Time::HiRes::gettimeofday] },
);
# Fractional seconds elapsed since this instrument was constructed.
sub interval {
    my ($self) = @_;
    return Time::HiRes::tv_interval( $self->start );
}

# Snapshot the current elapsed time under the given label.
sub record {
    my ( $self, $label ) = @_;
    return push @{ $self->recordings }, { $label => $self->interval };
}

# All recordings serialised as a JSON array of single-pair objects.
sub as_json {
    my ($self) = @_;
    return encode_json( $self->recordings );
}

# Warn with the full recording set, but only when the total run time
# exceeded the configured sensitivity threshold.
sub log {
    my ( $self, $label ) = @_;
    return unless $self->interval > $self->sensitivity;
    return Wing->log->warn(
        sprintf( 'INSTRUMENT: %s (%s) = %s', $label, $self->id, $self->as_json )
    );
}
1;
| 16.24 | 132 | 0.543103 |
ed2a9de2b6db79246c939d1dc17c643e2e9eaa02 | 1,065 | pm | Perl | lib/Google/Ads/GoogleAds/V8/Services/CurrencyConstantService/GetCurrencyConstantRequest.pm | googleads/google-ads-perl | 3ee6c09e11330fea1e6a0c9ee9f837e5e36d8177 | [
"Apache-2.0"
] | 19 | 2019-06-21T00:43:57.000Z | 2022-03-29T14:23:01.000Z | lib/Google/Ads/GoogleAds/V8/Services/CurrencyConstantService/GetCurrencyConstantRequest.pm | googleads/google-ads-perl | 3ee6c09e11330fea1e6a0c9ee9f837e5e36d8177 | [
"Apache-2.0"
] | 16 | 2020-03-04T07:44:53.000Z | 2021-12-15T23:06:23.000Z | lib/Google/Ads/GoogleAds/V8/Services/CurrencyConstantService/GetCurrencyConstantRequest.pm | googleads/google-ads-perl | 3ee6c09e11330fea1e6a0c9ee9f837e5e36d8177 | [
"Apache-2.0"
] | 9 | 2020-02-28T03:00:48.000Z | 2021-11-10T14:23:02.000Z | # Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V8::Services::CurrencyConstantService::GetCurrencyConstantRequest;

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

# Build a request object holding the currency constant's resource name.
sub new {
  my ( $class, $args ) = @_;

  my $self = { resourceName => $args->{resourceName} };

  # Drop unassigned fields so the serialised JSON payload stays concise.
  remove_unassigned_fields( $self, $args );

  return bless $self, $class;
}

1;
| 30.428571 | 98 | 0.743662 |
ed4d6b1a58b38898367a0a3da109dbf5cf1b8278 | 59 | pl | Perl | HelloWorld/ProgrammingPerl/ch14/ch14.072.pl | grtlinux/KieaPerl | ed8e1e3359ad0186d5cc4f7ed037e956d2f26c9e | [
"Apache-2.0"
] | null | null | null | HelloWorld/ProgrammingPerl/ch14/ch14.072.pl | grtlinux/KieaPerl | ed8e1e3359ad0186d5cc4f7ed037e956d2f26c9e | [
"Apache-2.0"
] | null | null | null | HelloWorld/ProgrammingPerl/ch14/ch14.072.pl | grtlinux/KieaPerl | ed8e1e3359ad0186d5cc4f7ed037e956d2f26c9e | [
"Apache-2.0"
# Tied-handle READLINE hook: delegate straight to the underlying handle.
# Returns one line in scalar context, all remaining lines in list context.
sub READLINE {
    my ($self) = @_;
    return readline($self);
}
| 11.8 | 21 | 0.542373 |
ed9c4d6d2f34072bf7de0698f0c19e648f19b9ef | 80 | pl | Perl | HelloWorld/ProgrammingPerl/ch14/ch14.089.pl | grtlinux/KieaPerl | ed8e1e3359ad0186d5cc4f7ed037e956d2f26c9e | [
"Apache-2.0"
] | null | null | null | HelloWorld/ProgrammingPerl/ch14/ch14.089.pl | grtlinux/KieaPerl | ed8e1e3359ad0186d5cc4f7ed037e956d2f26c9e | [
"Apache-2.0"
] | null | null | null | HelloWorld/ProgrammingPerl/ch14/ch14.089.pl | grtlinux/KieaPerl | ed8e1e3359ad0186d5cc4f7ed037e956d2f26c9e | [
"Apache-2.0"
# Tied-handle GETC hook: note the call via trace(), then read a single
# character from the underlying handle.
sub GETC {
    $_[0]->trace;    # NEW
    my ($self) = @_;
    return getc($self);
}
| 16 | 40 | 0.4 |
ed305a5b56f0334b166c0ee5e41aa98f72b0ae1e | 104 | t | Perl | t/00_compile.t | adokoy001/MapReduce-Framework-Simple | 2800167e6f1d2c4886bf2a24190b0334ef41c548 | [
"Artistic-1.0"
] | 1 | 2016-07-06T03:32:34.000Z | 2016-07-06T03:32:34.000Z | t/00_compile.t | adokoy001/MapReduce-Framework-Simple | 2800167e6f1d2c4886bf2a24190b0334ef41c548 | [
"Artistic-1.0"
] | null | null | null | t/00_compile.t | adokoy001/MapReduce-Framework-Simple | 2800167e6f1d2c4886bf2a24190b0334ef41c548 | [
"Artistic-1.0"
use strict;
use Test::More 0.98;

# Smoke test: the distribution's module must load without compile errors.
use_ok $_ for qw(
    MapReduce::Framework::Simple
);

done_testing;
| 10.4 | 32 | 0.682692 |
ed89da5a504229f47e823aec03422126a2546485 | 854 | pm | Perl | lib/Date/Manip/Offset/off346.pm | Helmholtz-HIPS/prosnap | 5286cda39276d5eda85d2ddb23b8ab83c5d4960c | [
"MIT"
] | 1 | 2021-11-26T17:29:56.000Z | 2021-11-26T17:29:56.000Z | lib/Date/Manip/Offset/off346.pm | Helmholtz-HIPS/prosnap | 5286cda39276d5eda85d2ddb23b8ab83c5d4960c | [
"MIT"
] | 1 | 2020-03-19T21:12:23.000Z | 2020-03-19T21:12:23.000Z | lib/Date/Manip/Offset/off346.pm | Helmholtz-HIPS/prosnap | 5286cda39276d5eda85d2ddb23b8ab83c5d4960c | [
"MIT"
package # split across two lines to hide this module from PAUSE indexing
Date::Manip::Offset::off346;
# Copyright (c) 2008-2016 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.

# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
# Generated on: Thu Dec 1 12:04:54 EST 2016
# Data version: tzdata2016j
# Code version: tzcode2016j

# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
# ftp://ftp.iana.org/tz

use strict;
use warnings;
require 5.010000;

our ($VERSION);
$VERSION='6.57';
END { undef $VERSION; }

our ($Offset,%Offset);
END {
   undef $Offset;
   undef %Offset;
}

# The UTC offset this module describes, and (keyed by index) the
# zoneinfo zones that observe it.
$Offset = '-05:32:11';

%Offset = (
   0 => [
      'america/detroit',
      ],
   );

1;
| 21.35 | 79 | 0.666276 |
ed614ca12771f2c0b56076ea5724d2fe66f21b8b | 2,845 | pl | Perl | docs/libcurl/symbols.pl | baruchsiach/curl | 7de2a4ce35dee7a9409ab78962ae12894a073d60 | [
"curl"
] | 12 | 2016-10-04T11:36:23.000Z | 2021-04-29T00:15:18.000Z | docs/libcurl/symbols.pl | baruchsiach/curl | 7de2a4ce35dee7a9409ab78962ae12894a073d60 | [
"curl"
] | 70 | 2020-04-24T21:56:29.000Z | 2020-05-18T16:48:04.000Z | docs/libcurl/symbols.pl | baruchsiach/curl | 7de2a4ce35dee7a9409ab78962ae12894a073d60 | [
"curl"
#!/usr/bin/env perl
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 2011 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
#
# Experience has shown that the symbols-in-versions file is very useful to
# applications that want to build with a wide range of libcurl versions.
# It is however easy to get it wrong and the source gets a bit messy with all
# the fixed numerical comparisons.
#
# The point of this script is to provide an easy-to-use macro for libcurl-
# using applications to do preprocessor checks for specific libcurl defines,
# and yet make the code clearly show what the macro is used for.
#
# Run this script and generate libcurl-symbols.h and then use that header in
# a fashion similar to:
#
# #include "libcurl-symbols.h"
#
# #if LIBCURL_HAS(CURLOPT_MUTE)
# has mute
# #else
# no mute
# #endif
#
#
# The symbol table lives next to this script; abort loudly if missing.
# (Bareword handle F kept: the parsing loop below reads from it.)
open F, '<', 'symbols-in-versions'
    or die "failed to open symbols-in-versions: $!";
# Convert a dotted version string ("7.64.1", or "7.9" with the patch
# level omitted) into the hex form libcurl uses for LIBCURL_VERSION_NUM,
# e.g. "0x074001".  Returns nothing for undef input or input without an
# X.Y version in it (the original warned on both of those cases).
sub str2num {
    my ($str) = @_;
    return unless defined $str;
    if ($str =~ /(\d+)\.(\d+)(?:\.(\d+))?/) {
        my ($major, $minor, $patch) = ($1, $2, defined $3 ? $3 : 0);
        return sprintf("0x%06x", $major << 16 | $minor << 8 | $patch);
    }
    return;
}
# Emit the generated header's preamble: the LIBCURL_HAS() test macro.
print <<EOS
#include <curl/curl.h>
#define LIBCURL_HAS(x) \\
(defined(x ## _FIRST) && (x ## _FIRST <= LIBCURL_VERSION_NUM) && \\
(!defined(x ## _LAST) || ( x ## _LAST >= LIBCURL_VERSION_NUM)))
EOS
;
# Emit one #define pair per symbol.  Input lines have one of three layouts:
#   NAME introduced
#   NAME introduced deprecated
#   NAME introduced deprecated removed
while (<F>) {
    if (/^(CURL[^ ]*)[ \t]*(.*)/) {
        my ($sym, $vers) = ($1, $2);

        my $intr;
        my $rm;
        my $dep;
        # is there removed info?
        if ($vers =~ /([\d.]+)[ \t-]+([\d.-]+)[ \t]+([\d.]+)/) {
            ($intr, $dep, $rm) = ($1, $2, $3);
        }
        # is it a dep-only line?
        elsif ($vers =~ /([\d.]+)[ \t-]+([\d.]+)/) {
            ($intr, $dep) = ($1, $2);
        }
        else {
            $intr = $vers;
        }

        my $inum = str2num($intr);

        print <<EOS
#define ${sym}_FIRST $inum /* Added in $intr */
EOS
;
        if ($rm) {
            # Only convert the removal version when one exists;
            # str2num(undef) would apply a regex to an undefined value.
            my $irm = str2num($rm);
            print <<EOS
#define ${sym}_LAST $irm /* Last featured in $rm */
EOS
;
        }
    }
}
| 28.168317 | 77 | 0.514236 |
ed8f40bd160c59d1ea8a8079901da2b78f191678 | 2,186 | pl | Perl | cgi-bin/phenome/annot_stats.pl | labroo2/sgn | c8a1a10e4ac2104d82c5fd2d986f1688d01b20be | [
"MIT"
] | 39 | 2015-02-03T15:47:55.000Z | 2022-03-23T13:34:05.000Z | cgi-bin/phenome/annot_stats.pl | labroo2/sgn | c8a1a10e4ac2104d82c5fd2d986f1688d01b20be | [
"MIT"
] | 2,491 | 2015-01-07T05:49:17.000Z | 2022-03-31T15:31:05.000Z | cgi-bin/phenome/annot_stats.pl | labroo2/sgn | c8a1a10e4ac2104d82c5fd2d986f1688d01b20be | [
"MIT"
#!/usr/bin/perl
use warnings;
use strict;
use CXGN::Page;
use CXGN::Login;
use CXGN::People;
use CXGN::Tools::WebImageCache;
use CXGN::Phenome::Locus;
use GD::Graph::lines;
use GD::Graph::linespoints;
use GD::Graph::area;
use GD::Graph::bars;
use CatalystX::GlobalContext '$c';
use CXGN::Page::FormattingHelpers qw/info_section_html
page_title_html
columnar_table_html
info_table_html
html_optional_show
html_alternate_show
/;
# --- CGI entry point: require a valid login session, then render the
# --- locus annotation statistics page.
my $dbh = $c->dbc->dbh;
my $logged_sp_person_id = CXGN::Login->new($dbh)->verify_session();
my $page = CXGN::Page->new("Phenome annotation stats","Naama");
$page->header();
my $form = CXGN::Page::WebForm->new();
# Rows of locus statistics used to draw the graph below.
my @lstats=CXGN::Phenome::Locus->get_locus_stats( $dbh );
my $image= get_graph(@lstats);
print info_section_html(title => 'Locus stats',
contents => $image,
);
$page->footer();
# Render the locus-count-over-time graph through the web image cache and
# return an HTML <img> tag for it.
#
# Arguments: the rows returned by CXGN::Phenome::Locus->get_locus_stats()
# (assumed to be GD::Graph-ready parallel data arrays - TODO confirm).
#
# Removed from the original: a nested loop over @stats with an empty body,
# plus the unused @bar_clr and $title locals - none had any effect.
sub get_graph {
    my @stats = @_;

    my $basepath     = $c->config->{"basepath"};
    my $tempfile_dir = $c->config->{"tempfiles_subdir"};

    my $cache = CXGN::Tools::WebImageCache->new;
    $cache->set_basedir($basepath);
    $cache->set_temp_dir( $tempfile_dir . "/temp_images" );
    $cache->set_key("Locus_num");
    #$cache->set_map_name("locusnum");
    $cache->set_force(1);    # always regenerate; the stats change between requests

    if ( !$cache->is_valid() ) {
        my $graph = GD::Graph::area->new( 600, 400 );
        $graph->set(
            x_label     => 'Date',
            y_label     => 'Number of loci',
            title       => 'SGN locus database',
            #cumulate =>'true',
            y_max_value => 7000,    # TODO: derive the ceiling from @stats instead of hard-coding
            #y_tick_number => 8,
            #y_label_skip => 2
        ) or die $graph->error;

        $graph->set_title_font('gdTinyFont');
        $cache->set_image_data( $graph->plot( \@stats )->png );
    }

    return $cache->get_image_tag();
}
| 24.840909 | 67 | 0.542086 |
ed4ccbcafc5b00f1e518f8c8c5e602f5e32af876 | 390 | t | Perl | t/02_fl_event_can_ok.t | sanko/Fltk.pm | 9b7755f0303322f72530e1435564de8d8db4941d | [
"Artistic-2.0"
] | 5 | 2016-05-06T14:46:52.000Z | 2018-04-02T18:47:31.000Z | t/02_fl_event_can_ok.t | sanko/Fltk.pm | 9b7755f0303322f72530e1435564de8d8db4941d | [
"Artistic-2.0"
] | 2 | 2016-05-13T02:29:23.000Z | 2016-09-26T20:30:55.000Z | t/02_fl_event_can_ok.t | sanko/Fltk.pm | 9b7755f0303322f72530e1435564de8d8db4941d | [
"Artistic-2.0"
use strict;
use warnings;
use Test::More 0.98;
use lib '../blib/', '../blib/lib', '../lib';
use Fl qw[:event];

# The event helpers must be callable as Fl class methods...
can_ok 'Fl', $_ for qw(wait run check ready belowmouse);

# ...and the :event import tag should export the same helpers into main.
can_ok 'main', $_ for qw(wait run check ready belowmouse);

done_testing;
| 18.571429 | 44 | 0.617949 |
ed7c5f45f4979c6a92db70efe549ff88e705a884 | 349 | pm | Perl | auto-lib/Azure/ApiManagement/OpenidConnectProviderContractProperties.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | null | null | null | auto-lib/Azure/ApiManagement/OpenidConnectProviderContractProperties.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | null | null | null | auto-lib/Azure/ApiManagement/OpenidConnectProviderContractProperties.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
package Azure::ApiManagement::OpenidConnectProviderContractProperties;

use Moose;

# Read-only string attributes mirroring the ARM OpenID Connect provider
# contract payload.
has 'clientId'         => ( is => 'ro', isa => 'Str' );
has 'clientSecret'     => ( is => 'ro', isa => 'Str' );
has 'description'      => ( is => 'ro', isa => 'Str' );
has 'metadataEndpoint' => ( is => 'ro', isa => 'Str' );
has 'name'             => ( is => 'ro', isa => 'Str' );

1;
| 34.9 | 70 | 0.541547 |
ed52a5b8423f8beaba44bd95074d7ae088c771ae | 2,436 | pm | Perl | lib/Flotsam/Model/User.pm | Aerdan/Flotsam | 258fbc20489ac5e749d0e9a93c5c5fc849a22dc8 | [
"Apache-2.0"
] | null | null | null | lib/Flotsam/Model/User.pm | Aerdan/Flotsam | 258fbc20489ac5e749d0e9a93c5c5fc849a22dc8 | [
"Apache-2.0"
] | null | null | null | lib/Flotsam/Model/User.pm | Aerdan/Flotsam | 258fbc20489ac5e749d0e9a93c5c5fc849a22dc8 | [
"Apache-2.0"
] | null | null | null | #
# Flotsam::Model::User - interactables for the users table
# Copyright 2021 Síle Ekaterin Aman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Flotsam::Model::User;
use Mojo::Base -base, -signatures;

# Mojo::Pg handle, injected by the application.
has 'pg';

use Authen::TOTP;    # NOTE(review): not used in this file - confirm before removing

# Insert a new user row; returns the generated user_id.
sub add ($self, $display, $email) {
  return $self->pg->db->insert('users', {display_name => $display, email => $email}, {returning => 'user_id'})->hash->{user_id};
}

# Mark MFA enrolment complete for the given email; returns the user_id.
sub add_mfa ($self, $email) {
  return $self->pg->db->update('users', {mfa_ok => 1}, {email => $email}, {returning => 'user_id'})->hash->{user_id};
}

# True when the user has completed MFA enrolment.
# NOTE(review): dies if no row matches $email - confirm callers guarantee existence.
sub is_mfa_ok ($self, $email) {
  return $self->pg->db->select('users', [qw(mfa_ok)], {email => $email})->hash->{mfa_ok};
}

# NOTE(review): dies if no row matches $user_id - confirm callers guarantee existence.
sub is_public ($self, $user_id) {
  return $self->pg->db->select('users', [qw(public)], {user_id => $user_id})->hash->{public};
}

# Return the user_id for this email, or undef when no such user exists.
# (Fixed: the if-block's opening brace was missing - a syntax error - and
# a missing row would have dereferenced undef.)
sub exists_by_email ($self, $email) {
  my $user = $self->pg->db->select('users', [qw(user_id email)], {email => $email})->hash;
  if ($user && $user->{email} eq $email) {
    return $user->{user_id};
  }
  return undef;
}

# Return the user_id when it exists, or undef.  (Guarded against a
# missing row before dereferencing, as above.)
sub exists_by_id ($self, $user_id) {
  my $user = $self->pg->db->select('users', [qw(user_id)], {user_id => $user_id})->hash;
  if ($user && $user->{user_id} == $user_id) {
    return $user->{user_id};
  }
  return undef;
}

# All users (arrayref of hashrefs) with their basic account columns.
sub list ($self) {
  return $self->pg->db->select('users', [qw(id display_name email email_ok mfa_ok)])->hashes->array;
}

sub delete_by_id ($self, $user_id) {
  return $self->pg->db->delete('users', {user_id => $user_id});
}

# Fetch one user row by id.  (Fixed: the body referenced $user_id while
# the parameter was named $id, an undeclared-variable error under strict.)
sub get ($self, $user_id) {
  return $self->pg->db->select('users', [qw(id display_name email email_ok mfa_ok)], {user_id => $user_id})->hash;
}

# Public-profile subset of a user's columns.
sub profile ($self, $user_id) {
  return $self->pg->db->select('users', [qw(display_name bio)], {user_id => $user_id})->hash;
}

# Full profile including account-status columns.  (Same $id/$user_id
# mismatch fixed as in get().)
sub full_profile ($self, $user_id) {
  return $self->pg->db->select('users', [qw(display_name email email_ok mfa_ok bio)], {user_id => $user_id})->hash;
}

1;
| 30.45 | 130 | 0.64532 |
ed55838b667ce29ed5a8803e4ea172bb4f0ff8c3 | 1,759 | pm | Perl | auto-lib/Azure/CustomerInsights/RoleAssignment.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | null | null | null | auto-lib/Azure/CustomerInsights/RoleAssignment.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | null | null | null | auto-lib/Azure/CustomerInsights/RoleAssignment.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
package Azure::CustomerInsights::RoleAssignment;
use Moose;

# Generated model for the Customer Insights RoleAssignment resource.
# Plain scalars are strings, localisable text maps are HashRef[Str], and
# each ResourceSetDescription attribute scopes the assignment to a set of
# resources of that kind.
has 'assignmentName' => (is => 'ro', isa => 'Str' );
has 'conflationPolicies' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
has 'connectors' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
has 'description' => (is => 'ro', isa => 'HashRef[Str]' );
has 'displayName' => (is => 'ro', isa => 'HashRef[Str]' );
has 'interactions' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
has 'kpis' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
has 'links' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
has 'principals' => (is => 'ro', isa => 'ArrayRef[Azure::CustomerInsights::AssignmentPrincipal]' );
has 'profiles' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
has 'provisioningState' => (is => 'ro', isa => 'Str' );
has 'relationshipLinks' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
has 'relationships' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
has 'role' => (is => 'ro', isa => 'Str' );
has 'roleAssignments' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
has 'sasPolicies' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
has 'segments' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
has 'tenantId' => (is => 'ro', isa => 'Str' );
has 'views' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
has 'widgetTypes' => (is => 'ro', isa => 'Azure::CustomerInsights::ResourceSetDescription' );
1;
| 70.36 | 103 | 0.626492 |
ed350cfcb1c42abd24f7a6ec7d1c15aaa3e483f0 | 1,586 | pm | Perl | lib/Bio/EnsEMBL/DataCheck/Checks/MetaKeyOptional.pm | ilavidas/ensembl-datacheck | 704aa5cd851e246bc250b4b77804d68cbd85527e | [
"Apache-2.0"
] | null | null | null | lib/Bio/EnsEMBL/DataCheck/Checks/MetaKeyOptional.pm | ilavidas/ensembl-datacheck | 704aa5cd851e246bc250b4b77804d68cbd85527e | [
"Apache-2.0"
] | null | null | null | lib/Bio/EnsEMBL/DataCheck/Checks/MetaKeyOptional.pm | ilavidas/ensembl-datacheck | 704aa5cd851e246bc250b4b77804d68cbd85527e | [
"Apache-2.0"
] | null | null | null | =head1 LICENSE
Copyright [2018-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package Bio::EnsEMBL::DataCheck::Checks::MetaKeyOptional;
use warnings;
use strict;
use Moose;
use Test::More;
extends 'Bio::EnsEMBL::DataCheck::DbCheck';
# Datacheck metadata consumed by the DbCheck framework.  'advisory'
# presumably means failures are reported without blocking - confirm in
# the DbCheck base class.
use constant {
NAME => 'MetaKeyOptional',
DESCRIPTION => 'Optional, but expected, meta keys exist',
GROUPS => ['core', 'meta'],
DATACHECK_TYPE => 'advisory',
DB_TYPES => ['core'],
TABLES => ['meta']
};
# For every optional-but-expected meta key, assert that at least one
# value is present in the core database's meta table.
sub tests {
    my ($self) = @_;

    my @optional = qw/
        assembly.accession
        assembly.coverage_depth
        assembly.date
        assembly.ucsc_alias
        genebuild.id
        genebuild.initial_release_date
        genebuild.last_geneset_update
        genebuild.projection_source_db
        species.common_name
        species.stable_id_prefix
    /;

    my $meta_container = $self->dba->get_adaptor("MetaContainer");
    for my $meta_key (@optional) {
        my $values = $meta_container->list_value_by_key($meta_key);
        ok( scalar @$values, "Value exists for meta_key $meta_key" );
    }
}
1;
| 24.4 | 72 | 0.705549 |
ed7e0343a48b143a01c1575a1544c487e6c3f996 | 677 | pm | Perl | local/lib/perl5/DBIx/Inspector/Column/Pg.pm | rosiro/wasarabi | 1016fb3a612fc4da3761c53c73473dd113a578d3 | [
"MIT"
] | null | null | null | local/lib/perl5/DBIx/Inspector/Column/Pg.pm | rosiro/wasarabi | 1016fb3a612fc4da3761c53c73473dd113a578d3 | [
"MIT"
] | null | null | null | local/lib/perl5/DBIx/Inspector/Column/Pg.pm | rosiro/wasarabi | 1016fb3a612fc4da3761c53c73473dd113a578d3 | [
"MIT"
package DBIx::Inspector::Column::Pg;
use strict;
use warnings;
use utf8;
use base qw/DBIx::Inspector::Column/;

# Build a column object from either a single hashref or a key/value
# list, normalising PostgreSQL's column-name quirks along the way.
sub new {
    my $class = shift;
    my %args = @_ == 1 ? %{ $_[0] } : @_;
    if ( exists $args{PK_NAME} ) {
        # primary_key_info() hands back COLUMN_NAME wrapped in double
        # quotes; strip them.  (Column names that are intentionally
        # quoted are not handled here.)
        $args{COLUMN_NAME} =~ s{^"(.+)"$}{$1};
    }
    else {
        # Other queries carry the unquoted name in pg_column instead.
        # (DBD::Pg 1.xx lacks that attribute, but that is ancient.)
        $args{COLUMN_NAME} = $args{PG_COLUMN};
    }
    return bless {%args}, $class;
}

1;
| 24.178571 | 71 | 0.579025 |
ed6caabd472522f39c03a681cd2a34e5f18d34c0 | 4,066 | t | Perl | t/document/module.t | Mattlk13/metacpan-api | bf98ed26fce5d36f776c080265414c614b080344 | [
"Artistic-1.0"
] | 1 | 2020-09-25T10:22:12.000Z | 2020-09-25T10:22:12.000Z | t/document/module.t | Mattlk13/metacpan-api | bf98ed26fce5d36f776c080265414c614b080344 | [
"Artistic-1.0"
] | null | null | null | t/document/module.t | Mattlk13/metacpan-api | bf98ed26fce5d36f776c080265414c614b080344 | [
"Artistic-1.0"
use strict;
use warnings;

# t/lib holds test-only support modules.
use lib 't/lib';

use MetaCPAN::Document::Module;
use Test::More;
# Exercise hide_from_pause(): names under No:: carry content that should
# be hidden from PAUSE indexing; Pkg:: variants must remain indexable.
subtest hide_from_pause => sub {
foreach my $test (
# The original:
[ 'No::CommentNL' => "package # hide\n No::CommentNL;" ],
# I'm not sure how PAUSE handles this one but currently we ignore it.
[ 'No::JustNL' => "package \n No::JustNL;" ],
# The good ones:
[ 'Pkg' => 'package Pkg;' ],
[ 'Pkg::Ver' => 'package Pkg::Ver v1.2.3;' ],
[ 'Pkg::Block' => 'package Pkg::Block { our $var = 1 }' ],
[
'Pkg::VerBlock' => 'package Pkg::VerBlock 1.203 { our $var = 1 }'
],
[ 'Pkg::SemiColons' => '; package Pkg::SemiColons; $var' ],
[ 'Pkg::InABlock' => '{ package Pkg::InABlock; $var }' ],
# This doesn't work as a BOM can only appear at the start of a file.
#[ 'Pkg::AfterABOM' => "\xef\xbb\xbfpackage Pkg::AfterABOM" ],
[ 'No::JustVar' => qq["\n\$package No::JustVar;\n"] ],
# This shouldn't match, but there's only so much we can do...
# we're not going to eval the whole file to figure it out.
[ 'Pkg::InsideStr' => qq["\n package Pkg::InsideStr;\n"] ],
[ 'No::Comment' => qq[# package No::Comment;\n] ],
[ 'No::Different' => q[package No::Different::Pkg;] ],
[ 'No::PkgWithNum' => qq["\npackage No::PkgWithNumv2.3;\n"] ],
[ 'No::CrazyChars' => qq["\npackage No::CrazyChars\[0\];\n"] ],
)
{
my ( $name, $content ) = @$test;
subtest $name => sub {
my $module = MetaCPAN::Document::Module->new( name => $name );
SKIP: {
skip( 'Perl 5.14 needed for package block compilation', 1 )
if $] < 5.014;
## no critic
ok eval "sub { no strict; $content }", "code compiles"
or diag $@;
}
# The No:: prefix of the test name encodes the expected verdict.
my ($hidden) = ( $name =~ /^No::/ ? 1 : 0 );
is $module->hide_from_pause($content), $hidden,
"hide_from_pause is $hidden";
};
}
};
# Exercise set_associated_pod()'s preference order: .pod beats .pm beats
# scripts, README.pod loses to everything, and lib/ paths win ties.
subtest set_associated_pod => sub {
test_associated_pod( 'Squirrel', [qw( lib/Squirrel.pod )],
'lib/Squirrel.pod' );
test_associated_pod( 'Squirrel::Face', [qw( lib/Face.pm )],
'lib/Face.pm' );
test_associated_pod( 'Squirrel::Face', [qw( bin/sf.pl )], 'bin/sf.pl' );
test_associated_pod( 'Squirrel::Face', [qw( bin/sf.pl lib/Face.pm )],
'lib/Face.pm', 'prefer .pm', );
test_associated_pod( 'Squirrel::Face',
[qw( bin/sf.pl lib/Face.pm lib/Squirrel.pod )],
'lib/Squirrel.pod', 'prefer .pod', );
test_associated_pod(
'Squirrel::Face', [qw( bin/sf.pl lib/Face.pm README.pod )],
'lib/Face.pm', 'prefer .pm to README.pod',
);
test_associated_pod(
'Squirrel::Face', [qw( Zoob.pod README.pod )],
'Zoob.pod', 'prefer any .pod to README.pod',
);
test_associated_pod(
'Squirrel::Face', [qw( narf.pl README.pod )],
'narf.pl', 'prefer .pl to README.pod',
);
# This goes along with the Pod::With::Generator tests.
# Since file order is not reliable (there) we can't get a reliable failure
# so test here so that we can ensure the order.
test_associated_pod(
'Foo::Bar', [qw( a/b.pm x/Foo/Bar.pm lib/Foo/Bar.pm )],
'lib/Foo/Bar.pm', 'prefer lib/ with matching name to other files',
);
};
# Minimal stand-in for the pod-file objects set_associated_pod() expects:
# exposes the path, its basename (name), and a recognisable full_path.
{
    package PodFile; ## no critic

    sub new  { return bless { path => $_[1] }, $_[0] }
    sub path { return $_[0]->{path} }

    # Basename of the path, computed once and cached.
    sub name {
        my ($self) = @_;
        $self->{name} ||= ( $self->{path} =~ m{([^\/]+)$} )[0];
        return $self->{name};
    }

    sub full_path { return '.../' . $_[0]->{path} }
}
# Helper: build a module document, offer it the candidate pod files, and
# check which one set_associated_pod() selects.
sub test_associated_pod {
    my ( $name, $files, $expected, $desc ) = @_;
    my $module = MetaCPAN::Document::Module->new( name => $name );
    my @candidates = map { PodFile->new($_) } @{$files};
    $module->set_associated_pod( { $name => \@candidates } );
    is $module->associated_pod, ".../$expected", $desc || 'Best pod file selected';
}
done_testing;
| 34.168067 | 78 | 0.522381 |
ed784496ab7550a90ab98fdbc778f6b6095e03f4 | 5,872 | pl | Perl | inc/conf/configure.pl | nkrios/ATSCAN | bbd1caec5eaa5814d11d60cec0639c8aa0c1242c | [
"MIT"
] | 1 | 2018-01-12T03:14:34.000Z | 2018-01-12T03:14:34.000Z | inc/conf/configure.pl | 99Scratch/ATSCAN | bbd1caec5eaa5814d11d60cec0639c8aa0c1242c | [
"MIT"
] | null | null | null | inc/conf/configure.pl | 99Scratch/ATSCAN | bbd1caec5eaa5814d11d60cec0639c8aa0c1242c | [
"MIT"
#!/usr/bin/perl
use strict;
use warnings;
use FindBin '$Bin';
use POSIX qw(strftime);
## Copy@right Alisam Technology see License.txt
#########################################################################################################################
## CLEAR
# Clear the terminal: ANSI full reset on Unix-likes, `cls` on Windows.
if ( $^O =~ /Win/ ) { system("cls"); } else { printf "\033c"; }
#########################################################################################################################
##
# Work queue and shared result buckets used across the tool.
our @TODO=();
our $validText;
# NOTE(review): "our ... if" makes the assignment conditional but the
# declaration unconditional - confirm that is the intent.
our @V_VALID=($validText) if defined $validText;
our (@buildArrays, @dorks, @payloads, @exploits, @data, @aTsearch, @aTscans, @aTtargets, @aTcopy, @ports, @motor, @motors, @systems)=();
# Signature lists (vulnerabilities, DB error strings, CMS fingerprints);
# presumably populated by inc/payloads.pl, required further down - confirm.
our (@V_LFI, @V_XSS, @V_AFD, @E_MICROSOFT, @E_ORACLE, @E_DB2, @E_ODBC, @E_POSTGRESQL, @E_SYBASE, @E_JBOSSWEB, @E_JDBC, @E_JAVA, @E_PHP, @E_ASP, @E_UNDEFINED, @E_MARIADB, @E_SHELL, @DT);
our (@V_WP, @V_JOOM, @V_TP, @V_SMF, @V_PhpBB, @V_VB, @V_MyBB, @V_CF, @V_DRP, @V_PN, @V_AT, @V_PHPN, @V_MD, @V_ACM, @V_SS, @V_MX, @V_XO, @V_OSC, @V_PSH, @V_BB2, @V_MG, @V_ZC, @V_CC5, @V_OCR);
# Version, path and endpoint settings: declared here, assigned below.
our ($Version, $logoVersion, $scriptUrl, $logUrl, $ipUrl, $conectUrl, $script, $scriptInstall, $script_bac, $scriptbash, $scriptv,
$scriptCompletion, $scriptComplInstall, $readme, $uplog, $replace, $replaceFROM, $server, $geoServer, @configuration);
#########################################################################################################################
# Release number plus the remote endpoints referenced elsewhere in the tool.
$Version="16.0.7";
$logoVersion="V $Version";
# Upstream copies of the script and its changelog on GitHub.
$scriptUrl="https://raw.githubusercontent.com/AlisamTechnology/ATSCAN/master/atscan.pl";
$logUrl="https://raw.githubusercontent.com/AlisamTechnology/ATSCAN/master/inc/conf/version.log";
# External-IP echo service and a basic connectivity probe target.
$ipUrl="http://dynupdate.no-ip.com/ip.php";
$conectUrl="http://www.bing.com";
# Exploit-search and IP-geolocation services.
$server="https://cxsecurity.com";
$geoServer="https://www.onyphe.io/api/geoloc";
#########################################################################################################################
## DEFINE SCAN LISTS
# Filesystem layout relative to the install directory ($Bin from FindBin).
$scriptv=$Bin."/inc/conf/version.log";
$script=$Bin."/atscan";
# Running from the .pl source rather than an installed copy.
if (substr($0, -3) eq '.pl') { $script.=".pl"; }
$scriptComplInstall="$Bin/inc/conf/atscan";
$scriptInstall="$Bin/install.sh";
$script_bac=$Bin."/version_bac.log";
$uplog="$Bin/inc/conf/uplog.log";
# System-wide install locations: executable, bash completion, docs.
$scriptbash="/usr/bin/atscan";
$scriptCompletion="/etc/bash_completion.d";
$readme="/usr/share/doc/atscan";
our $userSetting="$Bin/inc/conf/userSetting";
our $deskIcon="$Bin/inc/conf/desktop/";
our $deskIcoConf="/usr/share/applications";
# Remove any stale session cookie file from a previous run.
unlink "$Bin/inc/conf/user/cookies.txt" if -e "$Bin/inc/conf/user/cookies.txt";
#########################################################################################################################
## DATE
# Current wall-clock time ("HH:MM:SS") and a year+day-of-year stamp ("YYYYDDD").
our $date = strftime "%H:%M:%S", localtime;
our $fulldate = strftime "%Y%j", localtime;
#########################################################################################################################
## TIMER
# Print the current wall-clock time as "[HH:MM:SS]" (no trailing newline).
sub timer {
    my $stamp = strftime '%H:%M:%S', localtime;
    print "[$stamp]";
}
#########################################################################################################################
## DELETE CLEAR LISTS
# Drop the version backup file, presumably left by an earlier update run - confirm.
unlink $script_bac if -e $script_bac;
#########################################################################################################################
## USER CONFIGURATION
# Read the user's saved settings file (one setting per line) into the
# file-scoped @configuration list, skipping blank lines.
#
# Returns the (possibly empty) list of setting lines, newlines included.
# A missing or unreadable file simply yields an empty list, matching the
# original silent behaviour (which used an unchecked 2-arg bareword open).
sub get_configuration {
    @configuration = ();
    if ( -e $userSetting ) {
        # Three-arg open with a lexical handle; treat an unreadable file
        # the same as an absent one.
        open my $fh, '<', $userSetting or return @configuration;
        while ( my $set = <$fh> ) {
            # /^$/ also matches a bare "\n": $ anchors before the final newline.
            push @configuration, $set unless $set =~ /^$/;
        }
        close $fh;
    }
    return @configuration;
}
#########################################################################################################################
## LOAD PAYLOADS
require "$Bin/inc/payloads.pl";
## LOAD DIALOG TEXT
require "$Bin/inc/theme/dialog.pl";
#########################################################################################################################
## MAIL VALIDATION
our ($searchRegex, $regex);
our $V_EMAIL='((([A-Za-z0-9]+_+)|([A-Za-z0-9]+\-+)|([A-Za-z0-9]+\.+)|([A-Za-z0-9]+\++))*[A-Za-z0-9]+@((\w+\-+)|(\w+\.))*\w{1,63}\.[a-zA-Z]{2,6})';
our $V_IP='((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}))';
our $V_RANG='(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})\-(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})';
our $V_SEARCH='((https?|ftps?):\/\/([^>\"\<\'\(\)\#\s]*))';
our $V_REGEX=$regex;
our $S_REGEX=$searchRegex;
#########################################################################################################################
## ENGINE MOTORS
our $motor1="http://www.bing.com/search?q=MYDORK&first=MYNPAGES&FORM=PERE&cc=MYBROWSERLANG";
our $motor2="http://www.google.MYGOOGLEDOMAINE/search?q=MYDORK&start=MYNPAGES";
our $motor3="http://webcache.googleusercontent.com/search?q=cache:MYDORK/&start=MYNPAGES";
our $motor4="https://www.googleapis.com/customsearch/v1?key=MYAPIKEY&cx=MYCX&q=MYDORK&start=MYNPAGES";
our $motor5="http://www.sogou.com/web?query=MYDORK&page=MYNPAGES&ie=utf8";
our $motor6="https://www.exalead.com/search/web/results/?q=MYDORK&elements_per_page=10&start_index=MYNPAGES";
our $motor7="http://www.ask.com/web?q=MYDORK&page=MYNPAGES&qid=MYID";
our $motor8="http://www.yandex.com/search/?msid=MYMSID&text=MYDORK&lr=25402&p=MYNPAGES";
#########################################################################################################################
our @mrands=($motor1, $motor2, $motor3, $motor4, $motor5, $motor6, $motor7, $motor8);
our @allMotors=($motor1, $motor2, $motor3, $motor4, $motor5, $motor6, $motor7, $motor8);
our $pat2='inurl:|intitle:|intext:|allinurl:|index of:|site:(.*)\+|\+site:(.*)';
our $paylNote="[i] $DT[28]\n";
#########################################################################################################################
1;
| 49.762712 | 191 | 0.466621 |
ed68c849204f5254c882e685330f02c11d572d7e | 6,794 | pl | Perl | virtual-server/modify-plan.pl | diogovi/responsivebacula | 9a584500074a04a3147a30b1249249d6113afdab | [
"BSD-3-Clause"
] | null | null | null | virtual-server/modify-plan.pl | diogovi/responsivebacula | 9a584500074a04a3147a30b1249249d6113afdab | [
"BSD-3-Clause"
] | null | null | null | virtual-server/modify-plan.pl | diogovi/responsivebacula | 9a584500074a04a3147a30b1249249d6113afdab | [
"BSD-3-Clause"
] | 3 | 2016-09-23T03:42:35.000Z | 2020-11-06T11:01:34.000Z | #!/usr/bin/perl
=head1 modify-plan.pl
Modifies an existing account plan for use with virtual servers.
This command allows you to modify the limits for an existing account plan,
and optionally apply it to all virtual servers currently on that plan
(with the C<--apply> flag). Its parameters are exactly the same as
C<create-plan>, so for full documentation you should refer to that command.
To change the name of a plan, use the C<--new-name> flag followed by the
new name of your choice.
=cut
package virtual_server;
if (!$module_name) {
$main::no_acl_check++;
$ENV{'WEBMIN_CONFIG'} ||= "/etc/webmin";
$ENV{'WEBMIN_VAR'} ||= "/var/webmin";
if ($0 =~ /^(.*)\/[^\/]+$/) {
chdir($pwd = $1);
}
else {
chop($pwd = `pwd`);
}
$0 = "$pwd/modify-plan.pl";
require './virtual-server-lib.pl';
$< == 0 || die "modify-plan.pl must be run as root";
}
@OLDARGV = @ARGV;
# Parse command-line args
$newplan = { };
while(@ARGV > 0) {
local $a = shift(@ARGV);
if ($a eq "--name") {
$planname = shift(@ARGV);
}
elsif ($a eq "--id") {
$planid = shift(@ARGV);
}
elsif ($a eq "--new-name") {
$newplan->{'name'} = shift(@ARGV);
}
elsif ($a eq "--owner") {
$newplan->{'owner'} = shift(@ARGV);
&get_reseller($plan->{'owner'}) ||
&usage("Reseller owner $plan->{'owner'} does not exisst");
}
elsif ($a eq "--no-owner") {
$newplan->{'owner'} = '';
}
elsif ($a eq "--quota" || $a eq "--admin-quota") {
# Some quota
$q = shift(@ARGV);
$q =~ /^\d+$/ ||
&usage("$a must be followed by a quota in blocks");
$f = $a eq "--quota" ? "quota" : "uquota";
$newplan->{$f} = $q;
}
elsif ($a eq "--no-quota" || $a eq "--no-admin-quota") {
# Unlimited quota
$f = $a eq "--no-quota" ? "quota" : "uquota";
$newplan->{$f} = '';
}
elsif ($a =~ /^\-\-max\-(\S+)$/ && &indexof($1, @plan_maxes) >= 0) {
# Some limit on domains / etc
$l = $1; $q = shift(@ARGV);
$q =~ /^\d+$/ ||
&usage("$a must be followed by a numeric limit");
$newplan->{$l.'limit'} = $q;
}
elsif ($a =~ /^\-\-no\-max\-(\S+)$/ && &indexof($1, @plan_maxes) >= 0) {
# Removing limit on domains / etc
$l = $1;
$newplan->{$l.'limit'} = '';
}
elsif ($a =~ /^\-\-(\S+)$/ &&
&indexof($1, @plan_restrictions) >= 0) {
# No db name or other binary limit
$newplan->{$1} = 1;
}
elsif ($a =~ /^\-\-no\-(\S+)$/ &&
&indexof($1, @plan_restrictions) >= 0) {
# Disabel no db name or other binary limit
$newplan->{$1} = 0;
}
elsif ($a eq "--features") {
# Allowed features
@fl = split(/\s+/, shift(@ARGV));
@allf = ( @opt_features, "virt", &list_feature_plugins() );
foreach $f (@fl) {
&indexof($f, @allf) >= 0 ||
&usage("Unknown feature $f - allowed options ".
"are : ".join(" ", @allf));
}
$newplan->{'featurelimits'} = join(" ", @fl);
}
elsif ($a eq "--auto-features") {
# Allow all features
$newplan->{'featurelimits'} = '';
}
elsif ($a eq "--no-features") {
# Remove all features
$newplan->{'featurelimits'} = 'none';
}
elsif ($a eq "--capabilities") {
# Edit capabilities
@cl = split(/\s+/, shift(@ARGV));
foreach $c (@cl) {
&indexof($c, @edit_limits) >=0 ||
&usage("Unknown capability $c - allowed options ".
"are : ".join(" ", @edit_limits));
}
$newplan->{'capabilities'} = join(" ", @cl);
}
elsif ($a eq "--auto-capabilities") {
# Allow all capabilities
$newplan->{'capabilities'} = '';
}
elsif ($a eq "--scripts") {
# Allowed scripts
@sc = split(/\s+/, shift(@ARGV));
foreach $s (@sc) {
&get_script($s) ||
&usage("Unknown script code $s");
}
$newplan->{'scripts'} = join(" ", @sc);
}
elsif ($a eq "--all-scripts") {
# Allow all scripts
$newplan->{'scripts'} = '';
}
elsif ($a eq "--no-resellers") {
# Not for any resellers
$newplan->{'resellers'} = 'none';
}
elsif ($a eq "--resellers") {
# Only for listed resellers
@rl = split(/\s+/, shift(@ARGV));
foreach $r (@rl) {
&get_reseller($r) || &usage("Unknown reseller $r");
}
$newplan->{'resellers'} = join(" ", @rl);
}
elsif ($a eq "--all-resellers") {
# For all resellers
$newplan->{'resellers'} = '';
}
elsif ($a eq "--apply") {
# Apply to domains
$applyplan = 1;
}
elsif ($a eq "--multiline") {
$multiline = 1;
}
else {
&usage("Unknown parameter $a");
}
}
# Get the plan
if (defined($planid)) {
$plan = &get_plan($planid);
$plan || &usage("No plan with ID $planid was found");
}
elsif (defined($planname)) {
($plan) = grep { $_->{'name'} eq $planname } &list_plans();
$plan || &usage("No plan with name $planname was found");
}
else {
&usage("Either the --id or --name parameter must be given");
}
# Check for name clash
if ($newplan->{'name'}) {
($clash) = grep { lc($_->{'name'}) eq lc($newplan->{'name'}) &&
$_->{'id'} ne $plan->{'id'} } &list_plans();
$clash && &usage("A plan named $newplan->{'name'} already exists");
}
# Merge in changes from command line
foreach $k (keys %$newplan) {
$plan->{$k} = $newplan->{$k};
}
# Save it
&save_plan($plan);
print "Modified plan $plan->{'name'} with ID $plan->{'id'}\n";
# Apply the change
if ($applyplan) {
$count = 0;
&set_all_null_print();
foreach my $d (&get_domain_by("plan", $plan->{'id'})) {
next if ($d->{'parent'});
local $oldd = { %$d };
&set_limits_from_plan($d, $plan);
&set_featurelimits_from_plan($d, $plan);
&set_capabilities_from_plan($d, $plan);
foreach $f (&domain_features($d), &list_feature_plugins()) {
&call_feature_func($f, $d, $oldd);
}
&save_domain($d);
$count++;
}
&run_post_actions();
print "Applied to $count virtual servers\n";
}
&virtualmin_api_log(\@OLDARGV);
sub usage
{
print $_[0],"\n\n" if ($_[0]);
print "Updates an existing Virtualmin account plan with the given limits.\n";
print "\n";
print "virtualmin modify-plan --name plan-name | --id number\n";
print " [--new-name plan-name]\n";
print " [--owner reseller | --no-owner]\n";
print " [--quota blocks | --no-quota]\n";
print " [--admin-quota blocks | --no-admin-quota]\n";
foreach $l (@plan_maxes) {
print " [--max-$l limit | --no-max-$l]\n";
}
foreach $r (@plan_restrictions) {
print " [--$r | --no-$r]\n";
}
print " [--features \"web dns mail ...\" |\n";
print " --auto-features | --no-features]\n";
print " [--capabilities \"domain users aliases ...\" |\n";
print " --auto-capabilities]\n";
if (defined(&list_resellers)) {
print " [--no-resellers | --resellers \"name name..\" |\n";
print " --all-resellers]\n";
}
print " [--apply]\n";
exit(1);
}
| 27.067729 | 81 | 0.543273 |
ed8658936293b1321dbe7fc49a512a868ad6c8dd | 2,488 | perl | Perl | imcover.perl | andk/imager | 91aa1a92653c3d6702840519f0f068b9069fb231 | [
"Adobe-2006",
"Adobe-Glyph"
] | 11 | 2015-01-31T00:18:26.000Z | 2021-12-23T15:00:04.000Z | imcover.perl | andk/imager | 91aa1a92653c3d6702840519f0f068b9069fb231 | [
"Adobe-2006",
"Adobe-Glyph"
] | 465 | 2020-12-20T04:18:47.000Z | 2022-01-25T01:38:28.000Z | imcover.perl | andk/imager | 91aa1a92653c3d6702840519f0f068b9069fb231 | [
"Adobe-2006",
"Adobe-Glyph"
] | 3 | 2016-10-10T09:59:39.000Z | 2021-10-03T23:50:09.000Z | #!perl -w
use strict;
use Config;
use ExtUtils::Manifest 'maniread';
use Cwd;
use Getopt::Long;
my @tests;
my $verbose;
my $nodc;
my $make_opts = "";
my $regen_only;
GetOptions("t|test=s" => \@tests,
"m=s" => \$make_opts,
"n" => \$nodc,
"v" => \$verbose,
"r" => \$regen_only)
or die;
my $do_build = !$regen_only;
if ($do_build) {
my $make = $Config{make};
# if there's a way to make with profiling for a recursive build like
# Imager I don't see how
if (-f 'Makefile') {
run("$make clean");
}
run("cover -delete");
run("$^X Makefile.PL --coverage @ARGV")
and die "Makefile.PL failed\n";
run("$make $make_opts 'OTHERLDFLAGS=-ftest-coverage -fprofile-arcs'")
and die "build failed\n";
{
local $ENV{DEVEL_COVER_OPTIONS} = "-db," . getcwd() . "/cover_db,-coverage,statement,branch,condition,subroutine";
my $makecmd = "$make test";
$makecmd .= " TEST_VERBOSE=1" if $verbose;
$makecmd .= " HARNESS_PERL_SWITCHES=-MDevel::Cover" unless $nodc;
if (@tests) {
$makecmd .= " TEST_FILES='@tests'";
}
run($makecmd)
and die "Test failed\n";
}
}
# build gcov files
my $mani = maniread();
# split by directory
my %paths;
for my $filename (keys %$mani) {
next unless $filename =~ /\.(xs|c|im)$/;
(my $gcda = $filename) =~ s/\.\w+$/.gcda/;
next unless -f $gcda;
if ($filename =~ m!^(\w+)/(\w+\.\w+)$!) {
push @{$paths{$1}}, $2;
}
else {
push @{$paths{''}}, $filename;
}
if ($filename =~ s/\.(xs|im)$/.c/) {
if ($filename =~ m!^(\w+)/(\w+\.\w+)$!) {
push @{$paths{$1}}, $2;
}
else {
push @{$paths{''}}, $filename;
}
}
}
my $gcov2perl = $Config{sitebin} . "/gcov2perl";
for my $path (keys %paths) {
if ($path) {
run("cd $path ; gcov -abc @{$paths{$path}} ; cd ..");
}
else {
run("gcov -abc @{$paths{$path}}");
}
my $dir = $path ? $path : '.';
for my $file (@{$paths{$path}}) {
run("$gcov2perl $dir/$file.gcov");
}
}
my @dbs = "cover_db", map "$_/cover_db", grep $_, keys %paths;
# we already ran gcov
run("cover -nogcov -ignore_re '^t/'");
sub run {
my $cmd = shift;
print "Running: $cmd\n" if $verbose;
return system $cmd;
}
=head1 NAME
imcover.perl - perform C and perl coverage testing for Imager
=head1 SYNOPSIS
perl imcover.perl [-m=...][-t=...][-n][-v][-r] -- ... Makefile.PL options
=head1 DESCRIPTION
Builds Imager with the C< -ftest-coverage -fprofile-arcs > gcc options
and then runs perl's tests.
=cut
| 22.414414 | 118 | 0.571543 |
ed74d27e2a4f69d17d94f5e3ab34f2f8b8992c72 | 13,519 | pm | Perl | lib/EzmaxApi/Object/EzsigntemplateformfieldgroupRequestCompound.pm | ezmaxinc/eZmax-SDK-perl | 3de20235136371b946247d2aed9e5e5704a4051c | [
"MIT"
] | null | null | null | lib/EzmaxApi/Object/EzsigntemplateformfieldgroupRequestCompound.pm | ezmaxinc/eZmax-SDK-perl | 3de20235136371b946247d2aed9e5e5704a4051c | [
"MIT"
] | null | null | null | lib/EzmaxApi/Object/EzsigntemplateformfieldgroupRequestCompound.pm | ezmaxinc/eZmax-SDK-perl | 3de20235136371b946247d2aed9e5e5704a4051c | [
"MIT"
] | null | null | null | =begin comment
eZmax API Definition (Full)
This API expose all the functionnalities for the eZmax and eZsign applications.
The version of the OpenAPI document: 1.1.7
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
=end comment
=cut
#
# NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
# Do not edit the class manually.
# Ref: https://openapi-generator.tech
#
package EzmaxApi::Object::EzsigntemplateformfieldgroupRequestCompound;
require 5.6.0;
use strict;
use warnings;
use utf8;
use JSON qw(decode_json);
use Data::Dumper;
use Module::Runtime qw(use_module);
use Log::Any qw($log);
use Date::Parse;
use DateTime;
use EzmaxApi::Object::CustomDropdownElementRequestCompound;
use EzmaxApi::Object::EzsigntemplateformfieldRequestCompound;
use EzmaxApi::Object::EzsigntemplateformfieldgroupRequest;
use EzmaxApi::Object::EzsigntemplateformfieldgroupRequestCompoundAllOf;
use EzmaxApi::Object::EzsigntemplateformfieldgroupsignerRequestCompound;
use EzmaxApi::Object::FieldEEzsigntemplateformfieldgroupSignerrequirement;
use EzmaxApi::Object::FieldEEzsigntemplateformfieldgroupTooltipposition;
use EzmaxApi::Object::FieldEEzsigntemplateformfieldgroupType;
use base ("Class::Accessor", "Class::Data::Inheritable");
#
#A Ezsigntemplateformfieldgroup Object and children
#
# NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). Do not edit the class manually.
# REF: https://openapi-generator.tech
#
=begin comment
eZmax API Definition (Full)
This API expose all the functionnalities for the eZmax and eZsign applications.
The version of the OpenAPI document: 1.1.7
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
=end comment
=cut
#
# NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
# Do not edit the class manually.
# Ref: https://openapi-generator.tech
#
__PACKAGE__->mk_classdata('attribute_map' => {});
__PACKAGE__->mk_classdata('openapi_types' => {});
__PACKAGE__->mk_classdata('method_documentation' => {});
__PACKAGE__->mk_classdata('class_documentation' => {});
# new plain object
sub new {
my ($class, %args) = @_;
my $self = bless {}, $class;
$self->init(%args);
return $self;
}
# initialize the object
sub init
{
my ($self, %args) = @_;
foreach my $attribute (keys %{$self->attribute_map}) {
my $args_key = $self->attribute_map->{$attribute};
$self->$attribute( $args{ $args_key } );
}
}
# return perl hash
sub to_hash {
my $self = shift;
my $_hash = decode_json(JSON->new->convert_blessed->encode($self));
return $_hash;
}
# used by JSON for serialization
sub TO_JSON {
my $self = shift;
my $_data = {};
foreach my $_key (keys %{$self->attribute_map}) {
if (defined $self->{$_key}) {
$_data->{$self->attribute_map->{$_key}} = $self->{$_key};
}
}
return $_data;
}
# from Perl hashref
sub from_hash {
my ($self, $hash) = @_;
# loop through attributes and use openapi_types to deserialize the data
while ( my ($_key, $_type) = each %{$self->openapi_types} ) {
my $_json_attribute = $self->attribute_map->{$_key};
if ($_type =~ /^array\[(.+)\]$/i) { # array
my $_subclass = $1;
my @_array = ();
foreach my $_element (@{$hash->{$_json_attribute}}) {
push @_array, $self->_deserialize($_subclass, $_element);
}
$self->{$_key} = \@_array;
} elsif ($_type =~ /^hash\[string,(.+)\]$/i) { # hash
my $_subclass = $1;
my %_hash = ();
while (my($_key, $_element) = each %{$hash->{$_json_attribute}}) {
$_hash{$_key} = $self->_deserialize($_subclass, $_element);
}
$self->{$_key} = \%_hash;
} elsif (exists $hash->{$_json_attribute}) { #hash(model), primitive, datetime
$self->{$_key} = $self->_deserialize($_type, $hash->{$_json_attribute});
} else {
$log->debugf("Warning: %s (%s) does not exist in input hash\n", $_key, $_json_attribute);
}
}
return $self;
}
# deserialize non-array data
sub _deserialize {
my ($self, $type, $data) = @_;
$log->debugf("deserializing %s with %s",Dumper($data), $type);
if ($type eq 'DateTime') {
return DateTime->from_epoch(epoch => str2time($data));
} elsif ( grep( /^$type$/, ('int', 'double', 'string', 'boolean'))) {
return $data;
} else { # hash(model)
my $_instance = eval "EzmaxApi::Object::$type->new()";
return $_instance->from_hash($data);
}
}
__PACKAGE__->class_documentation({description => 'A Ezsigntemplateformfieldgroup Object and children',
class => 'EzsigntemplateformfieldgroupRequestCompound',
required => [], # TODO
} );
__PACKAGE__->method_documentation({
'pki_ezsigntemplateformfieldgroup_id' => {
datatype => 'int',
base_name => 'pkiEzsigntemplateformfieldgroupID',
description => 'The unique ID of the Ezsigntemplateformfieldgroup',
format => '',
read_only => '',
},
'fki_ezsigntemplatedocument_id' => {
datatype => 'int',
base_name => 'fkiEzsigntemplatedocumentID',
description => 'The unique ID of the Ezsigntemplatedocument',
format => '',
read_only => '',
},
'e_ezsigntemplateformfieldgroup_type' => {
datatype => 'FieldEEzsigntemplateformfieldgroupType',
base_name => 'eEzsigntemplateformfieldgroupType',
description => '',
format => '',
read_only => '',
},
'e_ezsigntemplateformfieldgroup_signerrequirement' => {
datatype => 'FieldEEzsigntemplateformfieldgroupSignerrequirement',
base_name => 'eEzsigntemplateformfieldgroupSignerrequirement',
description => '',
format => '',
read_only => '',
},
's_ezsigntemplateformfieldgroup_label' => {
datatype => 'string',
base_name => 'sEzsigntemplateformfieldgroupLabel',
description => 'The Label for the Ezsigntemplateformfieldgroup',
format => '',
read_only => '',
},
'i_ezsigntemplateformfieldgroup_step' => {
datatype => 'int',
base_name => 'iEzsigntemplateformfieldgroupStep',
description => 'The step when the Ezsigntemplatesigner will be invited to fill the form fields',
format => '',
read_only => '',
},
's_ezsigntemplateformfieldgroup_defaultvalue' => {
datatype => 'string',
base_name => 'sEzsigntemplateformfieldgroupDefaultvalue',
description => 'The default value for the Ezsigntemplateformfieldgroup',
format => '',
read_only => '',
},
'i_ezsigntemplateformfieldgroup_filledmin' => {
datatype => 'int',
base_name => 'iEzsigntemplateformfieldgroupFilledmin',
description => 'The minimum number of Ezsigntemplateformfield that must be filled in the Ezsigntemplateformfieldgroup',
format => '',
read_only => '',
},
'i_ezsigntemplateformfieldgroup_filledmax' => {
datatype => 'int',
base_name => 'iEzsigntemplateformfieldgroupFilledmax',
description => 'The maximum number of Ezsigntemplateformfield that must be filled in the Ezsigntemplateformfieldgroup',
format => '',
read_only => '',
},
'b_ezsigntemplateformfieldgroup_readonly' => {
datatype => 'boolean',
base_name => 'bEzsigntemplateformfieldgroupReadonly',
description => 'Whether the Ezsigntemplateformfieldgroup is read only or not.',
format => '',
read_only => '',
},
'i_ezsigntemplateformfieldgroup_maxlength' => {
datatype => 'int',
base_name => 'iEzsigntemplateformfieldgroupMaxlength',
description => 'The maximum length for the value in the Ezsigntemplateformfieldgroup This can only be set if eEzsigntemplateformfieldgroupType is **Text** or **Textarea**',
format => '',
read_only => '',
},
'b_ezsigntemplateformfieldgroup_encrypted' => {
datatype => 'boolean',
base_name => 'bEzsigntemplateformfieldgroupEncrypted',
description => 'Whether the Ezsigntemplateformfieldgroup is encrypted in the database or not. Encrypted values are not displayed on the Ezsigndocument. This can only be set if eEzsigntemplateformfieldgroupType is **Text** or **Textarea**',
format => '',
read_only => '',
},
's_ezsigntemplateformfieldgroup_regexp' => {
datatype => 'string',
base_name => 'sEzsigntemplateformfieldgroupRegexp',
description => 'A regular expression to indicate what values are acceptable for the Ezsigntemplateformfieldgroup. This can only be set if eEzsigntemplateformfieldgroupType is **Text** or **Textarea**',
format => '',
read_only => '',
},
't_ezsigntemplateformfieldgroup_tooltip' => {
datatype => 'string',
base_name => 'tEzsigntemplateformfieldgroupTooltip',
description => 'A tooltip that will be presented to Ezsigntemplatesigner about the Ezsigntemplateformfieldgroup',
format => '',
read_only => '',
},
'e_ezsigntemplateformfieldgroup_tooltipposition' => {
datatype => 'FieldEEzsigntemplateformfieldgroupTooltipposition',
base_name => 'eEzsigntemplateformfieldgroupTooltipposition',
description => '',
format => '',
read_only => '',
},
'a_obj_ezsigntemplateformfieldgroupsigner' => {
datatype => 'ARRAY[EzsigntemplateformfieldgroupsignerRequestCompound]',
base_name => 'a_objEzsigntemplateformfieldgroupsigner',
description => '',
format => '',
read_only => '',
},
'a_obj_dropdown_element' => {
datatype => 'ARRAY[CustomDropdownElementRequestCompound]',
base_name => 'a_objDropdownElement',
description => '',
format => '',
read_only => '',
},
'a_obj_ezsigntemplateformfield' => {
datatype => 'ARRAY[EzsigntemplateformfieldRequestCompound]',
base_name => 'a_objEzsigntemplateformfield',
description => '',
format => '',
read_only => '',
},
});
__PACKAGE__->openapi_types( {
'pki_ezsigntemplateformfieldgroup_id' => 'int',
'fki_ezsigntemplatedocument_id' => 'int',
'e_ezsigntemplateformfieldgroup_type' => 'FieldEEzsigntemplateformfieldgroupType',
'e_ezsigntemplateformfieldgroup_signerrequirement' => 'FieldEEzsigntemplateformfieldgroupSignerrequirement',
's_ezsigntemplateformfieldgroup_label' => 'string',
'i_ezsigntemplateformfieldgroup_step' => 'int',
's_ezsigntemplateformfieldgroup_defaultvalue' => 'string',
'i_ezsigntemplateformfieldgroup_filledmin' => 'int',
'i_ezsigntemplateformfieldgroup_filledmax' => 'int',
'b_ezsigntemplateformfieldgroup_readonly' => 'boolean',
'i_ezsigntemplateformfieldgroup_maxlength' => 'int',
'b_ezsigntemplateformfieldgroup_encrypted' => 'boolean',
's_ezsigntemplateformfieldgroup_regexp' => 'string',
't_ezsigntemplateformfieldgroup_tooltip' => 'string',
'e_ezsigntemplateformfieldgroup_tooltipposition' => 'FieldEEzsigntemplateformfieldgroupTooltipposition',
'a_obj_ezsigntemplateformfieldgroupsigner' => 'ARRAY[EzsigntemplateformfieldgroupsignerRequestCompound]',
'a_obj_dropdown_element' => 'ARRAY[CustomDropdownElementRequestCompound]',
'a_obj_ezsigntemplateformfield' => 'ARRAY[EzsigntemplateformfieldRequestCompound]'
} );
__PACKAGE__->attribute_map( {
'pki_ezsigntemplateformfieldgroup_id' => 'pkiEzsigntemplateformfieldgroupID',
'fki_ezsigntemplatedocument_id' => 'fkiEzsigntemplatedocumentID',
'e_ezsigntemplateformfieldgroup_type' => 'eEzsigntemplateformfieldgroupType',
'e_ezsigntemplateformfieldgroup_signerrequirement' => 'eEzsigntemplateformfieldgroupSignerrequirement',
's_ezsigntemplateformfieldgroup_label' => 'sEzsigntemplateformfieldgroupLabel',
'i_ezsigntemplateformfieldgroup_step' => 'iEzsigntemplateformfieldgroupStep',
's_ezsigntemplateformfieldgroup_defaultvalue' => 'sEzsigntemplateformfieldgroupDefaultvalue',
'i_ezsigntemplateformfieldgroup_filledmin' => 'iEzsigntemplateformfieldgroupFilledmin',
'i_ezsigntemplateformfieldgroup_filledmax' => 'iEzsigntemplateformfieldgroupFilledmax',
'b_ezsigntemplateformfieldgroup_readonly' => 'bEzsigntemplateformfieldgroupReadonly',
'i_ezsigntemplateformfieldgroup_maxlength' => 'iEzsigntemplateformfieldgroupMaxlength',
'b_ezsigntemplateformfieldgroup_encrypted' => 'bEzsigntemplateformfieldgroupEncrypted',
's_ezsigntemplateformfieldgroup_regexp' => 'sEzsigntemplateformfieldgroupRegexp',
't_ezsigntemplateformfieldgroup_tooltip' => 'tEzsigntemplateformfieldgroupTooltip',
'e_ezsigntemplateformfieldgroup_tooltipposition' => 'eEzsigntemplateformfieldgroupTooltipposition',
'a_obj_ezsigntemplateformfieldgroupsigner' => 'a_objEzsigntemplateformfieldgroupsigner',
'a_obj_dropdown_element' => 'a_objDropdownElement',
'a_obj_ezsigntemplateformfield' => 'a_objEzsigntemplateformfield'
} );
__PACKAGE__->mk_accessors(keys %{__PACKAGE__->attribute_map});
1;
| 39.185507 | 247 | 0.672091 |
ed8a9effff2894d7298883244f7e1aaf479fb241 | 3,522 | pm | Perl | auto-lib/Paws/SageMaker/ListModels.pm | galenhuntington/aws-sdk-perl | 13b775dcb5f0b3764f0a82f3679ed5c7721e67d3 | [
"Apache-2.0"
] | null | null | null | auto-lib/Paws/SageMaker/ListModels.pm | galenhuntington/aws-sdk-perl | 13b775dcb5f0b3764f0a82f3679ed5c7721e67d3 | [
"Apache-2.0"
] | null | null | null | auto-lib/Paws/SageMaker/ListModels.pm | galenhuntington/aws-sdk-perl | 13b775dcb5f0b3764f0a82f3679ed5c7721e67d3 | [
"Apache-2.0"
] | null | null | null |
package Paws::SageMaker::ListModels;
use Moose;
has CreationTimeAfter => (is => 'ro', isa => 'Str');
has CreationTimeBefore => (is => 'ro', isa => 'Str');
has MaxResults => (is => 'ro', isa => 'Int');
has NameContains => (is => 'ro', isa => 'Str');
has NextToken => (is => 'ro', isa => 'Str');
has SortBy => (is => 'ro', isa => 'Str');
has SortOrder => (is => 'ro', isa => 'Str');
use MooseX::ClassAttribute;
class_has _api_call => (isa => 'Str', is => 'ro', default => 'ListModels');
class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::SageMaker::ListModelsOutput');
class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::SageMaker::ListModels - Arguments for method ListModels on L<Paws::SageMaker>
=head1 DESCRIPTION
This class represents the parameters used for calling the method ListModels on the
L<Amazon SageMaker Service|Paws::SageMaker> service. Use the attributes of this class
as arguments to method ListModels.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to ListModels.
=head1 SYNOPSIS
my $sagemaker = Paws->service('SageMaker');
my $ListModelsOutput = $sagemaker->ListModels(
CreationTimeAfter => '1970-01-01T01:00:00', # OPTIONAL
CreationTimeBefore => '1970-01-01T01:00:00', # OPTIONAL
MaxResults => 1, # OPTIONAL
NameContains => 'MyModelNameContains', # OPTIONAL
NextToken => 'MyPaginationToken', # OPTIONAL
SortBy => 'Name', # OPTIONAL
SortOrder => 'Ascending', # OPTIONAL
);
# Results:
my $Models = $ListModelsOutput->Models;
my $NextToken = $ListModelsOutput->NextToken;
# Returns a L<Paws::SageMaker::ListModelsOutput> object.
Values for attributes that are native types (Int, String, Float, etc) can passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instance the underlying object.
For the AWS API documentation, see L<https://docs.aws.amazon.com/goto/WebAPI/sagemaker/ListModels>
=head1 ATTRIBUTES
=head2 CreationTimeAfter => Str
A filter that returns only models created after the specified time
(timestamp).
=head2 CreationTimeBefore => Str
A filter that returns only models created before the specified time
(timestamp).
=head2 MaxResults => Int
The maximum number of models to return in the response.
=head2 NameContains => Str
A string in the training job name. This filter returns only models in
the training job whose name contains the specified string.
=head2 NextToken => Str
If the response to a previous C<ListModels> request was truncated, the
response includes a C<NextToken>. To retrieve the next set of models,
use the token in the next request.
=head2 SortBy => Str
Sorts the list of results. The default is C<CreationTime>.
Valid values are: C<"Name">, C<"CreationTime">
=head2 SortOrder => Str
The sort order for results. The default is C<Ascending>.
Valid values are: C<"Ascending">, C<"Descending">
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method ListModels in L<Paws::SageMaker>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: L<https://github.com/pplu/aws-sdk-perl>
Please report bugs to: L<https://github.com/pplu/aws-sdk-perl/issues>
=cut
| 29.847458 | 249 | 0.681715 |
73e1c84d81f5dcd76393cab72e8e7f3a14bd5d93 | 21,504 | pm | Perl | emacc-pexacc-lexacc/pexacc2conf.pm | accurat-toolkit/pdatamining | f7115f425f5078c2d0048e13445caed203500ba0 | [
"Apache-2.0"
] | null | null | null | emacc-pexacc-lexacc/pexacc2conf.pm | accurat-toolkit/pdatamining | f7115f425f5078c2d0048e13445caed203500ba0 | [
"Apache-2.0"
] | null | null | null | emacc-pexacc-lexacc/pexacc2conf.pm | accurat-toolkit/pdatamining | f7115f425f5078c2d0048e13445caed203500ba0 | [
"Apache-2.0"
] | null | null | null | # PEXACC configuration file. Change this before running!
#
# (C) ICIA 2011, Radu ION.
#
# ver 1.0, 22.09.2011, Radu ION: Windows/Unix portable.
# ver 1.1, 04.10.2011, Radu ION: added file manipulation functions.
# ver 2.0, 11.10.2011, Radu ION: corresponding to PEXACC2.
# ver 2.01, 3.11.2011, Radu ION: added selective debug messages.
# ver 3.0, 23.11.2011, Radu ION: heavy modifications: no NFS, scp between master and worker, all files copied on each cluster node.
# ver 4.0, 15.12.2011, Radu ION: S2T and T2S dictionaries for symmetrical measure.
package pexacc2conf;
use strict;
use warnings;
use File::Spec;
use File::Path;
use Sys::Hostname;
# Forward declarations (the original Perl prototypes are kept verbatim;
# the definitions appear later in this file and are not visible here).
#
# Per-value configuration checkers -- presumably each validates one kind
# of configuration entry (directory, file, integer, probability, ...).
# TODO confirm exact contracts against the definitions below.
sub checkDir( $$ );
sub checkFile( $$ );
sub checkSplitMode( $$ );
sub checkWeights( $$$ );
sub checkClusterFile( $$ );
sub checkInt( $$ );
sub checkProb( $$ );
sub checkReal( $$ );
sub checkBool( $$ );
sub checkLang( $$ );
sub checkIP( $$ );
# Constructor and the helper that records one config key/value pair into
# the object (see the addValue( $this, $conf, KEY, VALUE ) calls in new()).
sub new;
sub addValue( $$$$ );
# Cluster-description generation and local-IP autodetection helpers.
sub genClusterFile();
sub findMyIPAdress();
# Portable (Windows/Unix) file, process and remote-copy utilities.
sub portableCopyFile2File( $$ );
sub portableCopyFileToDir( $$ );
sub portableRemoveFile( $ );
sub portableRemoveFileFromDir( $$ );
sub portableRemoveAllFilesFromDir( $ );
sub portableForkAndDetach( $ );
sub portableVerboseSystem( $ );
sub portableListFiles( $ );
sub portableRemoteCopy( $$$$ );
##################
#CONFIG FILE######
##################
# Module-wide debug switch (0 = off).  Presumably gates the "selective
# debug messages" mentioned in the changelog above -- confirm against the
# code that reads $DEBUG further down in this file.
my( $DEBUG ) = 0;
#Only change values between 'BEGIN CONF' and 'END CONF'!
sub new {
my( $classname ) = shift();
my( $conf ) = shift();
my( $this ) = {};
############################## BEGIN CONF ##############################################################################
#MODIFY THE LAST ARGUMENT OF THE addvalue() function.
#Possible values: strings, integers, booleans, real numbers.
#Source language
#IN: --source LANG
addValue( $this, $conf, "SRCL", "en" );
#Target language
#IN: --target LANG
addValue( $this, $conf, "TRGL", "ro" );
#MUST EXIST!
#This is the directory containing 'res' and 'dict' resources directories on worker machines.
#This is the directory which contains the aligned documents on the master machine.
#Alignments of the documents are relative to PEXACCWORKINGDIR
#This directory MUST HAVE THE SAME PATH on master and worker machines!!
#MUST BE AN ABSOLUTE PATH!
#Windows/Unix Not OK! SET!
addValue( $this, $conf, "PEXACCWORKINGDIR", "emacc-pexacc-lexacc" );
#Master IP. This is the IP of the machine that runs 'pexacc2.pl'.
#For clustering purposes THIS MUST NOT BE '127.0.0.1'!
#If not doing clustering, you may set this to '127.0.0.1'.
#A value of 'autodetect' will attempt to discover this address but this method is not very reliable.
#Windows/Unix Not OK! SET!
addValue( $this, $conf, "MASTERIP", "127.0.0.1" );
#Windows/Unix OK!
#MUST BE relative to PEXACCWORKINGDIR!
addValue( $this, $conf, "GIZAPPNEWDICTDIR", File::Spec->catdir( $this->{"PEXACCWORKINGDIR"}, "dict", "learntdict" ) );
#The GIZA++ executable.
#PLEASE CHANGE THAT TO MATCH YOUR INSTALLATION!
#Windows/Unix Not OK! SET!
#IN: --param GIZAPPEXE=/path/to/GIZA++
addValue( $this, $conf, "GIZAPPEXE", "emacc-pexacc-lexacc\\giza++-1.0.5\\GIZA++.exe" );
#The utility to convert from plain text to GIZA++ format.
#PLEASE CHANGE THAT TO MATCH YOUR INSTALLATION!
#Windows/Unix Not OK! SET!
#IN: --param PLAIN2SNTEXE=/path/to/plain2snt.out
addValue( $this, $conf, "PLAIN2SNTEXE", "emacc-pexacc-lexacc\\giza++-1.0.5\\plain2snt.exe" );
#GIZA++ configuration file
#This file will be automatically updated by pdataextract-p.pl so make sure that it's writable!
#Make sure it's in the same directory as pdataextract-p.pl!
#GIZA++ documentation gives additional information on this file if one wishes to play with GIZA++ parameters.
#If not given, it will be read from the current directory.
#Windows/Unix OK!
addValue( $this, $conf, "GIZAPPCONF", File::Spec->catdir( "emacc-pexacc-lexacc", "pdataextract-gizapp.gizacfg" ) );
#Local mount point. Intermediary files are kept in this dir. If not given, it will be created (on all cluster nodes).
#Windows/Unix OK!
my( $tmpdir ) = File::Spec->catdir( File::Spec->tmpdir(), "pdex" );
mkpath( $tmpdir );
addValue( $this, $conf, "TMPDIR", $tmpdir );
#Corpus name (going to be in the name of the output file).
#Change it to whatever corpus you are processing: Sheffield, Wikipedia, etc. :)
addValue( $this, $conf, "CORPUSNAME", "pexacc2-run" );
#Out file basename for extracted parallel phrases.
#The output file will be placed in the same directory as the pdataextract-p.pl resides.
#The iteration number and .txt extension will be added to this basename.
#Best to be left unchanged.
addValue( $this, $conf, "OUTFILEBN", $this->{"SRCL"} . "-" . $this->{"TRGL"} . "-" . $this->{"CORPUSNAME"} . "-pexacc2" );
#If this is specified, the last output file (named using OUTFILEBN and last iteration number) will take this name.
#IN: --output FILE
addValue( $this, $conf, "OUTFILE", "" );
#The cluster file. Number of processors on each machine.
#If the value is 'generate' a './cluster.info' file will be automatically generated in the current directory.
#IN: --param CLUSTERFILE=FILE|generate
addValue( $this, $conf, "CLUSTERFILE", "generate" );
#Use translation equivalents with at least this probability from the base (main) GIZA++ dictionary...
addValue( $this, $conf, "GIZAPPTHR", 0.001 );
#From the new-learnt dictionaries, use translation equivalents with at least this probability...
addValue( $this, $conf, "NEWGIZAPPTHR", 0.1 );
#Sure GIZA++ probability threshold (translation equivalents with at lest this probability are considered correct)
addValue( $this, $conf, "SUREGIZAPPTHR", 0.33 );
#Indentical strings in source and target language are not allowed.
#So reject a pair of phrases if they are more similar than...
addValue( $this, $conf, "IDENTICALPHRTHR", 0.99 );
#Source/Target sentence/chunk ratio in words (biggest/smallest) (trained: 1.5 for en-ro)
#IN: --param SENTRATIO=1.5
addValue( $this, $conf, "SENTRATIO", 1.5 );
#Windows/Unix OK!
#MUST BE relative to PEXACCWORKINGDIR!
#Source-target (PEXACC similarity measure is symmetrical)
my( $DICTFILEST ) = File::Spec->catfile( $this->{"PEXACCWORKINGDIR"}, "dict", $this->{"SRCL"} . "_" . $this->{"TRGL"} );
#Target-source
my( $DICTFILETS ) = File::Spec->catfile( $this->{"PEXACCWORKINGDIR"}, "dict", $this->{"TRGL"} . "_" . $this->{"SRCL"} );
#The main dictionary file.
addValue( $this, $conf, "DICTFILEST", $DICTFILEST );
addValue( $this, $conf, "DICTFILETS", $DICTFILETS );
#The weights with which to combine the probabilities of the main and learnt dictionaries (must sum to 1):
addValue( $this, $conf, "DICTWEIGHTMAIN", 0.7 );
addValue( $this, $conf, "DICTWEIGHTLEARNT", 0.3 );
#Learnt dictionary file:
#Windows/Unix OK!
addValue( $this, $conf, "LEARNTDICTFILEST", File::Spec->catfile( $this->{"GIZAPPNEWDICTDIR"}, $this->{"SRCL"} . "-" . $this->{"TRGL"} . "-GIZA++-" . $this->{"CORPUSNAME"} . ".gpp" ) );
addValue( $this, $conf, "LEARNTDICTFILETS", File::Spec->catfile( $this->{"GIZAPPNEWDICTDIR"}, $this->{"TRGL"} . "-" . $this->{"SRCL"} . "-GIZA++-" . $this->{"CORPUSNAME"} . ".gpp" ) );
#Resources. Make sure that these files from the PEXACC kit are installed in the right places and readable.
#MUST BE relative to PEXACCWORKINGDIR!
#Windows/Unix OK!
addValue( $this, $conf, "ENMARKERSFILE", File::Spec->catfile( $this->{"PEXACCWORKINGDIR"}, "res", "markers-" . $this->{"SRCL"} . ".txt" ) );
addValue( $this, $conf, "ROMARKERSFILE", File::Spec->catfile( $this->{"PEXACCWORKINGDIR"}, "res", "markers-" . $this->{"TRGL"} . ".txt" ) );
addValue( $this, $conf, "ENSTOPWORDSFILE", File::Spec->catfile( $this->{"PEXACCWORKINGDIR"}, "res", "stopwords_" . $this->{"SRCL"} . ".txt" ) );
addValue( $this, $conf, "ROSTOPWORDSFILE", File::Spec->catfile( $this->{"PEXACCWORKINGDIR"}, "res", "stopwords_" . $this->{"TRGL"} . ".txt" ) );
addValue( $this, $conf, "INFLENFILE", File::Spec->catfile( $this->{"PEXACCWORKINGDIR"}, "res", "endings_" . $this->{"SRCL"} . ".txt" ) );
addValue( $this, $conf, "INFLROFILE", File::Spec->catfile( $this->{"PEXACCWORKINGDIR"}, "res", "endings_" . $this->{"TRGL"} . ".txt" ) );
#Do lemmatization or not? (0 or 1)
addValue( $this, $conf, "LEMMAS", 1 );
#Split mode may be 'sent' from split text @ sentence boundaries or 'chunk' from split sentences @ marker level.
#IN: --param SPLITMODE={chunk|sent}
addValue( $this, $conf, "SPLITMODE", "chunk" );
#Output threshold for pairs of "parallel" (as determined by algorithm) phrases (values between 0 and 1 or 0..1):
#IN: --param OUTPUTTHR=0.2
addValue( $this, $conf, "OUTPUTTHR", 0.2 );
#GIZA++ dictionary training is done on pairs of phrases having at least this parallelism threshold (0..1):
#This value is different for sent (0.3) and chunk (0.5)
addValue( $this, $conf, "GIZAPPPARALLELTHR", 0.3 );
#How many bootstrapping iterations (extract parallel phrases, extract GIZA++ dicts and reloop):
#IN: --param GIZAPPITERATIONS=3
addValue( $this, $conf, "GIZAPPITERATIONS", 3 );
#Cognates: similarity threshold between a source and a target word for them to be considered cognates.
addValue( $this, $conf, "SSTHR", 0.7 );
#How apart are phrases (in absolute positions from the beginning of the document) such that they are considered "adjacent".
#7
#0 disables the feature
addValue( $this, $conf, "CLUSTERLIM", 0 );
#DEBUG messages or not (bool)
addValue( $this, $conf, "DEBUG", 1 );
#This is a bool indicating the status of remote computing
#This value is dynamically set by PEXACC2
addValue( $this, $conf, "REMOTEWORKER", 0 );
$DEBUG = $this->{"DEBUG"};
############################## END CONF ################################################################################
checkLang( "SRCL", $this );
checkLang( "TRGL", $this );
checkDir( "PEXACCWORKINGDIR", $this );
checkDir( "TMPDIR", $this );
checkIP( "MASTERIP", $this );
checkDir( "GIZAPPNEWDICTDIR", $this );
checkClusterFile( "CLUSTERFILE", $this );
checkFile( "GIZAPPCONF", $this );
checkFile( "GIZAPPEXE", $this );
checkFile( "DICTFILEST", $this );
checkFile( "DICTFILETS", $this );
checkFile( "ENMARKERSFILE", $this );
checkFile( "ROMARKERSFILE", $this );
checkSplitMode( "SPLITMODE", $this );
checkReal( "OUTPUTTHR", $this );
checkReal( "SENTRATIO", $this );
checkBool( "LEMMAS", $this );
checkBool( "DEBUG", $this );
checkBool( "REMOTEWORKER", $this );
checkProb( "SSTHR", $this );
checkProb( "GIZAPPTHR", $this );
checkProb( "IDENTICALPHRTHR", $this );
checkProb( "DICTWEIGHTMAIN", $this );
checkProb( "DICTWEIGHTLEARNT", $this );
checkWeights( "DICTWEIGHTMAIN + DICTWEIGHTLEARNT", $this->{"DICTWEIGHTMAIN"}, $this->{"DICTWEIGHTLEARNT"} );
checkInt( "GIZAPPITERATIONS", $this );
checkInt( "CLUSTERLIM", $this );
checkReal( "GIZAPPPARALLELTHR", $this );
bless( $this, $classname );
return $this;
}
#The rest of these functions are not to be called through the object interface.
#Store one configuration entry in %$this: prefer the user-supplied value
#from %$conf; an absent or empty entry falls back to the built-in default.
sub addValue( $$$$ ) {
    my( $this, $conf, $varname, $vardefaultvalue ) = @_;

    my $supplied = exists( $conf->{$varname} ) && $conf->{$varname} ne "";

    $this->{$varname} = $supplied ? $conf->{$varname} : $vardefaultvalue;
}
#Validate (or autodetect) the IP address stored under $varname.
#'autodetect' delegates to portableFindMyIPAdress(); anything else must be
#a dotted quad whose octets all fit in 0..255.
sub checkIP( $$ ) {
    my( $varname, $this ) = @_;
    my( $ip ) = $this->{$varname};

    if ( $ip =~ /autodetect/i ) {
        $this->{$varname} = portableFindMyIPAdress();
        return;
    }

    my( @ipbytes ) = ( $ip =~ /^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$/ );

    #Not a dotted quad at all.
    die( "pexacc2conf::checkIP: '$varname' is not a valid IP address !\n" )
        if ( scalar( @ipbytes ) != 4 );

    #Each octet must be at most 255 (the regex already forbids negatives).
    foreach my $byte ( @ipbytes ) {
        die( "pexacc2conf::checkIP: '$varname' is not a valid IP address !\n" )
            if ( $byte > 255 );
    }
}
#Verify that the directory stored under $varname exists and is writable by
#creating (and then removing) a scratch file inside it.  Dies with the OS
#error on failure.
sub checkDir( $$ ) {
    my( $varname, $this ) = @_;
    my( $dir ) = $this->{$varname};
    my( $testfile ) = File::Spec->catfile( $dir, "test245blah790" );

    #Use a lexical filehandle instead of the package-global bareword TF so
    #the handle cannot collide with other code and is closed automatically.
    open( my $tf, ">", $testfile ) or die( "pexacc2conf::checkDir: '$varname' has issues: '$! (" . int( $! ) . ")'.\n" );
    close( $tf );

    unlink( $testfile ) or warn( "pexacc2conf::checkDir: could not remove '$testfile' because '$!'.\n" );
}
#Resolve the cluster description file.  The literal value 'generate' means:
#write a single-machine cluster file into the current directory and use it.
#Whatever is configured (or just generated) must exist as a file.
sub checkClusterFile( $$ ) {
    my( $varname, $this ) = @_;

    if ( $this->{$varname} eq "generate" ) {
        genClusterFile();
        $this->{$varname} = "cluster-autogen.info";
    }

    checkFile( $varname, $this );
}
#Die unless the configuration value under $varname names an existing
#plain file.
sub checkFile( $$ ) {
    my( $varname, $this ) = @_;

    die( "pexacc2conf::checkFile: '$varname' does not exist !\n" )
        unless ( -f $this->{$varname} );
}
#The split mode controls segmentation granularity and must be exactly
#'sent' (sentence boundaries) or 'chunk' (marker-level chunks).
sub checkSplitMode( $$ ) {
    my( $varname, $this ) = @_;
    my %valid = ( "sent" => 1, "chunk" => 1 );

    die( "pexacc2conf::checkSplitMode: invalid value for '$varname' (either 'chunk' or 'sent') !\n" )
        unless ( exists( $valid{ $this->{$varname} } ) );
}
#Validate that the two dictionary interpolation weights sum to 1.
#The sum is compared with a small epsilon rather than an exact '!= 1' so
#that legitimate pairs (e.g. 0.15 + 0.85) are not rejected by IEEE
#floating-point rounding; valid inputs behave exactly as before.
sub checkWeights( $$$ ) {
    my( $varname, $w1, $w2 ) = @_;

    if ( abs( ( $w1 + $w2 ) - 1 ) > 1e-9 ) {
        die( "pexacc2conf::checkWeights: invalid value for '$varname' (must sum to 1) !\n" );
    }
}
#Die unless the configuration value under $varname is an unsigned decimal
#integer (digits only).
sub checkInt( $$ ) {
    my( $varname, $this ) = @_;

    die( "pexacc2conf::checkInt: invalid value for '$varname' !\n" )
        if ( $this->{$varname} !~ /^[0-9]+$/ );
}
#Die unless the configuration value under $varname is a real number
#(optionally in exponent notation) lying in the closed interval [0, 1].
sub checkProb( $$ ) {
    my( $varname, $this ) = @_;
    my( $prob ) = $this->{$varname};

    #Must look like an unsigned real number first...
    if ( $prob !~ /^[0-9]+(?:\.[0-9]+)?(?:[eE]-?[0-9]+)?$/ ) {
        #Fixed: the message used to name the old 'pdataextractconf' module;
        #now consistent with every other error message in this file.
        die( "pexacc2conf::checkProb: invalid value for '$varname' (real number) !\n" );
    }

    #...and then lie within the probability range.
    if ( $prob < 0 || $prob > 1 ) {
        die( "pexacc2conf::checkProb: invalid value for '$varname' ([0..1]) !\n" );
    }
}
#Die unless the configuration value under $varname is an unsigned real
#number, optionally with a decimal part and/or an exponent.
sub checkReal( $$ ) {
    my( $varname, $this ) = @_;

    die( "pexacc2conf::checkReal: invalid value for '$varname' !\n" )
        if ( $this->{$varname} !~ /^[0-9]+(?:\.[0-9]+)?(?:[eE]-?[0-9]+)?$/ );
}
#Die unless the configuration value under $varname is the literal string
#'0' or '1'.
sub checkBool( $$ ) {
    my( $varname, $this ) = @_;
    my( $flag ) = $this->{$varname};

    unless ( $flag =~ /^[01]$/ ) {
        die( "pexacc2conf::checkBool: invalid value for '$varname' (either '0' or '1') !\n" );
    }
}
#Die unless the configuration value under $varname is one of the language
#codes supported by the PEXACC resource files.
sub checkLang( $$ ) {
    my( $varname, $this ) = @_;
    my( $lang ) = $this->{$varname};

    if ( $lang !~ /^(?:en|ro|de|lt|lv|sl|el|hr|et)$/ ) {
        #Fixed: the message used to name the old 'pdataextractconf' module;
        #now consistent with every other error message in this file.
        die( "pexacc2conf::checkLang: invalid value for '$varname' !\n" );
    }
}
#Write 'cluster-autogen.info' in the current directory, describing the
#local machine only: one tab-separated line (hostname, 127.0.0.1, core ID)
#per CPU core.  On Linux the core list is read from /proc/cpuinfo; on other
#systems a single core is assumed.  As the emitted header says, the file is
#NOT suitable for real multi-machine cluster runs.
sub genClusterFile() {
    #Windows/Linux OK!
    my( $thishostname ) = hostname();

    #Lexical filehandles replace the package-global barewords CLF/CPU;
    #error messages now name this module instead of 'pdataextractconf'.
    open( my $clf, ">", "cluster-autogen.info" ) or die( "pexacc2conf::genClusterFile: cannot open file 'cluster-autogen.info' !\n" );

    print {$clf} "#This is a comment.\n";
    print {$clf} "#This autogenerated file will NOT work if a cluster run is desired!\n";
    print {$clf} "#Line format (tab separated fields):\n";
    print {$clf} "#- hostname of the machine in cluster (run 'hostname' command)\n";
    print {$clf} "#- IP of the machine\n";
    print {$clf} "#- ID (string) of one CPU core\n\n";

    #Linux systems: one output line per 'processor' entry in /proc/cpuinfo.
    if ( -f ( "/proc/cpuinfo" ) ) {
        open( my $cpu, "<", "/proc/cpuinfo" ) or die( "pexacc2conf::genClusterFile: cannot open file '/proc/cpuinfo' !\n" );

        while ( my $line = <$cpu> ) {
            $line =~ s/^\s+//;
            $line =~ s/\s+$//;

            next if ( $line !~ /:/ );

            my( $variable, $value ) = split( /\s*:\s*/, $line );

            $variable =~ s/^\s+//;
            $variable =~ s/\s+$//;
            $value =~ s/^\s+//;
            $value =~ s/\s+$//;

            if ( $variable eq "processor" ) {
                print {$clf} $thishostname . "\t" . "127.0.0.1" . "\t" . "cpu$value" . "\n";
            }
        }

        close( $cpu );
    }
    #Windows or other systems: core count unknown, assume one core.
    else {
        print {$clf} $thishostname . "\t" . "127.0.0.1" . "\t" . "cpu0" . "\n";
    }

    close( $clf );
}
#Copy one file onto another, portably (Windows 'copy' vs Unix 'cp').
#In DEBUG mode the command (or the tool's verbose output) goes to STDERR.
#NOTE(review): paths are interpolated unquoted into the shell command, so
#names containing spaces will break -- confirm callers pass safe paths.
sub portableCopyFile2File( $$ ) {
    my( $filea, $fileb ) = @_;

    #Windows run
    if ( $^O =~ /^MSWin(?:32|64)$/i ) {
        if ( $DEBUG ) {
            warn( "`copy \/Y ${filea} ${fileb}'\n" );
        }

        qx/copy \/Y ${filea} ${fileb}/;
    }
    #Linux run
    elsif ( $^O =~ /^Linux$/i || $^O =~ /^Cygwin$/i || $^O =~ /^MSys$/i ) {
        if ( $DEBUG ) {
            qx/cp -fv ${filea} ${fileb} 1>&2/;
        }
        else {
            qx/cp -f ${filea} ${fileb}/;
        }
    }
    else {
        #Fixed: the message used to claim 'portableRenameFile'.
        die( "pexacc2conf::portableCopyFile2File: unsupported operating system '$^O' !\n" );
    }
}
#Copy a single file into a directory, portably (Windows 'copy' vs Unix
#'cp').  In DEBUG mode the command (or the tool's verbose output) goes to
#STDERR.  NOTE(review): file/dir names are interpolated unquoted into the
#shell command, so paths with spaces will break -- confirm callers only
#pass space-free paths.
sub portableCopyFileToDir( $$ ) {
    my( $file, $dir ) = @_;
    #Windows run
    if ( $^O =~ /^MSWin(?:32|64)$/i ) {
        if ( $DEBUG ) {
            warn( "`copy \/Y ${file} ${dir}\\'\n" );
        }
        qx/copy \/Y ${file} ${dir}\\/;
    }
    #Linux run
    elsif ( $^O =~ /^Linux$/i || $^O =~ /^Cygwin$/i || $^O =~ /^MSys$/i ) {
        if ( $DEBUG ) {
            #Verbose 'cp' output is redirected to STDERR in debug mode.
            qx/cp -fv ${file} ${dir}\/ 1>&2/;
        }
        else {
            qx/cp -f ${file} ${dir}\//;
        }
    }
    else {
        die( "pexacc2conf::portableCopyFileToDir: unsupported operating system '$^O' !\n" );
    }
}
#Delete a single file, portably (Windows 'del' vs Unix 'rm').  In DEBUG
#mode the command (or the tool's verbose output) goes to STDERR.
#NOTE(review): the path is interpolated unquoted into the shell command --
#paths with spaces or shell metacharacters will break.
sub portableRemoveFile( $ ) {
    my( $file ) = $_[0];
    #Windows run
    if ( $^O =~ /^MSWin(?:32|64)$/i ) {
        if ( $DEBUG ) {
            warn( "`del \/F \/Q ${file}'\n" );
        }
        qx/del \/F \/Q ${file}/;
    }
    #Linux run
    elsif ( $^O =~ /^Linux$/i || $^O =~ /^Cygwin$/i || $^O =~ /^MSys$/i ) {
        if ( $DEBUG ) {
            qx/rm -fv ${file} 1>&2/;
        }
        else {
            qx/rm -f ${file}/;
        }
    }
    else {
        die( "pexacc2conf::portableRemoveFile: unsupported operating system '$^O' !\n" );
    }
}
#Delete one file inside a given directory, portably.  Equivalent to
#portableRemoveFile() on the joined "dir/file" path, with the OS-specific
#path separator built into the shell command.
sub portableRemoveFileFromDir( $$ ) {
    my( $dir, $file ) = @_;
    #Windows run
    if ( $^O =~ /^MSWin(?:32|64)$/i ) {
        if ( $DEBUG ) {
            warn( "`del \/F \/Q ${dir}\\${file}'\n" );
        }
        qx/del \/F \/Q ${dir}\\${file}/;
    }
    #Linux run
    elsif ( $^O =~ /^Linux$/i || $^O =~ /^Cygwin$/i || $^O =~ /^MSys$/i ) {
        if ( $DEBUG ) {
            qx/rm -fv ${dir}\/${file} 1>&2/;
        }
        else {
            qx/rm -f ${dir}\/${file}/;
        }
    }
    else {
        die( "pexacc2conf::portableRemoveFileFromDir: unsupported operating system '$^O' !\n" );
    }
}
#Delete every file directly inside the given directory, portably
#(Windows 'del dir\' vs Unix 'rm dir/*').  The directory itself and any
#subdirectories are left in place.
sub portableRemoveAllFilesFromDir( $ ) {
    my( $dir ) = $_[0];
    #Windows run
    if ( $^O =~ /^MSWin(?:32|64)$/i ) {
        if ( $DEBUG ) {
            warn( "`del \/F \/Q ${dir}\\'\n" );
        }
        qx/del \/F \/Q ${dir}\\/;
    }
    #Linux run
    elsif ( $^O =~ /^Linux$/i || $^O =~ /^Cygwin$/i || $^O =~ /^MSys$/i ) {
        if ( $DEBUG ) {
            qx/rm -fv ${dir}\/* 1>&2/;
        }
        else {
            qx/rm -f ${dir}\/* /;
        }
    }
    else {
        die( "pexacc2conf::portableRemoveAllFilesFromDir: unsupported operating system '$^O' !\n" );
    }
}
#Launch a command in the background and return immediately:
#Windows uses 'start /B', Unix-likes append '&'.  The child's exit status
#is not collected.
sub portableForkAndDetach( $ ) {
    my( $cmd ) = $_[0];
    #Windows run
    if ( $^O =~ /^MSWin(?:32|64)$/i ) {
        if ( $DEBUG ) {
            warn( "`start /B ${cmd}'\n" );
        }
        system( "start /B ${cmd}" );
    }
    #Linux run
    elsif ( $^O =~ /^Linux$/i || $^O =~ /^Cygwin$/i || $^O =~ /^MSys$/i ) {
        if ( $DEBUG ) {
            warn( "`${cmd} &'\n" );
        }
        system( "${cmd} &" );
    }
    else {
        die( "pexacc2conf::portableForkAndDetach: unsupported operating system '$^O' !\n" );
    }
}
#Run a shell command synchronously, echoing it to STDERR in DEBUG mode.
#On Unix-like systems the command's stdout is redirected to STDERR so tool
#output never pollutes the program's own stdout.
sub portableVerboseSystem( $ ) {
    my( $cmd ) = $_[0];

    #Windows run
    if ( $^O =~ /^MSWin(?:32|64)$/i ) {
        if ( $DEBUG ) {
            warn( "`${cmd}'\n" );
        }

        system( "${cmd}" );
    }
    #Linux run
    elsif ( $^O =~ /^Linux$/i || $^O =~ /^Cygwin$/i || $^O =~ /^MSys$/i ) {
        if ( $DEBUG ) {
            warn( "`${cmd} 1>&2'\n" );
        }

        system( "${cmd} 1>&2" );
    }
    else {
        #Fixed: the message used to say 'pexacc2conf::verboseSystem'.
        die( "pexacc2conf::portableVerboseSystem: unsupported operating system '$^O' !\n" );
    }
}
#Return the list of file names matching a directory-with-mask expression
#(e.g. "/tmp/*.txt"), one name per element, trimmed of surrounding
#whitespace.  Returns an empty list when nothing matches.
sub portableListFiles( $ ) {
    my( $dirwithmask ) = $_[0];

    #Windows run
    if ( $^O =~ /^MSWin(?:32|64)$/i ) {
        if ( $DEBUG ) {
            warn( "`dir \/B ${dirwithmask}'\n" );
        }

        #Fixed: the command used to be wrapped in an extra pair of double
        #quotes (qx/"dir ..."/), which makes cmd.exe treat the whole string
        #-- arguments included -- as a single program name.
        my( @files ) = qx/dir \/B ${dirwithmask}/;

        foreach my $f ( @files ) {
            $f =~ s/^\s+//;
            $f =~ s/\s+$//;
        }

        return @files;
    }
    #Linux run
    elsif ( $^O =~ /^Linux$/i || $^O =~ /^Cygwin$/i || $^O =~ /^MSys$/i ) {
        my( @files ) = qx/ls -1 ${dirwithmask} 2>\/dev\/null/;

        if ( $DEBUG ) {
            warn( "`ls -1 ${dirwithmask} 2>\/dev\/null'\n" );
        }

        foreach my $f ( @files ) {
            $f =~ s/^\s+//;
            $f =~ s/\s+$//;
        }

        return @files;
    }
    else {
        #Fixed: the message used to say 'pexacc2conf::verboseSystem'.
        die( "pexacc2conf::portableListFiles: unsupported operating system '$^O' !\n" );
    }
}
#Copy a local file to a remote machine via scp (Unix-like systems only;
#there is deliberately no Windows branch).  In DEBUG mode scp's output goes
#to STDERR, otherwise scp runs quietly.  Assumes passwordless SSH is set up
#between the cluster nodes.
sub portableRemoteCopy( $$$$ ) {
    my( $localfile, $remoteuser, $remotemachine, $remotedir ) = @_;
    if ( $^O =~ /^Linux$/i || $^O =~ /^Cygwin$/i || $^O =~ /^MSys$/i ) {
        if ( $DEBUG ) {
            qx/scp ${localfile} ${remoteuser}\@${remotemachine}:${remotedir} 1>&2/;
        }
        else {
            qx/scp -q ${localfile} ${remoteuser}\@${remotemachine}:${remotedir}/;
        }
    }
    else {
        die( "pexacc2conf::portableRemoteCopy: unsupported operating system '$^O' !\n" );
    }
}
#Best-effort discovery of the local machine's IPv4 address by parsing the
#output of 'ipconfig' (Windows) or 'ifconfig' (Unix-likes).  Dies when
#several addresses are found, since the user must choose one explicitly;
#falls back to 127.0.0.1 when none is detected.
sub portableFindMyIPAdress() {
    my( $myip ) = "127.0.0.1";

    #Windows run
    if ( $^O =~ /^MSWin(?:32|64)$/i ) {
        my( @output ) = qx/ipconfig/;
        my( $outstring ) = join( "", @output );

        #IPv4 Address. . . . . . . . . . . : 89.38.230.4
        my( @allips ) = ( $outstring =~ /IPv4\s+Address[\s.]+:\s*([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})/g );

        if ( scalar( @allips ) > 1 ) {
            die( "pexacc2conf::portableFindMyIPAdress: multiple IPs detected! Please choose from: " . join( ", ", @allips ) . "\n" );
        }
        elsif ( scalar( @allips ) == 1 ) {
            $myip = $allips[0];
        }
        else {
            #Fixed: this branch used to die(), contradicting both the text
            #of the message and the intended 127.0.0.1 fallback.
            warn( "pexacc2conf::portableFindMyIPAdress: no IP(s) detected! Will set 127.0.0.1 ...\n" );
        }
    }
    #Linux run
    elsif ( $^O =~ /^Linux$/i || $^O =~ /^Cygwin$/i || $^O =~ /^MSys$/i ) {
        my( @output ) = qx/ifconfig/;
        my( $outstring ) = join( "", @output );

        #inet addr:172.16.39.117
        my( @allips ) = ( $outstring =~ /inet\s+addr\s*:\s*([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})/g );

        if ( scalar( @allips ) > 1 ) {
            die( "pexacc2conf::portableFindMyIPAdress: multiple IPs detected! Please choose from: " . join( ", ", @allips ) . "\n" );
        }
        elsif ( scalar( @allips ) == 1 ) {
            $myip = $allips[0];
        }
        else {
            #Fixed: die() -> warn(), see the Windows branch above.
            warn( "pexacc2conf::portableFindMyIPAdress: no IP(s) detected! Will set 127.0.0.1 ...\n" );
        }
    }
    else {
        die( "pexacc2conf::portableFindMyIPAdress: unsupported operating system '$^O' !\n" );
    }

    return $myip;
}
1;
| 31.857778 | 185 | 0.589193 |
ed72abc90edfa0b91223df002cc0df64035c3829 | 6,237 | pm | Perl | assets/PortableGit/usr/share/perl5/core_perl/Tie/RefHash.pm | ther3tyle/git-it-electron | d808d9ce9f95ffebe39911e57907d72f4653ff6e | [
"BSD-2-Clause"
] | 4,499 | 2015-05-08T04:02:28.000Z | 2022-03-31T20:07:15.000Z | assets/PortableGit/usr/share/perl5/core_perl/Tie/RefHash.pm | enterstudio/git-it-electron | b101d208e9a393e16fc4368e205275e5dfc6f63e | [
"BSD-2-Clause"
] | 387 | 2019-09-05T16:33:09.000Z | 2022-03-31T10:43:39.000Z | assets/PortableGit/usr/share/perl5/core_perl/Tie/RefHash.pm | enterstudio/git-it-electron | b101d208e9a393e16fc4368e205275e5dfc6f63e | [
"BSD-2-Clause"
] | 1,318 | 2015-05-09T02:16:45.000Z | 2022-03-28T11:10:01.000Z | package Tie::RefHash;
use vars qw/$VERSION/;
$VERSION = "1.39";
use 5.005;
=head1 NAME
Tie::RefHash - use references as hash keys
=head1 SYNOPSIS
require 5.004;
use Tie::RefHash;
tie HASHVARIABLE, 'Tie::RefHash', LIST;
tie HASHVARIABLE, 'Tie::RefHash::Nestable', LIST;
untie HASHVARIABLE;
=head1 DESCRIPTION
This module provides the ability to use references as hash keys if you
first C<tie> the hash variable to this module. Normally, only the
keys of the tied hash itself are preserved as references; to use
references as keys in hashes-of-hashes, use Tie::RefHash::Nestable,
included as part of Tie::RefHash.
It is implemented using the standard perl TIEHASH interface. Please
see the C<tie> entry in perlfunc(1) and perltie(1) for more information.
The Nestable version works by looking for hash references being stored
and converting them to tied hashes so that they too can have
references as keys. This will happen without warning whenever you
store a reference to one of your own hashes in the tied hash.
=head1 EXAMPLE
use Tie::RefHash;
tie %h, 'Tie::RefHash';
$a = [];
$b = {};
$c = \*main;
$d = \"gunk";
$e = sub { 'foo' };
%h = ($a => 1, $b => 2, $c => 3, $d => 4, $e => 5);
$a->[0] = 'foo';
$b->{foo} = 'bar';
for (keys %h) {
print ref($_), "\n";
}
tie %h, 'Tie::RefHash::Nestable';
$h{$a}->{$b} = 1;
for (keys %h, keys %{$h{$a}}) {
print ref($_), "\n";
}
=head1 THREAD SUPPORT
L<Tie::RefHash> fully supports threading using the C<CLONE> method.
=head1 STORABLE SUPPORT
L<Storable> hooks are provided for semantically correct serialization and
cloning of tied refhashes.
=head1 RELIC SUPPORT
This version of Tie::RefHash seems to no longer work with 5.004. This has not
been thoroughly investigated. Patches welcome ;-)
=head1 LICENSE
This program is free software; you can redistribute it and/or modify it under
the same terms as Perl itself
=head1 MAINTAINER
Yuval Kogman E<lt>nothingmuch@woobling.orgE<gt>
=head1 AUTHOR
Gurusamy Sarathy gsar@activestate.com
'Nestable' by Ed Avis ed@membled.com
=head1 SEE ALSO
perl(1), perlfunc(1), perltie(1)
=cut
use Tie::Hash;
use vars '@ISA';
@ISA = qw(Tie::Hash);
use strict;
use Carp qw/croak/;
# Feature probes, computed once at compile time and exposed as constant
# subs so the rest of the module can branch on them cheaply.
BEGIN {
  local $@;
  # determine whether we need to take care of threads
  use Config ();
  my $usethreads = $Config::Config{usethreads}; # && exists $INC{"threads.pm"}
  *_HAS_THREADS = $usethreads ? sub () { 1 } : sub () { 0 };
  # Scalar::Util supplies refaddr() and weaken(); probe for the module and,
  # separately, for weaken() (absent in some pure-perl builds).
  *_HAS_SCALAR_UTIL = eval { require Scalar::Util; 1 } ? sub () { 1 } : sub () { 0 };
  *_HAS_WEAKEN = defined(&Scalar::Util::weaken) ? sub () { 1 } : sub () { 0 };
}

BEGIN {
  # create a refaddr function
  local $@;
  if ( _HAS_SCALAR_UTIL ) {
    Scalar::Util->import("refaddr");
  } else {
    # Fallback: parse the address out of overload::StrVal's
    # "TYPE(0xADDRESS)" string representation.
    require overload;
    *refaddr = sub {
      if ( overload::StrVal($_[0]) =~ /\( 0x ([a-zA-Z0-9]+) \)$/x) {
        return $1;
      } else {
        die "couldn't parse StrVal: " . overload::StrVal($_[0]);
      }
    };
  }
}

# Registry of live tied objects (held via weak references) so CLONE can
# rehash their keys after thread cloning changes every refaddr; $count
# throttles how often the registry is compacted.
my (@thread_object_registry, $count); # used by the CLONE method to rehash the keys after their refaddr changed
# Constructor for the tie.  The backing object is an array reference:
#   [0] = hash mapping refaddr($key) => [ $key, $value ]  (reference keys)
#   [1] = ordinary hash                                   (plain keys)
#   [2] = iteration phase flag used by FIRSTKEY/NEXTKEY
# Any remaining TIEHASH arguments are treated as an initial key/value list.
sub TIEHASH {
  my $c = shift;
  my $s = [];
  bless $s, $c;
  while (@_) {
    $s->STORE(shift, shift);
  }
  if (_HAS_THREADS ) {
    if ( _HAS_WEAKEN ) {
      # remember the object so that we can rekey it on CLONE
      push @thread_object_registry, $s;
      # but make this a weak reference, so that there are no leaks
      Scalar::Util::weaken( $thread_object_registry[-1] );
      if ( ++$count > 1000 ) {
        # this ensures we don't fill up with a huge array dead weakrefs
        @thread_object_registry = grep { defined } @thread_object_registry;
        $count = 0;
      }
    } else {
      $count++; # used in the warning emitted by CLONE
    }
  }
  return $s;
}
my $storable_format_version = join("/", __PACKAGE__, "0.01");
# Storable hook: serialize as (format version, list of [$key, $value]
# pairs from the ref-keyed index, plain-keyed hash).  The refaddr index
# itself is NOT serialized -- it is rebuilt on thaw.
sub STORABLE_freeze {
    my ( $self, $is_cloning ) = @_;
    my $refs  = $self->[0];
    my $plain = $self->[1];
    return ( $storable_format_version, [ values %$refs ], $plain || {} );
}
# Storable hook: rebuild the object from the frozen representation.
# Refuses data written by an incompatible version of this module, then
# reconstructs the refaddr index from the serialized [$key, $value] pairs.
sub STORABLE_thaw {
    my ( $self, $is_cloning, $version, $refs, $reg ) = @_;
    croak "incompatible versions of Tie::RefHash between freeze and thaw"
      if $version ne $storable_format_version;
    @$self = ( {}, $reg );
    $self->_reindex_keys( $refs );
}
# Called by perl in the child interpreter after a thread clone.  Every
# reference then has a new address, so every registered tied hash must be
# re-indexed; dead weak references (objects destroyed meanwhile) are
# dropped from the registry in the same pass.
sub CLONE {
  my $pkg = shift;
  if ( $count and not _HAS_WEAKEN ) {
    # without weaken() the registry cannot be maintained, so rekeying
    # after a clone is impossible -- warn the user once per clone
    warn "Tie::RefHash is not threadsafe without Scalar::Util::weaken";
  }
  # when the thread has been cloned all the objects need to be updated.
  # dead weakrefs are undefined, so we filter them out
  @thread_object_registry = grep { defined && do { $_->_reindex_keys; 1 } } @thread_object_registry;
  $count = 0; # we just cleaned up
}
# Recompute every stored key's refaddr() (addresses change across thread
# clones and Storable thaws) and merge in any extra [$key, $value] pairs
# supplied by the caller.
sub _reindex_keys {
    my ( $self, $extra_keys ) = @_;
    my @pairs = ( values %{ $self->[0] }, @{ $extra_keys || [] } );
    %{ $self->[0] } = map { ( refaddr( $_->[0] ) => $_ ) } @pairs;
}
# Tie hook: fetch a value.  Plain keys live in the ordinary hash (slot 1);
# reference keys are looked up by refaddr in slot 0, where each entry is a
# [$key, $value] pair.
sub FETCH {
    my ( $s, $k ) = @_;

    return $s->[1]{$k} unless ref $k;

    my $entry = $s->[0]{ refaddr($k) };
    return defined($entry) ? $entry->[1] : undef;
}
# Tie hook: store a value, returning it.  Reference keys are indexed by
# refaddr as [$key, $value] pairs (keeping the original reference alive);
# plain keys go into the ordinary hash.
sub STORE {
    my ( $s, $k, $v ) = @_;

    if ( ref $k ) {
        $s->[0]{ refaddr($k) } = [ $k, $v ];
    }
    else {
        $s->[1]{$k} = $v;
    }

    return $v;
}
# Tie hook: delete a key and return the value it held (undef when absent).
# Reference keys remove the [$key, $value] pair from the refaddr index.
sub DELETE {
    my ( $s, $k ) = @_;

    return delete $s->[1]{$k} unless ref $k;

    my $entry = delete $s->[0]{ refaddr($k) };
    return $entry ? $entry->[1] : undef;
}
# Tie hook: key-existence test against the backing hash matching the key
# type (refaddr index for reference keys, ordinary hash for plain keys).
sub EXISTS {
    my ( $s, $k ) = @_;

    if ( ref $k ) {
        return exists $s->[0]{ refaddr($k) };
    }

    return exists $s->[1]{$k};
}
# Tie hook: start a fresh iteration.  Reset both internal each() iterators,
# flag that iteration begins with the ref-keyed hash (slot 2 == 0), then
# delegate to NEXTKEY for the first key.
sub FIRSTKEY {
    my $s = shift;

    keys %{ $s->[0] };    # reset iterator over reference keys
    keys %{ $s->[1] };    # reset iterator over plain keys

    $s->[2] = 0;

    return $s->NEXTKEY;
}
# Tie hook: return the next key.  Phase 0 (slot 2 false) walks the
# ref-keyed entries, yielding the original reference from each
# [$key, $value] pair; once exhausted, phase 1 walks the plain-keyed hash.
sub NEXTKEY {
    my $s = shift;

    if ( !$s->[2] ) {
        if ( my ( $addr, $entry ) = each %{ $s->[0] } ) {
            return $entry->[0];
        }

        $s->[2] = 1;    # ref-keyed hash exhausted; switch to plain keys
    }

    return each %{ $s->[1] };
}
# Tie hook: remove every entry (both ref-keyed and plain-keyed) and reset
# the iteration phase flag used by FIRSTKEY/NEXTKEY.
sub CLEAR {
    my ( $s ) = @_;

    $s->[2] = 0;

    %{ $s->[$_] } = () for 0, 1;
}
package Tie::RefHash::Nestable;
use vars '@ISA';
@ISA = 'Tie::RefHash';
# Tie hook (Nestable variant): before storing a plain (untied) hash
# reference as a value, tie a hash of this same class over its contents so
# that nested hashes can also use references as keys.  Happens silently,
# as documented in the module's DESCRIPTION.
sub STORE {
  my($s, $k, $v) = @_;
  if (ref($v) eq 'HASH' and not tied %$v) {
    my @elems = %$v;
    # ref($s) keeps the subclass, so nesting recurses through this STORE
    tie %$v, ref($s), @elems;
  }
  $s->SUPER::STORE($k, $v);
}
1;
| 22.275 | 111 | 0.581209 |
ed803ab3371e023f0b00456254a3c629c9c8f781 | 852 | t | Perl | t/datetime.t | AlexDaniel/SQL-Lexer | 9fb33d0c8edd594a1a5af0018c196d7f330af5f1 | [
"MIT"
] | 1 | 2019-04-12T14:02:25.000Z | 2019-04-12T14:02:25.000Z | t/datetime.t | AlexDaniel/SQL-Lexer | 9fb33d0c8edd594a1a5af0018c196d7f330af5f1 | [
"MIT"
] | null | null | null | t/datetime.t | AlexDaniel/SQL-Lexer | 9fb33d0c8edd594a1a5af0018c196d7f330af5f1 | [
"MIT"
] | 3 | 2018-11-14T17:33:22.000Z | 2022-01-30T09:50:10.000Z | use v6;
use Test;
use SQL::Lexer;
my @good-literals = (
Q/'0000-00-00'/ => 'date-string',
Q/"2017-12-25"/ => 'date-string',
Q/'2017-12-00'/ => 'date-string',
Q/'00:00:00'/ => 'time-string',
Q/'23:59:59'/ => 'time-string',
Q/'12:30:00-06:00'/ => 'time-string',
Q/'6:30:00+04:00'/ => 'time-string',
Q/'1492-10-12 11:30:21'/ => 'timestamp-string',
Q/DATE "2017-12-25"/ => 'date-literal',
Q/TIME '12:30:00-06:00'/ => 'time-literal',
Q/TIMESTAMP '1492-10-12 11:30:21'/ => 'timestamp-literal',
);
my @bad-literals = (
Q/DATE "2017-12-25'/,
Q/DATE 2017-12-25/,
Q/TIME "2017-12-25"/,
);
for @good-literals {
ok SQL::Lexer.parse( .key, :rule(.value) ), "Good datetime({ .value }): { .key }";
}
nok SQL::Lexer.parse( $_, :rule<literal> ), "Bad literal: |$_|"
for @bad-literals;
done-testing;
| 25.058824 | 86 | 0.548122 |
ed4df5e7c5cd9fd1bf290defb5c0ed8999633fbd | 2,210 | pm | Perl | external/win_perl/lib/Test2/Plugin/UTF8.pm | phixion/l0phtcrack | 48ee2f711134e178dbedbd925640f6b3b663fbb5 | [
"Apache-2.0",
"MIT"
] | 2 | 2021-10-20T00:25:39.000Z | 2021-11-08T12:52:42.000Z | external/win_perl/lib/Test2/Plugin/UTF8.pm | Brute-f0rce/l0phtcrack | 25f681c07828e5e68e0dd788d84cc13c154aed3d | [
"Apache-2.0",
"MIT"
] | null | null | null | external/win_perl/lib/Test2/Plugin/UTF8.pm | Brute-f0rce/l0phtcrack | 25f681c07828e5e68e0dd788d84cc13c154aed3d | [
"Apache-2.0",
"MIT"
] | 1 | 2022-03-14T06:41:16.000Z | 2022-03-14T06:41:16.000Z | package Test2::Plugin::UTF8;
use strict;
use warnings;
our $VERSION = '0.000094';
use Test2::API qw{
test2_add_callback_post_load
test2_stack
};
# Plugin entry point.  Turns on the utf8 pragma in the calling test file
# and, once Test2 finishes loading, switches every formatter that supports
# an encoding to 'utf8' (warning once about formatters that do not).
sub import {
    my $class = shift;

    # Enable the utf8 pragma in the caller's scope so source literals in
    # the test file itself are treated as UTF-8.
    require utf8;
    utf8->import;

    test2_add_callback_post_load(sub {
        my $stack = test2_stack;
        $stack->top;    # make sure we have at least one hub

        my $warned = 0;
        for my $hub ( $stack->all ) {
            my $format = $hub->format or next;

            if ( $format->can('encoding') ) {
                $format->encoding('utf8');
            }
            else {
                warn "Could not apply UTF8 to unknown formatter ($format)\n"
                    unless $warned++;
            }
        }
    });
}
1;
__END__
=pod
=encoding UTF-8
=head1 NAME
Test2::Plugin::UTF8 - Test2 plugin to test with utf8.
=head1 DESCRIPTION
When used, this plugin will make tests work with utf8. This includes
turning on the utf8 pragma and updating the Test2 output formatter to
use utf8.
=head1 SYNOPSIS
use Test2::Plugin::UTF8;
This is similar to:
use utf8;
BEGIN {
require Test2::Tools::Encoding;
Test2::Tools::Encoding::set_encoding('utf8');
}
=head1 NOTES
This module currently sets output handles to have the ':utf8' output
layer. Some might prefer ':encoding(utf-8)' which is more strict about
verifying characters. There is a debate about weather or not encoding
to utf8 from perl internals can ever fail, so it may not matter. This
was also chosen because the alternative causes threads to segfault,
see L<perlbug 31923|https://rt.perl.org/Public/Bug/Display.html?id=31923>.
=head1 SOURCE
The source code repository for Test2-Suite can be found at
F<http://github.com/Test-More/Test2-Suite/>.
=head1 MAINTAINERS
=over 4
=item Chad Granum E<lt>exodist@cpan.orgE<gt>
=back
=head1 AUTHORS
=over 4
=item Chad Granum E<lt>exodist@cpan.orgE<gt>
=back
=head1 COPYRIGHT
Copyright 2017 Chad Granum E<lt>exodist@cpan.orgE<gt>.
This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
See F<http://dev.perl.org/licenses/>
=cut
| 20.462963 | 94 | 0.675566 |
ed9ef5fb3b461008d7afe96bc1a2680e1d64d430 | 10,099 | t | Perl | usr.sbin/config/SMM.doc/d.t | weiss/original-bsd | b44636d7febc9dcf553118bd320571864188351d | [
"Unlicense"
] | 114 | 2015-01-18T22:55:52.000Z | 2022-02-17T10:45:02.000Z | usr.sbin/config/SMM.doc/d.t | JamesLinus/original-bsd | b44636d7febc9dcf553118bd320571864188351d | [
"Unlicense"
] | null | null | null | usr.sbin/config/SMM.doc/d.t | JamesLinus/original-bsd | b44636d7febc9dcf553118bd320571864188351d | [
"Unlicense"
] | 29 | 2015-11-03T22:05:22.000Z | 2022-02-08T15:36:37.000Z | .\" Copyright (c) 1983, 1993
.\" The Regents of the University of California. All rights reserved.
.\"
.\" %sccs.include.redist.roff%
.\"
.\" @(#)d.t 8.1 (Berkeley) 06/08/93
.\"
.\".ds RH "Data Structure Sizing Rules
.bp
.LG
.B
.ce
APPENDIX D. VAX KERNEL DATA STRUCTURE SIZING RULES
.sp
.R
.NL
.PP
Certain system data structures are sized at compile time
according to the maximum number of simultaneous users expected,
while others are calculated at boot time based on the
physical resources present, e.g. memory. This appendix lists
both sets of rules and also includes some hints on changing
built-in limitations on certain data structures.
.SH
Compile time rules
.PP
The file \fI/sys/conf\|/param.c\fP contains the definitions of
almost all data structures sized at compile time. This file
is copied into the directory of each configured system to allow
configuration-dependent rules and values to be maintained.
(Each copy normally depends on the copy in /sys/conf,
and global modifications cause the file to be recopied unless
the makefile is modified.)
The rules implied by its contents are summarized below (here
MAXUSERS refers to the value defined in the configuration file
in the ``maxusers'' rule).
Most limits are computed at compile time and stored in global variables
for use by other modules; they may generally be patched in the system
binary image before rebooting to test new values.
.IP \fBnproc\fP
.br
The maximum number of processes which may be running at any time.
It is referred to in other calculations as NPROC and is defined to be
.DS
20 + 8 * MAXUSERS
.DE
.IP \fBntext\fP
.br
The maximum number of active shared text segments.
The constant is intended to allow for network servers and common commands
that remain in the table.
It is defined as
.DS
36 + MAXUSERS.
.DE
.IP \fBninode\fP
.br
The maximum number of files in the file system which may be
active at any time. This includes files in use by users, as
well as directory files being read or written by the system
and files associated with bound sockets in the UNIX IPC domain.
It is defined as
.DS
(NPROC + 16 + MAXUSERS) + 32
.DE
.IP \fBnfile\fP
.br
The number of ``file table'' structures. One file
table structure is used for each open, unshared, file descriptor.
Multiple file descriptors may reference a single file table
entry when they are created through a \fIdup\fP call, or as the
result of a \fIfork\fP. This is defined to be
.DS
16 * (NPROC + 16 + MAXUSERS) / 10 + 32
.DE
.IP \fBncallout\fP
.br
The number of ``callout'' structures. One callout
structure is used per internal system event handled with
a timeout. Timeouts are used for terminal delays,
watchdog routines in device drivers, protocol timeout processing, etc.
This is defined as
.DS
16 + NPROC
.DE
.IP \fBnclist\fP
.br
The number of ``c-list'' structures. C-list structures are
used in terminal I/O, and currently each holds 60 characters.
Their number is defined as
.DS
60 + 12 * MAXUSERS
.DE
.IP \fBnmbclusters\fP
.br
The maximum number of pages which may be allocated by the network.
This is defined as 256 (a quarter megabyte of memory) in /sys/h/mbuf.h.
In practice, the network rarely uses this much memory. It starts off
by allocating 8 kilobytes of memory, then requesting more as
required. This value represents an upper bound.
.IP \fBnquota\fP
.br
The number of ``quota'' structures allocated. Quota structures
are present only when disc quotas are configured in the system. One
quota structure is kept per user. This is defined to be
.DS
(MAXUSERS * 9) / 7 + 3
.DE
.IP \fBndquot\fP
.br
The number of ``dquot'' structures allocated. Dquot structures
are present only when disc quotas are configured in the system.
One dquot structure is required per user, per active file system quota.
That is, when a user manipulates a file on a file system on which
quotas are enabled, the information regarding the user's quotas on
that file system must be in-core. This information is cached, so
that not all information must be present in-core all the time.
This is defined as
.DS
NINODE + (MAXUSERS * NMOUNT) / 4
.DE
where NMOUNT is the maximum number of mountable file systems.
.LP
In addition to the above values, the system page tables (used to
map virtual memory in the kernel's address space) are sized at
compile time by the SYSPTSIZE definition in the file /sys/vax/vmparam.h.
This is defined to be
.DS
20 + MAXUSERS
.DE
pages of page tables.
Its definition affects
the size of many data structures allocated at boot time because
it constrains the amount of virtual memory which may be addressed
by the running system. This is often the limiting factor
in the size of the buffer cache, in which case a message is printed
when the system configures at boot time.
.SH
Run-time calculations
.PP
The most important data structures sized at run-time are those used in
the buffer cache. Allocation is done by allocating physical memory
(and system virtual memory) immediately after the system
has been started up; look in the file /sys/vax/machdep.c.
The amount of physical memory which may be allocated to the buffer
cache is constrained by the size of the system page tables, among
other things. While the system may calculate
a large amount of memory to be allocated to the buffer cache,
if the system page
table is too small to map this physical
memory into the virtual address space
of the system, only as much as can be mapped will be used.
.PP
The buffer cache is comprised of a number of ``buffer headers''
and a pool of pages attached to these headers. Buffer headers
are divided into two categories: those used for swapping and
paging, and those used for normal file I/O. The system tries
to allocate 10% of the first two megabytes and 5% of the remaining
available physical memory for the buffer
cache (where \fIavailable\fP does not count that space occupied by
the system's text and data segments). If this results in fewer
than 16 pages of memory allocated, then 16 pages are allocated.
This value is kept in the initialized variable \fIbufpages\fP
so that it may be patched in the binary image (to allow tuning
without recompiling the system),
or the default may be overridden with a configuration-file option.
For example, the option \fBoptions BUFPAGES="3200"\fP
causes 3200 pages (3.2M bytes) to be used by the buffer cache.
A sufficient number of file I/O buffer headers are then allocated
to allow each to hold 2 pages each.
Each buffer maps 8K bytes.
If the number of buffer pages is larger than can be mapped
by the buffer headers, the number of pages is reduced.
The number of buffer headers allocated
is stored in the global variable \fInbuf\fP,
which may be patched before the system is booted.
The system option \fBoptions NBUF="1000"\fP forces the allocation
of 1000 buffer headers.
Half as many swap I/O buffer headers as file I/O buffers
are allocated,
but no more than 256.
.SH
System size limitations
.PP
As distributed, the sum of the virtual sizes of the core-resident
processes is limited to 256M bytes. The size of the text
segment of a single process is currently limited to 6M bytes.
It may be increased to no greater than the data segment size limit
(see below) by redefining MAXTSIZ.
This may be done with a configuration file option,
e.g. \fBoptions MAXTSIZ="(10*1024*1024)"\fP
to set the limit to 10 million bytes.
Other per-process limits discussed here may be changed with similar options
with names given in parentheses.
Soft, user-changeable limits are set to 512K bytes for stack (DFLSSIZ)
and 6M bytes for the data segment (DFLDSIZ) by default;
these may be increased up to the hard limit
with the \fIsetrlimit\fP\|(2) system call.
The data and stack segment size hard limits are set by a system configuration
option to one of 17M, 33M or 64M bytes.
One of these sizes is chosen based on the definition of MAXDSIZ;
with no option, the limit is 17M bytes; with an option
\fBoptions MAXDSIZ="(32*1024*1024)"\fP (or any value between 17M and 33M),
the limit is increased to 33M bytes, and values larger than 33M
result in a limit of 64M bytes.
You must be careful in doing this that you have adequate paging space.
As normally configured , the system has 16M or 32M bytes per paging area,
depending on disk size.
The best way to get more space is to provide multiple, thereby
interleaved, paging areas.
Increasing the virtual memory limits results in interleaving of
swap space in larger sections (from 500K bytes to 1M or 2M bytes).
.PP
By default, the virtual memory system allocates enough memory
for system page tables mapping user page tables
to allow 256 megabytes of simultaneous active virtual memory.
That is, the sum of the virtual memory sizes of all (completely- or partially-)
resident processes can not exceed this limit.
If the limit is exceeded, some process(es) must be swapped out.
To increase the amount of resident virtual space possible,
you can alter the constant USRPTSIZE (in
/sys/vax/vmparam.h).
Each page of system page tables allows 8 megabytes of user virtual memory.
.PP
Because the file system block numbers are stored in
page table \fIpg_blkno\fP
entries, the maximum size of a file system is limited to
2^24 1024 byte blocks. Thus no file system can be larger than 8 gigabytes.
.PP
The number of mountable file systems is set at 20 by the definition
of NMOUNT in /sys/h/param.h.
This should be sufficient; if not, the value can be increased up to 255.
If you have many disks, it makes sense to make some of
them single file systems, and the paging areas don't count in this total.
.PP
The limit to the number of files that a process may have open simultaneously
is set to 64.
This limit is set by the NOFILE definition in /sys/h/param.h.
It may be increased arbitrarily, with the caveat that the user structure
expands by 5 bytes for each file, and thus UPAGES (/sys/vax/machparam.h)
must be increased accordingly.
.PP
The amount of physical memory is currently limited to 64 Mb
by the size of the index fields in the core-map (/sys/h/cmap.h).
The limit may be increased by following instructions in that file
to enlarge those fields.
| 40.88664 | 79 | 0.780374 |
ed70c82217a2f044983fad0ef12bea937fea5a45 | 5,437 | pl | Perl | postprocessing/dirbias/scrummbleTest.pl | alperezq/BioPipeline | 85da159a14a2a6cb0839cce6f107903851e93e15 | [
"MIT"
] | null | null | null | postprocessing/dirbias/scrummbleTest.pl | alperezq/BioPipeline | 85da159a14a2a6cb0839cce6f107903851e93e15 | [
"MIT"
] | 1 | 2020-03-16T17:42:04.000Z | 2020-03-16T17:42:04.000Z | postprocessing/dirbias/scrummbleTest.pl | alperezq/BioPipeline | 85da159a14a2a6cb0839cce6f107903851e93e15 | [
"MIT"
] | 1 | 2021-03-03T03:56:44.000Z | 2021-03-03T03:56:44.000Z | #!/usr/bin/perl
$argc = @ARGV;
if ($argc != 7)
{
print "usage: \n perl scrummbleTest.pl sequence_file cost_file number_of_iterations outputfilename use_modular_structure_flag $breakeven_flag arlem_in_full_path\n";
exit -1;
}
my $seq_file = $ARGV[0];
my $costfile = $ARGV[1];
my $numberofsimulations=$ARGV[2];
my $outputfile=$ARGV[3];
my $modular_struct_flag=$ARGV[4];
my $breakeven_flag=$ARGV[5];
my $strongflag=1;
my $len_threshold=30;
my $arlem=$ARGV[6];
#print "No of iterations: $numberofsimulations \n";
open(seqIN, "<$seq_file")
or die "Oops! can't open input file ".$seq_file."\n";
my @Seqs=<seqIN>;
close(seqIN);
open(seqOut, ">$outputfile")
or die "Oops! can't open input file ".$outputfile."\n";
if(1){ # to skip computation
my @unique_types;
my @unique_type_length;
my $tmplen;
# read each sequence and scrumble its unit types
foreach $seq (@Seqs){
if($seq=~/>/){
#print $seq;
}
else{
my @types=split(/\s+/,$seq);
$seqlen=scalar(@types);
push(@unique_types,$types[0]);
for($i=1;$i<$seqlen;$i++){
if($types[$i] ne $types[$i-1]){
push(@unique_types,$types[$i]);
}
}
foreach $utype (@unique_types){
push(@unique_type_length,1);
}
my $j=0;
for($i=1;$i<$seqlen;$i++){
if($types[$i] ne $types[$i-1]){
#push(@unique_types,$types[$i]);
$j++;
}
else{
$unique_type_length[$j]++;
}
}
# printing result for debugging
$j=0;
$tmplen=scalar(@unique_types);
# swap types
# crunch big chunks
if($numberofsimulations>1){
if((0)&&($strongflag==1)){
foreach $utype (@unique_types){
if($unique_type_length[$j]>$len_threshold){
push(@unique_type_length,$unique_type_length[$j]-$len_threshold);
push(@unique_types,$utype);
$unique_type_length[$j]=$len_threshold;
}
$j++;
}
}
}
for($k=0;$k<$numberofsimulations;$k++){
$pos1=int (rand($tmplen));
$pos2=int (rand($tmplen));
if($pos1==$pos2){
$k--;
next;
}
#print "length: ".$tmplen.", ".$pos1." ".$pos2."\n";
if(($pos1<$tmplen) && ($pos2<$tmplen)&&(1)){ # the last and is a flag to take/skip swap
$tmpswap=$unique_types[$pos1];
$unique_types[$pos1]=$unique_types[$pos2];;
$unique_types[$pos2]=$tmpswap;
$tmpswap2=$unique_type_length[$pos1];
$unique_type_length[$pos1]=$unique_type_length[$pos2];
$unique_type_length[$pos2]=$tmpswap2;
}
}
# end swap
# display after swap
$j=0;
if(0){ # ms format
foreach $utype (@unique_types){
print seqOut $utype."(".$unique_type_length[$j].") ";
$j++;
}
print seqOut "\n";
}
else{ # fasta like format
#print seqOut "> seq \n";
foreach $utype (@unique_types){
if($modular_struct_flag){
$unique_type_length[$j]=1;
}
for($z=0;$z<$unique_type_length[$j];$z++){
print seqOut $utype." ";
}
$j++;
}
print seqOut "\n";
}
#end display after swap
for($i=0;$i<$tmplen;$i++){
pop(@unique_types);
pop(@unique_type_length);
}
}
#$sqcounter++;
}
close(seqOut);
# report sequence file
print "Runing: arelm.....................\n";
# run arlem three times and return difference
if($breakeven_flag){
$argstring="$arlem -f $outputfile -cfile $costfile -align -rle -breakeven > $outputfile.align.both";
}
else{
$argstring="$arlem -f $outputfile -cfile $costfile -align -rle > $outputfile.align.both";
}
print "Runing: $argstring.\n";
$retval=system($argstring);
if($retval != 0){
print "Error running: $argstring\n";
exit -1;
}
$argstring="$arlem -f $outputfile -cfile $costfile -align -rle -onlyleft > $outputfile.align.onlyleft";
print "Runing: $argstring.\n";
$retval=system($argstring);
if($retval!=0){
print "Error running: $argstring\n";
exit -1;
}
$argstring="$arlem -f $outputfile -cfile $costfile -align -rle -onlyright > $outputfile.align.onlyright";
print "Runing: $argstring.\n";
$retval=system($argstring);
if($retval!=0){
print "Error running: $argstring\n";
exit -1;
}
# evaluate left
$argstring="$arlem -evaluate $outputfile.align.both $outputfile.align.onlyleft> $outputfile.evaluate.left";
print "Runing: $argstring.\n";
$retval=system($argstring);
if($retval!=0){
print "Error running: $argstring\n";
exit -1;
}
#evaluate right
$argstring="$arlem -evaluate $outputfile.align.both $outputfile.align.onlyright> $outputfile.evaluate.right";
print "Runing: $argstring.\n";
$retval=system($argstring);
if($retval!=0){
print "Error running: $argstring\n";
exit -1;
}
} # to skip
# improvement in left
open(EVIN, "<$outputfile.evaluate.left")
or die "Oops! can't open input file $outputfile.evaluate.left \n";
my @lines=<EVIN>;
foreach $line (@lines){
if($line=~/improved.+\s+(\d+)/){
$improved_left=$1;
}
}
close(EVIN);
# improvement in right
open(EVIN, "<$outputfile.evaluate.right")
or die "Oops! can't open input file $outputfile.evaluate.right \n";
my @lines=<EVIN>;
foreach $line (@lines){
if($line=~/improved.+\s+(\d+)/){
$improved_right=$1;
}
}
close(EVIN);
#print "Improved when left only: $improved_left, Improved when right only: $improved_right \n";
$diff=$improved_left-$improved_right;
open(ScrumbleOut, ">>$seq_file.ScrumbleResult.txt")
or die "Oops! can't open input file $seq_file.ScrumbleResult.txt \n";
print ScrumbleOut "L: $improved_left, R: $improved_right, diff: $diff \n";
close(ScrumbleOut);
exit 0;
| 22.844538 | 168 | 0.633438 |
73d1aa603a6c7b5e953baf51042698721cc46514 | 7,167 | pl | Perl | src/curl-7.9.8/tests/memanalyze.pl | phochste/srepod | 6dcf220ce96ae7d2f943c27f3533c19e79335144 | [
"BSD-3-Clause"
] | null | null | null | src/curl-7.9.8/tests/memanalyze.pl | phochste/srepod | 6dcf220ce96ae7d2f943c27f3533c19e79335144 | [
"BSD-3-Clause"
] | null | null | null | src/curl-7.9.8/tests/memanalyze.pl | phochste/srepod | 6dcf220ce96ae7d2f943c27f3533c19e79335144 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env perl
#
# Example input:
#
# MEM mprintf.c:1094 malloc(32) = e5718
# MEM mprintf.c:1103 realloc(e5718, 64) = e6118
# MEM sendf.c:232 free(f6520)
do {
if($ARGV[0] eq "-v") {
$verbose=1;
}
elsif($ARGV[0] eq "-t") {
$trace=1;
}
} while (shift @ARGV);
my $maxmem;
sub newtotal {
my ($newtot)=@_;
# count a max here
if($newtot > $maxmem) {
$maxmem= $newtot;
}
}
while(<STDIN>) {
chomp $_;
$line = $_;
if($line =~ /^MEM ([^:]*):(\d*) (.*)/) {
# generic match for the filename+linenumber
$source = $1;
$linenum = $2;
$function = $3;
if($function =~ /free\(0x([0-9a-f]*)/) {
$addr = $1;
if($sizeataddr{$addr} == 0) {
print "FREE ERROR: No memory allocated: $line\n";
}
elsif(-1 == $sizeataddr{$addr}) {
print "FREE ERROR: Memory freed twice: $line\n";
print "FREE ERROR: Previously freed at: ".$getmem{$addr}."\n";
}
else {
$totalmem -= $sizeataddr{$addr};
if($trace) {
print "FREE: malloc at ".$getmem{$addr}." is freed again at $source:$linenum\n";
printf("FREE: %d bytes freed, left allocated: $totalmem bytes\n", $sizeataddr{$addr});
}
newtotal($totalmem);
$frees++;
$sizeataddr{$addr}=-1; # set -1 to mark as freed
$getmem{$addr}="$source:$linenum";
}
}
elsif($function =~ /malloc\((\d*)\) = 0x([0-9a-f]*)/) {
$size = $1;
$addr = $2;
if($sizeataddr{$addr}>0) {
# this means weeeeeirdo
print "Fucked up debug compile, rebuild curl now\n";
}
$sizeataddr{$addr}=$size;
$totalmem += $size;
if($trace) {
print "MALLOC: malloc($size) at $source:$linenum",
" makes totally $totalmem bytes\n";
}
newtotal($totalmem);
$mallocs++;
$getmem{$addr}="$source:$linenum";
}
elsif($function =~ /realloc\(0x([0-9a-f]*), (\d*)\) = 0x([0-9a-f]*)/) {
$oldaddr = $1;
$newsize = $2;
$newaddr = $3;
$totalmem -= $sizeataddr{$oldaddr};
if($trace) {
printf("REALLOC: %d less bytes and ", $sizeataddr{$oldaddr});
}
$sizeataddr{$oldaddr}=0;
$totalmem += $newsize;
$sizeataddr{$newaddr}=$newsize;
if($trace) {
printf("%d more bytes ($source:$linenum)\n", $newsize);
}
newtotal($totalmem);
$reallocs++;
$getmem{$oldaddr}="";
$getmem{$newaddr}="$source:$linenum";
}
elsif($function =~ /strdup\(0x([0-9a-f]*)\) \((\d*)\) = 0x([0-9a-f]*)/) {
# strdup(a5b50) (8) = df7c0
$dup = $1;
$size = $2;
$addr = $3;
$getmem{$addr}="$source:$linenum";
$sizeataddr{$addr}=$size;
$totalmem += $size;
if($trace) {
printf("STRDUP: $size bytes at %s, makes totally: %d bytes\n",
$getmem{$addr}, $totalmem);
}
newtotal($totalmem);
$strdups++;
}
else {
print "Not recognized input line: $function\n";
}
}
# FD url.c:1282 socket() = 5
elsif($_ =~ /^FD ([^:]*):(\d*) (.*)/) {
# generic match for the filename+linenumber
$source = $1;
$linenum = $2;
$function = $3;
if($function =~ /socket\(\) = (\d*)/) {
$filedes{$1}=1;
$getfile{$1}="$source:$linenum";
$openfile++;
}
elsif($function =~ /accept\(\) = (\d*)/) {
$filedes{$1}=1;
$getfile{$1}="$source:$linenum";
$openfile++;
}
elsif($function =~ /sclose\((\d*)\)/) {
if($filedes{$1} != 1) {
print "Close without open: $line\n";
}
else {
$filedes{$1}=0; # closed now
$openfile--;
}
}
}
# FILE url.c:1282 fopen("blabla") = 0x5ddd
elsif($_ =~ /^FILE ([^:]*):(\d*) (.*)/) {
# generic match for the filename+linenumber
$source = $1;
$linenum = $2;
$function = $3;
if($function =~ /fopen\(\"([^\"]*)\"\) = (\(nil\)|0x([0-9a-f]*))/) {
if($2 eq "(nil)") {
;
}
else {
$fopen{$3}=1;
$fopenfile{$3}="$source:$linenum";
$fopens++;
}
}
# fclose(0x1026c8)
elsif($function =~ /fclose\(0x([0-9a-f]*)\)/) {
if(!$fopen{$1}) {
print "fclose() without fopen(): $line\n";
}
else {
$fopen{$1}=0;
$fopens--;
}
}
}
# ADDR url.c:1282 getaddrinfo() = 0x5ddd
elsif($_ =~ /^ADDR ([^:]*):(\d*) (.*)/) {
# generic match for the filename+linenumber
$source = $1;
$linenum = $2;
$function = $3;
if($function =~ /getaddrinfo\(\) = (\(nil\)|0x([0-9a-f]*))/) {
my $add = $2;
if($add eq "(nil)") {
;
}
else {
$addrinfo{$add}=1;
$addrinfofile{$add}="$source:$linenum";
$addrinfos++;
}
}
# fclose(0x1026c8)
elsif($function =~ /freeaddrinfo\(0x([0-9a-f]*)\)/) {
if(!$addrinfo{$1}) {
print "freeaddrinfo() without getaddrinfo(): $line\n";
}
else {
$addrinfo{$1}=0;
$addrinfos--;
}
}
}
else {
print "Not recognized prefix line: $line\n";
}
}
if($totalmem) {
print "Leak detected: memory still allocated: $totalmem bytes\n";
for(keys %sizeataddr) {
$addr = $_;
$size = $sizeataddr{$addr};
if($size > 0) {
print "At $addr, there's $size bytes.\n";
print " allocated by ".$getmem{$addr}."\n";
}
}
}
if($openfile) {
for(keys %filedes) {
if($filedes{$_} == 1) {
print "Open file descriptor created at ".$getfile{$_}."\n";
}
}
}
if($fopens) {
print "Open FILE handles left at:\n";
for(keys %fopen) {
if($fopen{$_} == 1) {
print "fopen() called at ".$fopenfile{$_}."\n";
}
}
}
if($addrinfos) {
print "IPv6-style name resolve data left at:\n";
for(keys %addrinfofile) {
if($addrinfo{$_} == 1) {
print "getaddrinfo() called at ".$addrinfofile{$_}."\n";
}
}
}
if($verbose) {
print "Mallocs: $mallocs\n",
"Reallocs: $reallocs\n",
"Strdups: $strdups\n",
"Frees: $frees\n";
print "Maximum allocated: $maxmem\n";
}
| 26.544444 | 106 | 0.413562 |
73dc92ce3d08c96147515e5d39316145637d6876 | 15,327 | al | Perl | Modules/System/Extension Management/src/ExtensionManagement.Codeunit.al | MiguelMercadoActual/ALAppExtensions | 97ee3823053eb32fa7e38dc3d1e7a89bdcca8d7b | [
"MIT"
] | 10 | 2021-06-04T12:40:13.000Z | 2021-06-04T13:27:08.000Z | Modules/System/Extension Management/src/ExtensionManagement.Codeunit.al | MiguelMercadoActual/ALAppExtensions | 97ee3823053eb32fa7e38dc3d1e7a89bdcca8d7b | [
"MIT"
] | null | null | null | Modules/System/Extension Management/src/ExtensionManagement.Codeunit.al | MiguelMercadoActual/ALAppExtensions | 97ee3823053eb32fa7e38dc3d1e7a89bdcca8d7b | [
"MIT"
] | 1 | 2019-01-08T10:41:44.000Z | 2019-01-08T10:41:44.000Z | // ------------------------------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// ------------------------------------------------------------------------------------------------
/// <summary>
/// Provides features for installing and uninstalling, downloading and uploading, configuring and publishing extensions and their dependencies.
/// </summary>
codeunit 2504 "Extension Management"
{
Access = Public;
var
ExtensionInstallationImpl: Codeunit "Extension Installation Impl";
ExtensionOperationImpl: Codeunit "Extension Operation Impl";
ExtensionMarketplace: Codeunit "Extension Marketplace";
/// <summary>
/// Installs an extension, based on its PackageId and Locale Identifier.
/// </summary>
/// <param name="PackageId">The ID of the extension package.</param>
/// <param name="lcid">The Locale Identifier.</param>
/// <param name="IsUIEnabled">Indicates whether the install operation is invoked through the UI.</param>
/// <returns>True if the extention is installed successfully; false otherwise.</returns>
procedure InstallExtension(PackageId: Guid; lcid: Integer; IsUIEnabled: Boolean): Boolean
begin
exit(ExtensionInstallationImpl.InstallExtension(PackageId, lcid, IsUIEnabled));
end;
/// <summary>
/// Uninstalls an extension, based on its PackageId.
/// </summary>
/// <param name="PackageId">The ID of the extension package.</param>
/// <param name="IsUIEnabled">Indicates if the uninstall operation is invoked through the UI.</param>
/// <returns>True if the extention is uninstalled successfully; false otherwise.</returns>
procedure UninstallExtension(PackageId: Guid; IsUIEnabled: Boolean): Boolean
begin
exit(ExtensionInstallationImpl.UninstallExtension(PackageId, IsUIEnabled));
end;
/// <summary>
/// Uninstalls an extension, based on its PackageId and permanently deletes the tables that contain data for the extension.
/// </summary>
/// <param name="PackageId">The ID of the extension package.</param>
/// <param name="IsUIEnabled">Indicates if the uninstall operation is invoked through the UI.</param>
/// <returns>True if the extention is uninstalled successfully; false otherwise.</returns>
procedure UninstallExtensionAndDeleteExtensionData(PackageId: Guid; IsUIEnabled: Boolean): Boolean
begin
exit(ExtensionInstallationImpl.UninstallExtension(PackageId, IsUIEnabled, true));
end;
/// <summary>
/// Uploads an extension, using a File Stream and based on the Locale Identifier.
/// This method is only applicable in SaaS environment.
/// </summary>
/// <param name="FileStream">The File Stream containing the extension to be uploaded.</param>
/// <param name="lcid">The Locale Identifier.</param>
procedure UploadExtension(FileStream: InStream; lcid: Integer)
begin
ExtensionOperationImpl.UploadExtension(FileStream, lcid);
end;
/// <summary>
/// Deploys an extension, based on its ID and Locale Identifier.
/// This method is only applicable in SaaS environment.
/// </summary>
/// <param name="AppId">The AppId of the extension.</param>
/// <param name="lcid">The Locale Identifier.</param>
/// <param name="IsUIEnabled">Indicates whether the install operation is invoked through the UI.</param>
procedure DeployExtension(AppId: Guid; lcid: Integer; IsUIEnabled: Boolean)
begin
ExtensionOperationImpl.DeployExtension(AppId, lcid, IsUIEnabled);
end;
/// <summary>
/// Unpublishes an extension, based on its PackageId.
/// An extension can only be unpublished, if it is a per-tenant one and it has been uninstalled first.
/// </summary>
/// <param name="PackageId">The PackageId of the extension.</param>
/// <returns>True if the extention is unpublished successfully; false otherwise.</returns>
procedure UnpublishExtension(PackageId: Guid): Boolean
begin
exit(ExtensionOperationImpl.UnpublishExtension(PackageId));
end;
/// <summary>
/// Downloads the source of an extension, based on its PackageId.
/// </summary>
/// <param name="PackageId">The PackageId of the extension.</param>
/// <returns>True if the operation was successful; false otherwise.</returns>
procedure DownloadExtensionSource(PackageId: Guid): Boolean
begin
exit(ExtensionOperationImpl.DownloadExtensionSource(PackageId));
end;
/// <summary>
/// Retrives the source of an extension, based on its PackageId.
/// </summary>
/// <param name="PackageId">The PackageId of the extension.</param>
/// <param name="ExtensionSourceTempBlob">TempBlob where the zip is stored.</param>
/// <returns>True if the operation was successful; false otherwise.</returns>
procedure GetExtensionSource(PackageId: Guid; var ExtensionSourceTempBlob: Codeunit "Temp Blob"): Boolean
var
FileName: Text;
begin
exit(ExtensionOperationImpl.GetExtensionSource(PackageId, ExtensionSourceTempBlob, FileName));
end;
/// <summary>
/// Checks whether an extension is installed, based on its PackageId.
/// </summary>
/// <param name="PackageId">The ID of the extension package.</param>
/// <returns>The result of checking whether an extension is installed.</returns>
procedure IsInstalledByPackageId(PackageId: Guid): Boolean
begin
exit(ExtensionInstallationImpl.IsInstalledByPackageId(PackageId));
end;
/// <summary>
/// Checks whether an extension is installed, based on its AppId.
/// </summary>
/// <param name="AppId">The AppId of the extension.</param>
/// <returns>The result of checking whether an extension is installed.</returns>
procedure IsInstalledByAppId(AppId: Guid): Boolean
begin
exit(ExtensionInstallationImpl.IsInstalledByAppId(AppId));
end;
/// <summary>
/// Retrieves a list of all the Deployment Status Entries
/// </summary>
/// <param name="NavAppTenantOperation">Gets the list of all the Deployment Status Entries.</param>
[Obsolete('Required parameter is not accessible for Cloud development', '17.0')]
procedure GetAllExtensionDeploymentStatusEntries(var NavAppTenantOperation: Record "NAV App Tenant Operation")
begin
ExtensionOperationImpl.GetAllExtensionDeploymentStatusEntries(NavAppTenantOperation);
end;
/// <summary>
/// Retrieves a list of all the Deployment Status Entries
/// </summary>
/// <param name="TempExtensionDeploymentStatus">Gets the list of all the Deployment Status Entries in a temporary record.</param>
procedure GetAllExtensionDeploymentStatusEntries(var TempExtensionDeploymentStatus: Record "Extension Deployment Status" temporary)
begin
ExtensionOperationImpl.GetAllExtensionDeploymentStatusEntries(TempExtensionDeploymentStatus);
end;
/// <summary>
/// Retrieves the AppName,Version,Schedule,Publisher by the NAVApp Tenant OperationId.
/// </summary>
/// <param name="OperationId">The OperationId of the NAVApp Tenant.</param>
/// <param name="Version">Gets the Version of the NavApp.</param>
/// <param name="Schedule">Gets the Schedule of the NavApp.</param>
/// <param name="Publisher">Gets the Publisher of the NavApp.</param>
/// <param name="AppName">Gets the AppName of the NavApp.</param>
/// <param name="Description">The Description of the NavApp; in case no name is provided, the description will replace the AppName.</param>
procedure GetDeployOperationInfo(OperationId: Guid; var Version: Text; var Schedule: Text; var Publisher: Text; var AppName: Text; Description: Text)
begin
ExtensionOperationImpl.GetDeployOperationInfo(OperationId, Version, Schedule, Publisher, AppName, Description);
end;
/// <summary>
/// Refreshes the status of the Operation.
/// </summary>
/// <param name="OperationId">The Id of the operation to be refreshed.</param>
procedure RefreshStatus(OperationId: Guid)
begin
ExtensionOperationImpl.RefreshStatus(OperationId);
end;
/// <summary>
/// Allows or disallows Http Client requests against the specified extension.
/// </summary>
/// <param name="PackageId">The Id of the extension to configure.</param>
/// <param name="AreHttpClientRqstsAllowed">The value to set for "Allow HttpClient Requests".</param>
/// <returns>True configuration was successful; false otherwise.</returns>
procedure ConfigureExtensionHttpClientRequestsAllowance(PackageId: Text; AreHttpClientRqstsAllowed: Boolean): Boolean
begin
ExtensionOperationImpl.ConfigureExtensionHttpClientRequestsAllowance(PackageId, AreHttpClientRqstsAllowed);
end;
/// <summary>
/// Gets the PackageId of the latest Extension Version by the Extension AppId.
/// </summary>
/// <param name="AppId">The AppId of the extension.</param>
/// <returns>The package ID by app ID. Empty GUID, if package with the provided app ID does not exist.</returns>
procedure GetLatestVersionPackageIdByAppId(AppId: Guid): Guid
begin
exit(ExtensionOperationImpl.GetLatestVersionPackageIdByAppId(AppId));
end;
/// <summary>
/// Gets the PackageId of the latest version of the extension by the extension's AppId.
/// </summary>
/// <param name="AppId">The AppId of the installed extension.</param>
    /// <returns>The package ID of the installed version of an extension. Empty GUID, if package with the provided app ID does not exist.</returns>
procedure GetCurrentlyInstalledVersionPackageIdByAppId(AppId: Guid): Guid
begin
exit(ExtensionOperationImpl.GetCurrentlyInstalledVersionPackageIdByAppId(AppId));
end;
/// <summary>
/// Gets the package ID of the version of the extension by the extension's AppId, Name, Version Major, Version Minor, Version Build, Version Revision.
/// </summary>
/// <param name="AppId">The AppId of the extension.</param>
/// <param name="Name">The input/output Name parameter of the extension. If there is no need to filter by this parameter, the default value is ''.</param>
/// <param name="VersionMajor">The input/output Version Major parameter of the extension. If there is no need to filter by this parameter, the default value is "0".</param>
    /// <param name="VersionMinor">The input/output Version Minor parameter of the extension. If there is no need to filter by this parameter, the default value is "0".</param>
/// <param name="VersionBuild">The input/output Version Build parameter of the extension. If there is no need to filter by this parameter, the default value is "0".</param>
/// <param name="VersionRevision">The input/output Version Revision parameter of the extension. If there is no need to filter by this parameter, the default value is "0".</param>
    /// <returns>The package ID of the extension with the specified parameters.</returns>
procedure GetSpecificVersionPackageIdByAppId(AppId: Guid; Name: Text; VersionMajor: Integer; VersionMinor: Integer; VersionBuild: Integer; VersionRevision: Integer): Guid
begin
exit(ExtensionOperationImpl.GetSpecificVersionPackageIdByAppId(AppId, Name,
VersionMajor, VersionMinor, VersionBuild, VersionRevision));
end;
/// <summary>
/// Gets the logo of an extension.
/// </summary>
/// <param name="AppId">The App ID of the extension.</param>
/// <param name="Logo">Out parameter holding the logo of the extension.</param>
procedure GetExtensionLogo(AppId: Guid; var Logo: Codeunit "Temp Blob")
begin
ExtensionOperationImpl.GetExtensionLogo(AppId, Logo);
end;
/// <summary>
/// Uploads an extension to current version, next minor or next major, using a File Stream and based on the Locale Identifier.
/// This method is only applicable in SaaS environment.
/// </summary>
/// <param name="FileStream">The File Stream containing the extension to be uploaded.</param>
/// <param name="lcid">The Locale Identifier.</param>
/// <param name="DeployTo">The version that the extension will be deployed to.</param>
procedure UploadExtensionToVersion(FileStream: InStream; lcid: Integer; DeployTo: Enum "Extension Deploy To")
begin
ExtensionOperationImpl.DeployAndUploadExtension(FileStream, lcid, DeployTo);
end;
/// <summary>
/// Returns a link to appsource market page
/// </summary>
/// <returns></returns>
[Obsolete('Replaced by "Extension Marketplace".GetMarketplaceEmbeddedUrl procedure.', '17.0')]
PROCEDURE GetMarketplaceEmbeddedUrl(): Text;
BEGIN
EXIT(ExtensionMarketplace.GetMarketplaceEmbeddedUrl());
END;
/// <summary>
    /// Extracts the message type from the AppSource response.
/// </summary>
    /// <param name="JObject">AppSource response payload as a JSON object</param>
/// <returns></returns>
[Obsolete('Replaced by "Extension Marketplace".GetMessageType procedure.', '17.0')]
PROCEDURE GetMessageType(JObject: DotNet JObject): Text;
BEGIN
EXIT(ExtensionMarketplace.GetMessageType(JObject));
END;
/// <summary>
    /// Extracts the AppSource application ID from the AppSource response.
    /// </summary>
    /// <param name="JObject">AppSource response payload as a JSON object</param>
/// <returns>Application Id in text format</returns>
[Obsolete('Replaced by "Extension Marketplace".GetApplicationIdFromData procedure.', '17.0')]
PROCEDURE GetApplicationIdFromData(JObject: DotNet JObject): Text;
BEGIN
exit(ExtensionMarketplace.GetApplicationIdFromData(JObject));
END;
/// <summary>
    /// Extracts the package ID from the AppSource response.
/// </summary>
/// <param name="ApplicationId">Appsource market application ID</param>
/// <returns>Package ID as a GUID</returns>
[Obsolete('Replaced by "Extension Marketplace".MapMarketplaceIdToPackageId procedure.', '17.0')]
PROCEDURE MapMarketplaceIdToPackageId(ApplicationId: Text): GUID;
BEGIN
exit(ExtensionMarketplace.MapMarketplaceIdToPackageId(ApplicationId));
END;
/// <summary>
/// Extracts the telemetry URL from appsource response.
/// </summary>
    /// <param name="JObject">AppSource response payload as a JSON object</param>
/// <returns></returns>
[Obsolete('Replaced by "Extension Marketplace".GetTelementryUrlFromData procedure.', '17.0')]
PROCEDURE GetTelementryUrlFromData(JObject: DotNet JObject): Text;
BEGIN
exit(ExtensionMarketplace.GetTelementryUrlFromData(JObject));
END;
/// <summary>
    /// Extracts the app ID from the AppSource response.
/// </summary>
/// <param name="ApplicationId">Appsource market application ID</param>
/// <returns></returns>
[Obsolete('Replaced by "Extension Marketplace".MapMarketplaceIdToAppId procedure.', '17.0')]
PROCEDURE MapMarketplaceIdToAppId(ApplicationId: Text): GUID;
BEGIN
exit(ExtensionMarketplace.MapMarketplaceIdToAppId(ApplicationId));
END;
}
| 50.088235 | 183 | 0.701768 |
ed8ca76700c210992e73892ccff748f11ce153b2 | 2,803 | pl | Perl | pgmig.pl | pgmig/pgmig.perl | 577ba5c1d93c577d48cdc7a854262ca7b5cae48a | [
"Apache-2.0"
] | null | null | null | pgmig.pl | pgmig/pgmig.perl | 577ba5c1d93c577d48cdc7a854262ca7b5cae48a | [
"Apache-2.0"
] | null | null | null | pgmig.pl | pgmig/pgmig.perl | 577ba5c1d93c577d48cdc7a854262ca7b5cae48a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env perl
use strict;
use warnings;
use DBI;
use DBD::Pg;
use Try::Tiny;
use Data::Dumper;
use JSON::XS;
use File::Find;
use utf8;
# some code from
# https://www.perlmonks.org/?node_id=1147720
my $total = 0;
my $current = 1;
my $do_commit = 1;
my @buf;
my $json_xs = JSON::XS->new()->utf8(1);
my $base = 'sql/pgmig/';
# raise messages as json strings
my $proto_ver = q(select set_config('pgmig.assert_proto', '1', true););
# DBI HandleError callback: DBI invokes it as (message, handle, first_value).
# It is also called manually with only a message, e.g.
# handle_error(DBI->errstr) when connect() fails -- in that path $h is undef
# and the original code crashed calling ->state on it, so guard the printf.
# Returning the (truthy) message tells DBI the error was handled, which
# suppresses the RaiseError die; callers then check the statement's own
# return value instead.
sub handle_error {
  my ($message, $h) = @_;
  printf "%s: %s\n", $h->state, $h->errstr if $h;
  return "$message";
}
# $SIG{__WARN__} hook: PostgreSQL NOTICE messages surface here as warnings.
# Notices whose payload is JSON (emitted by the pgmig test framework) are
# parsed and rendered as TAP-like progress lines; everything else is printed
# verbatim.  Relies on file globals: $json_xs, $total, $current, @buf,
# $do_commit.
sub handle_message{
  my $message = shift;
  if ($message =~ /^NOTICE: \{/) {
      $message =~ s/^NOTICE: //;
      chomp $message;
      my $data = $json_xs->decode($message);
      if ($data->{'code'} eq '01998') {
        # 01998: plan line -- announces the total number of tests.
        $total = $data->{'message'};
        $current = 1;
      } elsif ($data->{'code'} eq '01999') {
        # 01999: a test passed; discard any buffered diagnostics.
        printf "(%d/%d) %-20s: Ok\n", $current++, $total, $data->{'message'};
        @buf = ();
      } elsif ($data->{'code'} eq '02999') {
        # 02999: a test failed; print got/want plus buffered output and
        # flag the whole transaction for rollback.
        printf "(%d/%d) %-20s: Not Ok\ngot: %s\nwant: %s\n", $current++, $total, $data->{'message'}
          , $data->{'data'}{'got'},$data->{'data'}{'want'};
        print @buf;
        (@buf) = ();
        $do_commit = 0;
      } else {
        # Any other JSON notice is buffered until pass/fail is known.
        push @buf, $message;
      }
  } else {
    # save message
    # push @buf, $message;
    print $message;
  }
}
# Run every *.sql file under $base against PostgreSQL inside a single
# transaction; commit only if all tests pass, otherwise roll back.
# Returns the process exit code (0 on success or handled failure).
sub main() {
  # Connect via libpq defaults (PG* environment variables supply host/db/user).
  my $dbh = DBI->connect('dbi:Pg:', undef, undef,
    {
      AutoCommit => 0, # enable transactions
      RaiseError => 1,
      HandleError => \&handle_error,
      pg_server_prepare => 0,
    }
  ) or handle_error(DBI->errstr);
  # Route warnings (including server NOTICEs) through the progress printer.
  local $SIG{__WARN__} = \&handle_message;
  my @files;
  # NOTE(review): File::Find hands the callback basenames while chdir'd into
  # each directory; $base."$f" below assumes all .sql files live directly
  # under $base -- confirm no nested subdirectories are expected.
  find(sub {
      if (-f and /\.sql$/) {
        push @files, $_;
      }
    }, $base);
  print "Found:",Dumper(\@files);
  try {
    # Declare the assert protocol version expected by the SQL test helpers.
    $dbh->do($proto_ver) or die 'Setup failed';
    #$dbh->do('set client_encoding to utf8') or die 'Setup failed';
    foreach my $f (sort @files) {
      my $file_path = $base."$f";# 'testdata/'.$_;
      printf "Load: %s\n", $file_path;
      open my $fh, '<', $file_path
        or die "Error opening $file_path - $!\n";
      my $cmds;
      # Slurp the whole file; each file is executed as one batch.
      { local $/ = undef; $cmds = <$fh>; }
      $dbh->do($cmds) or die 'Error found';
      # handle_message() clears $do_commit when a test reports Not Ok.
      $do_commit or die 'Test failed';
    }
    $dbh->commit; # commit the changes if we get this far
    print "Finished\n";
  } catch {
    if (!$do_commit) {
      printf "Rollback ($_)\n";
    } else {
      printf "Transaction aborted (%s:%s) $_\n", $dbh->state, $dbh->errstr; # Try::Tiny copies $@ into $_
    }
    # now rollback to undo the incomplete changes
    # but do it in an eval{} as it may also fail
    eval { $dbh->rollback };
    # add other application on-error-clean-up code here
    return 1;
  };
  return 0;
}
exit(main());
| 22.604839 | 105 | 0.541919 |
ed97ceddd4004b35bf14d86429fe176c8e95fb59 | 133 | t | Perl | t/01-basic.t | ufobat/p6-I18N-LangTags | 3f46983f9b14e2d4bc6832901a5369abecb7b9bd | [
"Artistic-2.0"
] | 1 | 2021-07-17T12:01:40.000Z | 2021-07-17T12:01:40.000Z | t/01-basic.t | ufobat/p6-I18N-LangTags | 3f46983f9b14e2d4bc6832901a5369abecb7b9bd | [
"Artistic-2.0"
] | 1 | 2018-04-19T16:10:03.000Z | 2018-04-19T16:10:03.000Z | t/01-basic.t | ufobat/p6-I18N-LangTags | 3f46983f9b14e2d4bc6832901a5369abecb7b9bd | [
"Artistic-2.0"
] | 1 | 2018-04-19T16:07:25.000Z | 2018-04-19T16:07:25.000Z | use v6.c;
use Test;
use I18N::LangTags;

# extract_language_tags() pulls anything shaped like an RFC-3066 language
# tag out of free-form text; "something" and "x" are rejected, while "or"
# happens to match the tag grammar and is kept.
is extract_language_tags("de-at, en or something x"),
    ('de-at', 'en', 'or');

done-testing;
| 14.777778 | 53 | 0.661654 |
ed5119b70ae10864790d8582474a2fd94f293314 | 146 | pl | Perl | Chapter03/5.pl | PacktPublishing/Perl-6-Deep-Dive | b47fadd6bd65efd38ed4860109edc5018ce98924 | [
"MIT"
] | 9 | 2017-12-28T13:41:36.000Z | 2021-12-20T03:31:06.000Z | Chapter03/5.pl | PacktPublishing/Perl-6-Deep-Dive | b47fadd6bd65efd38ed4860109edc5018ce98924 | [
"MIT"
] | 1 | 2020-01-29T07:23:03.000Z | 2020-12-01T07:38:06.000Z | Chapter03/5.pl | PacktPublishing/Perl-6-Deep-Dive | b47fadd6bd65efd38ed4860109edc5018ce98924 | [
"MIT"
] | 2 | 2017-12-13T10:11:15.000Z | 2019-05-24T00:38:23.000Z | my $a = 3;
my $b = 4;
my $c = sqrt($a * $a + $b * $b);
say "If the sides of a right triangle are $a and $b, ";
say "then the third one is $c.";
| 18.25 | 55 | 0.527397 |
ed56ec3c8db3fcdc536134a7c869cbe81948d243 | 606 | t | Perl | test/blackbox-tests/test-cases/variables/named-dep-in-diff-question-mark.t | mndrix/dune | 30b84ff370351b13f05db34fd952dfe5d0249bcb | [
"MIT"
] | 245 | 2016-12-02T14:13:32.000Z | 2018-01-14T20:00:40.000Z | test/blackbox-tests/test-cases/variables/named-dep-in-diff-question-mark.t | mndrix/dune | 30b84ff370351b13f05db34fd952dfe5d0249bcb | [
"MIT"
] | 380 | 2017-01-28T18:46:52.000Z | 2018-01-17T13:34:51.000Z | test/blackbox-tests/test-cases/variables/named-dep-in-diff-question-mark.t | mndrix/dune | 30b84ff370351b13f05db34fd952dfe5d0249bcb | [
"MIT"
] | 59 | 2016-12-02T13:58:19.000Z | 2018-01-06T18:23:02.000Z | Regression test for using %{test} in (diff ...)
The action expander treats the second argument of diff? as "consuming
a target". Since targets needs to be known at rule creation time
rather than at rule evaluation time and dependencies are usually
evaluated at the latter stage, the below pattern could break if we
are not careful. We want to support it because it is a common pattern.
$ echo '(lang dune 2.8)' > dune-project
$ cat > dune <<EOF
> (rule
> (alias runtest)
> (deps
> (:x test.ml))
> (action
> (diff? %{x} %{x}.corrected)))
> EOF
$ touch test.ml
$ dune runtest
| 28.857143 | 70 | 0.679868 |
ed395369c2e1ff41c71f876b910ee4edc145d1fa | 551 | pl | Perl | msys64/usr/share/perl5/core_perl/unicore/lib/Jg/Alef.pl | andresfaagit/sg-desarrollo-ruby | 2c9bc5dad657d9713cc0f7fc4e883a85b83537e5 | [
"Ruby"
] | 9 | 2018-04-19T05:08:30.000Z | 2021-11-23T07:36:58.000Z | msys64/usr/share/perl5/core_perl/unicore/lib/Jg/Alef.pl | andresfaagit/sg-desarrollo-ruby | 2c9bc5dad657d9713cc0f7fc4e883a85b83537e5 | [
"Ruby"
] | 98 | 2017-11-02T19:00:44.000Z | 2022-03-22T16:15:39.000Z | msys64/usr/share/perl5/core_perl/unicore/lib/Jg/Alef.pl | andresfaagit/sg-desarrollo-ruby | 2c9bc5dad657d9713cc0f7fc4e883a85b83537e5 | [
"Ruby"
] | 9 | 2017-10-24T21:53:36.000Z | 2021-11-23T07:36:59.000Z | # !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 12.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly. Use Unicode::UCD to access the Unicode character data
# base.
return <<'END';
V12
1570
1572
1573
1574
1575
1576
1649
1652
1653
1654
1907
1909
END
| 20.407407 | 77 | 0.696915 |
ed31fb0fca8d7ab6e040032e7aebe200b23906a6 | 129 | pl | Perl | cluster-software/module.info.pl | A-damW/webmin | 90708a102f8b4eea536a80518ee6bfa837ef8f5b | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1,863 | 2015-01-04T21:45:45.000Z | 2022-03-30T09:10:50.000Z | cluster-software/module.info.pl | A-damW/webmin | 90708a102f8b4eea536a80518ee6bfa837ef8f5b | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1,233 | 2015-01-03T12:45:51.000Z | 2022-03-31T02:39:58.000Z | cluster-software/module.info.pl | A-damW/webmin | 90708a102f8b4eea536a80518ee6bfa837ef8f5b | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 546 | 2015-01-05T13:07:28.000Z | 2022-03-25T21:47:51.000Z | desc_pl=Klaster - Pakiety oprogramowania
longdesc_pl=Instaluj pakiety RPM, debian i Solaris na wielu serwerach z jednego źródła.
| 43 | 87 | 0.837209 |
ed737dbba10772ced0b01d60e2d54caad4a9d103 | 1,003 | pm | Perl | eg/tutorial/tut04/Galook.pm | polettix/App-Command | f0ccd2425658a0b5c1eeac7a40fdddd33984a8b6 | [
"Artistic-2.0",
"Apache-2.0"
] | null | null | null | eg/tutorial/tut04/Galook.pm | polettix/App-Command | f0ccd2425658a0b5c1eeac7a40fdddd33984a8b6 | [
"Artistic-2.0",
"Apache-2.0"
] | null | null | null | eg/tutorial/tut04/Galook.pm | polettix/App-Command | f0ccd2425658a0b5c1eeac7a40fdddd33984a8b6 | [
"Artistic-2.0",
"Apache-2.0"
] | null | null | null | package Galook;
use Moo;
use Log::Log4perl::Tiny qw< :easy :dead_if_first >;
extends 'App::Command';
sub BUILD_help { 'galook your foos' }
# Multi-line description used by the help system; the single-quoted heredoc
# keeps the text verbatim (no interpolation).
sub BUILD_description {
    my $text = <<'END';
This is some description.
On multiple lines, I mean.
Here it ends the description.
END
    return $text;
}
# Declares the command's options for App::Command: each entry maps one
# option to its Getopt::Long spec, environment override, and default.
sub BUILD_parameters {
    my @params = (
        {
            name        => 'config',
            getopt      => 'config|c=s',
            environment => 'GALOOK_CONFIG',
            default     => undef,
            help        => 'path to the configuration file',
        },
        {
            name        => 'foo',
            getopt      => 'foo|f!',          # negatable: --no-foo
            environment => 'GALOOK_FOO',
            default     => 1,
            help        => 'foo for bar',
        },
    );
    return \@params;
}
sub BUILD_sources { return [qw< +CmdLine +Environment +JSON +Default >] }
# Entry point invoked by App::Command: delegate to a subcommand when
# positional args are present, otherwise just report the 'foo' setting
# via Log::Log4perl::Tiny's INFO.
sub execute {
   my $self = shift;
   return $self->dispatch if $self->args;
   INFO "foo is ", $self->configuration('foo') ? 'true' : 'false';
}
1;
| 21.804348 | 73 | 0.537388 |
ed66c4ec831a57be9116869a4d0056ab5da6645a | 517 | pl | Perl | TAO/tests/Portable_Interceptors/Register_Initial_References/run_test.pl | cflowe/ACE | 5ff60b41adbe1772372d1a43bcc1f2726ff8f810 | [
"DOC"
] | 36 | 2015-01-10T07:27:33.000Z | 2022-03-07T03:32:08.000Z | TAO/tests/Portable_Interceptors/Register_Initial_References/run_test.pl | cflowe/ACE | 5ff60b41adbe1772372d1a43bcc1f2726ff8f810 | [
"DOC"
] | 2 | 2018-08-13T07:30:51.000Z | 2019-02-25T03:04:31.000Z | TAO/tests/Portable_Interceptors/Register_Initial_References/run_test.pl | cflowe/ACE | 5ff60b41adbe1772372d1a43bcc1f2726ff8f810 | [
"DOC"
] | 38 | 2015-01-08T14:12:06.000Z | 2022-01-19T08:33:00.000Z | eval '(exit $?0)' && eval 'exec perl -S $0 ${1+"$@"}'
& eval 'exec perl -S $0 $argv:q'
if 0;
# $Id: run_test.pl 83710 2008-11-13 07:09:12Z johnnyw $
# -*- perl -*-
use lib "$ENV{ACE_ROOT}/bin";
use PerlACE::TestTarget;

# Create ACE/TAO test target #1 from the test configuration.
my $server = PerlACE::TestTarget::create_target (1) || die "Create target 1 failed\n";

# Spawn the server and wait for it to exit (killed after the configured
# startup interval if it hangs); its exit status is the test result.
# NOTE(review): $SV and $test are package globals (no 'my') -- intentional
# in PerlACE test scripts, which do not run under strict.
$SV = $server->CreateProcess ("server");
$test = $SV->SpawnWaitKill ($server->ProcessStartWaitInterval());

if ($test != 0) {
    print STDERR "ERROR: test returned $test\n";
    exit 1;
}

exit 0;
| 22.478261 | 86 | 0.613153 |
ed1858f2bd6d723404e4c48abc7f04783603f81d | 488 | pm | Perl | t/lib/Fake/Tweet.pm | yannk/loudtwitter | 1ffd9b5e0172b784fcdf04b40653af914d488705 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2015-11-05T07:16:49.000Z | 2015-11-05T07:16:49.000Z | t/lib/Fake/Tweet.pm | yannk/loudtwitter | 1ffd9b5e0172b784fcdf04b40653af914d488705 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | t/lib/Fake/Tweet.pm | yannk/loudtwitter | 1ffd9b5e0172b784fcdf04b40653af914d488705 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | package Fake::Tweet;
use strict;
use base qw(Class::Accessor::Fast);

# Test double for a tweet: plain read/write accessors for the raw fields.
__PACKAGE__->mk_accessors(qw/tweet_id text created_at user in_reply_to_status_id in_reply_to_user_id/);

use Twittary::Model::Tweet;

# Alias the production class's derived/behavioral methods into this package
# by copying their code refs into the symbol table, so their logic runs
# against this lightweight object.
*created_at_obj = \&Twittary::Model::Tweet::created_at_obj;
*is_reply = \&Twittary::Model::Tweet::is_reply;
*is_lifetweet = \&Twittary::Model::Tweet::is_lifetweet;
*is_noise = \&Twittary::Model::Tweet::is_noise;
*lt_regex = \&Twittary::Model::Tweet::lt_regex;

1;
| 30.5 | 103 | 0.72541 |
73e93b73b2f784c8b22e16c08cac1cbd8e785326 | 3,826 | pm | Perl | src/cpp/SPL/script/SPL/Operator/Instance/CatchContext.pm | IBMStreams/OSStreams | c6287bd9ec4323f567d2faf59125baba8604e1db | [
"Apache-2.0"
] | 10 | 2021-02-19T20:19:24.000Z | 2021-09-16T05:11:50.000Z | src/cpp/SPL/script/SPL/Operator/Instance/CatchContext.pm | xguerin/openstreams | 7000370b81a7f8778db283b2ba9f9ead984b7439 | [
"Apache-2.0"
] | 7 | 2021-02-20T01:17:12.000Z | 2021-06-08T14:56:34.000Z | src/cpp/SPL/script/SPL/Operator/Instance/CatchContext.pm | IBMStreams/OSStreams | c6287bd9ec4323f567d2faf59125baba8604e1db | [
"Apache-2.0"
] | 4 | 2021-02-19T18:43:10.000Z | 2022-02-23T14:18:16.000Z | #
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DOXIGEN_SKIP_FOR_USERS
## @class SPL::Operator::Instance::CatchContext
# \brief Class that represents the context of the operator regarding a catch annotation. The
# CatchContext is an OptionalContext that can be obtained with the "Catch"
# string.
package SPL::Operator::Instance::CatchContext;
use strict;
use warnings;
use SPL::Operator::Instance::OptionalContext;
our @ISA = qw(SPL::Operator::Instance::OptionalContext);
## @method bool mustCatchExceptions()
# Check if the operator must catch exceptions
# @return true if the operator must catch exceptions, false otherwise.
# True when an @catch annotation requests any exception handling
# (i.e. its 'exception' attribute was anything other than "none").
sub mustCatchExceptions()
{
    my $self = shift;
    return $self->{mustCatchException_};
}
## @method bool mustCatchStreamsExceptions()
# Check if the operator must catch Streams Exceptions only.
# @return true if the operator must catch only Streams exceptions, false otherwise.
# True only when the annotation asked for Streams exceptions specifically.
sub mustCatchStreamsExceptions
{
    my $self = shift;
    return $self->{exceptionKind_} eq "streams" ? 1 : 0;
}
## @method bool mustCatchStandardExceptions()
# Check if the operator must catch Standard Exceptions, which also includes Streams exceptions.
# @return true if the operator must catch std exceptions, false otherwise.
# True only when the annotation asked for standard (std) exceptions,
# a superset of Streams exceptions.
sub mustCatchStandardExceptions
{
    my $self = shift;
    return $self->{exceptionKind_} eq "std" ? 1 : 0;
}
## @method bool mustCatchAllExceptions()
# Check if the operator must catch all Exceptions, which also includes standard exceptions.
# @return true if the operator must catch all exceptions, false otherwise.
# True only when the annotation asked to catch every exception kind.
sub mustCatchAllExceptions
{
    my $self = shift;
    return $self->{exceptionKind_} eq "all" ? 1 : 0;
}
## @method bool mustTraceTuples()
# Check if the operator must trace tuples that caused an exception to be triggered.
# @return true if tuples must be traced, false otherwise
# True when tuples that triggered an exception should be traced
# (@catch tupleTrace="true").
sub mustTraceTuples
{
    my $self = shift;
    return $self->{tupleTrace_};
}
## @method bool mustPrintStackTrace()
# Check if the operator must print the stack trace that caused an exception to be triggered.
# @return true if must print the stack trace, false otherwise
# True (the default) unless the annotation disabled stack traces
# (@catch stackTrace="false").
sub mustPrintStackTrace
{
    my $self = shift;
    return $self->{stackTrace_};
}
# Construct a CatchContext from the operator context's annotations.
# Defaults: no catching, no tuple tracing, stack traces enabled; an
# @catch annotation's attributes override them.
sub new
{
    my ($class, $opContext) = @_;

    my $self = {
        mustCatchException_ => 0,
        exceptionKind_      => "",
        tupleTrace_         => 0,
        stackTrace_         => 1
    };

    foreach my $annot ( @{ $opContext->getAnnotations() } ) {
        next unless $annot->getName() eq "catch";
        foreach my $attribute ( @{ $annot->getValue()->getAttributes() } ) {
            my $attrName = $attribute->getName();
            if ( $attrName eq "exception" ) {
                # Attribute values arrive as quoted strings ("none", "streams",
                # "std", "all"); strip the surrounding double quotes before
                # comparing.  (The old code removed the first two quote chars
                # wherever they appeared; anchoring is equivalent for these
                # values and safer for anything else.)
                my $kind = $attribute->getValue();
                $kind =~ s/\A"//;
                $kind =~ s/"\z//;
                $self->{exceptionKind_}      = $kind;
                $self->{mustCatchException_} = ( $kind eq "none" ) ? 0 : 1;
            } elsif ( $attrName eq "tupleTrace" ) {
                $self->{tupleTrace_} = 1 if $attribute->getValue() eq "true";
            } elsif ( $attrName eq "stackTrace" ) {
                $self->{stackTrace_} = 0 if $attribute->getValue() eq "false";
            }
        }
    }
    return bless( $self, $class );
}
1;
| 28.340741 | 95 | 0.676163 |
ed9fa9ac86210a64e8923f3556acf0e068e9d69d | 1,649 | pm | Perl | auto-lib/Paws/Datasync/TaskSchedule.pm | 0leksii/aws-sdk-perl | b2132fe3c79a06fd15b6137e8a0eb628de722e0f | [
"Apache-2.0"
] | 164 | 2015-01-08T14:58:53.000Z | 2022-02-20T19:16:24.000Z | auto-lib/Paws/Datasync/TaskSchedule.pm | 0leksii/aws-sdk-perl | b2132fe3c79a06fd15b6137e8a0eb628de722e0f | [
"Apache-2.0"
] | 348 | 2015-01-07T22:08:38.000Z | 2022-01-27T14:34:44.000Z | auto-lib/Paws/Datasync/TaskSchedule.pm | 0leksii/aws-sdk-perl | b2132fe3c79a06fd15b6137e8a0eb628de722e0f | [
"Apache-2.0"
] | 87 | 2015-04-22T06:29:47.000Z | 2021-09-29T14:45:55.000Z | # Generated by default/object.tt
package Paws::Datasync::TaskSchedule;
use Moose;
has ScheduleExpression => (is => 'ro', isa => 'Str', required => 1);
1;
### main pod documentation begin ###
=head1 NAME
Paws::Datasync::TaskSchedule
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::Datasync::TaskSchedule object:
$service_obj->Method(Att1 => { ScheduleExpression => $value, ..., ScheduleExpression => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::Datasync::TaskSchedule object:
$result = $service_obj->Method(...);
$result->Att1->ScheduleExpression
=head1 DESCRIPTION
Specifies the schedule you want your task to use for repeated
executions. For more information, see Schedule Expressions for Rules
(https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html).
=head1 ATTRIBUTES
=head2 B<REQUIRED> ScheduleExpression => Str
A cron expression that specifies when AWS DataSync initiates a
scheduled transfer from a source to a destination location.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::Datasync>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: L<https://github.com/pplu/aws-sdk-perl>
Please report bugs to: L<https://github.com/pplu/aws-sdk-perl/issues>
=cut
| 26.596774 | 102 | 0.755609 |
73f0180d779da1f81532e59e7bee7b697b6646ca | 19,062 | pl | Perl | build.pl | Acidburn0zzz/source | 91132175bb22f39e9f6d37083e9ce7937312db07 | [
"MIT"
] | null | null | null | build.pl | Acidburn0zzz/source | 91132175bb22f39e9f6d37083e9ce7937312db07 | [
"MIT"
] | null | null | null | build.pl | Acidburn0zzz/source | 91132175bb22f39e9f6d37083e9ce7937312db07 | [
"MIT"
] | null | null | null | #! /usr/bin/perl -w
package main;
use Template;
use strict;
use Cwd;
use FindBin qw($Bin);
use lib "$Bin/perl";
use vars qw( @LOCALE %COMPRESS $RUN_YUIC_JS $RUN_YUIC_CSS @PROCESS @PROCESS_APACHE $VARS $INSTALL %mtime_cache);
use Getopt::Long;
use IO::File;
use Digest::MD5 qw(md5 md5_hex);
use POSIX qw(strftime);
use Template::Provider;
use Template::Provider::Locale;
use Template::Constants qw(:debug);
use Template::Plugin::GT;
use Parallel::ForkManager;
use FSi18n;
use FSi18n::PO;
use strict;
@LOCALE = get_locale("po/dl/*/falling-sky.*.po");
chdir $Bin or die "Could not chdir $Bin : $!";
$| = 1;
my ( $usage, %argv, %input ) = "";
%input = (
"debug" => "Produce debug output (no compression)",
"install" => "put files in install directory instead of compile directory",
"config=s" => "config file (default: config.inc + config.local)",
"locale=s" => "build just one locale (default: all known)",
"type=s" => "build just one type of file (default: all types; options: php html js)",
"verbose" => "be chatty about what's going on",
"maxjobs=i" => "max jobs to run in parallel",
"h|help" => "show option help"
);
my $result = GetOptions( \%argv, keys %input );
$argv{"v"} ||= $argv{"n"};
$argv{"maxjobs"} ||= 0;
$argv{"maxjobs"} = 0 if (defined &DB::DB) ;
if ( ( !$result ) || ( $argv{h} ) ) {
&showOptionsHelp;
exit 0;
}
$argv{"config"} ||= "./config.inc";
@LOCALE = split( /[\s,]+/, $argv{"locale"} ) if ( $argv{"locale"} );
# Native-language display names for each supported locale, used when
# rendering language-selection links.  Locales discovered from the .po
# files but not listed here fall back to an empty display name.
my %NAMES = (
    'en_US' => 'English',
    'cs_CZ' => 'Čeština',
    'de_DE' => 'Deutsch',             # fixed typo: was 'Deutcsh'
    'es_ES' => 'Español',
    'fr_FR' => 'Français',
    'hr_HR' => 'Hrvatski',
    'hu_HU' => 'Magyar',
    'ja_JP' => '日本語',
    'nb_NO' => 'Norsk bokmål',
    'nl_NL' => 'Nederlands',
    'pt_BR' => 'Português (Brasil)',
    'ru_RU' => 'Русский',             # fixed: leading char was a Latin 'P'
    'sv_SE' => 'Svenska',
    'zh_CN' => '简体中文',             # fixed typo: was '筒体中文'
);

foreach my $name (@LOCALE) {
    $NAMES{$name} ||= '';    # ensure every built locale has an entry
}

$VARS->{NAMES} = \%NAMES;
require( $argv{config} );
require( $argv{config} . ".local" ) if ( -f ( $argv{config} . ".local" ) );
$VARS->{"subdomain"} ||= $VARS->{"domain"}; # When generating hostnames, use this specific domain instead. Defaults to just the domain name.
#get_svn();
get_git();
#$VARS->{"version"} = $VARS->{"svn_Revision"} . "-" . time;
$VARS->{"version"} = `../dist_support/git-revision.sh`;
$VARS->{"AddLanguage"} = get_addlanguage(@LOCALE);
# Text portion is seperate.
@PROCESS = (
[ "js", "index.js" ],
[ "css", "index.css" ],
[ "html", "index.html" ],
[ "html", "neg.html" ],
[ "html", "comcast.html" ],
[ "html", "locale.html" ],
[ "html", "faq_helpdesk.html" ],
[ "html", "broken.html" ],
[ "html", "faq_disable.html" ],
[ "html", "version.html" ],
[ "html", "faq.html" ],
[ "html", "stats.html" ],
[ "html", "mission.html" ],
[ "html", "mirrors.html" ],
[ "html", "mirrorstats.html" ],
[ "html", "source.html" ],
[ "html", "6to4.html" ],
[ "html", "faq_6to4.html" ],
[ "html", "faq_ipv4_only.html" ],
[ "html", "faq_no_ipv6.html" ],
[ "html", "faq_teredo_minimum.html" ],
[ "html", "faq_teredo.html" ],
[ "html", "faq_v6ns_bad.html" ],
[ "html", "faq_broken_aaaa.html" ],
[ "html", "faq_firefox_plugins.html" ],
[ "html", "faq_browser_plugins.html" ],
[ "html", "simple_test.html" ],
[ "html", "faq_whyipv6.html" ],
[ "html", "faq_pmtud.html" ],
[ "html", "attributions.html" ],
[ "html", "faq_buggydns1.html" ],
[ "html", "faq_avoids_ipv6.html" ],
[ "html", "faq_tunnel.html" ],
[ "html", "faq_tunnel_6rd.html" ],
[ "html", "nat.html" ],
[ "php", "common.php" ],
[ "php", "comment.php" ],
[ "php", "survey.php" ],
[ "php", "errors.php" ],
);
# Apache configuration templates, built once (en_US only) by
# process_apache().  Each entry is [ template name, install name ].
@PROCESS_APACHE = (
[ "dot.htaccess", ".htaccess" ],
[ "ip.htaccess", "ip/.htaccess" ],
[ "images.htaccess", "images/.htaccess" ],
[ "images-nc.htaccess", "images-nc/.htaccess" ],
[ "vhost-short.conf.example", "vhost-short.conf.example" ],
[ "vhost-long.conf.example", "vhost-long.conf.example" ],
[ "config.js.example", "config.js.example" ],
[ "private.js.example", "private.js.example" ],
);
# Create the fork manager used to build locales in parallel (--maxjobs
# workers).  NOTE(review): "new Parallel::ForkManager" is indirect object
# syntax; Parallel::ForkManager->new(...) would be the modern form.
my @errors;
my $pm = new Parallel::ForkManager( $argv{"maxjobs"} );
# Collect the identifier (locale name) of any worker that exits non-zero
# so the parent can report all failing locales at the end of the run.
$pm->run_on_finish(
sub {
my ( $pid, $exit_code, $ident ) = @_;
# print "** $ident just got out of the pool " . "with PID $pid and exit code: $exit_code\n";
push( @errors, $ident ) if ($exit_code);
}
);
# Build all templated output for a single locale inside a forked worker.
#
#   "pot"   - regenerate the master po template (po/falling-sky.pot).
#   "en_US" - English is the source language; reuse the master pot.
#   others  - load the downloaded po/dl/*/falling-sky.<locale>.po catalog,
#             dying if no catalog can be found.
#
# Runs under Parallel::ForkManager: in the parent, $pm->start() returns
# the child's PID (true) and we return immediately; in the child it
# returns 0 and we fall through, do the work, and exit via $pm->finish().
sub run_locale {
    my $locale = shift;
    my $pid    = $pm->start($locale) and return;
    my $i18n;
    print "Preparing locale for $locale\n";
    if ( $locale eq "pot" ) {
        # Create a new pot from scratch, seeded with the standard po header.
        $i18n = new FSi18n( locale => $locale );
        $i18n->filename("po/falling-sky.pot");
        my $poheader = $i18n->poheader();
        $i18n->add( "", undef, $poheader );
    } elsif ( $locale eq "en_US" ) {
        $i18n = new FSi18n( locale => $locale );
        $i18n->filename("po/falling-sky.pot");
        $i18n->read_file();    # Dies if missing.
    } else {
        $i18n = new FSi18n( locale => $locale );
        my $pattern = "po/dl/*/falling-sky.$locale.po";
        my ($found) = glob($pattern);
        if ($found) {
            $i18n->filename($found);
        } else {
            die "could not find $pattern\n";
        }
        # NOTE: the original also had an unreachable
        #   $i18n->filename(...) if ($locale eq "en_US");
        # here — this branch only runs when $locale is NOT en_US, so the
        # dead statement has been removed.
        $i18n->read_file();    # Dies if missing.
    }
    # Render every template for this locale, then write the (possibly
    # updated) catalog back out.
    foreach my $p (@PROCESS) {
        process( $p, $locale, $i18n );
    }
    $i18n->write_file();
    $pm->finish();
} ## end sub run_locale
# Always run "pot". With force.
{
local $argv{"force"} = 1;
run_locale("pot");
}
foreach my $locale (@LOCALE) {
run_locale($locale) unless ($locale eq "pot");
} ## end foreach my $locale (@LANG)
$pm->wait_all_children;
die "Errors with --locale value(s) @errors" if (@errors);
foreach my $p (@PROCESS_APACHE) {
process_apache( $p, "en_US" );
}
fixup_apache( "$INSTALL/.htaccess", "$INSTALL/vhost-long.conf.example" );
system( "rsync", "-av", "transparent", "$INSTALL" );
system("cd $INSTALL && ln -s . isp");
system("cd $INSTALL && ln -s . helpdesk");
# Ensure the directory portion of $filename exists (mkdir -p style).
# A bare filename with no directory component is a no-op.
sub prepdir_for_file {
    my ($filename) = @_;
    return unless $filename =~ m#^(.*)/[^/]+$#;
    my $dir = $1;
    system( "mkdir", "-p", $dir ) unless -d $dir;
}
# Render one [ type, name ] entry from @PROCESS for a given locale.
# html/js templates are written as "$name.$locale"; all other types are
# built only for en_US.  The rendered output is saved under $INSTALL,
# then minified (our_yui) and pre-gzipped (our_gzip) unless this is the
# "pot" pseudo-locale, which only harvests translatable strings.
sub process {
my ( $aref, $locale, $i18n ) = @_;
my ( $type, $name ) = @{$aref};
# Honor --type to restrict the build to a single file type.
return if ( ( $argv{"type"} ) && ( $argv{"type"} ne $type ) );
my $cwd = cwd;
my $lname = $name;
if ( $name =~ /html|js/ ) {
$lname .= "." . $locale; # For localized content.
} else {
return unless ( $locale =~ /en_US/ );
}
print "Processing: $locale\: $type/$name\n";
# mtime of any previously installed copy (currently unused — the
# freshness short-circuit below is commented out).
my $new_mtime = ( stat("$INSTALL/$lname") )[9];
prepdir_for_file("$INSTALL/$lname");
# Templates are rendered relative to their type directory.
chdir $type or die "Failed to chdir $type : $!";
my $output;
my %provider_config = ( INCLUDE_PATH => "." );
# Custom provider that resolves templates per-locale.
my $locale_handler = Template::Provider::Locale->new( { VARS => $VARS, LOCALE => $locale, VERBOSE => $argv{"verbose"} } );
my $template_config = {
LOAD_PERL => 1, # Locally defined plugins
INTERPOLATE => 0, # expand "$var" in plain text
EVAL_PERL => 1, # evaluate Perl code blocks
OUTPUT => sub { $output = shift; }, # capture render into $output
LOAD_TEMPLATES => [$locale_handler],
POST_CHOMP => 1,
PRE_CHOMP => 1,
TRIM => 1,
RELATIVE => 1,
ENCODING => 'UTF-8',
FILTERS => {
i18n => [ \&filter_i18n_factory, 1 ] # dynamic [% FILTER i18n %] support
},
# DEBUG => DEBUG_ALL,
};
my $newest = newest_mtime( $name, $locale );
if ( $name =~ /(index|version).html/ ) {
# $DB::single=1;
}
if ( ( $name =~ /(index|version).html/ ) ) {
# $DB::single=1;
$newest = time(); # Force index.html updates whenever we see prior items have built
}
# if ( !$argv{"force"} ) {
# if ( ($newest) && ($new_mtime) ) {
# if ( $newest <= $new_mtime ) {
# chdir $cwd or die "Failed to return to $cwd: $!";
# return;
# }
# }
# }
print "Compiling: $type/$lname\n" if ( $argv{"debug"} );
# Per-render template variables: the i18n catalog plus build metadata.
$VARS->{i18n} = $i18n;
$VARS->{"date_utc"} = strftime( '%d-%b-%Y', gmtime time );
$VARS->{"compiled"} = scalar localtime time;
$VARS->{"locale"} = $locale;
# In HTML, the locale code should be xx-YY
$VARS->{"localeUC"} = $locale;
$VARS->{"localeUC"} =~ s/_/-/g;
$VARS->{"localeUC"} =~ s/(-[a-z]+)/uc $1/ge;
($VARS->{"lang"}) = split(/[-_]/,$locale);
$VARS->{"langUC"} = uc $VARS->{"lang"};
my $template = Template->new($template_config) or die "Could not create template object";
my $success = $template->process( $name, $VARS ) || die( "crap!" . $template->error() );
# if ($output =~ /\[%/) {
# $template_process(\$output,$VARS);
# }
# We now have $output - we need to return back to the $cwd
# so any relative path names are still correct
chdir $cwd or die "Could not return to $cwd : $!";
# Now save $output
mkdir $INSTALL unless ( -d $INSTALL );
die "Missing directory: $INSTALL" unless ( -d $INSTALL );
# "pot" runs exist only to harvest strings; never install their output.
if ( $locale ne "pot" ) {
$DB::single=1;
our_save( "$INSTALL/$lname", $output );
our_yui( "$INSTALL/$lname", $type );
our_gzip("$INSTALL/$lname");
}
} ## end sub process
# Render one [ template, install-name ] entry from @PROCESS_APACHE.
# Unlike process(), chomp/trim are disabled (Apache config whitespace is
# preserved), no i18n filter is installed, and the output is saved
# verbatim — no minification or gzip.
sub process_apache {
my ( $aref, $locale ) = @_;
my ( $name, $lname ) = @{$aref};
my $type = "apache";
my $cwd = cwd;
print "Processing: $name -> $lname\n";
# mtime of any previously installed copy (currently unused).
my $new_mtime = ( stat("$INSTALL/$lname") )[9];
prepdir_for_file("$INSTALL/$lname");
chdir $type or die "Failed to chdir $type : $!";
my $output;
my %provider_config = ( INCLUDE_PATH => "." );
my $locale_handler = Template::Provider::Locale->new( { VARS => $VARS, LOCALE => $locale, VERBOSE => $argv{"verbose"} } );
my $template_config = {
LOAD_PERL => 1, # Locally defined plugins
INTERPOLATE => 0, # expand "$var" in plain text
EVAL_PERL => 1, # evaluate Perl code blocks
OUTPUT => sub { $output = shift; }, # capture render into $output
LOAD_TEMPLATES => [$locale_handler],
POST_CHOMP => 0,
PRE_CHOMP => 0,
TRIM => 0,
# DEBUG => DEBUG_ALL,
};
print "Compiling: $type/$lname\n" if ( $argv{"debug"} );
$VARS->{"date_utc"} = strftime( '%d-%b-%Y', gmtime time );
$VARS->{"compiled"} = scalar localtime time;
my $template = Template->new($template_config) or die "Could not create template object";
my $success = $template->process( $name, $VARS ) || die( "crap!" . $template->error() );
# We now have $output - we need to return back to the $cwd
# so any relative path names are still correct
chdir $cwd or die "Could not return to $cwd : $!";
# Now save $output
mkdir $INSTALL unless ( -d $INSTALL );
die "Missing directory: $INSTALL" unless ( -d $INSTALL );
# Debug mode: Don't do YUI
our_save( "$INSTALL/$lname", $output );
} ## end sub process_apache
# Compute a content digest of the first file (normalized: comments,
# leading/trailing/duplicate whitespace and blank lines stripped, and
# lines containing the BUILDMD5HASH placeholder ignored), then replace
# every BUILDMD5HASH placeholder in ALL the given files with
# "<git revision>-<md5>" so served configs can be traced to a build.
sub fixup_apache {
my (@files) = @_;
my ($first) = @files;
my $ctx = Digest::MD5->new;
my $digest;
# NOTE(review): this first hexdigest is taken on an EMPTY context (and
# resets it) — it always prints the MD5 of "", and appears to be a
# leftover debug aid.
$digest = $ctx->hexdigest;
print "digest 1 $digest\n";
my $file = IO::File->new("<$first") or die "could not open $first : $!";
while (<$file>) {
# Clean up some stuff, so we don't make changes for things like comments and whitespace
s/#.*$//;
s/^\s+//;
s/\s+$//;
s/\s+/ /g;
next unless (/./);
$ctx->add($_) unless (/BUILDMD5HASH/); # Ignore the lines that might be volatile
}
$digest = join( "-", $VARS->{"git_Revision"}, $ctx->hexdigest );
print "digest 2 $digest\n";
# In-place substitution of the placeholder across all target files.
system( "perl", "-pi", "-e", "s/BUILDMD5HASH/$digest/g", @files );
} ## end sub fixup_apache
# Run the configured compressor/minifier (from %COMPRESS, keyed by file
# type) on $file in place under $INSTALL.  The original is preserved as
# "$file.orig"; the command template's [INPUT]/[OUTPUT] placeholders are
# filled in before running.  No-op when no compressor exists for the
# type, or in --debug mode.  Dies if the compressor fails or produces an
# empty file (except tidy's exit code 1, which only signals warnings).
sub our_yui {
    my ( $file, $type ) = @_;

    # BUGFIX: the original used "my $run = $COMPRESS{$type} if (...)".
    # "my" with a statement modifier is undefined behavior in Perl and
    # can leak a stale value between calls; declare and assign separately.
    my $run;
    $run = $COMPRESS{$type} if ( exists $COMPRESS{$type} );
    return unless ($run);
    return if ( $argv{"debug"} );
    my $cwd = cwd;
    # Work with a path relative to $INSTALL so the command templates
    # (which assume the install dir as cwd) resolve correctly.
    $file =~ s#$INSTALL/*##;
    chdir $INSTALL or die;
    our_rename( "$file", "$file.orig" );
    $run =~ s#\[INPUT]#$file.orig#g;
    $run =~ s#\[OUTPUT]#$file#g;
    print "% $run\n" if ( $argv{"debug"} );
    my $rc = system($run);
    if ( ($rc) || ( !-f $file ) || ( !-s $file ) ) {
        if ( ( $rc == 256 ) && ( $run =~ /tidy/ ) ) {
            # tidy exits 1 (rc 256) for mere warnings; treat as success.
        } else {
            die "Failed to run: $run (RC=$rc)\n";
        }
    }
    chdir $cwd;    # Restore directory
} ## end sub our_yui
# Atomically write $buffer to $filename: write "$filename.new" first,
# then rename into place so readers never observe a partial file.
sub our_save {
    my ( $filename, $buffer ) = @_;

    # Three-arg open with a lexical filehandle (the original used a
    # bareword SAVEFILE handle with two-arg open), and a checked close so
    # buffered write failures (e.g. disk full) are not silently ignored.
    open( my $savefh, '>', "$filename.new" )
      or die "Failed to create $filename.new: $!";
    # binmode $savefh, ":utf8";
    print {$savefh} $buffer;
    close($savefh) or die "Failed to write $filename.new: $!";
    our_rename( "$filename.new", "$filename" );
}
# Rename $from to $to, announcing the move in --debug mode; dies with a
# diagnostic on failure.
sub our_rename {
    my ( $from, $to ) = @_;
    if ( $argv{"debug"} ) {
        print "% mv $from $to\n";
    }
    rename( $from, $to ) or die "Failed to rename $from $to : $!";
}
# Pre-compress an installed html/js/css file to "<file>.gz" (served by
# Apache to gzip-capable clients).  HTML files additionally get their
# "/index.js" references rewritten to "/index.js.gz" so the compressed
# page links to the compressed script.  Non-matching filenames are left
# alone.  NOTE(review): "cat $file | gzip" is a useless use of cat, and
# the "-Sgz" suffix option is irrelevant when output is redirected —
# presumably leftovers; confirm before changing the pipeline.
sub our_gzip {
my ($file) = @_;
my $newname = $file;
$newname =~ s#(\.(html|js|css))#${1}.gz#;
# If the substitution did nothing, this file type is not compressed.
return if ( $file eq $newname );
my $cmd = "cat $file | gzip -f -9 -Sgz > $newname";
if ( $file =~ /html/ ) {
$cmd = "cat $file | perl -pi -e 's#/index.js#/index.js.gz#' | gzip -f -9 -Sgz > $newname";
}
print "% $cmd\n" if ( $argv{"debug"} );
my $rc = system($cmd);
if ($rc) {
# failed!
die "Failed to run: $cmd\n";
}
} ## end sub our_gzip
# Print a usage summary derived from the %input Getopt::Long spec hash:
# each "name|n=s" style key becomes a padded "-name --n" flag column
# followed by its description, sorted alphabetically.
sub showOptionsHelp {
    print "Usage: $0 [options] $usage\n";
    print "where options can be:\n";
    my @help_lines;
    for my $spec ( sort keys %input ) {
        # Strip the Getopt type suffix (=s, :i, ...) then split aliases.
        my ($names) = split( /[=:]/, $spec );
        my ( $primary, $alias ) = split( /\|/, $names );
        my $flags = $alias ? "-$primary --$alias" : " --$primary";
        # Pad the flag column to a fixed 20-character width.
        my $padded = substr( $flags . ( ' ' x 20 ), 0, 20 );
        push @help_lines, "$padded $input{$spec}\n";
    }
    print sort @help_lines;
}
# Return the newest modification time of $file and, recursively, of any
# templates it [% PROCESS "..." %]-includes.  html/js/css names are
# first resolved to their localized variant ("$file.$locale", falling
# back to "$file.en_US") if one exists.  Results are memoized per
# cwd+file in %mtime_cache.  Used to decide whether an installed file is
# stale relative to its template sources.
sub newest_mtime {
my ( $file, $locale ) = @_;
return unless ($file);
# If this is a localizable file... localize it.
if ( $file =~ /\.(html|js|css)/ ) {
if ( -f "$file\.$locale" ) {
$file = "$file\.$locale";
} elsif ( -f "$file.en_US" ) {
$file = "$file\.en_US";
}
}
# Cache key includes cwd because process() chdirs into type directories.
my $key = cwd . "/" . $file;
if ( !exists $mtime_cache{$key} ) {
# NOTE(review): "my $mtime ||= ..." — the ||= on a freshly declared
# (undef) variable is just an assignment; plain "=" was intended.
my $mtime ||= ( stat($file) )[9];
# Are there any dependencies?
my $fh = IO::File->new("<$file");
if ($fh) {
while (<$fh>) {
# Scan for [% PROCESS "name" %] directives and recurse into each.
my (@includes) = ( $_ =~ m#\[\%\s+PROCESS\s+"(.*?)"#g );
foreach my $inc (@includes) {
print " (scanning $inc)\n" if ( $argv{"debug"} );
my $m = newest_mtime( $inc, $locale );
if ( ($m) && ( $m > $mtime ) ) {
$mtime = $m;
}
}
}
close $fh;
}
$mtime_cache{$key} = $mtime;
} ## end if ( !exists $mtime_cache{$key} )
return $mtime_cache{$key};
} ## end sub newest_mtime
# Stub: unpacks its arguments but does nothing.  Apparently a placeholder
# for per-template post-processing (see the commented-out second
# template pass in process()); safe to remove if never wired up.
sub my_process {
my ( $context, $file ) = @_;
}
# Populate $VARS->{svn_*} from "svn info" output (legacy; superseded by
# get_git() but retained for reference/fallback).  Each "Key: Value"
# line becomes $VARS->{svn_Key} = "Value", with spaces in the key
# replaced by underscores (e.g. "Last Changed Date" -> Last_Changed_Date).
sub get_svn {
    my @svn = `TZ=UTC svn info`;
    foreach my $svn ( grep( /./, @svn ) ) {
        chomp $svn;
        # BUGFIX: split on the first ": " only (limit 2) so values that
        # themselves contain ": " are not truncated.
        my ( $key, $value ) = split( /: /, $svn, 2 );
        $key =~ s/ /_/g;
        $VARS->{ "svn_" . $key } = $value;
    }
}
# Populate $VARS->{git_*} from the local git checkout:
#   git_URL               - origin fetch URL (falling back to push URL)
#   git_Revision          - "<last tag>.<total commit count>"
#   git_Last_Changed_Date - committer date of the most recent commit
sub get_git {
    my $remotes = `TZ=UTC git remote -v`;
    my ( $fetch, $push );
    if ( $remotes =~ /origin\s+(\S+)\s+\(fetch\)/ms ) {
        $fetch = $1;
    }
    if ( $remotes =~ /origin\s+(\S+)\s+\(push\)/ms ) {
        $push = $1;
    }
    my $revisioncount  = `git log --oneline | wc -l`;
    my $projectversion = `git describe --tags --long`;
    # First "-"-separated field of "git describe" output is the tag name.
    my ($cleanversion) = split( /-/, $projectversion );
    my $version        = "$cleanversion.$revisioncount";
    my $last           = `TZ=UTC git log -1 --format=%cd`;

    # BUGFIX: the original chomp()ed $fetch/$push unconditionally, which
    # warns when the repo has no "origin" remote; only trim defined values.
    # (As before, $version is built first and chomped afterwards.)
    for ( $fetch, $push, $revisioncount, $projectversion, $cleanversion, $version, $last ) {
        chomp if defined;
    }
    $VARS->{git_URL}      = $fetch || $push;
    $VARS->{git_Revision} = $version;
    $VARS->{git_Last_Changed_Date} = $last;
}
# Build the Apache AddLanguage configuration block for the given locales.
# For each locale "xx_YY" (skipping the "pot" pseudo-locale) this emits
#   AddLanguage xx .xx_YY      (only for the first locale of language xx)
#   AddLanguage xx-YY .xx_YY   (always)
# and returns the concatenated directives as one string.
sub get_addlanguage {
    my (@locales) = @_;
    my %seen_lang;
    my @directives;
    for my $locale (@locales) {
        next if $locale eq "pot";
        my ( $lang, $region ) = split( /_/, $locale );
        unless ( $seen_lang{$lang}++ ) {
            push @directives, "AddLanguage $lang .$locale\n";
        }
        push @directives, "AddLanguage $lang-$region .$locale\n";
    }
    return join( "", @directives );
}
# Template Toolkit dynamic-filter factory for [% FILTER i18n %] blocks.
# $arg1 is an optional msgctxt disambiguation context.  Text is first
# canonicalized (whitespace collapsed/trimmed).  In "pot" mode the
# filter records each string (with its source template as a reference)
# into the catalog and returns it unchanged; for real locales it returns
# the translation looked up from the locale's catalog.
sub filter_i18n_factory {
my ( $context, $arg1 ) = @_;
# What file did we find this in? We'll possibly want to make a note of it.
my $component_name = $context->stash->get("component")->name;
my $modtime = $context->stash->get("component")->modtime;
my $locale = $context->stash->get("locale");
my $localeUC = $context->stash->get("localeUC");
my $i18n = $context->stash->get("i18n");
if ( $locale eq "pot" ) {
# Harvest mode: add each canonicalized string to the pot catalog.
return sub {
my $text = shift;
my $lo = FSi18n::PO->new(); # new() does not (today) actually take the msgid, reference args
$text =~ s/^\s+//;
$text =~ s/\s+/ /g; # Canonicalize any size whitespace to single space
$text =~ s/\s+$//;
$lo->msgid($text);
$lo->msgstr("");
$lo->reference($component_name);
$lo->msgctxt($arg1) if ($arg1);
$i18n->add( $text, $arg1,$lo );
return $text;
};
} else {
#TODO Actually do .po lookups
# Translation mode: canonicalize then look up in the loaded catalog.
return sub {
my $text = shift;
$text =~ s/^\s+//;
$text =~ s/\s+/ /g; # Canonicalize any size whitespace to single space
$text =~ s/\s+$//;
$DB::single=1;
my $found = $i18n->find_text($text,$arg1);
return $found;
};
}
} ## end sub filter_i18n_factory
# Scan po files matching $glob and return the list of locales to build:
# "en_US" is always first, followed by one entry per ".<locale>.po" file
# found (en_US itself is skipped to avoid a duplicate entry).
sub get_locale {
    my ($glob) = @_;
    my @locales = ("en_US");
    for my $po_file ( glob($glob) ) {
        next unless $po_file =~ /\.([^.]+)\.po/;
        my $locale = $1;
        push @locales, $locale unless $locale eq "en_US";
    }
    return @locales;
}
| 28.535928 | 144 | 0.500997 |
ed8a81e9629458644cfa96ebca78a0a9c46610bf | 5,900 | pm | Perl | nt_mdt_pal/src/perl/NtMdtPal.pm | kaitai-io/formats-kaitai-io.github.io | 2700514a2a8f67c5351fe93962c70abea02fd3d3 | [
"0BSD"
] | 4 | 2018-12-10T09:21:19.000Z | 2021-11-03T16:43:22.000Z | nt_mdt_pal/src/perl/NtMdtPal.pm | kaitai-io/formats-kaitai-io.github.io | 2700514a2a8f67c5351fe93962c70abea02fd3d3 | [
"0BSD"
] | null | null | null | nt_mdt_pal/src/perl/NtMdtPal.pm | kaitai-io/formats-kaitai-io.github.io | 2700514a2a8f67c5351fe93962c70abea02fd3d3 | [
"0BSD"
] | 3 | 2019-04-08T08:22:22.000Z | 2021-10-10T19:11:51.000Z | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
use strict;
use warnings;
use IO::KaitaiStruct 0.009_000;
use Encode;
########################################################################
package NtMdtPal;
our @ISA = 'IO::KaitaiStruct::Struct';
sub from_file {
my ($class, $filename) = @_;
my $fd;
open($fd, '<', $filename) or return undef;
binmode($fd);
return new($class, IO::KaitaiStruct::Stream->new($fd));
}
sub new {
my ($class, $_io, $_parent, $_root) = @_;
my $self = IO::KaitaiStruct::Struct->new($_io);
bless $self, $class;
$self->{_parent} = $_parent;
$self->{_root} = $_root || $self;;
$self->_read();
return $self;
}
sub _read {
my ($self) = @_;
$self->{signature} = $self->{_io}->read_bytes(26);
$self->{count} = $self->{_io}->read_u4be();
$self->{meta} = ();
my $n_meta = $self->count();
for (my $i = 0; $i < $n_meta; $i++) {
$self->{meta}[$i] = NtMdtPal::Meta->new($self->{_io}, $self, $self->{_root});
}
$self->{something2} = $self->{_io}->read_bytes(1);
$self->{tables} = ();
my $n_tables = $self->count();
for (my $i = 0; $i < $n_tables; $i++) {
$self->{tables}[$i] = NtMdtPal::ColTable->new($self->{_io}, $self, $self->{_root});
}
}
sub signature {
my ($self) = @_;
return $self->{signature};
}
sub count {
my ($self) = @_;
return $self->{count};
}
sub meta {
my ($self) = @_;
return $self->{meta};
}
sub something2 {
my ($self) = @_;
return $self->{something2};
}
sub tables {
my ($self) = @_;
return $self->{tables};
}
########################################################################
package NtMdtPal::Meta;
our @ISA = 'IO::KaitaiStruct::Struct';
sub from_file {
my ($class, $filename) = @_;
my $fd;
open($fd, '<', $filename) or return undef;
binmode($fd);
return new($class, IO::KaitaiStruct::Stream->new($fd));
}
sub new {
my ($class, $_io, $_parent, $_root) = @_;
my $self = IO::KaitaiStruct::Struct->new($_io);
bless $self, $class;
$self->{_parent} = $_parent;
$self->{_root} = $_root || $self;;
$self->_read();
return $self;
}
sub _read {
my ($self) = @_;
$self->{unkn00} = $self->{_io}->read_bytes(3);
$self->{unkn01} = $self->{_io}->read_bytes(2);
$self->{unkn02} = $self->{_io}->read_bytes(1);
$self->{unkn03} = $self->{_io}->read_bytes(1);
$self->{colors_count} = $self->{_io}->read_u2le();
$self->{unkn10} = $self->{_io}->read_bytes(2);
$self->{unkn11} = $self->{_io}->read_bytes(1);
$self->{unkn12} = $self->{_io}->read_bytes(2);
$self->{name_size} = $self->{_io}->read_u2be();
}
sub unkn00 {
my ($self) = @_;
return $self->{unkn00};
}
sub unkn01 {
my ($self) = @_;
return $self->{unkn01};
}
sub unkn02 {
my ($self) = @_;
return $self->{unkn02};
}
sub unkn03 {
my ($self) = @_;
return $self->{unkn03};
}
sub colors_count {
my ($self) = @_;
return $self->{colors_count};
}
sub unkn10 {
my ($self) = @_;
return $self->{unkn10};
}
sub unkn11 {
my ($self) = @_;
return $self->{unkn11};
}
sub unkn12 {
my ($self) = @_;
return $self->{unkn12};
}
sub name_size {
my ($self) = @_;
return $self->{name_size};
}
########################################################################
package NtMdtPal::Color;
our @ISA = 'IO::KaitaiStruct::Struct';
sub from_file {
my ($class, $filename) = @_;
my $fd;
open($fd, '<', $filename) or return undef;
binmode($fd);
return new($class, IO::KaitaiStruct::Stream->new($fd));
}
sub new {
my ($class, $_io, $_parent, $_root) = @_;
my $self = IO::KaitaiStruct::Struct->new($_io);
bless $self, $class;
$self->{_parent} = $_parent;
$self->{_root} = $_root || $self;;
$self->_read();
return $self;
}
sub _read {
my ($self) = @_;
$self->{red} = $self->{_io}->read_u1();
$self->{unkn} = $self->{_io}->read_u1();
$self->{blue} = $self->{_io}->read_u1();
$self->{green} = $self->{_io}->read_u1();
}
sub red {
my ($self) = @_;
return $self->{red};
}
sub unkn {
my ($self) = @_;
return $self->{unkn};
}
sub blue {
my ($self) = @_;
return $self->{blue};
}
sub green {
my ($self) = @_;
return $self->{green};
}
########################################################################
package NtMdtPal::ColTable;
our @ISA = 'IO::KaitaiStruct::Struct';
sub from_file {
my ($class, $filename) = @_;
my $fd;
open($fd, '<', $filename) or return undef;
binmode($fd);
return new($class, IO::KaitaiStruct::Stream->new($fd));
}
sub new {
my ($class, $_io, $_parent, $_root) = @_;
my $self = IO::KaitaiStruct::Struct->new($_io);
bless $self, $class;
$self->{_parent} = $_parent;
$self->{_root} = $_root || $self;;
$self->_read();
return $self;
}
sub _read {
my ($self) = @_;
$self->{size1} = $self->{_io}->read_u1();
$self->{unkn} = $self->{_io}->read_u1();
$self->{title} = Encode::decode("UTF-16", $self->{_io}->read_bytes(@{$self->_root()->meta()}[$self->index()]->name_size()));
$self->{unkn1} = $self->{_io}->read_u2be();
$self->{colors} = ();
my $n_colors = (@{$self->_root()->meta()}[$self->index()]->colors_count() - 1);
for (my $i = 0; $i < $n_colors; $i++) {
$self->{colors}[$i] = NtMdtPal::Color->new($self->{_io}, $self, $self->{_root});
}
}
sub size1 {
my ($self) = @_;
return $self->{size1};
}
sub unkn {
my ($self) = @_;
return $self->{unkn};
}
sub title {
my ($self) = @_;
return $self->{title};
}
sub unkn1 {
my ($self) = @_;
return $self->{unkn1};
}
sub colors {
my ($self) = @_;
return $self->{colors};
}
sub index {
my ($self) = @_;
return $self->{index};
}
1;
| 20.205479 | 128 | 0.502542 |
ed15153e5a46f86713d4b516ac7b44e617681fef | 24,460 | pm | Perl | src/MGRAST/lib/resources/compute.pm | wilke/MG-RAST | 508e4736bafcf2a45d3f67d87dd196890f2da7a0 | [
"BSD-2-Clause"
] | null | null | null | src/MGRAST/lib/resources/compute.pm | wilke/MG-RAST | 508e4736bafcf2a45d3f67d87dd196890f2da7a0 | [
"BSD-2-Clause"
] | null | null | null | src/MGRAST/lib/resources/compute.pm | wilke/MG-RAST | 508e4736bafcf2a45d3f67d87dd196890f2da7a0 | [
"BSD-2-Clause"
] | null | null | null | package resources::compute;
use strict;
use warnings;
no warnings('once');
use MGRAST::Analysis;
use List::MoreUtils qw(any uniq);
use File::Temp qw(tempfile tempdir);
use Conf;
use parent qw(resources::resource);
# Override parent constructor
sub new {
my ($class, @args) = @_;
# Call the constructor of the parent class
my $self = $class->SUPER::new(@args);
# Add name / attributes
$self->{name} = "compute";
$self->{example} = '"columns": ["mgm4441619.3","mgm4441656.4","mgm4441680.3","mgm4441681.3"], "rows": ["Eukaryota","Bacteria","Archaea"], "data": [[135,410,848,1243],[4397,6529,71423,204413],[1422,2156,874,1138]]';
$self->{attributes} = { alphadiversity => { "id" => [ "string", "unique metagenome identifier" ],
"url" => [ "string", "resource location of this object instance" ],
"data" => [ 'float', 'alpha diversity value' ] },
normalize => { 'data' => ['list', ['list', ['float', 'normalized value']]],
'rows' => ['list', ['string', 'row id']],
'columns' => ['list', ['string', 'column id']] },
significance => { 'data' => ['list', ['list', ['float', 'significance value']]],
'rows' => ['list', ['string', 'row name']],
'columns' => ['list', ['string', 'column name']] },
distance => { 'data' => ['list', ['list', ['float', 'distance value']]],
'rows' => ['list', ['string', 'row id']],
'columns' => ['list', ['string', 'column id']] },
heatmap => { 'data' => ['list', ['list', ['float', 'normalized value']]],
'rows' => ['list', ['string', 'row id']],
'columns' => ['list', ['string', 'column id']],
'colindex' => ['list', ['float', 'column id index']],
'rowindex' => ['list', ['float', 'row id index']],
'coldend' => ['object', 'dendrogram object for columns'],
'rowdend' => ['object', 'dendrogram object for rows'] },
pcoa => { 'data' => [ 'list', ['object', [
{'id' => ['string', 'column id'], 'pco' => ['list', ['float', 'principal component value']]},
"pcoa object" ]
] ],
'pco' => ['list', ['float', 'average principal component value']] }
};
$self->{norm} = ["DESeq_blind","standardize","quantile","DESeq_per_condition","DESeq_pooled","DESeq_pooled_CR"];
$self->{distance} = ["bray-curtis", "euclidean", "maximum", "manhattan", "canberra", "minkowski", "difference"];
$self->{cluster} = ["ward", "single", "complete", "mcquitty", "median", "centroid"];
$self->{significance} = ["Kruskal-Wallis", "t-test-paired", "Wilcoxon-paired", "t-test-unpaired", "Mann-Whitney-unpaired-Wilcoxon", "ANOVA-one-way"];
return $self;
}
# resource is called without any parameters
# this method must return a description of the resource
sub info {
my ($self) = @_;
my $content = { 'name' => $self->name,
'url' => $self->cgi->url."/".$self->name,
'description' => "Calculate various statistics for given input data.",
'type' => 'object',
'documentation' => $self->cgi->url.'/api.html#'.$self->name,
'requests' => [
{ 'name' => "info",
'request' => $self->cgi->url."/".$self->name,
'description' => "Returns description of parameters and attributes.",
'method' => "GET",
'type' => "synchronous",
'attributes' => "self",
'parameters' => { 'options' => {},
'required' => {},
'body' => {} }
},
{ 'name' => "alphadiversity",
'request' => $self->cgi->url."/".$self->name."/alphadiversity/{ID}",
'description' => "Calculate alpha diversity value for given ID and taxon level.",
'example' => [ $self->cgi->url."/".$self->name."/alphadiversity/mgm4447943.3?level=order",
"retrieve alpha diversity for order taxon" ],
'method' => "GET",
'type' => "synchronous",
'attributes' => $self->{attributes}{alphadiversity},
'parameters' => { 'options' => { 'level' => [ 'cv', $self->hierarchy->{organism} ],
'source' => [ 'cv', [@{$self->source->{protein}}, @{$self->source->{rna}}] ] },
'required' => { 'id' => ["string", "unique object identifier"] },
'body' => {} }
},
{ 'name' => "normalize",
'request' => $self->cgi->url."/".$self->name."/normalize",
'description' => "Calculate normalized values for given input data.",
'example' => [ 'curl -X POST -d \'{'.$self->{example}.'}\' "'.$self->cgi->url."/".$self->name.'/normalize"',
"retrieve normalized values for input abundances" ],
'method' => "POST",
'type' => "synchronous",
'attributes' => $self->{attributes}{normalize},
'parameters' => { 'options' => {},
'required' => {},
'body' => { "data" => ['list', ['list', ['int', 'raw value']]],
"rows" => ['list', ['string', 'row id']],
"columns" => ['list', ['string', 'column id']],
"norm" => ['cv', [map {[$_, $_." normalization method"]} @{$self->{norm}}]] } }
},
{ 'name' => "significance",
'request' => $self->cgi->url."/".$self->name."/significance",
'description' => "Calculate significance values for given input data.",
'example' => [ 'curl -X POST -d \'{"test":"Kruskal-Wallis","groups":["whale","whale","cow","cow"],'.$self->{example}.'}\' "'.$self->cgi->url."/".$self->name.'/significance"',
"retrieve significance values for input abundances and groups using the 'Kruskal-Wallis' significance test" ],
'method' => "POST",
'type' => "synchronous",
'attributes' => $self->{attributes}{significance},
'parameters' => { 'options' => {},
'required' => {},
'body' => { "data" => ['list', ['list', ['int', 'raw value']]],
"rows" => ['list', ['string', 'row id']],
"columns" => ['list', ['string', 'column id']],
"groups" => ['list', ['string', 'group name']],
"test" => ['cv', [map {[$_, $_." significance testing method"]} @{$self->{significance}}]],
"norm" => ['cv', [map {[$_, $_." normalization method"]} @{$self->{norm}}]],
"raw" => ["boolean", "option to use raw data (not normalize)"] } }
},
{ 'name' => "distance",
'request' => $self->cgi->url."/".$self->name."/distance",
'description' => "Calculate a distance matrix for given input data.",
'example' => [ 'curl -X POST -d \'{distance":"euclidean",'.$self->{example}.'}\' "'.$self->cgi->url."/".$self->name.'/distance"',
"retrieve distance matrix of normalized input abundances using 'euclidean' distance method" ],
'method' => "POST",
'type' => "synchronous",
'attributes' => $self->{attributes}{distance},
'parameters' => { 'options' => {},
'required' => {},
'body' => { "data" => ['list', ['list', ['float', 'raw or normalized value']]],
"rows" => ['list', ['string', 'row id']],
"columns" => ['list', ['string', 'column id']],
"distance" => ['cv', [map {[$_, $_." distance method"]} @{$self->{distance}}]],
"norm" => ['cv', [map {[$_, $_." normalization method"]} @{$self->{norm}}]],
"raw" => ["boolean", "option to use raw data (not normalize)"] } }
},
{ 'name' => "heatmap",
'request' => $self->cgi->url."/".$self->name."/heatmap",
'description' => "Calculate a dendrogram for given input data.",
'example' => [ 'curl -X POST -d \'{"raw":0,"cluster":"mcquitty",'.$self->{example}.'}\' "'.$self->cgi->url."/".$self->name.'/heatmap"',
"retrieve dendrogram of normalized input abundances using 'mcquitty' cluster method" ],
'method' => "POST",
'type' => "synchronous",
'attributes' => $self->{attributes}{heatmap},
'parameters' => { 'options' => {},
'required' => {},
'body' => { "data" => ['list', ['list', ['float', 'raw or normalized value']]],
"rows" => ['list', ['string', 'row id']],
"columns" => ['list', ['string', 'column id']],
"cluster" => ['cv', [map {[$_, $_." cluster method"]} @{$self->{cluster}}]],
"distance" => ['cv', [map {[$_, $_." distance method"]} @{$self->{distance}}]],
"norm" => ['cv', [map {[$_, $_." normalization method"]} @{$self->{norm}}]],
"raw" => ["boolean", "option to use raw data (not normalize)"] } }
},
{ 'name' => "pcoa",
'request' => $self->cgi->url."/".$self->name."/pcoa",
'description' => "Calculate a PCoA for given input data.",
'example' => [ 'curl -X POST -d \'{"raw":1,"distance":"euclidean",'.$self->{example}.'}\' "'.$self->cgi->url."/".$self->name.'/pcoa"',
"retrieve PCO of raw input abundances using 'euclidean' distance method" ],
'method' => "POST",
'type' => "synchronous",
'attributes' => $self->{attributes}{pcoa},
'parameters' => { 'options' => {},
'required' => {},
'body' => { "data" => ['list', ['list', ['float', 'raw or normalized value']]],
"rows" => ['list', ['string', 'row id']],
"columns" => ['list', ['string', 'column id']],
"distance" => ['cv', [map {[$_, $_." distance method"]} @{$self->{distance}}]],
"norm" => ['cv', [map {[$_, $_." normalization method"]} @{$self->{norm}}]],
"raw" => ["boolean", "option to use raw data (not normalize)"] } }
}
]
};
$self->return_data($content);
}
# Override parent request function
sub request {
my ($self) = @_;
# determine sub-module to use
if (scalar(@{$self->rest}) == 0) {
$self->info();
} elsif (($self->rest->[0] eq 'alphadiversity') && $self->rest->[1]) {
$self->diversity_compute($self->rest->[1]);
} elsif (any {$self->rest->[0] eq $_} ('normalize', 'significance', 'distance', 'heatmap', 'pcoa')) {
$self->abundance_compute($self->rest->[0]);
} elsif (any {$self->rest->[0] eq $_} ('stats', 'drisee', 'kmer')) {
$self->sequence_compute($self->rest->[0]);
} else {
$self->info();
}
}
sub sequence_compute {
my ($self, $type) = @_;
$self->return_data( {"ERROR" => "compute request $type is not currently available"}, 404 );
}
sub diversity_compute {
my ($self, $mgid) = @_;
# check id format
my (undef, $id) = $mgid =~ /^(mgm)?(\d+\.\d+)$/;
if (! $id) {
$self->return_data( {"ERROR" => "invalid id format: " . $mgid}, 400 );
}
# paramaters
my $level = $self->cgi->param('level') || 'species';
my $source = $self->cgi->param('source') || 'RefSeq';
# initialize analysis obj with mgid
my $master = $self->connect_to_datasource();
my $mgdb = MGRAST::Analysis->new( $master->db_handle );
unless (ref($mgdb)) {
$self->return_data({"ERROR" => "could not connect to analysis database"}, 500);
}
$mgdb->set_jobs([$id]);
my @alpha = values %{ $mgdb->get_rarefaction_curve([$source], 1, $level) };
if (@alpha != 1) {
$self->return_data({"ERROR" => "unable to calculate alpha diversity"}, 500);
}
my $data = { id => 'mgm'.$id,
url => $self->cgi->url.'/alphadiversity/mgm'.$id.'?level='.$level.'&source='.$source,
data => sprintf("%.3f", $alpha[0]) };
$self->return_data($data);
}
sub abundance_compute {
my ($self, $type) = @_;
# paramaters
my $raw = $self->cgi->param('raw') || 0;
my $test = $self->cgi->param('test') || 'Kruskal-Wallis';
my $norm = $self->cgi->param('norm') || 'DESeq_blind';
my $cluster = $self->cgi->param('cluster') || 'ward';
my $distance = $self->cgi->param('distance') || 'bray-curtis';
my $groups = $self->cgi->param('groups') ? [split(/,/, $self->cgi->param('groups'))] : [];
my $infile = '';
# posted data
my $post_data = $self->cgi->param('POSTDATA') ? $self->cgi->param('POSTDATA') : join("", $self->cgi->param('keywords'));
if ($post_data) {
my ($data, $col, $row) = ([], [], []);
eval {
my $json_data = $self->json->decode($post_data);
if (exists $json_data->{raw}) { $raw = $json_data->{raw}; }
if (exists $json_data->{test}) { $test = $json_data->{test}; }
if (exists $json_data->{norm}) { $norm = $json_data->{norm}; }
if (exists $json_data->{cluster}) { $cluster = $json_data->{cluster}; }
if (exists $json_data->{distance}) { $distance = $json_data->{distance}; }
$data = $json_data->{data};
$col = $json_data->{columns};
$row = $json_data->{rows};
$groups = exists($json_data->{groups}) ? $json_data->{groups} : [];
};
if ($@ || (@$data == 0)) {
$self->return_data( {"ERROR" => "unable to obtain POSTed data: ".$@}, 500 );
}
if (scalar(@$col) < 2) {
$self->return_data( {"ERROR" => "a minimum of 2 columns are required"}, 400 );
}
if (scalar(@$row) < 2) {
$self->return_data( {"ERROR" => "a minimum of 2 rows are required"}, 400 );
}
if ($type eq 'significance') {
if (scalar(@$groups) < 3) {
$self->return_data( {"ERROR" => "a minimum of 3 groups are required"}, 400 );
}
if (scalar(@$groups) != scalar(@$col)) {
$self->return_data( {"ERROR" => "number of groups must match number of columns"}, 400 );
}
}
# transform POSTed json to input file format
my ($tfh, $tfile) = tempfile($type."XXXXXXX", DIR => $Conf::temp, SUFFIX => '.txt');
eval {
print $tfh "\t".join("\t", @$col)."\n";
for (my $i=0; $i<scalar(@$data); $i++) {
print $tfh $row->[$i]."\t".join("\t", @{$data->[$i]})."\n";
}
};
if ($@) {
$self->return_data( {"ERROR" => "POSTed data format is invalid: ".$@}, 500 );
}
close $tfh;
chmod 0666, $tfile;
$infile = $tfile;
# data sent in file upload
} elsif ($self->cgi->param('data')) {
$infile = $self->form_file('data', $type, 'txt');
} else {
$self->return_data( {"ERROR" => "POST request missing data"}, 400 );
}
# check cv
unless (any {$_ eq $test} @{$self->{significance}}) {
$self->return_data({"ERROR" => "test '$test' is invalid, use one of: ".join(",", @{$self->{significance}})}, 400);
}
unless (any {$_ eq $norm} @{$self->{norm}}) {
$self->return_data({"ERROR" => "norm '$norm' is invalid, use one of: ".join(",", @{$self->{norm}})}, 400);
}
unless (any {$_ eq $cluster} @{$self->{cluster}}) {
$self->return_data({"ERROR" => "cluster '$cluster' is invalid, use one of: ".join(",", @{$self->{cluster}})}, 400);
}
unless (any {$_ eq $distance} @{$self->{distance}}) {
$self->return_data({"ERROR" => "distance '$distance' is invalid, use one of: ".join(",", @{$self->{distance}})}, 400);
}
my $data;
# nomalize
if ($type eq 'normalize') {
$data = $self->normalize($infile, $norm, 1);
}
# significance
elsif ($type eq 'significance') {
if (! $raw) {
$infile = $self->normalize($infile, $norm);
}
$data = $self->significance($infile, $groups, $test, 1);
}
# distance
elsif ($type eq 'distance') {
if (! $raw) {
$infile = $self->normalize($infile, $norm);
}
$data = $self->distance($infile, $distance, 1);
}
# heatmap
elsif ($type eq 'heatmap') {
if (! $raw) {
$infile = $self->normalize($infile, $norm);
}
$data = $self->heatmap($infile, $distance, $cluster, 1);
}
# pcoa
elsif ($type eq 'pcoa') {
if (! $raw) {
$infile = $self->normalize($infile, $norm);
}
$data = $self->pcoa($infile, $distance, 1);
}
$self->return_data($data);
}
sub form_file {
my ($self, $param, $prefix, $suffix) = @_;
my $infile = '';
my $fname = $self->cgi->param($param);
if ($fname) {
if ($fname =~ /\.\./) {
$self->return_data({"ERROR" => "Invalid parameters, trying to change directory with filename, aborting"}, 400);
}
if ($fname !~ /^[\w\d_\.]+$/) {
$self->return_data({"ERROR" => "Invalid parameters, filename allows only word, underscore, dot (.), and number characters"}, 400);
}
my $fhdl = $self->cgi->upload($param);
if (defined $fhdl) {
my ($bytesread, $buffer);
my $io_handle = $fhdl->handle;
my ($tfh, $tfile) = tempfile($prefix."XXXXXXX", DIR => $Conf::temp, SUFFIX => '.'.$suffix);
while ($bytesread = $io_handle->read($buffer, 4096)) {
print $tfh $buffer;
}
close $tfh;
chmod 0666, $tfile;
$infile = $tfile;
} else {
$self->return_data({"ERROR" => "storing object failed - could not open target file"}, 507);
}
} else {
$self->return_data({"ERROR" => "Invalid parameters, requires filename and data"}, 400);
}
return $infile;
}
sub normalize {
my ($self, $fname, $method, $json) = @_;
my $time = time;
my $src = $Conf::bin."/norm_deseq.r";
my $fout = $Conf::temp."/rdata.normalize.".$time;
my $rcmd = qq(source("$src")
MGRAST_preprocessing(
norm_method="$method",
file_in="$fname",
file_out="$fout",
produce_fig=FALSE )
);
$self->run_r($rcmd);
if ($json) {
return $self->parse_matrix($fout);
} else {
return $fout;
}
}
sub significance {
my ($self, $fname, $groups, $test, $json) = @_;
my $time = time;
my $src = $Conf::bin."/group_stats_plot.r";
my $fout = $Conf::temp."/rdata.significance.".$time;
my $grps = 'c('.join(',', map {'"'.$_.'"'} @$groups).')';
my $rcmd = qq(source("$src")
group_stats_plot(
file_in="$fname",
file_out="$fout",
stat_test="$test",
order_by=NULL,
order_decreasing=TRUE,
my_grouping=$grps )
);
$self->run_r($rcmd);
if ($json) {
return $self->parse_matrix($fout);
} else {
return $fout;
}
}
sub distance {
my ($self, $fname, $dist, $json) = @_;
my $time = time;
my $src = $Conf::bin."/calc_distance.r";
my $fout = $Conf::temp."/rdata.distance.".$time;
my $rcmd = qq(source("$src")
MGRAST_distance(
file_in="$fname",
file_out="$fout",
dist_method="$dist" )
);
$self->run_r($rcmd);
if ($json) {
return $self->parse_matrix($fout);
} else {
return $fout;
}
}
sub heatmap {
my ($self, $fname, $dist, $clust, $json) = @_;
my $time = time;
my $src = $Conf::bin."/dendrogram.r";
my ($fcol, $frow) = ($Conf::temp."/rdata.col.$time", $Conf::temp."/rdata.row.$time");
my $rcmd = qq(source("$src")
MGRAST_dendrograms(
file_in="$fname",
file_out_column="$fcol",
file_out_row="$frow",
dist_method="$dist",
clust_method="$clust",
produce_figures=FALSE )
);
$self->run_r($rcmd);
if ($json) {
my $data = $self->parse_matrix($fname);
($data->{colindex}, $data->{coldend}) = $self->ordered_distance($fcol);
($data->{rowindex}, $data->{rowdend}) = $self->ordered_distance($frow);
return $data;
} else {
return ($fcol, $frow);
}
}
# Run a principal-coordinates analysis (PCoA) R script on the given file.
# With $json set, the R output is parsed into
#   { pco  => [ per-component values from rows named PCO* ],
#     data => [ { id => row-name, pco => [coordinates...] }, ... ] }
# otherwise the raw output file path is returned.
sub pcoa {
my ($self, $fname, $dist, $json) = @_;
my $time = time;
my $src = $Conf::bin."/plot_pco.r";
my $fout = $Conf::temp."/rdata.pcoa.".$time;
# the qq() block is the literal R program; keep it free of Perl comments
my $rcmd = qq(source("$src")
MGRAST_plot_pco(
file_in="$fname",
file_out="$fout",
dist_method="$dist",
headers=0 )
);
$self->run_r($rcmd);
if ($json) {
my $data = { data => [], pco => [] };
# output is TSV; first field of each line is a (quoted) row name
my @matrix = map { [split(/\t/, $_)] } split(/\n/, $self->read_file($fout));
foreach my $row (@matrix) {
my $r = shift @$row;
@$row = map {$_ * 1.0} @$row;
$r =~ s/\"//g;
if ($r =~ /^PCO/) {
# rows named PCO* carry one value each for the pco list
push @{$data->{pco}}, $row->[0];
} else {
# every other row is a sample with its coordinate vector
push @{$data->{data}}, {'id' => $r, 'pco' => $row};
}
}
return $data;
} else {
return $fout;
}
}
sub run_r {
    # Execute a chunk of R code in a batch R session.
    # The previous implementation shelled out via `echo '$rcmd' | R`, which
    # breaks (and is shell-injectable) as soon as the R code contains a
    # single quote, and its eval/$@ check could never fire because system()
    # does not die on a non-zero exit.  Use a list-form pipe open instead
    # (no shell involved) and report failure when R exits non-zero.
    my ($self, $rcmd) = @_;
    my $R = ($Conf::r_executable) ? $Conf::r_executable : "R";
    my $ok = eval {
        open(my $r_in, '|-', $R, '--vanilla', '--slave')
            or die "cannot start R ($R): $!\n";
        print {$r_in} $rcmd;
        # close() on a pipe waits for the child; $? holds its exit status
        close($r_in) or die "R exited with status " . ($? >> 8) . "\n";
        1;
    };
    if (! $ok) {
        $self->return_data({"ERROR" => "Error running R: " . $@}, 500);
    }
}
sub read_file {
    # Slurp the whole contents of a (temporary) result file, delete the
    # file, and return the contents.  Responds with HTTP 400 through
    # return_data() when the file cannot be read or is empty.
    my ($self, $fname) = @_;
    my $data = "";
    eval {
        # three-arg open with a lexical handle; the previous two-arg
        # bareword open(DFH, "<$fname") silently ignored open failures,
        # which then surfaced only as an empty-data error with no cause
        open(my $fh, '<', $fname) or die "cannot open $fname: $!\n";
        $data = do { local $/; <$fh> };    # slurp entire file
        close $fh;
        unlink $fname;                     # results are one-shot temp files
    };
    if ($@ || (! $data)) {
        $self->return_data({"ERROR" => "Unable to retrieve results: ".$@}, 400);
    }
    return $data;
}
sub ordered_distance {
    # Parse a dendrogram result file: the first line is a comma-separated
    # leaf ordering, the second line is skipped, and every remaining line
    # is a tab-separated row of the integer distance/merge matrix.
    # Returns (\@ordering, \@matrix).
    my ($self, $fname) = @_;

    my @lines = split /\n/, $self->read_file($fname);

    my $header     = shift @lines;
    my @leaf_order = map { int($_) } split /,/, $header;

    shift @lines;    # discard separator line

    my @matrix;
    push @matrix, [ map { int($_) } split /\t/, $_ ] for @lines;

    return (\@leaf_order, \@matrix);
}
sub parse_matrix {
    # Turn a tab-separated matrix file (column headers on the first line,
    # row label in the first field of every other line) into
    # { columns => [...], rows => [...], data => [[num, ...], ...] }.
    my ($self, $fname) = @_;

    my @table = map { [ split /\t/, $_ ] } split /\n/, $self->read_file($fname);

    my $result  = { data => [], rows => [], columns => [] };
    my $headers = shift @table;
    shift @$headers;    # drop the header cell above the row-label column
    $result->{columns} = $headers;

    for my $line (@table) {
        my $label = shift @$line;
        @$line = map { $_ * 1.0 } @$line;    # force numeric values
        push @{ $result->{rows} }, $label;
        push @{ $result->{data} }, $line;
    }
    return $result;
}
1;
| 43.06338 | 218 | 0.450859 |
73db8b8cba6ab444b18f743c229f19bba0c94e4d | 956 | pm | Perl | lib/MusicBrainz/Server/Controller/WS/js/ReleaseGroup.pm | kellnerd/musicbrainz-server | 9e058e10219ea6b8942cfd64160ffe19769f747b | [
"BSD-2-Clause"
] | 577 | 2015-01-15T12:18:50.000Z | 2022-03-16T20:41:57.000Z | lib/MusicBrainz/Server/Controller/WS/js/ReleaseGroup.pm | kellnerd/musicbrainz-server | 9e058e10219ea6b8942cfd64160ffe19769f747b | [
"BSD-2-Clause"
] | 1,227 | 2015-04-16T01:00:29.000Z | 2022-03-30T15:08:46.000Z | lib/MusicBrainz/Server/Controller/WS/js/ReleaseGroup.pm | kellnerd/musicbrainz-server | 9e058e10219ea6b8942cfd64160ffe19769f747b | [
"BSD-2-Clause"
] | 280 | 2015-01-04T08:39:41.000Z | 2022-03-10T17:09:59.000Z | package MusicBrainz::Server::Controller::WS::js::ReleaseGroup;
use Moose;
BEGIN { extends 'MusicBrainz::Server::ControllerBase::WS::js' }
with 'MusicBrainz::Server::Controller::WS::js::Role::Autocompletion::WithArtistCredits';
with 'MusicBrainz::Server::Controller::WS::js::Role::Autocompletion::PrimaryAlias' => {
model => 'ReleaseGroup',
};
my $ws_defs = Data::OptList::mkopt([
'release-group' => {
method => 'GET',
required => [ qw(q) ],
optional => [ qw(direct limit page timestamp) ]
}
]);
with 'MusicBrainz::Server::WebService::Validator' =>
{
defs => $ws_defs,
version => 'js',
default_serialization_type => 'json',
};
sub type { 'release_group' }
sub search : Chained('root') PathPart('release-group')
{
my ($self, $c) = @_;
$self->dispatch_search($c);
}
after _load_entities => sub {
my ($self, $c, @entities) = @_;
$c->model('ReleaseGroupType')->load(@entities);
};
1;
| 23.9 | 88 | 0.626569 |
ed79ccd087c046543ac7408574c8b3d4a21eee94 | 3,784 | pm | Perl | auto-lib/Paws/Textract/StartDocumentTextDetection.pm | torrentalle/aws-sdk-perl | 70cc5c7b7a494e422f8412da619161a99de1f1ec | [
"Apache-2.0"
] | null | null | null | auto-lib/Paws/Textract/StartDocumentTextDetection.pm | torrentalle/aws-sdk-perl | 70cc5c7b7a494e422f8412da619161a99de1f1ec | [
"Apache-2.0"
] | 1 | 2021-05-26T19:13:58.000Z | 2021-05-26T19:13:58.000Z | auto-lib/Paws/Textract/StartDocumentTextDetection.pm | torrentalle/aws-sdk-perl | 70cc5c7b7a494e422f8412da619161a99de1f1ec | [
"Apache-2.0"
] | null | null | null |
# Auto-generated Paws request class for the Textract
# StartDocumentTextDetection API call; each attribute mirrors one request
# parameter (see the POD below for details).
package Paws::Textract::StartDocumentTextDetection;
use Moose;
has ClientRequestToken => (is => 'ro', isa => 'Str');
has DocumentLocation => (is => 'ro', isa => 'Paws::Textract::DocumentLocation', required => 1);
has JobTag => (is => 'ro', isa => 'Str');
has NotificationChannel => (is => 'ro', isa => 'Paws::Textract::NotificationChannel');
use MooseX::ClassAttribute;
# Dispatcher metadata: API call name, response class, and result key.
class_has _api_call => (isa => 'Str', is => 'ro', default => 'StartDocumentTextDetection');
class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::Textract::StartDocumentTextDetectionResponse');
class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::Textract::StartDocumentTextDetection - Arguments for method StartDocumentTextDetection on L<Paws::Textract>
=head1 DESCRIPTION
This class represents the parameters used for calling the method StartDocumentTextDetection on the
L<Amazon Textract|Paws::Textract> service. Use the attributes of this class
as arguments to method StartDocumentTextDetection.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to StartDocumentTextDetection.
=head1 SYNOPSIS
my $textract = Paws->service('Textract');
my $StartDocumentTextDetectionResponse =
$textract->StartDocumentTextDetection(
DocumentLocation => {
S3Object => {
Bucket => 'MyS3Bucket', # min: 3, max: 255; OPTIONAL
Name => 'MyS3ObjectName', # min: 1, max: 1024; OPTIONAL
Version => 'MyS3ObjectVersion', # min: 1, max: 1024; OPTIONAL
}, # OPTIONAL
},
ClientRequestToken => 'MyClientRequestToken', # OPTIONAL
JobTag => 'MyJobTag', # OPTIONAL
NotificationChannel => {
RoleArn => 'MyRoleArn', # min: 20, max: 2048
SNSTopicArn => 'MySNSTopicArn', # min: 20, max: 1024
}, # OPTIONAL
);
# Results:
my $JobId = $StartDocumentTextDetectionResponse->JobId;
# Returns a L<Paws::Textract::StartDocumentTextDetectionResponse> object.
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instance the underlying object.
For the AWS API documentation, see L<https://docs.aws.amazon.com/goto/WebAPI/textract/StartDocumentTextDetection>
=head1 ATTRIBUTES
=head2 ClientRequestToken => Str
The idempotent token that's used to identify the start request. If you
use the same token with multiple C<StartDocumentTextDetection>
requests, the same C<JobId> is returned. Use C<ClientRequestToken> to
prevent the same job from being accidentally started more than once.
=head2 B<REQUIRED> DocumentLocation => L<Paws::Textract::DocumentLocation>
The location of the document to be processed.
=head2 JobTag => Str
An identifier you specify that's included in the completion
notification that's published to the Amazon SNS topic. For example, you
can use C<JobTag> to identify the type of document, such as a tax form
or a receipt, that the completion notification corresponds to.
=head2 NotificationChannel => L<Paws::Textract::NotificationChannel>
The Amazon SNS topic ARN that you want Amazon Textract to publish the
completion status of the operation to.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method StartDocumentTextDetection in L<Paws::Textract>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: L<https://github.com/pplu/aws-sdk-perl>
Please report bugs to: L<https://github.com/pplu/aws-sdk-perl/issues>
=cut
| 35.698113 | 249 | 0.705338 |
73e311be06bf023cf025be682b212ee858670b5c | 117 | plx | Perl | rhyming.plx | doodersrage/Mini-Useless-Perl-scripts | 15fcb149a4631ac5f6e01aa2bdfdb4920e61eb57 | [
"MIT"
] | 1 | 2015-11-06T00:08:56.000Z | 2015-11-06T00:08:56.000Z | rhyming.plx | doodersrage/Mini-Useless-Perl-scripts | 15fcb149a4631ac5f6e01aa2bdfdb4920e61eb57 | [
"MIT"
] | null | null | null | rhyming.plx | doodersrage/Mini-Useless-Perl-scripts | 15fcb149a4631ac5f6e01aa2bdfdb4920e61eb57 | [
"MIT"
] | null | null | null | #!/usr/bin/perl
# rhyming.plx
use warnings;
use strict;
my $syllable = "ink";
while (<>) {
print if /$syllable$/;
} | 13 | 23 | 0.623932 |
ed97ca5b093771d0851c1d7ba4e4bc06d4881739 | 4,155 | pm | Perl | apps/kingdee/eas/mode/oracleksqltemptable.pm | Rico29/centreon-plugins | 3fcaa41001c20fdc35df49db95e5ad0516137de6 | [
"Apache-2.0"
] | null | null | null | apps/kingdee/eas/mode/oracleksqltemptable.pm | Rico29/centreon-plugins | 3fcaa41001c20fdc35df49db95e5ad0516137de6 | [
"Apache-2.0"
] | null | null | null | apps/kingdee/eas/mode/oracleksqltemptable.pm | Rico29/centreon-plugins | 3fcaa41001c20fdc35df49db95e5ad0516137de6 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author : CHEN JUN , aladdin.china@gmail.com
package apps::kingdee::eas::mode::oracleksqltemptable;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Constructor: registers this mode's command-line options.
# Options: --urlpath (status page path, with a default), --datasource
# (required; validated in check_options), --warning/--critical thresholds.
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options);
bless $self, $class;
$self->{version} = '1.0';
$options{options}->add_options(arguments =>
{
"urlpath:s" => { name => 'url_path', default => "/easportal/tools/nagios/checkoraclevt.jsp" },
"datasource:s" => { name => 'datasource' },
"warning:s" => { name => 'warning' },
"critical:s" => { name => 'critical' },
});
return $self;
}
# Validate parsed options: a datasource name is mandatory and the
# warning/critical thresholds must be well-formed; the datasource is then
# appended to the status-page URL as the "ds" query parameter.
# Exits the plugin with a usage message on any violation.
sub check_options {
my ($self, %options) = @_;
$self->SUPER::init(%options);
if (!defined($self->{option_results}->{datasource}) || $self->{option_results}->{datasource} eq "") {
$self->{output}->add_option_msg(short_msg => "Missing datasource name.");
$self->{output}->option_exit();
}
$self->{option_results}->{url_path} .= "?ds=" . $self->{option_results}->{datasource};
if (($self->{perfdata}->threshold_validate(label => 'warning', value => $self->{option_results}->{warning})) == 0) {
$self->{output}->add_option_msg(short_msg => "Wrong warning threshold '" . $self->{option_results}->{warning} . "'.");
$self->{output}->option_exit();
}
if (($self->{perfdata}->threshold_validate(label => 'critical', value => $self->{option_results}->{critical})) == 0) {
$self->{output}->add_option_msg(short_msg => "Wrong critical threshold '" . $self->{option_results}->{critical} . "'.");
$self->{output}->option_exit();
}
}
sub run {
    # Fetch the status page and report the KSQL temporary-table count,
    # checked against the configured warning/critical thresholds.
    my ($self, %options) = @_;

    my $webcontent = $options{custom}->request(path => $self->{option_results}->{url_path});

    # Parse the count once.  The previous code matched the same pattern
    # twice and captured via the deprecated "my $count = $1 if ..." form,
    # whose behaviour is undefined; a list-context match is equivalent
    # and well-defined.
    my ($count) = $webcontent =~ /^COUNT.*?=(\d+)/i;
    if (!defined $count) {
        $self->{output}->output_add(
            severity => 'UNKNOWN',
            short_msg => "Cannot find ksql temptable status."
        );
        $self->{output}->option_exit();
    }

    my $exit = $self->{perfdata}->threshold_check(value => $count, threshold => [
        { label => 'critical', 'exit_litteral' => 'critical' },
        { label => 'warning', exit_litteral => 'warning' } ]);
    $self->{output}->output_add(severity => $exit, short_msg => sprintf("KSQLTempTableCount: %d", $count));

    $self->{output}->perfdata_add(label => "KSQLTempTableCount", unit => '',
                                  value => sprintf("%d", $count),
                                  warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning'),
                                  critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical'),
                                 );

    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check the KSQL temporary table count for the specified datasource.
=over 8
=item B<--urlpath>
Set path to get status page. (Default: '/easportal/tools/nagios/checkoraclevt.jsp')
=item B<--datasource>
Specify the datasource name.
=item B<--warning>
Warning Threshold.
=item B<--critical>
Critical Threshold.
=back
=cut
| 32.97619 | 128 | 0.586763 |
73ebd9c320658d72e09bc91a62c517c1171431d0 | 31 | t | Perl | plugins/robots/generators/iotik/iotikRuCGeneratorLibrary/templates_english/luaPrinting/exponentiation.t | Victor-Y-Fadeev/qreal | 2c7f14d5eb24753d4a7c038e13a3fa3026adce18 | [
"Apache-2.0"
] | 6 | 2017-07-03T13:55:35.000Z | 2018-11-28T03:39:51.000Z | plugins/robots/generators/iotik/iotikRuCGeneratorLibrary/templates_english/luaPrinting/exponentiation.t | Victor-Y-Fadeev/qreal | 2c7f14d5eb24753d4a7c038e13a3fa3026adce18 | [
"Apache-2.0"
] | 27 | 2017-06-29T09:36:37.000Z | 2017-11-25T14:50:04.000Z | plugins/robots/generators/iotik/iotikRuCGeneratorLibrary/templates_english/luaPrinting/exponentiation.t | Victor-Y-Fadeev/qreal | 2c7f14d5eb24753d4a7c038e13a3fa3026adce18 | [
"Apache-2.0"
] | null | null | null | exp(log(@@LEFT@@) * @@RIGHT@@) | 31 | 31 | 0.483871 |
ed66c6b33114196565f520c45a6d70154f295781 | 6,368 | pm | Perl | lib/Rex.pm | samuelet/Rex | ed74ef269027a4c99435c810d5108efb5c8744f9 | [
"Apache-2.0"
] | null | null | null | lib/Rex.pm | samuelet/Rex | ed74ef269027a4c99435c810d5108efb5c8744f9 | [
"Apache-2.0"
] | null | null | null | lib/Rex.pm | samuelet/Rex | ed74ef269027a4c99435c810d5108efb5c8744f9 | [
"Apache-2.0"
] | null | null | null | #
# (c) Jan Gehring <jan.gehring@gmail.com>
#
# vim: set ts=3 sw=3 tw=0:
# vim: set expandtab:
=head1 NAME
Rex - Remote Execution
=head1 DESCRIPTION
(R)?ex is a small script to ease the execution of remote commands. You can write small tasks in a file named I<Rexfile>.
You can find examples and howtos on L<http://rexify.org/>
=head1 GETTING HELP
=over 4
=item * Web Site: L<http://rexify.org/>
=item * IRC: irc.freenode.net #rex
=item * Bug Tracker: L<https://rt.cpan.org/Dist/Display.html?Queue=Rex>
=item * Twitter: L<http://twitter.com/jfried83>
=back
=head1 Dependencies
=over 4
=item *
L<Net::SSH2>
=item *
L<Expect>
Only if you want to use the Rsync module.
=item *
L<DBI>
Only if you want to use the DB module.
=back
=head1 SYNOPSIS
desc "Show Unix version";
task "uname", sub {
say run "uname -a";
};
bash# rex -H "server[01..10]" uname
See L<Rex::Commands> for a list of all commands you can use.
=head1 CLASS METHODS
=over 4
=cut
package Rex;
use strict;
use warnings;
use Net::SSH2;
use Rex::Logger;
use Rex::Cache;
our (@EXPORT,
$VERSION,
@CONNECTION_STACK,
$GLOBAL_SUDO);
$VERSION = "0.29.99.0";
sub push_connection {
    # Make the given connection the active one (top of the stack).
    my ($connection) = @_;
    push @CONNECTION_STACK, $connection;
}
sub pop_connection {
    # Drop the most recent connection and log how many remain.
    pop @CONNECTION_STACK;
    my $remaining = scalar @CONNECTION_STACK;
    Rex::Logger::debug("Connections in queue: $remaining");
}
=item get_current_connection
This function is deprecated since 0.28! See Rex::Commands::connection.
Returns the current connection as a hashRef.
=over 4
=item server
The server name
=item ssh
1 if it is a ssh connection, 0 if not.
=back
=cut
sub get_current_connection {
    # Deprecated since 0.28 -- see Rex::Commands::connection.
    # Returns the connection hash on top of the stack (undef when empty).
    return $CONNECTION_STACK[-1];
}
=item is_ssh
Returns 1 if the current connection is a ssh connection. 0 if not.
=cut
# Return the underlying SSH connection object when the current connection
# is an SSH one, 0 otherwise (including when no connection is active).
# The type check is by class name of the connection wrapper object.
sub is_ssh {
if($CONNECTION_STACK[-1]) {
my $ref = ref($CONNECTION_STACK[-1]->{"conn"});
if($ref =~ m/SSH/) {
return $CONNECTION_STACK[-1]->{"conn"}->get_connection_object();
}
}
return 0;
}
=item is_sudo
Returns 1 if the current operation is executed within sudo.
=cut
sub is_sudo {
    # Global sudo overrides everything; otherwise ask the active connection.
    return 1 if $GLOBAL_SUDO;

    my $current = $CONNECTION_STACK[-1];
    return $current->{"use_sudo"} if $current;

    return 0;
}
sub global_sudo {
    # Switch sudo on or off for every subsequent operation.
    my ($enabled) = @_;
    $GLOBAL_SUDO = $enabled;
}
=item get_sftp
Returns the sftp object for the current ssh connection.
=cut
sub get_sftp {
    # SFTP object of the active ssh connection, or 0 when not connected.
    my $current = $CONNECTION_STACK[-1];
    return $current ? $current->{"conn"}->get_fs_connection_object() : 0;
}
sub get_cache {
    # Cache object of the active connection; a fresh one when not connected.
    my $current = $CONNECTION_STACK[-1];
    return $current->{"cache"} if $current;
    return Rex::Cache->new;
}
=item connect
Use this function to create a connection if you use Rex as a library.
use Rex;
use Rex::Commands::Run;
use Rex::Commands::Fs;
Rex::connect(
server => "remotehost",
user => "root",
password => "f00b4r",
private_key => "/path/to/private/key/file",
public_key => "/path/to/public/key/file",
);
if(is_file("/foo/bar")) {
print "Do something...\n";
}
my $output = run("uptime");
=cut
# Open an SSH connection and push it onto the connection stack, for using
# Rex as a library (see the POD example above).
# Accepts a flat list of named args: server, port (default 22),
# timeout (default 5), user, password.
# NOTE(review): the POD also documents private_key/public_key, but this
# code only forwards user/password -- confirm key-based auth handling.
# Dies when the TCP connection fails or authentication is rejected.
sub connect {
my ($param) = { @_ };
my $server = $param->{server};
my $port = $param->{port} || 22;
my $timeout = $param->{timeout} || 5;
my $user = $param->{"user"};
my $pass = $param->{"password"};
# create and establish the SSH transport
my $conn = Rex::Interface::Connection->create("SSH");
$conn->connect(
user => $user,
password => $pass,
server => $server,
port => $port,
timeout => $timeout,
);
unless($conn->is_connected) {
die("Connetion error or refused.");
}
# push the remote connection onto the global stack so subsequent
# commands are executed through it
Rex::push_connection({
conn => $conn,
ssh => $conn->get_connection_object,
server => $server,
cache => Rex::Cache->new(),
});
# authentication was unsuccessful
unless($conn->is_authenticated) {
Rex::Logger::info("Wrong username or password. Or wrong key.", "warn");
# after jobs
die("Wrong username or password. Or wrong key.");
}
}
sub deprecated {
    # Log a deprecation notice for $func, optional extra message lines,
    # and a pointer to the version in which the feature will be removed.
    my ($func, $version, @extra_lines) = @_;

    Rex::Logger::info("The call to $func is deprecated.") if $func;
    Rex::Logger::info($_) for @extra_lines;

    Rex::Logger::info("");
    Rex::Logger::info("Please rewrite your code. This function will disappear in (R)?ex version $version.");
    Rex::Logger::info("If you need assistance please join #rex on irc.freenode.net or our google group.");
}
# Rex's import hook.  "use Rex;" only enables strict in the caller;
# "use Rex -base;" (or "base") additionally exports the whole basic
# command set (Run, Fs, File, Download, Upload, Gather, Kernel, Pkg,
# Service, Sysctl, Tail, Process) into the calling package.
sub import {
my ($class, $what) = @_;
$what ||= "";
# the package that said "use Rex" -- commands are registered into it
my ($register_to, $file, $line) = caller;
if($what eq "-base" || $what eq "base") {
require Rex::Commands;
Rex::Commands->import(register_in => $register_to);
require Rex::Commands::Run;
Rex::Commands::Run->import(register_in => $register_to);
require Rex::Commands::Fs;
Rex::Commands::Fs->import(register_in => $register_to);
require Rex::Commands::File;
Rex::Commands::File->import(register_in => $register_to);
require Rex::Commands::Download;
Rex::Commands::Download->import(register_in => $register_to);
require Rex::Commands::Upload;
Rex::Commands::Upload->import(register_in => $register_to);
require Rex::Commands::Gather;
Rex::Commands::Gather->import(register_in => $register_to);
require Rex::Commands::Kernel;
Rex::Commands::Kernel->import(register_in => $register_to);
require Rex::Commands::Pkg;
Rex::Commands::Pkg->import(register_in => $register_to);
require Rex::Commands::Service;
Rex::Commands::Service->import(register_in => $register_to);
require Rex::Commands::Sysctl;
Rex::Commands::Sysctl->import(register_in => $register_to);
require Rex::Commands::Tail;
Rex::Commands::Tail->import(register_in => $register_to);
require Rex::Commands::Process;
Rex::Commands::Process->import(register_in => $register_to);
}
# we are always strict
strict->import;
}
=back
=head1 CONTRIBUTORS
Many thanks to the contributors for their work (alphabetical order).
=over 4
=item Alexandr Ciornii
=item Gilles Gaudin, for writing a french howto
=item Hiroaki Nakamura
=item Jean Charles Passard
=item Jeen Lee
=item Jose Luis Martinez
=item Samuele Tognini
=item Sascha Guenther
=item Sven Dowideit
=back
=cut
1;
| 18.298851 | 120 | 0.62924 |
ed93c4e4c7f44868b4b40cdb444e27526ac8c709 | 560 | pl | Perl | external/win_perl/lib/unicore/lib/Scx/Takr.pl | phixion/l0phtcrack | 48ee2f711134e178dbedbd925640f6b3b663fbb5 | [
"Apache-2.0",
"MIT"
] | 2 | 2021-10-20T00:25:39.000Z | 2021-11-08T12:52:42.000Z | external/win_perl/lib/unicore/lib/Scx/Takr.pl | Brute-f0rce/l0phtcrack | 25f681c07828e5e68e0dd788d84cc13c154aed3d | [
"Apache-2.0",
"MIT"
] | null | null | null | external/win_perl/lib/unicore/lib/Scx/Takr.pl | Brute-f0rce/l0phtcrack | 25f681c07828e5e68e0dd788d84cc13c154aed3d | [
"Apache-2.0",
"MIT"
] | 1 | 2022-03-14T06:41:16.000Z | 2022-03-14T06:41:16.000Z | # !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by ..\lib\unicore\mktables from the Unicode
# database, Version 9.0.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly. Use Unicode::UCD to access the Unicode character data
# base.
return <<'END';
V8
2404
2406
43056
43066
71296
71352
71360
71370
END
| 24.347826 | 78 | 0.664286 |
ed4c062f6637a129140aca3f320df6a6a6f7ba75 | 2,133 | t | Perl | t/multi_socket.t | api7/lua-resty-logger-socket | a554c5b00cbf8deb79308d657266098404225274 | [
"Unlicense"
] | null | null | null | t/multi_socket.t | api7/lua-resty-logger-socket | a554c5b00cbf8deb79308d657266098404225274 | [
"Unlicense"
] | 7 | 2020-04-09T09:50:56.000Z | 2020-05-24T11:30:55.000Z | t/multi_socket.t | api7/lua-resty-logger-socket | a554c5b00cbf8deb79308d657266098404225274 | [
"Unlicense"
] | 5 | 2020-04-10T15:58:14.000Z | 2021-06-29T16:56:23.000Z | # vim:set ft= ts=4 sw=4 et:
use Test::Nginx::Socket::Lua "no_plan";
use Cwd qw(cwd);
repeat_each(1);
my $pwd = cwd();
our $HttpConfig = qq{
lua_package_path "$pwd/lib/?.lua;;";
};
no_long_string();
log_level('info');
run_tests();
__DATA__
=== TEST 1: create 2 logger_socket oblects
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua 'ngx.say("foo")';
log_by_lua '
collectgarbage() -- to help leak testing
local logger_socket = require "resty.logger.socket"
local logger = logger_socket:new()
if not logger:initted() then
local ok, err = logger:init{
host = "127.0.0.1",
port = 29999,
flush_limit = 1,
}
local bytes, err = logger:log(ngx.var.request_uri)
if err then
ngx.log(ngx.ERR, err)
end
end
';
}
--- request eval
["GET /t?a=1&b=2", "GET /t?c=3&d=4"]
--- wait: 0.1
--- tcp_listen: 29999
--- tcp_reply:
--- no_error_log
[error]
--- tcp_query eval: "/t?a=1&b=2/t?c=3&d=4"
--- tcp_query_len: 20
--- response_body eval
["foo\n", "foo\n"]
=== TEST 2: new2 (new + init)
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua_block {
ngx.print("foo")
}
log_by_lua_block {
collectgarbage() -- to help leak testing
local logger_socket = require("resty.logger.socket")
local logger, err = logger_socket:new({
host = "127.0.0.1",
port = 29999,
flush_limit = 1,
})
if not logger then
ngx.log(ngx.ERR, "failed to create logger: ", err)
end
local bytes, err = logger:log(ngx.var.request_uri)
if err then
ngx.log(ngx.ERR, err)
end
}
}
--- request eval
["GET /t?a=1&b=2", "GET /t?c=3&d=4"]
--- wait: 0.1
--- tcp_listen: 29999
--- tcp_reply:
--- no_error_log
[error]
--- tcp_query eval: "/t?a=1&b=2/t?c=3&d=4"
--- tcp_query_len: 20
--- response_body eval
["foo", "foo"]
| 21.989691 | 66 | 0.531646 |
ed784ec277ce9c451ffbaf45f8814e46d494f008 | 316 | pl | Perl | old_logen/testcases/bta/inter_simple.pl | leuschel/logen | 0ea806f54628162615e25177c3ed98f6b2c27935 | [
"Apache-2.0"
] | 14 | 2015-10-16T11:35:30.000Z | 2021-05-12T15:31:16.000Z | old_logen/testcases/bta/inter_simple.pl | leuschel/logen | 0ea806f54628162615e25177c3ed98f6b2c27935 | [
"Apache-2.0"
] | null | null | null | old_logen/testcases/bta/inter_simple.pl | leuschel/logen | 0ea806f54628162615e25177c3ed98f6b2c27935 | [
"Apache-2.0"
] | 5 | 2015-10-16T12:44:41.000Z | 2019-10-02T02:45:38.000Z |
int(cst(X),X).
int(+(X,Y),Res) :- int(X,RX), int(Y,RY). %, Res is RX+RY.
int(-(X,Y),Res) :- int(X,RX), int(Y,RY). %, Res is RX-RY.
int(fun(X),Res) :- def(X,Def), int(Def,Res).
%
%def(inc(X),cst(R)) :- R is X+1.
%
def(rec,fun(rec)).
def(rec,fun(rec)).
%def(rec, fun(_)).
%def(one,cst(1)).
| 7.9 | 57 | 0.481013 |
73e3b63d50f6ab7f7f88c6d6bd27e6a0521f6f48 | 267 | pm | Perl | auto-lib/Azure/Advisor/OperationDisplayInfo.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | null | null | null | auto-lib/Azure/Advisor/OperationDisplayInfo.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | null | null | null | auto-lib/Azure/Advisor/OperationDisplayInfo.pm | pplu/azure-sdk-perl | 26cbef2d926f571bc1617c26338c106856f95568 | [
"Apache-2.0"
] | 1 | 2021-04-08T15:26:39.000Z | 2021-04-08T15:26:39.000Z | package Azure::Advisor::OperationDisplayInfo;
use Moose;
has 'description' => (is => 'ro', isa => 'Str' );
has 'operation' => (is => 'ro', isa => 'Str' );
has 'provider' => (is => 'ro', isa => 'Str' );
has 'resource' => (is => 'ro', isa => 'Str' );
1;
| 29.666667 | 52 | 0.509363 |
ed4b7c1ca738aa64e47c7fbd00c2fab67970911a | 15,175 | t | Perl | t/plugin/limit-count.t | cuiweixie/apisix | 7c5b834d188902ff711aaec7941ac5e24f28c12d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | t/plugin/limit-count.t | cuiweixie/apisix | 7c5b834d188902ff711aaec7941ac5e24f28c12d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | t/plugin/limit-count.t | cuiweixie/apisix | 7c5b834d188902ff711aaec7941ac5e24f28c12d | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-02-01T13:28:35.000Z | 2020-02-01T13:28:35.000Z | BEGIN {
if ($ENV{TEST_NGINX_CHECK_LEAK}) {
$SkipReason = "unavailable for the hup tests";
} else {
$ENV{TEST_NGINX_USE_HUP} = 1;
undef $ENV{TEST_NGINX_USE_STAP};
}
}
use t::APISIX 'no_plan';
repeat_each(1);
no_long_string();
no_shuffle();
no_root_location();
run_tests;
__DATA__
=== TEST 1: sanity
--- config
location /t {
content_by_lua_block {
local plugin = require("apisix.plugins.limit-count")
local ok, err = plugin.check_schema({count = 2, time_window = 60, rejected_code = 503, key = 'remote_addr'})
if not ok then
ngx.say(err)
end
ngx.say("done")
}
}
--- request
GET /t
--- response_body
done
--- no_error_log
[error]
=== TEST 2: wrong value of key
--- config
location /t {
content_by_lua_block {
local plugin = require("apisix.plugins.limit-count")
local ok, err = plugin.check_schema({count = 2, time_window = 60, rejected_code = 503, key = 'host'})
if not ok then
ngx.say(err)
end
ngx.say("done")
}
}
--- request
GET /t
--- response_body
invalid "enum" in docuement at pointer "#/key"
done
--- no_error_log
[error]
=== TEST 3: set route(id: 1)
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
local code, body = t('/apisix/admin/routes/1',
ngx.HTTP_PUT,
[[{
"methods": ["GET"],
"plugins": {
"limit-count": {
"count": 2,
"time_window": 60,
"rejected_code": 503,
"key": "remote_addr"
}
},
"upstream": {
"nodes": {
"127.0.0.1:1980": 1
},
"type": "roundrobin"
},
"uri": "/hello"
}]]
)
if code >= 300 then
ngx.status = code
end
ngx.say(body)
}
}
--- request
GET /t
--- response_body
passed
--- no_error_log
[error]
=== TEST 4: up the limit
--- pipelined_requests eval
["GET /hello", "GET /hello", "GET /hello", "GET /hello"]
--- error_code eval
[200, 200, 503, 503]
--- no_error_log
[error]
=== TEST 5: up the limit
--- pipelined_requests eval
["GET /hello1", "GET /hello", "GET /hello2", "GET /hello", "GET /hello"]
--- error_code eval
[404, 503, 404, 503, 503]
--- no_error_log
[error]
=== TEST 6: set route(id: 1)
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
local code, body = t('/apisix/admin/routes/1',
ngx.HTTP_PUT,
[[{
"methods": ["GET"],
"plugins": {
"limit-count": {
"count": 3,
"time_window": 60,
"rejected_code": 503,
"key": "remote_addr"
}
},
"upstream": {
"nodes": {
"127.0.0.1:1980": 1
},
"type": "roundrobin"
},
"uri": "/hello"
}]]
)
if code >= 300 then
ngx.status = code
end
ngx.say(body)
}
}
--- request
GET /t
--- response_body
passed
--- no_error_log
[error]
=== TEST 7: up the limit
--- pipelined_requests eval
["GET /hello", "GET /hello", "GET /hello", "GET /hello"]
--- error_code eval
[200, 200, 200, 503]
--- no_error_log
[error]
=== TEST 8: invalid route: missing key
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
local code, body = t('/apisix/admin/routes/1',
ngx.HTTP_PUT,
[[{
"plugins": {
"limit-count": {
"count": 2,
"time_window": 60,
"rejected_code": 503
}
},
"upstream": {
"nodes": {
"127.0.0.1:1980": 1
},
"type": "roundrobin"
},
"uri": "/hello"
}]]
)
if code >= 300 then
ngx.status = code
end
ngx.print(body)
}
}
--- request
GET /t
--- error_code: 400
--- response_body
{"error_msg":"failed to check the configuration of plugin limit-count err: invalid \"required\" in docuement at pointer \"#\""}
--- no_error_log
[error]
=== TEST 9: invalid route: wrong count
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
local code, body = t('/apisix/admin/routes/1',
ngx.HTTP_PUT,
[[{
"plugins": {
"limit-count": {
"count": -100,
"time_window": 60,
"rejected_code": 503,
"key": "remote_addr"
}
},
"upstream": {
"nodes": {
"127.0.0.1:1980": 1
},
"type": "roundrobin"
},
"uri": "/hello"
}]]
)
if code >= 300 then
ngx.status = code
end
ngx.print(body)
}
}
--- request
GET /t
--- error_code: 400
--- response_body
{"error_msg":"failed to check the configuration of plugin limit-count err: invalid \"minimum\" in docuement at pointer \"#\/count\""}
--- no_error_log
[error]
=== TEST 10: invalid route: wrong count + POST method
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
local code, body = t('/apisix/admin/routes',
ngx.HTTP_POST,
[[{
"plugins": {
"limit-count": {
"count": -100,
"time_window": 60,
"rejected_code": 503,
"key": "remote_addr"
}
},
"upstream": {
"nodes": {
"127.0.0.1:1980": 1
},
"type": "roundrobin"
},
"uri": "/hello"
}]]
)
if code >= 300 then
ngx.status = code
end
ngx.print(body)
}
}
--- request
GET /t
--- error_code: 400
--- response_body
{"error_msg":"failed to check the configuration of plugin limit-count err: invalid \"minimum\" in docuement at pointer \"#\/count\""}
--- no_error_log
[error]
=== TEST 11: invalid service: missing key
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
local code, body = t('/apisix/admin/services/1',
ngx.HTTP_PUT,
[[{
"plugins": {
"limit-count": {
"count": 2,
"time_window": 60,
"rejected_code": 503
}
},
"upstream": {
"nodes": {
"127.0.0.1:1980": 1
},
"type": "roundrobin"
}
}]]
)
if code >= 300 then
ngx.status = code
end
ngx.print(body)
}
}
--- request
GET /t
--- error_code: 400
--- response_body
{"error_msg":"failed to check the configuration of plugin limit-count err: invalid \"required\" in docuement at pointer \"#\""}
--- no_error_log
[error]
=== TEST 12: invalid service: wrong count
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
local code, body = t('/apisix/admin/services/1',
ngx.HTTP_PUT,
[[{
"plugins": {
"limit-count": {
"count": -100,
"time_window": 60,
"rejected_code": 503,
"key": "remote_addr"
}
},
"upstream": {
"nodes": {
"127.0.0.1:1980": 1
},
"type": "roundrobin"
}
}]]
)
if code >= 300 then
ngx.status = code
end
ngx.print(body)
}
}
--- request
GET /t
--- error_code: 400
--- response_body
{"error_msg":"failed to check the configuration of plugin limit-count err: invalid \"minimum\" in docuement at pointer \"#\/count\""}
--- no_error_log
[error]
=== TEST 13: invalid service: wrong count + POST method
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
local code, body = t('/apisix/admin/services',
ngx.HTTP_POST,
[[{
"plugins": {
"limit-count": {
"count": -100,
"time_window": 60,
"rejected_code": 503,
"key": "remote_addr"
}
},
"upstream": {
"nodes": {
"127.0.0.1:1980": 1
},
"type": "roundrobin"
}
}]]
)
if code >= 300 then
ngx.status = code
end
ngx.print(body)
}
}
--- request
GET /t
--- error_code: 400
--- response_body
{"error_msg":"failed to check the configuration of plugin limit-count err: invalid \"minimum\" in docuement at pointer \"#\/count\""}
--- no_error_log
[error]
=== TEST 14: set route without id in post body
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
local code, body = t('/apisix/admin/routes/1',
ngx.HTTP_PUT,
[[{
"plugins": {
"limit-count": {
"count": 2,
"time_window": 60,
"rejected_code": 503,
"key": "remote_addr"
}
},
"upstream": {
"nodes": {
"127.0.0.1:1980": 1
},
"type": "roundrobin"
},
"uri": "/hello"
}]]
)
if code >= 300 then
ngx.status = code
end
ngx.say(body)
}
}
--- request
GET /t
--- response_body
passed
--- no_error_log
[error]
=== TEST 15: up the limit
--- pipelined_requests eval
["GET /hello", "GET /hello", "GET /hello", "GET /hello"]
--- error_code eval
[200, 200, 503, 503]
--- no_error_log
[error]
=== TEST 16: disable plugin
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
local code, body = t('/apisix/admin/routes/1',
ngx.HTTP_PUT,
[[{
"plugins": {
},
"upstream": {
"nodes": {
"127.0.0.1:1980": 1
},
"type": "roundrobin"
},
"uri": "/hello"
}]]
)
if code >= 300 then
ngx.status = code
end
ngx.say(body)
}
}
--- request
GET /t
--- response_body
passed
--- no_error_log
[error]
=== TEST 17: up the limit
--- pipelined_requests eval
["GET /hello", "GET /hello", "GET /hello", "GET /hello"]
--- error_code eval
[200, 200, 200, 200]
--- no_error_log
[error]
=== TEST 18: set route(key: server_addr)
--- config
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
local code, body = t('/apisix/admin/routes/1',
ngx.HTTP_PUT,
[[{
"methods": ["GET"],
"plugins": {
"limit-count": {
"count": 2,
"time_window": 60,
"rejected_code": 503,
"key": "server_addr"
}
},
"upstream": {
"nodes": {
"127.0.0.1:1980": 1
},
"type": "roundrobin"
},
"uri": "/hello"
}]]
)
if code >= 300 then
ngx.status = code
end
ngx.say(body)
}
}
--- request
GET /t
--- response_body
passed
--- no_error_log
[error]
=== TEST 19: up the limit
--- pipelined_requests eval
["GET /hello", "GET /hello", "GET /hello", "GET /hello"]
--- error_code eval
[200, 200, 503, 503]
--- no_error_log
[error]
| 26.52972 | 133 | 0.372323 |
ed502ab692b6ff9b94feff737679e34cfaa0cb83 | 2,723 | pm | Perl | auto-lib/Paws/IAM/GetOpenIDConnectProvider.pm | galenhuntington/aws-sdk-perl | 13b775dcb5f0b3764f0a82f3679ed5c7721e67d3 | [
"Apache-2.0"
] | null | null | null | auto-lib/Paws/IAM/GetOpenIDConnectProvider.pm | galenhuntington/aws-sdk-perl | 13b775dcb5f0b3764f0a82f3679ed5c7721e67d3 | [
"Apache-2.0"
] | null | null | null | auto-lib/Paws/IAM/GetOpenIDConnectProvider.pm | galenhuntington/aws-sdk-perl | 13b775dcb5f0b3764f0a82f3679ed5c7721e67d3 | [
"Apache-2.0"
] | null | null | null |
package Paws::IAM::GetOpenIDConnectProvider;
  use Moose;
  # Required: ARN of the IAM OIDC provider resource object to look up.
  has OpenIDConnectProviderArn => (is => 'ro', isa => 'Str', required => 1);

  use MooseX::ClassAttribute;
  # Paws call plumbing (class-level metadata): the API action name, the
  # response class to inflate, and the key under which results arrive.
  # NOTE: this file is generated by the Paws builder; regenerate rather
  # than hand-editing the attribute definitions.
  class_has _api_call => (isa => 'Str', is => 'ro', default => 'GetOpenIDConnectProvider');
  class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::IAM::GetOpenIDConnectProviderResponse');
  class_has _result_key => (isa => 'Str', is => 'ro', default => 'GetOpenIDConnectProviderResult');
1;
### main pod documentation begin ###
=head1 NAME
Paws::IAM::GetOpenIDConnectProvider - Arguments for method GetOpenIDConnectProvider on L<Paws::IAM>
=head1 DESCRIPTION
This class represents the parameters used for calling the method GetOpenIDConnectProvider on the
L<AWS Identity and Access Management|Paws::IAM> service. Use the attributes of this class
as arguments to method GetOpenIDConnectProvider.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to GetOpenIDConnectProvider.
=head1 SYNOPSIS
my $iam = Paws->service('IAM');
my $GetOpenIDConnectProviderResponse = $iam->GetOpenIDConnectProvider(
OpenIDConnectProviderArn => 'MyarnType',
);
# Results:
my $ClientIDList = $GetOpenIDConnectProviderResponse->ClientIDList;
my $CreateDate = $GetOpenIDConnectProviderResponse->CreateDate;
my $ThumbprintList = $GetOpenIDConnectProviderResponse->ThumbprintList;
my $Url = $GetOpenIDConnectProviderResponse->Url;
# Returns a L<Paws::IAM::GetOpenIDConnectProviderResponse> object.
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instantiate the underlying object.
For the AWS API documentation, see L<https://docs.aws.amazon.com/goto/WebAPI/iam/GetOpenIDConnectProvider>
=head1 ATTRIBUTES
=head2 B<REQUIRED> OpenIDConnectProviderArn => Str
The Amazon Resource Name (ARN) of the OIDC provider resource object in
IAM to get information for. You can get a list of OIDC provider
resource ARNs by using the ListOpenIDConnectProviders operation.
For more information about ARNs, see Amazon Resource Names (ARNs) and
AWS Service Namespaces
(http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
in the I<AWS General Reference>.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method GetOpenIDConnectProvider in L<Paws::IAM>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: L<https://github.com/pplu/aws-sdk-perl>
Please report bugs to: L<https://github.com/pplu/aws-sdk-perl/issues>
=cut
| 36.306667 | 249 | 0.755417 |
ed8ab66b08eaf6f927fff0dd50f78ea5b96d4b62 | 1,772 | pm | Perl | compiled/perl/EnumLongRangeS.pm | dgelessus/ci_targets | bb1a0d76b7673920c832e5332a7b257614fa0e1b | [
"MIT"
] | 4 | 2017-04-08T12:55:11.000Z | 2020-12-05T21:09:31.000Z | compiled/perl/EnumLongRangeS.pm | dgelessus/ci_targets | bb1a0d76b7673920c832e5332a7b257614fa0e1b | [
"MIT"
] | 7 | 2018-04-23T01:30:33.000Z | 2020-10-30T23:56:14.000Z | compiled/perl/EnumLongRangeS.pm | dgelessus/ci_targets | bb1a0d76b7673920c832e5332a7b257614fa0e1b | [
"MIT"
] | 6 | 2017-04-08T11:41:14.000Z | 2020-10-30T22:47:31.000Z | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
use strict;
use warnings;
use IO::KaitaiStruct 0.009_000;
########################################################################
########################################################################
# Kaitai-Struct-generated reader: seven consecutive big-endian signed
# 64-bit integers (f1..f7). Generated code -- edit the .ksy source and
# regenerate instead of changing logic here.
package EnumLongRangeS;

our @ISA = 'IO::KaitaiStruct::Struct';

# Convenience constructor: parse directly from a file on disk.
# Returns undef when the file cannot be opened.
sub from_file {
    my ($class, $filename) = @_;
    my $fd;

    open($fd, '<', $filename) or return undef;
    binmode($fd);
    return new($class, IO::KaitaiStruct::Stream->new($fd));
}

# Enum boundary constants from the .ksy: long/int extremes around zero.
our $CONSTANTS_LONG_MIN = -9223372036854775808;
our $CONSTANTS_INT_BELOW_MIN = -2147483649;
our $CONSTANTS_INT_MIN = -2147483648;
our $CONSTANTS_ZERO = 0;
our $CONSTANTS_INT_MAX = 2147483647;
our $CONSTANTS_INT_OVER_MAX = 2147483648;
our $CONSTANTS_LONG_MAX = 9223372036854775807;

# Standard Kaitai constructor: wire parent/root links, then parse.
sub new {
    my ($class, $_io, $_parent, $_root) = @_;
    my $self = IO::KaitaiStruct::Struct->new($_io);
    bless $self, $class;
    $self->{_parent} = $_parent;
    $self->{_root} = $_root || $self;;
    $self->_read();

    return $self;
}

# Read the seven s8be fields in declaration order.
sub _read {
    my ($self) = @_;

    $self->{f1} = $self->{_io}->read_s8be();
    $self->{f2} = $self->{_io}->read_s8be();
    $self->{f3} = $self->{_io}->read_s8be();
    $self->{f4} = $self->{_io}->read_s8be();
    $self->{f5} = $self->{_io}->read_s8be();
    $self->{f6} = $self->{_io}->read_s8be();
    $self->{f7} = $self->{_io}->read_s8be();
}

# Read-only accessors for the parsed fields.
sub f1 {
    my ($self) = @_;
    return $self->{f1};
}

sub f2 {
    my ($self) = @_;
    return $self->{f2};
}

sub f3 {
    my ($self) = @_;
    return $self->{f3};
}

sub f4 {
    my ($self) = @_;
    return $self->{f4};
}

sub f5 {
    my ($self) = @_;
    return $self->{f5};
}

sub f6 {
    my ($self) = @_;
    return $self->{f6};
}

sub f7 {
    my ($self) = @_;
    return $self->{f7};
}

1;
| 19.688889 | 98 | 0.549661 |
ed5aa30376859977926691a3dc45d2b6eb822d4f | 8,390 | t | Perl | test/parser_master_file_normal.t | hapa96/perl-final-project | 6848181f279fd1e7ea5c758ca1d485f92a898c9f | [
"MIT"
] | 1 | 2021-12-13T13:19:15.000Z | 2021-12-13T13:19:15.000Z | test/parser_master_file_normal.t | hapa96/perl-final-project | 6848181f279fd1e7ea5c758ca1d485f92a898c9f | [
"MIT"
] | null | null | null | test/parser_master_file_normal.t | hapa96/perl-final-project | 6848181f279fd1e7ea5c758ca1d485f92a898c9f | [
"MIT"
] | 1 | 2021-09-17T13:36:45.000Z | 2021-09-17T13:36:45.000Z | #!/usr/bin/env perl
use v5.34;
use warnings;
use Test::More;
# NOTE(review): Data::Show appears unused in this file -- probably a
# debugging leftover; harmless, could be removed.
use Data::Show;
use lib "../lib"; # use the parent directory
#module under test
use Parser;
# Tell the module how many tests you plan to run...
plan tests => 2;
#Files
# Fixture: a well-formed master exam file with ten questions.
my $valid_masterfile = "test_data/valid_master_file_normal.txt";
# Call the function being tested, and remember the actual data structure it returns...
my %master_exam= parse_master_file($valid_masterfile);
my $master_ref = \%master_exam;
my $EXPECTED = {
Exam => {
Intro => {
Delimeter => "________________________________________________________________________________\n\n",
Intro_Text => "Complete this exam by placing an 'X' in the box beside each correct\nanswer, like so:\n\n [ ] This is not the correct answer\n [ ] This is not the correct answer either\n [ ] This is an incorrect answer\n [X] This is the correct answer\n [ ] This is an irrelevant answer\n\nScoring: Each question is worth 2 points.\n Final score will be: SUM / 10\n\nWarning: Each question has only one correct answer. Answers to questions\n for which two or more boxes are marked with an 'X' will be scored as zero.\n\n",
},
Questions => [
{
Question => {
Answers => {
Correct_Answer => " [X] Nothing: Perl variables don't have a static type\n",
Other_Answer => [
" [ ] The name of the variable\n",
" [ ] The type of the first value placed in the variable\n",
" [ ] The compile-time type declarator of the variable\n",
" [ ] Random chance\n",
],
},
Task => "1. The type of a Perl variable is determined by:\n",
},
},
{
Question => {
Answers => {
Correct_Answer => " [X] List, scalar, and void\n",
Other_Answer => [
" [ ] List, linear, and void\n",
" [ ] List, scalar, and null\n",
" [ ] Null, scalar, and void\n",
" [ ] Blood, sweat, and tears\n",
],
},
Task => "2. Perl's three main types of call context (or \"amount context\") are:\n",
},
},
{
Question => {
Answers => {
Correct_Answer => " [X] \$_\n",
Other_Answer => [
" [ ] \@_\n",
" [ ] \$\$\n",
" [ ] \$=\n",
" [ ] The last variable that was successfully assigned to\n",
],
},
Task => "3. The \"default variable\" (or \"topic variable\") is:\n",
},
},
{
Question => {
Answers => {
Correct_Answer => " [X] \@ARGV\n",
Other_Answer => [
" [ ] \$ARGV\n",
" [ ] \@ARGS\n",
" [ ] \@ARG\n",
" [ ] \@_\n",
],
},
Task => "4. You can access the command-line arguments of a Perl program via:\n",
},
},
{
Question => {
Answers => {
Correct_Answer => " [X] CPAN\n",
Other_Answer => [
" [ ] CSPAN\n",
" [ ] Github\n",
" [ ] Perlhub\n",
" [ ] www.perl.org\n",
],
},
Task => "5. The main repository for Open Source Perl modules is:\n",
},
},
{
Question => {
Answers => {
Correct_Answer => " [X] '\$' for scalars, '\@' for arrays, '%' for hashes\n",
Other_Answer => [
" [ ] '\$' for scalars, '\@' for hashes, '%' for arrays\n",
" [ ] '\$' for scalars, '\@' for consts, '%' for literals\n",
" [ ] '\$' for numeric, '\@' for emails, '%' for percentages\n",
" [ ] '\$' for lookups, '\@' for reuses, '%' for declarations\n",
],
},
Task => "6. The three standard sigils for variable declarations are:\n",
},
},
{
Question => {
Answers => {
Correct_Answer => " [X] 'my' variables are lexically scoped; 'our' variables are package scoped\n",
Other_Answer => [
" [ ] 'my' variables are subroutine scoped; 'our' variables are block scoped\n",
" [ ] 'my' variables are compile-time; 'our' variables are run-time\n",
" [ ] 'my' variables must be scalars; 'our' variables must be arrays or hashes\n",
" [ ] 'my' variables are assignable; 'our' variables are constants\n",
],
},
Task => "7. The difference between a 'my' variable and an 'our' variable is:\n",
},
},
{
Question => {
Answers => {
Correct_Answer => " [X] ...does not interpolate variables or backslashed escape sequences\n",
Other_Answer => [
" [ ] ...only interpolates variables, but not backslashed escape sequences\n",
" [ ] ...only interpolates backslashed escape sequences, but not variables\n",
" [ ] ...interpolates both variables and backslashed escape sequences\n",
" [ ] ...converts its contents to ASCII, even if they are Unicode characters\n",
],
},
Task => "8. A single-quoted string (such as: 'I will achieve 100% on this exam')...\n",
},
},
{
Question => {
Answers => {
Correct_Answer => " [X] ...another way of writing the double-quoted string: \"XXXXX\"\n",
Other_Answer => [
" [ ] ...another way of writing the single-quoted string: 'XXXXX'\n",
" [ ] ...another way of writing the list of strings: ('X', 'X', 'X', 'X', 'X')\n",
" [ ] ...another way of writing the array of strings: ['X', 'X', 'X', 'X', 'X']\n",
" [ ] ...a call to the 'qq' function, passing it a block of code\n",
],
},
Task => "9. The term qq{XXXXX} is...\n",
},
},
{
Question => {
Answers => {
Correct_Answer => " [X] 1'042\n",
Other_Answer => [
" [ ] 1042\n",
" [ ] 1_042\n",
" [ ] 1.042e3\n",
" [ ] 0b10000010010\n",
],
},
Task => "10. Which of the following is NOT a single valid Perl number?\n",
},
},
],
},
};
#Basic Test
# is() rather than ok(): on failure it reports got/expected counts,
# which ok(EXPR) cannot do; test descriptions aid diagnosis.
is(scalar @{ $master_exam{Exam}{Questions} }, 10, 'master file yields 10 questions');
# Are the two data structures identical at every point???
is_deeply($master_ref, $EXPECTED, 'parsed structure matches the expected fixture');
# Tell the testing module that we're finished (not required, but safer)...
done_testing();
ed8885b6e91931c298c27654014f179063a93664 | 24,617 | pm | Perl | extlib/lib/perl5/Bio/SeqFeature/Lite.pm | akourm910e/akourm910e.github.io | f0ea20c4d923ccdf457f9b308d5ab390b2bbf0fd | [
"Artistic-2.0"
] | 5 | 2017-10-27T15:03:19.000Z | 2020-04-25T17:44:49.000Z | extlib/lib/perl5/Bio/SeqFeature/Lite.pm | akourm910e/akourm910e.github.io | f0ea20c4d923ccdf457f9b308d5ab390b2bbf0fd | [
"Artistic-2.0"
] | 4 | 2021-01-28T20:49:55.000Z | 2022-03-25T19:02:54.000Z | extlib/lib/perl5/Bio/SeqFeature/Lite.pm | kbasecollaborations/Kbasecollaborations.github.io | 49b8dff001c04f89c4b2c96f08e93f450d4b36b5 | [
"Artistic-2.0"
] | 2 | 2019-02-22T10:51:15.000Z | 2019-02-22T12:35:35.000Z | package Bio::SeqFeature::Lite;
=head1 NAME
Bio::SeqFeature::Lite - Lightweight Bio::SeqFeatureI class
=head1 SYNOPSIS
# create a simple feature with no internal structure
$f = Bio::SeqFeature::Lite->new(-start => 1000,
-stop => 2000,
-type => 'transcript',
-name => 'alpha-1 antitrypsin',
-desc => 'an enzyme inhibitor',
);
# create a feature composed of multiple segments, all of type "similarity"
$f = Bio::SeqFeature::Lite->new(-segments => [[1000,1100],[1500,1550],[1800,2000]],
-name => 'ABC-3',
-type => 'gapped_alignment',
-subtype => 'similarity');
# build up a gene exon by exon
$e1 = Bio::SeqFeature::Lite->new(-start=>1,-stop=>100,-type=>'exon');
$e2 = Bio::SeqFeature::Lite->new(-start=>150,-stop=>200,-type=>'exon');
$e3 = Bio::SeqFeature::Lite->new(-start=>300,-stop=>500,-type=>'exon');
$f = Bio::SeqFeature::Lite->new(-segments=>[$e1,$e2,$e3],-type=>'gene');
=head1 DESCRIPTION
This is a simple Bio::SeqFeatureI-compliant object that is compatible
with Bio::Graphics::Panel. With it you can create lightweight feature
objects for drawing.
All methods are as described in L<Bio::SeqFeatureI> with the following additions:
=head2 The new() Constructor
$feature = Bio::SeqFeature::Lite->new(@args);
This method creates a new feature object. You can create a simple
feature that contains no subfeatures, or a hierarchically nested object.
Arguments are as follows:
-seq_id the reference sequence
-start the start position of the feature
-end the stop position of the feature
-stop an alias for end
-name the feature name (returned by seqname())
-type the feature type (returned by primary_tag())
-primary_tag the same as -type
-source the source tag
-score the feature score (for GFF compatibility)
-desc a description of the feature
-segments a list of subfeatures (see below)
-subtype the type to use when creating subfeatures
-strand the strand of the feature (one of -1, 0 or +1)
-phase the phase of the feature (0..2)
-seq a dna or protein sequence string to attach to feature
-id an alias for -name
-seqname an alias for -name
-display_id an alias for -name
-display_name an alias for -name (do you get the idea the API has changed?)
-primary_id unique database ID
-url a URL to link to when rendered with Bio::Graphics
-attributes a hashref of tag value attributes, in which the key is the tag
and the value is an array reference of values
-factory a reference to a feature factory, used for compatibility with
more obscure parts of Bio::DB::GFF
The subfeatures passed in -segments may be an array of
Bio::SeqFeature::Lite objects, or an array of [$start,$stop]
pairs. Each pair should be a two-element array reference. In the
latter case, the feature type passed in -subtype will be used when
creating the subfeatures.
If no feature type is passed, then it defaults to "feature".
=head2 Non-SeqFeatureI methods
A number of new methods are provided for compatibility with
Ace::Sequence, which has a slightly different API from SeqFeatureI:
=over 4
=item url()
Get/set the URL that the graphical rendering of this feature will link to.
=item add_segment(@segments)
Add one or more segments (a subfeature). Segments can either be
Feature objects, or [start,stop] arrays, as in the -segments argument
to new(). The feature endpoints are automatically adjusted.
=item segments()
An alias for sub_SeqFeature().
=item get_SeqFeatures()
Alias for sub_SeqFeature()
=item get_all_SeqFeatures()
Alias for sub_SeqFeature()
=item merged_segments()
Another alias for sub_SeqFeature().
=item stop()
An alias for end().
=item name()
An alias for seqname().
=item exons()
An alias for sub_SeqFeature() (you don't want to know why!)
=back
=cut
use strict;
use base qw(Bio::Root::Root Bio::SeqFeatureI Bio::LocationI Bio::SeqI);
# Method aliases for compatibility with Ace::Sequence and the
# Bio::DB::GFF / Bio::SeqFeatureI APIs: each typeglob assignment makes
# the left-hand name a synonym for a subroutine defined later in this
# file.
*stop = \&end;
*info = \&name;
*seqname = \&name;
*exons = *sub_SeqFeature = *merged_segments = \&segments;
*get_all_SeqFeatures = *get_SeqFeatures = \&segments;
*method = \&primary_tag;
*source = \&source_tag;
*get_tag_values = \&each_tag_value;
*add_SeqFeature = \&add_segment;
*get_all_tags = \&all_tags;
*abs_ref = \&ref;
# implement Bio::SeqI and FeatureHolderI interface
# Bio::SeqI compliance: a Lite feature serves as its own primary sequence.
sub primary_seq { return $_[0] }
# Get/set the annotation collection. Setting validates that the value
# implements Bio::AnnotationCollectionI; getting lazily creates an
# empty Bio::Annotation::Collection.
# NOTE(review): Bio::Annotation::Collection is never use'd/require'd in
# this file -- presumably loaded elsewhere in BioPerl; confirm.
sub annotation {
    my ($obj,$value) = @_;
    if( defined $value ) {
	$obj->throw("object of class ".ref($value)." does not implement ".
		    "Bio::AnnotationCollectionI. Too bad.")
	    unless $value->isa("Bio::AnnotationCollectionI");
	$obj->{'_annotation'} = $value;
    } elsif( ! defined $obj->{'_annotation'}) {
	$obj->{'_annotation'} = Bio::Annotation::Collection->new();
    }
    return $obj->{'_annotation'};
}
# Get/set the species associated with this feature.
# NOTE: a false new value (undef, 0, '') is treated as "no argument",
# so the current species is returned rather than overwritten.
sub species {
    my ($self, $new_species) = @_;
    return $self->{'species'} unless $new_species;
    $self->{'species'} = $new_species;
}
# Stubs and trivial delegates kept for Bio::DB::GFF-style API parity.
sub is_remote { return; }
# Number of direct subfeatures (0 when none have been added).
sub feature_count {
  my $self = shift;
  my $segs = $self->{segments} || [];
  return scalar @$segs;
}
# This class never carries an alignment target; hit() mirrors target().
sub target { return }
sub hit {
  my $self = shift;
  return $self->target;
}
# Combined type string: "primary_tag:source_tag", or just the primary
# tag when the source tag is empty.
sub type {
  my $self = shift;
  my ($tag, $src) = ($self->primary_tag, $self->source_tag);
  return $src eq '' ? $tag : "$tag:$src";
}
# usage:
# Bio::SeqFeature::Lite->new(
#                         -start => 1,
#                         -end => 100,
#                         -name => 'fred feature',
#                         -strand => +1);
#
# Alternatively, use -segments => [ [start,stop],[start,stop]...]
# to create a multisegmented feature.
sub new {
  my $class= shift;
  $class = ref($class) if ref $class;
  my %arg = @_;
  my $self = bless {},$class;
  # Normalize -strand: accept GFF-style '+'/'-'/'.' or a number; store
  # internally as -1 / 0 / +1.
  $arg{-strand} ||= 0;
  if ($arg{-strand} =~ /^[\+\-\.]$/){
    ($arg{-strand} eq "+") && ($self->{strand} = '1');
    ($arg{-strand} eq "-") && ($self->{strand} = '-1');
    ($arg{-strand} eq ".") && ($self->{strand} = '0');
  } else {
    $self->{strand} = $arg{-strand} ? ($arg{-strand} >= 0 ? +1 : -1) : 0;
  }
  # The feature name has many historical aliases, in decreasing priority.
  $self->{name} = $arg{-name} || $arg{-seqname} || $arg{-display_id}
    || $arg{-display_name} || $arg{-id};
  $self->{type} = $arg{-type} || $arg{-primary_tag} || 'feature';
  $self->{subtype} = $arg{-subtype} if exists $arg{-subtype};
  $self->{source} = $arg{-source} || $arg{-source_tag} || '';
  $self->{score} = $arg{-score} if exists $arg{-score};
  $self->{start} = $arg{-start};
  $self->{stop} = exists $arg{-end} ? $arg{-end} : $arg{-stop};
  $self->{ref} = $arg{-seq_id} || $arg{-ref};
  $self->{attributes} = $arg{-attributes} || $arg{-tag};
  for my $option (qw(class url seq phase desc primary_id)) {
    $self->{$option} = $arg{"-$option"} if exists $arg{"-$option"};
  }
  # is_circular is needed for Bio::PrimarySeqI compliance
  $self->{is_circular} = $arg{-is_circular} || 0;
  # fix start, stop
  # (swap reversed coordinates and flip the strand to compensate)
  if (defined $self->{stop} && defined $self->{start}
      && $self->{stop} < $self->{start}) {
    @{$self}{'start','stop'} = @{$self}{'stop','start'};
    $self->{strand} *= -1;
  }
  my @segments;
  if (my $s = $arg{-segments}) {
    # NB: when $self ISA Bio::DB::SeqFeature the following invokes
    # Bio::DB::SeqFeature::add_segment and not
    # Bio::SeqFeature::Lite::add_segment (as might be expected?)
    $self->add_segment(@$s);
  }
  $self;
}
# Add one or more subfeatures. Segments may be [start,stop] array refs
# (turned into child features that inherit this feature's metadata and
# use -subtype, falling back to -type) or ready-made feature objects.
# The parent's start/end/ref are widened to cover every segment.
sub add_segment {
  my $self = shift;
  my $type = $self->{subtype} || $self->{type};
  $self->{segments} ||= [];
  my $ref = $self->seq_id;
  my $name = $self->name;
  my $class = $self->class;
  my $source_tag = $self->source_tag;
  # Sentinels chosen so any real coordinate replaces them immediately.
  my $min_start = $self->start || 999_999_999_999;
  my $max_stop = $self->end || -999_999_999_999;
  my @segments = @{$self->{segments}};
  for my $seg (@_) {
    if (ref($seg) eq 'ARRAY') {
      my ($start,$stop) = @{$seg};
      next unless defined $start && defined $stop;  # fixes an obscure bug somewhere above us
      my $strand = $self->{strand};
      if ($start > $stop) {
	($start,$stop) = ($stop,$start);
	$strand = -1;
      }
      push @segments,$self->new(-start => $start,
				-stop => $stop,
				-strand => $strand,
				-ref => $ref,
				-type => $type,
				-name => $name,
				-class => $class,
				-phase => $self->{phase},
				-score => $self->{score},
				-source_tag => $source_tag,
				-attributes => $self->{attributes},
				);
      $min_start = $start if $start < $min_start;
      $max_stop = $stop if $stop > $max_stop;
    } elsif (ref $seg) {
      push @segments,$seg;
      $min_start = $seg->start if ($seg->start && $seg->start < $min_start);
      $max_stop = $seg->end if ($seg->end && $seg->end > $max_stop);
    }
  }
  if (@segments) {
    local $^W = 0;  # some warning of an uninitialized variable...
    $self->{segments} = \@segments;
    $self->{ref} ||= $self->{segments}[0]->seq_id;
    $self->{start} = $min_start;
    $self->{stop} = $max_stop;
  }
}
# Return the list of subfeatures. Historical quirk preserved: in
# scalar context with no segments the return value is 0, not undef.
sub segments {
  my $self = shift;
  my $segs = $self->{segments};
  return wantarray ? () : 0 unless $segs;
  return @$segs;
}
# Generic get/set accessors: each returns the previous value, and
# installs a new one when an argument is supplied.
sub score {
  my ($self, @new) = @_;
  my $previous = $self->{score};
  $self->{score} = $new[0] if @new;
  return $previous;
}
sub primary_tag {
  my ($self, @new) = @_;
  my $previous = $self->{type};
  $self->{type} = $new[0] if @new;
  return $previous;
}
sub name {
  my ($self, @new) = @_;
  my $previous = $self->{name};
  $self->{name} = $new[0] if @new;
  return $previous;
}
# seq_id() is the SeqFeatureI name for the reference-sequence accessor.
sub seq_id { shift->ref(@_) }
# Get/set the reference (chromosome/contig) name; returns the previous
# value. NOTE: this declares a ref() *method*; bareword ref() calls in
# this file still reach Perl's builtin (hence the explicit CORE::ref
# calls elsewhere).
sub ref {
  my $self = shift;
  my $d = $self->{ref};
  $self->{ref} = shift if @_;
  $d;
}
# Start coordinate accessor; returns the previous value when setting.
# When a refseq() is attached, the result is remapped to coordinates
# relative to that reference feature (strand-aware).
sub start {
  my $self = shift;
  my $d = $self->{start};
  $self->{start} = shift if @_;
  if (my $rs = $self->{refseq}) {
    my $strand = $rs->strand || 1;
    return $strand >= 0 ? ($d - $rs->start + 1) : ($rs->end - $d + 1);
  } else {
    return $d;
  }
}
# End coordinate accessor with the same refseq-relative remapping.
sub end {
  my $self = shift;
  my $d = $self->{stop};
  $self->{stop} = shift if @_;
  if (my $rs = $self->{refseq}) {
    my $strand = $rs->strand || 1;
    return $strand >= 0 ? ($d - $rs->start + 1) : ($rs->end - $d + 1);
  }
  $d;
}
# Strand accessor (-1/0/+1); expressed relative to the refseq's strand
# when one is attached (+1 = same strand, -1 = opposite).
sub strand {
  my $self = shift;
  my $d = $self->{strand};
  $self->{strand} = shift if @_;
  if (my $rs = $self->{refseq}) {
    my $rstrand = $rs->strand;
    return 0 unless $d;
    return 1 if $rstrand == $d;
    return -1 if $rstrand != $d;
  }
  $d;
}
# this does nothing, but it is here for compatibility reasons
sub absolute {
  my $self = shift;
  my $d = $self->{absolute};
  $self->{absolute} = shift if @_;
  $d;
}
# abs_* variants: temporarily detach any refseq so the stored absolute
# (chromosomal) coordinates are reported instead of relative ones.
sub abs_start {
  my $self = shift;
  local $self->{refseq} = undef;
  $self->start(@_);
}
sub abs_end {
  my $self = shift;
  local $self->{refseq} = undef;
  $self->end(@_);
}
sub abs_strand {
  my $self = shift;
  local $self->{refseq} = undef;
  $self->strand(@_);
}
# Length in bases; coordinates are inclusive. (Shadows the builtin
# length() only when invoked as a method on this class.)
sub length {
  my $self = shift;
  return $self->end - $self->start + 1;
}
#is_circular is needed for Bio::PrimarySeqI
sub is_circular {
  my $self = shift;
  my $d = $self->{is_circular};
  $self->{is_circular} = shift if @_;
  $d;
}
# Return the sequence attached to this feature, or '' when none is set.
sub seq {
  my $self = shift;
  return exists $self->{seq} ? $self->{seq} : '';
}

# Return the raw DNA string; unwraps a sequence object when one was
# attached instead of a plain string.
sub dna {
  my $seq = shift->seq;
  return CORE::ref($seq) ? $seq->seq : $seq;
}
=head2 display_name
Title : display_name
Usage : $id = $obj->display_name or $obj->display_name($newid);
Function: Gets or sets the display id, also known as the common name of
the Seq object.
The semantics of this is that it is the most likely string
to be used as an identifier of the sequence, and likely to
have "human" readability. The id is equivalent to the LOCUS
field of the GenBank/EMBL databanks and the ID field of the
Swissprot/sptrembl database. In fasta format, the >(\S+) is
presumed to be the id, though some people overload the id
to embed other information. Bioperl does not use any
embedded information in the ID field, and people are
encouraged to use other mechanisms (accession field for
example, or extending the sequence object) to solve this.
Notice that $seq->id() maps to this function, mainly for
legacy/convenience issues.
Returns : A string
Args : None or a new id
=cut
# display_name()/display_id(): Bio::SeqI-style aliases for name().
sub display_name { shift->name(@_) }
*display_id = \&display_name;
=head2 accession_number
Title : accession_number
Usage : $unique_biological_key = $obj->accession_number;
Function: Returns the unique biological id for a sequence, commonly
called the accession_number. For sequences from established
databases, the implementors should try to use the correct
accession number. Notice that primary_id() provides the
unique id for the implementation, allowing multiple objects
to have the same accession number in a particular implementation.
For sequences with no accession number, this method should return
"unknown".
Returns : A string
Args : None
=cut
# This lightweight class carries no database accession, so report the
# conventional placeholder required by the Bio::SeqI contract.
sub accession_number { 'unknown' }
=head2 alphabet
Title : alphabet
Usage : if( $obj->alphabet eq 'dna' ) { /Do Something/ }
Function: Returns the type of sequence being one of
'dna', 'rna' or 'protein'. This is case sensitive.
This is not called <type> because this would cause
upgrade problems from the 0.5 and earlier Seq objects.
Returns : a string either 'dna','rna','protein'. NB - the object must
make a call of the type - if there is no type specified it
has to guess.
Args : none
Status : Virtual
=cut
# Hard-coded: features in this class always describe DNA sequence.
sub alphabet { 'dna' }
=head2 desc
Title : desc
Usage : $seqobj->desc($string) or $seqobj->desc()
Function: Sets or gets the description of the sequence
Example :
Returns : The description
Args : The description or none
=cut
# Get/set the human-readable description; the returned previous value
# comes via notes(), so it may fall back to the 'Note' attribute.
sub desc {
  my $self = shift;
  my ($d) = $self->notes;
  $self->{desc} = shift if @_;
  $d;
}
# With arguments: the values of the named tag. Without: the whole
# attributes hash flattened into a key/value list.
sub attributes {
  my $self = shift;
  if (@_) {
    return $self->get_tag_values(@_);
  } else {
    return $self->{attributes} ? %{$self->{attributes}} : ();
  }
}
# Get/set the unique database ID; returns the previous value.
sub primary_id {
  my $self = shift;
  my $d = $self->{primary_id};
  $self->{primary_id} = shift if @_;
  return $d;
#  return $d if defined $d;
#  return (overload::StrVal($self) =~ /0x([a-f0-9]+)/)[0];
}
# Free-text notes: the -desc value if set, else the 'Note' attribute.
sub notes {
  my $self = shift;
  my $notes = $self->{desc};
  return $notes if defined $notes;
  return $self->attributes('Note');
}
# Alternative names, stored under the 'Alias' attribute.
sub aliases {
  my $self = shift;
  return $self->attributes('Alias');
}
# Lower / higher of the two endpoints, regardless of strand.
sub low {
  my $self = shift;
  return $self->start < $self->end ? $self->start : $self->end;
}
sub high {
  my $self = shift;
  return $self->start > $self->end ? $self->start : $self->end;
}
=head2 location
Title : location
Usage : my $location = $seqfeature->location()
Function: returns a location object suitable for identifying location
of feature on sequence or parent feature
Returns : Bio::LocationI object
Args : none
=cut
# Return a Bio::LocationI for this feature: a Bio::Location::Split over
# the subfeatures when present, otherwise the feature itself (this
# class also implements the Bio::LocationI interface).
sub location {
   my $self = shift;
   require Bio::Location::Split unless Bio::Location::Split->can('new');
   my $location;
   if (my @segments = $self->segments) {
       $location = Bio::Location::Split->new();
       foreach (@segments) {
         $location->add_sub_Location($_);
       }
   } else {
       $location = $self;
   }
   $location;
}
# One Bio::Location::Simple per segment, or a single one for the whole
# feature when it has no subfeatures.
sub each_Location {
   my $self = shift;
   require Bio::Location::Simple unless Bio::Location::Simple->can('new');
   if (my @segments = $self->segments) {
     return map {
	Bio::Location::Simple->new(-start => $_->start,
				   -end => $_->end,
				   -strand => $_->strand);
     } @segments;
   } else {
     return Bio::Location::Simple->new(-start => $self->start,
				       -end => $self->end,
				       -strand => $self->strand);
   }
}
=head2 location_string
Title : location_string
Usage : my $string = $seqfeature->location_string()
Function: Returns a location string in a format recognized by gbrowse
Returns : a string
Args : none
This is a convenience function used by the generic genome browser. It
returns the location of the feature and its subfeatures in the compact
form "start1..end1,start2..end2,...". Use
$seqfeature-E<gt>location()-E<gt>toFTString() to obtain a standard
GenBank/EMBL location representation.
=cut
# Compact "start1..end1,start2..end2" form used by gbrowse; falls back
# to a single feature-table range when there are no segments.
sub location_string {
  my $self = shift;
  my @segments = $self->segments or return $self->to_FTstring;
  join ',',map {$_->to_FTstring} @segments;
}
sub coordinate_policy {
   require Bio::Location::WidestCoordPolicy unless Bio::Location::WidestCoordPolicy->can('new');
   return Bio::Location::WidestCoordPolicy->new();
}
# Bio::LocationI: positions are EXACT, so min == max at both ends.
sub min_start { shift->low }
sub max_start { shift->low }
sub min_end { shift->high }
sub max_end { shift->high}
sub start_pos_type { 'EXACT' }
sub end_pos_type { 'EXACT' }
# GenBank/EMBL-style feature-table range, e.g. "chr1:100..200", or
# "complement(100..200)" on the minus strand.
sub to_FTstring {
  my $self = shift;
  my $low = $self->min_start;
  my $high = $self->max_end;
  my $strand = $self->strand;
  my $str = defined $strand && $strand<0 ? "complement($low..$high)" : "$low..$high";
  if (my $id = $self->seq_id()) {
    $str = $id . ":" . $str;
  }
  $str;
}
# Get/set the coding phase (0..2); returns the previous value.
sub phase {
  my ($self, @new) = @_;
  my $previous = $self->{phase};
  $self->{phase} = $new[0] if @new;
  return $previous;
}
# Get/set the feature class; defaults to 'Sequence' (an AceDB-ism).
sub class {
  my ($self, @new) = @_;
  my $previous = $self->{class};
  $self->{class} = $new[0] if @new;
  return defined $previous ? $previous : 'Sequence';
}
# Get/set the GFF dumping version (2 unless explicitly set to 3).
sub version {
  my ($self, @new) = @_;
  my $previous = $self->{gff3_version} || 2;
  $self->{gff3_version} = $new[0] if @new;
  return $previous;
}
# Render this feature as a GFF2 line (delegates to gff3_string() when
# the dumping version is 3). With a true $recurse argument, one line
# per subfeature is appended.
sub gff_string {
  my $self = shift;
  if ($self->version == 3) {
    return $self->gff3_string(@_);
  }
  my $recurse = shift;
  my $name = $self->name;
  my $class = $self->class;
  # BUGFIX: was "my $group = ... if $name;" -- a lexical declared in a
  # false conditional has undefined behavior in Perl (the variable can
  # retain a stale value between calls). Use an explicit ternary.
  my $group = $name ? "$class $name" : undef;
  my $strand = ('-','.','+')[$self->strand+1];
  my $string;
  $string .= join("\t",
                  $self->ref||'.',$self->source||'.',$self->method||'.',
                  $self->start||'.',$self->stop||'.',
                  defined($self->score) ? $self->score : '.',
                  $strand||'.',
                  defined($self->phase) ? $self->phase : '.',
                  $group||''
                 );
  $string .= "\n";
  if ($recurse) {
    foreach ($self->sub_SeqFeature) {
      $string .= $_->gff_string($recurse);
    }
  }
  $string;
}
# Suggested strategy for dealing with the multiple parentage issue.
# First recurse through object tree and record parent tree.
# Then recurse again, skipping objects we've seen before.
# Arguments: $recurse - also emit descendants; $parent_tree - map of
# child-id => {parent-id => 1} built on the first call; $seenit - ids
# already printed (multi-parent children appear only once); $force_id -
# overrides this feature's ID, used when flattening split locations.
sub gff3_string {
    my ($self,$recurse,$parent_tree,$seenit,$force_id) = @_;
    $parent_tree ||= {};
    $seenit ||= {};
    my @rsf = ();
    my @parent_ids;
    if ($recurse) {
	$self->_traverse($parent_tree) unless %$parent_tree; # this will record parents of all children
	my $primary_id = defined $force_id ? $force_id : $self->_real_or_dummy_id;
	return if $seenit->{$primary_id}++;
	@rsf = $self->get_SeqFeatures;
	if (@rsf) {
	    # Detect case in which we have a split location feature. In this case we
	    # skip to the grandchildren and trick them into thinking that our parent is theirs.
	    my %types = map {$_->primary_tag=>1} @rsf;
	    my @types = keys %types;
	    if (@types == 1 && $types[0] eq $self->primary_tag) {
		return join ("\n",map {$_->gff3_string(1,$parent_tree,{},$primary_id)} @rsf);
	    }
	}
	@parent_ids = keys %{$parent_tree->{$primary_id}};
    }
    my $group = $self->format_attributes(\@parent_ids,$force_id);
    my $name = $self->name;
    my $class = $self->class;
    my $strand = ('-','.','+')[$self->strand+1];
    my $p = join("\t",
		 $self->seq_id||'.',
		 $self->source||'.',
		 $self->method||'.',
		 $self->start||'.',
		 $self->stop||'.',
		 defined($self->score) ? $self->score : '.',
		 $strand||'.',
		 defined($self->phase) ? $self->phase : '.',
		 $group||'');
    return join("\n",
		$p,
		map {$_->gff3_string(1,$parent_tree,$seenit)} @rsf);
}
# Return the feature's primary ID, or a dummy ID derived from the
# object's memory address when no primary ID has been set.
sub _real_or_dummy_id {
  my $self = shift;
  my $id = $self->primary_id;
  return $id if defined $id;
  # BUGFIX: original read "return return (...)" -- a duplicated
  # keyword; a single return is intended (behavior was unchanged, the
  # outer return was unreachable).
  # NOTE(review): relies on overload::StrVal() being loaded by a parent
  # class (this file never does "use overload") -- confirm.
  return (overload::StrVal($self) =~ /0x([a-f0-9]+)/)[0];
}
# Depth-first walk that records, for every feature in the subtree,
# which feature(s) are its parents: $tree->{child_id}{parent_id}++.
sub _traverse {
  my $self = shift;
  my $tree = shift;  # tree => {$child}{$parent} = 1
  my $parent = shift;
  my $id = $self->_real_or_dummy_id;
  defined $id or return;
  $tree->{$id}{$parent->_real_or_dummy_id}++ if $parent;
  $_->_traverse($tree,$self) foreach $self->get_SeqFeatures;
}
# No backing database for this lightweight class.
sub db { return }
# Get/set the source tag; returns the previous value.
sub source_tag {
  my ($self, @new) = @_;
  my $previous = $self->{source};
  $self->{source} = $new[0] if @new;
  return $previous;
}
# Historical stub (possibly for Ace::Sequence::Feature compliance);
# always returns an empty list.
sub introns {
  my $self = shift;
  return;
}
# True when the given tag is present in the attributes hash.
sub has_tag {
  my ($self, $tag) = @_;
  return exists $self->{attributes}{$tag};
}
# Percent-escaping used for GFF3 attribute output: every character
# outside the safe set becomes %XX (uppercase hex).
sub escape {
  my ($self, $toencode) = @_;
  $toencode =~ s/([^a-zA-Z0-9_.:?^*\(\)\[\]@!+-])/uc sprintf("%%%02x",ord($1))/eg;
  return $toencode;
}
# All attribute tag names currently set on this feature.
sub all_tags {
  my $self = shift;
  return keys %{$self->{attributes}};
}
# Append one or more values to a tag (creates the tag on first use).
sub add_tag_value {
  my ($self, $tag_name, @values) = @_;
  push @{ $self->{attributes}{$tag_name} }, @values;
}
# Remove a tag (and all of its values) from the feature.
sub remove_tag {
  my ($self, $tag_name) = @_;
  delete $self->{attributes}{$tag_name};
}
# Return the value(s) stored under a tag: a list when the tag holds an
# array reference, otherwise the raw scalar value. An absent (or
# false-valued, as historically) tag yields an empty list.
sub each_tag_value {
  my ($self, $tag) = @_;
  my $value = $self->{attributes}{$tag} or return;
  return @$value if CORE::ref($value) eq 'ARRAY';
  return $value;
}
# Like get_tag_values(), but unwraps a single value from its list.
sub get_Annotations {
  my ($self, $tag) = @_;
  my @values = $self->get_tag_values($tag);
  return @values == 1 ? $values[0] : @values;
}
# Build the GFF3 column-9 attribute string: "Name=...;Parent=...;ID=..."
# first, then the remaining tags, all percent-escaped via escape().
# $parent is an array ref of parent IDs; $fallback_id (when defined)
# replaces the feature's own ID.
sub format_attributes {
  my $self = shift;
  my $parent = shift;
  my $fallback_id = shift;
  my @tags = $self->get_all_tags;
  my @result;
  for my $t (@tags) {
    my @values = $self->get_tag_values($t);
    push @result,join '=',$self->escape($t),join(',', map {$self->escape($_)} @values) if @values;
  }
  #my $id = $self->escape($self->_real_or_dummy_id) || $fallback_id;
  my $id = $fallback_id || $self->escape($self->_real_or_dummy_id);
  my $parent_id;
  if (@$parent) {
    $parent_id = join (',',map {$self->escape($_)} @$parent);
  }
  my $name = $self->display_name;
  unshift @result,"ID=".$id if defined $id;
  unshift @result,"Parent=".$parent_id if defined $parent_id;
  unshift @result,"Name=".$self->escape($name) if defined $name;
  return join ';',@result;
}
=head2 clone
Title : clone
Usage : my $feature = $seqfeature->clone
Function: Create a deep copy of the feature
Returns : A copy of the feature
Args : none
=cut
# Create a copy of the feature. Only the attributes hash is duplicated
# one level deep (tag => [values]); segments and other references are
# shared with the original (shallow copy).
# NOTE(review): assumes every attribute value is an array ref; a
# scalar-valued tag would die on the @{...} dereference -- confirm.
sub clone {
  my $self = shift;
  my %clone = %$self;
  # overwrite attributes
  my $clone = bless \%clone,CORE::ref($self);
  $clone{attributes} = {};
  for my $k (keys %{$self->{attributes}}) {
    @{$clone{attributes}{$k}} = @{$self->{attributes}{$k}};
  }
  return $clone;
}
=head2 refseq
Title : refseq
Usage : $ref = $s->refseq([$newseq] [,$newseqclass])
Function: get/set reference sequence
Returns : current reference sequence
Args : new reference sequence and class (optional)
Status : Public
This method will get or set the reference sequence. Called with no
arguments, it returns the current reference sequence. Called with any
Bio::SeqFeatureI object that provides the seq_id(), start(), end() and
strand() methods, it sets that object as the new reference sequence.
The method will generate an exception if you attempt to set the
reference sequence to a sequence that has a different seq_id from the
current feature.
=cut
sub refseq {
    my $self = shift;
    # Remember the current value first: like a classic get/set accessor,
    # the *previous* reference sequence is what gets returned.
    my $current = $self->{refseq};
    if (@_) {
        my $new_ref = shift;
        # The replacement feature must live on the same sequence.
        $new_ref->seq_id eq $self->seq_id
            or $self->throw("attempt to set refseq using a feature that does not share the same seq_id");
        $self->{refseq} = $new_ref;
    }
    return $current;
}
sub DESTROY { }
1;
__END__
=head1 SEE ALSO
L<Bio::Graphics::Feature>
=head1 AUTHOR
Lincoln Stein E<lt>lstein@cshl.eduE<gt>.
Copyright (c) 2006 Cold Spring Harbor Laboratory
This library is free software; you can redistribute it and/or modify
it under the same terms as Perl itself. See DISCLAIMER.txt for
disclaimers of warranty.
=cut
| 26.874454 | 98 | 0.597514 |
73e05c196312e28f567f8d2b064f6143fb0054c5 | 2,207 | t | Perl | lua-resty-redis-0.18/t/pubsub.t | caidongyun/nginx-openresty-windows | c60fbbda4ab86f9176255eed03b4ac06cb3c7b16 | [
"BSD-2-Clause"
] | 6 | 2015-09-22T05:57:29.000Z | 2019-10-10T12:58:41.000Z | lua-resty-redis-0.18/t/pubsub.t | caidongyun/nginx-openresty-windows | c60fbbda4ab86f9176255eed03b4ac06cb3c7b16 | [
"BSD-2-Clause"
] | null | null | null | lua-resty-redis-0.18/t/pubsub.t | caidongyun/nginx-openresty-windows | c60fbbda4ab86f9176255eed03b4ac06cb3c7b16 | [
"BSD-2-Clause"
] | 4 | 2016-04-16T04:54:01.000Z | 2021-04-16T07:04:25.000Z | # vim:set ft= ts=4 sw=4 et:
use Test::Nginx::Socket::Lua;
use Cwd qw(cwd);
repeat_each(2);
plan tests => repeat_each() * (3 * blocks());
my $pwd = cwd();
our $HttpConfig = qq{
lua_package_path "$pwd/lib/?.lua;;";
lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;";
};
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
$ENV{TEST_NGINX_REDIS_PORT} ||= 6379;
no_long_string();
#no_diff();
run_tests();
__DATA__
=== TEST 1: single channel
--- http_config eval: $::HttpConfig
--- config
location /t {
content_by_lua '
local cjson = require "cjson"
local redis = require "resty.redis"
local red = redis:new()
local red2 = redis:new()
red:set_timeout(1000) -- 1 sec
red2:set_timeout(1000) -- 1 sec
local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT)
if not ok then
ngx.say("1: failed to connect: ", err)
return
end
ok, err = red2:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT)
if not ok then
ngx.say("2: failed to connect: ", err)
return
end
res, err = red:subscribe("dog")
if not res then
ngx.say("1: failed to subscribe: ", err)
return
end
ngx.say("1: subscribe: ", cjson.encode(res))
res, err = red2:publish("dog", "Hello")
if not res then
ngx.say("2: failed to publish: ", err)
return
end
ngx.say("2: publish: ", cjson.encode(res))
res, err = red:read_reply()
if not res then
ngx.say("1: failed to read reply: ", err)
return
end
ngx.say("1: receive: ", cjson.encode(res))
red:close()
red2:close()
';
}
--- request
GET /t
--- response_body
1: subscribe: ["subscribe","dog",1]
2: publish: 1
1: receive: ["message","dog","Hello"]
--- no_error_log
[error]
| 24.522222 | 99 | 0.491164 |
ed52f22847fa596d91d220f6c4f36c6ecc42f08e | 396 | t | Perl | t/classes/Dancer2-Core/camelize.t | gitpan/Dancer2 | 20186600ceb142bed8743bff43d0f32573c85717 | [
"Artistic-1.0"
] | null | null | null | t/classes/Dancer2-Core/camelize.t | gitpan/Dancer2 | 20186600ceb142bed8743bff43d0f32573c85717 | [
"Artistic-1.0"
] | null | null | null | t/classes/Dancer2-Core/camelize.t | gitpan/Dancer2 | 20186600ceb142bed8743bff43d0f32573c85717 | [
"Artistic-1.0"
] | null | null | null | use strict;
use warnings;
use Dancer2::Core;
use Test::More tests => 4;
my %tests = (
'test' => 'Test',
'class_name' => 'ClassName',
'class_nAME' => 'ClassNAME',
'class_NAME' => 'ClassNAME',
);
foreach my $test ( keys %tests ) {
my $value = $tests{$test};
is(
Dancer2::Core::camelize($test),
$value,
"$test camelized as $value",
);
}
| 17.217391 | 39 | 0.532828 |
ed7b7464498dece47018facff2565066059e2f77 | 504 | pl | Perl | kb/path.pl | LogicalContracts/LogicalEnglish | 6d2d149e3ea61a87c6567c5e0f4794ce99e45097 | [
"Apache-2.0"
] | 7 | 2022-01-16T15:28:25.000Z | 2022-03-05T13:51:04.000Z | kb/path.pl | LogicalContracts/LogicalEnglish | 6d2d149e3ea61a87c6567c5e0f4794ce99e45097 | [
"Apache-2.0"
] | 2 | 2021-12-12T02:33:49.000Z | 2022-01-26T19:50:37.000Z | kb/path.pl | LogicalContracts/LogicalEnglish | 6d2d149e3ea61a87c6567c5e0f4794ce99e45097 | [
"Apache-2.0"
] | 2 | 2022-02-08T12:11:56.000Z | 2022-02-17T00:07:28.000Z | :- module('path',[]).
en("the target language is: prolog.
the templates are:
*a vertex* edge *an other vertex*,
*a vertex* path *an other vertex*,
the knowledge base path includes:
a vertex X path a vertex Y
if X edge Y.
a vertex X path a vertex Y
if X edge a vertex Z
and Z path Y.
scenario one is:
a edge b.
b edge c.
c edge d.
d edge e.
d edge f.
query one is:
which b path a X.
").
/** <examples>
?- answer one with one.
?- show prolog.
*/ | 15.272727 | 38 | 0.593254 |
ed7ca0a4385ef639f535830da8ceabb082bb5f2a | 1,793 | pl | Perl | cgi/text.pl | pdurbin/philbot | 9ec64fd774189c7c6180a7c8502a0a924a61b925 | [
"Artistic-2.0"
] | 4 | 2020-05-05T15:13:09.000Z | 2022-01-05T01:30:12.000Z | cgi/text.pl | HurricaneHackers/ilbot | 0801978fd900c3d182bfdbfce943652f16927eb3 | [
"Artistic-2.0"
] | 4 | 2016-09-27T14:16:09.000Z | 2018-08-26T15:00:04.000Z | cgi/text.pl | HurricaneHackers/ilbot | 0801978fd900c3d182bfdbfce943652f16927eb3 | [
"Artistic-2.0"
] | 2 | 2020-05-05T15:13:14.000Z | 2022-01-05T01:30:43.000Z | #!/usr/bin/env perl
use warnings;
use strict;
use Carp qw(confess);
use CGI::Carp qw(fatalsToBrowser);
use CGI;
use Encode;
use HTML::Entities;
# evil hack: Text::Table lies somewhere near /irclog/ on the server...
use lib '../lib';
use lib 'lib';
use IrcLog qw(get_dbh gmt_today);
use IrcLog::WWW qw(my_encode my_decode);
use Text::Table;
my $default_channel = 'perl6';
# End of config
my $q = new CGI;
my $dbh = get_dbh();
my $channel = $q->param('channel') || $default_channel;
my $reverse = $q->param('reverse') || 0;
my $date = $q->param('date') || gmt_today;
if ($channel !~ m/^\w+(?:-\w+)*\z/sx){
# guard against channel=../../../etc/passwd or so
confess 'Invalid channel name';
}
#Check for reverse
my $statement = 'SELECT nick, timestamp, line FROM irclog '
. 'WHERE day = ? AND channel = ? AND NOT spam ORDER BY id';
$statement .= ' DESC' if $reverse;
my $db = $dbh->prepare($statement);
$db->execute($date, '#' . $channel);
print "Content-Type: text/html;charset=utf-8\n\n";
print <<HTML_HEADER;
<html>
<head>
<title>IRC Logs</title>
</head>
<body>
<pre>
HTML_HEADER
my $table = Text::Table->new(qw(Time Nick Message));
while (my $row = $db->fetchrow_hashref){
next unless length($row->{nick});
my ($hour, $minute) =(gmtime $row->{timestamp})[2,1];
$table->add(
sprintf("%02d:%02d", $hour, $minute),
$row->{nick},
my_decode($row->{line}),
);
}
my $text = encode_entities($table, '<>&');
# Text::Table will add trailing whitespace to pad messages to the
# longest message. I (avar) wasn't able to find out how to make it
# stop doing that so I'm hacking around it with regex!
$text =~ s/ +$//gm;
print encode("utf-8", $text);
print "</pre></body></html>\n";
# vim: sw=4 ts=4 expandtab
| 22.987179 | 70 | 0.624094 |
Subsets and Splits