Search is not available for this dataset
repo
stringlengths 2
152
⌀ | file
stringlengths 15
239
| code
stringlengths 0
58.4M
| file_length
int64 0
58.4M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 364
values |
---|---|---|---|---|---|---|
symphony-mt
|
symphony-mt-master/mt/src/main/scala/org/platanios/symphony/mt/vocabulary/CodedVocabulary.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.vocabulary
import org.platanios.symphony.mt.data.DataConfig
import better.files.File
/** A vocabulary over coded words (e.g., words produced by the byte-pair-encoding method).
  *
  * Unlike a plain vocabulary, sequences are transformed by the provided `encoder` before lookup
  * and by the provided `decoder` after generation, and so the encoded/decoded sequences may
  * differ in length from their inputs.
  *
  * @param file    File containing the vocabulary, with one word per line.
  * @param size    Size of this vocabulary (i.e., number of words).
  * @param encoder Sentence encoding function (each sentence is represented as a sequence of words).
  * @param decoder Sentence decoding function (each sentence is represented as a sequence of words).
  *
  * @author Emmanouil Antonios Platanios
  */
class CodedVocabulary protected (
    override val file: File,
    override val size: Int,
    protected val encoder: Seq[String] => Seq[String],
    protected val decoder: Seq[String] => Seq[String]
) extends Vocabulary(file, size) {
  /** Encodes the provided sequence by applying this vocabulary's `encoder` function.
    *
    * This is useful for coded vocabularies, such as the byte-pair-encoding vocabulary.
    *
    * @param sequence Sequence of tokens to encode.
    * @return Encoded sequence of tokens, which may differ in size from the input sequence.
    */
  override def encodeSequence(sequence: Seq[String]): Seq[String] = encoder(sequence)

  /** Decodes the provided sequence by applying this vocabulary's `decoder` function.
    *
    * This is useful for coded vocabularies, such as the byte-pair-encoding vocabulary.
    *
    * @param sequence Sequence of tokens to decode.
    * @return Decoded sequence of tokens, which may differ in size from the input sequence.
    */
  override def decodeSequence(sequence: Seq[String]): Seq[String] = decoder(sequence)
}
/** Factory methods for creating [[CodedVocabulary]] instances. */
object CodedVocabulary {
  /** Creates a new coded vocabulary from its constituent parts.
    *
    * @param file    File containing the vocabulary, with one word per line.
    * @param size    Size of this vocabulary (i.e., number of words).
    * @param encoder Sentence encoding function (each sentence is represented as a sequence of words).
    * @param decoder Sentence decoding function (each sentence is represented as a sequence of words).
    * @return Created vocabulary.
    */
  protected def apply(
      file: File,
      size: Int,
      encoder: Seq[String] => Seq[String],
      decoder: Seq[String] => Seq[String]
  ): CodedVocabulary = new CodedVocabulary(file, size, encoder, decoder)

  /** Creates a new coded vocabulary from the provided vocabulary file.
    *
    * The method first checks whether the specified vocabulary file exists and, if it does, verifies that special
    * tokens are being used correctly. If not, this method can optionally create a new file by prepending the
    * appropriate tokens to the existing one.
    *
    * The special tokens check simply involves checking whether the first three tokens in the vocabulary file match
    * the specified `unknownToken`, `beginSequenceToken`, and `endSequenceToken` values.
    *
    * @param file               Vocabulary file to check.
    * @param encoder            Sentence encoding function (each sentence is represented as a sequence of words).
    * @param decoder            Sentence decoding function (each sentence is represented as a sequence of words).
    * @param checkSpecialTokens Boolean value indicating whether or not to check for the use of special tokens, and
    *                           prepend them while creating a new vocabulary file, if the check fails.
    * @param directory          Directory to use when creating the new vocabulary file, in case the special tokens
    *                           check fails. Defaults to the current directory in which `file` is located, meaning
    *                           that if the special tokens check fails, `file` will be replaced with the appended
    *                           vocabulary file.
    * @param dataConfig         Data configuration that includes information about the special tokens.
    * @return Constructed vocabulary.
    * @throws IllegalArgumentException If the provided vocabulary file could not be loaded.
    */
  @throws[IllegalArgumentException]
  def apply(
      file: File,
      encoder: Seq[String] => Seq[String],
      decoder: Seq[String] => Seq[String],
      checkSpecialTokens: Boolean = true,
      directory: File = null,
      dataConfig: DataConfig = DataConfig()
  ): CodedVocabulary = {
    Vocabulary.check(file, checkSpecialTokens, directory) match {
      case Some((size, path)) => CodedVocabulary(path, size, encoder, decoder)
      case None =>
        throw new IllegalArgumentException(s"Could not load the vocabulary file located at '$file'.")
    }
  }
}
| 5,380 | 45.387931 | 120 |
scala
|
symphony-mt
|
symphony-mt-master/docs/src/main/scala/LoadPairwiseDataset.scala
|
import org.platanios.symphony.mt.Language._
import org.platanios.symphony.mt.data._
import org.platanios.symphony.mt.data.loaders.IWSLT15Loader
import org.platanios.symphony.mt.data.processors._
import org.platanios.symphony.mt.vocabulary._
import java.nio.file.Paths
/** Example showing how to configure and load a pairwise (two-language) parallel
  * dataset -- here, the IWSLT-15 English-Vietnamese corpus.
  *
  * The `#load_pairwise_dataset_example` markers below delimit the region that is
  * embedded in the documentation (`docs/src/main/paradox/data.md`) via the
  * Paradox `@@snip` directive, so the code between them should stay
  * self-contained.
  */
object LoadPairwiseDataset {
  // #load_pairwise_dataset_example
  // Configuration controlling how the data is downloaded, preprocessed, and batched.
  val dataConfig = DataConfig(
    // Loader
    dataDir = Paths.get("data"),
    loaderBufferSize = 8192,
    tokenizer = MosesTokenizer(),
    cleaner = MosesCleaner(),
    // Vocabulary generated from the training data with a size threshold of 50,000.
    // NOTE(review): countThreshold = -1 presumably disables the minimum-count filter -- confirm.
    vocabulary = GeneratedVocabulary(SimpleVocabularyGenerator(sizeThreshold = 50000, countThreshold = -1)),
    // Corpus
    trainBatchSize = 128,
    inferBatchSize = 32,
    evalBatchSize = 32,
    numBuckets = 5,
    srcMaxLength = 50,
    tgtMaxLength = 50,
    // NOTE(review): -1 presumably selects a default shuffle-buffer size -- confirm.
    shuffleBufferSize = -1L,
    numParallelCalls = 4)
  // Loader for the IWSLT-15 English-Vietnamese parallel corpus, using the above configuration.
  val loader = IWSLT15Loader(English, Vietnamese, dataConfig)
  // #load_pairwise_dataset_example
}
| 927 | 28 | 108 |
scala
|
symphony-mt
|
symphony-mt-master/docs/src/main/paradox/index.md
|
# Symphony Machine Translation
@@@ index
* [Getting Started](getting-started.md)
* [Data](data.md)
* [Models](models.md)
* [Learning](learning.md)
* [Experiments](experiments.md)
@@@
Symphony MT is a modular and extensible machine translation library that
supports both pairwise and multilingual translation, across an arbitrary
number of languages. It also supports zero-shot translation using either
pivoting/bridging, or by providing models that natively support multilingual
translation. It contains modules for:
- Downloading and preprocessing data:
- **Datasets:** IWSLT-14, IWSLT-15, IWSLT-16, IWSLT-17, WMT-16, etc.
- **Processors:** data cleaning, normalization, tokenization, etc.
- Constructing MT models using various features:
- Various encoders and decoders (e.g., RNNs, bi-directional RNNs, and
transformer).
- Attention models (e.g., Bahdanau, Luong, and multi-head).
- Greedy and beam search decoding.
- Training MT models:
- Lots of supported optimizers (e.g., SGD, Adam, AMSGrad, and YellowFin).
- Checkpointing.
- Monitoring (e.g., TensorBoard support).
- Distributed (e.g., multi-GPU) training.
- Evaluating MT models:
- Can evaluate both while training and after.
- Supports various metrics (e.g., BLEU, Rouge, Meteor, and TER).
- Using trained MT models to perform inference.
TODO: Add paper and link.
This library was initially developed to support the methods proposed in
[](), but it currently supports a broader set of features than those
presented in that paper.
## Citation
If you use this library in your work, we would really appreciate it if
you could cite the following paper:
TODO: Add citation information.
## License
Copyright 2017-2018, Emmanouil Antonios Platanios. All Rights Reserved.
Symphony Machine Translation is provided under the Apache 2.0 license.
| 1,881 | 33.851852 | 77 |
md
|
symphony-mt
|
symphony-mt-master/docs/src/main/paradox/models.md
|
# Models
## Encoders
- **Uni-directional RNN:** This is one of the simplest
  encoders: a uni-directional (i.e., left-to-right) RNN.
  It takes as input a source sequence in some language
  and produces a tensor containing the RNN outputs for
  each time step, together with a final state, which is
  a sequence containing the last computed RNN state of
  each layer, ordered by layer
  (e.g., `Seq(state0, state1, ...)`).
@@@ note
Currently the encoders and the decoders are bridged using a
copy mechanism that simply copies the final encoder state
as the initial decoder state. This means that the total
number of encoder layers must match the total number of
decoder layers. This limitation will be lifted once we add
support for more bridging mechanisms.
@@@
| 802 | 32.458333 | 60 |
md
|
symphony-mt
|
symphony-mt-master/docs/src/main/paradox/learning.md
|
# Learning
| 11 | 5 | 10 |
md
|
symphony-mt
|
symphony-mt-master/docs/src/main/paradox/data.md
|
# Data
The simplest place to start for learning to work with the
data pipeline is an example for how to load a parallel
dataset between two languages:
@@snip [LoadPairwiseDataset.scala](../scala/LoadPairwiseDataset.scala) { #load_pairwise_dataset_example }
## Downloading
Depending on the data loader you choose to use
(e.g., `IWSLT15Loader` in the example above) and the
languages for which you request data, Symphony Machine
Translation will download the necessary files into the
working directory that you specify. It will use a
buffer of size `loaderBufferSize` while downloading
the data.
## Preprocessing
| 638 | 30.95 | 105 |
md
|
symphony-mt
|
symphony-mt-master/docs/src/main/paradox/google-cloud-instructions.md
|
# Running on Google Compute Engine
## Installation
```bash
#!/bin/bash
export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
### Install CUDA
echo "Checking for CUDA and installing."
# Check for CUDA and try to install.
if ! dpkg-query -W cuda-9-2; then
# The 16.04 installer works with 16.10.
# Adds NVIDIA package repository.
sudo apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/7fa2af80.pub
wget http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/cuda-repo-ubuntu1604_9.2.148-1_amd64.deb
wget http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
sudo dpkg -i cuda-repo-ubuntu1604_9.2.148-1_amd64.deb
sudo dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
sudo apt-get update
# Includes optional NCCL 2.x.
sudo apt-get install cuda9.2 cuda-cublas-9-2 cuda-cufft-9-2 cuda-curand-9-2 \
cuda-cusolver-9-2 cuda-cusparse-9-2 libcudnn7=7.1.4.18-1+cuda9.2 \
libnccl2=2.2.13-1+cuda9.2 cuda-command-line-tools-9-2
# Optionally install TensorRT runtime, must be done after above cuda install.
sudo apt-get update
sudo apt-get install libnvinfer4=4.1.2-1+cuda9.2
fi
# Enable persistence mode
nvidia-smi -pm 1
### Install SBT
echo "deb https://dl.bintray.com/sbt/debian /" | sudo tee -a /etc/apt/sources.list.d/sbt.list
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2EE0EA64E40A89B84B2DF73499E82A75642AC823
sudo apt-get update
sudo apt-get install sbt
### Install Utilities
sudo apt-get install htop
```
| 1,639 | 36.272727 | 150 |
md
|
symphony-mt
|
symphony-mt-master/docs/src/main/paradox/getting-started.md
|
# Getting Started
The easiest way to start experimenting and playing around
with the SMT codebase for research purposes is to clone the
repository and start working with that. If you want to use
SMT in production, the best way is to add a dependency to
the published artifacts in your codebase. The following two
sections describe each option.
TODO: Add installation instructions.
## Cloning the Repository
### Requirements
TODO:
After you have cloned the repository, the simplest way to
reproduce the results from [our paper](TODO) and get going
with running experiments is to first create a JAR that
contains all dependencies using:
```bash
sbt assembly
cp target/scala-2.12/symphony-mt-0.1.0-SNAPSHOT.jar symphony-mt-0.1.0-SNAPSHOT.jar
```
Then, you can run experiments using commands like the
following:
```bash
java -jar symphony-mt-0.1.0-SNAPSHOT.jar \
--task train \
--working-dir temp/experiments \
--data-dir temp/data \
--dataset iwslt17 \
--languages de:en,de:it,de:ro,en:it,en:nl,en:ro,it:nl,nl:ro \
--eval-languages de:en,de:it,de:nl,de:ro,en:de,en:it,en:nl,en:ro,it:de,it:en,it:nl,it:ro,nl:de,nl:it,nl:en,nl:ro,ro:de,ro:it,ro:nl,ro:en \
--use-back-translations \
--parallel-portion 1.00 \
--eval-datasets dev2010:1.00,tst2017:1.00 \
--eval-metrics bleu,hyp_len,ref_len,sen_cnt \
--tokenizer moses \
--cleaner moses \
--vocabulary generated:20000:5 \
--batch-size 128 \
--num-buckets 5 \
--src-max-length 50 \
--tgt-max-length 50 \
--buffer-size 128 \
--model-arch bi_rnn:2:2 \
--model-cell lstm:tanh \
--model-type hyper_lang:8 \
--word-embed-size 512 \
--lang-embed-size 512 \
--residual \
--attention \
--dropout 0.2 \
--label-smoothing 0.1 \
--beam-width 10 \
--length-penalty 0.6 \
--opt amsgrad:0.001 \
--num-steps 1000000 \
--summary-steps 100 \
--checkpoint-steps 5000 \
--log-loss-steps 100 \
--log-eval-steps 5000 \
--num-gpus 1 \
--seed 10
```
The experiments command-line interface is described
[here](experiments-cli.md).
## Adding as a Dependency
In order to add SMT as a dependency in your own project,
you can use the following snippet:
@@dependency[sbt,Maven,Gradle] {
group="org.platanios"
artifact="symphony-mt"
version="0.1.0"
}
| 2,266 | 25.670588 | 140 |
md
|
symphony-mt
|
symphony-mt-master/docs/src/main/paradox/experiments.md
|
# Experiments
| 14 | 6.5 | 13 |
md
|
symphony-mt
|
symphony-mt-master/docs/src/main/paradox/experiments-cli.md
|
# Experiments Command-Line Interface (CLI)
| 45 | 10.5 | 42 |
md
|
symphony-mt
|
symphony-mt-master/docs/src/main/paradox/assets/custom.css
|
.md-typeset {
font-family: "Roboto", "Libre Franklin", "Open Sans", sans-serif;
font-size: 16px;
font-weight: 300;
}
[data-md-color-primary="white"] .md-header {
color: rgb(230, 230, 230);
background-color: rgb(40, 40, 40);
}
.md-header-nav {
color: rgb(230, 230, 230);
}
.md-nav__link:active, .md-nav__link--active {
color: rgb(227, 4, 0);
}
.md-header-nav__title {
font-family: 'Lato', sans-serif;
font-weight: 400;
}
.md-search__input {
color: rgb(230, 230, 230);
}
.md-search__form {
background-color: rgb(120, 120, 120);
}
*::placeholder {
color: rgb(230, 230, 230);
}
.md-typeset a {
color: rgb(227, 4, 0);
}
.active {
color: rgb(247, 4, 0);
}
.md-typeset pre > code {
font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace;
font-size: 9pt;
font-weight: 300;
}
.language-scala {
margin: 10px 0 10px 0;
}
.md-typeset table:not([class]) th {
background-color: rgba(0, 0, 0, 0.45);
}
/* latin-ext */
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 400;
src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v14/S6uyw4BMUTPHjxAwXiWtFCfQ7A.woff2) format('woff2');
unicode-range: U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF;
}
/* latin */
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 400;
src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v14/S6uyw4BMUTPHjx4wXiWtFCc.woff2) format('woff2');
unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD;
}
/* latin-ext */
@font-face {
font-family: 'Libre Franklin';
font-style: normal;
font-weight: 300;
src: local('Libre Franklin Light'), local('LibreFranklin-Light'), url(https://fonts.gstatic.com/s/librefranklin/v1/1_DGDtljMiPWFs5rl_p0yMq-pbAjJt28asgHno7W9sM.woff2) format('woff2');
unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF;
}
/* latin */
@font-face {
font-family: 'Libre Franklin';
font-style: normal;
font-weight: 300;
src: local('Libre Franklin Light'), local('LibreFranklin-Light'), url(https://fonts.gstatic.com/s/librefranklin/v1/1_DGDtljMiPWFs5rl_p0yGISN6_59ECOhaitw-i87uk.woff2) format('woff2');
unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;
}
/* latin-ext */
@font-face {
font-family: 'Libre Franklin';
font-style: normal;
font-weight: 400;
src: local('Libre Franklin'), local('LibreFranklin-Regular'), url(https://fonts.gstatic.com/s/librefranklin/v1/PFwjf3aDdAQPvNKUrT3U7xHoxYlEhdfxgPTmEO-jGTc.woff2) format('woff2');
unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF;
}
/* latin */
@font-face {
font-family: 'Libre Franklin';
font-style: normal;
font-weight: 400;
src: local('Libre Franklin'), local('LibreFranklin-Regular'), url(https://fonts.gstatic.com/s/librefranklin/v1/PFwjf3aDdAQPvNKUrT3U77v_weys7n7jZxU_6MdmmbI.woff2) format('woff2');
unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;
}
/* latin-ext */
@font-face {
font-family: 'Libre Franklin';
font-style: normal;
font-weight: 600;
src: local('Libre Franklin SemiBold'), local('LibreFranklin-SemiBold'), url(https://fonts.gstatic.com/s/librefranklin/v1/1_DGDtljMiPWFs5rl_p0yGwGxmvWtJ4lCHDfbg-API0.woff2) format('woff2');
unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF;
}
/* latin */
@font-face {
font-family: 'Libre Franklin';
font-style: normal;
font-weight: 600;
src: local('Libre Franklin SemiBold'), local('LibreFranklin-SemiBold'), url(https://fonts.gstatic.com/s/librefranklin/v1/1_DGDtljMiPWFs5rl_p0yIw9bV740MgqmR6w4gD2Gfs.woff2) format('woff2');
unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;
}
/* cyrillic-ext */
@font-face {
font-family: 'Open Sans';
font-style: italic;
font-weight: 400;
src: local('Open Sans Italic'), local('OpenSans-Italic'), url(https://fonts.gstatic.com/s/opensans/v14/xjAJXh38I15wypJXxuGMBvZraR2Tg8w2lzm7kLNL0-w.woff2) format('woff2');
unicode-range: U+0460-052F, U+20B4, U+2DE0-2DFF, U+A640-A69F;
}
/* cyrillic */
@font-face {
font-family: 'Open Sans';
font-style: italic;
font-weight: 400;
src: local('Open Sans Italic'), local('OpenSans-Italic'), url(https://fonts.gstatic.com/s/opensans/v14/xjAJXh38I15wypJXxuGMBl4sYYdJg5dU2qzJEVSuta0.woff2) format('woff2');
unicode-range: U+0400-045F, U+0490-0491, U+04B0-04B1, U+2116;
}
/* greek-ext */
@font-face {
font-family: 'Open Sans';
font-style: italic;
font-weight: 400;
src: local('Open Sans Italic'), local('OpenSans-Italic'), url(https://fonts.gstatic.com/s/opensans/v14/xjAJXh38I15wypJXxuGMBlBW26QxpSj-_ZKm_xT4hWw.woff2) format('woff2');
unicode-range: U+1F00-1FFF;
}
/* greek */
@font-face {
font-family: 'Open Sans';
font-style: italic;
font-weight: 400;
src: local('Open Sans Italic'), local('OpenSans-Italic'), url(https://fonts.gstatic.com/s/opensans/v14/xjAJXh38I15wypJXxuGMBgt_Rm691LTebKfY2ZkKSmI.woff2) format('woff2');
unicode-range: U+0370-03FF;
}
/* vietnamese */
@font-face {
font-family: 'Open Sans';
font-style: italic;
font-weight: 400;
src: local('Open Sans Italic'), local('OpenSans-Italic'), url(https://fonts.gstatic.com/s/opensans/v14/xjAJXh38I15wypJXxuGMBtDiNsR5a-9Oe_Ivpu8XWlY.woff2) format('woff2');
unicode-range: U+0102-0103, U+1EA0-1EF9, U+20AB;
}
/* latin-ext */
@font-face {
font-family: 'Open Sans';
font-style: italic;
font-weight: 400;
src: local('Open Sans Italic'), local('OpenSans-Italic'), url(https://fonts.gstatic.com/s/opensans/v14/xjAJXh38I15wypJXxuGMBqE8kM4xWR1_1bYURRojRGc.woff2) format('woff2');
unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF;
}
/* latin */
@font-face {
font-family: 'Open Sans';
font-style: italic;
font-weight: 400;
src: local('Open Sans Italic'), local('OpenSans-Italic'), url(https://fonts.gstatic.com/s/opensans/v14/xjAJXh38I15wypJXxuGMBogp9Q8gbYrhqGlRav_IXfk.woff2) format('woff2');
unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;
}
/* cyrillic-ext */
@font-face {
font-family: 'Open Sans';
font-style: normal;
font-weight: 400;
src: local('Open Sans Regular'), local('OpenSans-Regular'), url(https://fonts.gstatic.com/s/opensans/v14/K88pR3goAWT7BTt32Z01m4X0hVgzZQUfRDuZrPvH3D8.woff2) format('woff2');
unicode-range: U+0460-052F, U+20B4, U+2DE0-2DFF, U+A640-A69F;
}
/* cyrillic */
@font-face {
font-family: 'Open Sans';
font-style: normal;
font-weight: 400;
src: local('Open Sans Regular'), local('OpenSans-Regular'), url(https://fonts.gstatic.com/s/opensans/v14/RjgO7rYTmqiVp7vzi-Q5UYX0hVgzZQUfRDuZrPvH3D8.woff2) format('woff2');
unicode-range: U+0400-045F, U+0490-0491, U+04B0-04B1, U+2116;
}
/* greek-ext */
@font-face {
font-family: 'Open Sans';
font-style: normal;
font-weight: 400;
src: local('Open Sans Regular'), local('OpenSans-Regular'), url(https://fonts.gstatic.com/s/opensans/v14/LWCjsQkB6EMdfHrEVqA1KYX0hVgzZQUfRDuZrPvH3D8.woff2) format('woff2');
unicode-range: U+1F00-1FFF;
}
/* greek */
@font-face {
font-family: 'Open Sans';
font-style: normal;
font-weight: 400;
src: local('Open Sans Regular'), local('OpenSans-Regular'), url(https://fonts.gstatic.com/s/opensans/v14/xozscpT2726on7jbcb_pAoX0hVgzZQUfRDuZrPvH3D8.woff2) format('woff2');
unicode-range: U+0370-03FF;
}
/* vietnamese */
@font-face {
font-family: 'Open Sans';
font-style: normal;
font-weight: 400;
src: local('Open Sans Regular'), local('OpenSans-Regular'), url(https://fonts.gstatic.com/s/opensans/v14/59ZRklaO5bWGqF5A9baEEYX0hVgzZQUfRDuZrPvH3D8.woff2) format('woff2');
unicode-range: U+0102-0103, U+1EA0-1EF9, U+20AB;
}
/* latin-ext */
@font-face {
font-family: 'Open Sans';
font-style: normal;
font-weight: 400;
src: local('Open Sans Regular'), local('OpenSans-Regular'), url(https://fonts.gstatic.com/s/opensans/v14/u-WUoqrET9fUeobQW7jkRYX0hVgzZQUfRDuZrPvH3D8.woff2) format('woff2');
unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF;
}
/* latin */
@font-face {
font-family: 'Open Sans';
font-style: normal;
font-weight: 400;
src: local('Open Sans Regular'), local('OpenSans-Regular'), url(https://fonts.gstatic.com/s/opensans/v14/cJZKeOuBrn4kERxqtaUH3ZBw1xU1rKptJj_0jans920.woff2) format('woff2');
unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;
}
| 9,126 | 37.673729 | 192 |
css
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/ParameterExtractor.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments
import org.platanios.symphony.mt.Language
import org.platanios.tensorflow.api.core.Shape
import org.platanios.tensorflow.api.io.CheckpointReader
import better.files.File
import net.schmizz.sshj.SSHClient
import net.schmizz.sshj.xfer.FileSystemFile
import java.io.PrintWriter
/** Utility that downloads a trained model checkpoint from a remote server over
  * SSH/SCP and inspects it -- currently used to count the number of model
  * parameters it contains (the commented-out code below was previously used to
  * extract language embeddings for plotting).
  *
  * @author Emmanouil Antonios Platanios
  */
object ParameterExtractor {
  /** Returns the set of filenames that make up a TensorFlow checkpoint with the
    * provided filename prefix.
    *
    * NOTE(review): this assumes the checkpoint was saved with exactly two data
    * shards (`data-00000-of-00002` / `data-00001-of-00002`) -- confirm this
    * matches how the experiments save checkpoints.
    *
    * @param prefix Checkpoint filename prefix (e.g., `"model.ckpt-25000"`).
    * @return Set of checkpoint filenames to download.
    */
  def checkpointFiles(prefix: String): Set[String] = {
    Set(
      s"$prefix.data-00000-of-00002",
      s"$prefix.data-00001-of-00002",
      s"$prefix.index",
      s"$prefix.meta")
  }

  def main(args: Array[String]): Unit = {
    val server = GPU3Server
    // Local directory into which the checkpoint files are downloaded.
    val workingDir = File.currentWorkingDirectory / "temp" / "results"
    workingDir.createIfNotExists(asDirectory = true)
    // Remote experiment directory containing the checkpoint to inspect.
    // val remotePath = "~/code/symphony-mt/temp/experiments/ted_talks.en.es.fr.it.nl.ro.de.vi.hi.ta.tw:true.ae:true.bi_rnn:2:2.lstm:tanh.hyper_lang.l:8.w:512.r.a.d:0.2.ls:0.1.t:none.c:none.v:bpe-10000.pp:100.bs:128.nb:5.sml:50.tml:50/"
    val remotePath = "~/code/symphony-mt/temp/experiments/iwslt17.de-en.tw:false.ae:false.bi_rnn:2:2.lstm:tanh.pairwise.w:512.r.a.d:0.2.ls:0.1.t:moses.c:moses.v:simple-20000-5.pp:100.bs:128.nb:5.sml:50.tml:50/"
    val filePrefix = "model.ckpt-25000"
    val files = checkpointFiles(filePrefix) // + "languages.index"
    // Set up the SSH connection (public-key authentication, compression enabled).
    val ssh = new SSHClient()
    ssh.useCompression()
    ssh.loadKnownHosts()
    ssh.connect(server.hostname)
    try {
      ssh.authPublickey(server.username)
      // Download each checkpoint file into the local working directory via SCP.
      files.foreach(f => ssh.newSCPFileTransfer().download(remotePath + f, new FileSystemFile((workingDir / f).toJava)))
      // Count model parameters, excluding optimizer slot variables (AMSGrad state).
      val checkpointReader = CheckpointReader((workingDir / filePrefix).path)
      val numParameters = checkpointReader.variableShapes
          .filter(!_._1.startsWith("AMSGrad"))
          .values
          .map(_.numElements)
          .sum
      // NOTE(review): `numParameters` is computed but never printed or otherwise
      // reported -- the result is silently discarded. Presumably this was inspected
      // via a debugger; consider logging it.
      // NOTE(review): `checkpointReader.close()` is not in a `finally`, so the
      // reader leaks if any of the lines above throw.
      checkpointReader.close()
      // // Obtain the languages index.
      // val languages = (workingDir / "languages.index").lines.map(line => {
      //   val lineParts = line.split(',')
      //   (lineParts(1).toInt, lineParts(0))
      // }).toSeq.sortBy(_._1).map(_._2)
      //
      // // Obtain the language embeddings from the checkpoint file.
      // val checkpointReader = CheckpointReader((workingDir / filePrefix).path)
      // val variableShapes = checkpointReader.variableShapes.filter(_._1.endsWith("LanguageEmbeddings"))
      // val variableName = variableShapes.find(_._2 == Shape(languages.size, 8)).get._1
      // val variableValue = checkpointReader.getTensor(variableName).get
      // val languageEmbeddings = languages.zipWithIndex.map {
      //   case (language, index) => language -> variableValue(index)
      // }
      //
      // // Write the language embeddings to a CSV file to use for plotting.
      // val writer = new PrintWriter((workingDir / "iwslt15_language_embeddings.csv").toJava)
      // languageEmbeddings.foreach(l => {
      //   writer.write(s"${Language.fromName(l._1).abbreviation},${l._2.entriesIterator.map(_.toString).mkString(",")}\n")
      // })
      // checkpointReader.close()
      // writer.close()
    } finally {
      // Always tear down the SSH connection, even if the download or read fails.
      ssh.disconnect()
    }
  }
}
| 3,801 | 40.78022 | 235 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/Server.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments
/** A remote machine that the experiment utilities connect to over SSH
  * (see e.g. `ParameterExtractor`, which uses these fields to open an SCP
  * transfer).
  *
  * @author Emmanouil Antonios Platanios
  */
trait Server {
  val hostname: String  // Hostname or IP address used to open the connection.
  val username: String  // Username used for authentication.
}
/** GPU server reachable at `gpu3.learning.cs.cmu.edu`. */
case object GPU3Server extends Server {
  override val hostname: String = "gpu3.learning.cs.cmu.edu"
  override val username: String = "eplatani"
}
/** Google Cloud instance, addressed directly by IP (NOTE(review): a hard-coded
  * IP will break if the instance is recreated without a static address). */
case object GoogleCloudServer extends Server {
  override val hostname: String = "104.155.134.36"
  override val username: String = "e.a.platanios"
}
| 1,096 | 30.342857 | 80 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/Experiment.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments
import org.platanios.symphony.mt.{Environment, Language}
import org.platanios.symphony.mt.data._
import org.platanios.symphony.mt.data.loaders._
import org.platanios.symphony.mt.experiments.config._
import org.platanios.symphony.mt.models.Model
import org.platanios.symphony.mt.models.parameters._
import org.platanios.symphony.mt.vocabulary.Vocabulary
import ch.qos.logback.classic.LoggerContext
import ch.qos.logback.classic.encoder.PatternLayoutEncoder
import ch.qos.logback.classic.spi.ILoggingEvent
import ch.qos.logback.core.FileAppender
import com.typesafe.config.{Config, ConfigFactory}
import org.slf4j.{Logger, LoggerFactory}
import java.nio.file.{Files, Path, Paths}
/**
* @author Emmanouil Antonios Platanios
*/
class Experiment(val configFile: Path) {
lazy val config: Config = ConfigFactory.parseFile(configFile.toFile).resolve()
lazy val task: Experiment.Task = {
config.get[String]("task") match {
case "train" => Experiment.Train
case "translate" => Experiment.Translate
case "evaluate" => Experiment.Evaluate
case value => throw new IllegalArgumentException(s"'$value' does not represent a valid task.")
}
}
lazy val dataset: String = {
config.get[String]("data.dataset")
}
lazy val (datasets, languages): (Seq[FileParallelDataset], Seq[(Language, Vocabulary)]) = {
val languagePairs = Experiment.parseLanguagePairs(config.get[String]("training.languages"))
val providedEvalLanguagePairs = Experiment.parseLanguagePairs(config.get[String]("evaluation.languages"))
val evalLanguagePairs = if (providedEvalLanguagePairs.isEmpty) languagePairs else providedEvalLanguagePairs
ParallelDatasetLoader.load(
loaders = dataset match {
case "iwslt14" => (languagePairs ++ evalLanguagePairs).toSeq.map(l => IWSLT14Loader(l._1, l._2, dataConfig))
case "iwslt15" => (languagePairs ++ evalLanguagePairs).toSeq.map(l => IWSLT15Loader(l._1, l._2, dataConfig))
case "iwslt16" => (languagePairs ++ evalLanguagePairs).toSeq.map(l => IWSLT16Loader(l._1, l._2, dataConfig))
case "iwslt17" => (languagePairs ++ evalLanguagePairs).toSeq.map(l => IWSLT17Loader(l._1, l._2, dataConfig))
case "wmt16" => (languagePairs ++ evalLanguagePairs).toSeq.map(l => WMT16Loader(l._1, l._2, dataConfig))
case "ted_talks" => (languagePairs ++ evalLanguagePairs).toSeq.map(l => TEDTalksLoader(l._1, l._2, dataConfig))
},
workingDir = Some(environment.workingDir))
}
private lazy val parametersParser = {
new ParametersParser(dataConfig)
}
lazy val environment: Environment = {
new EnvironmentParser(
dataset = dataset,
modelName = config.get[String]("model.name")
).parse(config.get[Config]("environment"))
}
lazy val dataConfig: DataConfig = {
DataConfigParser.parse(config.get[Config]("data"))
}
lazy val parameters: ParametersParser.Parameters = {
parametersParser.parse(config.get[Config]("model.parameters"))
}
lazy val parameterManager: ParameterManager = {
parameters.parameterManager
}
private lazy val modelParser = {
// TODO: [EXPERIMENTS] Add support for other data types.
new ModelParser[Float](
task, dataset, datasets, languages, environment, parameterManager, dataConfig)
}
lazy val model: Model[_] = {
modelParser.parse(config)
}
def initialize(): Unit = {
val loggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
val patternLayoutEncoder = new PatternLayoutEncoder()
patternLayoutEncoder.setPattern("%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n")
patternLayoutEncoder.setContext(loggerContext)
patternLayoutEncoder.start()
val fileAppender = new FileAppender[ILoggingEvent]()
fileAppender.setFile(environment.workingDir.resolve("experiment.log").toAbsolutePath.toString)
fileAppender.setEncoder(patternLayoutEncoder)
fileAppender.setContext(loggerContext)
fileAppender.start()
LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME)
.asInstanceOf[ch.qos.logback.classic.Logger]
.addAppender(fileAppender)
}
def run(): Unit = {
task match {
case Experiment.Train => model.train(datasets.map(_.filterTypes(Train)))
case Experiment.Translate => ???
case Experiment.Evaluate => model.evaluate(model.evaluationConfig.datasets)
}
}
protected def languagesStringHelper(languagePairs: Seq[(Language, Language)]): (String, Seq[String]) = {
"Language Pairs" -> languagePairs.map(p => s"${p._1.abbreviation}-${p._2.abbreviation}").sorted
}
// def logSummary(): Unit = {
// Experiment.logger.info("Running an experiment with the following configuration:")
// val configTable = Seq(
// "Experiment" -> Seq("Type" -> {
// task match {
// case ExperimentConfig.Train => "Train"
// case ExperimentConfig.Translate => "Translate"
// case ExperimentConfig.Evaluate => "Evaluate"
// }
// }),
// "Dataset" -> Seq(
// "Name" -> """(\p{IsAlpha}+)(\p{IsDigit}+)""".r.replaceAllIn(dataset.map(_.toUpper), "$1-$2"),
// "Both Directions" -> trainBothDirections.toString,
// "Languages" -> providedLanguages,
// "Evaluation Languages" -> providedEvalLanguages,
// "Evaluation Tags" -> evalDatasetTags.mkString(", "),
// "Evaluation Metrics" -> evalMetrics.mkString(", ")),
// "Model" -> {
// Seq(
// "Architecture" -> modelArchitecture.toString,
// "Cell" -> {
// val parts = modelCell.split(":")
// s"${parts(0).toUpperCase()}[${parts(1)}]"
// },
// "Type" -> modelType.toString) ++ {
// if (modelType.isInstanceOf[HyperLanguage] || modelType.isInstanceOf[HyperLanguagePair])
// Seq("Language Embeddings Size" -> languageEmbeddingsSize.toString)
// else
// Seq.empty[(String, String)]
// } ++ Seq("Word Embeddings Size" -> wordEmbeddingsSize.toString) ++ {
// if (!modelArchitecture.isInstanceOf[GNMT])
// Seq(
// "Residual" -> residual.toString,
// "Attention" -> attention.toString)
// else
// Seq.empty[(String, String)]
// } ++ Seq(
// "Dropout" -> dropout.map(_.toString).getOrElse("Not Used"),
// "Label Smoothing" -> labelSmoothing.toString,
// "Identity Translations" -> trainUseIdentityTranslations.toString,
// "Beam Width" -> beamWidth.toString,
// "Length Penalty Weight" -> lengthPenaltyWeight.toString,
// "Decoding Max Length Factor" -> decoderMaxLengthFactor.toString,
// "" -> "", // This acts as a separator to help improve readability of the table.
// "Steps" -> numSteps.toString,
// "Summary Steps" -> summarySteps.toString,
// "Checkpoint Steps" -> checkpointSteps.toString,
// "" -> "", // This acts as a separator to help improve readability of the table.
// "Optimizer" -> {
// val parts = optString.split(":")
// s"${parts(0).capitalize}[lr=${parts(1)}]"
// },
// "Max Gradients Norm" -> optConfig.maxGradNorm.toString,
// "Colocate Gradients with Ops" -> optConfig.colocateGradientsWithOps.toString,
// "" -> "", // This acts as a separator to help improve readability of the table.
// "Log Loss Steps" -> logConfig.logLossSteps.toString,
// "Log Eval Steps" -> logConfig.logEvalSteps.toString,
// "Launch TensorBoard" -> logConfig.launchTensorBoard.toString,
// "TensorBoard Host" -> logConfig.tensorBoardConfig._1,
// "TensorBoard Port" -> logConfig.tensorBoardConfig._2.toString
// )
// },
// "Data Configuration" -> Seq(
// "Directory" -> dataConfig.dataDir.toString,
// "Loader Buffer Size" -> dataConfig.loaderBufferSize.toString,
// "Tokenizer" -> dataConfig.tokenizer.toString,
// "Cleaner" -> dataConfig.cleaner.toString,
// "Vocabulary" -> dataConfig.vocabulary.toString,
// "" -> "", // This acts as a separator to help improve readability of the table.
// "Percent Parallel" -> (dataConfig.parallelPortion * 100).toInt.toString,
// "Train Batch Size" -> dataConfig.trainBatchSize.toString,
// "Inference Batch Size" -> dataConfig.inferBatchSize.toString,
// "Evaluation Batch Size" -> dataConfig.evalBatchSize.toString,
// "Number of Buckets" -> dataConfig.numBuckets.toString,
// "Maximum Source Length" -> dataConfig.srcMaxLength.toString,
// "Maximum Target Length" -> dataConfig.tgtMaxLength.toString,
// "Prefetching Buffer Size" -> dataConfig.bufferSize.toString,
// "TF - Number of Parallel Calls" -> dataConfig.numParallelCalls.toString,
// "" -> "", // This acts as a separator to help improve readability of the table.
// "Unknown Token" -> dataConfig.unknownToken,
// "Begin-of-Sequence Token" -> dataConfig.beginOfSequenceToken,
// "End-of-Sequence Token" -> dataConfig.endOfSequenceToken),
// "Environment" -> Seq(
// "Working Directory" -> env.workingDir.toString,
// "Number of GPUs" -> env.numGPUs.toString,
// "Random Seed" -> env.randomSeed.getOrElse("Not Set").toString,
// "TF - Trace Steps" -> env.traceSteps.map(_.toString).getOrElse("Not Set"),
// "TF - Allow Soft Placement" -> env.allowSoftPlacement.toString,
// "TF - Log Device Placement" -> env.logDevicePlacement.toString,
// "TF - Allow GPU Memory Growth" -> env.gpuAllowMemoryGrowth.toString,
// "TF - Use XLA" -> env.useXLA.toString,
// "TF - Parallel Iterations" -> env.parallelIterations.toString,
// "TF - Swap Memory" -> env.swapMemory.toString))
// ExperimentConfig.logTable(configTable, message => Experiment.logger.info(message))
// }
override def toString: String = {
val stringBuilder = new StringBuilder(s"$dataset")
stringBuilder.append(s".${modelParser.tag(config, model).get}")
stringBuilder.append(s".${parametersParser.tag(config.get[Config]("model.parameters"), parameters).get}")
stringBuilder.append(s".${DataConfigParser.tag(config.get[Config]("data"), dataConfig).get}")
stringBuilder.toString
}
}
object Experiment {
  /** Task that an experiment can perform. */
  sealed trait Task

  /** Task that trains a model. */
  case object Train extends Task

  /** Task that translates sentences using a model. */
  case object Translate extends Task

  /** Task that evaluates a model. */
  case object Evaluate extends Task

  /** Parses a string specification of language pairs.
    *
    * Two formats are supported:
    *
    *   - Explicit directed pairs, separated by commas, with source and target separated by a colon
    *     (e.g., `"en:de,de:en"` results in exactly those two pairs).
    *   - A comma-separated list of languages (e.g., `"en,de,fr"`), which results in all ordered
    *     pairs of the listed languages (i.e., both directions of every combination).
    *
    * The empty string results in an empty set of pairs.
    *
    * @param  languages Language pairs specification.
    * @return Parsed set of directed language pairs.
    * @throws IllegalArgumentException If, in the explicit format, an entry does not consist of
    *                                  exactly two colon-separated languages.
    */
  @throws[IllegalArgumentException]
  private[experiments] def parseLanguagePairs(languages: String): Set[(Language, Language)] = {
    languages match {
      case l if l == "" => Set.empty[(Language, Language)]
      case l if l.contains(":") =>
        languages.split(',').map(p => {
          val parts = p.split(":")
          if (parts.length != 2)
            throw new IllegalArgumentException(s"'$p' is not a valid language pair.")
          (Language.fromAbbreviation(parts(0)), Language.fromAbbreviation(parts(1)))
        }).toSet
      case l =>
        l.split(',')
            .map(Language.fromAbbreviation)
            .combinations(2).map(p => (p(0), p(1)))
            .flatMap(p => Seq(p, (p._2, p._1)))
            .toSet
    }
  }

  /** Logs a table of sections with key-value rows, using Unicode box-drawing characters.
    *
    * Column widths adapt to the longest section name, key, and value (with a minimum of 10).
    * A row with an empty key is rendered as a horizontal separator within its section.
    *
    * Note that `table` must be non-empty and every section must contain at least one row
    * (the column width computation takes a `max` over them).
    *
    * @param  table  Table contents, as a sequence of sections, each with key-value rows.
    * @param  logger Function used to log each rendered table line.
    */
  private[Experiment] def logTable(table: Seq[(String, Seq[(String, String)])], logger: String => Unit): Unit = {
    val firstColumnWidth = Math.max(Math.max(table.map(_._1.length).max, table.flatMap(_._2.map(_._1.length)).max), 10)
    val secondColumnWidth = Math.max(table.flatMap(_._2.map(_._2.length)).max, 10)
    logger(s"╔═${"═" * firstColumnWidth}══${"═" * (secondColumnWidth + 2)}╗")
    table.zipWithIndex.foreach {
      case ((section, values), index) =>
        // Every bordered row has an interior width of `firstColumnWidth + secondColumnWidth + 5`,
        // and so the section header row needs `secondColumnWidth + 3` trailing spaces. The previous
        // `secondColumnWidth + 2` left the header row's right border misaligned by one character.
        logger(s"║ %-${firstColumnWidth}s ".format(section) + s"${" " * (secondColumnWidth + 3)}║")
        logger(s"╠═${"═" * firstColumnWidth}═╤${"═" * (secondColumnWidth + 2)}╣")
        values.foreach {
          case (key, value) if key != "" =>
            logger(s"║ %-${firstColumnWidth}s │ %-${secondColumnWidth}s ║".format(key, value))
          case _ => logger(s"╟─${"─" * firstColumnWidth}─┼${"─" * (secondColumnWidth + 2)}╢")
        }
        if (index < table.length - 1)
          logger(s"╠═${"═" * firstColumnWidth}═╧${"═" * (secondColumnWidth + 2)}╣")
    }
    logger(s"╚═${"═" * firstColumnWidth}═╧${"═" * (secondColumnWidth + 2)}╝")
  }

  /** Entry point for running experiments.
    *
    * Expects a single command-line argument: the path to an experiment configuration file.
    *
    * @throws IllegalArgumentException If no configuration file path is provided, or if the
    *                                  provided path does not exist.
    */
  @throws[IllegalArgumentException]
  def main(args: Array[String]): Unit = {
    if (args.isEmpty)
      throw new IllegalArgumentException("A path to an experiment configuration file must be provided.")
    val configFile = Paths.get(args(0))
    if (!Files.exists(configFile))
      throw new IllegalArgumentException(s"The provided configuration file ($configFile) does not exist.")
    val experiment = new Experiment(configFile)
    experiment.initialize()
    // experiment.logSummary()
    experiment.run()
  }
}
| 13,610 | 45.613014 | 119 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/package.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt
import org.platanios.symphony.mt.models.rnn.Cell
import org.platanios.tensorflow.api.implicits.helpers.{OutputStructure, OutputToShape, Zero}
/**
* @author Emmanouil Antonios Platanios
*/
package object experiments {
  /** Recovers the `OutputStructure` evidence for a cell's state type.
    *
    * NOTE(review): the cast is presumably safe by construction of `Cell` (the evidence is declared
    * for the cell's own state type) -- confirm against the `Cell` definition if it changes.
    */
  implicit def cellToEvOutputStructureState[T](
      cell: Cell[T, _, _]
  ): OutputStructure[cell.StateType] = {
    cell.evOutputStructureState.asInstanceOf[OutputStructure[cell.StateType]]
  }
  /** Recovers the `OutputToShape` evidence for a cell's state and state-shape types. */
  implicit def cellToEvOutputToShapeState[T](
      cell: Cell[T, _, _]
  ): OutputToShape.Aux[cell.StateType, cell.StateShapeType] = {
    cell.evOutputToShapeState.asInstanceOf[OutputToShape.Aux[cell.StateType, cell.StateShapeType]]
  }
  /** Recovers the `Zero` evidence for a cell's state and state-shape types. */
  implicit def cellToEvZeroState[T](
      cell: Cell[T, _, _]
  ): Zero.Aux[cell.StateType, cell.StateShapeType] = {
    cell.evZeroState.asInstanceOf[Zero.Aux[cell.StateType, cell.StateShapeType]]
  }
}
| 1,536 | 34.744186 | 98 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/ResultsAggregator.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments
import better.files._
import net.schmizz.sshj.SSHClient
import net.schmizz.sshj.xfer.FileSystemFile
// TODO: Make this more generic.
/**
* @author Emmanouil Antonios Platanios
*/
/** Downloads experiment logs from a remote server over SCP and aggregates evaluation results.
  *
  * NOTE(review): hostnames, experiment directory names, and remote log paths are hard-coded
  * below; this appears to be a one-off aggregation script rather than a general-purpose tool.
  */
object ResultsAggregator {
  /** Returns the experiment working directory names for the IWSLT-15 experiments using the
    * provided parameter manager method. For `"pairwise"`, one directory per directed language
    * pair is returned; for any other method, a single multilingual experiment directory is
    * returned.
    *
    * @param  method          Parameter manager method used by the experiments.
    * @param  percentParallel Percentage of parallel data used by the experiments.
    * @return Experiment working directory names.
    */
  def iwslt15ExperimentDirectories(method: String, percentParallel: Int): Seq[String] = method match {
    case "pairwise" => Seq(
      s"iwslt15.en-cs.both:false.back:false.bi_rnn:2:2.lstm:tanh.pairwise.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-simple-20000-5.pp:$percentParallel.bs:128.nb:5.sml:80.tml:80",
      s"iwslt15.cs-en.both:false.back:false.bi_rnn:2:2.lstm:tanh.pairwise.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-simple-20000-5.pp:$percentParallel.bs:128.nb:5.sml:80.tml:80",
      s"iwslt15.en-de.both:false.back:false.bi_rnn:2:2.lstm:tanh.pairwise.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-simple-20000-5.pp:$percentParallel.bs:128.nb:5.sml:80.tml:80",
      s"iwslt15.de-en.both:false.back:false.bi_rnn:2:2.lstm:tanh.pairwise.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-simple-20000-5.pp:$percentParallel.bs:128.nb:5.sml:80.tml:80",
      s"iwslt15.en-fr.both:false.back:false.bi_rnn:2:2.lstm:tanh.pairwise.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-simple-20000-5.pp:$percentParallel.bs:128.nb:5.sml:80.tml:80",
      s"iwslt15.fr-en.both:false.back:false.bi_rnn:2:2.lstm:tanh.pairwise.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-simple-20000-5.pp:$percentParallel.bs:128.nb:5.sml:80.tml:80",
      s"iwslt15.en-th.both:false.back:false.bi_rnn:2:2.lstm:tanh.pairwise.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-simple-20000-5.pp:$percentParallel.bs:128.nb:5.sml:80.tml:80",
      s"iwslt15.th-en.both:false.back:false.bi_rnn:2:2.lstm:tanh.pairwise.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-simple-20000-5.pp:$percentParallel.bs:128.nb:5.sml:80.tml:80",
      s"iwslt15.en-vi.both:false.back:false.bi_rnn:2:2.lstm:tanh.pairwise.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-simple-20000-5.pp:$percentParallel.bs:128.nb:5.sml:80.tml:80",
      s"iwslt15.vi-en.both:false.back:false.bi_rnn:2:2.lstm:tanh.pairwise.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-simple-20000-5.pp:$percentParallel.bs:128.nb:5.sml:80.tml:80")
    case m => Seq(
      s"iwslt15.en-cs.en-de.en-fr.en-th.en-vi.both:true.back:true.bi_rnn:2:2.lstm:tanh.$m.l:8.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-simple-20000-5.pp:$percentParallel.bs:128.nb:5.sml:80.tml:80")
  }
  /** Downloads a single experiment log over SCP and prints the mean of the best evaluation
    * results (BLEU on the IWSLT-17 "tst2017" split, for the selected language pairs). */
  def main(args: Array[String]): Unit = {
    // val workingDir = File.currentWorkingDirectory / "temp" / "results"
    // workingDir.createIfNotExists(asDirectory = true)
    // val hostname = "gpu3.learning.cs.cmu.edu"
    // val ssh = new SSHClient()
    // ssh.useCompression()
    // ssh.loadKnownHosts()
    // ssh.connect(hostname)
    // try {
    //   ssh.authPublickey("eplatani")
    //   val results = Seq(
    //     "P-512" -> Map(
    //       "1%" -> iwslt15ExperimentDirectories("pairwise", 1),
    //       "10%" -> iwslt15ExperimentDirectories("pairwise", 10)
    //     ),
    //     //        "100%" -> iwslt15ExperimentDirectories("pairwise", 100)),
    //     "HL-512-8-BT" -> Map(
    //       "1%" -> iwslt15ExperimentDirectories("hyper_lang", 1),
    //       "10%" -> iwslt15ExperimentDirectories("hyper_lang", 10)
    //     ))
    //   //        "100%" -> iwslt15ExperimentDirectories("hyper_lang", 100)))
    //   val parsedResults = results.flatMap(r => r._2.map(rr => {
    //     (r._1, rr._1, rr._2.flatMap(d => {
    //       (workingDir / d).createIfNotExists(asDirectory = true)
    //       val localDestination = workingDir / d / "experiment.log"
    //       val remotePath = s"~/code/symphony-mt/temp/experiments/$d/experiment.log"
    //       ssh.newSCPFileTransfer().download(remotePath, new FileSystemFile(localDestination.toJava))
    //       LogParser.parseEvaluationResults(localDestination)
    //     }))
    //   }))
    //
    //   ExperimentResults.plot(
    //     results = parsedResults,
    //     metric = BLEU,
    //     datasets = Set("IWSLT-15"),
    //     datasetTags = Set("tst2013"),
    //     evalDatasets = Set("IWSLT-15"),
    //     evalDatasetTags = Set("tst2012"),
    //     title = "IWSLT-15")
    // } finally {
    //   ssh.disconnect()
    // }
    val workingDir = File.currentWorkingDirectory / "temp" / "results"
    workingDir.createIfNotExists(asDirectory = true)
    val server = GPU3Server
    // val remotePath = "~/code/symphony-mt/temp/experiments/iwslt15.en-cs.en-de.en-fr.en-th.en-vi.both:true.back:true.bi_rnn:2:2.lstm:tanh.hyper_lang.l:8.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-simple-20000-5.pp:100.bs:128.nb:5.sml:80.tml:80/experiment.log"
    // val remotePath = "~/code/symphony-mt/temp/experiments/iwslt15.en-cs.en-de.en-fr.en-th.en-vi.both:true.back:true.bi_rnn:2:2.lstm:tanh.hyper_lang.l:8.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-bpe-10000.pp:100.bs:128.nb:5.sml:80.tml:80/experiment.log"
    // val remotePath = "~/code/symphony-mt/temp/experiments/iwslt14.en-de.en-es.en-fr.en-he.en-it.en-nl.en-pt-br.en-ro.en-ru.both:true.back:true.bi_rnn:2:2.lstm:tanh.hyper_lang.l:8.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-bpe-10000.pp:100.bs:128.nb:5.sml:80.tml:80/experiment.log"
    // val remotePath = "~/code/symphony-mt/temp/experiments/iwslt17.both:true.back:true.bi_rnn:2:2.lstm:tanh.hyper_lang.l:8.w:512.r.a.dropout:0.2.ls:0.1.t:moses.c:moses.v:generated-bpe-10000.pp:100.bs:128.nb:5.sml:80.tml:80/experiment.log"
    // NOTE(review): "zs" presumably stands for zero-shot language pairs -- these are excluded
    // from the supervised `languagePairs` set below. Confirm against the experiment setup.
    val zsLanguagePairs = Set("it-ro", "ro-it", "nl-de", "de-nl")
    val languagePairs = Set("en", "de", "it", "nl", "ro").toSeq.combinations(2).map(p => s"${p(0)}-${p(1)}").toSet -- zsLanguagePairs
    // val remotePath = "~/code/symphony-mt/temp/experiments/iwslt15.en-cs.en-de.en-fr.en-th.en-vi.tw:true.ae:true.bi_rnn:2:2.lstm:tanh.google_multilingual.w:512.r.a.d:0.2.ls:0.1.t:moses.c:moses.v:simple-20000-5.pp:1.bs:128.nb:5.sml:50.tml:50/experiment.log"
    // val remotePath = "~/code/symphony-mt/temp/experiments/iwslt17.de:en.de:it.de:ro.en:it.en:nl.en:ro.it:nl.nl:ro.tw:true.ae:true.bi_rnn:2:2.lstm:tanh.hyper_lang:4.l:8.w:512.r.a.d:0.2.ls:0.1.t:moses.c:moses.v:simple-20000-5.pp:100.bs:128.nb:5.sml:50.tml:50/experiment.log"
    // val remotePath = "~/../e_a_platanios/code/symphony-mt/temp/experiments/iwslt17.de:en.de:it.de:ro.en:it.en:nl.en:ro.it:nl.nl:ro.tw:true.ae:true.bi_rnn:2:2.lstm:tanh.hyper_lang:8.l:512.w:512.r.a.d:0.2.ls:0.1.t:moses.c:moses.v:simple-20000-5.pp:100.bs:128.nb:5.sml:50.tml:50/experiment.log"
    // val remotePath = "~/code/symphony-mt/temp_new/experiments/iwslt17.de:en.tw:false.ae:false.bi_rnn:2:2.lstm:tanh.pairwise.w:256.r.a.d:0.2.ls:0.1.t:moses.c:moses.v:simple-20000-5.pp:100.bs:128.nb:5.sml:50.tml:50/experiment.log"
    val remotePath = "~/code/symphony-mt/temp_new/experiments/iwslt17.it:en.en:ro.tw:false.ae:false.bi_rnn:2:2.lstm:tanh.pairwise.w:256.r.a.d:0.2.ls:0.1.t:moses.c:moses.v:simple-10000-20.pp:100.bs:128.nb:5.sml:50.tml:50/experiment.log"
    // val languagePairs = Set("de", "ro").toSeq.combinations(2).map(p => s"${p(0)}-${p(1)}").toSet
    // val remotePath = "~/code/symphony-mt/temp/experiments/iwslt17.de-ro.tw:false.ae:false.bi_rnn:2:2.lstm:tanh.pairwise.w:512.r.a.d:0.2.ls:0.1.t:moses.c:moses.v:simple-20000-5.pp:100.bs:128.nb:5.sml:50.tml:50/experiment.log"
    val localDestination = workingDir / "test.log"
    val ssh = new SSHClient()
    ssh.useCompression()
    ssh.loadKnownHosts()
    ssh.connect(server.hostname)
    try {
      ssh.authPublickey(server.username)
      // Download the remote log, then compute the mean BLEU over the best evaluation step.
      ssh.newSCPFileTransfer().download(remotePath, new FileSystemFile(localDestination.toJava))
      val bestResults = ExperimentResult.best(results = LogParser.parseEvaluationResults(localDestination))(
        metric = BLEU, datasets = Set("IWSLT-17"), datasetTags = Set("tst2017"), languagePairs = languagePairs)
      val mean = ExperimentResult.mean(results = bestResults)(
        metric = BLEU,
        datasets = Set("IWSLT-17"),
        datasetTags = Set("tst2017"),
        languagePairs = languagePairs)
      println(mean)
    } finally {
      // Always tear down the SSH connection, even if the download or parsing fails.
      ssh.disconnect()
    }
  }
}
| 8,836 | 68.582677 | 296 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/Metric.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments
import org.platanios.symphony.mt.Language
import org.platanios.symphony.mt.evaluation
import org.platanios.symphony.mt.evaluation._
import org.platanios.symphony.mt.vocabulary.Vocabulary
/**
* @author Emmanouil Antonios Platanios
*/
/** Evaluation metric that appears in logged experiment evaluation results.
  *
  * Each metric is identified by the header name under which it appears in evaluation result
  * tables (see [[Metric.fromHeader]]).
  */
sealed trait Metric {
  // Header name under which this metric appears in evaluation result tables.
  val header: String
}
/** Exact match accuracy metric. */
case object ExactMatchAccuracy extends Metric {
  override val header: String = "ExactMatchAccuracy"
}
/** BLEU score metric. */
case object BLEU extends Metric {
  override val header: String = "BLEU"
}
/** Meteor score metric. */
case object Meteor extends Metric {
  override val header: String = "Meteor"
}
/** TER score metric. */
case object TER extends Metric {
  override val header: String = "TER"
}
/** Hypothesis sentence length metric. */
case object HypLen extends Metric {
  override val header: String = "HypLen"
}
/** Reference sentence length metric. */
case object RefLen extends Metric {
  override val header: String = "RefLen"
}
/** Sentence count metric. */
case object SentenceCount extends Metric {
  override val header: String = "#Sentences"
}
object Metric {
  /** Metrics keyed by the header name under which they appear in evaluation result tables. */
  private val headerToMetric: Map[String, Metric] = Map(
    "ExactMatchAccuracy" -> ExactMatchAccuracy,
    "BLEU" -> BLEU,
    "Meteor" -> Meteor,
    "TER" -> TER,
    "HypLen" -> HypLen,
    "RefLen" -> RefLen,
    "#Sentences" -> SentenceCount)

  /** Returns the metric corresponding to the provided evaluation table header name.
    *
    * @throws IllegalArgumentException If the header name does not correspond to a known metric.
    */
  @throws[IllegalArgumentException]
  def fromHeader(header: String): Metric = {
    headerToMetric.getOrElse(
      header,
      throw new IllegalArgumentException(s"'$header' does not represent a valid metric header name."))
  }

  /** Converts a command-line metric specification (e.g., `"bleu"` or `"bleu:4"`) into an
    * `MTMetric` instance.
    *
    * @throws IllegalArgumentException If the specification does not correspond to a known metric.
    */
  @throws[IllegalArgumentException]
  def cliToMTMetric(metric: String)(implicit languages: Seq[(Language, Vocabulary)]): MTMetric = {
    metric.split(":").toList match {
      case List("exact_match_accuracy") => evaluation.ExactMatchAccuracy()(languages)
      case List("bleu") => evaluation.BLEU()(languages)
      case List("bleu", maxOrder) => evaluation.BLEU(maxOrder.toInt)(languages)
      case List("bleu_nist") => evaluation.BLEU(smoothing = evaluation.BLEU.NISTSmoothing)(languages)
      case List("bleu_nist", maxOrder) => evaluation.BLEU(maxOrder.toInt, smoothing = evaluation.BLEU.NISTSmoothing)(languages)
      case List("meteor") => evaluation.Meteor()(languages)
      case List("ter") => evaluation.TER()(languages)
      case List("hyp_len") => evaluation.SentenceLength(forHypothesis = true, name = "HypLen")
      case List("ref_len") => evaluation.SentenceLength(forHypothesis = false, name = "RefLen")
      case List("sen_cnt") => evaluation.SentenceCount(name = "#Sentences")
      case _ => throw new IllegalArgumentException(s"'$metric' does not represent a valid metric.")
    }
  }
}
| 3,146 | 33.966667 | 128 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/ExperimentResult.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments
/**
* @author Emmanouil Antonios Platanios
*/
/** Result of evaluating a single metric at a single training step.
  *
  * @param  metric       Metric that was evaluated.
  * @param  step         Training step at which the evaluation was performed.
  * @param  dataset      Dataset name (e.g., "IWSLT-17").
  * @param  datasetTag   Dataset split tag (e.g., "tst2017").
  * @param  languagePair Language pair (e.g., "en-de").
  * @param  value        Metric value.
  */
case class ExperimentResult(
    metric: Metric,
    step: Long,
    dataset: String,
    datasetTag: String,
    languagePair: String,
    value: Double)
object ExperimentResult {
  /** Collects the values of all results that match the provided metric, datasets, dataset tags,
    * and language pairs, dropping any NaN values. Shared by [[best]] and [[mean]] (the filtering
    * logic was previously duplicated in both). */
  private def matchingValues(
      results: Seq[ExperimentResult],
      metric: Metric,
      datasets: Set[String],
      datasetTags: Set[String],
      languagePairs: Set[String]
  ): Seq[Double] = {
    results
        .filter(r =>
          r.metric == metric &&
              datasets.contains(r.dataset) &&
              datasetTags.contains(r.datasetTag) &&
              languagePairs.contains(r.languagePair))
        .map(_.value)
        .filter(!_.isNaN)
  }

  /** Returns all results of the step whose matching (non-NaN) metric values have the highest sum.
    *
    * Results are grouped by step, the best step is selected, and *all* of that step's results are
    * returned (not only the ones matching the filters). Note that `results` must be non-empty.
    *
    * @param  results       Results to select from.
    * @param  metric        Metric whose summed value is maximized.
    * @param  datasets      Datasets to consider (defaults to all datasets present in `results`).
    * @param  datasetTags   Dataset tags to consider (defaults to all tags present in `results`).
    * @param  languagePairs Language pairs to consider (defaults to all pairs present in `results`).
    * @return Results corresponding to the best step.
    */
  def best(results: Seq[ExperimentResult])(
      metric: Metric,
      datasets: Set[String] = results.map(_.dataset).toSet,
      datasetTags: Set[String] = results.map(_.datasetTag).toSet,
      languagePairs: Set[String] = results.map(_.languagePair).toSet
  ): Seq[ExperimentResult] = {
    val (_, bestStepResults) = results.groupBy(_.step).maxBy {
      case (_, stepResults) =>
        matchingValues(stepResults, metric, datasets, datasetTags, languagePairs).sum
    }
    bestStepResults
  }

  /** Returns the step of the first result, along with the mean of the matching (non-NaN) metric
    * values.
    *
    * Note that `results` must be non-empty and is assumed to contain results of a single step
    * (the step of the first result is the one reported). If no values match the filters, the
    * returned mean is NaN.
    *
    * @param  results       Results to average over.
    * @param  metric        Metric to average.
    * @param  datasets      Datasets to consider (defaults to all datasets present in `results`).
    * @param  datasetTags   Dataset tags to consider (defaults to all tags present in `results`).
    * @param  languagePairs Language pairs to consider (defaults to all pairs present in `results`).
    * @return Tuple containing the step and the mean metric value.
    */
  def mean(results: Seq[ExperimentResult])(
      metric: Metric,
      datasets: Set[String] = results.map(_.dataset).toSet,
      datasetTags: Set[String] = results.map(_.datasetTag).toSet,
      languagePairs: Set[String] = results.map(_.languagePair).toSet
  ): (Long, Double) = {
    val values = matchingValues(results, metric, datasets, datasetTags, languagePairs)
    (results.head.step, values.sum / values.size)
  }
}
| 2,298 | 34.921875 | 80 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/LogParser.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments
import better.files._
import java.nio.charset.StandardCharsets
import scala.util.matching.Regex
/**
* @author Emmanouil Antonios Platanios
*/
/** Parses evaluation results out of experiment log files. */
object LogParser {
  // Matches the line announcing an evaluation, capturing the training step number.
  protected[LogParser] val evaluationStepRegex : Regex = """.*Learn / Hooks / Evaluation - Step (.*) Evaluation:""".r
  // Matches the top border of the boxed evaluation results table (not used by `parseEvaluationResults`).
  protected[LogParser] val evaluationStartRegex: Regex = """.*Learn / Hooks / Evaluation - ╔[═╤]*╗""".r
  // Matches the bottom border of the boxed evaluation results table.
  protected[LogParser] val evaluationStopRegex : Regex = """.*Learn / Hooks / Evaluation - ╚[═╧]*╝""".r
  // Matches a content row of the table, capturing the row label (group 1) and cell values (group 2).
  protected[LogParser] val evaluationLineRegex : Regex = """.*Learn / Hooks / Evaluation - ║ ([^│]*) │ (.*) ║""".r
  /** Parses all evaluation results found in the provided log file.
    *
    * The parser scans the file line by line. Once a "Step N Evaluation:" line is seen, the first
    * matching table row provides the metric names (header row), and each subsequent row provides
    * one dataset's values, until the table's bottom border is reached.
    *
    * @param  file Log file to parse.
    * @return Parsed results (one per metric, per dataset row, per evaluation block in the file).
    */
  def parseEvaluationResults(file: File): Seq[ExperimentResult] = {
    var results = Seq.empty[ExperimentResult]
    // Step of the evaluation block currently being parsed (-1 while outside an evaluation block).
    var step = -1L
    // Metric names from the current table's header row (empty until the header has been seen).
    var metrics = Seq.empty[String]
    file.lineIterator(StandardCharsets.UTF_8).foreach(line => {
      if (step == -1L) {
        evaluationStepRegex.findFirstMatchIn(line) match {
          case Some(m) => step = m.group(1).toLong
          case None => ()
        }
      } else {
        if (evaluationStopRegex.findFirstMatchIn(line).nonEmpty) {
          // Reached the table's bottom border: reset the parser state.
          step = -1L
          metrics = Seq.empty[String]
        } else {
          evaluationLineRegex.findFirstMatchIn(line) match {
            case Some(m) if metrics.isEmpty =>
              // First content row: the header listing the metric names.
              metrics = m.group(2).split('│').map(_.trim)
            case Some(m) =>
              // Subsequent rows: "dataset/tag/languagePair" label with one value per metric.
              val dataset = m.group(1).split('/')
              results ++= m.group(2).split('│').map(_.trim.toDouble).zip(metrics).map(p => {
                ExperimentResult(Metric.fromHeader(p._2), step, dataset(0), dataset(1), dataset(2), p._1)
              })
            case None => ()
          }
        }
      }
    })
    results
  }
}
| 2,398 | 36.484375 | 117 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/config/InferenceConfigParser.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments.config
import org.platanios.symphony.mt.config.InferenceConfig
import org.platanios.symphony.mt.models.decoders.{GoogleLengthPenalty, NoLengthPenalty}
import org.platanios.symphony.mt.models.pivoting.NoPivot
import com.typesafe.config.Config
/**
* @author Emmanouil Antonios Platanios
*/
/** Parser for the inference section of experiment configurations. */
object InferenceConfigParser extends ConfigParser[InferenceConfig] {
  /** Parses an inference configuration (beam width, length penalty, and maximum decoding length
    * factor) from the provided configuration. Pivoting is always disabled.
    *
    * @param  config Configuration to parse.
    * @return Parsed inference configuration.
    */
  @throws[IllegalArgumentException]
  override def parse(config: Config): InferenceConfig = {
    // A missing "length-penalty" value means that no length penalty is applied.
    val lengthPenalty = config.getOption[Float]("length-penalty") match {
      case Some(weight) => GoogleLengthPenalty(weight)
      case None => NoLengthPenalty
    }
    InferenceConfig(
      pivot = NoPivot,
      beamWidth = config.get[Int]("beam-width"),
      lengthPenalty = lengthPenalty,
      maxDecodingLengthFactor = config.get[Float]("max-decoding-length-factor"))
  }
}
| 1,437 | 37.864865 | 116 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/config/ParametersParser.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments.config
import org.platanios.symphony.mt.data.{DataConfig, GeneratedVocabulary}
import org.platanios.symphony.mt.models.parameters._
import org.platanios.tensorflow.api.tf
import com.typesafe.config.Config
/**
* @author Emmanouil Antonios Platanios
*/
/** Parser for the model parameters section of experiment configurations.
  *
  * @param  dataConfig Data configuration, used to determine whether word embeddings are shared
  *                    across all languages (i.e., when a shared generated vocabulary is used).
  */
class ParametersParser(val dataConfig: DataConfig) extends ConfigParser[ParametersParser.Parameters] {
  /** Parses the word embeddings size and the parameter manager from the provided configuration.
    *
    * @param  config Configuration to parse.
    * @return Parsed parameters.
    * @throws IllegalArgumentException If the configured "manager" value is not a supported
    *                                  parameter manager type.
    */
  @throws[IllegalArgumentException]
  override def parse(config: Config): ParametersParser.Parameters = {
    val wordEmbeddingsSize = config.get[Int]("word-embeddings-size")
    val manager = config.get[String]("manager")
    // Word embeddings are shared across all languages only when using a generated vocabulary
    // whose second argument (the "shared" flag, presumably) is true -- confirm against
    // the `GeneratedVocabulary` definition.
    val sharedWordEmbeddings: Boolean = dataConfig.vocabulary match {
      case GeneratedVocabulary(_, true) => true
      case _ => false
    }
    // Word embedding granularity: shared across languages, per language pair (for the pairwise
    // manager), or per language (for all other managers).
    val wordEmbeddingsType = {
      if (sharedWordEmbeddings)
        SharedWordEmbeddings(wordEmbeddingsSize)
      else if (manager == "pairwise")
        WordEmbeddingsPerLanguagePair(wordEmbeddingsSize)
      else
        WordEmbeddingsPerLanguage(wordEmbeddingsSize)
    }
    val parameterManager = manager match {
      case "pairwise" =>
        PairwiseManager(
          wordEmbeddingsType = wordEmbeddingsType,
          variableInitializer = tf.VarianceScalingInitializer(
            1.0f,
            tf.VarianceScalingInitializer.FanAverageScalingMode,
            tf.VarianceScalingInitializer.UniformDistribution))
      case "contextual-language" =>
        val languageEmbeddingsSize = config.get[Int]("language-embeddings-size")
        // Hidden layer sizes are encoded as a '-'-separated list of integers (empty means none).
        val hiddenLayers = config.get[String]("contextual-hidden-layers", default = "")
        LanguageEmbeddingsManager(
          languageEmbeddingsSize = languageEmbeddingsSize,
          wordEmbeddingsType = wordEmbeddingsType,
          hiddenLayers = if (hiddenLayers.nonEmpty) hiddenLayers.split('-').map(_.toInt) else Seq.empty)
      case "contextual-language-pair" =>
        val languageEmbeddingsSize = config.get[Int]("language-embeddings-size")
        val hiddenLayers = config.get[String]("contextual-hidden-layers", default = "")
        LanguageEmbeddingsPairManager(
          languageEmbeddingsSize = languageEmbeddingsSize,
          wordEmbeddingsType = wordEmbeddingsType,
          hiddenLayers = if (hiddenLayers.nonEmpty) hiddenLayers.split('-').map(_.toInt) else Seq.empty)
      case "google-multilingual" =>
        GoogleMultilingualManager(
          wordEmbeddingsType = wordEmbeddingsType,
          variableInitializer = tf.VarianceScalingInitializer(
            1.0f,
            tf.VarianceScalingInitializer.FanAverageScalingMode,
            tf.VarianceScalingInitializer.UniformDistribution))
      case _ => throw new IllegalArgumentException(s"'$manager' does not represent a valid parameter manager type.")
    }
    ParametersParser.Parameters(wordEmbeddingsSize, parameterManager)
  }
  /** Returns a short tag string summarizing the parsed parameters (used as part of experiment
    * names -- see `Experiment.toString`). The manager cases here must stay consistent with those
    * handled in [[parse]].
    */
  override def tag(config: Config, parsedValue: => ParametersParser.Parameters): Option[String] = {
    val wordEmbeddingsSize = config.get[Int]("word-embeddings-size")
    val manager = config.get[String]("manager")
    val parameterManager = manager match {
      case "pairwise" => "pairwise"
      case "contextual-language" =>
        val languageEmbeddingsSize = config.get[Int]("language-embeddings-size")
        val hiddenLayers = config.get[String]("contextual-hidden-layers", default = "")
        if (hiddenLayers.isEmpty)
          s"contextual-language:$languageEmbeddingsSize"
        else
          s"contextual-language:$languageEmbeddingsSize:$hiddenLayers"
      case "contextual-language-pair" =>
        val languageEmbeddingsSize = config.get[Int]("language-embeddings-size")
        val hiddenLayers = config.get[String]("contextual-hidden-layers", default = "")
        if (hiddenLayers.isEmpty)
          s"contextual-language-pair:$languageEmbeddingsSize"
        else
          s"contextual-language-pair:$languageEmbeddingsSize:$hiddenLayers"
      case "google-multilingual" => "google-multilingual"
      case _ => throw new IllegalArgumentException(s"'$manager' does not represent a valid parameter manager type.")
    }
    Some(s"w:$wordEmbeddingsSize.pm:$parameterManager")
  }
}
object ParametersParser {
  /** Parsed model parameters.
    *
    * @param  wordEmbeddingsSize Size of the word embeddings.
    * @param  parameterManager   Parameter manager constructed from the configuration.
    */
  case class Parameters(wordEmbeddingsSize: Int, parameterManager: ParameterManager)
}
| 4,922 | 45.009346 | 116 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/config/package.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments
import com.typesafe.config.Config
import scala.reflect.runtime.universe._
/**
* @author Emmanouil Antonios Platanios
*/
package object config {
  /** Adds typed convenience accessors to Typesafe `Config` objects, allowing values to be
    * obtained using a type parameter (e.g., `config.get[Int]("num-steps")`), optionally with
    * defaults for missing paths. */
  implicit class ConfigWithDefaults(config: Config) {
    /** Extracts the value at `path`, converted to type `T`.
      *
      * Supported types: `Config`, `Boolean`, `Int`, `Long`, `Float`, `Double`, and `String`.
      * Previously an unsupported `T` failed with an opaque `scala.MatchError`; we now throw an
      * explicit `IllegalArgumentException` instead. */
    private def extract[T: TypeTag](path: String): T = {
      val value = typeOf[T] match {
        case t if t =:= typeOf[Config] => config.getConfig(path)
        case t if t =:= typeOf[Boolean] => config.getBoolean(path)
        case t if t =:= typeOf[Int] => config.getInt(path)
        case t if t =:= typeOf[Long] => config.getLong(path)
        case t if t =:= typeOf[Float] => config.getDouble(path).toFloat
        case t if t =:= typeOf[Double] => config.getDouble(path)
        case t if t =:= typeOf[String] => config.getString(path)
        case t => throw new IllegalArgumentException(
          s"Unsupported configuration value type '$t' for path '$path'.")
      }
      value.asInstanceOf[T]
    }

    /** Returns the value at `path` as a `T`.
      *
      * @throws com.typesafe.config.ConfigException If the path is missing or has the wrong type.
      */
    def get[T: TypeTag](path: String): T = {
      extract[T](path)
    }

    /** Returns the value at `path` as a `T`, or `None` if the path is missing. */
    def getOption[T: TypeTag](path: String): Option[T] = {
      if (config.hasPath(path))
        Some(extract[T](path))
      else
        None
    }

    /** Returns the value at `path` as a `T`, or `default` if the path is missing. */
    def get[T: TypeTag](path: String, default: T): T = {
      getOption[T](path).getOrElse(default)
    }
  }
}
| 2,266 | 35.564516 | 80 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/config/ConfigParser.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments.config
import com.typesafe.config.Config
/**
* @author Emmanouil Antonios Platanios
*/
trait ConfigParser[T] {
  /** Parses the provided configuration into a value of type `T`.
    *
    * @param  config Configuration to parse.
    * @return Parsed value.
    * @throws IllegalArgumentException If the configuration contains invalid or unsupported values.
    */
  @throws[IllegalArgumentException]
  def parse(config: Config): T

  /** Optionally returns a short string tag summarizing the configuration (e.g., for use in
    * experiment directory names). `parsedValue` is passed by-name so implementations that do not
    * need it never force its computation. Defaults to `None`. */
  def tag(config: Config, parsedValue: => T): Option[String] = None
}
| 937 | 31.344828 | 80 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/config/EnvironmentParser.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments.config
import org.platanios.symphony.mt.Environment
import com.typesafe.config.Config
import java.nio.file.Paths
/**
* @author Emmanouil Antonios Platanios
*/
/** Parser for [[Environment]] configurations.
  *
  * @param dataset   Name of the dataset being used; appended to the configured working directory.
  * @param modelName Name of the model being used; appended to the configured working directory.
  */
class EnvironmentParser(
    val dataset: String,
    val modelName: String
) extends ConfigParser[Environment] {
  /** Parses the provided configuration into an [[Environment]].
    *
    * @param  config Configuration to parse.
    * @return Parsed environment.
    * @throws IllegalArgumentException If the configuration contains invalid values.
    */
  @throws[IllegalArgumentException]
  override def parse(config: Config): Environment = {
    Environment(
      workingDir = Paths.get(config.get[String]("working-dir")).resolve(dataset).resolve(modelName),
      allowSoftPlacement = config.get[Boolean]("allow-soft-placement"),
      logDevicePlacement = config.get[Boolean]("log-device-placement"),
      gpuAllowMemoryGrowth = config.get[Boolean]("gpu-allow-memory-growth"),
      useXLA = config.get[Boolean]("use-xla"),
      numGPUs = config.get[Int]("num-gpus"),
      parallelIterations = config.get[Int]("parallel-iterations"),
      swapMemory = config.get[Boolean]("swap-memory"),
      // The literal string "none" disables seeding. Any other value must parse as an integer
      // (a non-integer value fails with a `NumberFormatException`, as before).
      randomSeed = config.get[String]("random-seed") match {
        case "none" => None
        case value => Some(value.toInt)
      },
      // Use the package's `getOption` helper instead of a manual `hasPath` check, for
      // consistency with the other parsers in this package. Behavior is identical.
      traceSteps = config.getOption[Int]("trace-steps"))
  }
}
| 1,929 | 32.859649 | 100 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/config/ModelParser.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments.config
import org.platanios.symphony.mt.{Environment, Language}
import org.platanios.symphony.mt.config.EvaluationConfig
import org.platanios.symphony.mt.data.{DataConfig, FileParallelDataset}
import org.platanios.symphony.mt.data.loaders._
import org.platanios.symphony.mt.experiments.{Experiment, Metric}
import org.platanios.symphony.mt.models.Model
import org.platanios.symphony.mt.models.Transformation.{Decoder, Encoder}
import org.platanios.symphony.mt.models.decoders.{OutputLayer, ProjectionToWordEmbeddings, ProjectionToWords}
import org.platanios.symphony.mt.models.parameters.{PairwiseManager, ParameterManager}
import org.platanios.symphony.mt.models.pivoting.{NoPivot, Pivot, SinglePivot}
import org.platanios.symphony.mt.models.rnn.attention.{BahdanauRNNAttention, LuongRNNAttention}
import org.platanios.symphony.mt.models.rnn._
import org.platanios.symphony.mt.models.transformer.{TransformerDecoder, TransformerEncoder}
import org.platanios.symphony.mt.models.transformer.helpers.{DenseReLUDenseFeedForwardLayer, DotProductAttention}
import org.platanios.symphony.mt.vocabulary.Vocabulary
import org.platanios.tensorflow.api._
import org.platanios.tensorflow.api.implicits.helpers.{OutputStructure, OutputToShape, Zero}
import com.typesafe.config.Config
/**
* @author Emmanouil Antonios Platanios
*/
class ModelParser[T: TF : IsHalfOrFloatOrDouble](
    task: Experiment.Task,
    dataset: String,
    datasets: => Seq[FileParallelDataset],
    languages: => Seq[(Language, Vocabulary)],
    environment: => Environment,
    parameterManager: => ParameterManager,
    dataConfig: => DataConfig
) extends ConfigParser[Model[_]] {
  // Nested parser used for the "training" section of the model configuration.
  protected val trainingConfigParser: TrainingConfigParser = {
    new TrainingConfigParser(dataset, datasets, dataConfig)
  }

  /** Parses the provided configuration into a [[Model]], wiring together the training,
    * inference, and evaluation configurations with the encoder/decoder described under
    * `model.encoder` / `model.decoder`.
    *
    * @param  config Configuration to parse.
    * @return Constructed model.
    * @throws IllegalArgumentException If the configuration contains invalid values.
    */
  @throws[IllegalArgumentException]
  override def parse(config: Config): Model[_] = {
    val trainingConfig = trainingConfigParser.parse(config.get[Config]("training"))
    // Evaluation language pairs default to the training language pairs when none are provided.
    val evalLanguagePairs = {
      val providedPairs = Experiment.parseLanguagePairs(config.get[String]("evaluation.languages"))
      if (providedPairs.isEmpty) trainingConfig.languagePairs else providedPairs
    }
    val evalDatasets: Seq[(String, FileParallelDataset)] = {
      val evalDatasetTags = config.get[String]("evaluation.datasets").split(',')
      task match {
        case Experiment.Train | Experiment.Evaluate =>
          // NOTE(review): this match is non-exhaustive — an unknown dataset name fails with a
          // `MatchError` rather than an `IllegalArgumentException`; confirm that all supported
          // dataset names are listed here.
          val evalTags = dataset match {
            case "iwslt14" => evalDatasetTags.map(t => (s"IWSLT-14/$t", IWSLT14Loader.Tag.fromName(t)))
            case "iwslt15" => evalDatasetTags.map(t => (s"IWSLT-15/$t", IWSLT15Loader.Tag.fromName(t)))
            case "iwslt16" => evalDatasetTags.map(t => (s"IWSLT-16/$t", IWSLT16Loader.Tag.fromName(t)))
            case "iwslt17" => evalDatasetTags.map(t => (s"IWSLT-17/$t", IWSLT17Loader.Tag.fromName(t)))
            case "wmt16" => evalDatasetTags.map(t => (s"WMT-16/$t", WMT16Loader.Tag.fromName(t)))
            case "ted_talks" => evalDatasetTags.map(t => (s"TED-Talks/$t", TEDTalksLoader.Tag.fromName(t)))
          }
          evalTags.flatMap(t => datasets.map(d => (t._1, d.filterTags(t._2))))
        case Experiment.Translate => Seq.empty
      }
    }
    val evalMetrics = config.get[String]("evaluation.metrics").split(',').map(Metric.cliToMTMetric(_)(languages))
    val encoder = ModelParser.encoderFromConfig[T](config.get[Config]("model.encoder"))
    val decoder = ModelParser.decoderFromConfig[T](config.get[Config]("model.decoder"))
    new Model(
      name = config.get[String]("model.name"),
      encoder = encoder,
      decoder = decoder,
      languages = languages,
      env = environment,
      parameterManager = parameterManager,
      dataConfig = dataConfig,
      trainingConfig = trainingConfig,
      inferenceConfig = InferenceConfigParser.parse(config.get[Config]("inference")).copy(
        pivot = ModelParser.pivot(parameterManager, trainingConfig.languagePairs)),
      evaluationConfig = EvaluationConfig(
        frequency = config.get[Int]("evaluation.frequency"),
        metrics = evalMetrics,
        datasets = evalDatasets,
        languagePairs = evalLanguagePairs))
  }

  /** Builds a tag for this model, composed of the model name followed by the training
    * configuration's tag. The commented-out lines below are a more detailed encoder/decoder
    * tag that is currently disabled. */
  override def tag(config: Config, parsedValue: => Model[_]): Option[String] = {
    // NOTE(review): `encoderConfig` and `decoderConfig` are only referenced by the disabled
    // code below; they are unused while that code remains commented out.
    val encoderConfig = config.get[Config]("model.encoder")
    val decoderConfig = config.get[Config]("model.decoder")
    // TODO: !!! Make this more detailed.
    val stringBuilder = new StringBuilder()
    stringBuilder.append(config.get[String]("model.name"))
    // stringBuilder.append(s".enc:${encoderConfig.get[String]("type")}")
    // if (encoderConfig.hasPath("num-layers"))
    //   stringBuilder.append(s":${encoderConfig.get[Int]("num-layers")}")
    // if (encoderConfig.hasPath("residual") && encoderConfig.get[Boolean]("residual"))
    //   stringBuilder.append(":r")
    // if (encoderConfig.get[Boolean]("remove-first-layer-residual-connection", false))
    //   stringBuilder.append(":no-first-residual")
    // stringBuilder.append(s".dec:${decoderConfig.get[String]("type")}")
    // if (decoderConfig.hasPath("num-layers"))
    //   stringBuilder.append(s":${decoderConfig.get[Int]("num-layers")}")
    // if (decoderConfig.hasPath("residual") && decoderConfig.get[Boolean]("residual"))
    //   stringBuilder.append(":r")
    // if (decoderConfig.hasPath("use-attention") && decoderConfig.get[Boolean]("use-attention"))
    //   stringBuilder.append(":a")
    // if (decoderConfig.get[Boolean]("remove-first-layer-residual-connection", false))
    //   stringBuilder.append(":no-first-residual")
    // decoderConfig.get[String]("output-layer") match {
    //   case "projection-to-words" => stringBuilder.append(":pw")
    //   case "projection-to-word-embeddings" => stringBuilder.append(":pwe")
    //   case _ => ()
    // }
    stringBuilder.append(s".${trainingConfigParser.tag(config.get[Config]("training"), parsedValue.trainingConfig).get}")
    Some(stringBuilder.toString)
  }
}
object ModelParser {
  /** Creates an encoder from the provided configuration.
    *
    * Supported encoder types: `"rnn"`, `"bi-rnn"`, `"gnmt"`, and `"transformer"`.
    *
    * @param  encoderConfig Configuration describing the encoder (the `model.encoder` section).
    * @return Constructed encoder, erased to `Encoder[Any]`.
    * @throws IllegalArgumentException If the configured encoder type is invalid.
    */
  @throws[IllegalArgumentException]
  private def encoderFromConfig[T: TF : IsHalfOrFloatOrDouble](encoderConfig: Config): Encoder[Any] = {
    val encoderType = encoderConfig.get[String]("type")
    encoderType match {
      case "rnn" =>
        val cell: Cell[T, _, _] = cellFromConfig[T](encoderConfig.get[Config]("cell"))
        // Re-expose the cell's own evidence instances under its path-dependent state types, so
        // that the encoder constructor below can resolve them implicitly.
        implicit val evOutputStructureState: OutputStructure[cell.StateType] = {
          cell.evOutputStructureState.asInstanceOf[OutputStructure[cell.StateType]]
        }
        implicit val evOutputToShapeState: OutputToShape.Aux[cell.StateType, cell.StateShapeType] = {
          cell.evOutputToShapeState.asInstanceOf[OutputToShape.Aux[cell.StateType, cell.StateShapeType]]
        }
        implicit val evZeroState: Zero.Aux[cell.StateType, cell.StateShapeType] = {
          cell.evZeroState.asInstanceOf[Zero.Aux[cell.StateType, cell.StateShapeType]]
        }
        new UnidirectionalRNNEncoder[T, cell.StateType, cell.StateShapeType](
          cell = cell.asInstanceOf[Cell[cell.DataType, cell.StateType, cell.StateShapeType]],
          numUnits = encoderConfig.get[Int]("num-units"),
          numLayers = encoderConfig.get[Int]("num-layers"),
          residual = encoderConfig.get[Boolean]("residual", default = true),
          dropout = encoderConfig.get[Float]("dropout", default = 0.0f)
        ).asInstanceOf[Encoder[Any]]
      case "bi-rnn" =>
        val cell: Cell[T, _, _] = cellFromConfig[T](encoderConfig.get[Config]("cell"))
        implicit val evOutputStructureState: OutputStructure[cell.StateType] = {
          cell.evOutputStructureState.asInstanceOf[OutputStructure[cell.StateType]]
        }
        implicit val evOutputToShapeState: OutputToShape.Aux[cell.StateType, cell.StateShapeType] = {
          cell.evOutputToShapeState.asInstanceOf[OutputToShape.Aux[cell.StateType, cell.StateShapeType]]
        }
        implicit val evZeroState: Zero.Aux[cell.StateType, cell.StateShapeType] = {
          cell.evZeroState.asInstanceOf[Zero.Aux[cell.StateType, cell.StateShapeType]]
        }
        new BidirectionalRNNEncoder[T, cell.StateType, cell.StateShapeType](
          cell = cell.asInstanceOf[Cell[cell.DataType, cell.StateType, cell.StateShapeType]],
          numUnits = encoderConfig.get[Int]("num-units"),
          numLayers = encoderConfig.get[Int]("num-layers"),
          residual = encoderConfig.get[Boolean]("residual", default = true),
          dropout = encoderConfig.get[Float]("dropout", default = 0.0f)
        ).asInstanceOf[Encoder[Any]]
      case "gnmt" =>
        val cell: Cell[T, _, _] = cellFromConfig[T](encoderConfig.get[Config]("cell"))
        implicit val evOutputStructureState: OutputStructure[cell.StateType] = {
          cell.evOutputStructureState.asInstanceOf[OutputStructure[cell.StateType]]
        }
        implicit val evOutputToShapeState: OutputToShape.Aux[cell.StateType, cell.StateShapeType] = {
          cell.evOutputToShapeState.asInstanceOf[OutputToShape.Aux[cell.StateType, cell.StateShapeType]]
        }
        implicit val evZeroState: Zero.Aux[cell.StateType, cell.StateShapeType] = {
          cell.evZeroState.asInstanceOf[Zero.Aux[cell.StateType, cell.StateShapeType]]
        }
        new GNMTEncoder[T, cell.StateType, cell.StateShapeType](
          cell = cell.asInstanceOf[Cell[cell.DataType, cell.StateType, cell.StateShapeType]],
          numUnits = encoderConfig.get[Int]("num-units"),
          numBiLayers = encoderConfig.get[Int]("num-bi-layers"),
          numUniLayers = encoderConfig.get[Int]("num-uni-layers"),
          numUniResLayers = encoderConfig.get[Int]("num-uni-res-layers"),
          dropout = encoderConfig.get[Float]("dropout", default = 0.0f)
        ).asInstanceOf[Encoder[Any]]
      case "transformer" =>
        val numUnits = encoderConfig.get[Int]("num-units")
        new TransformerEncoder[T](
          numUnits = numUnits,
          numLayers = encoderConfig.get[Int]("num-layers"),
          useSelfAttentionProximityBias = encoderConfig.get[Boolean]("use-self-attention-proximity-bias", default = false),
          postPositionEmbeddingsDropout = encoderConfig.get[Float]("post-position-embeddings-dropout"),
          removeFirstLayerResidualConnection = encoderConfig.get[Boolean]("remove-first-layer-residual-connection", false),
          attentionKeysDepth = encoderConfig.get[Int]("attention-keys-depth"),
          attentionValuesDepth = encoderConfig.get[Int]("attention-values-depth"),
          attentionNumHeads = encoderConfig.get[Int]("attention-num-heads"),
          selfAttention = DotProductAttention(
            dropoutRate = encoderConfig.get[Float]("dot-product-attention-dropout", default = 0.1f),
            dropoutBroadcastAxes = Set.empty,
            name = "DotProductAttention"),
          feedForwardLayer = DenseReLUDenseFeedForwardLayer(
            encoderConfig.get[Int]("feed-forward-filter-size"),
            numUnits,
            encoderConfig.get[Float]("feed-forward-relu-dropout"),
            Set.empty, "FeedForward")
        ).asInstanceOf[Encoder[Any]]
      case _ => throw new IllegalArgumentException(s"'$encoderType' does not represent a valid encoder type.")
    }
  }

  /** Creates a decoder from the provided configuration.
    *
    * Supported decoder types: `"rnn"` (with or without attention), `"gnmt"`, and `"transformer"`.
    *
    * @param  decoderConfig Configuration describing the decoder (the `model.decoder` section).
    * @return Constructed decoder, erased to `Decoder[Any]`.
    * @throws IllegalArgumentException If the configured decoder type is invalid.
    */
  @throws[IllegalArgumentException]
  private def decoderFromConfig[T: TF : IsHalfOrFloatOrDouble](decoderConfig: Config): Decoder[Any] = {
    val decoderType = decoderConfig.get[String]("type")
    decoderType match {
      case "rnn" =>
        val cell: Cell[T, _, _] = cellFromConfig[T](decoderConfig.get[Config]("cell"))
        val numUnits = decoderConfig.get[Int]("num-units")
        val numLayers = decoderConfig.get[Int]("num-layers")
        val residual = decoderConfig.get[Boolean]("residual", default = true)
        val dropout = decoderConfig.get[Float]("dropout", default = 0.0f)
        val useAttention = decoderConfig.get[Boolean]("use-attention", default = false)
        // Re-expose the cell's evidence under its path-dependent state types (see encoder above).
        implicit val evOutputStructureState: OutputStructure[cell.StateType] = {
          cell.evOutputStructureState.asInstanceOf[OutputStructure[cell.StateType]]
        }
        implicit val evOutputToShapeState: OutputToShape.Aux[cell.StateType, cell.StateShapeType] = {
          cell.evOutputToShapeState.asInstanceOf[OutputToShape.Aux[cell.StateType, cell.StateShapeType]]
        }
        if (useAttention) {
          new UnidirectionalRNNDecoderWithAttention(
            cell = cell.asInstanceOf[Cell[cell.DataType, cell.StateType, cell.StateShapeType]],
            numUnits = numUnits,
            numLayers = numLayers,
            residual = residual,
            dropout = dropout,
            attention = new LuongRNNAttention(
              scaled = true,
              probabilityFn = (o: Output[T]) => tf.softmax(o)),
            outputAttention = true,
            outputLayer = outputLayerFromConfig(decoderConfig)
          ).asInstanceOf[Decoder[Any]]
        } else {
          new UnidirectionalRNNDecoder(
            cell = cell.asInstanceOf[Cell[cell.DataType, cell.StateType, cell.StateShapeType]],
            numUnits = numUnits,
            numLayers = numLayers,
            residual = residual,
            dropout = dropout,
            outputLayer = outputLayerFromConfig(decoderConfig)
          ).asInstanceOf[Decoder[Any]]
        }
      case "gnmt" =>
        val cell: Cell[T, _, _] = cellFromConfig[T](decoderConfig.get[Config]("cell"))
        val dropout = decoderConfig.get[Float]("dropout", default = 0.0f)
        val useNewAttention = decoderConfig.get[Boolean]("use-new-attention", default = false)
        implicit val evOutputStructureState: OutputStructure[cell.StateType] = {
          cell.evOutputStructureState.asInstanceOf[OutputStructure[cell.StateType]]
        }
        implicit val evOutputToShapeState: OutputToShape.Aux[cell.StateType, cell.StateShapeType] = {
          cell.evOutputToShapeState.asInstanceOf[OutputToShape.Aux[cell.StateType, cell.StateShapeType]]
        }
        new GNMTDecoder(
          cell = cell.asInstanceOf[Cell[cell.DataType, cell.StateType, cell.StateShapeType]],
          numUnits = decoderConfig.get[Int]("num-units"),
          numLayers = decoderConfig.get[Int]("num-layers"), // TODO: Should be equal to `numBiLayers + numUniLayers`
          numResLayers = decoderConfig.get[Int]("num-res-layers"),
          attention = new BahdanauRNNAttention(
            normalized = true,
            probabilityFn = (o: Output[T]) => tf.softmax(o)),
          dropout = dropout,
          useNewAttention = useNewAttention,
          outputLayer = outputLayerFromConfig(decoderConfig)
        ).asInstanceOf[Decoder[Any]]
      case "transformer" =>
        new TransformerDecoder[T](
          numUnits = decoderConfig.get[Int]("num-units"),
          numLayers = decoderConfig.get[Int]("num-layers"),
          useSelfAttentionProximityBias = decoderConfig.get[Boolean]("use-self-attention-proximity-bias", default = false),
          postPositionEmbeddingsDropout = decoderConfig.get[Float]("post-position-embeddings-dropout"),
          attentionKeysDepth = decoderConfig.get[Int]("attention-keys-depth"),
          attentionValuesDepth = decoderConfig.get[Int]("attention-values-depth"),
          attentionNumHeads = decoderConfig.get[Int]("attention-num-heads"),
          selfAttention = DotProductAttention(
            dropoutRate = decoderConfig.get[Float]("dot-product-attention-dropout", default = 0.1f),
            dropoutBroadcastAxes = Set.empty,
            name = "DotProductAttention"),
          feedForwardLayer = DenseReLUDenseFeedForwardLayer(
            decoderConfig.get[Int]("feed-forward-filter-size"),
            decoderConfig.get[Int]("num-units"),
            decoderConfig.get[Float]("feed-forward-relu-dropout"),
            Set.empty, "FeedForward"),
          removeFirstLayerResidualConnection = decoderConfig.get[Boolean]("remove-first-layer-residual-connection", false),
          useEncoderDecoderAttentionCache = decoderConfig.get[Boolean]("use-encoder-decoder-attention-cache", default = true),
          outputLayer = outputLayerFromConfig(decoderConfig)
        ).asInstanceOf[Decoder[Any]]
      case _ => throw new IllegalArgumentException(s"'$decoderType' does not represent a valid decoder type.")
    }
  }

  /** Creates an RNN cell from the provided configuration.
    *
    * Supported cell types: `"gru"` and `"lstm"`; supported activations: `"sigmoid"`, `"tanh"`,
    * `"relu"`, `"relu6"`, `"elu"`, and `"selu"`.
    *
    * @param  cellConfig Configuration describing the cell.
    * @return Constructed RNN cell.
    * @throws IllegalArgumentException If the configured cell type or activation is invalid.
    */
  @throws[IllegalArgumentException]
  private def cellFromConfig[T: TF : IsReal](cellConfig: Config): Cell[T, _, _] = {
    // Parse the cell activation function.
    val cellActivation = cellConfig.get[String]("activation")
    val activation: Output[T] => Output[T] = cellActivation match {
      case "sigmoid" => tf.sigmoid(_)
      case "tanh" => tf.tanh(_)
      case "relu" => tf.relu(_)
      case "relu6" => tf.relu6(_)
      case "elu" => tf.elu(_)
      case "selu" => tf.selu(_)
      case _ => throw new IllegalArgumentException(s"'$cellActivation' does not represent a valid activation function.")
    }
    // Parse the cell type.
    val cellType = cellConfig.get[String]("type")
    cellType match {
      case "gru" => GRU(activation)
      case "lstm" =>
        // The LSTM forget-gate bias defaults to 1.0 when not configured.
        val forgetBias = if (cellConfig.hasPath("forget-bias")) cellConfig.get[Double]("forget-bias").toFloat else 1.0f
        BasicLSTM(activation, forgetBias)
      case _ => throw new IllegalArgumentException(s"'$cellType' does not represent a valid RNN cell type.")
    }
  }

  /** Chooses a pivoting strategy: pairwise parameter managers pivot through English; all other
    * managers use no pivoting. */
  private def pivot(
      parameterManager: ParameterManager,
      languagePairs: Set[(Language, Language)]
  ): Pivot = {
    parameterManager match {
      case _: PairwiseManager => SinglePivot(Language.English, languagePairs)
      case _ => NoPivot
    }
  }

  /** Creates a decoder output layer from the provided configuration.
    *
    * @param  decoderConfig Configuration describing the decoder.
    * @return Output layer specified by the `output-layer` key.
    * @throws IllegalArgumentException If the configured output layer is invalid.
    */
  @throws[IllegalArgumentException]
  private def outputLayerFromConfig(decoderConfig: Config): OutputLayer = {
    val outputLayer = decoderConfig.get[String]("output-layer")
    outputLayer match {
      case "projection-to-words" => ProjectionToWords
      case "projection-to-word-embeddings" => ProjectionToWordEmbeddings
      case _ => throw new IllegalArgumentException(s"'$outputLayer' does not represent a valid output layer.")
    }
  }
}
| 18,760 | 49.980978 | 126 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/config/DataConfigParser.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments.config
import org.platanios.symphony.mt.data.{DataConfig, GeneratedVocabulary, MergedVocabularies, NoVocabulary}
import org.platanios.symphony.mt.data.processors._
import org.platanios.symphony.mt.vocabulary.{BPEVocabularyGenerator, SimpleVocabularyGenerator}
import com.typesafe.config.Config
import java.nio.file.Paths
/**
* @author Emmanouil Antonios Platanios
*/
object DataConfigParser extends ConfigParser[DataConfig] {
  /** Parses the provided configuration into a [[DataConfig]].
    *
    * Supported tokenizers: `"none"`, `"moses"`, `"mteval13"`. Supported cleaners: `"none"`,
    * `"moses"`. Supported vocabulary types: `"none"`, `"merged"`, `"word-count"`, `"bpe"`.
    *
    * @param  config Configuration to parse.
    * @return Parsed data configuration.
    * @throws IllegalArgumentException If the tokenizer, cleaner, or vocabulary type is invalid.
    */
  @throws[IllegalArgumentException]
  override def parse(config: Config): DataConfig = {
    val tokenizer = config.get[String]("tokenizer")
    val cleaner = config.get[String]("cleaner")
    val vocabularyType = config.get[String]("vocabulary.type")
    DataConfig(
      dataDir = Paths.get(config.get[String]("data-dir")),
      loaderBufferSize = config.get[Int]("loader-buffer-size"),
      // Tokenizer and cleaner values are split on ':' to allow for future argument syntax,
      // but currently only bare names (a single segment) are accepted.
      tokenizer = tokenizer.split(":") match {
        case Array(name) if name == "none" => NoTokenizer
        case Array(name) if name == "moses" => MosesTokenizer()
        case Array(name) if name == "mteval13" => MTEval13Tokenizer(preserveCase = true)
        case _ => throw new IllegalArgumentException(s"'$tokenizer' does not represent a valid tokenizer.")
      },
      cleaner = cleaner.split(":") match {
        case Array(name) if name == "none" => NoCleaner
        case Array(name) if name == "moses" => MosesCleaner()
        case _ => throw new IllegalArgumentException(s"'$cleaner' does not represent a valid cleaner.")
      },
      vocabulary = vocabularyType match {
        case "none" => NoVocabulary
        case "merged" => MergedVocabularies
        case "word-count" =>
          // Vocabulary generated from word counts; `min-count` of -1 means no count threshold.
          val shared = config.get[Boolean]("vocabulary.shared")
          val size = config.get[Int]("vocabulary.size")
          val minCount = config.get[Int]("vocabulary.min-count", default = -1)
          GeneratedVocabulary(
            generator = SimpleVocabularyGenerator(size, minCount),
            shared = shared)
        case "bpe" =>
          // Byte-pair-encoding vocabulary; `min-count` of -1 means no count threshold.
          val shared = config.get[Boolean]("vocabulary.shared")
          val caseSensitive = config.get[Boolean]("vocabulary.case-sensitive")
          val numMergeOps = config.get[Int]("vocabulary.num-merge-ops")
          val minCount = config.get[Int]("vocabulary.min-count", default = -1)
          GeneratedVocabulary(
            generator = BPEVocabularyGenerator(numMergeOps, caseSensitive = caseSensitive, countThreshold = minCount),
            shared = shared)
        case _ => throw new IllegalArgumentException(s"'$vocabularyType' does not represent a valid vocabulary type.")
      },
      trainBatchSize = config.get[Int]("train-batch-size"),
      inferBatchSize = config.get[Int]("infer-batch-size"),
      evalBatchSize = config.get[Int]("eval-batch-size"),
      numBuckets = config.get[Int]("num-buckets"),
      bucketAdaptedBatchSize = config.get[Boolean]("bucket-adapted-batch-size"),
      srcMaxLength = config.get[Int]("src-max-length"),
      tgtMaxLength = config.get[Int]("tgt-max-length"),
      shuffleBufferSize = config.get[Int]("shuffle-buffer-size"),
      numPrefetchedBatches = config.get[Int]("num-prefetched-batches"),
      numParallelCalls = config.get[Int]("input-pipeline-num-parallel-calls"))
  }

  /** Builds a tag summarizing the parsed data configuration (tokenizer, cleaner, vocabulary,
    * batch size, number of buckets, and maximum source/target lengths), joined with '.'. */
  override def tag(config: Config, parsedValue: => DataConfig): Option[String] = {
    val stringBuilder = new StringBuilder()
    stringBuilder.append(s"${parsedValue.tokenizer}")
    stringBuilder.append(s".${parsedValue.cleaner}")
    stringBuilder.append(s".${parsedValue.vocabulary}")
    stringBuilder.append(s".bs:${parsedValue.trainBatchSize}")
    stringBuilder.append(s".nb:${parsedValue.numBuckets}")
    stringBuilder.append(s".sml:${parsedValue.srcMaxLength}")
    stringBuilder.append(s".tml:${parsedValue.tgtMaxLength}")
    Some(stringBuilder.toString)
  }
}
| 4,475 | 47.129032 | 118 |
scala
|
symphony-mt
|
symphony-mt-master/experiments/src/main/scala/org/platanios/symphony/mt/experiments/config/TrainingConfigParser.scala
|
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.symphony.mt.experiments.config
import org.platanios.symphony.mt.Language
import org.platanios.symphony.mt.config.TrainingConfig
import org.platanios.symphony.mt.data.{DataConfig, FileParallelDataset}
import org.platanios.symphony.mt.data.scores.{SentenceLength, SentenceRarity, SentenceScore, WordCounts}
import org.platanios.symphony.mt.experiments.Experiment
import org.platanios.symphony.mt.models.SentencePairsWithScores
import org.platanios.symphony.mt.models.curriculum.{DifficultyBasedCurriculum, SentencePairCurriculum}
import org.platanios.symphony.mt.models.curriculum.SentencePairCurriculum.{SourceSentenceScore, TargetSentenceScore}
import org.platanios.symphony.mt.models.curriculum.competency._
import org.platanios.symphony.mt.models.helpers.{AdaptiveAMSGrad, NoamSchedule}
import org.platanios.tensorflow.api._
import org.platanios.tensorflow.api.ops.training.optimizers.schedules._
import com.typesafe.config.Config
import java.nio.file.Paths
/**
* @author Emmanouil Antonios Platanios
*/
class TrainingConfigParser(
dataset: String,
datasets: => Seq[FileParallelDataset],
dataConfig: => DataConfig
) extends ConfigParser[TrainingConfig] {
  /** Parses the provided configuration into a [[TrainingConfig]].
    *
    * When `both-directions` is enabled, each configured language pair is mirrored (both
    * translation directions are trained). The optimizer is selected by name from the
    * `optimization.optimizer` key.
    *
    * @param  config Configuration to parse (the `training` section).
    * @return Parsed training configuration.
    * @throws IllegalArgumentException If the optimizer or learning rate schedule is invalid.
    */
  @throws[IllegalArgumentException]
  override def parse(config: Config): TrainingConfig = {
    val bothDirections = config.get[Boolean]("both-directions")
    val languagePairs = {
      val languagePairs = Experiment.parseLanguagePairs(config.get[String]("languages"))
      if (bothDirections)
        languagePairs.flatMap(p => Set(p, (p._2, p._1)))
      else
        languagePairs
    }
    val optimizer = config.get[String]("optimization.optimizer")
    // NOTE(review): `learningRate` is optional here, but every optimizer below except
    // "adafactor" calls `learningRate.get`, which fails with `NoSuchElementException` when
    // `optimization.learning-rate.value` is missing — confirm this is intended.
    val learningRate = config.getOption[Float]("optimization.learning-rate.value")
    val schedule = config.getOption[Config]("optimization.learning-rate.schedule") match {
      case Some(c) => c.get[String]("type") match {
        case "noam" => NoamSchedule(c.get[Int]("warmup-steps"), c.get[Int]("hidden-size"))
        case s => throw new IllegalArgumentException(s"'$s' does not represent a valid learning rate schedule type.")
      }
      case None => FixedSchedule[Float]()
    }
    TrainingConfig(
      languagePairs = languagePairs,
      useIdentityTranslations = bothDirections && config.get[Boolean]("use-identity-translations"),
      cacheData = config.get[Boolean]("cache-data"),
      labelSmoothing = config.get[Float]("label-smoothing"),
      numSteps = config.get[Int]("num-steps"),
      summarySteps = config.get[Int]("summary-frequency"),
      summaryDir = Paths.get(config.get[String]("summary-dir")).resolve(dataset),
      checkpointSteps = config.get[Int]("checkpoint-frequency"),
      optimization = TrainingConfig.OptimizationConfig(
        optimizer = optimizer match {
          case "gd" => tf.train.GradientDescent(learningRate.get, decay = schedule, learningRateSummaryTag = "LearningRate")
          case "adadelta" => tf.train.AdaDelta(learningRate.get, decay = schedule, learningRateSummaryTag = "LearningRate")
          case "adafactor" => tf.train.Adafactor(learningRate, decay = schedule, learningRateSummaryTag = "LearningRate")
          case "adagrad" => tf.train.AdaGrad(learningRate.get, decay = schedule, learningRateSummaryTag = "LearningRate")
          case "rmsprop" => tf.train.RMSProp(learningRate.get, decay = schedule, learningRateSummaryTag = "LearningRate")
          case "adam" =>
            // The beta hyperparameters for all Adam-family optimizers default to the standard
            // values of 0.9 and 0.999.
            val beta1 = config.get[Float]("optimization.beta1", 0.9f)
            val beta2 = config.get[Float]("optimization.beta2", 0.999f)
            tf.train.Adam(learningRate.get, decay = schedule, beta1 = beta1, beta2 = beta2, learningRateSummaryTag = "LearningRate")
          case "lazy_adam" =>
            val beta1 = config.get[Float]("optimization.beta1", 0.9f)
            val beta2 = config.get[Float]("optimization.beta2", 0.999f)
            tf.train.LazyAdam(learningRate.get, decay = schedule, beta1 = beta1, beta2 = beta2, learningRateSummaryTag = "LearningRate")
          case "amsgrad" =>
            val beta1 = config.get[Float]("optimization.beta1", 0.9f)
            val beta2 = config.get[Float]("optimization.beta2", 0.999f)
            tf.train.AMSGrad(learningRate.get, decay = schedule, beta1 = beta1, beta2 = beta2, learningRateSummaryTag = "LearningRate")
          case "lazy_amsgrad" =>
            val beta1 = config.get[Float]("optimization.beta1", 0.9f)
            val beta2 = config.get[Float]("optimization.beta2", 0.999f)
            tf.train.LazyAMSGrad(learningRate.get, decay = schedule, beta1 = beta1, beta2 = beta2, learningRateSummaryTag = "LearningRate")
          case "adaptive_amsgrad" =>
            val beta1 = config.get[Float]("optimization.beta1", 0.9f)
            val beta2 = config.get[Float]("optimization.beta2", 0.999f)
            AdaptiveAMSGrad(learningRate.get, decay = schedule, beta1 = beta1, beta2 = beta2, learningRateSummaryTag = "LearningRate")
          case "yellowfin" => tf.train.YellowFin(learningRate.get, decay = schedule, learningRateSummaryTag = "LearningRate")
          case _ => throw new IllegalArgumentException(s"'$optimizer' does not represent a valid optimizer.")
        },
        maxGradNorm = config.getOption[Float]("optimization.max-grad-norm"),
        colocateGradientsWithOps = config.get[Boolean]("optimization.colocate-gradients-with-ops")),
      logging = TrainingConfig.LoggingConfig(
        logLossFrequency = config.get[Int]("logging.log-loss-frequency"),
        launchTensorBoard = config.get[Boolean]("tensorboard.automatic-launch"),
        tensorBoardConfig = (
          config.get[String]("tensorboard.host"),
          config.get[Int]("tensorboard.port"))),
      curriculum = config.getOption[Config]("curriculum").flatMap(parseCurriculum(_, languagePairs)))
  }
@throws[IllegalArgumentException]
private def parseCurriculum(
curriculumConfig: Config,
languagePairs: Set[(Language, Language)]
): Option[DifficultyBasedCurriculum[SentencePairsWithScores[String]]] = {
curriculumConfig.get[String]("type") match {
case "difficulty" =>
val competency = parseCompetency(curriculumConfig.get[Config]("competency"))
val score = parseScore(curriculumConfig.get[Config]("score"))
val scoreSelectorString = curriculumConfig.get[String]("score.selector")
val scoreSelector = scoreSelectorString match {
case "source-sentence" => SourceSentenceScore
case "target-sentence" => TargetSentenceScore
case _ => throw new IllegalArgumentException(
s"'$scoreSelectorString' does not represent a valid score selector.")
}
val maxNumHistogramBins = curriculumConfig.get[Int]("max-num-histogram-bins")
Some(new SentencePairCurriculum(competency, score, scoreSelector, maxNumHistogramBins))
case curriculumType =>
throw new IllegalArgumentException(s"'$curriculumType' does not represent a valid curriculum type.")
}
}
@throws[IllegalArgumentException]
private def parseCompetency(competencyConfig: Config): Competency[Output[Float]] = {
  // Builds a competency schedule (fraction of training data available as a
  // function of the training step) from its configuration section.
  // Reads are deferred so that only the keys relevant to the chosen type are accessed.
  def initialValue = competencyConfig.get[Float]("initial-value")
  def stepsToFull = competencyConfig.get[Float]("num-steps-full-competency")
  competencyConfig.get[String]("type") match {
    case "linear-step" =>
      new LinearStepCompetency[Float](initialValue, stepsToFull)
    case "exp-step" =>
      new ExponentialStepCompetency[Float](initialValue, stepsToFull, competencyConfig.get[Int]("power"))
    case other =>
      throw new IllegalArgumentException(s"'$other' does not represent a valid competency type.")
  }
}
@throws[IllegalArgumentException]
private def parseScore(scoreConfig: Config): SentenceScore = {
  // Builds the sentence difficulty score used by the curriculum.
  scoreConfig.get[String]("type") match {
    case "sentence-length" =>
      SentenceLength
    case "sentence-rarity" =>
      // Word frequencies are pooled into a single per-sentence rarity value.
      val poolingName = scoreConfig.get[String]("pooling")
      val pooling = poolingName match {
        case "min" => SentenceRarity.MinPooling
        case "max" => SentenceRarity.MaxPooling
        case "mean" => SentenceRarity.MeanPooling
        case "product" => SentenceRarity.ProductPooling
        case pooling =>
          throw new IllegalArgumentException(s"'$pooling' does not represent a valid word frequencies pooling method.")
      }
      val wordCounts = WordCounts(caseSensitive = scoreConfig.get[Boolean]("case-sensitive", false))
      SentenceRarity(pooling, wordCounts)
    case other =>
      throw new IllegalArgumentException(s"'$other' does not represent a valid score type.")
  }
}
override def tag(config: Config, parsedValue: => TrainingConfig): Option[String] = {
  // Builds a compact, human-readable tag summarizing the training configuration
  // (used e.g. to name working directories). The emitted format is:
  //   bd:<b>.it:<b>.ls:<s>.opt:<name>[:<lr>][:beta1:<v>:beta2:<v>][.curr:<t>[.comp:<t>[:...]]]
  val bothDirections = config.get[Boolean]("both-directions")
  val optimizer = config.get[String]("optimization.optimizer")
  val identityTranslations = bothDirections && config.get[Boolean]("use-identity-translations")
  val builder = new StringBuilder(s"bd:$bothDirections")
  builder ++= s".it:$identityTranslations"
  builder ++= s".ls:${config.get[String]("label-smoothing")}"
  builder ++= s".opt:$optimizer"
  if (config.hasPath("optimization.learning-rate"))
    builder ++= s":${config.get[String]("optimization.learning-rate")}"
  // Adam-family optimizers also encode their beta parameters.
  optimizer match {
    case "adam" | "lazy_adam" | "amsgrad" | "lazy_amsgrad" =>
      builder ++= s":beta1:${config.get[String]("optimization.beta1", "0.9")}"
      builder ++= s":beta2:${config.get[String]("optimization.beta2", "0.999")}"
    case _ => ()
  }
  // Curriculum (and its competency schedule), when configured.
  if (config.hasPath("curriculum.type")) {
    builder ++= s".curr:${config.get[String]("curriculum.type")}"
    if (config.hasPath("curriculum.competency.type")) {
      val competencyType = config.get[String]("curriculum.competency.type")
      builder ++= s".comp:$competencyType"
      competencyType match {
        case "linear-step" | "exp-step" =>
          builder ++= s":${config.get[String]("curriculum.competency.initial-value")}"
          builder ++= s":${config.get[String]("curriculum.competency.num-steps-full-competency")}"
          if (competencyType == "exp-step")
            builder ++= s":${config.get[String]("curriculum.competency.power")}"
        case _ => ()
      }
    }
  }
  Some(builder.toString)
}
}
| 11,696 | 53.152778 | 139 |
scala
|
vgspectra
|
vgspectra-master/plot_exp02.py
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# AUTHOR: Delia Fano Yela
# DATE: February 2019
# CONTACT: d.fanoyela@qmul.ac.uk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# ------------------------------------------------------------------------------
# PLOT EXPERIMENT 02
# ------------------------------------------------------------------------------
# Import the results of experiment 02 for the DSD100 dataset (one row per song,
# one column per representation/distance combination).
df_mrr = pd.read_csv('results_experiments/df_mrr_DSD100.csv')
# Rearrange the data frame into long format so it's easier to use in seaborn:
# one row per (song, representation/distance) MRR value.
df_mrr['song'] = range(100)
df_mrr = pd.melt(df_mrr, id_vars=['song'], value_vars=['Ae', 'Ke', 'Pe', 'Ac', 'Kc', 'Pc'])
# melt keeps value_vars order: first 3 variables (x100 songs) are Euclidean,
# last 3 are cosine.
labels = ["Euclidean Distance"] *100*3 + ["Cosine Distance"]*100*3
df_mrr['type'] = labels
# Map the short column codes to representation names
# (A = spectrum, K = degree, P = degree distribution).
df_mrr = df_mrr.replace("Ae", "spectrum")
df_mrr = df_mrr.replace("Ac", "spectrum")
df_mrr = df_mrr.replace("Ke", "degree")
df_mrr = df_mrr.replace("Kc", "degree")
df_mrr = df_mrr.replace("Pe", "degree distribution")
df_mrr = df_mrr.replace("Pc", "degree distribution")
# Set the layout for the plots in seaborn.
sns.set(style="whitegrid", font = 'serif')
sns.set_context("paper", font_scale=1.5, rc={"lines.linewidth": 1.0})
# Boxplot of MRR per distance type, one box per representation.
g = sns.catplot( x = "type", y='value', hue = "variable", data=df_mrr, kind = 'box',
                    palette="deep", legend = False,
                    size=8, aspect=1)
# Remove top and right spines, set labels and titles.
ax = g.axes
for i in range(ax.shape[0]):  # FIX: range (not xrange) works on both Python 2 and 3
    for j in range(ax.shape[1]):
        ax[i,j].spines['right'].set_visible(False)
        ax[i,j].spines['top'].set_visible(False)
        ax[i,j].yaxis.set_ticks_position('left')
        ax[i,j].xaxis.set_ticks_position('bottom')
        ax[i,j].set_xlabel('')
        ax[i,j].set_ylabel('MRR', fontsize=14)
# Plot it.
plt.suptitle("EXPERIMENT 02", fontsize=16, fontweight="bold", va = 'center')
plt.legend(loc='best')
plt.savefig('exp02.png')
plt.show()
| 2,075 | 34.793103 | 91 |
py
|
vgspectra
|
vgspectra-master/plot_representation.py
|
from librosa import load
import librosa
import numpy as np
from visibility_algorithms import nvg_dc
import librosa.display
import matplotlib.pyplot as plt
# Load the example mixture (mono, 44.1 kHz).
mix_path = "AUDIO/DSDTEST/Mixtures/Dev/051 - AM Contra - Heart Peripheral/mixture.wav"
mixture, sr = load(mix_path, sr=44100, mono=True)
# Magnitude spectrogram, cropped to the first 500 frequency bins.
M = abs(librosa.core.stft(mixture, n_fft=2046, hop_length=1024, win_length=None, window='hann'))
M = M[:500,:]
# Calculate natural visibility graph (NVg) of each spectrum in M (i.e. columns) and its degree.
Nf = M.shape[0]  # number of frequency bins per spectrum
Na = M.shape[1]  # number of time frames
freq_bins = range(Nf)
K = np.empty([Nf,0]) # Degree matrix
for col in range(Na):  # FIX: range (not xrange) works on both Python 2 and 3
    # BUG FIX: the divide-and-conquer bound must be the series length Nf, not
    # the number of frames Na (as correctly done in experiment02.py). With
    # right = Na > Nf the recursion walks past the end of the spectrum and
    # calls max() on an empty slice.
    NVg_edges = nvg_dc(series = M[:,col].tolist() , timeLine = freq_bins , left = 0, right = Nf)
    # Adjacency matrix from the natural visibility edges (i.e. connections):
    Adj = np.zeros((Nf, Nf))
    for edge in NVg_edges:
        Adj[edge] = 1
        Adj[edge[-1::-1]] = 1 # NVg is an undirected graph so the Adjacency matrix is symmetric
    # Degree from adjacency matrix:
    NVg_degree = np.sum(Adj, axis = 0)
    # Store results (the degree distribution is not needed for this figure,
    # so the NVg_dist computation of the original script was dropped).
    K = np.hstack((K, NVg_degree[:,None]))
# Normalise both representations to [0, 1] and keep the first 300 bins for display.
Mn = M/np.max(M)
Kn = K/np.max(K)
Mn = Mn[:300,:]
Kn = Kn[:300,:]
mil = 47  # presumably ~number of STFT bins per 1 kHz (44100/2046 ~ 21.6 Hz/bin) -- TODO confirm
fig, ax = plt.subplots(1,2, figsize=(10,5))
plt.subplot(1,2,1)
librosa.display.specshow(Mn**0.6, sr = 44100, hop_length =1024 , x_axis = 'time', cmap = 'Greys')#, y_axis = 'linear') #librosa.amplitude_to_db(M,ref=np.max)
plt.yticks(np.arange(0,6*mil, mil), np.arange(0,6000,1000) )
plt.ylim([0, 6*mil])
plt.ylabel("Frequency (Hz)", fontname = 'serif')
plt.xlabel("Time (s)", fontname = 'serif')
plt.title('A. Spectrogram', fontname = 'serif')
plt.subplot(1,2,2)
librosa.display.specshow(Kn**0.6, sr = 44100, hop_length =1024, x_axis = 'time',cmap = 'Greys')#, y_axis = 'linear' )
plt.ylim([0, 6*mil])
plt.xlabel("Time (s)", fontname = 'serif')
plt.title('B. Spectral Visibility Graph Degree', fontname = 'serif')
plt.tight_layout()
plt.savefig("plot_representation_6k.png")
plt.show()
| 2,102 | 30.863636 | 158 |
py
|
vgspectra
|
vgspectra-master/README.md
|
# vgspectra : Spectral Visibility Graphs
This code accompanies the [paper](http://eusipco2019.org/Proceedings/papers/1570533774.pdf) "Spectral Visibility Graphs: Application to Similarity of Harmonic Signals" by Delia Fano Yela, Dan Stowell and Mark Sandler (EUSIPCO 2019), where we introduce the visibility graph for audio spectra and propose a novel representation for audio analysis: the spectral visibility graph degree. Such a representation inherently captures the harmonic content of the signal whilst being resilient to broadband noise. We present experiments demonstrating its utility to measure robust similarity between harmonic signals in real and synthesised audio data.
In the paper we present two experiments demonstrating the utility of the proposed representation of audio signals for harmonic similarity measure.
Experiment 01 for synthesised audio data:
- Audio data used: AUDIO/synth_dataset
- Script to run experiment: experiment01.py
- Script to plot the results (Figure 3 in the paper): plot_exp01.py
Experiment 02 for real audio data:
- Audio data used: DSD100 dataset available at https://sigsep.github.io/datasets/dsd100.html
- Script to run experiment: experiment02.py
- Script to plot the results (Figure 4 in the paper): plot_exp02.py
Other:
- plot_representation.py : Script to plot Figure 2 of the paper, showing an example of a spectrogram and its corresponding proposed representation, the spectral visibility graph degree. The sample audio can be found in AUDIO/sample_used_plot_representation
- visibility_algorithms.py : our implementations of the different visibility graphs algorithms.
- figures : folder containing the images used in the paper
- results_experiments : folder containing the results (in form .csv) obtained from the experiments scripts and used by the plotting scripts.
# References
**If you use this work for your research please cite:**
```
@INPROCEEDINGS{vgspectra,
author={D. F. {Yela} and D. {Stowell} and M. {Sandler}},
booktitle={2019 27th European Signal Processing Conference (EUSIPCO)},
title={Spectral Visibility Graphs: Application to Similarity of Harmonic Signals},
year={2019},
volume={},
number={},
pages={1-5}}
```
------------------------------------------------------------------------------
AUTHOR: Delia Fano Yela
DATE: April 2020
CONTACT: d.fanoyela@qmul.ac.uk - most recently : delia@chordify.net
| 2,410 | 51.413043 | 638 |
md
|
vgspectra
|
vgspectra-master/visibility_algorithms.py
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# AUTHOR: Delia Fano Yela
# DATE: December 2018
# CONTACT: d.fanoyela@qmul.ac.uk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Different implementations for computing the natural visibility graph (NVG) [1]
# and the horizontal visibility graph (HVG) [2].
# Here we only implement the undirected graphs versions.
# For the original implementation in Fortran 90/94 of both directed and undirected
# versions, please refer to [3].
# Here we can find two different implementations to compute the NVG and HVG
# of a given series of numbers:
# 1. The original implementation proposed in [3]
# 2. A divide and conquer (D&C) approach presented in [4]
# REFERENCES
# [1]: "From time series to complex networks: the visibility graph"
# Lucas Lacasa, Bartolo Luque, Fernando Ballesteros, Jordi Luque, Juan C. Nuno
# PNAS, vol. 105, no. 13 (2008) 4972-4975
# [2]: "Horizontal visibility graphs: exact results for random time series"
#Bartolo Luque, Lucas Lacasa, Jordi Luque, Fernando J. Ballesteros
#Physical Review E 80, 046103 (2009)
# [3]: http://www.maths.qmul.ac.uk/~lacasa/Software.html
# [4]: "Fast transformation from time series to visibility graphs"
# Xin Lan, Hongming Mo, Shiyu Chen, Qi Liu, and Yong decending
# Chaos 25, 083105 (2015); doi: 10.1063/1.4927835
import numpy as np
# ------------------------------------------------------------------------------
# NATURAL VISIBILITY GRAPH ( NVG )
# ------------------------------------------------------------------------------
# a. BASIC IMPLEMENTATION
# --------------------------
def nvg(series, timeLine):
    """Natural visibility graph (NVG) of `series`, basic O(n^2) scan.

    Two samples (ta, ya) and (tb, yb) are connected iff every sample between
    them lies strictly below the straight line joining them (Lacasa et al.,
    PNAS 2008).

    Parameters
    ----------
    series : sequence of numbers (the data values to be transformed).
    timeLine : sequence of time stamps, same length as `series`.

    Returns
    -------
    List of ``[ta, [tb1, tb2, ...]]`` pairs: for each node (identified by its
    time stamp), the later nodes visible from it. Nodes with no visible
    successor are omitted.
    """
    L = len(series)
    all_visible = []
    # FIX: range (not xrange) so the module works on both Python 2 and 3;
    # iteration behaviour is identical.
    for i in range(L - 1):
        node_visible = []
        ya = float(series[i])
        ta = timeLine[i]
        for j in range(i + 1, L):
            yb = float(series[j])
            tb = timeLine[j]
            yc = series[i + 1:j]
            tc = timeLine[i + 1:j]
            # Visible iff every intermediate sample is below the (ta,ya)-(tb,yb) line.
            # Note: (yb - ya) is a float, so the division is true division on
            # Python 2 as well (multiplication binds before the division).
            if all(yc[k] < (ya + (yb - ya) * (tc[k] - ta) / (tb - ta)) for k in range(len(yc))):
                node_visible.append(tb)
        if len(node_visible) > 0:
            all_visible.append([ta, node_visible])
    return all_visible
# b. DIVIDE & CONQUER <---------------------- QUICKEST NVG IMPLEMENTATION
# --------------------------
def nvg_dc(series, timeLine, left, right, all_visible = None):
    """Natural visibility graph via divide and conquer (Lan et al., 2015).

    The maximum of ``series[left:right]`` sees a node iff the visibility line
    test holds; the interval is then split at the maximum and both halves are
    processed recursively. ``right`` is exclusive. ``all_visible`` is the
    accumulator shared by the recursive calls; callers should leave it None.

    Returns the same ``[time stamp, [visible time stamps]]`` structure as
    :func:`nvg`, although entries appear in recursion order rather than in
    time order.
    """
    # FIX: identity check (`is None`) instead of `== None`; also guarantees a
    # fresh accumulator list on every top-level call.
    if all_visible is None:
        all_visible = []
    node_visible = []
    if left < right:
        # Pivot: index (in the full series) of the maximum over [left, right).
        k = series[left:right].index(max(series[left:right])) + left
        # Check if k can see each node of series[left...right].
        # FIX: range (not xrange) for Python 2/3 compatibility.
        for i in range(left, right):
            if i != k:
                a = min(i, k)
                b = max(i, k)
                ya = float(series[a])
                ta = timeLine[a]
                yb = float(series[b])
                tb = timeLine[b]
                yc = series[a + 1:b]
                tc = timeLine[a + 1:b]
                if all(yc[j] < (ya + (yb - ya) * (tc[j] - ta) / (tb - ta)) for j in range(len(yc))):
                    node_visible.append(timeLine[i])
        if len(node_visible) > 0:
            all_visible.append([timeLine[k], node_visible])
        # Recurse on the two halves either side of the pivot.
        nvg_dc(series, timeLine, left, k, all_visible = all_visible)
        nvg_dc(series, timeLine, k + 1, right, all_visible = all_visible)
    return all_visible
# a. NUMPY ORIGINAL IMPLEMENTATION
# --------------------------------
def nvg_np(series, timeLine):
    """NumPy variant of the basic natural visibility graph scan.

    Both ``series`` and ``timeLine`` are numpy arrays. Returns, for every
    node with at least one visible successor, a pair
    ``[time stamp, [time stamps of later visible nodes]]``.
    """
    n = len(series)
    all_visible = []
    for i in np.arange(n - 1):
        ya, ta = float(series[i]), timeLine[i]
        visible_from_i = []
        for j in np.arange(i + 1, n):
            yb, tb = float(series[j]), timeLine[j]
            between_y = series[i + 1:j]
            between_t = timeLine[i + 1:j]
            # Straight line joining (ta, ya) and (tb, yb), evaluated at the
            # intermediate time stamps; vectorized over the whole gap.
            line = ya + (yb - ya) * (between_t - ta) / (tb - ta)
            if np.all(between_y < line):
                visible_from_i.append(tb)
        if visible_from_i:
            all_visible.append([ta, visible_from_i])
    return all_visible
# b. NUMPY DIVIDE & CONQUER
# --------------------------
# !!!! SERIES IS A NUMPY ARRAY HERE AND SO IS TIMELINE !!!!
def nvg_dc_np(series, timeLine, left, right, all_visible = None):
    """NumPy divide-and-conquer natural visibility graph.

    ``series`` and ``timeLine`` are numpy arrays; ``right`` is exclusive.
    ``all_visible`` is the accumulator shared by the recursive calls and
    should be left as None by callers. Same output structure as
    :func:`nvg_dc` (entries in recursion order).
    """
    # FIX: identity check (`is None`) instead of `== None` -- comparing a
    # numpy-filled list against None with `==` is fragile.
    if all_visible is None:
        all_visible = []
    node_visible = []
    if left < right:
        # Pivot: index of the maximum over [left, right).
        k = np.argmax(series[left:right]) + left
        # Check if k can see each node of series[left...right].
        for i in np.arange(left, right):
            if i != k:
                a = min(i, k)
                b = max(i, k)
                ya = series[a]
                ta = timeLine[a]
                yb = series[b]
                tb = timeLine[b]
                yc = series[a + 1:b]
                tc = timeLine[a + 1:b]
                # np.all over an empty gap is True (adjacent nodes always see each other).
                if np.all(yc < (ya + (yb - ya) * (tc - ta) / (tb - ta))):
                    node_visible.append(timeLine[i])
        if len(node_visible) > 0:
            all_visible.append([timeLine[k], node_visible])
        nvg_dc_np(series, timeLine, left, k, all_visible = all_visible)
        nvg_dc_np(series, timeLine, k + 1, right, all_visible = all_visible)
    return all_visible
return all_visible
# ------------------------------------------------------------------------------
# HORIZONTAL VISIBILITY GRAPH ( HVG )
# ------------------------------------------------------------------------------
# a. ORIGINAL IMPLEMENTATION
# --------------------------
def hvg(series, timeLine):
    """Horizontal visibility graph (HVG) of `series`, basic scan.

    Two samples are connected iff every sample strictly between them is lower
    than both endpoints (Luque et al., Phys. Rev. E 80, 046103, 2009).

    Returns a list of ``[ta, [visible later time stamps]]`` pairs; nodes with
    no visible successor are omitted.
    """
    L = len(series)
    all_visible = []
    # FIX: range (not xrange) so the module works on both Python 2 and 3.
    for i in range(L - 1):
        node_visible = []
        ya = series[i]
        ta = timeLine[i]
        for j in range(i + 1, L):
            yb = series[j]
            tb = timeLine[j]
            yc = series[i + 1:j]
            # (unused `tc` slice of the original was removed)
            if all(yc[k] < min(ya, yb) for k in range(len(yc))):
                node_visible.append(tb)
            elif all(yc[k] >= max(ya, yb) for k in range(len(yc))):
                # Every intermediate sample is at least as high as both
                # endpoints: no later node can be horizontally visible from i.
                break
        if len(node_visible) > 0:
            all_visible.append([ta, node_visible])
    return all_visible
# b. DIVIDE & CONQUER HVG
# --------------------------
def hvg_dc(series, timeLine, left, right, all_visible = None):
    """Horizontal visibility graph via divide and conquer.

    ``right`` is exclusive. ``all_visible`` is the accumulator shared by the
    recursive calls and should be left as None by callers. Same output
    structure as :func:`hvg` (entries in recursion order).
    """
    # FIX: identity check (`is None`) instead of `== None`.
    if all_visible is None:
        all_visible = []
    node_visible = []
    if left < right:
        # Pivot: index (in the full series) of the maximum over [left, right).
        k = series[left:right].index(max(series[left:right])) + left
        # Check if k can see each node of series[left...right].
        # FIX: range (not xrange); the generator variable is named `j` so it
        # no longer shadows the pivot index `k` as in the original.
        # (unused ta/tb/tc assignments of the original were removed)
        for i in range(left, right):
            if i != k:
                a = min(i, k)
                b = max(i, k)
                ya = series[a]
                yb = series[b]
                yc = series[a + 1:b]
                if all(yc[j] < min(ya, yb) for j in range(len(yc))):
                    node_visible.append(timeLine[i])
                elif all(yc[j] >= max(ya, yb) for j in range(len(yc))):
                    break
        if len(node_visible) > 0:
            all_visible.append([timeLine[k], node_visible])
        hvg_dc(series, timeLine, left, k, all_visible = all_visible)
        hvg_dc(series, timeLine, k + 1, right, all_visible = all_visible)
    return all_visible
| 7,960 | 31.230769 | 97 |
py
|
vgspectra
|
vgspectra-master/plot_exp01.py
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# AUTHOR: Delia Fano Yela
# DATE: February 2019
# CONTACT: d.fanoyela@qmul.ac.uk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# ------------------------------------------------------------------------------
# PLOT EXPERIMENT 01
# ------------------------------------------------------------------------------
# Import the results from experiment 01 (MRR per representation, distance and SNR).
df_mrr = pd.read_csv('results_experiments/df_mrr_exp01.csv')
# Set the layout for the plots in seaborn.
sns.set(style="whitegrid", font = 'serif')
sns.set_context("paper", font_scale=1.5, rc={"lines.linewidth": 1.0})
# Point plot: MRR vs SNR, one panel per distance type ("dtype"), one line per
# representation ("ftype").
g = sns.catplot( x = "SNR", y='MRR', hue = "ftype", data=df_mrr, col = "dtype", legend = False, kind = "point",
                markers=['d','o','s'], linestyles=[":", "-", "-."],
                size=8, aspect=1)
# Set the limits (MRR is bounded by [0, 1]; small margin for readability).
plt.ylim((-0.1, 1.1))
# Remove top and right spines, set labels and titles.
ax = g.axes
for i in range(ax.shape[0]):  # FIX: range (not xrange) works on both Python 2 and 3
    for j in range(ax.shape[1]):
        ax[i,j].spines['right'].set_visible(False)
        ax[i,j].spines['top'].set_visible(False)
        ax[i,j].yaxis.set_ticks_position('left')
        ax[i,j].xaxis.set_ticks_position('bottom')
        ax[i,j].set_xlabel(' SNR in dB', fontsize=12)
        # Left panel is Euclidean distance, right panel cosine (panel order
        # follows the "dtype" column values).
        if j == 0:
            ax[i,j].set_ylabel('MRR', fontsize=12)
            ax[i,j].set_title('Euclidean Distance', fontsize=14)
        else:
            ax[i,j].set_ylabel('')
            ax[i,j].set_title('Cosine Distance', fontsize=14)
# Plot it.
plt.suptitle("EXPERIMENT 01", fontsize=16, fontweight="bold")
plt.legend(scatterpoints=1, loc='best')
plt.savefig('exp01.png')
plt.show()
| 1,827 | 36.306122 | 111 |
py
|
vgspectra
|
vgspectra-master/experiment02.py
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# AUTHOR: Delia Fano Yela
# DATE: February 2019
# CONTACT: d.fanoyela@qmul.ac.uk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# WARNING: this script uses multiprocessing => CPU intensive
# you can change the number of processes below if you wish
# Compare all vocal tracks with all the mixture (for all the dataset)
import os
from itertools import chain
from librosa import load
import multiprocessing as mp
import random
import sklearn.metrics
import pandas as pd
import librosa
import numpy as np
from visibility_algorithms import nvg_dc
import sys
def processFile(subdir, filename):
    """Compute mean-reciprocal-rank (MRR) retrieval scores for one DSD100 song.

    Loads the mixture `filename` found in `subdir` and its corresponding clean
    vocal stem, builds three frame-wise representations (magnitude spectrum A,
    visibility-graph degree K, degree distribution P), and measures how well
    each clean vocal frame retrieves its own mixture frame under Euclidean and
    cosine distances. Results are written to
    ``results04_Test/df_mrr_<song>.csv`` (hard-coded output directory).
    """
    # Get the path for each mixture and its corresponding vocal stem.
    mix_path = os.path.join(subdir, filename)
    vox_path = os.path.join(subdir.replace("Mixtures", "Sources"), "vocals.wav")
    # Load the files:
    mixture, sr = load(mix_path, sr=44100, mono=True)
    vocals, sr = load(vox_path, sr=44100, mono=True)
    # Gate inaudible vocal samples (original comment: "-60dB" level gate).
    # BUG FIX: the original test `vocals < 0.001` is true for EVERY negative
    # sample, so it silently zeroed the whole negative half of the waveform;
    # the intent is a magnitude gate.
    vocals[np.abs(vocals) < 0.001] = 0
    # STFT magnitudes.
    M = abs(librosa.core.stft(mixture, n_fft=2046, hop_length=1024, win_length=None, window='hann'))
    V = abs(librosa.core.stft(vocals, n_fft=2046, hop_length=1024, win_length=None, window='hann'))
    # Index of frames with vocal activation. The 1/V.shape[1] scaling is a
    # constant factor and does not affect the adaptive threshold below.
    Ev = np.sum(V**2, axis=0) / V.shape[1]
    Emin = np.min(Ev[np.where(Ev>0.)[0]]) # get the minimum excluding zeros
    Emax = np.max(Ev[np.where(Ev>0.)[0]])
    lamda = (Emax - Emin) / Emax
    th = (1 - lamda) * Emax + lamda * Emin # "Approach for Energy-Based Voice Detector with Adaptive Scaling Factor", K. Sakhnov and al., 36:4, IJCS_36_4_16, 2009
    vox_frames = np.where( Ev > th )[0]
    other_frames = np.delete(range(V.shape[1]), vox_frames)
    # Stack them in a matrix: clean vocal frames, corresponding mixture frames, other frames.
    A = np.hstack((V[:,vox_frames], M[:,vox_frames], M[:,other_frames]))
    # To reduce computation time, focus on precise band (<10KHz).
    A = A[:500,:]
    # Calculate natural visibility graph (NVg) of each spectrum in A (i.e. columns) and its degree.
    Nf = A.shape[0]
    Na = A.shape[1]
    freq_bins = range(Nf)
    K = np.empty([Nf,0]) # Degree matrix
    P = np.empty([Nf,0]) # Degree distribution matrix
    for col in range(Na):  # FIX: range (not xrange) works on both Python 2 and 3
        NVg_edges = nvg_dc(series = A[:,col].tolist() , timeLine = freq_bins , left = 0, right = Nf)
        # Adjacency matrix from the natural visibility edges (i.e. connections):
        Adj = np.zeros((Nf, Nf))
        for edge in NVg_edges:
            Adj[edge] = 1
            Adj[edge[-1::-1]] = 1 # NVg is an undirected graph so the Adjacency matrix is symmetric
        # Degree from adjacency matrix:
        NVg_degree = np.sum(Adj, axis = 0)
        # Degree distribution
        NVg_dist = np.bincount(NVg_degree.astype(int), minlength = Nf).astype('float64') / Nf
        # Store results
        K = np.hstack((K, NVg_degree[:,None]))
        P = np.hstack((P, NVg_dist[:,None]))
    # Ranks are computed for every detected vocal frame.
    Lv = len(vox_frames)
    frames2check = range(Lv)
    # Distance analysis (clean vocal frames vs all mixture frames):
    # Euclidean
    dAe = sklearn.metrics.pairwise_distances(np.transpose(A[:,:Lv]),np.transpose(A[:,Lv:]), metric='euclidean')
    dKe = sklearn.metrics.pairwise_distances(np.transpose(K[:,:Lv]),np.transpose(K[:,Lv:]), metric='euclidean')
    dPe = sklearn.metrics.pairwise_distances(np.transpose(P[:,:Lv]),np.transpose(P[:,Lv:]), metric='euclidean')
    # Cosine
    dAc = sklearn.metrics.pairwise_distances(np.transpose(A[:,:Lv]),np.transpose(A[:,Lv:]), metric='cosine')
    dKc = sklearn.metrics.pairwise_distances(np.transpose(K[:,:Lv]),np.transpose(K[:,Lv:]), metric='cosine')
    dPc = sklearn.metrics.pairwise_distances(np.transpose(P[:,:Lv]),np.transpose(P[:,Lv:]), metric='cosine')
    # Sort the distances in ascending order and keep the location.
    dAe_s = np.argsort(dAe, axis = 1)
    dKe_s = np.argsort(dKe, axis = 1)
    dPe_s = np.argsort(dPe, axis = 1)
    dAc_s = np.argsort(dAc, axis = 1)
    dKc_s = np.argsort(dKc, axis = 1)
    dPc_s = np.argsort(dPc, axis = 1)
    # Find rank of the wanted neighbour (the corresponding mixture frame of each clean vocal frame).
    retrieved_rank_Ae = np.array([np.where( dAe_s[i,:] == i )[0][0] for i in frames2check ])
    retrieved_rank_Ae[retrieved_rank_Ae == 0] = 1 # in the case where the vocals are alone in the mix
    retrieved_rank_Ke = np.array([np.where( dKe_s[i,:] == i )[0][0] for i in frames2check ])
    retrieved_rank_Ke[retrieved_rank_Ke == 0] = 1
    retrieved_rank_Pe = np.array([np.where( dPe_s[i,:] == i )[0][0] for i in frames2check ])
    retrieved_rank_Pe[retrieved_rank_Pe == 0] = 1
    retrieved_rank_Ac = np.array([np.where( dAc_s[i,:] == i)[0][0] for i in frames2check ])
    retrieved_rank_Ac[retrieved_rank_Ac == 0] = 1
    retrieved_rank_Kc = np.array([np.where( dKc_s[i,:] == i)[0][0] for i in frames2check ])
    retrieved_rank_Kc[retrieved_rank_Kc == 0] = 1
    retrieved_rank_Pc = np.array([np.where( dPc_s[i,:] == i)[0][0] for i in frames2check ])
    retrieved_rank_Pc[retrieved_rank_Pc == 0] = 1
    # Mean reciprocal rank per representation and distance.
    Ae_mrr = np.mean(1./retrieved_rank_Ae)
    Ke_mrr = np.mean(1./retrieved_rank_Ke)
    Pe_mrr = np.mean(1./retrieved_rank_Pe)
    Ac_mrr = np.mean(1./retrieved_rank_Ac)
    Kc_mrr = np.mean(1./retrieved_rank_Kc)
    Pc_mrr = np.mean(1./retrieved_rank_Pc)
    # Store results:
    df_mrr = pd.DataFrame([[Ac_mrr,Ae_mrr,Kc_mrr,Ke_mrr,Pc_mrr,Pe_mrr]], columns=['Ac', 'Ae', 'Kc' ,'Ke','Pc','Pe'])
    df_mrr.to_csv ('results04_Test/df_mrr_%s.csv' % os.path.basename(subdir), index = None, header=True)
# ----------------------------------------------------------------------------------------------------------
# BUG FIX: the separator line above was not commented out in the original,
# which made the whole script a SyntaxError.
# Path to the audio dataset:
dir = "AUDIO/DSD100/Mixtures/Test" # <----------------------- SET PATH TO DATASET
# Launch one worker process per mixture file found under `dir`.
# NOTE(review): this spawns ALL workers at once (one per song) with no pool
# size limit; consider multiprocessing.Pool for large datasets.
procs = []
for subdir, dirs, files in os.walk(dir) :
    for filename in files:
        print("Processing %s" % subdir)  # FIX: parenthesized print is valid on both Python 2 and 3
        proc = mp.Process(target=processFile, args=(subdir, filename))
        proc.start()
        procs.append(proc)
# Wait for every worker to finish before reporting completion.
for pr in procs:
    pr.join()
print("All done!")
| 6,301 | 37.426829 | 162 |
py
|
vgspectra
|
vgspectra-master/experiment01.py
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# AUTHOR: Delia Fano Yela
# DATE: February 2019
# CONTACT: d.fanoyela@qmul.ac.uk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Import the libraries that help for this experiment,
# however, none are strictly necessary here
import os
import librosa
from librosa import load
import numpy as np
import pandas as pd
# import seaborn as sns
import sklearn.metrics
import time
import sys
#from tabulate import tabulate
# Import the Divide & Conquer natural visibility graph implementation (necessary)
from visibility_algorithms import nvg_dc
# Testing parameters:
dir = "AUDIO/synth_dataset/" # Define the path to the synthesised test data
snr_values = [-24, -12, -6, -3, 0, 3, 6] # noise levels tested (dB)
metrics = ['euclidean', 'cosine'] # distance metrics tested
# Define the divisions for the audio file corresponding to the following MIDI notes:
# [A2,B2,C3,D3,E3,F3,G3,A3,B3,C4,D4,E4,F4,G4 ]
L = 22000 # length of a note in samples
# NOTE(review): list-concatenation of two range() calls is Python 2 only;
# under Python 3 this line needs list(range(...)) + list(range(...)).
div = range(0,2*L,L) + range(573000,(12*L + 573000), L)
# INITS ------------------------------------------------------------------------
# Max frequency is sampling rate 44100, and the FFT size is 16384; so the frequency increase is 44100/16384 = 2.7 Hz
# The highest note in the dataset is C5, 523.25Hz. If we want to include 10 harmonics we need 523.25 * 10 / 2.7 bins of
# the FFT, 1943.974 bins, rounding up to 2000.
Nf = 16384 # FFT size in samples
N = 2000 # number of bins of interest in the FFT
mu, sigma = 0, 1 # mean and standard deviation for gaussian noise
# Pandas DataFrame that will contain the results
df_mrr = pd.DataFrame({'MRR':[], 'ftype':[], 'dtype':[], 'SNR':[]})
# Main experiment loop: for every distance metric and SNR level, build the
# three representations (spectrum, degree, degree distribution) for each
# (clean, noisy) note pair and measure clean->noisy retrieval MRR.
# NOTE(review): this script uses Python 2 `print` statements throughout.
for dis in metrics:
    print "/n Distance Metric: ", dis
    for snr in snr_values:
        print "/n SNR:", snr
        # Start processing--------------------------------------------------------------
        # Column layout of A/K/P: even columns = clean note, odd columns = its noisy version.
        A = np.empty([N,0]) # pair column : abs(FT(cleannote)), impair colum : abs(FT(noisynote))
        K = np.empty([N,0]) # pair column : degree(FT(cleannote)), impair colum : degree(FT(noisynote))
        P = np.empty([N,0]) # pair column : degreedistribution(FT(cleannote)), impair colum : degree(FT(noisynote))
        for subdir, dirs, files in os.walk(dir):
            for i, filename in enumerate(files):
                signal, fs = load(os.path.join(subdir, filename), sr=44100, mono=True)
                # Single-line text progress bar over the files of this folder.
                sys.stdout.write('\r')
                # the exact output you're looking for:
                sys.stdout.write("[%s%s] %d%%" % ('='*i, ' '*((len(files)-1)-i), 100.*i/(len(files)-1)))
                sys.stdout.flush()
                time.sleep(0.25)
                for index in div:
                    # TIME DOMAIN ---------------------------------------------------
                    # One note (Nf samples) plus white gaussian noise scaled so the
                    # mixture m has the requested SNR in dB.
                    s = signal[index:index + Nf]
                    n = np.random.normal(mu, sigma, Nf)
                    #sfc = snr_scaling_factor( signal = s, noise = n, SNRdB = snr )
                    sfc = np.sqrt(np.sum(s**2)/np.sum(n**2))*10**(-snr/20.)
                    m = s + sfc*n
                    # FFT DOMAIN ---------------------------------------------------
                    sa = np.abs(np.fft.fft(np.array(s)))
                    ma = np.abs(np.fft.fft(np.array(m)))
                    # Crop FFT to relevant part:
                    sa = sa[:N]
                    ma = ma[:N]
                    # NVG DOMAIN ---------------------------------------------------
                    sn = nvg_dc(series = sa.tolist() , timeLine = range(N), left = 0, right = N)
                    mn = nvg_dc(series = ma.tolist() , timeLine = range(N), left = 0, right = N)
                    # Adjacency matrix from the horizontal connections:
                    # NOTE(review): `Adj_s[el]` relies on numpy interpreting the
                    # [node, neighbour-list] pair as a (row, columns) index --
                    # behaviour deprecated in modern numpy; confirm before upgrading.
                    Adj_s = np.zeros((N, N))
                    for el in sn:
                        Adj_s[el] = 1
                        Adj_s[el[-1::-1]] = 1
                    Adj_m = np.zeros((N, N))
                    for el in mn:
                        Adj_m[el] = 1
                        Adj_m[el[-1::-1]] = 1
                    # Degree from adjancecy matrix:
                    sk = np.sum(Adj_s, axis = 0)
                    mk = np.sum(Adj_m, axis = 0)
                    # Degree distribution
                    sp = np.bincount(sk.astype(int), minlength = N).astype('float64') / N
                    mp = np.bincount(mk.astype(int), minlength = N).astype('float64') / N
                    # Stack in processing matrix -----------------------------------
                    A = np.hstack((A,sa[:,None],ma[:,None]))
                    K = np.hstack((K,sk[:,None],mk[:,None]))
                    P = np.hstack((P,sp[:,None],mp[:,None]))
        # SELF-SIMILARITY MATRIX -----------------------------------
        dA = sklearn.metrics.pairwise_distances(np.transpose(A), metric=dis)
        dK = sklearn.metrics.pairwise_distances(np.transpose(K), metric=dis)
        dP = sklearn.metrics.pairwise_distances(np.transpose(P), metric=dis)
        # Sort -----------------------------------------------------
        dA_s = np.argsort(dA, axis = 1)
        dK_s = np.argsort(dK, axis = 1)
        dP_s = np.argsort(dP, axis = 1)
        # MEAN RECIPROCAL RANK -------------------------------------
        # Even columns are clean notes, odd columns their noisy versions. The
        # retrieved rank is the position of the noisy note among the sorted
        # neighbours of its clean note (position 0 is the clean note itself,
        # at distance 0, so ranks are naturally >= 1).
        noisy = range(1,dA.shape[0],2)
        clean = range(0,dA.shape[0],2)
        retrieved_rank_A = [np.where(dA_s[clean[i],:] == noisy[i])[0][0] for i in range(0,len(clean)) ]
        retrieved_rank_K = [np.where(dK_s[clean[i],:] == noisy[i])[0][0] for i in range(0,len(clean)) ]
        retrieved_rank_P = [np.where(dP_s[clean[i],:] == noisy[i])[0][0] for i in range(0,len(clean)) ]
        mrrA = np.mean(1./np.array(retrieved_rank_A))
        mrrK = np.mean(1./np.array(retrieved_rank_K))
        mrrP = np.mean(1./np.array(retrieved_rank_P))
        print "\nmean reciprocal rank A: ", np.mean(1./np.array(retrieved_rank_A))
        print "mean reciprocal rank K: ", np.mean(1./np.array(retrieved_rank_K))
        # Store ----------------------------------------------------
        df_mrr = df_mrr.append(pd.DataFrame([[mrrA, 'spectrum', dis, snr]], columns = ['MRR', 'ftype', 'dtype', 'SNR']), ignore_index=True)#,sort=True)
        df_mrr = df_mrr.append(pd.DataFrame([[mrrK, 'degree', dis, snr]], columns = ['MRR', 'ftype', 'dtype', 'SNR']), ignore_index=True)#,sort=True)
        df_mrr = df_mrr.append(pd.DataFrame([[mrrP, 'degree distribution', dis, snr]], columns = ['MRR', 'ftype', 'dtype', 'SNR']), ignore_index=True)#,sort=True)
# Results are written once, after all metrics and SNR levels have been processed.
df_mrr.to_csv ('df_mrr_exp01.csv', index = None, header=True)
| 6,640 | 47.123188 | 162 |
py
|
prfmap
|
prfmap-master/README.md
|
# PRFMap v2.0
Pythonic version of PRFMap, also including several changes in the intimate structure of the code.
The original PRFMap was developed by Andreas Faisst (afaisst@ipac.caltech.edu) and Peter Capak. The program creates a set of FITS files modelling the Spitzer/IRAC Point Response Function (PRF) at different positions across an IRAC mosaic.
**PRFMap in a nutshell**. The code creates a grid of points across an IRAC mosaic (provided in input as FITS image). For each point, it finds the overlapping frames (i.e., the observing blocks) that contributed to the mosaic. PRFMap creates a specific PRF model for each of those frames, as the IRAC PRF is *not* rotationally symmetric and it does depend on the orientation of the frame (its Position Angle, PA). All these PRFs are stacked and the result is the PRF profile of the mosaic at that location.
## Main improvements in the present version
- 100% Python 3, no need to install IRAF and/or R;
- "verbose" and "debug" options to run the code in different modes;
- higher computational efficency and less disk space needed (To Be Completed);
- additional tools e.g. to resample the pixel grid of output PRF models;
- ...
## Known issues
- Parallelization has been designed to work on Candide cluster computer: it may be sub-optimal on other machines.
- Centroid of PRF models still offset with respect to their FITS file pixel grid.
## Installing PRFMap v2
Besides the python scripts in the `src/` folder, one has to populate the `prfmod/` folder, which should contain the basic PRF models of the IRAC camera, which have been characterized as a function of the IRAC channel (from 1 to 4) and the position relative to the detector (i.e., the PRF at the center of a frame is different from the corner). These **basic models** can be downloaded from [the IPAC website](https://irsa.ipac.caltech.edu/data/SPITZER/docs/irac/calibrationfiles/psfprf/).
## How it works
If you prefer to learn by examples, go to the **Test run** section below. The main program is `prfmap.py` and is executed as a python script via command line. It has several functionalities ("tasks") that can be specified with the corresponding argument right after the script name (see below). It also requires a **configuration file** where the most important parameters are specified. The list of functionalities and options that can be printed out with the command `python src/prfmap.py -h`. Note that all the example codes are in a bash shell assuming that the present working directory is the one where you cloned/installed the PRFMAPv2 package.
### The configuration file
It is compulsory to specify the location of the configuration file through the `-c` or `--config` option:
```
$ python task -c /path/to/config.file
$ python task --config=/path/to/config.file
```
where `task` can be any of the tasks decribed in the following. To have an example of configuration file, open `example.par`:
```
## Input config file for PRFMap v2.0 ##
### paths to useful directories
PATH_OUTPUT example_data/prfout_test #the directory where the output will be saved
### file names
FILE_MOSAIC example_data/mosaic/A2744.0.irac.1.mosaic.fits #the IRAC mosaic to be mapped
FILE_FRAMELIST example_data/frames.lst #frames that made the mosaic (list of *_bcd.fits files)
FILE_PRFMOD ch1_prfmap_x10.tbl #full path to the table (downloaded from the IPAC website) including the basic PRF models (in different detector coordinates). FILE_PRFMOD must be located in the directory where those basic PRF models are stored.
FILE_GRID example_data/prfout_test/map_prf_grid.txt #full path to grid file indicating where to evaluate the PRF
FILE_PRFS example_data/prfout_test/prfmap_models_ch1.txt #full path to file listing all frames associated to every grid point
### PRF details
PRF_RAD 8 # core of the PRF (in mosaic's pixels)
GRID_SPACE 6 # to set the distance between two nodes on the PRF map (=PRF_RAD*GRID_SPACE)
PRF_SAMP 100 # (over-)sampling of the basic PRF models listed in FILE_PRFMOD
### Facultative (comment out to activate):
#RA_LIM 149.05,151.07 #min and max RA of the grid
#DEC_LIM 1.38,3.08 #min and max Dec of the grid
```
Please note that any of the input FITS frames listed in FILE_FRAMELIST, or FILE_MOSAIC, can be replaced by an ASCII file containing the FITS header. When the file name does not end with '.fits', PRFMAP assumes it is a header-only ASCII file.
### Other options
- `-v` or `--verbose`: print out comments while running
- `-d` or `--debug`: save additional files for sanity checks
- `-p` or `--parallel`: multi-thread processing
- Moreover, all the paths and file names can be set also via command line, over-writing the parameter in the configuration file. For example: `python src/prfmap.py task -v -d -p PATH_OUTPUT /new/path/`.
### Task 1: draw the grid
```bash
$ python src/prfmap.py grid -c example.par
```
The program analyses the geometry of the IRAC mosaic (specified as FITS file `FILE_MOSAIC`) and draws a rectangular grid where the distance between points is set in the configuration file (`PRF_RAD` times `GRID_SPACE`). To cover only a portion of the mosaic, use the parameters `RA_LIM` and `DEC_LIM` in the configuration file. Coordinates of the grid points, and their ID number, are saved in the ASCII file `FILE_GRID`. The user can create their own (e.g., irregular) grid, as long as the `FILE_GRID` format is respected:
```
# RA Dec X Y ID_GRIDPT
3.7558143 -30.4636978 25 25 1
3.7558042 -30.4465922 25 105 2
...
```
See an example in `example_data/comparison_output/map_prf_grid.txt`. The match between grid points and IRAC frames is made in WCS, so the RA,Dec coordinates of a hand-made `FILE_GRID` must be correct; X,Y are not used in the process so in principle they can be dummy values like -99.
The `--debug` option will print a .reg file to visualize the grid in DS9, and individual grids for each frame in a dedicated sub-folder of the `PATH_OUTPUT` directory.
### Task 2: associate frame PRFs to each grid point
```bash
$ python src/prfmap.py models -c example.par
```
This is a preliminary step analyzing each grid point included in `FILE_GRID`, before preparing their PRF model (Task 3). All frames (`*_bcd.fits` files) overlapping a given grid point are identified, with their orientation (PA) and position on the detector (corresponding to a specific PRF among the basic models included in `FILE_PRFMOD`). This information is saved in the ASCII file `FILE_PRFS` and will be used in the next step of the PRFMAP procedure. Note that the file may be very large: a frame appears multiple times, depending on how many grid points overlap the area of the frame. See below (**Large mosaics and parallelization**) for more details about mosaics made by a large number of frames (or fine grids with a very large number of points).
### Task 3: create the final PRF model for each grid point
```bash
$ python src/prfmap.py stack -c example.par
```
The individual PRFs (corresponding to single frames) that are associated to the same grid point are rotated (accordingly to their PA value) and stacked together. Corresponding grid point IDs and PAs are found in `FILE_PRFS`. The stacked image is the final PRF in that point of the mosaic. All PRFs are stored in `PATH_OUTPUT` in the sub-folder `PRFstack`. Their file names are always `mosaic_gp??????.fits` where ?????? is the integer ID number of that grid point.
### Large mosaics and parallelization
Very deep IRAC observations are made by thousands of frames, and result in a computationally expensive run of PRFMAP. Moreover, the size of `FILE_PRFS` might be so big that Python would not read it. Parameters `RA_LIM` and `DEC_LIM` can be used to create several (smaller) grids to run in parallel. Another possibility is to run the script `break_list.py`, which will fragment a large `FILE_PRFS` into smaller files:
```bash
$ python src/break_list.py -c example.par -b 3
```
In this example, the original `FILE_PRFS` indicated in the configuration file will be divided into three (shorter) files, easier to handle possibly in parallel.
Both Task 2 and 3 can be executed in multi-thread mode with the option `-p`. To comply with the structure of the computer cluster *Candide*, the parallelization is very simple: the list of grid points is divided among the threads and each one (for Task 2) creates a temporary file to store the results. After using `break_list.py`, Task 3 can also be parallelized in the following way (Torque example):
```
#!/bin/bash
#PBS -l nodes=1:ppn=1
#PBS -t 0-2
python src/prfmap.py stack -c example.par FILE_PRFS example_data/prfout_test/prfmap_models_ch1.txt_sub${PBS_ARRAYID}.txt
```
## Test run
```
$ python src/prfmap.py grid -c example.par
$ python src/prfmap.py models -c example.par
$ python src/prfmap.py stack -c example.par
```
Results should be compared to the files included in `example_data/comparison_output/`
## Other utility scripts
To change the resolution of the PRF models generated by PRFMAP, e.g. rescale the FITS file to a 0.6 arcsec/pixel grid:
```
$ python src/resample_prf.py -c example.par -r 0.6 [-v -d -w]
$ python src/resample_prf.py --config=example.par --resample=0.6 [--verbose --debug --overwrite]
```
This takes *all* the FITS files from $PATH_OUTPUT/PRFstack/ and saves the resampled PRFs into $PATH_OUTPUT/PRFstack_06/ (in case the new pixel scale is 0.6"/pxl). If for example --resample=0.15 the new directory will be `PRFstack_015`. A given PRF is not rescaled if the corresponding file already exists in the target directory, unless option -w or --overwrite is activated.
| 9,635 | 74.28125 | 760 |
md
|
prfmap
|
prfmap-master/src/manip_prf.py
|
import os,sys,string
import subprocess
import numpy as np
from astropy.io import fits,ascii
from scipy import ndimage
import multiprocessing as mp
import utils as utl
def rotate(fits_in,fits_out,angle):
    """Rotate a PRF model image by a position angle (degrees).

    This replaces the pyraf/IRAF ``imlintran`` step of PRFMap v1.

    fits_in : str
        Path of the FITS file holding the PRF image to rotate.
    fits_out : str
        Unused; kept for backward compatibility with the old call signature.
    angle : float
        Position angle; the image is rotated by ``-angle`` degrees
        (sign flipped to match the PA convention of the frames).

    Returns the rotated 2-D array, same shape as the input (``reshape=False``).
    """
    image = fits.getdata(fits_in)
    return ndimage.interpolation.rotate(image, -angle, reshape=False, axes=(1, 0))
def stack_at_gp(igp,dat,paths):
"""Stack all the individual frame PRFs contributing to grid point `igp`.

Kept as a stand-alone function so it can be dispatched to worker
processes (see `worker` and `rotate_and_stack`).

igp : int -- grid point ID (ID_GRIDPT)
dat : table -- FILE_PRFS content plus a FILENAME column (basic model file)
paths : list -- [directory of the basic PRF models, output directory]

Writes mosaic_gp{igp:06d}.fits into paths[1]; returns None."""
# rows of `dat` describing the frames that overlap this grid point
sel_frames = np.where(dat['ID_GRIDPT']==igp)
if len(sel_frames[0])==0: return
#avoid overwriting: skip grid points already processed in a previous run
if os.path.isfile('{}/mosaic_gp{:06d}.fits'.format(paths[1],igp)): return
i0 = sel_frames[0][0]
#copy header from the first PRF
stack_hdr = utl.read_fits_head(paths[0]+dat['FILENAME'][i0])
# NOTE(review): np.zeros([NAXIS1,NAXIS2]) transposes rows/columns unless the
# PRF cutout is square -- confirm all basic PRF models are square
stack_img = np.zeros([stack_hdr['NAXIS1'],stack_hdr['NAXIS2']])
for i in sel_frames[0]:
i_frm = dat['FRAME'][i][:-9]  # frame name without '_bcd.fits' (currently unused)
img_in = paths[0]+dat['FILENAME'][i]
# rotation (by the frame position angle) and stacking
stack_img += rotate(img_in,'N/A',dat['PA'][i])
# save the stacking into a new fits file
new_hdu = fits.PrimaryHDU(stack_img,stack_hdr)
#add new info in the header
new_hdu.header['MOSAIC'] = 'N/A'
new_hdu.header['GRID_PT'] = igp
new_hdu.header['RA_PT'] = dat['RA_CEN'][i0]
new_hdu.header['DEC_PT'] = dat['DEC_CEN'][i0]
new_hdu.header['NFRAMES'] = len(sel_frames[0])
new_hdu.writeto('{}/mosaic_gp{:06d}.fits'.format(paths[1],igp))
return
def worker(ids,dat,paths):
    """Process one chunk of grid-point IDs: stack the PRFs at each of them."""
    for grid_id in ids:
        stack_at_gp(grid_id, dat, paths)
def rotate_and_stack(models,opt={},id_list=[],parallel=False,verbose=False):
    """Rotate and stack the individual frame PRFs at every grid point.

    models : list of str
        File names of the basic PRF models (order matches PRF_NUMBER, 1-based).
    opt : dict
        PRFMap configuration; uses FILE_PRFS, FILE_PRFMOD and PATH_OUTPUT.
    id_list : list
        Grid-point IDs to process; empty means "all IDs found in FILE_PRFS".
    parallel : bool
        Split the ID list over multiple worker processes.
    verbose : bool
        Print progress information.

    Results are written as mosaic_gp??????.fits files in $PATH_OUTPUT/PRFstack/.
    """
    filein = opt['FILE_PRFS']
    # Directory holding the basic PRF models (same directory as FILE_PRFMOD).
    cut = opt['FILE_PRFMOD'].rfind('/')
    paths = [opt['FILE_PRFMOD'][:cut+1], opt['PATH_OUTPUT']]
    dat = ascii.read(filein)
    # PRF_NUMBER is 1-based -> map it onto the basic model file names.
    dat['FILENAME'] = [models[i-1] for i in dat['PRF_NUMBER']]
    ids = dat['ID_GRIDPT']  # all grid points, repeated once per overlapping frame
    if len(id_list)==0:
        id_list = sorted(set(ids))  # unique IDs, ascending
    nid = len(id_list)
    paths[1] += 'PRFstack/'
    subprocess.run(['mkdir','-p',paths[1]])  # subdirectory for the stacking results
    if parallel:
        n_avail = mp.cpu_count() - 1  # keep a core free, just in case
        # Guard against nid < n_avail: a zero chunk would make range() step by 0.
        chunk = max(1, nid//n_avail)
        if verbose: print(n_avail,' cores parallel run')
        # Use n_avail workers (the original used n_avail-1, contradicting its
        # own comment and leaving one chunk queued behind the others).
        with mp.Pool(processes=n_avail) as pool:
            # starmap blocks until every chunk is done.
            pool.starmap(worker,[(id_list[i:i+chunk],dat,paths) for i in range(0,nid,chunk)])
    else:
        if verbose: print('Single thread (parallel=False)')
        for igp in id_list:
            stack_at_gp(igp,dat,paths)
    if verbose: print('results are stored in ',paths[1])
| 3,253 | 36.837209 | 99 |
py
|
prfmap
|
prfmap-master/src/rotate_list.py
|
# Script to fast rotate images
# USAGE
#
# python rotate.py INPUTFILE
#
# where INPUTFILE is file name of file containing:
#
# img_in_1 img_out_1 pa_1
# img_in_2 img_out_2 pa_2
# img_in_3 img_out_3 pa_3
#
# Jan 21, 2016 by AF (anfaisst@gmail.com)
#
# MODIFICATION HISTORY
# Aug 17, 2016 by AF: changed dtype=('S1000','S1000','f6') to dtype=('S1000','S1000','f')
#
######################
import os,sys,string
import subprocess
import numpy as np
from pyraf import iraf
#from iraf import images,imgeom,stsdas
# which file
file_name = sys.argv[1]
# one row per image: input path, output path, rotation angle (degrees)
txt = np.genfromtxt(file_name,dtype=('U1000','U1000','f'))
for ii in range(0,txt.size):
# talk: report progress (percentage done) every 10 images
if (ii % 10) == 0:
prec = round((ii*1.)/(txt.size*1.)*100.,2)
print("{:5.2f}".format(prec))
img_in = txt[ii][0]
img_out = txt[ii][1]
pa = txt[ii][2]
#dirty trick: copy the file in the same partition so IRAF can access it
subprocess.run(['cp',img_in,'tmp.fits'])
# identity scaling (xmag=ymag=1), pure rotation by `pa` with drizzle interpolation
iraf.imlintran('tmp.fits', 'tmpout.fits',
xmag=1, ymag=1,
xin="INDEF",yin="INDEF",
xout="INDEF",yout="INDEF",
interp="drizzle",
xrot=pa, yrot=pa, verbose=0)
subprocess.run(['cp','tmpout.fits',img_out])
# clean up the temporary files before the next iteration
subprocess.run(['rm','tmp.fits','tmpout.fits'])
| 1,262 | 24.26 | 89 |
py
|
prfmap
|
prfmap-master/src/utils.py
|
import numpy as np
from astropy.io import ascii,fits
from astropy import table
def read_fits(filename,hdu=0):
    """Return the data (array or table) stored in extension `hdu` of a FITS file."""
    with fits.open(filename) as hdul:
        return hdul[hdu].data
def read_fits_head(filename,hdu=0):
    """Return the header of extension `hdu` of a FITS file."""
    with fits.open(filename) as hdul:
        return hdul[hdu].header
def read_ascii_head(filename):
    """Read a FITS header that was previously dumped to an ASCII file.

    filename : str
        Text file containing the header cards, one per line.

    Returns an astropy `fits.Header` reconstructed from the text.
    """
    # Context manager guarantees the file is closed even if reading fails
    # (the original open/read/close leaked the handle on exceptions).
    with open(filename,'r') as f:
        s = f.read()
    return fits.Header.fromstring(s,sep='\n')
def xy_to_sky(img_hdr,x,y,start=1):
    """Convert pixel coordinates (x, y) into (RA, Dec).

    The WCS transformation is taken from the FITS header `img_hdr`;
    `start` is the pixel-origin convention (1 = FITS, 0 = numpy).
    """
    from astropy.wcs import WCS
    return WCS(header=img_hdr).all_pix2world(x, y, start)
def sky_to_xy(img_hdr,ra,dec,start=1):
    """Convert sky coordinates (RA, Dec) into image pixel coordinates.

    The WCS transformation is taken from the FITS header `img_hdr`;
    `start` is the pixel-origin convention (1 = FITS, 0 = numpy).
    """
    from astropy.wcs import WCS
    return WCS(header=img_hdr).all_world2pix(ra, dec, start)
def make_grid(filename,step=8,ra_lim=[],dec_lim=[],hdu=0,write=False,buffer=24):
    """Build a regular grid of points covering a FITS image.

    filename : str
        FITS image, or ASCII dump of a FITS header (detected by the extension).
    step : int
        Grid spacing in pixels.
    ra_lim, dec_lim : list
        Optional [min, max] limits; grid points outside them are discarded.
    hdu : int
        Extension to read when `filename` is a FITS file.
    write : str or False
        If a file name, the grid table is also saved there
        (format ascii.commented_header).
    buffer : int
        Margin in pixels kept clear along every image edge (default 24,
        previously hard-coded).

    Returns an astropy Table with columns RA, Dec, X, Y, ID_GRIDPT
    (IDs start at 1 and follow an x-major ordering).
    """
    if filename[-5:]=='.fits':
        hdr = read_fits_head(filename,hdu=hdu)
    else:
        hdr = read_ascii_head(filename)
    xmax = hdr['NAXIS1']
    ymax = hdr['NAXIS2']
    # Regular pixel grid over the whole image, keeping a safety buffer along
    # the borders. meshgrid with indexing='ij' reproduces the x-major ordering
    # of the original nested loops, so grid-point IDs stay reproducible.
    xs = np.arange(1+buffer, xmax-buffer, step)
    ys = np.arange(1+buffer, ymax-buffer, step)
    gx, gy = np.meshgrid(xs, ys, indexing='ij')
    x_gr = gx.ravel()
    y_gr = gy.ravel()
    #convert to WCS
    ra_gr,dec_gr = xy_to_sky(hdr,x_gr,y_gr)
    #keep grid points only inside the limits (if they are given)
    if len(ra_lim)==2:
        in_ra = np.where( (ra_gr>ra_lim[0])&(ra_gr<ra_lim[1]) )[0]
        x_gr = x_gr[in_ra]; y_gr = y_gr[in_ra]
        ra_gr = ra_gr[in_ra]; dec_gr = dec_gr[in_ra]
    if len(dec_lim)==2:
        in_dec = np.where( (dec_gr>dec_lim[0])&(dec_gr<dec_lim[1]) )[0]
        x_gr = x_gr[in_dec]; y_gr = y_gr[in_dec]
        ra_gr = ra_gr[in_dec]; dec_gr = dec_gr[in_dec]
    points = table.Table()
    points['RA'] = ra_gr
    points['Dec'] = dec_gr
    points['X'] = x_gr
    points['Y'] = y_gr
    points['ID_GRIDPT'] = np.arange(1,len(x_gr)+1,dtype='int32')
    if write:
        points.write(write,format='ascii.commented_header')
    return points
def grid_in_frame(points,fname):
    """Select the grid points that fall inside a given frame.

    points : astropy Table
        Full grid from `make_grid` (columns RA, Dec, ID_GRIDPT).
    fname : str
        Frame FITS file, or ASCII header dump (detected by the extension).

    Returns a table with RA, Dec, X, Y (pixel coords in the frame reference
    system) and ID_GRIDPT for the points inside the frame.
    """
    # find which grid points are within this frame
    if fname[-5:] == '.fits':
        frame_hdr = read_fits_head(fname)
    else:
        frame_hdr = read_ascii_head(fname)  # fixed NameError: was `fnam`
    # Sky coordinates of the four frame corners.
    # NOTE(review): the corner list mixes NAXIS1/NAXIS2 between x and y; it is
    # only used for a min/max sky box, which still bounds the frame -- confirm.
    edges_xy = np.array([ (1,1), (1,frame_hdr['NAXIS1']), (frame_hdr['NAXIS2'],1), (frame_hdr['NAXIS2'],frame_hdr['NAXIS1'])])
    edges_sky = xy_to_sky(frame_hdr,x=edges_xy[:,0],y=edges_xy[:,1])
    # First cut: keep points inside the RA/Dec bounding box of the frame.
    gp_in = np.where( (points['RA']>edges_sky[0].min()) & (points['RA']<edges_sky[0].max()) & (points['Dec']>edges_sky[1].min()) & (points['Dec']<edges_sky[1].max()) )[0]
    points_frm = points['RA','Dec'][gp_in]
    #and convert them to pixel coord in the frame ref system
    x,y = sky_to_xy(frame_hdr,ra=points_frm['RA'],dec=points_frm['Dec'],start=1)
    points_frm['X'] = x
    points_frm['Y'] = y
    points_frm['ID_GRIDPT'] = [int(i) for i in points['ID_GRIDPT'][gp_in]] #force integer dtype
    # Second cut: the sky box is only approximate, so drop points whose pixel
    # coordinates fall outside the actual frame area.
    gp_in2 = np.where( (x>0.) & (y>0.) & (x<frame_hdr['NAXIS1']) & (y<frame_hdr['NAXIS2']) )
    return points_frm[gp_in2]
| 4,002 | 37.12381 | 170 |
py
|
prfmap
|
prfmap-master/src/break_list.py
|
import sys,os,getopt
from astropy.io import fits,ascii
from read_par import read_par
import numpy as np
if __name__ == '__main__':
# Split a large FILE_PRFS into `pcs` smaller sub-lists (one file each) so
# that Task 3 (stacking) can be run in parallel on shorter inputs.
#################################
### GET OPTIONS from COMMAND LINE
verbose = False #default value
debug = False #default value
prll = False #default value
options, args=getopt.getopt(sys.argv[1:],"hvdpc:b:",["help","verbose","debug","parallel","config=","break="])
# NOTE(review): `pcs` and `paramfile` stay unbound if -b or -c are omitted,
# causing a NameError further down -- confirm intended to rely on the crash
for opt, arg in options:
if opt in ('-b','--break'): pcs = int(arg) #number of pieces (ie, sub-lists) to divide the original prf_models list
elif opt in ('-c','--config'):
paramfile = arg
fopt = read_par(paramfile)
elif opt in ('-v','--verbose'): verbose = True
elif opt in ('-d','--debug'): debug = True
elif opt in ('-p','--parallel'): print('Multi-thread not avilable (yet)')
elif opt in ('-h','--help'):
print("""
Help message TBD
""")
sys.exit()
else:
sys.exit("Incorrect syntax. Use -h to print list of options.")
# positional args may override config-file parameters: KEY value pairs
for a,arg in enumerate(args):
if arg in read_par(paramfile,list_out=True):
fopt[arg] = args[a+1]
print("Option {} manually set to {}".format(arg,args[a+1]))
# original list
lgp = ascii.read(fopt['FILE_GRID'])
# create the new sub-lists
lof = [] #list of output files (ie, the sub-lists)
for i in range(pcs):
lof.append(open(fopt['FILE_PRFS']+'_sub{}.txt'.format(i),'w'))
bpt = len(lgp)//pcs #number of grid pt per sub-list
break_id = [] #list of grid pt IDs where to cut the original list
for i,idgp in enumerate(lgp['ID_GRIDPT']):
if (i+1)%bpt==0: break_id.append(idgp)
# last break point set past the maximum ID so every line finds a bucket
break_id[-1] = max(lgp['ID_GRIDPT'])+1
if verbose:
print('ID numbers where to divide the original list:\n',break_id)
break_id = np.array(break_id)
with open(fopt['FILE_PRFS'],'r') as filein:
for l in filein:
if l[0]=="#":
for i in range(pcs): lof[i].write(l) #write header
else:
idgp = int(l.split()[0])
# first sub-list whose break ID exceeds idgp (idgp//b == 0 <=> idgp < b)
pc = np.where(idgp//break_id==0)[0][0]
lof[pc].write(l)
for i in range(pcs): lof[i].close() #close all the sub-list files
| 2,331 | 36.612903 | 125 |
py
|
prfmap
|
prfmap-master/src/prfmap.py
|
import sys,os,getopt
import numpy as np
import subprocess
from astropy.io import ascii
import utils as utl
from read_par import read_par
from draw_grid import draw_grid
from find_models import find_models
from manip_prf import rotate_and_stack
#############################################
if __name__ == '__main__':
    #################################
    ### GET OPTIONS from COMMAND LINE
    verbose = False #default value
    debug = False #default value
    prll = False #default value
    paramfile = None #must be set with -c/--config
    # The task must be the first command-line argument (before any option).
    if len(sys.argv) < 2:
        sys.exit("ERROR: no task specified (use -h for the list of options)")
    task = sys.argv[1]
    # Handle `prfmap.py -h` here: the original parsed options from argv[2:],
    # so the help branch below was unreachable and the run crashed on an
    # unbound `paramfile`.
    if task in ('-h','--help'):
        print("""
        Help message TBD
        """)
        sys.exit()
    if task[0] == '-': sys.exit("ERROR: it seems you forgot to define the task to perform")
    options, args = getopt.getopt(sys.argv[2:],"hvdpc:",["help","verbose","debug","parallel","config="])
    for o, arg in options:
        if o in ('-c','--config'): paramfile = arg
        if o in ('-v','--verbose'): verbose = True
        if o in ('-d','--debug'): debug = True
        if o in ('-p','--parallel'): prll = True
        if o in ('-h','--help'):
            print("""
            Help message TBD
            """)
            sys.exit()  # nothing else to do after printing the help
    if paramfile is None:
        sys.exit("ERROR: no configuration file given (use -c/--config)")
    if verbose and prll: print('--- Multi-thread ON')
    ##################
    ### PARAMETERS ###
    opt = read_par(paramfile) #get parameters from the config file
    #modify options from command line (positional KEY value pairs)
    for a,arg in enumerate(args):
        if arg in read_par(paramfile,list_out=True):
            opt[arg] = args[a+1]
            if verbose: print("--- Option {} set to {} from command line".format(arg,args[a+1]))
    # create the output dir
    subprocess.run(['mkdir','-p',opt['PATH_OUTPUT']])
    #############################################
    ### GATHER INFO ON PRF MODELS and FRAMES ###
    modelfile = opt['FILE_PRFMOD']
    prfmod = ascii.read(modelfile,format='ipac') #(PRFNum NAXIS1 NAXIS2 PRFPos1 PRFPos2)
    # File names are in the header, but astropy does not expose these `\char`
    # keywords as meta values, so they are parsed by hand, e.g.:
    # \char PRF_Filename_1 = apex_sh_IRAC1_col025_row025_x100.fits
    with open(modelfile,'r') as fin:  # context manager: the original leaked the handle
        fin.readline() #first line is not used
        n_mod = int(fin.readline().split()[-1])
        xmax_mod = fin.readline().split()[-1]
        ymax_mod = fin.readline().split()[-1]
        if debug: print('----- The basic PRF models are:')
        f_mod = []
        for i in range(n_mod):
            f_mod.append(fin.readline().split()[-1])
            if debug: print(f_mod[i])
    if debug: print('----- ')
    # list of frames to analyse
    frame_list = np.loadtxt(opt['FILE_FRAMELIST'],comments="#",dtype='str')
    nam = [] #only the file names, without the directory part
    for inm in frame_list:
        r = inm[::-1]
        r = r[:r.find('/')]
        nam.append(r[::-1])
    opt['NAME_FRAME'] = [f[:f.find('_bcd.fits')] for f in nam]
    ###############
    ### TASKS ###
    #Step 1: MAKE THE GRID OVER THE ENTIRE MOSAIC
    # (or a sub-region, if RA_LIM and DEC_LIM are set in the config file)
    if task=='grid':
        draw_grid(frame_list,opt=opt,verbose=verbose,debug=debug)
    #Step 2: CREATE A LIST WITH INDIVIDUAL PRFs IN EACH GRID POINT
    # for every frame overlapping on any grid point
    elif task=='models':
        find_models(frame_list,prfmod,opt=opt,verbose=verbose,debug=debug,parallel=prll)
    #Step 3: STACK THE INDIVIDUAL PRFs
    # (use `id_list` to specify a sub-sample of grid points where to stack)
    elif task=='stack':
        rotate_and_stack(f_mod,opt=opt,id_list=[],parallel=prll,verbose=verbose)
    #Wrong task in input
    else:
        sys.exit("ERROR: task '{}' does not exist".format(task))
| 3,602 | 35.765306 | 125 |
py
|
prfmap
|
prfmap-master/src/find_models.py
|
import sys,os
import subprocess
import numpy as np
from astropy import table
from astropy.io import ascii
import utils as utl
import multiprocessing as mp
from pathlib import Path
#global
# this are the column names of FILE_PRFS
colnames = ['ID_GRIDPT','PRF_NUMBER','RA_CEN','DEC_CEN','PA','FRAME']
# need this because np.argmin breaks in multiprocessing (why?)
def argmin(a):
    """Index of the smallest element of `a` (first one on ties).

    Pure-Python replacement for np.argmin, which misbehaves when the
    table data is shared across worker processes.
    """
    return min(enumerate(a), key=lambda pair: pair[1])[0]
#select models for single frame
def single_frame(fname,points,coord,verbose):
    """Associate every grid point inside one frame with its nearest PRF model.

    fname : str
        Frame FITS file (or ASCII header dump, detected by the extension).
    points : astropy Table
        Full grid (RA, Dec, ID_GRIDPT) from FILE_GRID.
    coord : tuple
        (PRFPos1, PRFPos2) detector positions of the basic PRF models.
    verbose : bool
        Print one line once the frame has been inspected.

    Returns a list of rows [ID_GRIDPT, PRF_NUMBER, RA, Dec, PA, frame_name].
    """
    points1 = utl.grid_in_frame(points,fname)
    # The frame PA is the same for every grid point: read the header once,
    # not once per point as before (loop-invariant disk I/O hoisted).
    if fname[-5:]=='.fits':
        pa = utl.read_fits_head(fname)['PA'] #rotation
    else:
        pa = utl.read_ascii_head(fname)['PA']
    #extract just the name of the frame, not the full path
    frame_name = fname[fname.rfind('/')+1:]
    x = coord[0]; y = coord[1]
    rows = []
    for i in range(len(points1)):
        x0 = points1['X'][i]; y0 = points1['Y'][i]
        # simple (squared) Euclidean distance; a k-d tree would be faster
        # for the nearest-neighbour model search
        dist = (x-x0)**2 + (y-y0)**2
        i0 = argmin(dist.data) # nearest model (np.argmin breaks in multiprocessing)
        rows.append( [points1['ID_GRIDPT'][i],i0+1,points1['RA'][i],points1['Dec'][i],pa,frame_name] )
    if verbose: print('--- Inspected frame {} ---'.format(fname))
    return rows
#select models for a list of frames
def multi_frame(fnames,points,coord,verbose,queue):
    """Worker for the parallel run: process a chunk of frames.

    Same model selection as `single_frame` (which is reused instead of
    duplicating its logic); the rows are streamed to the temporary file
    `queue`, later concatenated into FILE_PRFS by `find_models`.
    """
    # Context manager guarantees the temp file is flushed and closed even if
    # one of the frames fails (the original left the handle open on errors).
    with open(queue,'w') as out_tmp:
        for fname in fnames:
            # per-frame chatter disabled in workers (as in the original,
            # where the verbose print was commented out)
            for r in single_frame(fname,points,coord,verbose=False):
                out_tmp.write('{:9d} {:6d} {:11.6f} {:11.6f} {:9.4f} {}\n'.format(r[0],r[1],r[2],r[3],r[4],r[5]))
#body of the program to associate PRF models to grid points
def find_models(frame_list,prfmap,opt={},debug=False,verbose=False,parallel=False):
    """
    Associate a PRF model (and its rotation) to every grid point, per frame.

    For every frame overlapping each grid point, one row is appended to the
    summary file FILE_PRFS with columns:
    ID_GRIDPT | PRF_NUMBER | RA_CEN | DEC_CEN | PA | FRAME
      PRF_NUMBER : 1-based index of the basic PRF model nearest in detector coords
      PA         : PRF orientation, i.e. the position angle of the frame
      FRAME      : file name of the frame contributing a PRF at this point

    frame_list : list of frame file names
    prfmap : IPAC table of the basic models (uses PRFPos1/PRFPos2)
    opt : PRFMap configuration dict
    """
    if verbose:
        modelfile = opt['FILE_PRFMOD']
        print('--- Select PRF models \n--- originary in {} \n--- and now re-arranged in {} ---'.format(modelfile,opt['FILE_GRID']))
    points = ascii.read(opt['FILE_GRID'])
    filename = '{}'.format(opt['FILE_PRFS'])
    if Path(filename).is_file(): sys.exit('--- ERROR: file exists ({}) '.format(filename))
    # Write the header line and close the file. The original called
    # `fout.close` without parentheses, silently leaving the file open
    # (the serial branch below depended on that bug to keep writing).
    with open(filename,'w') as fout:
        fout.write('# {} {} {} {} {} {}\n'.format(colnames[0],colnames[1],colnames[2],colnames[3],colnames[4],colnames[5]))
    if parallel:
        nproc = mp.cpu_count()-1 #no real reason to keep one CPU free
        nfram = len(frame_list)
        # max(1, ...) guards against nfram < nproc: a zero chunk would make
        # range() step by 0 and crash.
        nchunk = max(1, nfram//nproc)
        nproc = 0
        proc = []
        output0 = '{}/fm_tmp_{}.txt'
        df = nchunk
        for b in range(0,nfram,df):
            if b+nchunk>=nfram: df = nfram-b #last chunk takes the remainder
            if verbose: print('frames from {} to {}'.format(b+1,b+df+1))
            fnames = frame_list[b:b+df]
            output = output0.format(opt['PATH_OUTPUT'],nproc)
            proc.append( mp.Process(target=multi_frame,args=(fnames,points,(prfmap['PRFPos1'],prfmap['PRFPos2']),verbose,output)) )
            nproc += 1
        if verbose: print('--- There are {} frames split into {} cores for parallel run'.format(nfram,nproc))
        for p in proc: p.start()
        for p in proc: p.join()
        if verbose: print('--- Writing output now for the {} processes'.format(nproc))
        # Concatenate the per-process temporary files into FILE_PRFS.
        with open(filename,'a') as fout:
            for p in range(nproc):
                tmp_name = output0.format(opt['PATH_OUTPUT'],p)
                with open(tmp_name,'r') as fin:
                    fout.write(fin.read())
                subprocess.run(['rm',tmp_name])
    else:
        with open(filename,'a') as fout:
            for fname in frame_list:
                rows = single_frame(fname,points,(prfmap['PRFPos1'],prfmap['PRFPos2']),verbose=verbose)
                for r in rows:
                    fout.write('{:9d} {:6d} {:11.6f} {:11.6f} {:9.4f} {}\n'.format(r[0],r[1],r[2],r[3],r[4],r[5]))
    if verbose: print('--- The PRF models correctly oriented \n--- are described in {} ---'.format(filename))
| 5,529 | 45.470588 | 176 |
py
|
prfmap
|
prfmap-master/src/draw_grid.py
|
import numpy as np
import subprocess
import utils as utl
def draw_grid(frame_list,opt={},verbose=False,debug=False):
""" draw_grid(frame_list,opt={},verbose=False,debug=False)
Read the mosaic properties from its FITS file and define a grid of points where the PRF will be estimated.
Grid coordinates (and ID of the nodes) are saved into an ASCII file (FILE_GRID).
frame_list: str
ASCII file with list of frames (*_bcd.fits files) resulting in the final image mosaic
(same file is indicated as FILE_FRAMELIST in PRFMap config file)
opt: dict
all the other parameters of PRFMap config file (FILE_MOSAIC, PRF_RAD, etc.)
verbose: bool
print on screen additional information
debug: bool
if True, a sub-folder $PATH_OUTPUT/frame_grids/ is created and the individual grid for each frame
is saved in *_GRID.dat ASCII files.
"""
if verbose: print('--- Make the grid over the entire mosaic \n--- which is {} ---'.format(opt['FILE_MOSAIC']))
# grid spacing is PRF_RAD*GRID_SPACE pixels; RA/Dec limits applied if given
points = utl.make_grid(opt['FILE_MOSAIC'],step=opt['PRF_RAD']*opt['GRID_SPACE'],ra_lim=opt['RA_LIM'],dec_lim=opt['DEC_LIM'],write=opt['FILE_GRID'])
if verbose: print('--- Ouput file with list of grid points \n--- is located in {} ---'.format(opt['FILE_GRID']))
### select the grid points within each frame
if debug:
print("------ Print a DS9 .reg file in the same directory ------")
gr = np.loadtxt(opt['FILE_GRID'])
fds9 = open(opt['FILE_GRID']+'.reg','w')
fds9.write('# Region file format \nfk5\n')
for i in gr:
# FILE_GRID columns: RA, Dec, X, Y, ID_GRIDPT -> label each circle with its ID
fds9.write("circle({:.6f},{:.6f}, 1.0\") # text = {{ {:g} }}\n".format(i[0],i[1],i[4]))
fds9.close()
# NOTE(review): the flattened indentation makes it ambiguous whether this
# frame loop runs only under `if debug:`; the README describes per-frame
# grid files as a --debug feature -- confirm before refactoring
for f,fname in enumerate(frame_list):
if verbose: print('------ Processing frame #{}'.format(f+1))
#extract just the name of the frame, not the full path
points1 = utl.grid_in_frame(points,fname)
# NOTE(review): mkdir is loop-invariant (re-run every iteration), and
# overwrite=False makes a second run fail once *_GRID.dat exists -- confirm
subprocess.run(['mkdir','-p','{}/frame_grids/'.format(opt['PATH_OUTPUT'])])
points1.write('{}/{}_GRID.dat'.format(opt['PATH_OUTPUT']+'/frame_grids/',opt['NAME_FRAME'][f]),format='ascii.commented_header',formats={'ID_GRIDPT':'%8g','RA':'%12.6f','Dec':'%12.6f','X':'%12.3f','Y':'%12.3f'},overwrite=False)
py
|
prfmap
|
prfmap-master/src/read_par.py
|
def read_par(paramfile,list_out=False):
    """Parse a PRFMap v2 configuration file into a dict.

    paramfile : str
        Path of the configuration file ("KEYWORD value" lines, '#' comments).
    list_out : bool
        If True, return only the tuple of recognised keywords.

    Returns the options dict: PRF_RAD/GRID_SPACE/PRF_SAMP are cast to int,
    RA_LIM/DEC_LIM become [min, max] float lists (empty when absent), PATH_*
    entries get a trailing slash, and NAME_MOSAIC holds the mosaic base name.
    Exits with an error message on duplicate or missing mandatory keywords.
    """
    import sys  # local import: the module has no import block but needs sys.exit
    # These are the parameter names that may appear in the config file
    # (the last `optpar` entries are optional):
    keywords = ('PATH_OUTPUT','FILE_MOSAIC','FILE_PRFMOD','FILE_FRAMELIST','FILE_GRID','FILE_PRFS','PRF_RAD','GRID_SPACE','PRF_SAMP','RA_LIM','DEC_LIM')
    if list_out:
        return keywords
    optpar = 2 #number of optional parameters (at the end of tuple `keywords`)
    opt = {}
    with open(paramfile) as f:
        for l in f:
            if len(l)<=1 or l[0]=="#":
                continue
            elif l.split()[0] in keywords:
                par = l.split()[0]
                val = l.split()[1]
                if par in opt.keys(): sys.exit("ERROR: keyword defined more than once in param file {}".format(paramfile))
                opt[par] = val
    #adjust RA_LIM and DEC_LIM
    if 'RA_LIM' in opt.keys(): opt['RA_LIM'] = [float(i) for i in opt['RA_LIM'].split(',')]
    else: opt['RA_LIM'] = []
    if 'DEC_LIM' in opt.keys(): opt['DEC_LIM'] = [float(i) for i in opt['DEC_LIM'].split(',')]
    else: opt['DEC_LIM'] = []
    #sanity check and format conversion
    for par in keywords[:-optpar]:
        if par not in opt.keys(): sys.exit("ERROR: missing parameter {} in the config file {}".format(par,paramfile))
        if par in ('PRF_RAD','GRID_SPACE','PRF_SAMP'): opt[par] = int(opt[par])
        if 'PATH_' in par and opt[par][-1]!='/' : opt[par]+='/' #add slash at the end of a directory path
    # Mosaic base name: remove the '.fits' suffix explicitly. The original
    # used str.strip('.fits'), which strips any of the characters
    # {., f, i, t, s} from both ends and mangled names like 'mos.fits'->'mo'.
    dumm = opt['FILE_MOSAIC']
    if dumm.endswith('.fits'):
        dumm = dumm[:-5]
    # rfind returns -1 when there is no '/', so a bare file name also works
    # (the original raised ValueError in that case).
    opt['NAME_MOSAIC'] = dumm[dumm.rfind('/')+1:]
    return opt
| 1,591 | 44.485714 | 152 |
py
|
prfmap
|
prfmap-master/src/resample_prf.py
|
import sys,os,getopt
import subprocess
from scipy.ndimage import zoom
from astropy.io import fits
from read_par import read_par
if __name__ == '__main__':
#################################
### GET OPTIONS from COMMAND LINE
verbose = False #default value
debug = False #default value
prll = False #default value
overwrite = False #default value
options, args=getopt.getopt(sys.argv[1:],"hvdpwc:r:",["help","verbose","debug","parallel","overwrite","config=","resample="])
for opt, arg in options:
if opt in ('-r','--resample'): pxl = float(arg) #the final image grid in arcsec/pixel units
if opt in ('-c','--config'): paramfile = arg
if opt in ('-v','--verbose'): verbose = True
if opt in ('-d','--debug'): debug = True
if opt in ('-p','--parallel'): print('Multi-thread not avilable (yet)')
if opt in ('-w','--overwrite'): overwrite = True
if opt in ('-h','--help'):
print("""
Usage:
$ python resample_psf.py -c configfile.par -r 0.6 [-v -d -p -w]
$ python resample_psf.py --config configfile.par --resample=0.6 [--verbose --debug --parallel -overwrite]
Rescale all the PRF models stored in the directory indicated in the config file ($PATH_OUTPUT/PRFstack/)
to the arcsec/pixel value set by the option -r or --resample. The config file must have the PRFMAP v2 format.
The output PRFs are FITS files stored in a new directory called e.g. PRFstack_06/, with the suffix depeneding
on the rescaling option (in this case 0.6"/pxl). The parent directory is $PATH_OTUPUT as indicated in the
PRFMAP config file.
Options:
-c | --config : specify the confi file (PRFMAPv2 format)
-r | --resample : specify the grid scale of the output (arcsec/pixel units)
-v | --verbose : print more std output
-d | --debug : perform additional sanity checks [TBD]
-p | --parallel : multi-thread [TBD]
-w | --overwrite : write the resampled FITS file even if it is already present in the target directory.
-h | --help : print this help page.
""")
sys.exit()
opt = read_par(paramfile)
pxl0 = 1.22/opt['PRF_SAMP'] #1.22" is the native pixel scale of the IRAC detectors (matches the constant)
factor = pxl0/pxl #zoom factor
suff = str(pxl).replace('.','') # e.g. 0.6 -> '06', used in the output dir name
dir_in = opt['PATH_OUTPUT']+'PRFstack/'
dir_out = opt['PATH_OUTPUT']+'PRFstack_{}/'.format(suff)
if verbose: print("results in directory",dir_out)
subprocess.run(["mkdir","-p",dir_out])
a = subprocess.run(["ls", dir_in],stdout=subprocess.PIPE)
l = a.stdout.decode("utf-8") # ls output in a single string
l = l.split("\n") #create the list of files
if l[-1]=='': l = l[:-1] #split may create an empty item if the original string ends with "\n"
if verbose: print("Resample {} PSFs".format(len(l)))
#ensure output has odd number of pxl
prf0 = fits.getdata(dir_in+l[0]) #assuming all PRFs have same size
side_in = prf0.shape[0] #assuming the PRF cutout is a square (NAXIS1==NAXIS2)
side_out = round(side_in*factor)
if side_out%2==0:
# trim a symmetric pad so the zoomed cutout keeps the centre on a pixel
pad = round(1/factor)//2
if verbose: print('Pad =', pad)
else:
pad = 0
x0 = pad; x1 = side_in-pad
y0 = x0; y1 = x1 #assuming the PRF cutout is a square (NAXIS1==NAXIS2)
for fits_in in l:
# skip PRFs already resampled in a previous run (unless -w/--overwrite)
if os.path.isfile(dir_out+fits_in) and not overwrite:
if verbose: print(dir_out+fits_in,' already exists')
continue
prf_in,hdr = fits.getdata(dir_in+fits_in,header=True)
prf_out = zoom(prf_in[x0:x1,y0:y1],factor,mode='nearest')
if verbose: print('PRF model rescaled from {}x{} to {}x{} pxl'.format(prf_in.shape[0],prf_in.shape[1],prf_out.shape[0],prf_out.shape[1]))
prf_out /= sum(prf_out.flat) #normalize
# record the original and the new (over-)sampling in the output header
hdr['SAMP_ORI'] = opt['PRF_SAMP']
hdr['SAMP_NEW'] = opt['PRF_SAMP']*factor
hdu_new = fits.PrimaryHDU(prf_out,hdr)
#hdu_new.writeto(dir_out+fits_in.replace('.fits',suff+'.fits'))
hdu_new.writeto(dir_out+fits_in,overwrite=overwrite)
| 4,298 | 46.766667 | 145 |
py
|
RepairThemAll_experiment
|
RepairThemAll_experiment-master/README.md
|
# RepairThemAll Experiment
This repository contains the raw results of the execution of 11 repair tools on 5 bug benchmarks presented in the following paper:
```bibtex
@inproceedings{RepairThemAll2019,
author = {Thomas Durieux and Fernanda Madeiral and Matias Martinez and Rui Abreu},
title = {{Empirical Review of Java Program Repair Tools: A Large-Scale Experiment on 2,141 Bugs and 23,551 Repair Attempts}},
booktitle = {Proceedings of the 27th ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering (ESEC/FSE '19)},
year = {2019},
url = {https://arxiv.org/abs/1905.11973}
}
```
Benchmark:
* Bears
* Bugs.jar
* [Defects4J version 1.4.0](https://github.com/rjust/defects4j/tree/v1.4.0)
* IntroClassJava
* QuixBugs
The execution framework that has been used is available at: https://github.com/program-repair/RepairThemAll
## Repository Structure
The repository is structured as follows:
```
├── docs: content for the website
├── results:
│ └── <benchmark>
│ └── <project>
│ └── <bug_id>
│ └── <repair tool>
│ └── <seed>
│ ├── grid5k.stderr.log: stderr of the execution
│ ├── grid5k.stdout.log: stdout of the execution (without the repair)
│ ├── repair.log: repair log
│ ├── result.json: standardize output
│ └── detailed-result.json: raw output of the repair tool, if it generates a json file
└── script
    └── get_patched_bugs.py: the script that is used to generate the tables for the paper
```
## Patched Bugs
The data of this repository is also available as a website: http://program-repair.org/RepairThemAll_experiment
| Repair Tools | Bears | Bugs.jar | Defects4J | IntroClassJava | QuixBugs | Total |
| ------------ | -------:| ---------:| ---------:| --------------:| --------:| --------:|
| ARJA | 12 (4%) | 21 (1%) | 86 (21%) | 23 (7%) | 4 (10%) | 146 (6%) |
| GenProg-A | 1 (0%) | 9 (0%) | 45 (11%) | 18 (6%) | 4 (10%) | 77 (3%) |
| Kali-A | 15 (5%) | 24 (2%) | 72 (18%) | 5 (1%) | 2 (5%) | 118 (5%) |
| RSRepair-A | 1 (0%) | 6 (0%) | 62 (15%) | 22 (7%) | 4 (10%) | 95 (4%) |
| Cardumen | 13 (5%) | 12 (1%) | 17 (4%) | 0 (0%) | 4 (10%) | 46 (2%) |
| jGenProg | 13 (5%) | 14 (1%) | 31 (7%) | 4 (1%) | 3 (7%) | 65 (3%) |
| jKali | 10 (3%) | 8 (0%) | 27 (6%) | 5 (1%) | 2 (5%) | 52 (2%) |
| jMutRepair | 7 (2%) | 11 (0%) | 20 (5%) | 24 (8%) | 3 (7%) | 65 (3%) |
| Nopol | 1 (0%) | 72 (6%) | 107 (27%) | 32 (10%) | 1 (2%) | 213 (9%) |
| DynaMoth | 0 (0%) | 124 (10%) | 74 (18%) | 6 (2%) | 2 (5%) | 206 (9%) |
| NPEFix | 1 (0%) | 3 (0%) | 9 (2%) | 0 (0%) | 2 (5%) | 15 (0%) |
| Total | 74 | 304 | 550 | 139 | 31 | 1,098 |
| Total unique | 25 (9%) | 175 (15%) | 185 (46%) | 62 (20%) | 12 (30%) | 459 (21%)|
Total number of generated patches: 67,211
Execution time 314 days, 12:29:19.419491
## Chi-square Test of independence
| | # Patched | # Non-Patched |
| -------------- | --------- | ------------- |
| ARJA on Defects4J | 86 | 309 |
| ARJA on Others | 60 | 1686 |
Chi2 value= 170.43487132271886 p-value= 5.945480330471514e-39 Degrees of freedom= 1
| | # Patched | # Non-Patched |
| -------------- | --------- | ------------- |
| GenProg-A on Defects4J | 45 | 350 |
| GenProg-A on Others | 32 | 1714 |
Chi2 value= 84.90652479289551 p-value= 3.128091736130167e-20 Degrees of freedom= 1
| | # Patched | # Non-Patched |
| -------------- | --------- | ------------- |
| Kali-A on Defects4J | 72 | 323 |
| Kali-A on Others | 46 | 1700 |
Chi2 value= 150.4020168750391 p-value= 1.4160845009256217e-34 Degrees of freedom= 1
| | # Patched | # Non-Patched |
| -------------- | --------- | ------------- |
| RSRepair-A on Defects4J | 62 | 333 |
| RSRepair-A on Others | 33 | 1713 |
Chi2 value= 144.80217516680622 p-value= 2.372523759882535e-33 Degrees of freedom= 1
| | # Patched | # Non-Patched |
| -------------- | --------- | ------------- |
| Cardumen on Defects4J | 17 | 378 |
| Cardumen on Others | 29 | 1717 |
Chi2 value= 10.701973234378928 p-value= 0.0010702132907778191 Degrees of freedom= 1
| | # Patched | # Non-Patched |
| -------------- | --------- | ------------- |
| jGenProg on Defects4J | 31 | 364 |
| jGenProg on Others | 34 | 1712 |
Chi2 value= 38.10114926497659 p-value= 6.717055566199569e-10 Degrees of freedom= 1
| | # Patched | # Non-Patched |
| -------------- | --------- | ------------- |
| jKali on Defects4J | 27 | 368 |
| jKali on Others | 25 | 1721 |
Chi2 value= 39.69012031778793 p-value= 2.976273080413384e-10 Degrees of freedom= 1
| | # Patched | # Non-Patched |
| -------------- | --------- | ------------- |
| jMutRepair on Defects4J | 20 | 375 |
| jMutRepair on Others | 45 | 1701 |
Chi2 value= 6.76253623850222 p-value= 0.009309135821381086 Degrees of freedom= 1
| | # Patched | # Non-Patched |
| -------------- | --------- | ------------- |
| Nopol on Defects4J | 107 | 288 |
| Nopol on Others | 106 | 1640 |
Chi2 value= 158.83167769741897 p-value= 2.036659201530019e-36 Degrees of freedom= 1
| | # Patched | # Non-Patched |
| -------------- | --------- | ------------- |
| DynaMoth on Defects4J | 74 | 321 |
| DynaMoth on Others | 132 | 1614 |
Chi2 value= 46.25197019724452 p-value= 1.0398193445599021e-11 Degrees of freedom= 1
| | # Patched | # Non-Patched |
| -------------- | --------- | ------------- |
| NPEFix on Defects4J | 9 | 386 |
| NPEFix on Others | 6 | 1740 |
Chi2 value= 17.333764012540335 p-value= 3.1356574417361234e-05 Degrees of freedom= 1
| 6,014 | 39.1 | 163 |
md
|
RepairThemAll_experiment
|
RepairThemAll_experiment-master/docs/index.html
|
<!-- index.html -->
<!DOCTYPE html>
<html lang="en" ng-app="defects4j-website" ng-controller="mainController">
<head>
<meta charset="UTF-8">
<title>Repair Them All</title>
<!-- CSS -->
<link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
<!-- Optional theme -->
<link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap-theme.min.css" integrity="sha384-rHyoN1iRsVXV4nD0JutlnGaslCJuC7uwjduW9SVrLvRYooPp2bWYgmgJQIXwl/Sp" crossorigin="anonymous">
<link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css">
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css">
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/diff2html/2.5.0/diff2html.min.css">
<link rel="stylesheet" href="style/style.css">
<!-- JS -->
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.6.6/angular.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.6.6/angular-animate.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.6.6/angular-touch.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.6.6/angular-route.js"></script>
<script src="//angular-ui.github.io/bootstrap/ui-bootstrap-tpls-2.5.0.min.js"></script>
<script src="//cdn.jsdelivr.net/npm/angu-fixed-header-table@1.2.1/angu-fixed-header-table.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/jquery/2.2.3/jquery.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/languages/java.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/diff2html/2.5.0/diff2html.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/diff2html/2.5.0/diff2html-ui.min.js"></script>
<script src="//overset.github.io/javascript-natural-sort/naturalSort.js"></script>
<script src="js/app.js"></script>
</head>
<body keypress-events>
<div id="wrapper">
<!-- Navigation -->
<nav class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-ex1-collapse">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<h2>Repair Them All is showing {{filteredBugs.length}} bugs </h2>
</div>
<!-- Sidebar Menu Items - These collapse to the responsive navigation menu on small screens -->
<div class="collapse navbar-collapse navbar-ex1-collapse">
<ul id="menu" class="nav navbar-nav side-nav">
<li>
<h3>Filters</h3>
</li>
<li>
<div>
Match
<label><input type="radio" name="match" value="any" ng-model="match"> Any</label>
<label><input type="radio" name="match" value="all" ng-model="match"> All</label>
filters
</div>
</li>
<li>
<h4>Benchmarks</h4>
<ul class="nav">
<li ng-repeat="benchmark in benchmarks">
<label for="{{ benchmark }}" ng-class="{'actived': filters[benchmark]}">
<input type="checkbox" id="{{ benchmark }}" ng-model="filters[benchmark]">
{{ benchmark }}
</label>
</li>
</ul>
</li>
<li>
<h4>Repair Tools</h4>
<ul class="nav">
<li ng-repeat="tool in tools">
<label for="{{ tool }}" ng-class="{'actived': filters[tool]}">
<input type="checkbox" id="{{ tool }}" ng-model="filters[tool]">
{{ tool }}
</label>
</li>
</ul>
</li>
</ul>
</div>
<!-- /.navbar-collapse -->
</nav>
<div id="page-wrapper">
<div class="container-fluid">
<div class="row" id="main" >
<section class="filters" ng-show="filteredBugs.length != bugs.length">
Match {{match}} of these filter(s):
<span class="filter" ng-repeat="(k,v) in filter" ng-if="v" ng-click="filter[k]=false">{{filterName(k)}} <i class="fa fa-fw fa-times"></i></span>
</section>
<table fixed-header id="mainTable" class="table table-striped">
<thead>
<tr>
<th>
<a href="#" ng-click="sort(['benchmark', 'project', 'bug_id'])">
Benchmark
<span ng-show="sortType[0] == 'benchmark' && sortReverse" class="fa fa-caret-down"></span>
<span ng-show="sortType[0] == 'benchmark' && !sortReverse" class="fa fa-caret-up"></span>
</a>
</th>
<th>
<a href="#" ng-click="sort(['benchmark', 'project', 'bug_id'])">
Bug id
<span ng-show="sortType[0] == 'benchmark' && sortReverse" class="fa fa-caret-down"></span>
<span ng-show="sortType[0] == 'benchmark' && !sortReverse" class="fa fa-caret-up"></span>
</a>
</th>
<th>
<a href="#" ng-click="sort('repairs.length')">
# Tool
<span ng-show="sortType == 'repairs.length' && sortReverse" class="fa fa-caret-down"></span>
<span ng-show="sortType == 'repairs.length' && !sortReverse" class="fa fa-caret-up"></span>
</a>
</th>
</tr>
</thead>
<tbody>
<tr ng-repeat="bug in bugs| orderBy:sortType:sortReverse:naturalCompare| filter:bugsFilter as filteredBugs" ng-click="openBug(bug)">
<td>{{ bug.benchmark }}</td>
<td>{{ bug.project }} {{ bug.bug_id }}</td>
<td>{{ bug.repairs.length }}</td>
</tr>
</tbody>
</table>
</div>
</div>
<!-- /.container-fluid -->
</div>
<!-- /#page-wrapper -->
</div><!-- /#wrapper -->
<span ng-controller="bugController"></span>
</body>
<script type="text/ng-template" id="modelPatch.html">
<div class="modal-header">
<h3 class="modal-title" id="modal-title">{{ $ctrl.bug.benchmark }} {{ $ctrl.bug.project }} {{ $ctrl.bug.bug_id }}</h3>
<a ng-click="$ctrl.previousPatch()"><i class="fa fa-arrow-left" aria-hidden="true"></i> Previous Patch</a> | <a ng-click="$ctrl.nextPatch()">Next Patch <i class="fa fa-arrow-right" aria-hidden="true"></i></a>
</div>
<div class="modal-body" id="modal-body">
<div ng-if="$ctrl.human != null">
<h3>Human Patch</h3>
<div diff="$ctrl.human"></div>
</div>
<div ng-repeat="tool in $ctrl.bug.repairs">
<h3>{{tool.tool}} {{tool.nb_patch}} generated patches</h3>
<div diff="tool.result.patches[0]"></div>
</div>
</div>
<div class="modal-footer">
<button class="btn btn-primary" type="button" ng-click="$ctrl.ok()">OK</button>
</div>
</script>
</html>
| 6,926 | 42.29375 | 211 |
html
|
RepairThemAll_experiment
|
RepairThemAll_experiment-master/docs/style/style.css
|
h2 {
margin-top: 5px;
}
a {
color: #5A6B7D;
}
#wrapper {
padding-left: 0;
}
#page-wrapper {
width: 100%;
padding: 0;
background-color: #fff;
margin-top: 40px;
}
@media(min-width:768px) {
#wrapper {
padding-left: 300px;
}
#page-wrapper {
padding: 22px 10px;
}
}
/* Side Navigation */
@media(min-width:768px) {
.side-nav {
position: fixed;
top: 0px;
left: 300px;
width: 300px;
margin-left: -300px;
border: none;
border-radius: 0;
overflow-y: auto;
background-color: #5A6B7D;
bottom: 0;
overflow-x: hidden;
padding-bottom: 0px;
}
.side-nav>li ul li label {
width: 300px;
border-bottom: 1px rgba(0,0,0,.3) solid;
}
.side-nav li ul li label.actived,
.side-nav li ul li label:hover,
.side-nav li ul li label:focus {
outline: none;
background-color: #4d5c6b !important;
}
}
.side-nav>li ul {
padding: 0;
}
.side-nav, .side-nav a {
color: #f5f5f5;
}
.side-nav a:focus, .side-nav a:hover {
background-color: #4d5c6b!important;
}
.side-nav li {
float: none;
}
.side-nav li h2, .side-nav li h3, .side-nav li h4, .side-nav li div {
padding: 3px 4px 3px 18px;
margin: 0;
}
.side-nav>li>ul>li label {
display: block;
padding: 3px 4px 3px 18px;
text-decoration: none;
color: #d4d4d4;
margin: 0;
cursor: pointer;
word-wrap: break-word;
}
.side-nav>li>ul>li label>input {
margin-right: 3px;
}
.navbar .nav > li a:hover > .label {
top: 10px;
}
.filters {
margin-bottom: 5px;
}
.filters .filter {
background: #5A6B7D;
color: #f5f5f5;
cursor: pointer;
padding: 5px;
margin-right: 4px;
}
.modal-dialog{
width:80%;
}
.modal-content {
border-radius: 0;
border: 0;
}
.table {
margin-bottom: 0px;
}
table {
border: 0;
}
table tr {
cursor: pointer;
}
table tr:hover {
background: #DDD!important;
}
.navbar-header {
position: absolute;
display: block;
background: #5a6b7d;
left: 300px;
right:0;
color: white;
padding: 4px;
}
.navbar-header a {
color: #FFF;
}
| 2,136 | 13.944056 | 69 |
css
|
RepairThemAll_experiment
|
RepairThemAll_experiment-master/script/get_patched_bugs.py
|
import os
import json
import re
import datetime
benchmarks = ["Bears", "Bugs.jar", "Defects4J", "IntroClassJava", "QuixBugs"]
tools = ["Arja", "GenProg", "Kali", "RSRepair", "Cardumen", "jGenProg", "jKali", "jMutRepair", "Nopol", "DynaMoth", "NPEFix"]
def percent(value, total):
    """Return value/total as a percentage, truncated to two decimal places."""
    scaled = value * 10000 / total
    return int(scaled) / 100.0


def percent_round(value, total):
    """Return value/total as a percentage rounded to the nearest integer."""
    pct = percent(value, total)
    return int(round(pct, 0))
def bench_name(name):
    """Map an internal benchmark key to its display name ("xtotal" -> "Average")."""
    keys = ["Bears", "Bugs.jar", "Defects4J", "IntroClassJava", "QuixBugs", "xtotal"]
    labels = ["Bears", "Bugs.jar", "Defects4J", "IntroClassJava", "QuixBugs", "Average"]
    return labels[keys.index(name)]
def tool_name(name):
    """Map an internal tool key to its display name ("xtotal" -> "Average").

    Relies on the module-level `tools` list for the key order.
    """
    keys = tools + ["xtotal"]
    labels = ["ARJA", "GenProg-A", "Kali-A", "RSRepair-A", "Cardumen","jGenProg", "jKali", "jMutRepair", "Nopol", "DynaMoth", "NPEFix", "Average"]
    return labels[keys.index(name)]
def format_time(t):
    """Format a timedelta as H:MM:SS, dropping microseconds and a leading "0" hour.

    Bug fix: the original body formatted the global `average_tool` instead of
    the argument `t`, so the value passed by every call site was silently
    ignored (it only worked by accident because callers happened to pass
    `datetime.timedelta(seconds=average_tool)`).
    """
    # str(timedelta) looks like "H:MM:SS" or "H:MM:SS.ffffff"; keep the part
    # before the fractional seconds.
    t = str(t).split('.', 2)[0]
    if t[0] == "0":
        # Drop the "0:" hour prefix for durations under an hour.
        return t[2:]
    return t
def natural_sort(l):
    """Sort strings so embedded numbers compare numerically (e.g. "a2" < "a10")."""
    def key_parts(s):
        # Split on digit runs (capturing group keeps them) and convert each
        # run to int so comparison is numeric; other chunks compare
        # case-insensitively.
        parts = []
        for chunk in re.split('([0-9]+)', s):
            parts.append(int(chunk) if chunk.isdigit() else chunk.lower())
        return parts

    return sorted(l, key=key_parts)
ROOT = os.path.join(os.path.dirname(__file__), "..")
nb_bugs_bench = {
"Bears": 251,
"Bugs.jar": 1158,
"Defects4J": 395,
"IntroClassJava": 297,
"QuixBugs": 40,
}
nb_bugs = 0
for benchmark in benchmarks:
nb_bugs += nb_bugs_bench[benchmark]
total_nb_patch = 0
nb_patch = 0
total_attempts = 0
patch_per_tool = {}
patch_per_bench = {}
repaired_bugs = {}
tool_bugs = {}
bugs_tool = {}
results = []
times = {
'patched': {},
'timeout': {},
'nopatch': {},
'error': {}
}
rvs = {}
not_runned = []
total_time = 0
for benchmark in benchmarks:
benchmark_path = os.path.join(ROOT, "results", benchmark)
if benchmark not in rvs:
rvs[benchmark] = []
for project in natural_sort(os.listdir(benchmark_path)):
project_path = os.path.join(benchmark_path, project)
folders = os.listdir(project_path)
if benchmark == "QuixBugs":
folders = [""]
for bug_id in natural_sort(folders):
bug_path = os.path.join(project_path, bug_id)
for repair_tool in natural_sort(os.listdir(bug_path)):
tool_path = os.path.join(bug_path, repair_tool)
for seed in natural_sort(os.listdir(tool_path)):
total_attempts += 1
seed_path = os.path.join(tool_path, seed)
is_error = False
stat = None
repair_log_path = os.path.join(seed_path, "repair.log")
if not os.path.exists(repair_log_path):
repair_log_path = os.path.join(seed_path, "repair.log.gz")
if not os.path.exists(repair_log_path):
is_error = True
not_runned.append("%s\t%s\t%s_%s" % (repair_tool, bench_name(benchmark), project, bug_id))
else:
stat = os.stat(repair_log_path)
#if stat.st_size < 20000:
with open(repair_log_path) as fd:
content = fd.read()
if 'Exception in thread "main"' in content or 'Usage: ' in content:
is_error = True
results_path = os.path.join(seed_path, "result.json")
if os.path.exists(results_path):
with open(results_path) as fd:
data = json.load(fd)
if 'repair_begin' in data:
begin = datetime.datetime.strptime(data['repair_begin'], "%Y-%m-%d %H:%M:%S.%f")
end = datetime.datetime.strptime(data['repair_end'], "%Y-%m-%d %H:%M:%S.%f")
time_spend = (end - begin).total_seconds()
times_dict = times['nopatch']
rvs[benchmark].append('patches' in data and len(data['patches']) > 0)
if 'patches' in data and len(data['patches']) > 0:
times_dict = times['patched']
elif is_error:
times_dict = times['error']
elif time_spend > 2 * 3600:
times_dict = times['timeout']
if benchmark not in times_dict:
times_dict[benchmark] = {}
if repair_tool not in times_dict[benchmark]:
times_dict[benchmark][repair_tool] = []
times_dict[benchmark][repair_tool].append(time_spend)
total_time += time_spend
if repair_tool not in patch_per_tool:
patch_per_tool[repair_tool] = {}
if benchmark not in patch_per_bench:
patch_per_bench[benchmark] = 0
if benchmark not in patch_per_tool[repair_tool]:
patch_per_tool[repair_tool][benchmark] = []
if 'patches' in data and len(data['patches']) > 0:
unique_bug_id = "%s_%s_%s" % (benchmark, project, bug_id)
if unique_bug_id not in tool_bugs:
tool_bugs[unique_bug_id] = []
tool_bugs[unique_bug_id].append(repair_tool)
if repair_tool not in bugs_tool:
bugs_tool[repair_tool] = []
bugs_tool[repair_tool].append(unique_bug_id)
patch_per_tool[repair_tool][benchmark].append(unique_bug_id)
patch_per_bench[benchmark] += 1
nb_patch += 1
nb_tool_patch = len(data['patches'])
total_nb_patch += nb_tool_patch
data['patches'] = [data['patches'][0]]
if unique_bug_id not in repaired_bugs:
repaired_bugs[unique_bug_id] = {
"benchmark": benchmark,
"project": project,
"bug_id": bug_id,
"tools": []
}
results.append(
{
"benchmark": benchmark,
"project": project,
"bug_id": bug_id,
"tool": tool_name(repair_tool),
"result": data,
"nb_patch": nb_tool_patch
}
)
repaired_bugs[unique_bug_id]['tools'].append(repair_tool)
elif is_error:
times_dict = times['error']
if benchmark not in times_dict:
times_dict[benchmark] = {}
if repair_tool not in times_dict[benchmark]:
times_dict[benchmark][repair_tool] = []
times_dict[benchmark][repair_tool].append(1)
rvs[benchmark].append(False)
else:
rvs[benchmark].append(False)
stderr_path = os.path.join(seed_path, "grid5k.stderr.log")
if os.path.exists(stderr_path):
with open(stderr_path) as fd:
# timeout
if "KILLED" in fd.read():
times_dict = times['timeout']
if benchmark not in times_dict:
times_dict[benchmark] = {}
if repair_tool not in times_dict[benchmark]:
times_dict[benchmark][repair_tool] = []
times_dict[benchmark][repair_tool].append(2 * 3600)
total_time += 2 * 3600 # 2h
with open(os.path.join(ROOT, "docs", "data", "patches.json"), "w+") as fd:
json.dump(results, fd)
index = 0
print("| # | Benchmark | Bug | # Repair Tools | Repair Tools |")
print("| ---:| -------------- | ---------------------- | --------------:| ------------ |")
for i in natural_sort(repaired_bugs.iterkeys()):
bug = repaired_bugs[i]
index += 1
bug_id = bug['bug_id']
if len(bug_id) > 8:
bug_id = bug_id[-8:]
project = bug['project'].split("-")[-1]
t = ""
for repair_tool in bug['tools']:
t += tool_name(repair_tool) + " "
print ("| {:3} | {:14} | {:22} | {:16} | {:11} |".format(index, bench_name(bug['benchmark']), ("%s %s" % (project, bug_id)).strip(), len(bug['tools']), t))
print("\n")
line = " Repair Tools "
for benchmark in benchmarks:
line += "& {:} ".format(bench_name(benchmark))
line += "& Total \\\\\\midrule"
print(line)
nb_patch_tool = {}
nb_patch_tool_bench = {}
for repair_tool in tools:
line = " {:12} ".format(tool_name(repair_tool))
nb_patch_tool[repair_tool] = 0
nb_patch_tool_bench[repair_tool] = {}
for benchmark in benchmarks:
nb_patches = 0
t = benchmark
if t != 'Defects4J':
t = 'Others'
if t not in nb_patch_tool_bench[repair_tool]:
nb_patch_tool_bench[repair_tool][t] = 0
if benchmark in patch_per_tool[repair_tool]:
nb_patches = len(patch_per_tool[repair_tool][benchmark])
nb_patch_tool_bench[repair_tool][t] += nb_patches
nb_patch_tool[repair_tool] += nb_patches
if nb_patches > 0 and percent(nb_patches, nb_bugs_bench[benchmark]) < 1:
line += "& {:{width}} ".format("%d (<1\\%%)" % (nb_patches), width=len(bench_name(benchmark)))
else:
line += "& {:{width}} ".format("%d (%d\\%%)" % (nb_patches, percent(nb_patches, nb_bugs_bench[benchmark])), width=len(bench_name(benchmark)))
if nb_patch_tool[repair_tool] > 0 and percent(nb_patch_tool[repair_tool], nb_bugs) < 1:
line += "& {:5} \\\\".format("%d (<1\\%%)" % (nb_patch_tool[repair_tool]))
else:
line += "& {:5} \\\\".format("%d (%d\\%%)" % (nb_patch_tool[repair_tool], percent(nb_patch_tool[repair_tool], nb_bugs)))
print(line)
print(" \\midrule")
line = "Total "
for benchmark in benchmarks:
nb_patches = 0
if benchmark in patch_per_bench:
nb_patches = patch_per_bench[benchmark]
line += "& {:{width}} ".format(nb_patches, width=len(bench_name(benchmark)))
line += "& {:5} \\\\".format(nb_patch)
print(line)
repaired_benchmarks = {}
for i in natural_sort(repaired_bugs.iterkeys()):
bug = repaired_bugs[i]
if bug['benchmark'] not in repaired_benchmarks:
repaired_benchmarks[bug['benchmark']] = 0
repaired_benchmarks[bug['benchmark']] += 1
total = 0
line = "Total unique "
for benchmark in natural_sort(repaired_benchmarks):
line += "& {:{width}} ".format("%d (%d\\%%)" % (repaired_benchmarks[benchmark], percent(repaired_benchmarks[benchmark], nb_bugs_bench[benchmark])), width=len(bench_name(benchmark)))
total += repaired_benchmarks[benchmark]
line += "& {:5} \\\\".format("%d (%d\\%%)" % (total, percent(total, nb_bugs)))
print(line + "\n")
for repair_tool in tools:
print('| | # Patched | # Non-Patched |')
print('| -------------- | --------- | ------------- |')
print('| %s on Defects4J | %d | %d |' % (tool_name(repair_tool), nb_patch_tool_bench[repair_tool]['Defects4J'], nb_bugs_bench['Defects4J'] - nb_patch_tool_bench[repair_tool]['Defects4J']))
print('| %s on Others | %d | %d |' % (tool_name(repair_tool), nb_patch_tool_bench[repair_tool]['Others'], (nb_bugs - nb_bugs_bench['Defects4J']) - nb_patch_tool_bench[repair_tool]['Others']))
print("\nTotal generated patch: %d\n" % total_nb_patch)
# for graph
tool_totals = []
line = " "
for repair_tool in tools:
line += ("& {0:11} ").format("\\multicolumn{1}{c}{%s}" % tool_name(repair_tool))
print("%s \\\\\\midrule" % line)
overlaps = {}
for repair_tool_line in tools:
line = " {0:10} ".format(tool_name(repair_tool_line))
for repair_tool_column in tools:
number = 0
if repair_tool_line == repair_tool_column:
if repair_tool_column in bugs_tool:
# count unique
for p in bugs_tool[repair_tool_column]:
if len(tool_bugs[p]) == 1:
number += 1
line += ("& {0:11} ").format("\\textbf{%s\\%% (%d)}" % (percent_round(number, nb_patch_tool[repair_tool_column]), number))
tool_totals.append(
{
"tool": tool_name(repair_tool_line),
"unique": number,
"overlapped": len(bugs_tool[repair_tool_column]) - number,
"total": len(bugs_tool[repair_tool_column])
}
)
else:
if repair_tool_column in bugs_tool:
for p in bugs_tool[repair_tool_column]:
if repair_tool_line in bugs_tool and p in bugs_tool[repair_tool_line]:
number += 1
p = percent_round(number, nb_patch_tool[repair_tool_line])
if repair_tool_line not in overlaps:
overlaps[repair_tool_line] = {
"40-50": [],
"50-60": [],
"60-70": [],
"70-80": [],
"80-100": [],
}
for s in overlaps[repair_tool_line]:
(min, max) = s.split("-")
if int(min)<= p and p < int(max):
overlaps[repair_tool_line][s].append('%s (%d)' % (tool_name(repair_tool_column), number))
break
if number < 10:
line += ("& {0:11} ").format("\\cca{%s}\\%% \\enspace(%d)" % (p, number))
else:
line += ("& {0:11} ").format("\\cca{%s}\\%% (%d)" % (p, number))
print("%s \\\\" % line)
print("\n {0:10} & 0-20 \\% & 20-40 \\% & 40-60 \\% & 60-80 \\% & 80-100 \\% \\\\ \\midrule".format(''))
for tool in sorted(overlaps):
line = " {0:10} ".format(tool_name(tool))
for c in sorted(overlaps[tool]):
line += '& %s ' % ", ".join(overlaps[tool][c])
print("%s \\\\" % line)
print "\nFor repairability graph"
tool_totals_view = sorted(tool_totals, key = lambda i: i['total'],reverse=True)
for repair_tool in tool_totals_view:
print "%s,%d,%d" % (repair_tool['tool'], repair_tool['unique'], repair_tool['overlapped'])
times_tools = {
'patched': {},
'timeout': {},
'nopatch': {},
'error': {}
}
for state in times:
for bench in sorted(times[state]):
for tool in tools:
if tool not in times_tools[state]:
times_tools[state][tool] = {}
if bench not in times_tools[state][tool]:
times_tools[state][tool][bench] = {}
if tool not in times[state][bench]:
continue
times_tools[state][tool][bench] = times[state][bench][tool]
for state in times_tools:
print("\n" + state)
line = " {0:11} ".format(' ')
for benchmark in sorted(benchmarks):
line += "& {0} ".format(bench_name(benchmark))
print("%s& Average \\\\\\midrule" % line)
total_bench = {}
for tool in tools:
line = " {0:11} ".format(tool_name(tool))
total_tools = []
for bench in sorted(benchmarks):
if bench not in total_bench:
total_bench[bench] = []
if bench not in times_tools[state][tool]:
line += "& {:{width}} ".format('N.A.', width=len(bench_name(bench)))
else:
total_bench[bench] += times_tools[state][tool][bench]
total_tools += times_tools[state][tool][bench]
if state == "timeout" or state == 'error':
line += "& {:{width}} ".format(
percent(len(times_tools[state][tool][bench]), nb_bugs_bench[bench]), width=len(bench_name(bench)))
continue
total_tool = sum(times_tools[state][tool][bench])
average_tool = 0
if len(times_tools[state][tool][bench]) != 0:
average_tool = total_tool/len(times_tools[state][tool][bench])
line += "& {:{width}} ".format(format_time(datetime.timedelta(seconds=average_tool)), width=len(bench_name(bench)))
if 'xtotal' not in total_bench:
total_bench['xtotal'] = []
total_bench['xtotal'] += total_tools
if state == "timeout" or state == 'error':
line += "& {:{width}} ".format(percent(len(total_tools), nb_bugs), width=8)
else:
total_tool = sum(total_tools)
average_tool = 0
if len(total_tools) != 0:
average_tool = total_tool/len(total_tools)
line += "& {:{width}} ".format(format_time(datetime.timedelta(seconds=average_tool)), width=8)
print("%s \\\\" % line)
print(" \\midrule")
line = ' {0:11} '.format('Average')
for bench in sorted(total_bench):
if state == "timeout" or state == 'error':
tmp = total_attempts
if bench != "xtotal":
tmp = nb_bugs_bench[bench] * len(tools)
line += "& {:{width}} ".format(percent(len(total_bench[bench]), tmp), width=len(bench_name(bench)))
continue
total = sum(total_bench[bench])
average_tool = 0
if len(total_bench[bench]) != 0:
average_tool = total/len(total_bench[bench])
line += "& {:{width}} ".format(format_time(datetime.timedelta(seconds=average_tool)), width=len(bench_name(bench)))
print("%s \\\\" % line)
print("Execution time %s " % datetime.timedelta(seconds=total_time))
with open('not_runned.csv', 'w') as fd:
for t in not_runned:
fd.write(t +'\n')
| 19,051 | 42.3 | 196 |
py
|
RepairThemAll_experiment
|
RepairThemAll_experiment-master/script/compress_log.py
|
import os
import gzip
import shutil

# Repository root: this script lives in script/, results live in ../results.
ROOT = os.path.join(os.path.dirname(__file__), "..")

# NOTE(review): these accumulators are never updated or read anywhere in this
# script; kept only so any external importer that references them still works.
nb_patch = 0
patch_per_tool = {}
patch_per_bench = {}

# Walk results/<benchmark>/<project>/<bug>/<tool>/<seed>/ and compress or
# delete the .log files found at the leaves.
for benchmark in (os.listdir(os.path.join(ROOT, "results"))):
    benchmark_path = os.path.join(ROOT, "results", benchmark)
    for project in sorted(os.listdir(benchmark_path)):
        project_path = os.path.join(benchmark_path, project)
        folders = os.listdir(project_path)
        if benchmark == "QuixBugs":
            # QuixBugs has no per-bug sub-folders: the project folder itself
            # is the bug folder, so inject a single empty path component.
            folders = [""]
        for bug_id in sorted(folders):
            bug_path = os.path.join(project_path, bug_id)
            for repair_tool in sorted(os.listdir(bug_path)):
                tool_path = os.path.join(bug_path, repair_tool)
                for seed in sorted(os.listdir(tool_path)):
                    seed_path = os.path.join(tool_path, seed)
                    for name in os.listdir(seed_path):
                        if not name.endswith('.log'):
                            continue
                        file_path = os.path.join(seed_path, name)
                        size = os.stat(file_path).st_size
                        if size >= 50 * 1024 * 1024:
                            # Logs of 50 MiB or more are gzipped in place and
                            # the uncompressed original removed.
                            # Fix: the original used a Python 2 print
                            # statement (a SyntaxError on Python 3) and '/'
                            # division (float under py3); this formatted
                            # print and '//' behave identically on both.
                            print("%s %d" % (file_path, size // (1024 * 1024)))
                            gz_path = file_path.replace(".log", ".log.gz")
                            with open(file_path, 'rb') as f_in, gzip.open(gz_path, 'wb') as f_out:
                                shutil.copyfileobj(f_in, f_out)
                            os.remove(file_path)
                        elif size == 0:
                            # Empty logs carry no information; drop them.
                            os.remove(file_path)
| 1,534 | 41.638889 | 127 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/README.md
|
# Introduction
This is the evaluation software for "CBA: Context-based Adaptation" by Bastian Alt, Trevor Ballard, Ralf Steinmetz, Heinz Koeppl and Amr Rizk. It emulates an NDN network with clients, servers, and caches, in which clients stream video provided by the servers and stored by the caches. The streaming is headless, which means you can't watch the video, but we can log the progress of each client to see if their adaptation algorithms are choosing appropriate bitrates to avoid stalling while keeping the quality as high as possible. Clients can use one of several adaptation algorithms, including some novel contextual multi-armed bandit algorithms we created. The network bandwidth can also be shaped with `tc`, forcing the algorithms to pick lower or higher bitrates throughout the video.
The whole system can be divided into a few major, important components:
* **Main pipeline**: This is what starts up the NDN testbed, and where configuration options are specified. The main pipeline is run using Vagrant to keep setup simple.
* **QtSamplePlayer**: The video player. This is where you define your adaptation algorithms. For the contextual bandits, this is where the context vector is prepared and the bandits get used. If you want to change the context by, e.g., squaring all of the values, this is where you should do it.
* **NDN**: We use a slightly modified version of NDN to attach metadata to the Data packets, which goes into the context used by the contextual bandits. Right now, the only addition is `numHops`.
* **ndn-icp-download**: This is the interface to NDN. It sends out the Interests, receives the Data, and does other useful things like resending Interests on timeout. If you add more metadata to the Data packets, you will have to change this component to send that metadata to QtSamplePlayer.
Additionally, there are some other useful components for things like creating the bandwidth traces, creating a topology, and visualizing the data which I'll go over.
Note that this is research software that's been used for several projects. You will probably find things like folders, files, functions, and function parameters that aren't used anymore, but weren't removed. You can ignore these.
If you have any questions at all, feel free to email us. We are more than happy to help; this is a pretty complicated system.
# Installation
These installation instructions are for Ubuntu 18.04. No other environments have been tested.
## Prerequisites
The actual execution takes place with Vagrant, so install it with
sudo apt update
sudo apt install virtualbox vagrant
We also need to set up pip:
sudo apt install python-pip
## Installing the pipeline
First, clone the cba-pipeline repo. It is several GBs because it has a lot of multimedia data, so it will probably take a while:
git clone https://github.com/arizk/cba-pipeline-public
cd cba-pipeline-public
There will be a Vagrantfile in the containernet folder, so we can set it up and access it via
cd containernet
vagrant up
vagrant ssh
Within Vagrant, the containernet repo will be at /home/ubuntu/containernet. Note that the files in this directory are the literal files on your machine, Vagrant is just a different execution environment. So if you change a file in this directory on your local machine, it will also be changed in Vagrant and vice-versa.
Once you are in Vagrant, you can build the Docker images and begin the program:
cd /home/ubuntu/containernet/ndn-experiment
sudo ./start.sh --build-docker
Once the program finishes, the results will be in containernet/results/. The first time you build will take a long time, but subsequent builds will use previous builds. You'll probably get errors for the ndn-cxx docs, just ignore these.
# Main Pipeline
## Overview
The testbed uses Containernet to do evaluations. This is an extension of Mininet that uses Docker containers. Every client, cache, and server is a Docker instance, and you can use and access them like any other Docker container.
## start.sh
start.sh is the program you run to do the evaluation. From within Vagrant, in `/home/ubuntu/containernet/ndn-experiment/`, assuming Docker has been built:
sudo ./start.sh
start.sh optionally builds the Docker images, then runs the evaluation. Since each client, cache, and server is a Docker instance, we have to create the right Docker image before running the evaluation. The folder for the clients is containernet/ndn-containers/ndn\_headless-player, and the folder for caches and servers is containernet/ndn-containers/ndn-repo. A good rule of thumb is to rebuild the corresponding docker images any time you edit something in these repositories. You can call one of the following:
* `sudo ./start.sh --build-player`: rebuilds the client image
* `sudo ./start.sh --build-repo`: rebuilds the cache/server image
* `sudo ./start.sh --build-docker`: rebuilds everything; the client, cache, and server images
If you edit one of the adaptation algorithms, for example, you should do `sudo ./start.sh --build-player`
start.sh is set up to run multiple epochs of multiple configurations. Every value in `qoe_weights_list` and `algo_list` will be run for the number of epochs specified (the `seq` in the `for` loop). Furthermore, for contextual bandits, these algorithms will be run with memory and without memory. *With* memory means that the weights from one epoch will be used in the next, so the contextual bandit will continue to learn over epochs. *Without* memory means the bandit will start from nothing each epoch, which will test how well it can learn within just one video. One epoch means one video stream.
**Important**: start.sh calls docker\_ndn.py for each epoch, which is where the actual topology is deployed and the streaming begins. It gets called twice in start.sh, once with memory, and once without. If you want to use a different topology or change how long you stream the video, you **have** to edit **both** of the calls to docker\_ndn.py directly. The topology is given by the `-t` flag, and the duration (in seconds; 1 chunk is 2s, so 200s is 100 chunks) is given by the `-d` flag. We could not turn these into bash variables, as they would always be interpreted incorrectly, so they have to be changed directly.
## docker\_ndn.py
docker\_ndn.py is what deploys a topology, starts the streaming session, then tears down the topology at the end and stores the results, for one epoch.
The available topologies are defined in this file, in the `initExperiment` function. To create a new topology, follow the examples: create clients, caches, switches for the caches, and servers with `setNDNNodeInfo`, then connect them how you wish with `topology.addLink(...`. Note that every cache should be attached to a switch, and this switch should be attached to clients, servers, or other switches. You should not attach a cache to anything other than one switch, and you should not attach more than one cache to one switch. The bandwidth for each link is specified with the `bw` parameter in `topology.addLink(...`. If you use `tc`, the actual bandwidth on the link will change.
After the topology is chosen, `waitForPlayback` is called. This is where the streaming begins and ends.If you want to enable/disable `tc`, you can do so here by simply commenting/uncommenting the lines which create and start the `traffic_control` threads. Note that **you must manually specify which links tc should happen on.** Furthermore, you must create a tc thread for both directions of the link, as the tc only affects the outgoing data from the interface. In the args to `trafficControl`, the 2nd and 3rd argument are what's relevant here; the tc will be on the interface *from* the 2nd argument *to* the 3rd argument. Look at the current tc threads in `waitForPlayback` to see how this happens. Note that a lot of the arguments to this function are not currently used; they are historical, and were just not removed to save time. After the program begins, it will wait for the duration (specified by the `-d` flag) plus 20 seconds (if a client stalls, it will take a little longer than the duration to complete, so we give them some extra time), then fetch the results, kill the network, and end the epoch.
For each client in the topology, `waitForPlayback` will create a new thread to run `startPlayer`. This function does a few things:
1. Create the command that gets executed in the client Docker instance.
2. If we use memory, place the previous epoch's contextual bandit object (a Python pickle object with the weights from the last epoch) in the client Docker instance.
3. Execute the command, beginning playback.
After the duration plus 20 seconds, `waitForPlayback` will tell all of the clients to stop streaming and will call the `getDataFromClient` function for each client. This function will simply get the client's output and place it in the results folder. If we use memory, it will also get the contextual bandit object and store it to use for the next epoch.
## trafficControl.py and dynamicLinkChange.py
trafficControl.py does tc, modifying the bandwidth of a link over the duration of the video. The `traffic_control` function is the target of any tc threads made in `waitForPlayback`. It assumes that there are some bandwidth traces which are CSV files of the following form:
<bandwidth in MBps>,<duration of bandwidth in seconds>
The actual `tc` command takes place in the `reconfigureConnection` function, which gets called here. You will probably not have to change anything in `dynamicLinkChange.py`. Note that, currently, all of the calls to `traffic_control` in `waitForPlayback` give the same client name, which means that all of the links will be reading from the same file (the one for "client1"), and will therefore modulate in the same way at the same time. You can have them read from different traces (if you want different links to change in different ways) by just changing this.
# QtSamplePlayer
qtsampleplayer is an open-source video player which we've modified. This is what does the streaming on the client Docker instances. It's a big piece of software, but we are mostly concerned with the adaptation algorithms, which are all the way in `containernet/ndn-containers/ndn_headless-player/code/ndn-dash/libdash/qtsampleplayer/libdashframework/Adaptation/`. Note that the code for the caches and servers is also in `containernet/ndn-containers/`, but you will probably not need to worry about them.
## Adaptation algorithms in C++
Each adaptation algorithm is a C++ file with some common functions. The contextual bandits are SparseBayesUcb.cpp, SparseBayesUcbOse.cpp, LinUcb.cpp, and SparseBayseUcbSvi.cpp. SparseBayseUcbSvi.cpp has a tendency to stall in the Python code which we couldn't figure out, so you will probably just want to use the first three.
Note that all of the calls to `std::cout` are what get written to the log file. We redirect the output to this file when executing the player.
Again, to be clear, if you change any of the C++ stuff, or really anything in a subdirectory of containernet/ndn-containers/, you **have** to run `sudo ./start.sh --build-player` or `sudo ./start.sh --build-docker` (again, build-player only rebuilds the clients, build-docker does both clients and caches/servers). When you do this, it will recompile the QtSamplePlayer code and put it in a new Docker image for the client. This is the easiest way to compile your code, and the output will tell you if there were any compilation errors (if you see any, press Ctrl+c a bunch of times to kill `start.sh` so you can look into the issue). A faster, but slightly more complicated way to check if a program compiles is detailed at the end of this document.
## OnSegmentDownload
The adaptation algorithms call functions from `run_bandits.py` to choose a bitrate and update the model. The main function in each C++ adaptation algorithm is `OnSegmentDownload`; this gets called whenever all of the Data packets for the last segment we requested have arrived, which means we can calculate the QoE, update our model, and use the context from these new Data packets. The structure of `OnSegmentDownload` is as follows:
1. Calculate the QoE for the previous segment.
2. Put all of the **previous** context information into Python lists, which will eventually be given to the bandits.
3. Execute the bandit's update function, which will update the internal model of the bandit with the reward, previous decision, and previous context. We use the previous context (as in, the context from when we made the previous decision, on the previous `OnSegmentDownload`) because it will tell the bandit "here's the information you had last time, here's the decision that you made, and here's the reward it got you".
4. Add the new context, which we'll use on the *next* call to `OnSegmentDownload`.
5. Call `SetBitrate`, which will tell the bandit to pick the next quality.
6. Call `NotifyBitrateChange`, which will tell a different part of QtSamplePlayer which bitrate it should select for the next segment. This is a bit of an implementation detail that you don't need to worry about.
## SetBitrate
`SetBitrate` follows a similar pattern as the middle of `OnSegmentDownload`:
1. Put all of the **new** context information into Python lists.
2. Execute the bandit's decision function, which will observe the context and return which quality we should request next.
## run\_bandits.py
run\_bandits.py is the interface we use to interact with the real contextual bandit code. You will almost certainly not have to change the real bandit code, but, if you change the context, you will have to change run\_bandits.py. There are three functions:
* `choose`: this is what we call in step 2 of `SetBitrate`.
* `update`: this is what we call in step 3 of `OnSegmentDownload`.
* `preprocess`: this is what you will probably need to change. We must have a fixed context vector size, but the number of Data packets can vary from one video chunk to another, so in this step we subsample the context to get the right number, and create the final context vector which will be given to the bandit.
## Changing the context
If you want to add a new set of context features, like ECN or the RTT squared, you will have to make a few changes. For manipulating existing context features (e.g. RTT squared), I would recommend you just square them and add them to the context vector in `preprocessing` in `run_bandits.py`. This is the easiest way and most logical to me, since we are just manipulating some information, which the C++ code doesn't need to know about.
To add something new, you will probably have to change the C++. Look at the way rttMap and numHopsMap are set up for inspiration. Keep in mind that each quality needs its own context vector, which is why we use maps. A useful command-line function for searching through many files is:
find /path/to/folder -type f -exec grep --color -n -e "thing you want to search for" /dev/null {} +
Note that if you add something to NDN, e.g. more metadata like numHops, you will also have to change ..../libdashframework/Input/DASHReceiver.\* and ndn-icp-download. DASHReceiver is the interface between ndn-icp-download and the adaptation algorithm, so context from the network gets routed through it.
# ndn-icp-download
ndn-icp-download, located in `containernet/ndn-containers/ndn_headless-player/code/ndn-icp-download/`, is the interface between NDN and the video player. Any metadata attached to the Data packets will have to be processed and sent to the video player here, in the `onDataReceived` function. Look at how `numHopsTag` is fetched and processed for ideas. Note also that you will have to update the signature for `signalContextToDASHReceiver` to accomodate the new information. In DASHReceiver.cpp, where we call `this->chunktimeObs->addContextSignal(...`, if you add another element to the context vector, append another underscore and number to the end of the `boost::bind(...`. For example, if you put 4 things in the call to `signalContextToDASHReceiver`, that line would read
this->chunktimeObs->addContextSignal(boost::bind(&DASHReceiver::NotifyContextChunk, this, _1, _2, _3, _4));
Other than this, you probably don't need to worry about ndn-icp-download.
# NDN
There are two code components to NDN: there's ndn-cxx, which is a C++ library for working with NDN, and NFD (NDN Forwarding Daemon), which is the actual forwarder. If you want to add a new metadata field to the Data packets, you will have to change both of them. Trevor only added numHops, so either branch should just have a couple of commits. Look at these commits to see what was changed, as the process should be similar for any new kind of context.
These repositories are used to create the Docker images for the clients, caches, and servers. They get cloned and set up in the Dockerfile, so if you want to change the URL for some reason, edit the Dockerfile and rebuild the image. We assume that the NDN code is the same for clients, caches, and servers.
Final note, NDN **MUST** be at the version in the fork (0.4.1). If you try to use a newer version of NDN, it will not work. So if you just want to remove numHops, you should take our fork and roll back our commits.
To test the new NDN code, you have to include it in the Docker image. This happens automatically in the Dockerfiles for ndn\_headless-player and ndn\_repo: we clone the git repository and compile it. However, Docker will automatically use the stored result of each command when building to save time. This means that it will see that the git url and build instructions did not change, so it will just use the build results from last time even though the *contents* of the GitHub repo may changed. An easy way to get around this is to put some harmless command in the Dockerfile **above** the NDN clone commands, like `echo "hello world"`. Docker will see that the Dockerfile changed at this line, so it will have to re-execute all of the commands after it as well, which will cause it to pull and build our new NDN code.
NDN is very poorly documented, so if you have any trouble, please do not hesitate to email me. We found that the NDN mailing lists were the most useful resource, so you could try searching through them as well.
# Miscellaneous
## Visualizing the Results
The visualization is done via a Jupyter (i.e. IPython) notebook. For simplicity, it assumes you have a folder on your local machine where you keep all of the previous evaluation runs in separate folders. Within each run's folder, there's another folder with the bandwidth\_traces and the results. For example, say you keep all of the evaluations at ~/evaluations. Then, after a run, you would do:
cd ~/evaluations
mkdir <name of the run. The name itself isn't important but, each run should have a unique name>
cd <name of the run>
mkdir bandwidth_traces
mkdir results
cp -r ~/cba-pipeline/containernet/results/* results/
cp -r ~/cba-pipeline/containernet/ndn-experiment/bandwidth_traces/* bandwidth_traces/
Now open Visualizations.ipynb. In the very first cell, specify the <name of the run>, the weight set you want to visualize, and whether you want to visualize the memory or non-memory results, along with directory paths, etc. The main visualizations will be made. At the end is a miscellaneous section to produce data for an additional visualization, the fairness graph, and tables in the paper.
The fairness graph measures how well two clients share a common link, and is made by running
python3 make_fairness_graph.py
Based on the weights/mem configuration used to output the fairness graph data, the filename of the data will be different. Edit make_fairness_graph.py and change the suffix to be correct.
## Generating Bandwidth Traces
In containernet/ndn-experiment/bandwidth_traces, there is a Python file, `generate_traces.py`, which can be used to generate bandwidth traces. Edit this file to generate as many values and files as you want or change the distribution they're drawn from. Then create the traces with
python3 generate_traces.py
## Generating Full Topologies
You are free to use any topology you like (consult with Amr first), but this is the way we generate topologies with several clients/caches/servers that are all randomly attached to each other. In util/, the `generate_topology.py` script will create a semi-random undirected graph produce a picture of it. This graph is basically a network, and the clients, caches, and servers will be represented by different colors. The text output will tell you which things to connect to one another, and the image `ndn-topo.png` will show what the network looks like. Now you can edit `docker_ndn.py` and add your generated topology by hand, based on this output.
Note that topologies have to be fairly small since each node is a Docker instance, so the computer may run out of space. The values that are currently in `generate_topology.py` where about as high as we could get them on a standard machine.
## Working inside clients
When doing a lot of C++ stuff, it's sometimes faster to try things from within the client instance. If you kill start.sh while it's streaming something (e.g. by pressing Ctrl+c or Ctrl+\), the teardown code won't be called, so the Docker instances will persist indefinitely. From within Vagrant, you can then list all of the Docker instances and ssh into them:
docker ps
docker exec -it <name of container> /bin/bash
Once inside, you can edit, compile, and run the code to test it without having to wait for Docker to set up and tear down. We have two useful scripts for recompiling ndn-icp-download and qtsampleplayer from within a client called `rebuild-ndn-icp.sh` and `rebuild-player.sh`, which should be in the root of the client. There's another script `restart.sh`, that optionally rebuilds either of these things and restarts the player, so a frequent workflow would be to change something, call one of these scripts, then keep tweaking if it didn't compile, and finally make our changes in the local files before restarting the pipeline. Note that the code only exists within this Docker instance; it's not connected to the real file. So if you change something in the adaptation algorithms, once you kill Docker, your changes will be lost. Just make the same changes in your local file and you'll be good.
| 22,459 | 106.980769 | 1,115 |
md
|
cba-pipeline-public
|
cba-pipeline-public-master/util/generate_topology.py
|
from graph_tool.all import *
import numpy as np
import math

# Topology size parameters.
NUM_CLIENTS = 7
NUM_CACHES = 5
NUM_SERVERS = 4
# Caches reserved as "internal" (never directly attached to a client).
NUM_INTERNAL_CACHES = 0

# Create the graph
g = Graph(directed=False)
clients = [g.add_vertex() for _ in range(NUM_CLIENTS)]
caches = [g.add_vertex() for _ in range(NUM_CACHES)]
servers = [g.add_vertex() for _ in range(NUM_SERVERS)]
# Per-vertex properties used by graph_draw below: node_type selects the fill
# colour (0 = server, 1 = client, 2 = cache), type_idx is the node's index
# within its own category and is rendered as the vertex label.
node_type = g.new_vertex_property('int')
type_idx = g.new_vertex_property('int')

# Randomly attach caches to each other
for i, ca1 in enumerate(caches):
    node_type[ca1] = 2
    type_idx[ca1] = i
    for j, ca2 in enumerate(caches[i+1:]):
        # Each unordered pair of caches is linked with probability 0.5.
        if np.random.uniform(low=0, high=1) > 0.5:
            g.add_edge(ca1, ca2)
            # j indexes the slice caches[i+1:], so the partner's real
            # index is j + i + 1.
            print('cache {} connected to cache {}'.format(i, j+i+1))

# Attach each server to a cache (1-to-1)
# NOTE(review): despite the comment this is 2-to-1 — servers i and i+1 both
# attach to caches[floor(i/2)], i.e. caches 0..NUM_SERVERS/2-1 get servers.
for i, s in enumerate(servers):
    g.add_edge(s, caches[math.floor(i/2)])
    node_type[s] = 0
    type_idx[s] = i
    print('server {} connected to cache {}'.format(i, math.floor(i/2)))

# Attach each client to a random cache that does NOT have a server attached to it
# Also limit ourselves so there are a few internal caches
for i, cl in enumerate(clients):
    # Uniform draw over cache indices [NUM_SERVERS/2, NUM_CACHES - NUM_INTERNAL_CACHES),
    # i.e. caches that have no server and are not reserved as internal.
    cache = math.floor(np.random.uniform(low=math.floor(NUM_SERVERS/2), high=NUM_CACHES-NUM_INTERNAL_CACHES))
    g.add_edge(cl, caches[cache])
    node_type[cl] = 1
    type_idx[cl] = i
    print('client {} connected to cache {}'.format(i, cache))

# NOTE(review): unused helper — appears to be leftover debug code.
def test(v_idx):
    return 'su' + str(v_idx)

# Display the result
graph_draw(g, vertex_text=type_idx, vertex_font_size=10,
           output_size=(500,500), vertex_color=[1,1,1,0],
           vertex_fill_color=node_type, output="ndn-topo.png",
           edge_end_marker='circle', edge_start_marker='circle',
           edge_pen_width=2)
| 1,729 | 33.6 | 109 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/setup.py
|
#!/usr/bin/env python
"Setuptools params"
from setuptools import setup, find_packages
from os.path import join
# Get version number from source tree
import sys
sys.path.append('.')
from mininet.net import VERSION
scripts = [join('bin', filename) for filename in ['mn']]
modname = distname = 'mininet'
setup(
name=distname,
version=VERSION.replace("d", ""),
description='Process-based OpenFlow emulator',
author='Bob Lantz',
author_email='rlantz@cs.stanford.edu',
packages=['mininet', 'mininet.examples'],
long_description="""
Mininet is a network emulator which uses lightweight
virtualization to create virtual networks for rapid
prototyping of Software-Defined Network (SDN) designs
using OpenFlow. http://mininet.org
""",
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: System :: Emulators",
],
keywords='networking emulator protocol Internet OpenFlow SDN',
license='BSD',
install_requires=[
'setuptools',
'urllib3',
'docker==2.1.0',
'python-iptables',
'pytest'
],
scripts=scripts,
zip_safe=False,
)
| 1,323 | 26.020408 | 66 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/mnexec.c
|
/* mnexec: execution utility for mininet
*
* Starts up programs and does things that are slow or
* difficult in Python, including:
*
* - closing all file descriptors except stdin/out/error
* - detaching from a controlling tty using setsid
* - running in network and mount namespaces
* - printing out the pid of a process so we can identify it later
* - attaching to a namespace and cgroup
* - setting RT scheduling
*
* Partially based on public domain setsid(1)
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <linux/sched.h>
#include <unistd.h>
#include <limits.h>
#include <syscall.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sched.h>
#include <ctype.h>
#include <sys/mount.h>
#if !defined(VERSION)
#define VERSION "(devel)"
#endif
/* Print command-line help for mnexec to stdout. */
void usage(char *name)
{
    /* The header interpolates the invoking program name; the option
     * list is fixed text, so emit it with fputs (no format parsing). */
    printf("Execution utility for Mininet\n\n"
           "Usage: %s [-cdnp] [-a pid] [-g group] [-r rtprio] cmd args...\n\n",
           name);
    fputs("Options:\n"
          " -c: close all file descriptors except stdin/out/error\n"
          " -d: detach from tty by calling setsid()\n"
          " -n: run in new network and mount namespaces\n"
          " -p: print ^A + pid\n"
          " -a pid: attach to pid's network and mount namespaces\n"
          " -g group: add to cgroup\n"
          " -r rtprio: run with SCHED_RR (usually requires -g)\n"
          " -v: print version\n",
          stdout);
}
/* Thin wrapper invoking the setns(2) system call directly — presumably
 * because the libc this was built against did not yet provide a
 * wrapper; TODO confirm before removing. Attaches the calling process
 * to the namespace referred to by fd. */
int setns(int fd, int nstype)
{
    return syscall(__NR_setns, fd, nstype);
}
/* Validate alphanumeric path foo1/bar2/baz
 *
 * Exits with status 1 unless `path` consists solely of ASCII
 * alphanumerics and '/' separators.  Used by cgroup() to sanitise the
 * group name before interpolating it into a /sys/fs/cgroup path.
 */
void validate(char *path)
{
    char *s;
    for (s=path; *s; s++) {
        /* Fix: cast to unsigned char — passing a plain (possibly
         * negative) char to isalnum() is undefined behaviour for
         * bytes >= 0x80. */
        if (!isalnum((unsigned char)*s) && *s != '/') {
            fprintf(stderr, "invalid path: %s\n", path);
            exit(1);
        }
    }
}
/* Add our pid to cgroup
 *
 * Writes this process's pid into the "tasks" file of cgroup `gname`
 * under each of the cpu, cpuacct and cpuset controllers.  Controllers
 * without that cgroup are silently skipped; if none accepted the pid,
 * exits with status 1. */
void cgroup(char *gname)
{
    /* static: large PATH_MAX buffer kept off the stack. */
    static char path[PATH_MAX];
    static char *groups[] = {
        "cpu", "cpuacct", "cpuset", NULL
    };
    char **gptr;
    pid_t pid = getpid();
    int count = 0;
    /* Reject names that could escape the /sys/fs/cgroup prefix. */
    validate(gname);
    for (gptr = groups; *gptr; gptr++) {
        FILE *f;
        snprintf(path, PATH_MAX, "/sys/fs/cgroup/%s/%s/tasks",
                 *gptr, gname);
        f = fopen(path, "w");
        if (f) {
            count++;
            fprintf(f, "%d\n", pid);
            fclose(f);
        }
    }
    if (!count) {
        /* No controller had this cgroup: treat as a fatal error. */
        fprintf(stderr, "cgroup: could not add to cgroup %s\n",
                gname);
        exit(1);
    }
}
/* Entry point: options are parsed left-to-right and each takes effect
 * immediately (order matters, e.g. -a must precede the exec).  After
 * the options, the remaining argv is exec'd as the command to run. */
int main(int argc, char *argv[])
{
    int c;
    int fd;
    char path[PATH_MAX];
    int nsid;
    int pid;
    /* Remember the caller's cwd so we can restore it after -a chroots
     * or switches mount namespaces. */
    char *cwd = get_current_dir_name();
    /* static: zero-initialised sched_param for -r. */
    static struct sched_param sp;
    /* Leading '+' in the optstring: stop at the first non-option so
     * the target command's own flags are left untouched. */
    while ((c = getopt(argc, argv, "+cdnpa:g:r:vh")) != -1)
        switch(c) {
        case 'c':
            /* close file descriptors except stdin/out/error */
            for (fd = getdtablesize(); fd > 2; fd--)
                close(fd);
            break;
        case 'd':
            /* detach from tty */
            /* If we are the process-group leader, setsid() would fail,
             * so fork and let the child (a non-leader) detach. */
            if (getpgrp() == getpid()) {
                switch(fork()) {
                    case -1:
                        perror("fork");
                        return 1;
                    case 0:     /* child */
                        break;
                    default:    /* parent */
                        return 0;
                }
            }
            setsid();
            break;
        case 'n':
            /* run in network and mount namespaces */
            if (unshare(CLONE_NEWNET|CLONE_NEWNS) == -1) {
                perror("unshare");
                return 1;
            }

            /* Mark our whole hierarchy recursively as private, so that our
             * mounts do not propagate to other processes.
             */

            if (mount("none", "/", NULL, MS_REC|MS_PRIVATE, NULL) == -1) {
                perror("remount");
                return 1;
            }

            /* mount sysfs to pick up the new network namespace */
            if (mount("sysfs", "/sys", "sysfs", MS_MGC_VAL, NULL) == -1) {
                perror("mount");
                return 1;
            }
            break;
        case 'p':
            /* print pid */
            /* \001 (^A) prefix lets the Python side recognise the pid
             * line in the child's output stream. */
            printf("\001%d\n", getpid());
            fflush(stdout);
            break;
        case 'a':
            /* Attach to pid's network namespace and mount namespace */
            pid = atoi(optarg);
            sprintf(path, "/proc/%d/ns/net", pid);
            nsid = open(path, O_RDONLY);
            if (nsid < 0) {
                perror(path);
                return 1;
            }
            if (setns(nsid, 0) != 0) {
                perror("setns");
                return 1;
            }
            /* Plan A: call setns() to attach to mount namespace */
            sprintf(path, "/proc/%d/ns/mnt", pid);
            nsid = open(path, O_RDONLY);
            if (nsid < 0 || setns(nsid, 0) != 0) {
                /* Plan B: chroot/chdir into pid's root file system */
                sprintf(path, "/proc/%d/root", pid);
                if (chroot(path) < 0) {
                    perror(path);
                    return 1;
                }
            }
            /* chdir to correct working directory */
            if (chdir(cwd) != 0) {
                perror(cwd);
                return 1;
            }
            break;
        case 'g':
            /* Attach to cgroup */
            cgroup(optarg);
            break;
        case 'r':
            /* Set RT scheduling priority */
            sp.sched_priority = atoi(optarg);
            if (sched_setscheduler(getpid(), SCHED_RR, &sp) < 0) {
                perror("sched_setscheduler");
                return 1;
            }
            break;
        case 'v':
            printf("%s\n", VERSION);
            exit(0);
        case 'h':
            usage(argv[0]);
            exit(0);
        default:
            usage(argv[0]);
            exit(1);
        }

    /* Whatever follows the options is the command to exec in place of
     * this process; reaching past execvp means it failed. */
    if (optind < argc) {
        execvp(argv[optind], &argv[optind]);
        perror(argv[optind]);
        return 1;
    }

    /* No command given: show help and fall through. */
    usage(argv[0]);

    return 0;
}
| 6,024 | 26.764977 | 78 |
c
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-experiment/trafficControl.py
|
import time
import re
import pandas as pd
import re
from dynamicLinkChange import *
import locks
TRACE = 'client1' # TODO remove, simply due to trace filenames
def traffic_control(to_host, switch, epoch=-1):
    """Replay a pre-recorded bandwidth trace onto the switch->host link.

    Reads 'bandwidth_traces/<TRACE>_epoch<epoch>.csv', where each row is
    '<bandwidth in MBps>,<duration in seconds>', and applies each
    bandwidth for its duration via reconfigureConnection().  Stops early
    as soon as locks.STOP_TC is set by the experiment driver.

    Args:
        to_host: node on the receiving end of the shaped link.
        switch: switch whose outgoing interface is reshaped.
        epoch: evaluation epoch; selects which trace file to replay.
    """
    print('=======================================')
    print('Starting trace playback')
    print(epoch)
    with open('bandwidth_traces/{}_epoch{}.csv'.format(TRACE, epoch), 'r') as f:
        # Fix: skip blank lines (e.g. a trailing newline) and malformed
        # rows — the original unpacked every split() result into
        # (bw, bw_period) and crashed with ValueError on a blank line.
        lines = [parts
                 for parts in (line.strip().split(',') for line in f)
                 if len(parts) == 2]
    for bw, bw_period in lines:
        if locks.STOP_TC:
            break
        print(time.time())
        reconfigureConnection(to_host, switch, float(bw))
        time.sleep(float(bw_period))
    print('Done playing bandwidth trace.')
    print('=======================================')
    # Kept for compatibility with the original (returned an empty tuple).
    return ()
| 858 | 32.038462 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-experiment/topology.py
|
"""
FROM https://raw.githubusercontent.com/icarus-sim/icarus/master/icarus/scenarios/topology.py
Modified by Denny Stohr
Functions for creating or importing topologies for experiments.
To create a custom topology, create a function returning an instance of the
`IcnTopology` class. An IcnTopology is simply a subclass of a Topology class
provided by FNSS.
A valid ICN topology must have the following attributes:
* Each node must have one stack among: source, receiver, router
* The topology must have an attribute called `icr_candidates` which is a set
of router nodes on which a cache may be possibly deployed. Caches are not
deployed directly at topology creation, instead they are deployed by a
cache placement algorithm.
"""
from __future__ import division
from os import path
import networkx as nx
import fnss
__all__ = [
'IcnTopology',
'topology_tree',
'topology_path',
'topology_ring',
'topology_mesh',
'topology_geant',
'topology_tiscali',
'topology_wide',
'topology_garr',
'topology_rocketfuel_latency'
]
# Delays
# These values are suggested by this Computer Networks 2011 paper:
# http://www.cs.ucla.edu/classes/winter09/cs217/2011CN_NameRouting.pdf
# which is citing as source of this data, measurements from this IMC'06 paper:
# http://www.mpi-sws.org/~druschel/publications/ds2-imc.pdf
INTERNAL_LINK_DELAY = 2
EXTERNAL_LINK_DELAY = 34
# Path where all topologies are stored
TOPOLOGY_RESOURCES_DIR = '/ndn/tz/'
class IcnTopology(fnss.Topology):
    """An ICN topology: an FNSS Topology with helper queries for the
    caching nodes, content sources and receivers deployed on it.

    Node roles are read from the fnss 'stack' node attribute, where
    stack[0] is the role name and stack[1] its property dict.
    """
    def cache_nodes(self):
        """Return a dict mapping each node that has a cache to its
        configured cache size.

        Returns
        -------
        cache_nodes : dict
            Node identifier -> cache size
        """
        caches = {}
        for node in self:
            attrs = self.node[node]
            if 'stack' in attrs and 'cache_size' in attrs['stack'][1]:
                caches[node] = attrs['stack'][1]['cache_size']
        return caches

    def sources(self):
        """Return the set of nodes whose stack role is 'source'.

        Returns
        -------
        sources : set
        """
        return {node for node in self
                if 'stack' in self.node[node]
                and self.node[node]['stack'][0] == 'source'}

    def receivers(self):
        """Return the set of nodes whose stack role is 'receiver'.

        Returns
        -------
        receivers : set
        """
        return {node for node in self
                if 'stack' in self.node[node]
                and self.node[node]['stack'][0] == 'receiver'}
def topology_dash():
    """Small fixed DASH evaluation topology: three switches in a triangle,
    one cache per switch, two clients, and one server.

    NOTE(review): the original body called ``topology.link(...)`` for every
    connection after the first; fnss.Topology (a networkx graph) has no
    ``link`` method, so this raised AttributeError at runtime.  All
    connections now use ``add_edge``, matching the first connection.
    """
    caches = ['ca4', 'ca5', 'ca6']
    clients = ['c7', 'c8']
    servers = ['so9']
    switches = ['s1', 's2', 's3']
    topology = fnss.topologies.Topology()
    for v in switches:
        topology.add_node(v)
        topology.node[v]['type'] = 'switch'
        topology.node[v]['ndntype'] = 'switch'
        fnss.add_stack(topology, v, 'switch')
    for v in servers:
        topology.add_node(v)
        topology.node[v]['type'] = 'host'
        topology.node[v]['ndntype'] = 'server'
        fnss.add_stack(topology, v, 'servers')
    for v in caches:
        topology.add_node(v)
        topology.node[v]['type'] = 'host'
        topology.node[v]['ndntype'] = 'cache'
        fnss.add_stack(topology, v, 'caches')
    for v in clients:
        topology.add_node(v)
        topology.node[v]['type'] = 'host'
        topology.node[v]['ndntype'] = 'client'
        fnss.add_stack(topology, v, 'reciver')  # sic: matches stack name used elsewhere in this file
    # Attach caches/clients/server to their switches, then close the
    # switch triangle.
    edges = [
        (caches[0], switches[0]),
        (caches[1], switches[1]),
        (caches[2], switches[2]),
        (clients[0], switches[0]),
        (clients[1], switches[2]),
        (servers[0], switches[1]),
        (switches[0], switches[1]),
        (switches[1], switches[2]),
        (switches[2], switches[0]),
    ]
    for u, w in edges:
        topology.add_edge(u, w)
    # TODO Set delays according to path type
    # fnss.set_weights_constant(topology, 1.0)
    # fnss.set_delays_constant(topology, delay, 'ms')
    return IcnTopology(topology)
def topology_tree(k, h, delay=1, **kwargs):
    """Build a k-ary tree topology: source at the root (depth 0),
    receivers at the leaves (depth h), switches everywhere in between.

    Parameters
    ----------
    h : int
        The height of the tree
    k : int
        The branching factor of the tree
    delay : float
        The link delay in milliseconds

    Returns
    -------
    topology : IcnTopology
        The topology object
    """
    topology = fnss.k_ary_tree_topology(k, h)
    # Partition nodes by their depth annotation.
    receivers = [v for v in topology.nodes_iter()
                 if topology.node[v]['depth'] == h]
    sources = [v for v in topology.nodes_iter()
               if topology.node[v]['depth'] == 0]
    routers = [v for v in topology.nodes_iter()
               if 0 < topology.node[v]['depth'] < h]
    topology.graph['icr_candidates'] = set(routers)
    for node in sources:
        topology.node[node]['type'] = 'host'
        fnss.add_stack(topology, node, 'source')  # source
    for node in receivers:
        topology.node[node]['type'] = 'host'
        fnss.add_stack(topology, node, 'reciver')  # sic: stack name used throughout this file
    for node in routers:
        topology.node[node]['type'] = 'switch'
        fnss.add_stack(topology, node, 'switch')
    # Uniform weights and delays on every link.
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, delay, 'ms')
    # (Links intentionally left unlabelled as internal/external here.)
    return IcnTopology(topology)
def topology_path(n, delay=1, **kwargs):
    """Return a line topology of n nodes with a receiver on node `0`,
    routers on odd-indexed nodes and sources on even-indexed nodes >= 2.

    NOTE(review): the original docstring claimed a single source at node
    n-1; the code below actually assigns every even node from 2 upward as
    a source — kept as-is, docstring corrected.

    Parameters
    ----------
    n : int (>=3)
        The number of nodes
    delay : float
        The link delay in milliseconds
    Returns
    -------
    topology : IcnTopology
        The topology object
    """
    topology = fnss.line_topology(n)
    receivers = [0]
    # xrange: this module targets Python 2 (see also zip-as-list usage below).
    routers = xrange(1, n - 1, 2)
    sources = xrange(2, n, 2) #routers inbetween
    topology.graph['icr_candidates'] = set(routers)
    for v in sources:
        topology.node[v]['type'] = 'host'
        fnss.add_stack(topology, v, 'source')
    for v in receivers:
        topology.node[v]['type'] = 'host'
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        topology.node[v]['type'] = 'switch'
        fnss.add_stack(topology, v, 'router')
    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, delay, 'ms')
    # label links as internal or external
    for u, v in topology.edges_iter():
        topology.edge[u][v]['type'] = 'internal'
    return IcnTopology(topology)
def topology_ring(n, delay_int=1, delay_ext=5, **kwargs):
    """Build a metro-ring ICN topology.

    A ring of *n* routers, each with one attached receiver, plus a single
    source attached to router 0 — 2n + 1 nodes in total.  Router-receiver
    links are labelled internal; the router-source link is external.

    Parameters
    ----------
    n : int
        The number of routers in the ring
    delay_int : float
        The internal link delay in milliseconds
    delay_ext : float
        The external link delay in milliseconds

    Returns
    -------
    topology : IcnTopology
        The topology object
    """
    topology = fnss.ring_topology(n)
    routers = range(n)
    receivers = range(n, 2 * n)
    source = 2 * n
    # Router i is paired with receiver n + i.
    internal_links = [(r, r + n) for r in routers]
    external_links = [(routers[0], source)]
    for u, v in internal_links:
        topology.add_edge(u, v, type='internal')
    for u, v in external_links:
        topology.add_edge(u, v, type='external')
    topology.graph['icr_candidates'] = set(routers)
    fnss.add_stack(topology, source, 'source')
    for node in receivers:
        fnss.add_stack(topology, node, 'receiver')
    for node in routers:
        fnss.add_stack(topology, node, 'router')
    # Uniform weights; per-class delays on internal vs external links.
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, delay_int, 'ms', internal_links)
    fnss.set_delays_constant(topology, delay_ext, 'ms', external_links)
    return IcnTopology(topology)
def topology_mesh(n, m, delay_int=1, delay_ext=5, **kwargs):
    """Build a full-mesh ICN topology.

    A full mesh of *n* routers, each with one attached receiver, plus *m*
    sources attached to the first *m* routers — 2n + m nodes in total.

    Parameters
    ----------
    n : int
        The number of routers in the mesh
    m : int
        The number of sources (must satisfy m <= n)
    delay_int : float
        The internal link delay in milliseconds
    delay_ext : float
        The external link delay in milliseconds

    Returns
    -------
    topology : IcnTopology
        The topology object
    """
    if m > n:
        raise ValueError("m cannot be greater than n")
    topology = fnss.full_mesh_topology(n)
    routers = range(n)
    receivers = range(n, 2 * n)
    sources = range(2 * n, 2 * n + m)
    # Router i <-> receiver n+i; router i <-> source 2n+i for i < m.
    internal_links = [(i, i + n) for i in routers]
    external_links = [(i, 2 * n + i) for i in range(m)]
    for u, v in internal_links:
        topology.add_edge(u, v, type='internal')
    for u, v in external_links:
        topology.add_edge(u, v, type='external')
    topology.graph['icr_candidates'] = set(routers)
    for node in sources:
        topology.node[node]['type'] = 'host'
        fnss.add_stack(topology, node, 'source')
    for node in receivers:
        topology.node[node]['type'] = 'host'
        fnss.add_stack(topology, node, 'receiver')
    for node in routers:
        topology.node[node]['type'] = 'switch'
        fnss.add_stack(topology, node, 'router')
    # Uniform weights; per-class delays on internal vs external links.
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, delay_int, 'ms', internal_links)
    fnss.set_delays_constant(topology, delay_ext, 'ms', external_links)
    return IcnTopology(topology)
#def topology_lamdanet():
# """LambdaNet"""
def topology_geant(**kwargs):
    """Return a scenario based on the GEANT (2012) Topology Zoo graph.

    Roles are assigned by node degree: degree-1 nodes become receivers,
    degree-2 nodes get an artificial source attached, nodes with degree > 2
    are ICR (cache placement) candidates; everything else is a router.

    Parameters
    ----------
    seed : int, optional
        The seed used for random number generation
    Returns
    -------
    topology : fnss.Topology
        The topology object
    """
    # 240 nodes in the main component
    topology = fnss.parse_topology_zoo(path.join(TOPOLOGY_RESOURCES_DIR,
                                                 'Geant2012.graphml')
                                       ).to_undirected()
    # Keep only the largest connected component.
    topology = list(nx.connected_component_subgraphs(topology))[0]
    deg = nx.degree(topology)
    receivers = [v for v in topology.nodes() if deg[v] == 1] # 8 nodes
    icr_candidates = [v for v in topology.nodes() if deg[v] > 2] # 19 nodes
    # attach sources to topology
    source_attachments = [v for v in topology.nodes() if deg[v] == 2] # 13 nodes
    sources = []
    for v in source_attachments:
        u = v + 1000 # node ID of source
        topology.add_edge(v, u)
        sources.append(u)
    routers = [v for v in topology.nodes() if v not in sources + receivers]
    # add stacks to nodes
    topology.graph['icr_candidates'] = set(icr_candidates)
    for v in sources:
        fnss.add_stack(topology, v, 'source')
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        fnss.add_stack(topology, v, 'router')
    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
    # label links as internal or external
    for u, v in topology.edges_iter():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # Huge weight prevents sources from being used as transit for
            # traffic between other nodes.
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
    return IcnTopology(topology)
def topology_tiscali(**kwargs):
    """Return a scenario based on Tiscali topology, parsed from RocketFuel dataset.

    Roles are degree-based: high-degree nodes (>= 6) become ICR candidates;
    degree-1 nodes become sources or receivers depending on how well
    connected their single neighbour is.

    Parameters
    ----------
    seed : int, optional
        The seed used for random number generation
    Returns
    -------
    topology : fnss.Topology
        The topology object
    """
    # 240 nodes in the main component
    topology = fnss.parse_rocketfuel_isp_map(path.join(TOPOLOGY_RESOURCES_DIR,
                                                       '3257.r0.cch')
                                             ).to_undirected()
    # Keep only the largest connected component.
    topology = list(nx.connected_component_subgraphs(topology))[0]
    # degree of nodes
    deg = nx.degree(topology)
    # nodes with degree = 1
    onedeg = [v for v in topology.nodes() if deg[v] == 1] # they are 80
    # we select as caches nodes with highest degrees
    # we use as min degree 6 --> 36 nodes
    # If we changed min degrees, that would be the number of caches we would have:
    # Min degree    N caches
    #  2               160
    #  3               102
    #  4                75
    #  5                50
    #  6                36
    #  7                30
    #  8                26
    #  9                19
    # 10                16
    # 11                12
    # 12                11
    # 13                 7
    # 14                 3
    # 15                 3
    # 16                 2
    icr_candidates = [v for v in topology.nodes() if deg[v] >= 6] # 36 nodes
    # sources are node with degree 1 whose neighbor has degree at least equal to 5
    # we assume that sources are nodes connected to a hub
    # they are 44
    sources = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] > 4.5] # they are
    # receivers are node with degree 1 whose neighbor has degree at most equal to 4
    # we assume that receivers are nodes not well connected to the network
    # they are 36
    receivers = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] < 4.5]
    # we set router stacks because some strategies will fail if no stacks
    # are deployed
    routers = [v for v in topology.nodes() if v not in sources + receivers]
    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
    # Deploy stacks
    topology.graph['icr_candidates'] = set(icr_candidates)
    for v in sources:
        fnss.add_stack(topology, v, 'source')
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        fnss.add_stack(topology, v, 'router')
    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # Huge weight prevents sources from being used to route transit
            # traffic.
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
    return IcnTopology(topology)
def topology_wide(**kwargs):
    """Return a scenario based on the WIDE (Japan) Topology Zoo graph.

    Node roles are hard-coded by node id (derived from the WideJpn.graphml
    file): border nodes are sources, degree-1 internal nodes are receivers,
    everything else is a router / ICR candidate.

    Parameters
    ----------
    seed : int, optional
        The seed used for random number generation
    Returns
    -------
    topology : fnss.Topology
        The topology object
    """
    topology = fnss.parse_topology_zoo(path.join(TOPOLOGY_RESOURCES_DIR, 'WideJpn.graphml')).to_undirected()
    # sources are nodes representing neighbouring AS's
    sources = [9, 8, 11, 13, 12, 15, 14, 17, 16, 19, 18]
    # receivers are internal nodes with degree = 1
    receivers = [27, 28, 3, 5, 4, 7]
    # caches are all remaining nodes --> 27 caches
    routers = [n for n in topology.nodes() if n not in receivers + sources]
    # All routers can be upgraded to ICN functionalities
    icr_candidates = routers
    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
    # Deploy stacks
    topology.graph['icr_candidates'] = set(icr_candidates)
    for v in sources:
        fnss.add_stack(topology, v, 'source')
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        fnss.add_stack(topology, v, 'router')
    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # Huge weight prevents sources from being used to route transit
            # traffic.
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
    return IcnTopology(topology)
def topology_garr(**kwargs):
    """Return a scenario based on the GARR (Italy, 2012) Topology Zoo graph.

    Node roles are hard-coded by node id (derived from Garr201201.graphml):
    border nodes are sources, degree-1 internal nodes are receivers, the
    rest are routers / ICR candidates.

    Parameters
    ----------
    seed : int, optional
        The seed used for random number generation
    Returns
    -------
    topology : fnss.Topology
        The topology object
    """
    topology = fnss.parse_topology_zoo(path.join(TOPOLOGY_RESOURCES_DIR, 'Garr201201.graphml')).to_undirected()
    # sources are nodes representing neighbouring AS's
    sources = [0, 2, 3, 5, 13, 16, 23, 24, 25, 27, 51, 52, 54]
    # receivers are internal nodes with degree = 1
    receivers = [1, 7, 8, 9, 11, 12, 19, 26, 28, 30, 32, 33, 41, 42, 43, 47, 48, 50, 53, 57, 60]
    # caches are all remaining nodes --> 27 caches
    routers = [n for n in topology.nodes() if n not in receivers + sources]
    icr_candidates = routers
    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
    # Deploy stacks
    topology.graph['icr_candidates'] = set(icr_candidates)
    for v in sources:
        fnss.add_stack(topology, v, 'source')
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        fnss.add_stack(topology, v, 'router')
    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # Huge weight prevents sources from being used to route transit
            # traffic.
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
    return IcnTopology(topology)
def topology_garr2(**kwargs):
    """Return a scenario based on GARR topology.

    Differently from plain GARR, in this topology some receivers are appended
    to routers and only a subset of routers which are actually on the path of
    some traffic are selected to become ICN routers. These changes make this
    topology more realistic.

    Parameters
    ----------
    seed : int, optional
        The seed used for random number generation
    Returns
    -------
    topology : fnss.Topology
        The topology object
    """
    topology = fnss.parse_topology_zoo(path.join(TOPOLOGY_RESOURCES_DIR, 'Garr201201.graphml')).to_undirected()
    # sources are nodes representing neighbouring AS's
    sources = [0, 2, 3, 5, 13, 16, 23, 24, 25, 27, 51, 52, 54]
    # receivers are internal nodes with degree = 1
    receivers = [1, 7, 8, 9, 11, 12, 19, 26, 28, 30, 32, 33, 41, 42, 43, 47, 48, 50, 53, 57, 60]
    # routers are all remaining nodes --> 27 caches
    routers = [n for n in topology.nodes_iter() if n not in receivers + sources]
    # One artificial receiver (ids 1000+) is attached to every router.
    artificial_receivers = list(range(1000, 1000 + len(routers)))
    for i in range(len(routers)):
        topology.add_edge(routers[i], artificial_receivers[i])
    receivers += artificial_receivers
    # Caches to nodes with degree > 3 (after adding artificial receivers)
    degree = nx.degree(topology)
    icr_candidates = [n for n in topology.nodes_iter() if degree[n] > 3.5]
    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
    # Deploy stacks
    topology.graph['icr_candidates'] = set(icr_candidates)
    for v in sources:
        fnss.add_stack(topology, v, 'source')
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        fnss.add_stack(topology, v, 'router')
    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # Huge weight prevents sources from being used to route transit
            # traffic.
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
    return IcnTopology(topology)
def topology_geant2(**kwargs):
    """Return a scenario based on GEANT topology.

    Differently from plain GEANT, in this topology some receivers are appended
    to routers and only a subset of routers which are actually on the path of
    some traffic are selected to become ICN routers (top 50% by betweenness
    centrality). These changes make this topology more realistic.

    Parameters
    ----------
    seed : int, optional
        The seed used for random number generation
    Returns
    -------
    topology : fnss.Topology
        The topology object
    """
    # 53 nodes
    topology = fnss.parse_topology_zoo(path.join(TOPOLOGY_RESOURCES_DIR,
                                                 'Geant2012.graphml')
                                       ).to_undirected()
    # Keep only the largest connected component.
    topology = list(nx.connected_component_subgraphs(topology))[0]
    deg = nx.degree(topology)
    receivers = [v for v in topology.nodes() if deg[v] == 1] # 8 nodes
    # attach sources to topology
    source_attachments = [v for v in topology.nodes() if deg[v] == 2] # 13 nodes
    sources = []
    for v in source_attachments:
        u = v + 1000 # node ID of source
        topology.add_edge(v, u)
        sources.append(u)
    routers = [v for v in topology.nodes() if v not in sources + receivers]
    # Put caches in nodes with top betweenness centralities
    betw = nx.betweenness_centrality(topology)
    routers = sorted(routers, key=lambda k: betw[k])
    # Select as ICR candidates the top 50% routers for betweenness centrality
    icr_candidates = routers[len(routers) // 2:]
    # add stacks to nodes
    topology.graph['icr_candidates'] = set(icr_candidates)
    for v in sources:
        fnss.add_stack(topology, v, 'source')
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        fnss.add_stack(topology, v, 'router')
    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
    # label links as internal or external
    for u, v in topology.edges_iter():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # Huge weight prevents sources from being used to route transit
            # traffic.
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
    return IcnTopology(topology)
def topology_tiscali2(**kwargs):
    """Return a scenario based on Tiscali topology, parsed from RocketFuel dataset.

    Differently from plain Tiscali, in this topology some receivers are appended
    to routers and the ICR candidate set is manually adjusted (some nodes
    removed, others added) to tune the betweenness centrality of caches.

    Parameters
    ----------
    seed : int, optional
        The seed used for random number generation
    Returns
    -------
    topology : fnss.Topology
        The topology object
    """
    # 240 nodes in the main component
    topology = fnss.parse_rocketfuel_isp_map(path.join(TOPOLOGY_RESOURCES_DIR,
                                                       '3257.r0.cch')
                                             ).to_undirected()
    # Keep only the largest connected component.
    topology = list(nx.connected_component_subgraphs(topology))[0]
    # degree of nodes
    deg = nx.degree(topology)
    # nodes with degree = 1
    onedeg = [v for v in topology.nodes() if deg[v] == 1] # they are 80
    # we select as caches nodes with highest degrees
    # we use as min degree 6 --> 36 nodes
    # If we changed min degrees, that would be the number of caches we would have:
    # Min degree    N caches
    #  2               160
    #  3               102
    #  4                75
    #  5                50
    #  6                36
    #  7                30
    #  8                26
    #  9                19
    # 10                16
    # 11                12
    # 12                11
    # 13                 7
    # 14                 3
    # 15                 3
    # 16                 2
    icr_candidates = [v for v in topology.nodes() if deg[v] >= 6] # 36 nodes
    # Add remove caches to adapt betweenness centrality of caches
    for i in [181, 208, 211, 220, 222, 250, 257]:
        icr_candidates.remove(i)
    icr_candidates.extend([232, 303, 326, 363, 378])
    # sources are node with degree 1 whose neighbor has degree at least equal to 5
    # we assume that sources are nodes connected to a hub
    # they are 44
    sources = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] > 4.5] # they are
    # receivers are node with degree 1 whose neighbor has degree at most equal to 4
    # we assume that receivers are nodes not well connected to the network
    # they are 36
    receivers = [v for v in onedeg if deg[list(topology.edge[v].keys())[0]] < 4.5]
    # we set router stacks because some strategies will fail if no stacks
    # are deployed
    routers = [v for v in topology.nodes() if v not in sources + receivers]
    # set weights and delays on all links
    fnss.set_weights_constant(topology, 1.0)
    fnss.set_delays_constant(topology, INTERNAL_LINK_DELAY, 'ms')
    # deploy stacks
    topology.graph['icr_candidates'] = set(icr_candidates)
    for v in sources:
        fnss.add_stack(topology, v, 'source')
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        fnss.add_stack(topology, v, 'router')
    # label links as internal or external
    for u, v in topology.edges():
        if u in sources or v in sources:
            topology.edge[u][v]['type'] = 'external'
            # Huge weight prevents sources from being used to route transit
            # traffic.
            fnss.set_weights_constant(topology, 1000.0, [(u, v)])
            fnss.set_delays_constant(topology, EXTERNAL_LINK_DELAY, 'ms', [(u, v)])
        else:
            topology.edge[u][v]['type'] = 'internal'
    return IcnTopology(topology)
def topology_rocketfuel_latency(asn, source_ratio=0.1, ext_delay=EXTERNAL_LINK_DELAY, **kwargs):
    """Parse a generic RocketFuel topology with annotated latencies.

    To each node of the parsed topology an artificial receiver node is
    attached.  To the routers with highest degree a source node is also
    attached.  Link weights are set equal to the annotated link delays.

    Parameters
    ----------
    asn : int
        AS number
    source_ratio : float
        Ratio between number of source nodes (artificially attached) and routers
    ext_delay : float
        Delay on external nodes

    Raises
    ------
    ValueError
        If source_ratio is outside [0, 1].
    """
    if source_ratio < 0 or source_ratio > 1:
        raise ValueError('source_ratio must be comprised between 0 and 1')
    f_topo = path.join(TOPOLOGY_RESOURCES_DIR, 'rocketfuel-latency', str(asn), 'latencies.intra')
    topology = fnss.parse_rocketfuel_isp_latency(f_topo).to_undirected()
    topology = list(nx.connected_component_subgraphs(topology))[0]
    # First mark all current links as inernal
    for u, v in topology.edges_iter():
        topology.edge[u][v]['type'] = 'internal'
    # Note: I don't need to filter out nodes with degree 1 cause they all have
    # a greater degree value but we compute degree to decide where to attach sources
    routers = topology.nodes()
    # Source attachment
    n_sources = int(source_ratio * len(routers))
    sources = ['src_%d' % i for i in range(n_sources)]
    deg = nx.degree(topology)
    # Attach sources based on their degree purely, but they may end up quite clustered
    routers = sorted(routers, key=lambda k: deg[k], reverse=True)
    for i in range(len(sources)):
        topology.add_edge(sources[i], routers[i], delay=ext_delay, type='external')
    # Here let's try attach them via cluster
#    clusters = compute_clusters(topology, n_sources, distance=None, n_iter=1000)
#    source_attachments = [max(cluster, key=lambda k: deg[k]) for cluster in clusters]
#    for i in range(len(sources)):
#        topology.add_edge(sources[i], source_attachments[i], delay=ext_delay, type='external')
    # attach artificial receiver nodes to ICR candidates
    receivers = ['rec_%d' % i for i in range(len(routers))]
    for i in range(len(routers)):
        topology.add_edge(receivers[i], routers[i], delay=0, type='internal')
    # Set weights to latency values
    for u, v in topology.edges_iter():
        topology.edge[u][v]['weight'] = topology.edge[u][v]['delay']
    # Deploy stacks on nodes
    topology.graph['icr_candidates'] = set(routers)
    for v in sources:
        fnss.add_stack(topology, v, 'source')
    for v in receivers:
        fnss.add_stack(topology, v, 'receiver')
    for v in routers:
        fnss.add_stack(topology, v, 'router')
    return IcnTopology(topology)
| 30,191 | 36.182266 | 111 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-experiment/start.sh
|
#!/bin/bash
# Experiment driver: optionally rebuilds the NDN docker images, then runs
# every (epoch, QoE-weight, algorithm) combination of the evaluation via
# docker_ndn.py, cleaning up docker containers and mininet state between runs.

# Useful functions
buildPlayer() {
    echo "Build Player"
    cd /home/ubuntu/containernet/ndn-containers/ndn_headless-player/
    docker-compose build ndn-player
}

buildRepo() {
    echo "Build Repo"
    cd /home/ubuntu/containernet/ndn-containers/ndn_repo/
    docker-compose build --build-arg STORAGE_TYPE=repong-bbb ndn
}

# Ensure results folder exists and is empty
if [ ! -d "/home/ubuntu/containernet/results/" ]; then
    mkdir /home/ubuntu/containernet/results/
fi
if [ ! -z "$(ls -A /home/ubuntu/containernet/results/)" ]; then
    echo "../results directory not empty. Store the results somewhere, then do 'rm ../results/*'. Aborting."
    exit 1
fi

# Build containers if desired (--build-docker builds both images)
if [ "$1" == '--build-docker' ]; then
    buildPlayer
    buildRepo
fi
if [ "$1" == '--build-repo' ]; then
    buildRepo
fi
if [ "$1" == '--build-player' ]; then
    buildPlayer
fi

# Begin the evaluation
echo "Beginning evaluation"
cd /home/ubuntu/containernet/ndn-experiment
# Clear out old ucb objects (per-client learned bandit state)
rm -rf /home/ubuntu/containernet/ucb_objs
mkdir /home/ubuntu/containernet/ucb_objs
# Run each algorithm with various parameters.
# All have topology "large-scale", duration 200s, 20 clients
declare -a qoe_weights_list=("6 2 2" "1 1 3");  # "1 1 10");
declare -a algo_list=("sbuose" "bola" "sbu" "linucb" "p");
for epoch in $(seq 1 5); do
    echo "Epoch $epoch"
    for qoe_weights in "${qoe_weights_list[@]}"; do
        # One state directory per client per QoE weighting ("6 2 2" -> "6_2_2")
        mkdir -p /home/ubuntu/containernet/ucb_objs/client{1..10}/${qoe_weights// /_}
        for algo in "${algo_list[@]}"; do
            # If it's a bandit, try it with memory. Either way, also do it without.
            if [[ "$algo" =~ ^(sbu|sbuose|linucb)$ ]]; then
                # Stop/remove all containers and reset mininet before each run.
                docker stop $(docker ps -aq)
                docker rm $(docker ps -aq)
                mn -c
                echo "Executing $algo, QoE $qoe_weights, memory"
                python /home/ubuntu/containernet/ndn-experiment/docker_ndn.py -a "$algo" -q $(echo "$qoe_weights") -t large-scale -d 200 --memory -e "$epoch"
            fi
            docker stop $(docker ps -aq)
            docker rm $(docker ps -aq)
            mn -c
            echo "Executing $algo, QoE $qoe_weights, no memory"
            python /home/ubuntu/containernet/ndn-experiment/docker_ndn.py -a "$algo" -q $(echo "$qoe_weights") -t large-scale -d 200 --no-memory -e "$epoch"
        done
    done
done
sh
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-experiment/locks.py
|
# Shared flag: set to True by the experiment driver to tell the
# traffic-control trace playback (trafficControl.traffic_control) to stop.
# NOTE: `global STOP_TC` at module level was a no-op and never created the
# attribute, so reading locks.STOP_TC before the driver assigned it raised
# AttributeError.  Initialise it explicitly instead.
STOP_TC = False
| 15 | 7 | 14 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-experiment/dynamicLinkChange.py
|
def reconfigureConnection(host, switch, bandwidth = None, delay = None, loss = None):
    """Apply the given link parameters (bandwidth in Mbit, delay in ms,
    loss percentage) to the host<->switch link; parameters left as None
    are not touched.  Does nothing if every parameter is None."""
    requested = {
        'bw': bandwidth,
        'delay': None if delay is None else str(delay) + 'ms',
        'loss': loss,
    }
    # Keep only the parameters the caller actually supplied.
    opts = {key: value for key, value in requested.items() if value is not None}
    if opts:
        setLinkOptions(host, switch, opts)
def setLinkOptions(host, switch, opts):
    """Apply tc (traffic control) changes to the switch-side interface of
    the host<->switch link.

    opts may contain:
      bw    -- bandwidth in Mbit (adjusts the existing HTB class 5:1)
      delay -- delay string such as '10ms' (netem)
      loss  -- loss percentage (netem)
    """
    links = host.connectionsTo(switch)
    link_tuple = links[0]
    switchIntf = link_tuple[1]  # second endpoint is the switch interface
    bw = opts.get('bw')
    delay = opts.get('delay')
    loss = opts.get('loss')
    # Adjust the rate of the existing HTB class.
    if bw:
        tc_change_cmd = '%s class change dev %s parent 5:0 classid 5:1 htb rate ' + str(bw) + 'Mbit burst 15k'
        switchIntf.tc(tc_change_cmd)
    # Adjust netem delay/loss.
    # NOTE(review): this branch previously built "... parent 5:1netem ..."
    # (missing space) and passed the *delay* value for the loss parameter;
    # both are fixed below.
    if delay or loss:
        parent = '5:1' if bw is not None else 'root'
        netem_change_cmd = '%s class change dev %s parent ' + parent + ' netem'
        if delay:
            netem_change_cmd += ' delay ' + str(delay)
        if loss:
            netem_change_cmd += ' loss ' + str(loss)
        switchIntf.tc(netem_change_cmd)
| 1,066 | 23.813953 | 101 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-experiment/docker_ndn.py
|
#!/usr/bin/python
from mininet.net import Containernet
from mininet.node import Docker, OVSSwitch
from mininet.log import setLogLevel, info
from mininet.link import TCLink
from mininet.topo import Topo
from mininet.util import dumpNodeConnections
import time
from lxml import etree
from subprocess import call
from threading import Thread
import os
import argparse
import locks
SEGMENT_LENGTH = 2        # seconds of video per DASH segment (duration / SEGMENT_LENGTH = n segments)
NFD_STRATEGY = 'access'   # NFD forwarding strategy name; not referenced in this chunk — verify usage
setLogLevel('info')
class OVSBridgeSTP( OVSSwitch ):
    """Open vSwitch Ethernet bridge running (rapid) spanning tree,
    rooted at the first bridge that is created (all bridges share the
    same rstp priority)."""
    prio = 4096
    def start( self, *args, **kwargs ):
        OVSSwitch.start( self, *args, **kwargs )
        # Run the bridge standalone (no controller) with RSTP and
        # multicast snooping enabled.
        config_cmds = [
            ( 'ovs-vsctl set-fail-mode', self, 'standalone' ),
            ( 'ovs-vsctl set-controller', self ),
            ( 'ovs-vsctl set Bridge', self,
              'rstp_enable=true',
              'mcast_snooping_enable=true',
              'other_config:rstp-priority=%d' % OVSBridgeSTP.prio ),
        ]
        for cmd in config_cmds:
            self.cmd( *cmd )
#switches = { 'ovs-stp': OVSBridgeSTP } TODO remove
def setNDNNodeInfo(mn_topo, node, nodeType, segments=15):
    """Specify container parameters for client, server, and cache nodes.

    Entrypoint flags: s: segments, t: type, v: videos folder,
    r: repo_ng options, c: cache size.  Unknown nodeType is a no-op
    (preserving the original behaviour).
    """
    # Options shared by every NDN container.
    common = {
        "privileged": True,
        "publish_all_ports": True,
        "cls": Docker
    }
    if 'server' in nodeType:
        # Server: serves video segments, no cache.
        opts = dict(common,
                    dcmd=["/bin/bash", "-c",
                          "/ndn-entrypoint.sh -v /videos/ -t repong-bbb -s {0} -c 0 -r '-D -u'"
                          .format(segments)],
                    dimage="ndn_repo_ndn:latest")
    elif 'client' in nodeType:
        # Client: headless DASH player.
        opts = dict(common,
                    dcmd=["/bin/bash", "/ndn-entrypoint.sh"],
                    dimage="ndn_headless-player_ndn-player:latest")
    elif 'cache' in nodeType:
        # Cache: 0 segments of its own, fixed cache size.
        opts = dict(common,
                    dcmd=["/bin/bash", "-c",
                          "/ndn-entrypoint.sh -t repong-bbb -s 0 -v /opt/ -c 15000"],
                    dimage="ndn_repo_ndn:latest")
    else:
        return
    mn_topo.setNodeInfo(node, opts)
def initExperiment(toptype, duration, algo, qoe_weights, use_memory, epoch):
    """Define the topologies and begin the experiment.

    Builds the requested topology ('large-scale' or 'doubles'), starts the
    Containernet network, registers NFD routes between clients, caches and
    servers, runs playback for `duration` seconds, then tears down.
    """
    info('*** Executing {0} topology'.format(toptype))
    # Set link attributes
    # NOTE(review): Python 2 integer division; duration is presumably a
    # multiple of SEGMENT_LENGTH -- confirm with callers.
    segments = duration/SEGMENT_LENGTH
    delaycore = '10ms'
    delayedge = '20ms'
    delayserver = '40ms'
    bwcore = 1000
    bwedge = 1000
    bwserver = 20
    linkloss = 0
    queue=1000
    # Define topologies
    # NOTE(review): if toptype matches neither branch below, `topology`,
    # `clients`, `caches`, `servers` and `switches` are never bound and the
    # code after the branches raises NameError -- confirm only supported
    # topology names are passed in.
    if 'large-scale' == toptype:
        # Randomly-generated large-scale topology. Note that it was randomly generated with
        # make_topo.py, but this implementation is fixed.
        num_clients = 7
        num_servers = 4
        num_caches = 5
        topology = Topo()
        clients = ['client{}'.format(i+1) for i in range(num_clients)]
        servers = ['server{}'.format(i+1) for i in range(num_servers)]
        caches = ['cache{}'.format(i+1) for i in range(num_caches)]
        switches = ['switch{}'.format(i+1) for i in range(num_caches)]
        for sw in switches:
            topology.addSwitch(sw, cls=OVSBridgeSTP)
        for se in servers:
            host = topology.addHost(se)
            setNDNNodeInfo(topology, host, 'server', segments=str(segments))
        for ca in caches:
            host = topology.addHost(ca)
            setNDNNodeInfo(topology, host, 'cache')
        for cl in clients:
            host = topology.addHost(cl)
            setNDNNodeInfo(topology, host, 'client')
        # Link nodes. Clients, servers, and caches are only linked to one node, so the idx in the list
        # corresponds to the idx in `clients`, and the number is the switch idx to attach to. Same for
        # cache and server.
        client_links = [3, 3, 4, 4, 2, 3, 4]
        server_links = [int(i/2) for i in range(num_servers)]
        cache_links = list(range(num_caches))
        # Caches are connected to multiple other caches, so use the same idea as above, but it's now
        # a list of lists. Note that each connection is only listed once (e.g. 5->7 is listed, but
        # not 7->5) as mininet assumes all links are bidirectional.
        switch_links = [[2], [2, 3, 4], [3], [4]]
        for ca, link in zip(caches, cache_links):
            topology.addLink(ca, switches[link], cls=TCLink, delay=delaycore, bw=bwcore,
                             loss=linkloss, max_queue_size=queue)
        for se, link in zip(servers, server_links):
            topology.addLink(se, switches[link], cls=TCLink, delay=delayserver, bw=bwserver,
                             loss=linkloss, max_queue_size=queue)
        for cl, link in zip(clients, client_links):
            topology.addLink(cl, switches[link], cls=TCLink, delay=delayedge, bw=bwedge,
                             loss=linkloss, max_queue_size=queue)
        for sw, links in zip(switches, switch_links):
            for link in links:
                topology.addLink(sw, switches[link], cls=TCLink, delay=delaycore, bw=bwcore,
                                 loss=linkloss, max_queue_size=queue)
    if 'doubles' == toptype:
        # A topology with 2 clients, 2 caches, and 2 servers. Both clients are
        # connected to one cache, and both servers are connected to the other.
        # The two caches are connected, connecting the whole network.
        topology = Topo()
        caches = ['cache1', 'cache2']
        clients = ['client1', 'client2']
        servers = ['server1', 'server2']
        switches = ['switch1', 'switch2']
        for sw in switches:
            topology.addSwitch(sw, cls=OVSBridgeSTP)
        for se in servers:
            host = topology.addHost(se)
            setNDNNodeInfo(topology, host, 'server', segments=str(segments))
        for ca in caches:
            host = topology.addHost(ca)
            setNDNNodeInfo(topology, host, 'cache')
        for cl in clients:
            host = topology.addHost(cl)
            setNDNNodeInfo(topology, host, 'client')
        # Link nodes
        topology.addLink(caches[0], switches[0], cls=TCLink, delay=delaycore,
                         bw=bwcore, loss=linkloss, max_queue_size=queue)
        topology.addLink(caches[1], switches[1], cls=TCLink, delay=delaycore,
                         bw=bwcore, loss=linkloss, max_queue_size=queue)
        topology.addLink(servers[0], switches[1], cls=TCLink, delay=delayserver,
                         bw=bwserver, loss=linkloss, max_queue_size=queue)
        topology.addLink(servers[1], switches[1], cls=TCLink, delay=delayserver,
                         bw=bwserver, loss=linkloss, max_queue_size=queue)
        topology.addLink(switches[0], switches[1], cls=TCLink, delay=delaycore,
                         bw=bwcore, loss=linkloss, max_queue_size=queue)
        topology.addLink(clients[0], switches[0], cls=TCLink, delay=delayedge,
                         bw=bwedge, loss=linkloss, max_queue_size=queue)
        topology.addLink(clients[1], switches[0], cls=TCLink, delay=delayedge,
                         bw=bwedge, loss=linkloss, max_queue_size=queue)
    # Create the network and perform additional setup
    # `net` is deliberately module-global: the helper functions below
    # (addNDNRoute, getDataFromClient, ...) read it.
    global net
    net = Containernet(topo=topology)
    for h in net.hosts:
        setStrategy(h, '/n', NFD_STRATEGY)
    info('*** Network connections:\n')
    dumpNodeConnections(net.hosts)
    dumpNodeConnections(net.switches)
    info('*** Starting network\n')
    net.start()
    info('\n*** Adding NDN links:\n')
    # Route costs steer NDN forwarding: client->cache (2) and cache->cache (1)
    # are cheap, cache->server (10) is expensive, so caches are preferred.
    for client in clients:
        for cache in caches:
            addNDNRoute(client, cache, con_type='udp', cost=2)
    for server in servers:
        for cache in caches:
            addNDNRoute(cache, server, con_type='udp', cost=10)
    for cache1 in caches:
        for cache2 in caches:
            if cache1 != cache2:
                addNDNRoute(cache1, cache2, con_type='udp', cost=1)
    # Wait until all clients have finished
    info('*** Waiting for clients to finish playback\n')
    waitForPlayback(servers, clients, switches, net, duration, algo,
                    qoe_weights, use_memory, epoch, toptype)
    # Teardown
    info('*** Stopping network\n')
    net.stop()
def waitForPlayback(servers, clients, switches, net, playbacktime, algo,
                    qoe_weights, use_memory, epoch, toptype):
    """Run playback on every client and collect results when it finishes.

    Blocks until repo-ng is initialized on the first server, optionally
    starts traffic-shaping threads (doubles topology only), launches one
    player thread per client, sleeps for the playback duration plus slack,
    then kills the players and copies their output out of the containers.
    """
    # Wait for repo-ng to initialize
    while not (checkRepoNGInitDone(net.get(servers[0]))):
        info('*** Waiting for repo-ng to initialize database...\n')
        time.sleep(5)
    info('*** Starting clients')
    # Shared flag read by the traffic_control threads; cleared here, set
    # again after playback to stop them.
    locks.STOP_TC = False
    # Sculpt traffic for doubles topology
    if toptype == 'doubles':
        tc_switch0 = Thread(target=traffic_control,
                            args = (net.get(switches[0]), # FROM here...
                                    net.get(switches[1]), # ...TO here.
                                    epoch))
        tc_switch1 = Thread(target=traffic_control,
                            args = (net.get(switches[1]),
                                    net.get(switches[0]),
                                    epoch))
        tc_switch0.start()
        tc_switch1.start()
    # Begin streaming
    for client in clients:
        client_thread = Thread(target=startPlayer, kwargs = {
            'host': net.get(client),
            'aa': algo,
            'segmentsizeOn': False,
            'sampleSize': 50,
            'client': client,
            'qoe_weights': qoe_weights,
            'use_memory': use_memory,
            'epoch': epoch,
            # One adaptation timestep per 2-second segment.
            'timesteps_per_epoch': int(playbacktime/2)
        })
        client_thread.start()
    # Wait until streaming finished, giving some extra time due to stalling,
    # then stop the client and get its data
    time.sleep(playbacktime + 20)
    for client in clients:
        killPlayer(net.get(client))
    locks.STOP_TC = True
    for client in clients:
        getDataFromClient(algo, toptype, playbacktime, qoe_weights, use_memory, client, epoch)
def getDataFromClient(algo, topology, duration, qoe_weights, use_memory,
                      client, epoch):
    """Fetch results and UCB object from client containers.

    Writes the client's /player-out.txt to
    ../results/<algo>_<topology>_<duration>_<weights>_<memory>_<client>_<epoch>
    and, when memory is enabled, `docker cp`s the pickled UCB learner out of
    the container so the next epoch can resume from it.
    """
    # Determine the filename based on the client and playback parameters
    weights_str = '-'.join([str(q) for q in qoe_weights])
    memory_str = 'memory' if use_memory else 'no-memory'
    filename = "../results/{0}_{1}_{2}_{3}_{4}_{5}_{6}".format(algo, topology,
            duration, weights_str, memory_str, client, epoch)
    # Ensure the results directory exists. Catch only OSError (raised when it
    # already exists or cannot be created) -- the previous bare `except` also
    # swallowed KeyboardInterrupt/SystemExit. If creation genuinely failed,
    # open() below will still surface the problem.
    try:
        os.makedirs(os.path.dirname(filename))
    except OSError:
        pass
    with open(filename, "w") as text_file:
        text_file.write(net.get(client).cmd('cat /player-out.txt'))
    # Get the UCB object if we'll need it for the next epoch
    if use_memory:
        weights_str = '_'.join([str(int(q)) for q in qoe_weights])
        ucb_obj_path = '/home/ubuntu/containernet/ucb_objs/{0}/{1}/{2}_obj.pkl'.format(
            client, weights_str, algo)
        # Copy out of the container (mn.<client>) onto the host filesystem.
        call('docker cp mn.{0}:/{1}.pkl {2}'.format(client, algo, ucb_obj_path),
             shell=True)
    info('*** Copied data to: {0}\n'.format(filename))
def addNDNRoute(source, dest, cost=1, con_type='tcp'):
    """Register an NFD route for prefix /n from `source` towards `dest`.

    con_type selects the face type: 'tcp'/'udp' register a tunnel face to
    dest's IP; 'ether' builds a macvlan device towards dest and registers
    the matching NFD face; 'auto' starts nfd-autoreg in the background.
    `source` and `dest` are host names resolved via the module-global `net`.
    """
    source = net.get(source)
    dest = net.get(dest)
    if con_type == 'tcp':
        return source.sendCmd("nfdc register -c {0} /n tcp://{1}".format(
            cost, dest.IP()))
    elif con_type == 'udp':
        return source.sendCmd("nfdc register -c {0} /n udp://{1}".format(
            cost, dest.IP()))
    elif con_type == 'ether':
        # Build a macvlan interface named 'server' that impersonates dest's
        # MAC/IP, then find the NFD face created for it and register /n on it.
        source.cmdPrint('sysctl -w net.ipv4.ip_forward=1')
        source.cmdPrint(
            'ip link add name server link {}-eth0 type macvlan'.format(source.name))
        source.cmdPrint(
            'ip link set dev server address {}'.format(dest.MAC()))
        source.cmdPrint('ip link set server up')
        source.cmdPrint(
            'ip addr add {}/32 brd + dev server'.format(dest.IP()))
        source.cmdPrint('ip route add {} dev server'.format(source.IP()))
        source.cmdPrint('ifconfig')
        # Give NFD a moment to create the face for the new interface.
        time.sleep(2)
        # Parse `nfd-status -x` XML to locate the face whose localUri mentions
        # the 'server' device, then register the route on that face ID.
        xml = source.cmd('nfd-status -x')
        doc = etree.fromstring(xml)
        face_ID = 'Not Set'
        for b in doc.findall(".//{ndn:/localhost/nfd/status/1}localUri/."):
            if "server" in b.text:
                print 'Found Face ID'
                # faceId is the first child of the matching face element.
                face_ID = b.getparent().getchildren()[0].text
                print face_ID
        return source.cmdPrint("nfdc register -c {0} /n {1}".format(cost, face_ID))
    elif con_type == 'auto':
        source.sendCmd('nfd-autoreg --prefix=/n -w 10.0.0.0/8 &')
def checkRepoNGInitDone(node):
    """Return True once repo-ng has created its /INITDONE marker on `node`."""
    listing = node.cmd('ls /INITDONE')
    # `ls` prints the path if the marker exists, or an error message if not.
    return 'No such file or directory' not in listing
def setStrategy(node, name, strategy):
    """Set the NFD forwarding strategy for prefix `name` on `node` and echo
    nfdc's output."""
    print node.cmd("nfdc set-strategy %s ndn:/localhost/nfd/strategy/%s" %
                   (name, strategy))
def startPlayer(host=None, mpd="/n/mpd", aa='p', alpha='44', segmentsizeOn=True,
                sampleSize='', client='N/A', qoe_weights=None, use_memory=False,
                epoch=-1, timesteps_per_epoch=100):
    """Begin playback.
    /code/executeDockerScenario.sh -nohead -n -bola 0.8 12 -u /n/mpd
    p is extended PANDA, alpha
    bola is bola, alpha is 12
    fb is regular panda
    br BufferRateBased (AdapTech)
    r ratebased
    b BufferBased regular
    bt BufferBasedThreeThreshold
    nr no adaptation (test for caching)
    """
    # TODO Player does not start
    # bola/fb do not take a sample-size option.
    if aa == 'bola' or aa == 'fb':
        sampleSize = ''
    if sampleSize != '':
        sampleSize = '-c {}'.format(sampleSize)
    localmpd = ''
    if segmentsizeOn:
        localmpd = '-localmpd /code/mpdss'
    # NOTE(review): the format string consumes three extra positional args via
    # *qoe_weights, so qoe_weights is assumed to hold exactly 3 values --
    # matches the `nargs=3` CLI argument; confirm for other callers.
    PLAYER_PATH = "/code/executeDockerScenario.sh -nohead"
    ADAPDATION_ALGORITHM = "{0} -n 0.8 {1} -{2} {3} {4} {5} {6}".format(localmpd, sampleSize, aa, alpha, *qoe_weights)
    MPD_COMMAND = "-u {0} > /player-out.txt &".format(mpd)
    # If we use memory, upload any existing objects
    # epoch > 0 to overwrite previous memory objects once this epoch finishes
    if use_memory and epoch > 0:
        ucb_obj_folder = '/home/ubuntu/containernet/ucb_objs/{0}/{1}/'.format(
            client, '_'.join([str(int(q)) for q in qoe_weights]))
        ucb_obj_path = ucb_obj_folder + '{0}_obj.pkl'.format(aa)
        if not os.path.exists(ucb_obj_folder):
            print("ERROR: ucb_objs/client/weights folder does not exist!")
            exit(1)
        if os.path.exists(ucb_obj_path):
            # Copy the saved learner into the container and tell the player
            # which global timestep to resume counting from.
            call('docker cp {0} mn.{1}:/{2}.pkl'.format(ucb_obj_path, client, aa), shell=True)
            host.cmd('export BEGIN_TIMESTEP="{0}"'.format((epoch-1)*timesteps_per_epoch))
        elif epoch > 1:
            print('WARNING: no UCB object found while using memory with epoch > 1')
    # Begin playback
    host.cmd('rm log')
    host.cmd(['chmod +x', '/code/executeDockerScenario.sh'])
    # Prime the NDN cache/route with a single interest for the MPD.
    host.cmd('ndnpeek /n/mpd')
    # sendCmd runs the player in the background (MPD_COMMAND ends with '&').
    host.sendCmd([PLAYER_PATH, ADAPDATION_ALGORITHM, MPD_COMMAND])
def killPlayer(host):
    """Terminate the DASH player process inside `host` and echo pkill's output."""
    print host.cmd('pkill qtsampleplayer')
if __name__ == '__main__':
    setLogLevel('info')
    # Parse the arguments
    parser = argparse.ArgumentParser()
    # All single-valued options use nargs=1, so argparse stores one-element
    # lists; they are unwrapped with [0] when passed to initExperiment.
    parser.add_argument('-a', '--algo', nargs=1, type=str, required=True)
    parser.add_argument('-t', '--topology', nargs=1, type=str, required=True)
    parser.add_argument('-d', '--duration', nargs=1, type=int, required=True)
    # Exactly three QoE weight values (consumed positionally by startPlayer).
    parser.add_argument('-q', '--qoe_weights', nargs=3, type=float, required=True)
    # --memory / --no-memory toggle persistence of the UCB learner between
    # epochs; default is off.
    parser.add_argument('--memory', dest='use_memory', action='store_true')
    parser.add_argument('--no-memory', dest='use_memory', action='store_false')
    parser.set_defaults(use_memory=False)
    parser.add_argument('-e', '--epoch', nargs=1, type=int, required=True)
    args = vars(parser.parse_args())
    initExperiment(toptype=args['topology'][0], duration=args['duration'][0],
                   algo=args['algo'][0], qoe_weights=args['qoe_weights'],
                   use_memory=args['use_memory'], epoch=args['epoch'][0])
| 16,422 | 42.678191 | 118 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-experiment/bandwidth_traces/generate_random_traces.py
|
# Note, skip epoch 1 because that one used trevor_trace
import csv
import numpy as np

# One CSV per epoch, seeded by epoch number so traces are reproducible.
# Each row is (bandwidth, period); both are clamped below at 0.5.
for epoch in range(2, 21):
    np.random.seed(epoch)
    bandwidths = [max(np.random.normal(7, 3), 0.5) for _ in range(10000)]
    periods = [max(np.random.normal(5), 0.5) for _ in range(10000)]
    with open('trace_epoch_{}.csv'.format(epoch), 'w') as trace_file:
        csv.writer(trace_file).writerows(zip(bandwidths, periods))
| 489 | 34 | 59 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-experiment/bandwidth_traces/generate_traces.py
|
import numpy as np
import csv

# Deterministic bandwidth traces: a fixed V-shaped bandwidth profile that is
# lowered by 0.1 Mbps per epoch, each level held for a 20-second period.
for client in range(1, 2):
    for epoch in range(1, 6):
        # (Randomized gamma-distributed traces were tried previously:
        #  np.random.gamma(6, 0.5) / np.random.gamma(2, 0.5).)
        profile = [3.7, 3.2, 2.3, 1.7, 1.2, 1.2, 1.7, 2.3, 3.2, 3.7]
        bws = [level - (epoch-1) * 0.1 for level in profile]
        pds = [20] * 10
        # Add 20 extra seconds to the end since we let it go for 220 seconds total, in case of stalling
        pds[-1] += 20
        with open('client{}_epoch{}.csv'.format(client, epoch), 'w') as f:
            csv.writer(f).writerows(zip(bws, pds))
| 701 | 38 | 103 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/miniedit.py
|
#!/usr/bin/python
"""
MiniEdit: a simple network editor for Mininet
This is a simple demonstration of how one might build a
GUI application using Mininet as the network model.
Bob Lantz, April 2010
Gregory Gee, July 2013
Controller icon from http://semlabs.co.uk/
OpenFlow icon from https://www.opennetworking.org/
"""
# Miniedit needs some work in order to pass pylint...
# pylint: disable=line-too-long,too-many-branches
# pylint: disable=too-many-statements,attribute-defined-outside-init
# pylint: disable=missing-docstring
MINIEDIT_VERSION = '2.2.0.1'
from optparse import OptionParser
# from Tkinter import *
from Tkinter import ( Frame, Label, LabelFrame, Entry, OptionMenu, Checkbutton,
Menu, Toplevel, Button, BitmapImage, PhotoImage, Canvas,
Scrollbar, Wm, TclError, StringVar, IntVar,
E, W, EW, NW, Y, VERTICAL, SOLID, CENTER,
RIGHT, LEFT, BOTH, TRUE, FALSE )
from ttk import Notebook
from tkMessageBox import showerror
from subprocess import call
import tkFont
import tkFileDialog
import tkSimpleDialog
import re
import json
from distutils.version import StrictVersion
import os
import sys
from functools import partial
if 'PYTHONPATH' in os.environ:
sys.path = os.environ[ 'PYTHONPATH' ].split( ':' ) + sys.path
# someday: from ttk import *
from mininet.log import info, setLogLevel
from mininet.net import Mininet, VERSION
from mininet.util import netParse, ipAdd, quietRun
from mininet.util import buildTopo
from mininet.util import custom, customClass
from mininet.term import makeTerm, cleanUpScreens
from mininet.node import Controller, RemoteController, NOX, OVSController
from mininet.node import CPULimitedHost, Host, Node
from mininet.node import OVSSwitch, UserSwitch
from mininet.link import TCLink, Intf, Link
from mininet.cli import CLI
from mininet.moduledeps import moduleDeps
from mininet.topo import SingleSwitchTopo, LinearTopo, SingleSwitchReversedTopo
from mininet.topolib import TreeTopo
print 'MiniEdit running against Mininet '+VERSION
# Strip any non-numeric suffix (e.g. '2.1.0+') so StrictVersion can parse it.
MININET_VERSION = re.sub(r'[^\d\.]', '', VERSION)
if StrictVersion(MININET_VERSION) > StrictVersion('2.0'):
    # IVSSwitch only exists in Mininet 2.1+.
    from mininet.node import IVSSwitch
# Registries mapping CLI/preference names to factory callables.
TOPODEF = 'none'
TOPOS = { 'minimal': lambda: SingleSwitchTopo( k=2 ),
          'linear': LinearTopo,
          'reversed': SingleSwitchReversedTopo,
          'single': SingleSwitchTopo,
          'none': None,
          'tree': TreeTopo }
CONTROLLERDEF = 'ref'
CONTROLLERS = { 'ref': Controller,
                'ovsc': OVSController,
                'nox': NOX,
                'remote': RemoteController,
                'none': lambda name: None }
LINKDEF = 'default'
LINKS = { 'default': Link,
          'tc': TCLink }
HOSTDEF = 'proc'
HOSTS = { 'proc': Host,
          'rt': custom( CPULimitedHost, sched='rt' ),
          'cfs': custom( CPULimitedHost, sched='cfs' ) }
class InbandController( RemoteController ):
    "RemoteController that ignores checkListening"

    def checkListening( self ):
        "No-op: skip the listening check for in-band controllers."
        pass
class CustomUserSwitch(UserSwitch):
    "Customized UserSwitch"

    def __init__( self, name, dpopts='--no-slicing', **kwargs ):
        UserSwitch.__init__( self, name, **kwargs )
        # Management IP; assigned via setSwitchIP and applied on start().
        self.switchIP = None

    def getSwitchIP(self):
        "Return management IP address"
        return self.switchIP

    def setSwitchIP(self, ip):
        "Set management IP address"
        self.switchIP = ip

    def start( self, controllers ):
        "Start and set management IP address"
        UserSwitch.start( self, controllers )
        # Nothing more to do when no management IP was configured.
        if self.switchIP is None:
            return
        if self.inNamespace:
            # In a private namespace, configure the loopback device.
            self.cmd( 'ifconfig lo', self.switchIP )
        else:
            self.cmd( 'ifconfig', self, self.switchIP )
class LegacyRouter( Node ):
    "Simple IP router"

    def __init__( self, name, inNamespace=True, **params ):
        Node.__init__( self, name, inNamespace, **params )

    def config( self, **_params ):
        "Configure the node, then enable kernel IP forwarding."
        # Provide a default IP for attached interfaces unless overridden.
        if self.intfs:
            self.setParam( _params, 'setIP', ip='0.0.0.0' )
        result = Node.config( self, **_params )
        # Routing requires ip_forward; set it after base configuration.
        self.cmd('sysctl -w net.ipv4.ip_forward=1')
        return result
class LegacySwitch(OVSSwitch):
    "OVS switch in standalone/bridge mode"
    def __init__( self, name, **params ):
        # standalone failMode makes OVS behave as a plain learning bridge.
        OVSSwitch.__init__( self, name, failMode='standalone', **params )
        # Management IP placeholder (kept for parity with other switch types).
        self.switchIP = None
class customOvs(OVSSwitch):
    "Customized OVS switch"

    def __init__( self, name, failMode='secure', datapath='kernel', **params ):
        OVSSwitch.__init__( self, name, failMode=failMode, datapath=datapath,**params )
        # Management IP; assigned via setSwitchIP and applied on start().
        self.switchIP = None

    def getSwitchIP(self):
        "Return management IP address"
        return self.switchIP

    def setSwitchIP(self, ip):
        "Set management IP address"
        self.switchIP = ip

    def start( self, controllers ):
        "Start and set management IP address"
        OVSSwitch.start( self, controllers )
        # Apply the management IP only if one was configured.
        if self.switchIP is None:
            return
        self.cmd( 'ifconfig', self, self.switchIP )
class PrefsDialog(tkSimpleDialog.Dialog):
    "Preferences dialog"
    # Edits global MiniEdit preferences: IP base, terminal type, CLI toggle,
    # default switch type, OVS OpenFlow versions, dpctl port, sFlow/NetFlow.

    def __init__(self, parent, title, prefDefaults):
        # prefDefaults: dict of current preference values used to prefill
        # the widgets; the edited result is stored in self.result by apply().
        self.prefValues = prefDefaults
        tkSimpleDialog.Dialog.__init__(self, parent, title)

    def body(self, master):
        "Create dialog body"
        self.rootFrame = master
        self.leftfieldFrame = Frame(self.rootFrame, padx=5, pady=5)
        self.leftfieldFrame.grid(row=0, column=0, sticky='nswe', columnspan=2)
        self.rightfieldFrame = Frame(self.rootFrame, padx=5, pady=5)
        self.rightfieldFrame.grid(row=0, column=2, sticky='nswe', columnspan=2)
        # Field for Base IP
        Label(self.leftfieldFrame, text="IP Base:").grid(row=0, sticky=E)
        self.ipEntry = Entry(self.leftfieldFrame)
        self.ipEntry.grid(row=0, column=1)
        ipBase = self.prefValues['ipBase']
        self.ipEntry.insert(0, ipBase)
        # Selection of terminal type
        Label(self.leftfieldFrame, text="Default Terminal:").grid(row=1, sticky=E)
        self.terminalVar = StringVar(self.leftfieldFrame)
        self.terminalOption = OptionMenu(self.leftfieldFrame, self.terminalVar, "xterm", "gterm")
        self.terminalOption.grid(row=1, column=1, sticky=W)
        terminalType = self.prefValues['terminalType']
        self.terminalVar.set(terminalType)
        # Field for CLI
        Label(self.leftfieldFrame, text="Start CLI:").grid(row=2, sticky=E)
        self.cliStart = IntVar()
        self.cliButton = Checkbutton(self.leftfieldFrame, variable=self.cliStart)
        self.cliButton.grid(row=2, column=1, sticky=W)
        if self.prefValues['startCLI'] == '0':
            self.cliButton.deselect()
        else:
            self.cliButton.select()
        # Selection of switch type
        Label(self.leftfieldFrame, text="Default Switch:").grid(row=3, sticky=E)
        self.switchType = StringVar(self.leftfieldFrame)
        self.switchTypeMenu = OptionMenu(self.leftfieldFrame, self.switchType, "Open vSwitch Kernel Mode", "Indigo Virtual Switch", "Userspace Switch", "Userspace Switch inNamespace")
        self.switchTypeMenu.grid(row=3, column=1, sticky=W)
        switchTypePref = self.prefValues['switchType']
        # Map the stored short code to the human-readable menu label.
        if switchTypePref == 'ivs':
            self.switchType.set("Indigo Virtual Switch")
        elif switchTypePref == 'userns':
            self.switchType.set("Userspace Switch inNamespace")
        elif switchTypePref == 'user':
            self.switchType.set("Userspace Switch")
        else:
            self.switchType.set("Open vSwitch Kernel Mode")
        # Fields for OVS OpenFlow version
        ovsFrame= LabelFrame(self.leftfieldFrame, text='Open vSwitch', padx=5, pady=5)
        ovsFrame.grid(row=4, column=0, columnspan=2, sticky=EW)
        Label(ovsFrame, text="OpenFlow 1.0:").grid(row=0, sticky=E)
        Label(ovsFrame, text="OpenFlow 1.1:").grid(row=1, sticky=E)
        Label(ovsFrame, text="OpenFlow 1.2:").grid(row=2, sticky=E)
        Label(ovsFrame, text="OpenFlow 1.3:").grid(row=3, sticky=E)
        # One checkbutton per supported OpenFlow version; stored as '0'/'1'.
        self.ovsOf10 = IntVar()
        self.covsOf10 = Checkbutton(ovsFrame, variable=self.ovsOf10)
        self.covsOf10.grid(row=0, column=1, sticky=W)
        if self.prefValues['openFlowVersions']['ovsOf10'] == '0':
            self.covsOf10.deselect()
        else:
            self.covsOf10.select()
        self.ovsOf11 = IntVar()
        self.covsOf11 = Checkbutton(ovsFrame, variable=self.ovsOf11)
        self.covsOf11.grid(row=1, column=1, sticky=W)
        if self.prefValues['openFlowVersions']['ovsOf11'] == '0':
            self.covsOf11.deselect()
        else:
            self.covsOf11.select()
        self.ovsOf12 = IntVar()
        self.covsOf12 = Checkbutton(ovsFrame, variable=self.ovsOf12)
        self.covsOf12.grid(row=2, column=1, sticky=W)
        if self.prefValues['openFlowVersions']['ovsOf12'] == '0':
            self.covsOf12.deselect()
        else:
            self.covsOf12.select()
        self.ovsOf13 = IntVar()
        self.covsOf13 = Checkbutton(ovsFrame, variable=self.ovsOf13)
        self.covsOf13.grid(row=3, column=1, sticky=W)
        if self.prefValues['openFlowVersions']['ovsOf13'] == '0':
            self.covsOf13.deselect()
        else:
            self.covsOf13.select()
        # Field for DPCTL listen port
        Label(self.leftfieldFrame, text="dpctl port:").grid(row=5, sticky=E)
        self.dpctlEntry = Entry(self.leftfieldFrame)
        self.dpctlEntry.grid(row=5, column=1)
        if 'dpctl' in self.prefValues:
            self.dpctlEntry.insert(0, self.prefValues['dpctl'])
        # sFlow
        sflowValues = self.prefValues['sflow']
        self.sflowFrame= LabelFrame(self.rightfieldFrame, text='sFlow Profile for Open vSwitch', padx=5, pady=5)
        self.sflowFrame.grid(row=0, column=0, columnspan=2, sticky=EW)
        Label(self.sflowFrame, text="Target:").grid(row=0, sticky=E)
        self.sflowTarget = Entry(self.sflowFrame)
        self.sflowTarget.grid(row=0, column=1)
        self.sflowTarget.insert(0, sflowValues['sflowTarget'])
        Label(self.sflowFrame, text="Sampling:").grid(row=1, sticky=E)
        self.sflowSampling = Entry(self.sflowFrame)
        self.sflowSampling.grid(row=1, column=1)
        self.sflowSampling.insert(0, sflowValues['sflowSampling'])
        Label(self.sflowFrame, text="Header:").grid(row=2, sticky=E)
        self.sflowHeader = Entry(self.sflowFrame)
        self.sflowHeader.grid(row=2, column=1)
        self.sflowHeader.insert(0, sflowValues['sflowHeader'])
        Label(self.sflowFrame, text="Polling:").grid(row=3, sticky=E)
        self.sflowPolling = Entry(self.sflowFrame)
        self.sflowPolling.grid(row=3, column=1)
        self.sflowPolling.insert(0, sflowValues['sflowPolling'])
        # NetFlow
        nflowValues = self.prefValues['netflow']
        self.nFrame= LabelFrame(self.rightfieldFrame, text='NetFlow Profile for Open vSwitch', padx=5, pady=5)
        self.nFrame.grid(row=1, column=0, columnspan=2, sticky=EW)
        Label(self.nFrame, text="Target:").grid(row=0, sticky=E)
        self.nflowTarget = Entry(self.nFrame)
        self.nflowTarget.grid(row=0, column=1)
        self.nflowTarget.insert(0, nflowValues['nflowTarget'])
        Label(self.nFrame, text="Active Timeout:").grid(row=1, sticky=E)
        self.nflowTimeout = Entry(self.nFrame)
        self.nflowTimeout.grid(row=1, column=1)
        self.nflowTimeout.insert(0, nflowValues['nflowTimeout'])
        Label(self.nFrame, text="Add ID to Interface:").grid(row=2, sticky=E)
        self.nflowAddId = IntVar()
        self.nflowAddIdButton = Checkbutton(self.nFrame, variable=self.nflowAddId)
        self.nflowAddIdButton.grid(row=2, column=1, sticky=W)
        if nflowValues['nflowAddId'] == '0':
            self.nflowAddIdButton.deselect()
        else:
            self.nflowAddIdButton.select()
        # initial focus
        return self.ipEntry

    def apply(self):
        # Harvest widget values into self.result; sets self.result to None
        # if a version-compatibility check fails (error dialog shown).
        ipBase = self.ipEntry.get()
        terminalType = self.terminalVar.get()
        startCLI = str(self.cliStart.get())
        sw = self.switchType.get()
        dpctl = self.dpctlEntry.get()
        ovsOf10 = str(self.ovsOf10.get())
        ovsOf11 = str(self.ovsOf11.get())
        ovsOf12 = str(self.ovsOf12.get())
        ovsOf13 = str(self.ovsOf13.get())
        sflowValues = {'sflowTarget':self.sflowTarget.get(),
                       'sflowSampling':self.sflowSampling.get(),
                       'sflowHeader':self.sflowHeader.get(),
                       'sflowPolling':self.sflowPolling.get()}
        nflowvalues = {'nflowTarget':self.nflowTarget.get(),
                       'nflowTimeout':self.nflowTimeout.get(),
                       'nflowAddId':str(self.nflowAddId.get())}
        self.result = {'ipBase':ipBase,
                       'terminalType':terminalType,
                       'dpctl':dpctl,
                       'sflow':sflowValues,
                       'netflow':nflowvalues,
                       'startCLI':startCLI}
        # Translate the menu label back to the stored short code, checking
        # that the selected switch/OpenFlow combination is supported.
        if sw == 'Indigo Virtual Switch':
            self.result['switchType'] = 'ivs'
            if StrictVersion(MININET_VERSION) < StrictVersion('2.1'):
                self.ovsOk = False
                showerror(title="Error",
                          message='MiniNet version 2.1+ required. You have '+VERSION+'.')
        elif sw == 'Userspace Switch':
            self.result['switchType'] = 'user'
        elif sw == 'Userspace Switch inNamespace':
            self.result['switchType'] = 'userns'
        else:
            self.result['switchType'] = 'ovs'
        # OpenFlow 1.1 needs OVS 2.0+; 1.2/1.3 need OVS 1.10+.
        self.ovsOk = True
        if ovsOf11 == "1":
            ovsVer = self.getOvsVersion()
            if StrictVersion(ovsVer) < StrictVersion('2.0'):
                self.ovsOk = False
                showerror(title="Error",
                          message='Open vSwitch version 2.0+ required. You have '+ovsVer+'.')
        if ovsOf12 == "1" or ovsOf13 == "1":
            ovsVer = self.getOvsVersion()
            if StrictVersion(ovsVer) < StrictVersion('1.10'):
                self.ovsOk = False
                showerror(title="Error",
                          message='Open vSwitch version 1.10+ required. You have '+ovsVer+'.')
        if self.ovsOk:
            self.result['openFlowVersions']={'ovsOf10':ovsOf10,
                                             'ovsOf11':ovsOf11,
                                             'ovsOf12':ovsOf12,
                                             'ovsOf13':ovsOf13}
        else:
            self.result = None

    @staticmethod
    def getOvsVersion():
        "Return OVS version"
        # Parses the 'ovs_version: "..."' line from `ovs-vsctl show`.
        outp = quietRun("ovs-vsctl show")
        r = r'ovs_version: "(.*)"'
        m = re.search(r, outp)
        if m is None:
            print 'Version check failed'
            return None
        else:
            print 'Open vSwitch version is '+m.group(1)
            return m.group(1)
class CustomDialog(object):
    """Minimal dialog scaffold: a body frame plus OK/Cancel buttons.

    Subclasses override body() to populate the dialog and apply() to
    harvest results before the window is destroyed.
    """
    # TODO: Fix button placement and Title and window focus lock

    def __init__(self, master, _title):
        self.top=Toplevel(master)
        self.bodyFrame = Frame(self.top)
        self.bodyFrame.grid(row=0, column=0, sticky='nswe')
        self.body(self.bodyFrame)
        #return self.b # initial focus
        buttonRow = Frame(self.top, relief='ridge', bd=3, bg='lightgrey')
        buttonRow.grid(row=1, column=0, sticky='nswe')
        okButton = Button(buttonRow, width=8, text='OK', relief='groove',
                          bd=4, command=self.okAction)
        okButton.grid(row=0, column=0, sticky=E)
        cancelButton = Button(buttonRow, width=8, text='Cancel', relief='groove',
                              bd=4, command=self.cancelAction)
        cancelButton.grid(row=0, column=1, sticky=W)

    def body(self, master):
        "Populate the dialog body; subclasses override and extend this."
        self.rootFrame = master

    def apply(self):
        "Harvest results; the default implementation just closes the window."
        self.top.destroy()

    def cancelAction(self):
        "Dismiss the dialog without applying."
        self.top.destroy()

    def okAction(self):
        "Apply results, then close the dialog."
        self.apply()
        self.top.destroy()
class HostDialog(CustomDialog):
    # Per-host properties dialog with four tabs: basic properties, VLAN
    # interfaces, external interfaces, and private directories.  apply()
    # collects everything into self.result.

    def __init__(self, master, title, prefDefaults):
        # prefDefaults: dict of the host's current settings, used to prefill
        # the widgets.
        self.prefValues = prefDefaults
        self.result = None
        CustomDialog.__init__(self, master, title)

    def body(self, master):
        "Build the four-tab notebook that makes up the dialog body."
        self.rootFrame = master
        n = Notebook(self.rootFrame)
        self.propFrame = Frame(n)
        self.vlanFrame = Frame(n)
        self.interfaceFrame = Frame(n)
        self.mountFrame = Frame(n)
        n.add(self.propFrame, text='Properties')
        n.add(self.vlanFrame, text='VLAN Interfaces')
        n.add(self.interfaceFrame, text='External Interfaces')
        n.add(self.mountFrame, text='Private Directories')
        n.pack()
        ### TAB 1
        # Field for Hostname
        Label(self.propFrame, text="Hostname:").grid(row=0, sticky=E)
        self.hostnameEntry = Entry(self.propFrame)
        self.hostnameEntry.grid(row=0, column=1)
        if 'hostname' in self.prefValues:
            self.hostnameEntry.insert(0, self.prefValues['hostname'])
        # Field for Switch IP
        Label(self.propFrame, text="IP Address:").grid(row=1, sticky=E)
        self.ipEntry = Entry(self.propFrame)
        self.ipEntry.grid(row=1, column=1)
        if 'ip' in self.prefValues:
            self.ipEntry.insert(0, self.prefValues['ip'])
        # Field for default route
        Label(self.propFrame, text="Default Route:").grid(row=2, sticky=E)
        self.routeEntry = Entry(self.propFrame)
        self.routeEntry.grid(row=2, column=1)
        if 'defaultRoute' in self.prefValues:
            self.routeEntry.insert(0, self.prefValues['defaultRoute'])
        # Field for CPU
        Label(self.propFrame, text="Amount CPU:").grid(row=3, sticky=E)
        self.cpuEntry = Entry(self.propFrame)
        self.cpuEntry.grid(row=3, column=1)
        if 'cpu' in self.prefValues:
            self.cpuEntry.insert(0, str(self.prefValues['cpu']))
        # Selection of Scheduler
        if 'sched' in self.prefValues:
            sched = self.prefValues['sched']
        else:
            sched = 'host'
        self.schedVar = StringVar(self.propFrame)
        self.schedOption = OptionMenu(self.propFrame, self.schedVar, "host", "cfs", "rt")
        self.schedOption.grid(row=3, column=2, sticky=W)
        self.schedVar.set(sched)
        # Selection of Cores
        Label(self.propFrame, text="Cores:").grid(row=4, sticky=E)
        self.coreEntry = Entry(self.propFrame)
        self.coreEntry.grid(row=4, column=1)
        if 'cores' in self.prefValues:
            self.coreEntry.insert(1, self.prefValues['cores'])
        # Start command
        Label(self.propFrame, text="Start Command:").grid(row=5, sticky=E)
        self.startEntry = Entry(self.propFrame)
        self.startEntry.grid(row=5, column=1, sticky='nswe', columnspan=3)
        if 'startCommand' in self.prefValues:
            self.startEntry.insert(0, str(self.prefValues['startCommand']))
        # Stop command
        Label(self.propFrame, text="Stop Command:").grid(row=6, sticky=E)
        self.stopEntry = Entry(self.propFrame)
        self.stopEntry.grid(row=6, column=1, sticky='nswe', columnspan=3)
        if 'stopCommand' in self.prefValues:
            self.stopEntry.insert(0, str(self.prefValues['stopCommand']))
        ### TAB 2
        # External Interfaces
        # Table of external interfaces; row 0 is a read-only header row
        # (skipped when harvesting in apply()).
        self.externalInterfaces = 0
        Label(self.interfaceFrame, text="External Interface:").grid(row=0, column=0, sticky=E)
        self.b = Button( self.interfaceFrame, text='Add', command=self.addInterface)
        self.b.grid(row=0, column=1)
        self.interfaceFrame = VerticalScrolledTable(self.interfaceFrame, rows=0, columns=1, title='External Interfaces')
        self.interfaceFrame.grid(row=1, column=0, sticky='nswe', columnspan=2)
        self.tableFrame = self.interfaceFrame.interior
        self.tableFrame.addRow(value=['Interface Name'], readonly=True)
        # Add defined interfaces
        externalInterfaces = []
        if 'externalInterfaces' in self.prefValues:
            externalInterfaces = self.prefValues['externalInterfaces']
        for externalInterface in externalInterfaces:
            self.tableFrame.addRow(value=[externalInterface])
        ### TAB 3
        # VLAN Interfaces
        self.vlanInterfaces = 0
        Label(self.vlanFrame, text="VLAN Interface:").grid(row=0, column=0, sticky=E)
        self.vlanButton = Button( self.vlanFrame, text='Add', command=self.addVlanInterface)
        self.vlanButton.grid(row=0, column=1)
        self.vlanFrame = VerticalScrolledTable(self.vlanFrame, rows=0, columns=2, title='VLAN Interfaces')
        self.vlanFrame.grid(row=1, column=0, sticky='nswe', columnspan=2)
        self.vlanTableFrame = self.vlanFrame.interior
        self.vlanTableFrame.addRow(value=['IP Address','VLAN ID'], readonly=True)
        vlanInterfaces = []
        if 'vlanInterfaces' in self.prefValues:
            vlanInterfaces = self.prefValues['vlanInterfaces']
        for vlanInterface in vlanInterfaces:
            self.vlanTableFrame.addRow(value=vlanInterface)
        ### TAB 4
        # Private Directories
        self.privateDirectories = 0
        Label(self.mountFrame, text="Private Directory:").grid(row=0, column=0, sticky=E)
        self.mountButton = Button( self.mountFrame, text='Add', command=self.addDirectory)
        self.mountButton.grid(row=0, column=1)
        self.mountFrame = VerticalScrolledTable(self.mountFrame, rows=0, columns=2, title='Directories')
        self.mountFrame.grid(row=1, column=0, sticky='nswe', columnspan=2)
        self.mountTableFrame = self.mountFrame.interior
        self.mountTableFrame.addRow(value=['Mount','Persistent Directory'], readonly=True)
        directoryList = []
        if 'privateDirectory' in self.prefValues:
            directoryList = self.prefValues['privateDirectory']
        # Entries may be plain paths or (mount, persistent-dir) tuples.
        for privateDir in directoryList:
            if isinstance( privateDir, tuple ):
                self.mountTableFrame.addRow(value=privateDir)
            else:
                self.mountTableFrame.addRow(value=[privateDir,''])

    def addDirectory( self ):
        "Append an empty row to the private-directories table."
        self.mountTableFrame.addRow()

    def addVlanInterface( self ):
        "Append an empty row to the VLAN-interfaces table."
        self.vlanTableFrame.addRow()

    def addInterface( self ):
        "Append an empty row to the external-interfaces table."
        self.tableFrame.addRow()

    def apply(self):
        "Harvest all widget values into the self.result dict."
        # Skip row 0 (the header) and empty rows in every table.
        externalInterfaces = []
        for row in range(self.tableFrame.rows):
            if (len(self.tableFrame.get(row, 0)) > 0 and
                row > 0):
                externalInterfaces.append(self.tableFrame.get(row, 0))
        vlanInterfaces = []
        for row in range(self.vlanTableFrame.rows):
            if (len(self.vlanTableFrame.get(row, 0)) > 0 and
                len(self.vlanTableFrame.get(row, 1)) > 0 and
                row > 0):
                vlanInterfaces.append([self.vlanTableFrame.get(row, 0), self.vlanTableFrame.get(row, 1)])
        privateDirectories = []
        for row in range(self.mountTableFrame.rows):
            if len(self.mountTableFrame.get(row, 0)) > 0 and row > 0:
                # With a persistent dir -> tuple; otherwise just the mount path.
                if len(self.mountTableFrame.get(row, 1)) > 0:
                    privateDirectories.append((self.mountTableFrame.get(row, 0), self.mountTableFrame.get(row, 1)))
                else:
                    privateDirectories.append(self.mountTableFrame.get(row, 0))
        results = {'cpu': self.cpuEntry.get(),
                   'cores':self.coreEntry.get(),
                   'sched':self.schedVar.get(),
                   'hostname':self.hostnameEntry.get(),
                   'ip':self.ipEntry.get(),
                   'defaultRoute':self.routeEntry.get(),
                   'startCommand':self.startEntry.get(),
                   'stopCommand':self.stopEntry.get(),
                   'privateDirectory':privateDirectories,
                   'externalInterfaces':externalInterfaces,
                   'vlanInterfaces':vlanInterfaces}
        self.result = results
class SwitchDialog(CustomDialog):
    """Dialog for editing an OpenFlow switch's preferences.

    On OK, apply() leaves a preference dict in self.result; it stays None
    if the dialog is cancelled.
    """
    def __init__(self, master, title, prefDefaults):
        # prefValues: existing preference dict used to pre-populate fields.
        self.prefValues = prefDefaults
        self.result = None
        CustomDialog.__init__(self, master, title)
    def body(self, master):
        "Build the dialog widgets from the current preference values."
        self.rootFrame = master
        self.leftfieldFrame = Frame(self.rootFrame)
        self.rightfieldFrame = Frame(self.rootFrame)
        self.leftfieldFrame.grid(row=0, column=0, sticky='nswe')
        self.rightfieldFrame.grid(row=0, column=1, sticky='nswe')
        rowCount = 0
        externalInterfaces = []
        if 'externalInterfaces' in self.prefValues:
            externalInterfaces = self.prefValues['externalInterfaces']
        # Field for Hostname
        Label(self.leftfieldFrame, text="Hostname:").grid(row=rowCount, sticky=E)
        self.hostnameEntry = Entry(self.leftfieldFrame)
        self.hostnameEntry.grid(row=rowCount, column=1)
        self.hostnameEntry.insert(0, self.prefValues['hostname'])
        rowCount+=1
        # Field for DPID
        Label(self.leftfieldFrame, text="DPID:").grid(row=rowCount, sticky=E)
        self.dpidEntry = Entry(self.leftfieldFrame)
        self.dpidEntry.grid(row=rowCount, column=1)
        if 'dpid' in self.prefValues:
            self.dpidEntry.insert(0, self.prefValues['dpid'])
        rowCount+=1
        # Field for Netflow
        Label(self.leftfieldFrame, text="Enable NetFlow:").grid(row=rowCount, sticky=E)
        self.nflow = IntVar()
        self.nflowButton = Checkbutton(self.leftfieldFrame, variable=self.nflow)
        self.nflowButton.grid(row=rowCount, column=1, sticky=W)
        # Stored preference is the string '0'/'1', not a bool.
        if 'netflow' in self.prefValues:
            if self.prefValues['netflow'] == '0':
                self.nflowButton.deselect()
            else:
                self.nflowButton.select()
        else:
            self.nflowButton.deselect()
        rowCount+=1
        # Field for sflow
        Label(self.leftfieldFrame, text="Enable sFlow:").grid(row=rowCount, sticky=E)
        self.sflow = IntVar()
        self.sflowButton = Checkbutton(self.leftfieldFrame, variable=self.sflow)
        self.sflowButton.grid(row=rowCount, column=1, sticky=W)
        if 'sflow' in self.prefValues:
            if self.prefValues['sflow'] == '0':
                self.sflowButton.deselect()
            else:
                self.sflowButton.select()
        else:
            self.sflowButton.deselect()
        rowCount+=1
        # Selection of switch type
        Label(self.leftfieldFrame, text="Switch Type:").grid(row=rowCount, sticky=E)
        self.switchType = StringVar(self.leftfieldFrame)
        self.switchTypeMenu = OptionMenu(self.leftfieldFrame, self.switchType, "Default", "Open vSwitch Kernel Mode", "Indigo Virtual Switch", "Userspace Switch", "Userspace Switch inNamespace")
        self.switchTypeMenu.grid(row=rowCount, column=1, sticky=W)
        # Map the stored short code to its menu label (reverse of apply()).
        if 'switchType' in self.prefValues:
            switchTypePref = self.prefValues['switchType']
            if switchTypePref == 'ivs':
                self.switchType.set("Indigo Virtual Switch")
            elif switchTypePref == 'userns':
                self.switchType.set("Userspace Switch inNamespace")
            elif switchTypePref == 'user':
                self.switchType.set("Userspace Switch")
            elif switchTypePref == 'ovs':
                self.switchType.set("Open vSwitch Kernel Mode")
            else:
                self.switchType.set("Default")
        else:
            self.switchType.set("Default")
        rowCount+=1
        # Field for Switch IP
        Label(self.leftfieldFrame, text="IP Address:").grid(row=rowCount, sticky=E)
        self.ipEntry = Entry(self.leftfieldFrame)
        self.ipEntry.grid(row=rowCount, column=1)
        if 'switchIP' in self.prefValues:
            self.ipEntry.insert(0, self.prefValues['switchIP'])
        rowCount+=1
        # Field for DPCTL port
        Label(self.leftfieldFrame, text="DPCTL port:").grid(row=rowCount, sticky=E)
        self.dpctlEntry = Entry(self.leftfieldFrame)
        self.dpctlEntry.grid(row=rowCount, column=1)
        if 'dpctl' in self.prefValues:
            self.dpctlEntry.insert(0, self.prefValues['dpctl'])
        rowCount+=1
        # External Interfaces
        Label(self.rightfieldFrame, text="External Interface:").grid(row=0, sticky=E)
        self.b = Button( self.rightfieldFrame, text='Add', command=self.addInterface)
        self.b.grid(row=0, column=1)
        self.interfaceFrame = VerticalScrolledTable(self.rightfieldFrame, rows=0, columns=1, title='External Interfaces')
        self.interfaceFrame.grid(row=1, column=0, sticky='nswe', columnspan=2)
        self.tableFrame = self.interfaceFrame.interior
        # Add defined interfaces
        for externalInterface in externalInterfaces:
            self.tableFrame.addRow(value=[externalInterface])
        self.commandFrame = Frame(self.rootFrame)
        self.commandFrame.grid(row=1, column=0, sticky='nswe', columnspan=2)
        self.commandFrame.columnconfigure(1, weight=1)
        # Start command
        Label(self.commandFrame, text="Start Command:").grid(row=0, column=0, sticky=W)
        self.startEntry = Entry(self.commandFrame)
        self.startEntry.grid(row=0, column=1, sticky='nsew')
        if 'startCommand' in self.prefValues:
            self.startEntry.insert(0, str(self.prefValues['startCommand']))
        # Stop command
        Label(self.commandFrame, text="Stop Command:").grid(row=1, column=0, sticky=W)
        self.stopEntry = Entry(self.commandFrame)
        self.stopEntry.grid(row=1, column=1, sticky='nsew')
        if 'stopCommand' in self.prefValues:
            self.stopEntry.insert(0, str(self.prefValues['stopCommand']))
    def addInterface( self ):
        "Append an empty row to the external-interfaces table."
        self.tableFrame.addRow()
    def defaultDpid( self, name):
        "Derive dpid from switch name, s1 -> 1"
        assert self  # satisfy pylint and allow contextual override
        try:
            dpid = int( re.findall( r'\d+', name )[ 0 ] )
            dpid = hex( dpid )[ 2: ]
            return dpid
        except IndexError:
            # No digits in the name: no default DPID can be derived.
            return None
            #raise Exception( 'Unable to derive default datapath ID - '
            #                 'please either specify a dpid or use a '
            #                 'canonical switch name such as s23.' )
    def apply(self):
        "Translate the widget state into the self.result preference dict."
        externalInterfaces = []
        # Unlike the host dialog, this table has no header row, so all
        # rows are data rows.
        for row in range(self.tableFrame.rows):
            #print 'Interface is ' + self.tableFrame.get(row, 0)
            if len(self.tableFrame.get(row, 0)) > 0:
                externalInterfaces.append(self.tableFrame.get(row, 0))
        dpid = self.dpidEntry.get()
        if (self.defaultDpid(self.hostnameEntry.get()) is None
           and len(dpid) == 0):
            # NOTE(review): after showing this error the method still
            # builds and stores a result with an empty dpid — confirm the
            # dialog is intended to proceed anyway.
            showerror(title="Error",
                      message= 'Unable to derive default datapath ID - '
                      'please either specify a DPID or use a '
                      'canonical switch name such as s23.' )
        results = {'externalInterfaces':externalInterfaces,
                   'hostname':self.hostnameEntry.get(),
                   'dpid':dpid,
                   'startCommand':self.startEntry.get(),
                   'stopCommand':self.stopEntry.get(),
                   'sflow':str(self.sflow.get()),
                   'netflow':str(self.nflow.get()),
                   'dpctl':self.dpctlEntry.get(),
                   'switchIP':self.ipEntry.get()}
        # Map the menu label back to the short code stored in preferences.
        sw = self.switchType.get()
        if sw == 'Indigo Virtual Switch':
            results['switchType'] = 'ivs'
            if StrictVersion(MININET_VERSION) < StrictVersion('2.1'):
                self.ovsOk = False
                showerror(title="Error",
                          message='MiniNet version 2.1+ required. You have '+VERSION+'.')
        elif sw == 'Userspace Switch inNamespace':
            results['switchType'] = 'userns'
        elif sw == 'Userspace Switch':
            results['switchType'] = 'user'
        elif sw == 'Open vSwitch Kernel Mode':
            results['switchType'] = 'ovs'
        else:
            results['switchType'] = 'default'
        self.result = results
class VerticalScrolledTable(LabelFrame):
    """A Tkinter table wrapped in a vertically scrollable labeled frame.

    * Place widgets inside the 'interior' TableFrame attribute.
    * Construct and pack/place/grid this widget normally.
    * Only vertical scrolling is supported.
    """
    def __init__(self, parent, rows=2, columns=2, title=None, *args, **kw):
        LabelFrame.__init__(self, parent, text=title, padx=5, pady=5, *args, **kw)
        # A canvas plus a vertical scrollbar provide the scrolling.
        scrollbar = Scrollbar(self, orient=VERTICAL)
        scrollbar.pack(fill=Y, side=RIGHT, expand=FALSE)
        canvas = Canvas(self, bd=0, highlightthickness=0,
                        yscrollcommand=scrollbar.set)
        canvas.pack(side=LEFT, fill=BOTH, expand=TRUE)
        scrollbar.config(command=canvas.yview)
        # Start with the view at the origin.
        canvas.xview_moveto(0)
        canvas.yview_moveto(0)
        # The table itself lives inside the canvas and scrolls with it.
        self.interior = interior = TableFrame(canvas, rows=rows, columns=columns)
        interior_id = canvas.create_window(0, 0, window=interior,
                                           anchor=NW)
        def _configure_interior(_event):
            # Keep the scroll region (and canvas width) in sync with the
            # requested size of the inner table.
            size = (interior.winfo_reqwidth(), interior.winfo_reqheight())
            canvas.config(scrollregion="0 0 %s %s" % size)
            if interior.winfo_reqwidth() != canvas.winfo_width():
                canvas.config(width=interior.winfo_reqwidth())
        interior.bind('<Configure>', _configure_interior)
        def _configure_canvas(_event):
            # Stretch the inner table to fill the canvas horizontally.
            if interior.winfo_reqwidth() != canvas.winfo_width():
                canvas.itemconfigure(interior_id, width=canvas.winfo_width())
        canvas.bind('<Configure>', _configure_canvas)
class TableFrame(Frame):
    "A grid of Entry widgets with helpers to read, write and append rows."
    def __init__(self, parent, rows=2, columns=2):
        Frame.__init__(self, parent, background="black")
        self._widgets = []
        self.rows = rows
        self.columns = columns
        for rowIndex in range(rows):
            rowWidgets = []
            for columnIndex in range(columns):
                entry = Entry(self, borderwidth=0)
                entry.grid(row=rowIndex, column=columnIndex,
                           sticky="wens", padx=1, pady=1)
                rowWidgets.append(entry)
            self._widgets.append(rowWidgets)
    def set(self, row, column, value):
        "Insert value at the start of the Entry at (row, column)."
        self._widgets[row][column].insert(0, value)
    def get(self, row, column):
        "Return the current text of the Entry at (row, column)."
        return self._widgets[row][column].get()
    def addRow( self, value=None, readonly=False ):
        "Append a row of Entry widgets, optionally pre-filled and read-only."
        rowWidgets = []
        for columnIndex in range(self.columns):
            entry = Entry(self, borderwidth=0)
            entry.grid(row=self.rows, column=columnIndex,
                       sticky="wens", padx=1, pady=1)
            if value is not None:
                entry.insert(0, value[columnIndex])
            if readonly == True:
                entry.configure(state='readonly')
            rowWidgets.append(entry)
        self._widgets.append(rowWidgets)
        self.update_idletasks()
        self.rows += 1
class LinkDialog(tkSimpleDialog.Dialog):
    """Dialog for editing a link's traffic-control options (bw, delay,
    loss, queue size, jitter, speedup). apply() stores only the fields the
    user actually filled in, as self.result.
    """
    def __init__(self, parent, title, linkDefaults):
        # linkValues: existing link-option dict used to pre-populate fields.
        self.linkValues = linkDefaults
        tkSimpleDialog.Dialog.__init__(self, parent, title)
    def body(self, master):
        "Build the entry widgets; returns the widget given initial focus."
        self.var = StringVar(master)
        Label(master, text="Bandwidth:").grid(row=0, sticky=E)
        self.e1 = Entry(master)
        self.e1.grid(row=0, column=1)
        Label(master, text="Mbit").grid(row=0, column=2, sticky=W)
        if 'bw' in self.linkValues:
            self.e1.insert(0,str(self.linkValues['bw']))
        Label(master, text="Delay:").grid(row=1, sticky=E)
        self.e2 = Entry(master)
        self.e2.grid(row=1, column=1)
        if 'delay' in self.linkValues:
            self.e2.insert(0, self.linkValues['delay'])
        Label(master, text="Loss:").grid(row=2, sticky=E)
        self.e3 = Entry(master)
        self.e3.grid(row=2, column=1)
        Label(master, text="%").grid(row=2, column=2, sticky=W)
        if 'loss' in self.linkValues:
            self.e3.insert(0, str(self.linkValues['loss']))
        Label(master, text="Max Queue size:").grid(row=3, sticky=E)
        self.e4 = Entry(master)
        self.e4.grid(row=3, column=1)
        if 'max_queue_size' in self.linkValues:
            self.e4.insert(0, str(self.linkValues['max_queue_size']))
        Label(master, text="Jitter:").grid(row=4, sticky=E)
        self.e5 = Entry(master)
        self.e5.grid(row=4, column=1)
        if 'jitter' in self.linkValues:
            self.e5.insert(0, self.linkValues['jitter'])
        Label(master, text="Speedup:").grid(row=5, sticky=E)
        self.e6 = Entry(master)
        self.e6.grid(row=5, column=1)
        if 'speedup' in self.linkValues:
            self.e6.insert(0, str(self.linkValues['speedup']))
        return self.e1 # initial focus
    def apply(self):
        "Collect the non-empty fields into self.result."
        # NOTE(review): int() raises ValueError for non-numeric input in
        # the bw/loss/queue/speedup fields; no validation happens here.
        self.result = {}
        if len(self.e1.get()) > 0:
            self.result['bw'] = int(self.e1.get())
        if len(self.e2.get()) > 0:
            self.result['delay'] = self.e2.get()
        if len(self.e3.get()) > 0:
            self.result['loss'] = int(self.e3.get())
        if len(self.e4.get()) > 0:
            self.result['max_queue_size'] = int(self.e4.get())
        if len(self.e5.get()) > 0:
            self.result['jitter'] = self.e5.get()
        if len(self.e6.get()) > 0:
            self.result['speedup'] = int(self.e6.get())
class ControllerDialog(tkSimpleDialog.Dialog):
    """Dialog for editing a controller's name, port, type and protocol.

    apply() stores the chosen values as a preference dict in self.result.
    """
    def __init__(self, parent, title, ctrlrDefaults=None):
        # Bug fix: the original only assigned self.ctrlrValues when
        # defaults were supplied, so body() raised AttributeError for a
        # missing/empty ctrlrDefaults. Always assign it.
        self.ctrlrValues = ctrlrDefaults if ctrlrDefaults else {}
        tkSimpleDialog.Dialog.__init__(self, parent, title)
    def body(self, master):
        "Build the dialog widgets; returns the widget given initial focus."
        self.var = StringVar(master)
        self.protcolvar = StringVar(master)
        rowCount=0
        # Field for Hostname
        Label(master, text="Name:").grid(row=rowCount, sticky=E)
        self.hostnameEntry = Entry(master)
        self.hostnameEntry.grid(row=rowCount, column=1)
        # .get() with defaults keeps the dialog usable when no preference
        # dict was supplied (see __init__).
        self.hostnameEntry.insert(0, self.ctrlrValues.get('hostname', ''))
        rowCount+=1
        # Field for Remote Controller Port
        Label(master, text="Controller Port:").grid(row=rowCount, sticky=E)
        self.e2 = Entry(master)
        self.e2.grid(row=rowCount, column=1)
        self.e2.insert(0, self.ctrlrValues.get('remotePort', 6633))
        rowCount+=1
        # Field for Controller Type
        Label(master, text="Controller Type:").grid(row=rowCount, sticky=E)
        controllerType = self.ctrlrValues.get('controllerType', '')
        self.o1 = OptionMenu(master, self.var, "Remote Controller", "In-Band Controller", "OpenFlow Reference", "OVS Controller")
        self.o1.grid(row=rowCount, column=1, sticky=W)
        # Map the stored short code to its menu label (reverse of apply()).
        if controllerType == 'ref':
            self.var.set("OpenFlow Reference")
        elif controllerType == 'inband':
            self.var.set("In-Band Controller")
        elif controllerType == 'remote':
            self.var.set("Remote Controller")
        else:
            self.var.set("OVS Controller")
        rowCount+=1
        # Field for Controller Protocol
        Label(master, text="Protocol:").grid(row=rowCount, sticky=E)
        controllerProtocol = self.ctrlrValues.get('controllerProtocol', 'tcp')
        self.protcol = OptionMenu(master, self.protcolvar, "TCP", "SSL")
        self.protcol.grid(row=rowCount, column=1, sticky=W)
        if controllerProtocol == 'ssl':
            self.protcolvar.set("SSL")
        else:
            self.protcolvar.set("TCP")
        rowCount+=1
        # Field for Remote Controller IP
        remoteFrame= LabelFrame(master, text='Remote/In-Band Controller', padx=5, pady=5)
        remoteFrame.grid(row=rowCount, column=0, columnspan=2, sticky=W)
        Label(remoteFrame, text="IP Address:").grid(row=0, sticky=E)
        self.e1 = Entry(remoteFrame)
        self.e1.grid(row=0, column=1)
        self.e1.insert(0, self.ctrlrValues.get('remoteIP', ''))
        rowCount+=1
        return self.hostnameEntry # initial focus
    def apply(self):
        "Translate the widget state into the self.result preference dict."
        # NOTE(review): int() raises ValueError for a non-numeric port.
        self.result = { 'hostname': self.hostnameEntry.get(),
                        'remoteIP': self.e1.get(),
                        'remotePort': int(self.e2.get())}
        # Map the menu label back to the short code stored in preferences.
        controllerType = self.var.get()
        if controllerType == 'Remote Controller':
            self.result['controllerType'] = 'remote'
        elif controllerType == 'In-Band Controller':
            self.result['controllerType'] = 'inband'
        elif controllerType == 'OpenFlow Reference':
            self.result['controllerType'] = 'ref'
        else:
            self.result['controllerType'] = 'ovsc'
        controllerProtocol = self.protcolvar.get()
        if controllerProtocol == 'SSL':
            self.result['controllerProtocol'] = 'ssl'
        else:
            self.result['controllerProtocol'] = 'tcp'
class ToolTip(object):
    "Pop-up help text displayed near a widget while the pointer hovers."
    def __init__(self, widget):
        self.widget = widget
        self.tipwindow = None
        self.id = None
        self.x = self.y = 0
    def showtip(self, text):
        "Display text in tooltip window"
        self.text = text
        # Nothing to do if a tip is already showing or there is no text.
        if self.tipwindow or not self.text:
            return
        x, y, _cx, cy = self.widget.bbox("insert")
        x += self.widget.winfo_rootx() + 27
        y += cy + self.widget.winfo_rooty() + 27
        tw = Toplevel(self.widget)
        self.tipwindow = tw
        tw.wm_overrideredirect(1)
        tw.wm_geometry("+%d+%d" % (x, y))
        try:
            # For Mac OS
            # pylint: disable=protected-access
            tw.tk.call("::tk::unsupported::MacWindowStyle",
                       "style", tw._w,
                       "help", "noActivates")
            # pylint: enable=protected-access
        except TclError:
            pass
        label = Label(tw, text=self.text, justify=LEFT,
                      background="#ffffe0", relief=SOLID, borderwidth=1,
                      font=("tahoma", "8", "normal"))
        label.pack(ipadx=1)
    def hidetip(self):
        "Destroy the tooltip window, if one is showing."
        tw, self.tipwindow = self.tipwindow, None
        if tw:
            tw.destroy()
class MiniEdit( Frame ):
"A simple network editor for Mininet."
    def __init__( self, parent=None, cheight=600, cwidth=1000 ):
        """Build the editor: preferences, menu bar, canvas, toolbar,
        context menus, and empty model state.

        parent: optional Tk parent widget
        cheight/cwidth: editing-canvas dimensions in pixels
        """
        self.defaultIpBase='10.0.0.0/8'
        # Per-feature default preference dicts (string-valued, as stored
        # in saved .mn files).
        self.nflowDefaults = {'nflowTarget':'',
                              'nflowTimeout':'600',
                              'nflowAddId':'0'}
        self.sflowDefaults = {'sflowTarget':'',
                              'sflowSampling':'400',
                              'sflowHeader':'128',
                              'sflowPolling':'30'}
        self.appPrefs={
            "ipBase": self.defaultIpBase,
            "startCLI": "0",
            "terminalType": 'xterm',
            "switchType": 'ovs',
            "dpctl": '',
            'sflow':self.sflowDefaults,
            'netflow':self.nflowDefaults,
            'openFlowVersions':{'ovsOf10':'1',
                                'ovsOf11':'0',
                                'ovsOf12':'0',
                                'ovsOf13':'0'}
        }
        Frame.__init__( self, parent )
        self.action = None
        self.appName = 'MiniEdit'
        self.fixedFont = tkFont.Font ( family="DejaVu Sans Mono", size="14" )
        # Style
        self.font = ( 'Geneva', 9 )
        self.smallFont = ( 'Geneva', 7 )
        self.bg = 'white'
        # Title
        self.top = self.winfo_toplevel()
        self.top.title( self.appName )
        # Menu bar
        self.createMenubar()
        # Editing canvas
        self.cheight, self.cwidth = cheight, cwidth
        self.cframe, self.canvas = self.createCanvas()
        # Controller name -> preference dict
        self.controllers = {}
        # Toolbar
        self.images = miniEditImages()
        self.buttons = {}
        self.active = None
        self.tools = ( 'Select', 'Host', 'Switch', 'LegacySwitch', 'LegacyRouter', 'NetLink', 'Controller' )
        self.customColors = { 'Switch': 'darkGreen', 'Host': 'blue' }
        self.toolbar = self.createToolbar()
        # Layout
        self.toolbar.grid( column=0, row=0, sticky='nsew')
        self.cframe.grid( column=1, row=0 )
        self.columnconfigure( 1, weight=1 )
        self.rowconfigure( 0, weight=1 )
        self.pack( expand=True, fill='both' )
        # About box
        self.aboutBox = None
        # Initialize node data
        self.nodeBindings = self.createNodeBindings()
        # Prefix used to build default node names, e.g. 'h' + number.
        self.nodePrefixes = { 'LegacyRouter': 'r', 'LegacySwitch': 's', 'Switch': 's', 'Host': 'h' , 'Controller': 'c'}
        # Two-way mapping between canvas items and their icon widgets.
        self.widgetToItem = {}
        self.itemToWidget = {}
        # Initialize link tool
        self.link = self.linkWidget = None
        # Selection support
        self.selection = None
        # Keyboard bindings
        self.bind( '<Control-q>', lambda event: self.quit() )
        self.bind( '<KeyPress-Delete>', self.deleteSelection )
        self.bind( '<KeyPress-BackSpace>', self.deleteSelection )
        self.focus()
        # Per-node-type context menus: the plain popups are used while
        # editing, the *Run* popups while the network is running.
        self.hostPopup = Menu(self.top, tearoff=0)
        self.hostPopup.add_command(label='Host Options', font=self.font)
        self.hostPopup.add_separator()
        self.hostPopup.add_command(label='Properties', font=self.font, command=self.hostDetails )
        self.hostRunPopup = Menu(self.top, tearoff=0)
        self.hostRunPopup.add_command(label='Host Options', font=self.font)
        self.hostRunPopup.add_separator()
        self.hostRunPopup.add_command(label='Terminal', font=self.font, command=self.xterm )
        self.legacyRouterRunPopup = Menu(self.top, tearoff=0)
        self.legacyRouterRunPopup.add_command(label='Router Options', font=self.font)
        self.legacyRouterRunPopup.add_separator()
        self.legacyRouterRunPopup.add_command(label='Terminal', font=self.font, command=self.xterm )
        self.switchPopup = Menu(self.top, tearoff=0)
        self.switchPopup.add_command(label='Switch Options', font=self.font)
        self.switchPopup.add_separator()
        self.switchPopup.add_command(label='Properties', font=self.font, command=self.switchDetails )
        self.switchRunPopup = Menu(self.top, tearoff=0)
        self.switchRunPopup.add_command(label='Switch Options', font=self.font)
        self.switchRunPopup.add_separator()
        self.switchRunPopup.add_command(label='List bridge details', font=self.font, command=self.listBridge )
        self.linkPopup = Menu(self.top, tearoff=0)
        self.linkPopup.add_command(label='Link Options', font=self.font)
        self.linkPopup.add_separator()
        self.linkPopup.add_command(label='Properties', font=self.font, command=self.linkDetails )
        self.linkRunPopup = Menu(self.top, tearoff=0)
        self.linkRunPopup.add_command(label='Link Options', font=self.font)
        self.linkRunPopup.add_separator()
        self.linkRunPopup.add_command(label='Link Up', font=self.font, command=self.linkUp )
        self.linkRunPopup.add_command(label='Link Down', font=self.font, command=self.linkDown )
        self.controllerPopup = Menu(self.top, tearoff=0)
        self.controllerPopup.add_command(label='Controller Options', font=self.font)
        self.controllerPopup.add_separator()
        self.controllerPopup.add_command(label='Properties', font=self.font, command=self.controllerDetails )
        # Event handling initalization
        self.linkx = self.linky = self.linkItem = None
        self.lastSelection = None
        # Model initialization
        self.links = {}
        self.hostOpts = {}
        self.switchOpts = {}
        self.hostCount = 0
        self.switchCount = 0
        self.controllerCount = 0
        self.net = None
        # Close window gracefully
        Wm.wm_protocol( self.top, name='WM_DELETE_WINDOW', func=self.quit )
    def quit( self ):
        "Stop our network, if any, then quit."
        # Shut the running Mininet network down before tearing down the UI.
        self.stop()
        Frame.quit( self )
def createMenubar( self ):
"Create our menu bar."
font = self.font
mbar = Menu( self.top, font=font )
self.top.configure( menu=mbar )
fileMenu = Menu( mbar, tearoff=False )
mbar.add_cascade( label="File", font=font, menu=fileMenu )
fileMenu.add_command( label="New", font=font, command=self.newTopology )
fileMenu.add_command( label="Open", font=font, command=self.loadTopology )
fileMenu.add_command( label="Save", font=font, command=self.saveTopology )
fileMenu.add_command( label="Export Level 2 Script", font=font, command=self.exportScript )
fileMenu.add_separator()
fileMenu.add_command( label='Quit', command=self.quit, font=font )
editMenu = Menu( mbar, tearoff=False )
mbar.add_cascade( label="Edit", font=font, menu=editMenu )
editMenu.add_command( label="Cut", font=font,
command=lambda: self.deleteSelection( None ) )
editMenu.add_command( label="Preferences", font=font, command=self.prefDetails)
runMenu = Menu( mbar, tearoff=False )
mbar.add_cascade( label="Run", font=font, menu=runMenu )
runMenu.add_command( label="Run", font=font, command=self.doRun )
runMenu.add_command( label="Stop", font=font, command=self.doStop )
fileMenu.add_separator()
runMenu.add_command( label='Show OVS Summary', font=font, command=self.ovsShow )
runMenu.add_command( label='Root Terminal', font=font, command=self.rootTerminal )
# Application menu
appMenu = Menu( mbar, tearoff=False )
mbar.add_cascade( label="Help", font=font, menu=appMenu )
appMenu.add_command( label='About MiniEdit', command=self.about,
font=font)
# Canvas
    def createCanvas( self ):
        "Create and return our scrolling canvas frame."
        f = Frame( self )
        canvas = Canvas( f, width=self.cwidth, height=self.cheight,
                         bg=self.bg )
        # Scroll bars
        xbar = Scrollbar( f, orient='horizontal', command=canvas.xview )
        ybar = Scrollbar( f, orient='vertical', command=canvas.yview )
        canvas.configure( xscrollcommand=xbar.set, yscrollcommand=ybar.set )
        # Resize box (corner filler between the two scrollbars)
        resize = Label( f, bg='white' )
        # Layout
        canvas.grid( row=0, column=1, sticky='nsew')
        ybar.grid( row=0, column=2, sticky='ns')
        xbar.grid( row=1, column=1, sticky='ew' )
        resize.grid( row=1, column=2, sticky='nsew' )
        # Resize behavior: the canvas cell absorbs all extra space.
        f.rowconfigure( 0, weight=1 )
        f.columnconfigure( 1, weight=1 )
        f.grid( row=0, column=0, sticky='nsew' )
        f.bind( '<Configure>', lambda event: self.updateScrollRegion() )
        # Mouse bindings
        canvas.bind( '<ButtonPress-1>', self.clickCanvas )
        canvas.bind( '<B1-Motion>', self.dragCanvas )
        canvas.bind( '<ButtonRelease-1>', self.releaseCanvas )
        # Returns (containing frame, canvas) — the caller keeps both.
        return f, canvas
def updateScrollRegion( self ):
"Update canvas scroll region to hold everything."
bbox = self.canvas.bbox( 'all' )
if bbox is not None:
self.canvas.configure( scrollregion=( 0, 0, bbox[ 2 ],
bbox[ 3 ] ) )
def canvasx( self, x_root ):
"Convert root x coordinate to canvas coordinate."
c = self.canvas
return c.canvasx( x_root ) - c.winfo_rootx()
def canvasy( self, y_root ):
"Convert root y coordinate to canvas coordinate."
c = self.canvas
return c.canvasy( y_root ) - c.winfo_rooty()
# Toolbar
    def activate( self, toolName ):
        "Activate a tool and press its button."
        # Adjust button appearance: raise the previously active button,
        # sink the newly selected one.
        if self.active:
            self.buttons[ self.active ].configure( relief='raised' )
        self.buttons[ toolName ].configure( relief='sunken' )
        # Activate dynamic bindings
        self.active = toolName
    @staticmethod
    def createToolTip(widget, text):
        "Attach a hover ToolTip displaying text to the given widget."
        toolTip = ToolTip(widget)
        def enter(_event):
            toolTip.showtip(text)
        def leave(_event):
            toolTip.hidetip()
        widget.bind('<Enter>', enter)
        widget.bind('<Leave>', leave)
    def createToolbar( self ):
        "Create and return our toolbar frame."
        toolbar = Frame( self )
        # Tools
        for tool in self.tools:
            # Default argument binds the current tool name at definition
            # time (otherwise every button would activate the last tool).
            cmd = ( lambda t=tool: self.activate( t ) )
            b = Button( toolbar, text=tool, font=self.smallFont, command=cmd)
            if tool in self.images:
                b.config( height=35, image=self.images[ tool ] )
                self.createToolTip(b, str(tool))
                # b.config( compound='top' )
            b.pack( fill='x' )
            self.buttons[ tool ] = b
        self.activate( self.tools[ 0 ] )
        # Spacer
        Label( toolbar, text='' ).pack()
        # Commands (Run/Stop buttons dispatch to self.doRun / self.doStop)
        for cmd, color in [ ( 'Stop', 'darkRed' ), ( 'Run', 'darkGreen' ) ]:
            doCmd = getattr( self, 'do' + cmd )
            b = Button( toolbar, text=cmd, font=self.smallFont,
                        fg=color, command=doCmd )
            b.pack( fill='x', side='bottom' )
        return toolbar
    def doRun( self ):
        "Run command: disable editing tools and start the network."
        self.activate( 'Select' )
        for tool in self.tools:
            self.buttons[ tool ].config( state='disabled' )
        self.start()
    def doStop( self ):
        "Stop command: stop the network and re-enable the editing tools."
        self.stop()
        for tool in self.tools:
            self.buttons[ tool ].config( state='normal' )
def addNode( self, node, nodeNum, x, y, name=None):
"Add a new node to our canvas."
if 'Switch' == node:
self.switchCount += 1
if 'Host' == node:
self.hostCount += 1
if 'Controller' == node:
self.controllerCount += 1
if name is None:
name = self.nodePrefixes[ node ] + nodeNum
self.addNamedNode(node, name, x, y)
    def addNamedNode( self, node, name, x, y):
        "Add a new node with the given name to our canvas."
        icon = self.nodeIcon( node, name )
        item = self.canvas.create_window( x, y, anchor='c', window=icon,
                                          tags=node )
        # Maintain the two-way icon <-> canvas-item mapping.
        self.widgetToItem[ icon ] = item
        self.itemToWidget[ item ] = icon
        # Per-node dict of attached links, filled in by addLink().
        icon.links = {}
    def convertJsonUnicode(self, text):
        """Recursively convert unicode strings in a JSON-decoded structure
        to UTF-8 byte strings, since some parts of Mininet don't like
        Unicode. (Python 2 only: relies on dict.iteritems and the unicode
        type, neither of which exists in Python 3.)
        """
        if isinstance(text, dict):
            return {self.convertJsonUnicode(key): self.convertJsonUnicode(value) for key, value in text.iteritems()}
        elif isinstance(text, list):
            return [self.convertJsonUnicode(element) for element in text]
        elif isinstance(text, unicode):
            return text.encode('utf-8')
        else:
            return text
    def loadTopology( self ):
        """Load command: read a .mn (JSON) topology file and rebuild the
        application preferences, controllers, hosts, switches and links
        on the canvas.
        """
        c = self.canvas
        myFormats = [
            ('Mininet Topology','*.mn'),
            ('All Files','*'),
        ]
        f = tkFileDialog.askopenfile(filetypes=myFormats, mode='rb')
        if f == None:
            return
        # Clear any existing topology before loading the new one.
        self.newTopology()
        loadedTopology = self.convertJsonUnicode(json.load(f))
        # Load application preferences, filling in any keys missing from
        # files saved by older versions.
        if 'application' in loadedTopology:
            self.appPrefs = dict(self.appPrefs.items() + loadedTopology['application'].items())
            if "ovsOf10" not in self.appPrefs["openFlowVersions"]:
                self.appPrefs["openFlowVersions"]["ovsOf10"] = '0'
            if "ovsOf11" not in self.appPrefs["openFlowVersions"]:
                self.appPrefs["openFlowVersions"]["ovsOf11"] = '0'
            if "ovsOf12" not in self.appPrefs["openFlowVersions"]:
                self.appPrefs["openFlowVersions"]["ovsOf12"] = '0'
            if "ovsOf13" not in self.appPrefs["openFlowVersions"]:
                self.appPrefs["openFlowVersions"]["ovsOf13"] = '0'
            if "sflow" not in self.appPrefs:
                self.appPrefs["sflow"] = self.sflowDefaults
            if "netflow" not in self.appPrefs:
                self.appPrefs["netflow"] = self.nflowDefaults
        # Load controllers
        if 'controllers' in loadedTopology:
            if loadedTopology['version'] == '1':
                # This is old location of controller info: version-1 files
                # stored a single controller 'c0'.
                hostname = 'c0'
                self.controllers = {}
                self.controllers[hostname] = loadedTopology['controllers']['c0']
                self.controllers[hostname]['hostname'] = hostname
                self.addNode('Controller', 0, float(30), float(30), name=hostname)
                icon = self.findWidgetByName(hostname)
                icon.bind('<Button-3>', self.do_controllerPopup )
            else:
                controllers = loadedTopology['controllers']
                for controller in controllers:
                    hostname = controller['opts']['hostname']
                    x = controller['x']
                    y = controller['y']
                    self.addNode('Controller', 0, float(x), float(y), name=hostname)
                    self.controllers[hostname] = controller['opts']
                    icon = self.findWidgetByName(hostname)
                    icon.bind('<Button-3>', self.do_controllerPopup )
        # Load hosts
        hosts = loadedTopology['hosts']
        for host in hosts:
            nodeNum = host['number']
            # Default the hostname from the node number when absent.
            hostname = 'h'+nodeNum
            if 'hostname' in host['opts']:
                hostname = host['opts']['hostname']
            else:
                host['opts']['hostname'] = hostname
            if 'nodeNum' not in host['opts']:
                host['opts']['nodeNum'] = int(nodeNum)
            x = host['x']
            y = host['y']
            self.addNode('Host', nodeNum, float(x), float(y), name=hostname)
            # Fix JSON converting tuple to list when saving
            if 'privateDirectory' in host['opts']:
                newDirList = []
                for privateDir in host['opts']['privateDirectory']:
                    if isinstance( privateDir, list ):
                        newDirList.append((privateDir[0],privateDir[1]))
                    else:
                        newDirList.append(privateDir)
                host['opts']['privateDirectory'] = newDirList
            self.hostOpts[hostname] = host['opts']
            icon = self.findWidgetByName(hostname)
            icon.bind('<Button-3>', self.do_hostPopup )
        # Load switches (includes legacy switches and legacy routers,
        # distinguished by opts['switchType'])
        switches = loadedTopology['switches']
        for switch in switches:
            nodeNum = switch['number']
            hostname = 's'+nodeNum
            if 'controllers' not in switch['opts']:
                switch['opts']['controllers'] = []
            if 'switchType' not in switch['opts']:
                switch['opts']['switchType'] = 'default'
            if 'hostname' in switch['opts']:
                hostname = switch['opts']['hostname']
            else:
                switch['opts']['hostname'] = hostname
            if 'nodeNum' not in switch['opts']:
                switch['opts']['nodeNum'] = int(nodeNum)
            x = switch['x']
            y = switch['y']
            if switch['opts']['switchType'] == "legacyRouter":
                self.addNode('LegacyRouter', nodeNum, float(x), float(y), name=hostname)
                icon = self.findWidgetByName(hostname)
                icon.bind('<Button-3>', self.do_legacyRouterPopup )
            elif switch['opts']['switchType'] == "legacySwitch":
                self.addNode('LegacySwitch', nodeNum, float(x), float(y), name=hostname)
                icon = self.findWidgetByName(hostname)
                icon.bind('<Button-3>', self.do_legacySwitchPopup )
            else:
                self.addNode('Switch', nodeNum, float(x), float(y), name=hostname)
                icon = self.findWidgetByName(hostname)
                icon.bind('<Button-3>', self.do_switchPopup )
            self.switchOpts[hostname] = switch['opts']
            # create links to controllers (drawn as red dashed lines and
            # tagged 'control'; they are not saved as data links)
            if int(loadedTopology['version']) > 1:
                controllers = self.switchOpts[hostname]['controllers']
                for controller in controllers:
                    dest = self.findWidgetByName(controller)
                    dx, dy = self.canvas.coords( self.widgetToItem[ dest ] )
                    self.link = self.canvas.create_line(float(x),
                                                        float(y),
                                                        dx,
                                                        dy,
                                                        width=4,
                                                        fill='red',
                                                        dash=(6, 4, 2, 4),
                                                        tag='link' )
                    c.itemconfig(self.link, tags=c.gettags(self.link)+('control',))
                    self.addLink( icon, dest, linktype='control' )
                    self.createControlLinkBindings()
                    self.link = self.linkWidget = None
            else:
                # Version-1 files: every switch connects to controller c0.
                dest = self.findWidgetByName('c0')
                dx, dy = self.canvas.coords( self.widgetToItem[ dest ] )
                self.link = self.canvas.create_line(float(x),
                                                    float(y),
                                                    dx,
                                                    dy,
                                                    width=4,
                                                    fill='red',
                                                    dash=(6, 4, 2, 4),
                                                    tag='link' )
                c.itemconfig(self.link, tags=c.gettags(self.link)+('control',))
                self.addLink( icon, dest, linktype='control' )
                self.createControlLinkBindings()
                self.link = self.linkWidget = None
        # Load links (data links, drawn as solid blue lines)
        links = loadedTopology['links']
        for link in links:
            srcNode = link['src']
            src = self.findWidgetByName(srcNode)
            sx, sy = self.canvas.coords( self.widgetToItem[ src ] )
            destNode = link['dest']
            dest = self.findWidgetByName(destNode)
            dx, dy = self.canvas.coords( self.widgetToItem[ dest] )
            self.link = self.canvas.create_line( sx, sy, dx, dy, width=4,
                                                 fill='blue', tag='link' )
            c.itemconfig(self.link, tags=c.gettags(self.link)+('data',))
            self.addLink( src, dest, linkopts=link['opts'] )
            self.createDataLinkBindings()
            self.link = self.linkWidget = None
        f.close()
def findWidgetByName( self, name ):
for widget in self.widgetToItem:
if name == widget[ 'text' ]:
return widget
def newTopology( self ):
"New command."
for widget in self.widgetToItem.keys():
self.deleteItem( self.widgetToItem[ widget ] )
self.hostCount = 0
self.switchCount = 0
self.controllerCount = 0
self.links = {}
self.hostOpts = {}
self.switchOpts = {}
self.controllers = {}
self.appPrefs["ipBase"]= self.defaultIpBase
def saveTopology( self ):
"Save command."
myFormats = [
('Mininet Topology','*.mn'),
('All Files','*'),
]
savingDictionary = {}
fileName = tkFileDialog.asksaveasfilename(filetypes=myFormats ,title="Save the topology as...")
if len(fileName ) > 0:
# Save Application preferences
savingDictionary['version'] = '2'
# Save Switches and Hosts
hostsToSave = []
switchesToSave = []
controllersToSave = []
for widget in self.widgetToItem:
name = widget[ 'text' ]
tags = self.canvas.gettags( self.widgetToItem[ widget ] )
x1, y1 = self.canvas.coords( self.widgetToItem[ widget ] )
if 'Switch' in tags or 'LegacySwitch' in tags or 'LegacyRouter' in tags:
nodeNum = self.switchOpts[name]['nodeNum']
nodeToSave = {'number':str(nodeNum),
'x':str(x1),
'y':str(y1),
'opts':self.switchOpts[name] }
switchesToSave.append(nodeToSave)
elif 'Host' in tags:
nodeNum = self.hostOpts[name]['nodeNum']
nodeToSave = {'number':str(nodeNum),
'x':str(x1),
'y':str(y1),
'opts':self.hostOpts[name] }
hostsToSave.append(nodeToSave)
elif 'Controller' in tags:
nodeToSave = {'x':str(x1),
'y':str(y1),
'opts':self.controllers[name] }
controllersToSave.append(nodeToSave)
else:
raise Exception( "Cannot create mystery node: " + name )
savingDictionary['hosts'] = hostsToSave
savingDictionary['switches'] = switchesToSave
savingDictionary['controllers'] = controllersToSave
# Save Links
linksToSave = []
for link in self.links.values():
src = link['src']
dst = link['dest']
linkopts = link['linkOpts']
srcName, dstName = src[ 'text' ], dst[ 'text' ]
linkToSave = {'src':srcName,
'dest':dstName,
'opts':linkopts}
if link['type'] == 'data':
linksToSave.append(linkToSave)
savingDictionary['links'] = linksToSave
# Save Application preferences
savingDictionary['application'] = self.appPrefs
try:
f = open(fileName, 'wb')
f.write(json.dumps(savingDictionary, sort_keys=True, indent=4, separators=(',', ': ')))
# pylint: disable=broad-except
except Exception as er:
print er
# pylint: enable=broad-except
finally:
f.close()
    def exportScript( self ):
        """Export command: write a standalone, runnable Mininet Python
        script that recreates the current canvas topology.

        The generated script builds the net, starts controllers and
        switches, applies post-config (IPs, VLANs, NetFlow/sFlow,
        user start/stop commands), then drops into the Mininet CLI.
        """
        myFormats = [
            ('Mininet Custom Topology','*.py'),
            ('All Files','*'),
        ]
        fileName = tkFileDialog.asksaveasfilename(filetypes=myFormats ,title="Export the topology as...")
        if len(fileName ) > 0:
            #print "Now saving under %s" % fileName
            f = open(fileName, 'wb')
            # --- Emit imports for the generated script ---
            f.write("#!/usr/bin/python\n")
            f.write("\n")
            f.write("from mininet.net import Mininet\n")
            f.write("from mininet.node import Controller, RemoteController, OVSController\n")
            f.write("from mininet.node import CPULimitedHost, Host, Node\n")
            f.write("from mininet.node import OVSKernelSwitch, UserSwitch\n")
            # IVSSwitch only exists in Mininet > 2.0.
            if StrictVersion(MININET_VERSION) > StrictVersion('2.0'):
                f.write("from mininet.node import IVSSwitch\n")
            f.write("from mininet.cli import CLI\n")
            f.write("from mininet.log import setLogLevel, info\n")
            f.write("from mininet.link import TCLink, Intf\n")
            f.write("from subprocess import call\n")
            # Emit an InbandController subclass only if some controller is
            # configured as in-band (checkListening must be a no-op there).
            inBandCtrl = False
            for widget in self.widgetToItem:
                name = widget[ 'text' ]
                tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                if 'Controller' in tags:
                    opts = self.controllers[name]
                    controllerType = opts['controllerType']
                    if controllerType == 'inband':
                        inBandCtrl = True
            if inBandCtrl == True:
                f.write("\n")
                f.write("class InbandController( RemoteController ):\n")
                f.write("\n")
                f.write("    def checkListening( self ):\n")
                f.write("        \"Overridden to do nothing.\"\n")
                f.write("        return\n")
            # --- Emit myNetwork(): net construction ---
            f.write("\n")
            f.write("def myNetwork():\n")
            f.write("\n")
            f.write("    net = Mininet( topo=None,\n")
            if len(self.appPrefs['dpctl']) > 0:
                f.write("                   listenPort="+self.appPrefs['dpctl']+",\n")
            f.write("                   build=False,\n")
            f.write("                   ipBase='"+self.appPrefs['ipBase']+"')\n")
            f.write("\n")
            # --- Emit addController calls ---
            f.write("    info( '*** Adding controller\\n' )\n")
            for widget in self.widgetToItem:
                name = widget[ 'text' ]
                tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                if 'Controller' in tags:
                    opts = self.controllers[name]
                    controllerType = opts['controllerType']
                    # Older saved topologies may lack a protocol; default tcp.
                    if 'controllerProtocol' in opts:
                        controllerProtocol = opts['controllerProtocol']
                    else:
                        controllerProtocol = 'tcp'
                    controllerIP = opts['remoteIP']
                    controllerPort = opts['remotePort']
                    f.write("    "+name+"=net.addController(name='"+name+"',\n")
                    if controllerType == 'remote':
                        f.write("                      controller=RemoteController,\n")
                        f.write("                      ip='"+controllerIP+"',\n")
                    elif controllerType == 'inband':
                        f.write("                      controller=InbandController,\n")
                        f.write("                      ip='"+controllerIP+"',\n")
                    elif controllerType == 'ovsc':
                        f.write("                      controller=OVSController,\n")
                    else:
                        f.write("                      controller=Controller,\n")
                    f.write("                      protocol='"+controllerProtocol+"',\n")
                    f.write("                      port="+str(controllerPort)+")\n")
                    f.write("\n")
            # Save Switches and Hosts
            # --- Emit addSwitch calls (legacy routers become Nodes with
            #     ip_forward enabled; legacy switches are standalone OVS) ---
            f.write("    info( '*** Add switches\\n')\n")
            for widget in self.widgetToItem:
                name = widget[ 'text' ]
                tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                if 'LegacyRouter' in tags:
                    f.write("    "+name+" = net.addHost('"+name+"', cls=Node, ip='0.0.0.0')\n")
                    f.write("    "+name+".cmd('sysctl -w net.ipv4.ip_forward=1')\n")
                if 'LegacySwitch' in tags:
                    f.write("    "+name+" = net.addSwitch('"+name+"', cls=OVSKernelSwitch, failMode='standalone')\n")
                if 'Switch' in tags:
                    opts = self.switchOpts[name]
                    nodeNum = opts['nodeNum']
                    f.write("    "+name+" = net.addSwitch('"+name+"'")
                    # 'default' defers to the app-wide switch type preference.
                    if opts['switchType'] == 'default':
                        if self.appPrefs['switchType'] == 'ivs':
                            f.write(", cls=IVSSwitch")
                        elif self.appPrefs['switchType'] == 'user':
                            f.write(", cls=UserSwitch")
                        elif self.appPrefs['switchType'] == 'userns':
                            f.write(", cls=UserSwitch, inNamespace=True")
                        else:
                            f.write(", cls=OVSKernelSwitch")
                    elif opts['switchType'] == 'ivs':
                        f.write(", cls=IVSSwitch")
                    elif opts['switchType'] == 'user':
                        f.write(", cls=UserSwitch")
                    elif opts['switchType'] == 'userns':
                        f.write(", cls=UserSwitch, inNamespace=True")
                    else:
                        f.write(", cls=OVSKernelSwitch")
                    if 'dpctl' in opts:
                        f.write(", listenPort="+opts['dpctl'])
                    if 'dpid' in opts:
                        f.write(", dpid='"+opts['dpid']+"'")
                    f.write(")\n")
                    if 'externalInterfaces' in opts:
                        for extInterface in opts['externalInterfaces']:
                            f.write("    Intf( '"+extInterface+"', node="+name+" )\n")
            # --- Emit addHost calls ---
            f.write("\n")
            f.write("    info( '*** Add hosts\\n')\n")
            for widget in self.widgetToItem:
                name = widget[ 'text' ]
                tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                if 'Host' in tags:
                    opts = self.hostOpts[name]
                    ip = None
                    defaultRoute = None
                    if 'defaultRoute' in opts and len(opts['defaultRoute']) > 0:
                        defaultRoute = "'via "+opts['defaultRoute']+"'"
                    else:
                        defaultRoute = 'None'
                    # Explicit IP wins; otherwise derive from ipBase + nodeNum.
                    if 'ip' in opts and len(opts['ip']) > 0:
                        ip = opts['ip']
                    else:
                        nodeNum = self.hostOpts[name]['nodeNum']
                        ipBaseNum, prefixLen = netParse( self.appPrefs['ipBase'] )
                        ip = ipAdd(i=nodeNum, prefixLen=prefixLen, ipBaseNum=ipBaseNum)
                    # CPU limits require the CPULimitedHost class.
                    if 'cores' in opts or 'cpu' in opts:
                        f.write("    "+name+" = net.addHost('"+name+"', cls=CPULimitedHost, ip='"+ip+"', defaultRoute="+defaultRoute+")\n")
                        if 'cores' in opts:
                            f.write("    "+name+".setCPUs(cores='"+opts['cores']+"')\n")
                        if 'cpu' in opts:
                            f.write("    "+name+".setCPUFrac(f="+str(opts['cpu'])+", sched='"+opts['sched']+"')\n")
                    else:
                        f.write("    "+name+" = net.addHost('"+name+"', cls=Host, ip='"+ip+"', defaultRoute="+defaultRoute+")\n")
                    if 'externalInterfaces' in opts:
                        for extInterface in opts['externalInterfaces']:
                            f.write("    Intf( '"+extInterface+"', node="+name+" )\n")
            f.write("\n")
            # Save Links
            # --- Emit addLink calls; TC options are assembled into a dict
            #     literal string and passed via cls=TCLink when present ---
            f.write("    info( '*** Add links\\n')\n")
            for key,linkDetail in self.links.iteritems():
                tags = self.canvas.gettags(key)
                if 'data' in tags:
                    optsExist = False
                    src = linkDetail['src']
                    dst = linkDetail['dest']
                    linkopts = linkDetail['linkOpts']
                    srcName, dstName = src[ 'text' ], dst[ 'text' ]
                    bw = ''
                    # delay = ''
                    # loss = ''
                    # max_queue_size = ''
                    linkOpts = "{"
                    if 'bw' in linkopts:
                        bw =  linkopts['bw']
                        linkOpts = linkOpts + "'bw':"+str(bw)
                        optsExist = True
                    if 'delay' in linkopts:
                        # delay =  linkopts['delay']
                        if optsExist:
                            linkOpts = linkOpts + ","
                        linkOpts = linkOpts + "'delay':'"+linkopts['delay']+"'"
                        optsExist = True
                    if 'loss' in linkopts:
                        if optsExist:
                            linkOpts = linkOpts + ","
                        linkOpts = linkOpts + "'loss':"+str(linkopts['loss'])
                        optsExist = True
                    if 'max_queue_size' in linkopts:
                        if optsExist:
                            linkOpts = linkOpts + ","
                        linkOpts = linkOpts + "'max_queue_size':"+str(linkopts['max_queue_size'])
                        optsExist = True
                    if 'jitter' in linkopts:
                        if optsExist:
                            linkOpts = linkOpts + ","
                        linkOpts = linkOpts + "'jitter':'"+linkopts['jitter']+"'"
                        optsExist = True
                    if 'speedup' in linkopts:
                        if optsExist:
                            linkOpts = linkOpts + ","
                        linkOpts = linkOpts + "'speedup':"+str(linkopts['speedup'])
                        optsExist = True
                    linkOpts = linkOpts + "}"
                    if optsExist:
                        f.write("    "+srcName+dstName+" = "+linkOpts+"\n")
                    f.write("    net.addLink("+srcName+", "+dstName)
                    if optsExist:
                        f.write(", cls=TCLink , **"+srcName+dstName)
                    f.write(")\n")
            # --- Emit network startup ---
            f.write("\n")
            f.write("    info( '*** Starting network\\n')\n")
            f.write("    net.build()\n")
            f.write("    info( '*** Starting controllers\\n')\n")
            f.write("    for controller in net.controllers:\n")
            f.write("        controller.start()\n")
            f.write("\n")
            f.write("    info( '*** Starting switches\\n')\n")
            for widget in self.widgetToItem:
                name = widget[ 'text' ]
                tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                if 'Switch' in tags or 'LegacySwitch' in tags:
                    opts = self.switchOpts[name]
                    ctrlList = ",".join(opts['controllers'])
                    f.write("    net.get('"+name+"').start(["+ctrlList+"])\n")
            f.write("\n")
            # --- Emit post-configuration: switch IPs ---
            f.write("    info( '*** Post configure switches and hosts\\n')\n")
            for widget in self.widgetToItem:
                name = widget[ 'text' ]
                tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                if 'Switch' in tags:
                    opts = self.switchOpts[name]
                    # 'userns' switches configure loopback; others configure
                    # the bridge interface named after the switch.
                    if opts['switchType'] == 'default':
                        if self.appPrefs['switchType'] == 'user':
                            if 'switchIP' in opts:
                                if len(opts['switchIP']) > 0:
                                    f.write("    "+name+".cmd('ifconfig "+name+" "+opts['switchIP']+"')\n")
                        elif self.appPrefs['switchType'] == 'userns':
                            if 'switchIP' in opts:
                                if len(opts['switchIP']) > 0:
                                    f.write("    "+name+".cmd('ifconfig lo "+opts['switchIP']+"')\n")
                        elif self.appPrefs['switchType'] == 'ovs':
                            if 'switchIP' in opts:
                                if len(opts['switchIP']) > 0:
                                    f.write("    "+name+".cmd('ifconfig "+name+" "+opts['switchIP']+"')\n")
                    elif opts['switchType'] == 'user':
                        if 'switchIP' in opts:
                            if len(opts['switchIP']) > 0:
                                f.write("    "+name+".cmd('ifconfig "+name+" "+opts['switchIP']+"')\n")
                    elif opts['switchType'] == 'userns':
                        if 'switchIP' in opts:
                            if len(opts['switchIP']) > 0:
                                f.write("    "+name+".cmd('ifconfig lo "+opts['switchIP']+"')\n")
                    elif opts['switchType'] == 'ovs':
                        if 'switchIP' in opts:
                            if len(opts['switchIP']) > 0:
                                f.write("    "+name+".cmd('ifconfig "+name+" "+opts['switchIP']+"')\n")
            # --- Emit post-configuration: host VLANs and start commands ---
            for widget in self.widgetToItem:
                name = widget[ 'text' ]
                tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                if 'Host' in tags:
                    opts = self.hostOpts[name]
                    # Attach vlan interfaces
                    if 'vlanInterfaces' in opts:
                        for vlanInterface in opts['vlanInterfaces']:
                            f.write("    "+name+".cmd('vconfig add "+name+"-eth0 "+vlanInterface[1]+"')\n")
                            f.write("    "+name+".cmd('ifconfig "+name+"-eth0."+vlanInterface[1]+" "+vlanInterface[0]+"')\n")
                    # Run User Defined Start Command
                    if 'startCommand' in opts:
                        f.write("    "+name+".cmdPrint('"+opts['startCommand']+"')\n")
                if 'Switch' in tags:
                    opts = self.switchOpts[name]
                    # Run User Defined Start Command
                    if 'startCommand' in opts:
                        f.write("    "+name+".cmdPrint('"+opts['startCommand']+"')\n")
            # Configure NetFlow
            # (single ovs-vsctl call that creates the NetFlow record and
            #  attaches it to every switch that enabled it)
            nflowValues = self.appPrefs['netflow']
            if len(nflowValues['nflowTarget']) > 0:
                nflowEnabled = False
                nflowSwitches = ''
                for widget in self.widgetToItem:
                    name = widget[ 'text' ]
                    tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                    if 'Switch' in tags:
                        opts = self.switchOpts[name]
                        if 'netflow' in opts:
                            if opts['netflow'] == '1':
                                nflowSwitches = nflowSwitches+' -- set Bridge '+name+' netflow=@MiniEditNF'
                                nflowEnabled=True
                if nflowEnabled:
                    nflowCmd = 'ovs-vsctl -- --id=@MiniEditNF create NetFlow '+ 'target=\\\"'+nflowValues['nflowTarget']+'\\\" '+ 'active-timeout='+nflowValues['nflowTimeout']
                    if nflowValues['nflowAddId'] == '1':
                        nflowCmd = nflowCmd + ' add_id_to_interface=true'
                    else:
                        nflowCmd = nflowCmd + ' add_id_to_interface=false'
                    f.write("    \n")
                    f.write("    call('"+nflowCmd+nflowSwitches+"', shell=True)\n")
            # Configure sFlow
            # (same pattern as NetFlow above)
            sflowValues = self.appPrefs['sflow']
            if len(sflowValues['sflowTarget']) > 0:
                sflowEnabled = False
                sflowSwitches = ''
                for widget in self.widgetToItem:
                    name = widget[ 'text' ]
                    tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                    if 'Switch' in tags:
                        opts = self.switchOpts[name]
                        if 'sflow' in opts:
                            if opts['sflow'] == '1':
                                sflowSwitches = sflowSwitches+' -- set Bridge '+name+' sflow=@MiniEditSF'
                                sflowEnabled=True
                if sflowEnabled:
                    sflowCmd = 'ovs-vsctl -- --id=@MiniEditSF create sFlow '+ 'target=\\\"'+sflowValues['sflowTarget']+'\\\" '+ 'header='+sflowValues['sflowHeader']+' '+ 'sampling='+sflowValues['sflowSampling']+' '+ 'polling='+sflowValues['sflowPolling']
                    f.write("    \n")
                    f.write("    call('"+sflowCmd+sflowSwitches+"', shell=True)\n")
            # --- Emit CLI, stop commands, teardown, and the entry point ---
            f.write("\n")
            f.write("    CLI(net)\n")
            for widget in self.widgetToItem:
                name = widget[ 'text' ]
                tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                if 'Host' in tags:
                    opts = self.hostOpts[name]
                    # Run User Defined Stop Command
                    if 'stopCommand' in opts:
                        f.write("    "+name+".cmdPrint('"+opts['stopCommand']+"')\n")
                if 'Switch' in tags:
                    opts = self.switchOpts[name]
                    # Run User Defined Stop Command
                    if 'stopCommand' in opts:
                        f.write("    "+name+".cmdPrint('"+opts['stopCommand']+"')\n")
            f.write("    net.stop()\n")
            f.write("\n")
            f.write("if __name__ == '__main__':\n")
            f.write("    setLogLevel( 'info' )\n")
            f.write("    myNetwork()\n")
            f.write("\n")
            f.close()
# Generic canvas handler
#
# We could have used bindtags, as in nodeIcon, but
# the dynamic approach used here
# may actually require less code. In any case, it's an
# interesting introspection-based alternative to bindtags.
def canvasHandle( self, eventName, event ):
"Generic canvas event handler"
if self.active is None:
return
toolName = self.active
handler = getattr( self, eventName + toolName, None )
if handler is not None:
handler( event )
    def clickCanvas( self, event ):
        "Canvas click handler."
        # Dispatches to click<ActiveTool> via canvasHandle.
        self.canvasHandle( 'click', event )
    def dragCanvas( self, event ):
        "Canvas drag handler."
        # Dispatches to drag<ActiveTool> via canvasHandle.
        self.canvasHandle( 'drag', event )
    def releaseCanvas( self, event ):
        "Canvas mouse up handler."
        # Dispatches to release<ActiveTool> via canvasHandle.
        self.canvasHandle( 'release', event )
# Currently the only items we can select directly are
# links. Nodes are handled by bindings in the node icon.
def findItem( self, x, y ):
"Find items at a location in our canvas."
items = self.canvas.find_overlapping( x, y, x, y )
if len( items ) == 0:
return None
else:
return items[ 0 ]
# Canvas bindings for Select, Host, Switch and Link tools
    def clickSelect( self, event ):
        "Select an item."
        # Only links are selectable directly on the canvas; nodes are
        # selected through bindings on their icon widgets.
        self.selectItem( self.findItem( event.x, event.y ) )
    def deleteItem( self, item ):
        "Delete an item (link or node) from both model and canvas."
        # Don't delete while network is running
        # (the Select button is disabled while a net is up)
        if self.buttons[ 'Select' ][ 'state' ] == 'disabled':
            return
        # Delete from model
        # links and nodes live in separate maps, so check both.
        if item in self.links:
            self.deleteLink( item )
        if item in self.itemToWidget:
            self.deleteNode( item )
        # Delete from view
        self.canvas.delete( item )
def deleteSelection( self, _event ):
"Delete the selected item."
if self.selection is not None:
self.deleteItem( self.selection )
self.selectItem( None )
def nodeIcon( self, node, name ):
"Create a new node icon."
icon = Button( self.canvas, image=self.images[ node ],
text=name, compound='top' )
# Unfortunately bindtags wants a tuple
bindtags = [ str( self.nodeBindings ) ]
bindtags += list( icon.bindtags() )
icon.bindtags( tuple( bindtags ) )
return icon
    def newNode( self, node, event ):
        """Add a new node to our canvas.

        node is the tool/type name ('Switch', 'LegacyRouter',
        'LegacySwitch', 'Host' or 'Controller'); event supplies the
        click position. Allocates a name, records default options,
        creates the icon, and attaches the right-click popup.
        """
        c = self.canvas
        x, y = c.canvasx( event.x ), c.canvasy( event.y )
        name = self.nodePrefixes[ node ]
        # Switches, legacy routers and legacy switches all share the
        # switchCount counter and the switchOpts store.
        if 'Switch' == node:
            self.switchCount += 1
            name = self.nodePrefixes[ node ] + str( self.switchCount )
            self.switchOpts[name] = {}
            self.switchOpts[name]['nodeNum']=self.switchCount
            self.switchOpts[name]['hostname']=name
            self.switchOpts[name]['switchType']='default'
            self.switchOpts[name]['controllers']=[]
        if 'LegacyRouter' == node:
            self.switchCount += 1
            name = self.nodePrefixes[ node ] + str( self.switchCount )
            self.switchOpts[name] = {}
            self.switchOpts[name]['nodeNum']=self.switchCount
            self.switchOpts[name]['hostname']=name
            self.switchOpts[name]['switchType']='legacyRouter'
        if 'LegacySwitch' == node:
            self.switchCount += 1
            name = self.nodePrefixes[ node ] + str( self.switchCount )
            self.switchOpts[name] = {}
            self.switchOpts[name]['nodeNum']=self.switchCount
            self.switchOpts[name]['hostname']=name
            self.switchOpts[name]['switchType']='legacySwitch'
            self.switchOpts[name]['controllers']=[]
        if 'Host' == node:
            self.hostCount += 1
            name = self.nodePrefixes[ node ] + str( self.hostCount )
            self.hostOpts[name] = {'sched':'host'}
            self.hostOpts[name]['nodeNum']=self.hostCount
            self.hostOpts[name]['hostname']=name
        if 'Controller' == node:
            # Controller names are numbered from 0 (c0, c1, ...).
            name = self.nodePrefixes[ node ] + str( self.controllerCount )
            ctrlr = { 'controllerType': 'ref',
                      'hostname': name,
                      'controllerProtocol': 'tcp',
                      'remoteIP': '127.0.0.1',
                      'remotePort': 6633}
            self.controllers[name] = ctrlr
            # We want to start controller count at 0
            self.controllerCount += 1
        # Create the icon widget and embed it in the canvas.
        icon = self.nodeIcon( node, name )
        item = self.canvas.create_window( x, y, anchor='c', window=icon,
                                          tags=node )
        self.widgetToItem[ icon ] = item
        self.itemToWidget[ item ] = icon
        self.selectItem( item )
        # Each icon tracks its own attached links (peer widget -> line).
        icon.links = {}
        # Attach the type-specific right-click popup menu.
        if 'Switch' == node:
            icon.bind('<Button-3>', self.do_switchPopup )
        if 'LegacyRouter' == node:
            icon.bind('<Button-3>', self.do_legacyRouterPopup )
        if 'LegacySwitch' == node:
            icon.bind('<Button-3>', self.do_legacySwitchPopup )
        if 'Host' == node:
            icon.bind('<Button-3>', self.do_hostPopup )
        if 'Controller' == node:
            icon.bind('<Button-3>', self.do_controllerPopup )
    def clickController( self, event ):
        "Add a new Controller to our canvas."
        self.newNode( 'Controller', event )
    def clickHost( self, event ):
        "Add a new host to our canvas."
        self.newNode( 'Host', event )
    def clickLegacyRouter( self, event ):
        "Add a new legacy router to our canvas."
        self.newNode( 'LegacyRouter', event )
    def clickLegacySwitch( self, event ):
        "Add a new legacy (unmanaged) switch to our canvas."
        self.newNode( 'LegacySwitch', event )
    def clickSwitch( self, event ):
        "Add a new switch to our canvas."
        self.newNode( 'Switch', event )
def dragNetLink( self, event ):
"Drag a link's endpoint to another node."
if self.link is None:
return
# Since drag starts in widget, we use root coords
x = self.canvasx( event.x_root )
y = self.canvasy( event.y_root )
c = self.canvas
c.coords( self.link, self.linkx, self.linky, x, y )
def releaseNetLink( self, _event ):
"Give up on the current link."
if self.link is not None:
self.canvas.delete( self.link )
self.linkWidget = self.linkItem = self.link = None
# Generic node handlers
def createNodeBindings( self ):
"Create a set of bindings for nodes."
bindings = {
'<ButtonPress-1>': self.clickNode,
'<B1-Motion>': self.dragNode,
'<ButtonRelease-1>': self.releaseNode,
'<Enter>': self.enterNode,
'<Leave>': self.leaveNode
}
l = Label() # lightweight-ish owner for bindings
for event, binding in bindings.items():
l.bind( event, binding )
return l
def selectItem( self, item ):
"Select an item and remember old selection."
self.lastSelection = self.selection
self.selection = item
    def enterNode( self, event ):
        "Select node on entry."
        # Hovering a node temporarily selects it...
        self.selectNode( event )
    def leaveNode( self, _event ):
        "Restore old selection on exit."
        # ...and leaving restores whatever was selected before.
        self.selectItem( self.lastSelection )
def clickNode( self, event ):
"Node click handler."
if self.active is 'NetLink':
self.startLink( event )
else:
self.selectNode( event )
return 'break'
def dragNode( self, event ):
"Node drag handler."
if self.active is 'NetLink':
self.dragNetLink( event )
else:
self.dragNodeAround( event )
def releaseNode( self, event ):
"Node release handler."
if self.active is 'NetLink':
self.finishLink( event )
# Specific node handlers
def selectNode( self, event ):
"Select the node that was clicked on."
item = self.widgetToItem.get( event.widget, None )
self.selectItem( item )
def dragNodeAround( self, event ):
"Drag a node around on the canvas."
c = self.canvas
# Convert global to local coordinates;
# Necessary since x, y are widget-relative
x = self.canvasx( event.x_root )
y = self.canvasy( event.y_root )
w = event.widget
# Adjust node position
item = self.widgetToItem[ w ]
c.coords( item, x, y )
# Adjust link positions
for dest in w.links:
link = w.links[ dest ]
item = self.widgetToItem[ dest ]
x1, y1 = c.coords( item )
c.coords( link, x, y, x1, y1 )
self.updateScrollRegion()
    def createControlLinkBindings( self ):
        "Create canvas bindings for the current (control) link."
        # Link bindings
        # Selection still needs a bit of work overall
        # Callbacks ignore event
        # NOTE: link=self.link captures the *current* link as a default
        # argument, so each closure keeps its own link even after
        # self.link is reset to None by the caller.
        def select( _event, link=self.link ):
            "Select item on mouse entry."
            self.selectItem( link )
        def highlight( _event, link=self.link ):
            "Highlight item on mouse entry."
            self.selectItem( link )
            self.canvas.itemconfig( link, fill='green' )
        def unhighlight( _event, link=self.link ):
            "Unhighlight item on mouse exit."
            # Control links are normally drawn in red.
            self.canvas.itemconfig( link, fill='red' )
            #self.selectItem( None )
        self.canvas.tag_bind( self.link, '<Enter>', highlight )
        self.canvas.tag_bind( self.link, '<Leave>', unhighlight )
        self.canvas.tag_bind( self.link, '<ButtonPress-1>', select )
    def createDataLinkBindings( self ):
        "Create canvas bindings for the current (data) link."
        # Link bindings
        # Selection still needs a bit of work overall
        # Callbacks ignore event
        # NOTE: link=self.link captures the *current* link as a default
        # argument, so each closure keeps its own link even after
        # self.link is reset to None by the caller.
        def select( _event, link=self.link ):
            "Select item on mouse entry."
            self.selectItem( link )
        def highlight( _event, link=self.link ):
            "Highlight item on mouse entry."
            self.selectItem( link )
            self.canvas.itemconfig( link, fill='green' )
        def unhighlight( _event, link=self.link ):
            "Unhighlight item on mouse exit."
            # Data links are normally drawn in blue.
            self.canvas.itemconfig( link, fill='blue' )
            #self.selectItem( None )
        self.canvas.tag_bind( self.link, '<Enter>', highlight )
        self.canvas.tag_bind( self.link, '<Leave>', unhighlight )
        self.canvas.tag_bind( self.link, '<ButtonPress-1>', select )
        # Data links additionally get a right-click options popup.
        self.canvas.tag_bind( self.link, '<Button-3>', self.do_linkPopup )
def startLink( self, event ):
"Start a new link."
if event.widget not in self.widgetToItem:
# Didn't click on a node
return
w = event.widget
item = self.widgetToItem[ w ]
x, y = self.canvas.coords( item )
self.link = self.canvas.create_line( x, y, x, y, width=4,
fill='blue', tag='link' )
self.linkx, self.linky = x, y
self.linkWidget = w
self.linkItem = item
    def finishLink( self, event ):
        """Finish creating a link: validate the endpoints, classify the
        link as data or control, and commit it to model and canvas."""
        if self.link is None:
            return
        source = self.linkWidget
        c = self.canvas
        # Since we dragged from the widget, use root coords
        x, y = self.canvasx( event.x_root ), self.canvasy( event.y_root )
        target = self.findItem( x, y )
        dest = self.itemToWidget.get( target, None )
        # Reject self-links, duplicate links, and drops outside a node.
        if ( source is None or dest is None or source == dest
                or dest in source.links or source in dest.links ):
            self.releaseNetLink( event )
            return
        # For now, don't allow hosts to be directly linked
        # (nor controllers to legacy devices, hosts, or other controllers).
        stags = self.canvas.gettags( self.widgetToItem[ source ] )
        dtags = self.canvas.gettags( target )
        if (('Host' in stags and 'Host' in dtags) or
           ('Controller' in dtags and 'LegacyRouter' in stags) or
           ('Controller' in stags and 'LegacyRouter' in dtags) or
           ('Controller' in dtags and 'LegacySwitch' in stags) or
           ('Controller' in stags and 'LegacySwitch' in dtags) or
           ('Controller' in dtags and 'Host' in stags) or
           ('Controller' in stags and 'Host' in dtags) or
           ('Controller' in stags and 'Controller' in dtags)):
            self.releaseNetLink( event )
            return
        # Set link type
        # Control links (to/from a controller) are dashed red; data links
        # keep the solid blue created by startLink.
        linkType='data'
        if 'Controller' in stags or 'Controller' in dtags:
            linkType='control'
            c.itemconfig(self.link, dash=(6, 4, 2, 4), fill='red')
            self.createControlLinkBindings()
        else:
            linkType='data'
            self.createDataLinkBindings()
        c.itemconfig(self.link, tags=c.gettags(self.link)+(linkType,))
        # Snap the free end of the line to the target's center.
        x, y = c.coords( target )
        c.coords( self.link, self.linkx, self.linky, x, y )
        self.addLink( source, dest, linktype=linkType )
        # For control links, record the controller on the switch's options.
        if linkType == 'control':
            controllerName = ''
            switchName = ''
            if 'Controller' in stags:
                controllerName = source[ 'text' ]
                switchName = dest[ 'text' ]
            else:
                controllerName = dest[ 'text' ]
                switchName = source[ 'text' ]
            self.switchOpts[switchName]['controllers'].append(controllerName)
        # We're done
        self.link = self.linkWidget = None
# Menu handlers
    def about( self ):
        "Display about box."
        # The window is created once and cached; closing it only hides it.
        about = self.aboutBox
        if about is None:
            bg = 'white'
            about = Toplevel( bg='white' )
            about.title( 'About' )
            desc = self.appName + ': a simple network editor for MiniNet'
            version = 'MiniEdit '+MINIEDIT_VERSION
            author = 'Originally by: Bob Lantz <rlantz@cs>, April 2010'
            enhancements = 'Enhancements by: Gregory Gee, Since July 2013'
            www = 'http://gregorygee.wordpress.com/category/miniedit/'
            line1 = Label( about, text=desc, font='Helvetica 10 bold', bg=bg )
            line2 = Label( about, text=version, font='Helvetica 9', bg=bg )
            line3 = Label( about, text=author, font='Helvetica 9', bg=bg )
            line4 = Label( about, text=enhancements, font='Helvetica 9', bg=bg )
            # A read-only Entry is used for the URL so it can be copied.
            line5 = Entry( about, font='Helvetica 9', bg=bg, width=len(www), justify=CENTER )
            line5.insert(0, www)
            line5.configure(state='readonly')
            line1.pack( padx=20, pady=10 )
            line2.pack(pady=10 )
            line3.pack(pady=10 )
            line4.pack(pady=10 )
            line5.pack(pady=10 )
            hide = ( lambda about=about: about.withdraw() )
            self.aboutBox = about
            # Hide on close rather than destroying window
            Wm.wm_protocol( about, name='WM_DELETE_WINDOW', func=hide )
        # Show (existing) window
        about.deiconify()
    def createToolImages( self ):
        "Create toolbar (and icon) images."
        # NOTE(review): currently an empty stub -- image creation appears
        # to happen elsewhere; confirm before removing.
    @staticmethod
    def checkIntf( intf ):
        """Make sure intf exists and is not configured.

        Returns True if the interface is usable for attachment;
        shows an error dialog and returns False otherwise.
        """
        # 'ip link show' lists interfaces as ' <name>:'; absence means
        # the interface does not exist on this machine.
        if ( ' %s:' % intf ) not in quietRun( 'ip link show' ):
            showerror(title="Error",
                      message='External interface ' +intf + ' does not exist! Skipping.')
            return False
        # Any IPv4 address in the ifconfig output suggests it is in use.
        ips = re.findall( r'\d+\.\d+\.\d+\.\d+', quietRun( 'ifconfig ' + intf ) )
        if ips:
            showerror(title="Error",
                      message= intf + ' has an IP address and is probably in use! Skipping.' )
            return False
        return True
    def hostDetails( self, _ignore=None ):
        """Show the Host Details dialog for the selected host and apply
        the results back into self.hostOpts."""
        # Only when a host is selected and no network is running.
        if ( self.selection is None or
             self.net is not None or
             self.selection not in self.itemToWidget ):
            return
        widget = self.itemToWidget[ self.selection ]
        name = widget[ 'text' ]
        tags = self.canvas.gettags( self.selection )
        if 'Host' not in tags:
            return
        prefDefaults = self.hostOpts[name]
        hostBox = HostDialog(self, title='Host Details', prefDefaults=prefDefaults)
        self.master.wait_window(hostBox.top)
        # Rebuild the option dict from the dialog result; empty fields
        # are simply omitted.
        if hostBox.result:
            newHostOpts = {'nodeNum':self.hostOpts[name]['nodeNum']}
            newHostOpts['sched'] = hostBox.result['sched']
            if len(hostBox.result['startCommand']) > 0:
                newHostOpts['startCommand'] = hostBox.result['startCommand']
            if len(hostBox.result['stopCommand']) > 0:
                newHostOpts['stopCommand'] = hostBox.result['stopCommand']
            if len(hostBox.result['cpu']) > 0:
                newHostOpts['cpu'] = float(hostBox.result['cpu'])
            if len(hostBox.result['cores']) > 0:
                newHostOpts['cores'] = hostBox.result['cores']
            if len(hostBox.result['hostname']) > 0:
                # Rename: update both the options and the canvas label.
                # NOTE(review): the old name's entry appears to remain in
                # self.hostOpts after a rename -- confirm this is intended.
                newHostOpts['hostname'] = hostBox.result['hostname']
                name = hostBox.result['hostname']
                widget[ 'text' ] = name
            if len(hostBox.result['defaultRoute']) > 0:
                newHostOpts['defaultRoute'] = hostBox.result['defaultRoute']
            if len(hostBox.result['ip']) > 0:
                newHostOpts['ip'] = hostBox.result['ip']
            if len(hostBox.result['externalInterfaces']) > 0:
                newHostOpts['externalInterfaces'] = hostBox.result['externalInterfaces']
            if len(hostBox.result['vlanInterfaces']) > 0:
                newHostOpts['vlanInterfaces'] = hostBox.result['vlanInterfaces']
            if len(hostBox.result['privateDirectory']) > 0:
                newHostOpts['privateDirectory'] = hostBox.result['privateDirectory']
            self.hostOpts[name] = newHostOpts
            print 'New host details for ' + name + ' = ' + str(newHostOpts)
    def switchDetails( self, _ignore=None ):
        """Show the Switch Details dialog for the selected switch and
        apply the results back into self.switchOpts."""
        # Only when a switch is selected and no network is running.
        if ( self.selection is None or
             self.net is not None or
             self.selection not in self.itemToWidget ):
            return
        widget = self.itemToWidget[ self.selection ]
        name = widget[ 'text' ]
        tags = self.canvas.gettags( self.selection )
        if 'Switch' not in tags:
            return
        prefDefaults = self.switchOpts[name]
        switchBox = SwitchDialog(self, title='Switch Details', prefDefaults=prefDefaults)
        self.master.wait_window(switchBox.top)
        # Rebuild the option dict; the controller list is preserved from
        # the existing options, and empty dialog fields are omitted.
        if switchBox.result:
            newSwitchOpts = {'nodeNum':self.switchOpts[name]['nodeNum']}
            newSwitchOpts['switchType'] = switchBox.result['switchType']
            newSwitchOpts['controllers'] = self.switchOpts[name]['controllers']
            if len(switchBox.result['startCommand']) > 0:
                newSwitchOpts['startCommand'] = switchBox.result['startCommand']
            if len(switchBox.result['stopCommand']) > 0:
                newSwitchOpts['stopCommand'] = switchBox.result['stopCommand']
            if len(switchBox.result['dpctl']) > 0:
                newSwitchOpts['dpctl'] = switchBox.result['dpctl']
            if len(switchBox.result['dpid']) > 0:
                newSwitchOpts['dpid'] = switchBox.result['dpid']
            if len(switchBox.result['hostname']) > 0:
                # Rename: update both the options and the canvas label.
                # NOTE(review): the old name's entry appears to remain in
                # self.switchOpts after a rename -- confirm this is intended.
                newSwitchOpts['hostname'] = switchBox.result['hostname']
                name = switchBox.result['hostname']
                widget[ 'text' ] = name
            if len(switchBox.result['externalInterfaces']) > 0:
                newSwitchOpts['externalInterfaces'] = switchBox.result['externalInterfaces']
            newSwitchOpts['switchIP'] = switchBox.result['switchIP']
            newSwitchOpts['sflow'] = switchBox.result['sflow']
            newSwitchOpts['netflow'] = switchBox.result['netflow']
            self.switchOpts[name] = newSwitchOpts
            print 'New switch details for ' + name + ' = ' + str(newSwitchOpts)
def linkUp( self ):
if ( self.selection is None or
self.net is None):
return
link = self.selection
linkDetail = self.links[link]
src = linkDetail['src']
dst = linkDetail['dest']
srcName, dstName = src[ 'text' ], dst[ 'text' ]
self.net.configLinkStatus(srcName, dstName, 'up')
self.canvas.itemconfig(link, dash=())
def linkDown( self ):
if ( self.selection is None or
self.net is None):
return
link = self.selection
linkDetail = self.links[link]
src = linkDetail['src']
dst = linkDetail['dest']
srcName, dstName = src[ 'text' ], dst[ 'text' ]
self.net.configLinkStatus(srcName, dstName, 'down')
self.canvas.itemconfig(link, dash=(4, 4))
    def linkDetails( self, _ignore=None ):
        """Show the Link Details dialog for the selected link and store
        the resulting TC options on the link's model entry."""
        # Only while no network is running.
        if ( self.selection is None or
             self.net is not None):
            return
        link = self.selection
        linkDetail =  self.links[link]
        # src = linkDetail['src']
        # dest = linkDetail['dest']
        linkopts = linkDetail['linkOpts']
        linkBox = LinkDialog(self, title='Link Details', linkDefaults=linkopts)
        if linkBox.result is not None:
            linkDetail['linkOpts'] = linkBox.result
            print 'New link details = ' + str(linkBox.result)
    def prefDetails( self ):
        """Show the application Preferences dialog and apply the result
        to self.appPrefs (unchanged if the dialog was cancelled)."""
        prefDefaults = self.appPrefs
        prefBox = PrefsDialog(self, title='Preferences', prefDefaults=prefDefaults)
        print 'New Prefs = ' + str(prefBox.result)
        if prefBox.result:
            self.appPrefs = prefBox.result
    def controllerDetails( self ):
        """Show the Controller Details dialog for the selected controller,
        apply the result, and propagate a rename to referencing switches."""
        # Only when a controller is selected and no network is running.
        if ( self.selection is None or
             self.net is not None or
             self.selection not in self.itemToWidget ):
            return
        widget = self.itemToWidget[ self.selection ]
        name = widget[ 'text' ]
        tags = self.canvas.gettags( self.selection )
        oldName = name
        if 'Controller' not in tags:
            return
        ctrlrBox = ControllerDialog(self, title='Controller Details', ctrlrDefaults=self.controllers[name])
        if ctrlrBox.result:
            #print 'Controller is ' + ctrlrBox.result[0]
            if len(ctrlrBox.result['hostname']) > 0:
                name = ctrlrBox.result['hostname']
                widget[ 'text' ] = name
            else:
                ctrlrBox.result['hostname'] = name
            # NOTE(review): on rename, the old name's entry appears to
            # remain in self.controllers -- confirm this is intended.
            self.controllers[name] = ctrlrBox.result
            print 'New controller details for ' + name + ' = ' + str(self.controllers[name])
            # Find references to controller and change name
            # (this loop variable shadows the outer 'widget' on purpose?
            #  NOTE(review): verify -- widget is not used again afterwards.)
            if oldName != name:
                for widget in self.widgetToItem:
                    switchName = widget[ 'text' ]
                    tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                    if 'Switch' in tags:
                        switch = self.switchOpts[switchName]
                        if oldName in switch['controllers']:
                            switch['controllers'].remove(oldName)
                            switch['controllers'].append(name)
    def listBridge( self, _ignore=None ):
        """Open an xterm showing 'ovs-vsctl list bridge' output for the
        selected switch of the running network."""
        # Requires a selected node in a running net.
        if ( self.selection is None or
             self.net is None or
             self.selection not in self.itemToWidget ):
            return
        name = self.itemToWidget[ self.selection ][ 'text' ]
        tags = self.canvas.gettags( self.selection )
        # Node must exist in the running network.
        if name not in self.net.nameToNode:
            return
        if 'Switch' in tags or 'LegacySwitch' in tags:
            # Background xterm; 'read' keeps it open until Enter is pressed.
            call(["xterm -T 'Bridge Details' -sb -sl 2000 -e 'ovs-vsctl list bridge " + name + "; read -p \"Press Enter to close\"' &"], shell=True)
    @staticmethod
    def ovsShow( _ignore=None ):
        # Pop a background xterm with 'ovs-vsctl show' output; 'read'
        # keeps it open until Enter is pressed.
        call(["xterm -T 'OVS Summary' -sb -sl 2000 -e 'ovs-vsctl show; read -p \"Press Enter to close\"' &"], shell=True)
    @staticmethod
    def rootTerminal( _ignore=None ):
        # Launch a root xterm in the background.
        call(["xterm -T 'Root Terminal' -sb -sl 2000 &"], shell=True)
# Model interface
#
# Ultimately we will either want to use a topo or
# mininet object here, probably.
def addLink( self, source, dest, linktype='data', linkopts=None ):
"Add link to model."
if linkopts is None:
linkopts = {}
source.links[ dest ] = self.link
dest.links[ source ] = self.link
self.links[ self.link ] = {'type' :linktype,
'src':source,
'dest':dest,
'linkOpts':linkopts}
def deleteLink( self, link ):
"Delete link from model."
pair = self.links.get( link, None )
if pair is not None:
source=pair['src']
dest=pair['dest']
del source.links[ dest ]
del dest.links[ source ]
stags = self.canvas.gettags( self.widgetToItem[ source ] )
# dtags = self.canvas.gettags( self.widgetToItem[ dest ] )
ltags = self.canvas.gettags( link )
if 'control' in ltags:
controllerName = ''
switchName = ''
if 'Controller' in stags:
controllerName = source[ 'text' ]
switchName = dest[ 'text' ]
else:
controllerName = dest[ 'text' ]
switchName = source[ 'text' ]
if controllerName in self.switchOpts[switchName]['controllers']:
self.switchOpts[switchName]['controllers'].remove(controllerName)
if link is not None:
del self.links[ link ]
def deleteNode( self, item ):
"Delete node (and its links) from model."
widget = self.itemToWidget[ item ]
tags = self.canvas.gettags(item)
if 'Controller' in tags:
# remove from switch controller lists
for serachwidget in self.widgetToItem:
name = serachwidget[ 'text' ]
tags = self.canvas.gettags( self.widgetToItem[ serachwidget ] )
if 'Switch' in tags:
if widget['text'] in self.switchOpts[name]['controllers']:
self.switchOpts[name]['controllers'].remove(widget['text'])
for link in widget.links.values():
# Delete from view and model
self.deleteItem( link )
del self.itemToWidget[ item ]
del self.widgetToItem[ widget ]
    def buildNodes( self, net):
        """Create a Mininet node (switch/host/controller/legacy device) for
           every widget on the canvas, applying the per-node options stored
           in self.switchOpts / self.hostOpts / self.controllers.
           Raises Exception for a widget with an unrecognized tag."""
        # Make nodes
        print "Getting Hosts and Switches."
        for widget in self.widgetToItem:
            name = widget[ 'text' ]
            tags = self.canvas.gettags( self.widgetToItem[ widget ] )
            #print name+' has '+str(tags)
            if 'Switch' in tags:
                opts = self.switchOpts[name]
                #print str(opts)
                # Create the correct switch class
                switchClass = customOvs
                switchParms={}
                # assumes opts['dpctl'] is a numeric string — TODO confirm
                if 'dpctl' in opts:
                    switchParms['listenPort']=int(opts['dpctl'])
                if 'dpid' in opts:
                    switchParms['dpid']=opts['dpid']
                # 'default' defers to the app-wide switch-type preference;
                # any explicit per-switch type overrides it.
                if opts['switchType'] == 'default':
                    if self.appPrefs['switchType'] == 'ivs':
                        switchClass = IVSSwitch
                    elif self.appPrefs['switchType'] == 'user':
                        switchClass = CustomUserSwitch
                    elif self.appPrefs['switchType'] == 'userns':
                        switchParms['inNamespace'] = True
                        switchClass = CustomUserSwitch
                    else:
                        switchClass = customOvs
                elif opts['switchType'] == 'user':
                    switchClass = CustomUserSwitch
                elif opts['switchType'] == 'userns':
                    switchClass = CustomUserSwitch
                    switchParms['inNamespace'] = True
                elif opts['switchType'] == 'ivs':
                    switchClass = IVSSwitch
                else:
                    switchClass = customOvs
                if switchClass == customOvs:
                    # Set OpenFlow versions
                    self.openFlowVersions = []
                    if self.appPrefs['openFlowVersions']['ovsOf10'] == '1':
                        self.openFlowVersions.append('OpenFlow10')
                    if self.appPrefs['openFlowVersions']['ovsOf11'] == '1':
                        self.openFlowVersions.append('OpenFlow11')
                    if self.appPrefs['openFlowVersions']['ovsOf12'] == '1':
                        self.openFlowVersions.append('OpenFlow12')
                    if self.appPrefs['openFlowVersions']['ovsOf13'] == '1':
                        self.openFlowVersions.append('OpenFlow13')
                    protoList = ",".join(self.openFlowVersions)
                    switchParms['protocols'] = protoList
                newSwitch = net.addSwitch( name , cls=switchClass, **switchParms)
                # Some post startup config
                if switchClass == CustomUserSwitch:
                    if 'switchIP' in opts:
                        if len(opts['switchIP']) > 0:
                            newSwitch.setSwitchIP(opts['switchIP'])
                if switchClass == customOvs:
                    if 'switchIP' in opts:
                        if len(opts['switchIP']) > 0:
                            newSwitch.setSwitchIP(opts['switchIP'])
                # Attach external interfaces
                if 'externalInterfaces' in opts:
                    for extInterface in opts['externalInterfaces']:
                        if self.checkIntf(extInterface):
                            Intf( extInterface, node=newSwitch )
            elif 'LegacySwitch' in tags:
                newSwitch = net.addSwitch( name , cls=LegacySwitch)
            elif 'LegacyRouter' in tags:
                # Legacy routers are modeled as hosts with a router class.
                newSwitch = net.addHost( name , cls=LegacyRouter)
            elif 'Host' in tags:
                opts = self.hostOpts[name]
                #print str(opts)
                ip = None
                defaultRoute = None
                if 'defaultRoute' in opts and len(opts['defaultRoute']) > 0:
                    defaultRoute = 'via '+opts['defaultRoute']
                # Explicit IP wins; otherwise derive one from the app's
                # ipBase preference and the host's node number.
                if 'ip' in opts and len(opts['ip']) > 0:
                    ip = opts['ip']
                else:
                    nodeNum = self.hostOpts[name]['nodeNum']
                    ipBaseNum, prefixLen = netParse( self.appPrefs['ipBase'] )
                    ip = ipAdd(i=nodeNum, prefixLen=prefixLen, ipBaseNum=ipBaseNum)
                # Create the correct host class
                if 'cores' in opts or 'cpu' in opts:
                    if 'privateDirectory' in opts:
                        hostCls = partial( CPULimitedHost,
                                           privateDirs=opts['privateDirectory'] )
                    else:
                        hostCls=CPULimitedHost
                else:
                    if 'privateDirectory' in opts:
                        hostCls = partial( Host,
                                           privateDirs=opts['privateDirectory'] )
                    else:
                        hostCls=Host
                print hostCls
                newHost = net.addHost( name,
                                       cls=hostCls,
                                       ip=ip,
                                       defaultRoute=defaultRoute
                                     )
                # Set the CPULimitedHost specific options
                if 'cores' in opts:
                    newHost.setCPUs(cores = opts['cores'])
                if 'cpu' in opts:
                    newHost.setCPUFrac(f=opts['cpu'], sched=opts['sched'])
                # Attach external interfaces
                if 'externalInterfaces' in opts:
                    for extInterface in opts['externalInterfaces']:
                        if self.checkIntf(extInterface):
                            Intf( extInterface, node=newHost )
                if 'vlanInterfaces' in opts:
                    if len(opts['vlanInterfaces']) > 0:
                        # VLAN interfaces are configured later (postStartSetup);
                        # here we only verify the OS has the tooling.
                        print 'Checking that OS is VLAN prepared'
                        self.pathCheck('vconfig', moduleName='vlan package')
                        moduleDeps( add='8021q' )
            elif 'Controller' in tags:
                opts = self.controllers[name]
                # Get controller info from panel
                controllerType = opts['controllerType']
                # Default the protocol to tcp and persist that choice.
                if 'controllerProtocol' in opts:
                    controllerProtocol = opts['controllerProtocol']
                else:
                    controllerProtocol = 'tcp'
                    opts['controllerProtocol'] = 'tcp'
                controllerIP = opts['remoteIP']
                controllerPort = opts['remotePort']
                # Make controller
                print 'Getting controller selection:'+controllerType
                if controllerType == 'remote':
                    net.addController(name=name,
                                      controller=RemoteController,
                                      ip=controllerIP,
                                      protocol=controllerProtocol,
                                      port=controllerPort)
                elif controllerType == 'inband':
                    net.addController(name=name,
                                      controller=InbandController,
                                      ip=controllerIP,
                                      protocol=controllerProtocol,
                                      port=controllerPort)
                elif controllerType == 'ovsc':
                    net.addController(name=name,
                                      controller=OVSController,
                                      protocol=controllerProtocol,
                                      port=controllerPort)
                else:
                    net.addController(name=name,
                                      controller=Controller,
                                      protocol=controllerProtocol,
                                      port=controllerPort)
            else:
                raise Exception( "Cannot create mystery node: " + name )
@staticmethod
def pathCheck( *args, **kwargs ):
"Make sure each program in *args can be found in $PATH."
moduleName = kwargs.get( 'moduleName', 'it' )
for arg in args:
if not quietRun( 'which ' + arg ):
showerror(title="Error",
message= 'Cannot find required executable %s.\n' % arg +
'Please make sure that %s is installed ' % moduleName +
'and available in your $PATH.' )
    def buildLinks( self, net):
        "Add every 'data'-tagged canvas link to the Mininet network being built."
        # Make links
        print "Getting Links."
        for key,link in self.links.iteritems():
            tags = self.canvas.gettags(key)
            if 'data' in tags:
                src=link['src']
                dst=link['dest']
                linkopts=link['linkOpts']
                srcName, dstName = src[ 'text' ], dst[ 'text' ]
                srcNode, dstNode = net.nameToNode[ srcName ], net.nameToNode[ dstName ]
                if linkopts:
                    # Per-link options present: use a traffic-shaped TCLink.
                    net.addLink(srcNode, dstNode, cls=TCLink, **linkopts)
                else:
                    #print str(srcNode)
                    #print str(dstNode)
                    net.addLink(srcNode, dstNode)
                # Clear the dash pattern so the canvas line is drawn solid.
                self.canvas.itemconfig(key, dash=())
    def build( self ):
        "Build a Mininet network from the canvas topology and return it."
        print "Build network based on our topology."
        dpctl = None
        # App-wide dpctl listen port (empty preference string = disabled).
        if len(self.appPrefs['dpctl']) > 0:
            dpctl = int(self.appPrefs['dpctl'])
        net = Mininet( topo=None,
                       listenPort=dpctl,
                       build=False,
                       ipBase=self.appPrefs['ipBase'] )
        self.buildNodes(net)
        self.buildLinks(net)
        # Build network (we have to do this separately at the moment )
        net.build()
        return net
    def postStartSetup( self ):
        """Post-start configuration: per-node VLAN interfaces and start
           commands, OVS NetFlow/sFlow setup, and (optionally) the CLI.
           Must be called after the network has been started."""
        # Setup host details
        for widget in self.widgetToItem:
            name = widget[ 'text' ]
            tags = self.canvas.gettags( self.widgetToItem[ widget ] )
            if 'Host' in tags:
                newHost = self.net.get(name)
                opts = self.hostOpts[name]
                # Attach vlan interfaces
                # vlanInterface is (ip, vlan-id) — derived from the usage below.
                if 'vlanInterfaces' in opts:
                    for vlanInterface in opts['vlanInterfaces']:
                        print 'adding vlan interface '+vlanInterface[1]
                        newHost.cmdPrint('ifconfig '+name+'-eth0.'+vlanInterface[1]+' '+vlanInterface[0])
                # Run User Defined Start Command
                if 'startCommand' in opts:
                    newHost.cmdPrint(opts['startCommand'])
            if 'Switch' in tags:
                newNode = self.net.get(name)
                opts = self.switchOpts[name]
                # Run User Defined Start Command
                if 'startCommand' in opts:
                    newNode.cmdPrint(opts['startCommand'])
        # Configure NetFlow
        nflowValues = self.appPrefs['netflow']
        if len(nflowValues['nflowTarget']) > 0:
            nflowEnabled = False
            nflowSwitches = ''
            # Collect every switch that opted into NetFlow into one
            # ovs-vsctl argument string.
            for widget in self.widgetToItem:
                name = widget[ 'text' ]
                tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                if 'Switch' in tags:
                    opts = self.switchOpts[name]
                    if 'netflow' in opts:
                        if opts['netflow'] == '1':
                            print name+' has Netflow enabled'
                            nflowSwitches = nflowSwitches+' -- set Bridge '+name+' netflow=@MiniEditNF'
                            nflowEnabled=True
            if nflowEnabled:
                # Single ovs-vsctl call: create the NetFlow record, then bind
                # it to every participating bridge.
                nflowCmd = 'ovs-vsctl -- --id=@MiniEditNF create NetFlow '+ 'target=\\\"'+nflowValues['nflowTarget']+'\\\" '+ 'active-timeout='+nflowValues['nflowTimeout']
                if nflowValues['nflowAddId'] == '1':
                    nflowCmd = nflowCmd + ' add_id_to_interface=true'
                else:
                    nflowCmd = nflowCmd + ' add_id_to_interface=false'
                print 'cmd = '+nflowCmd+nflowSwitches
                call(nflowCmd+nflowSwitches, shell=True)
            else:
                print 'No switches with Netflow'
        else:
            print 'No NetFlow targets specified.'
        # Configure sFlow
        sflowValues = self.appPrefs['sflow']
        if len(sflowValues['sflowTarget']) > 0:
            sflowEnabled = False
            sflowSwitches = ''
            for widget in self.widgetToItem:
                name = widget[ 'text' ]
                tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                if 'Switch' in tags:
                    opts = self.switchOpts[name]
                    if 'sflow' in opts:
                        if opts['sflow'] == '1':
                            print name+' has sflow enabled'
                            sflowSwitches = sflowSwitches+' -- set Bridge '+name+' sflow=@MiniEditSF'
                            sflowEnabled=True
            if sflowEnabled:
                sflowCmd = 'ovs-vsctl -- --id=@MiniEditSF create sFlow '+ 'target=\\\"'+sflowValues['sflowTarget']+'\\\" '+ 'header='+sflowValues['sflowHeader']+' '+ 'sampling='+sflowValues['sflowSampling']+' '+ 'polling='+sflowValues['sflowPolling']
                print 'cmd = '+sflowCmd+sflowSwitches
                call(sflowCmd+sflowSwitches, shell=True)
            else:
                print 'No switches with sflow'
        else:
            print 'No sFlow targets specified.'
        ## NOTE: MAKE SURE THIS IS LAST THING CALLED
        # Start the CLI if enabled
        # NOTE(review): 'sessoin' typo below is in a user-facing runtime
        # string; left untouched here since fixing it changes output.
        if self.appPrefs['startCLI'] == '1':
            info( "\n\n NOTE: PLEASE REMEMBER TO EXIT THE CLI BEFORE YOU PRESS THE STOP BUTTON. Not exiting will prevent MiniEdit from quitting and will prevent you from starting the network again during this sessoin.\n\n")
            CLI(self.net)
    def start( self ):
        """Start network: build it, start controllers, then start each switch
           with only the controllers assigned to it on the canvas."""
        if self.net is None:
            self.net = self.build()
            # Since I am going to inject per switch controllers.
            # I can't call net.start(). I have to replicate what it
            # does and add the controller options.
            #self.net.start()
            info( '**** Starting %s controllers\n' % len( self.net.controllers ) )
            for controller in self.net.controllers:
                info( str(controller) + ' ')
                controller.start()
            info('\n')
            info( '**** Starting %s switches\n' % len( self.net.switches ) )
            #for switch in self.net.switches:
            #    info( switch.name + ' ')
            #    switch.start( self.net.controllers )
            for widget in self.widgetToItem:
                name = widget[ 'text' ]
                tags = self.canvas.gettags( self.widgetToItem[ widget ] )
                if 'Switch' in tags:
                    opts = self.switchOpts[name]
                    # Resolve the switch's assigned controller names to nodes.
                    switchControllers = []
                    for ctrl in opts['controllers']:
                        switchControllers.append(self.net.get(ctrl))
                    info( name + ' ')
                    # Figure out what controllers will manage this switch
                    self.net.get(name).start( switchControllers )
                if 'LegacySwitch' in tags:
                    # Legacy switches run with no controller at all.
                    self.net.get(name).start( [] )
                    info( name + ' ')
            info('\n')
            self.postStartSetup()
def stop( self ):
"Stop network."
if self.net is not None:
# Stop host details
for widget in self.widgetToItem:
name = widget[ 'text' ]
tags = self.canvas.gettags( self.widgetToItem[ widget ] )
if 'Host' in tags:
newHost = self.net.get(name)
opts = self.hostOpts[name]
# Run User Defined Stop Command
if 'stopCommand' in opts:
newHost.cmdPrint(opts['stopCommand'])
if 'Switch' in tags:
newNode = self.net.get(name)
opts = self.switchOpts[name]
# Run User Defined Stop Command
if 'stopCommand' in opts:
newNode.cmdPrint(opts['stopCommand'])
self.net.stop()
cleanUpScreens()
self.net = None
def do_linkPopup(self, event):
# display the popup menu
if self.net is None:
try:
self.linkPopup.tk_popup(event.x_root, event.y_root, 0)
finally:
# make sure to release the grab (Tk 8.0a1 only)
self.linkPopup.grab_release()
else:
try:
self.linkRunPopup.tk_popup(event.x_root, event.y_root, 0)
finally:
# make sure to release the grab (Tk 8.0a1 only)
self.linkRunPopup.grab_release()
def do_controllerPopup(self, event):
# display the popup menu
if self.net is None:
try:
self.controllerPopup.tk_popup(event.x_root, event.y_root, 0)
finally:
# make sure to release the grab (Tk 8.0a1 only)
self.controllerPopup.grab_release()
def do_legacyRouterPopup(self, event):
# display the popup menu
if self.net is not None:
try:
self.legacyRouterRunPopup.tk_popup(event.x_root, event.y_root, 0)
finally:
# make sure to release the grab (Tk 8.0a1 only)
self.legacyRouterRunPopup.grab_release()
def do_hostPopup(self, event):
# display the popup menu
if self.net is None:
try:
self.hostPopup.tk_popup(event.x_root, event.y_root, 0)
finally:
# make sure to release the grab (Tk 8.0a1 only)
self.hostPopup.grab_release()
else:
try:
self.hostRunPopup.tk_popup(event.x_root, event.y_root, 0)
finally:
# make sure to release the grab (Tk 8.0a1 only)
self.hostRunPopup.grab_release()
def do_legacySwitchPopup(self, event):
# display the popup menu
if self.net is not None:
try:
self.switchRunPopup.tk_popup(event.x_root, event.y_root, 0)
finally:
# make sure to release the grab (Tk 8.0a1 only)
self.switchRunPopup.grab_release()
def do_switchPopup(self, event):
# display the popup menu
if self.net is None:
try:
self.switchPopup.tk_popup(event.x_root, event.y_root, 0)
finally:
# make sure to release the grab (Tk 8.0a1 only)
self.switchPopup.grab_release()
else:
try:
self.switchRunPopup.tk_popup(event.x_root, event.y_root, 0)
finally:
# make sure to release the grab (Tk 8.0a1 only)
self.switchRunPopup.grab_release()
def xterm( self, _ignore=None ):
"Make an xterm when a button is pressed."
if ( self.selection is None or
self.net is None or
self.selection not in self.itemToWidget ):
return
name = self.itemToWidget[ self.selection ][ 'text' ]
if name not in self.net.nameToNode:
return
term = makeTerm( self.net.nameToNode[ name ], 'Host', term=self.appPrefs['terminalType'] )
if StrictVersion(MININET_VERSION) > StrictVersion('2.0'):
self.net.terms += term
else:
self.net.terms.append(term)
    def iperf( self, _ignore=None ):
        "Start a background iperf server (port 5001) on the selected node."
        # Only valid while the network is running and a canvas item is selected.
        if ( self.selection is None or
             self.net is None or
             self.selection not in self.itemToWidget ):
            return
        name = self.itemToWidget[ self.selection ][ 'text' ]
        if name not in self.net.nameToNode:
            return
        # Run inside the node's namespace; '&' keeps the server running.
        self.net.nameToNode[ name ].cmd( 'iperf -s -p 5001 &' )
### BELOW HERE IS THE TOPOLOGY IMPORT CODE ###
def parseArgs( self ):
"""Parse command-line args and return options object.
returns: opts parse options dict"""
if '--custom' in sys.argv:
index = sys.argv.index( '--custom' )
if len( sys.argv ) > index + 1:
filename = sys.argv[ index + 1 ]
self.parseCustomFile( filename )
else:
raise Exception( 'Custom file name not found' )
desc = ( "The %prog utility creates Mininet network from the\n"
"command line. It can create parametrized topologies,\n"
"invoke the Mininet CLI, and run tests." )
usage = ( '%prog [options]\n'
'(type %prog -h for details)' )
opts = OptionParser( description=desc, usage=usage )
addDictOption( opts, TOPOS, TOPODEF, 'topo' )
addDictOption( opts, LINKS, LINKDEF, 'link' )
opts.add_option( '--custom', type='string', default=None,
help='read custom topo and node params from .py' +
'file' )
self.options, self.args = opts.parse_args()
# We don't accept extra arguments after the options
if self.args:
opts.print_help()
exit()
def setCustom( self, name, value ):
"Set custom parameters for MininetRunner."
if name in ( 'topos', 'switches', 'hosts', 'controllers' ):
# Update dictionaries
param = name.upper()
globals()[ param ].update( value )
elif name == 'validate':
# Add custom validate function
self.validate = value
else:
# Add or modify global variable or class
globals()[ name ] = value
    def parseCustomFile( self, fileName ):
        """Parse custom file and add params before parsing cmd-line options.
           Raises Exception if the file does not exist."""
        customs = {}
        if os.path.isfile( fileName ):
            # Python 2 execfile: run the file with 'customs' as both globals
            # and locals so its top-level definitions land in the dict.
            execfile( fileName, customs, customs )
            for name, val in customs.iteritems():
                self.setCustom( name, val )
        else:
            raise Exception( 'could not find custom file: %s' % fileName )
    def importTopo( self ):
        """Import the topology named by --topo: build it in a throwaway
           Mininet instance, lay its controllers/switches/hosts out on the
           canvas in rows of 10, and recreate its links."""
        print 'topo='+self.options.topo
        if self.options.topo == 'none':
            return
        self.newTopology()
        topo = buildTopo( TOPOS, self.options.topo )
        link = customClass( LINKS, self.options.link )
        # Build (but never start) a temporary net just to enumerate nodes.
        importNet = Mininet(topo=topo, build=False, link=link)
        importNet.build()
        c = self.canvas
        rowIncrement = 100
        currentY = 100
        # Add Controllers
        print 'controllers:'+str(len(importNet.controllers))
        for controller in importNet.controllers:
            name = controller.name
            x = self.controllerCount*100+100
            self.addNode('Controller', self.controllerCount,
                         float(x), float(currentY), name=name)
            icon = self.findWidgetByName(name)
            icon.bind('<Button-3>', self.do_controllerPopup )
            ctrlr = { 'controllerType': 'ref',
                      'hostname': name,
                      'controllerProtocol': controller.protocol,
                      'remoteIP': controller.ip,
                      'remotePort': controller.port}
            self.controllers[name] = ctrlr
            currentY = currentY + rowIncrement
        # Add switches
        print 'switches:'+str(len(importNet.switches))
        columnCount = 0
        for switch in importNet.switches:
            name = switch.name
            self.switchOpts[name] = {}
            self.switchOpts[name]['nodeNum']=self.switchCount
            self.switchOpts[name]['hostname']=name
            self.switchOpts[name]['switchType']='default'
            self.switchOpts[name]['controllers']=[]
            x = columnCount*100+100
            self.addNode('Switch', self.switchCount,
                         float(x), float(currentY), name=name)
            icon = self.findWidgetByName(name)
            icon.bind('<Button-3>', self.do_switchPopup )
            # Now link to controllers
            # Every imported switch gets a red dashed control link to every
            # controller.
            for controller in importNet.controllers:
                self.switchOpts[name]['controllers'].append(controller.name)
                dest = self.findWidgetByName(controller.name)
                dx, dy = c.coords( self.widgetToItem[ dest ] )
                self.link = c.create_line(float(x),
                                          float(currentY),
                                          dx,
                                          dy,
                                          width=4,
                                          fill='red',
                                          dash=(6, 4, 2, 4),
                                          tag='link' )
                c.itemconfig(self.link, tags=c.gettags(self.link)+('control',))
                self.addLink( icon, dest, linktype='control' )
                self.createControlLinkBindings()
                self.link = self.linkWidget = None
            # Wrap to a new row after 10 columns.
            if columnCount == 9:
                columnCount = 0
                currentY = currentY + rowIncrement
            else:
                columnCount =columnCount+1
        currentY = currentY + rowIncrement
        # Add hosts
        print 'hosts:'+str(len(importNet.hosts))
        columnCount = 0
        for host in importNet.hosts:
            name = host.name
            self.hostOpts[name] = {'sched':'host'}
            self.hostOpts[name]['nodeNum']=self.hostCount
            self.hostOpts[name]['hostname']=name
            self.hostOpts[name]['ip']=host.IP()
            x = columnCount*100+100
            self.addNode('Host', self.hostCount,
                         float(x), float(currentY), name=name)
            icon = self.findWidgetByName(name)
            icon.bind('<Button-3>', self.do_hostPopup )
            if columnCount == 9:
                columnCount = 0
                currentY = currentY + rowIncrement
            else:
                columnCount =columnCount+1
        print 'links:'+str(len(topo.links()))
        #[('h1', 's3'), ('h2', 's4'), ('s3', 's4')]
        # Recreate each topo link as a solid blue data link on the canvas.
        for link in topo.links():
            print str(link)
            srcNode = link[0]
            src = self.findWidgetByName(srcNode)
            sx, sy = self.canvas.coords( self.widgetToItem[ src ] )
            destNode = link[1]
            dest = self.findWidgetByName(destNode)
            dx, dy = self.canvas.coords( self.widgetToItem[ dest] )
            params = topo.linkInfo( srcNode, destNode )
            print 'Link Parameters='+str(params)
            self.link = self.canvas.create_line( sx, sy, dx, dy, width=4,
                                                 fill='blue', tag='link' )
            c.itemconfig(self.link, tags=c.gettags(self.link)+('data',))
            self.addLink( src, dest, linkopts=params )
            self.createDataLinkBindings()
            self.link = self.linkWidget = None
        importNet.stop()
def miniEditImages():
    """Create and return images for MiniEdit.
       Returns a dict mapping node-type name -> Tk image object; the
       inline payloads below are base64 GIF data for Tk PhotoImage and
       must not be modified."""
    # Image data. Git will be unhappy. However, the alternative
    # is to keep track of separate binary files, which is also
    # unappealing.
    return {
        'Select': BitmapImage(
            file='/usr/include/X11/bitmaps/left_ptr' ),
        'Switch': PhotoImage( data=r"""
R0lGODlhLgAgAPcAAB2ZxGq61imex4zH3RWWwmK41tzd3vn9/jCiyfX7/Q6SwFay0gBlmtnZ2snJ
yr+2tAuMu6rY6D6kyfHx8XO/2Uqszjmly6DU5uXz+JLN4uz3+kSrzlKx0ZeZm2K21BuYw67a6QB9
r+Xl5rW2uHW61On1+UGpzbrf6xiXwny9166vsMLCwgBdlAmHt8TFxgBwpNTs9C2hyO7t7ZnR5L/B
w0yv0NXV1gBimKGjpABtoQBuoqKkpiaUvqWmqHbB2/j4+Pf39729vgB/sN7w9obH3hSMugCAsonJ
4M/q8wBglgB6rCCaxLO0tX7C2wBqniGMuABzpuPl5f3+/v39/fr6+r7i7vP6/ABonV621LLc6zWk
yrq6uq6wskGlyUaszp6gohmYw8HDxKaoqn3E3LGztWGuzcnLzKmrrOnp6gB1qCaex1q001ewz+Dg
4QB3qrCxstHS09LR0dHR0s7Oz8zNzsfIyQaJuQB0pozL4YzI3re4uAGFtYDG3hOUwb+/wQB5rOvr
6wB2qdju9TWfxgBpniOcxeLj48vn8dvc3VKuzwB2qp6fos/Q0aXV6D+jxwB7rsXHyLu8vb27vCSc
xSGZwxyZxH3A2RuUv0+uzz+ozCedxgCDtABnnABroKutr/7+/n2/2LTd6wBvo9bX2OLo6lGv0C6d
xS6avjmmzLTR2uzr6m651RuXw4jF3CqfxySaxSadyAuRv9bd4cPExRiMuDKjyUWevNPS0sXl8BeY
xKytr8G/wABypXvC23vD3O73+3vE3cvU2PH5+7S1t7q7vCGVwO/v8JfM3zymyyyZwrWys+Hy90Ki
xK6qqg+TwBKXxMvMzaWtsK7U4jemzLXEygBxpW++2aCho97Z18bP0/T09fX29vb19ViuzdDR0crf
51qz01y00ujo6Onq6hCDs2Gpw3i71CqWv3S71nO92M/h52m207bJ0AN6rPPz9Nrh5Nvo7K/b6oTI
37Td7ABqneHi4yScxo/M4RiWwRqVwcro8n3B2lGoylStzszMzAAAACH5BAEAAP8ALAAAAAAuACAA
Bwj/AP8JHEjw3wEkEY74WOjrQhUNBSNKnCjRSoYKCOwJcKWpEAACBFBRGEKxZMkDjRAg2OBlQyYL
WhDEcOWxDwofv0zqHIhhDYIFC2p4MYFMS62ZaiYVWlJJAYIqO00KMlEjABYOQokaRbp0CYBKffpE
iDpxSKYC1gqswToUmYVaCFyp6QrgwwcCscaSJZhgQYBeAdRyqFBhgwWkGyct8WoXRZ8Ph/YOxMOB
CIUAHsBxwGQBAII1YwpMI5Brcd0PKFA4Q2ZFMgYteZqkwxyu1KQNJzQc+CdFCrxypyqdRoEPX6x7
ki/n2TfbAxtNRHYTVCWpWTRbuRoX7yMgZ9QSFQa0/7LU/BXygjIWXVOBTR2sxp7BxGpENgKbY+PR
reqyIOKnOh0M445AjTjDCgrPSBNFKt9w8wMVU5g0Bg8kDAAKOutQAkNEQNBwDRAEeVEcAV6w84Ay
KowQSRhmzNGAASIAYow2IP6DySPk8ANKCv1wINE2cpjxCUEgOIOPAKicQMMbKnhyhhg97HDNF4vs
IEYkNkzwjwSP/PHIE2VIgIdEnxjAiBwNGIKGDKS8I0sw2VAzApNOQimGLlyMAIkDw2yhZTF/KKGE
lxCEMtEPBtDhACQurLDCLkFIsoUeZLyRpx8OmEGHN3AEcU0HkFAhUDFulDroJvOU5M44iDjgDTQO
1P/hzRw2IFJPGw3AAY0LI/SAwxc7jEKQI2mkEUipRoxp0g821AMIGlG0McockMzihx5c1LkDDmSg
UVAiafACRbGPVKDTFG3MYUYdLoThRxDE6DEMGUww8eQONGwTER9piFINFOPasaFJVIjTwC1xzOGP
A3HUKoIMDTwJR4QRgdBOJzq8UM0Lj5QihU5ZdGMOCSSYUwYzAwwkDhNtUKTBOZ10koMOoohihDwm
HZKPEDwb4fMe9An0g5Yl+SDKFTHnkMMLLQAjXUTxUCLEIyH0bIQAwuxVQhEMcEIIIUmHUEsWGCQg
xQEaIFGAHV0+QnUIIWwyg2T/3MPLDQwwcAUhTjiswYsQl1SAxQKmbBJCIMe6ISjVmXwsWQKJEJJE
3l1/TY8O4wZyh8ZQ3IF4qX9cggTdAmEwCAMs3IB311fsDfbMGv97BxSBQBAP6QMN0QUhLCSRhOp5
e923zDpk/EIaRdyO+0C/eHBHEiz0vjrrfMfciSKD4LJ8RBEk88IN0ff+O/CEVEPLGK1tH1ECM7Dx
RDWdcMLJFTpUQ44jfCyjvlShZNDE/0QAgT6ypr6AAAA7
"""),
        'LegacySwitch': PhotoImage( data=r"""
R0lGODlhMgAYAPcAAAEBAXmDjbe4uAE5cjF7xwFWq2Sa0S9biSlrrdTW1k2Ly02a5xUvSQFHjmep
6bfI2Q5SlQIYLwFfvj6M3Jaan8fHyDuFzwFp0Vah60uU3AEiRhFgrgFRogFr10N9uTFrpytHYQFM
mGWt9wIwX+bm5kaT4gtFgR1cnJPF9yt80CF0yAIMGHmp2c/P0AEoUb/P4Fei7qK4zgpLjgFkyQlf
t1mf5jKD1WWJrQ86ZwFAgBhYmVOa4MPV52uv8y+A0iR3ywFbtUyX5ECI0Q1UmwIcOUGQ3RBXoQI0
aRJbpr3BxVeJvQUJDafH5wIlS2aq7xBmv52lr7fH12el5Wml3097ph1ru7vM3HCz91Ke6lid40KQ
4GSQvgQGClFnfwVJjszMzVCX3hljrdPT1AFLlBRnutPf6yd5zjeI2QE9eRBdrBNVl+3v70mV4ydf
lwMVKwErVlul8AFChTGB1QE3bsTFxQImTVmAp0FjiUSM1k+b6QQvWQ1SlxMgLgFixEqU3xJhsgFT
pn2Xs5OluZ+1yz1Xb6HN+Td9wy1zuYClykV5r0x2oeDh4qmvt8LDwxhuxRlLfyRioo2124mft9bi
71mDr7fT79nl8Z2hpQs9b7vN4QMQIOPj5XOPrU2Jx32z6xtvwzeBywFFikFnjwcPFa29yxJjuFmP
xQFv3qGxwRc/Z8vb6wsRGBNqwqmpqTdvqQIbNQFPngMzZAEfP0mQ13mHlQFYsAFnznOXu2mPtQxj
vQ1Vn4Ot1+/x8my0/CJgnxNNh8DT5CdJaWyx+AELFWmt8QxPkxBZpwMFB015pgFduGCNuyx7zdnZ
2WKm6h1xyOPp8aW70QtPkUmM0LrCyr/FyztljwFPm0OJzwFny7/L1xFjswE/e12i50iR2VR8o2Gf
3xszS2eTvz2BxSlloQdJiwMHDzF3u7bJ3T2I1WCp8+Xt80FokQFJklef6mORw2ap7SJ1y77Q47nN
3wFfu1Kb5cXJyxdhrdDR0wlNkTSF11Oa4yp4yQEuW0WQ3QIDBQI7dSH5BAEAAAAALAAAAAAyABgA
Bwj/AAEIHDjKF6SDvhImPMHwhA6HOiLqUENRDYSLEIplxBcNHz4Z5GTI8BLKS5OBA1Ply2fDhxwf
PlLITGFmmRkzP+DlVKHCmU9nnz45csSqKKsn9gileZKrVC4aRFACOGZu5UobNuRohRkzhc2b+36o
qCaqrFmzZEV1ERBg3BOmMl5JZTBhwhm7ZyycYZnvJdeuNl21qkCHTiPDhxspTtKoQgUKCJ6wehMV
5QctWupeo6TkjOd8e1lmdQkTGbTTMaDFiDGINeskX6YhEicUiQa5A/kUKaFFwQ0oXzjZ8Tbcm3Hj
irwpMtTSgg9QMJf5WEZ9375AiED19ImpSQSUB4Kw/8HFSMyiRWJaqG/xhf2X91+oCbmq1e/MFD/2
EcApVkWVJhp8J9AqsywQxDfAbLJJPAy+kMkL8shjxTkUnhOJZ5+JVp8cKfhwxwdf4fQLgG4MFAwW
KOZRAxM81EAPPQvoE0QQfrDhx4399OMBMjz2yCMVivCoCAWXKLKMTPvoUYcsKwi0RCcwYCAlFjU0
A6OBM4pXAhsl8FYELYWFWZhiZCbRQgIC2AGTLy408coxAoEDx5wwtGPALTVg0E4NKC7gp4FsBKoA
Ki8U+oIVmVih6DnZPMBMAlGwIARWOLiggSYC+ZNIOulwY4AkSZCyxaikbqHMqaeaIp4+rAaxQxBg
2P+IozuRzvLZIS4syYVAfMAhwhSC1EPCGoskIIYY9yS7Hny75OFnEIAGyiVvWkjjRxF11fXIG3WU
KNA6wghDTCW88PKMJZOkm24Z7LarSjPtoIjFn1lKyyVmmBVhwRtvaDDMgFL0Eu4VhaiDwhXCXNFD
D8QQw7ATEDsBw8RSxotFHs7CKJ60XWrRBj91EOGPQCA48c7J7zTjSTPctOzynjVkkYU+O9S8Axg4
Z6BzBt30003Ps+AhNB5C4PCGC5gKJMMTZJBRytOl/CH1HxvQkMbVVxujtdZGGKGL17rsEfYQe+xR
zNnFcGQCv7LsKlAtp8R9Sgd0032BLXjPoPcMffTd3YcEgAMOxOBA1GJ4AYgXAMjiHDTgggveCgRI
3RfcnffefgcOeDKEG3444osDwgEspMNiTQhx5FoOShxcrrfff0uQjOycD+554qFzMHrpp4cwBju/
5+CmVNbArnntndeCO+O689777+w0IH0o1P/TRJMohRA4EJwn47nyiocOSOmkn/57COxE3wD11Mfh
fg45zCGyVF4Ufvvyze8ewv5jQK9++6FwXxzglwM0GPAfR8AeSo4gwAHCbxsQNCAa/kHBAVhwAHPI
4BE2eIRYeHAEIBwBP0Y4Qn41YWRSCQgAOw==
"""),
        'LegacyRouter': PhotoImage( data=r"""
R0lGODlhMgAYAPcAAAEBAXZ8gQNAgL29vQNctjl/xVSa4j1dfCF+3QFq1DmL3wJMmAMzZZW11dnZ
2SFrtyNdmTSO6gIZMUKa8gJVqEOHzR9Pf5W74wFjxgFx4jltn+np6Eyi+DuT6qKiohdtwwUPGWiq
6ymF4LHH3Rh11CV81kKT5AMoUA9dq1ap/mV0gxdXlytRdR1ptRNPjTt9vwNgvwJZsX+69gsXJQFH
jTtjizF0tvHx8VOm9z2V736Dhz2N3QM2acPZ70qe8gFo0HS19wVRnTiR6hMpP0eP1i6J5iNlqAtg
tktjfQFu3TNxryx4xAMTIzOE1XqAh1uf5SWC4AcfNy1XgQJny93n8a2trRh312Gt+VGm/AQIDTmB
yAF37QJasydzvxM/ayF3zhdLf8zLywFdu4i56gFlyi2J4yV/1w8wUo2/8j+X8D2Q5Eee9jeR7Uia
7DpeggFt2QNPm97e3jRong9bpziH2DuT7aipqQoVICmG45vI9R5720eT4Q1hs1er/yVVhwJJktPh
70tfdbHP7Xev5xs5V7W1sz9jhz11rUVZcQ9WoCVVhQk7cRdtwWuw9QYOFyFHbSBnr0dznxtWkS18
zKfP9wwcLAMHCwFFiS5UeqGtuRNNiwMfPS1hlQMtWRE5XzGM5yhxusLCwCljnwMdOFWh7cve8pG/
7Tlxp+Tr8g9bpXF3f0lheStrrYu13QEXLS1ppTV3uUuR1RMjNTF3vU2X4TZupwRSolNne4nB+T+L
2YGz4zJ/zYe99YGHjRdDcT95sx09XQldsgMLEwMrVc/X3yN3yQ1JhTRbggsdMQNfu9HPz6WlpW2t
7RctQ0GFyeHh4dvl8SBZklCb5kOO2kWR3Vmt/zdjkQIQHi90uvPz8wIVKBp42SV5zbfT7wtXpStV
fwFWrBVvyTt3swFz5kGBv2+1/QlbrVFjdQM7d1+j54i67UmX51qn9i1vsy+D2TuR5zddhQsjOR1t
u0GV6ghbsDVZf4+76RRisent8Xd9hQFBgwFNmwJLlcPDwwFr1z2T5yH5BAEAAAAALAAAAAAyABgA
Bwj/AAEIHEiQYJY7Qwg9UsTplRIbENuxEiXJgpcz8e5YKsixY8Essh7JcbbOBwcOa1JOmJAmTY4c
HeoIabJrCShI0XyB8YRso0eOjoAdWpciBZajJ1GuWcnSZY46Ed5N8hPATqEBoRB9gVJsxRlhPwHI
0kDkVywcRpGe9LF0adOnMpt8CxDnxg1o9lphKoEACoIvmlxxvHOKVg0n/Tzku2WoVoU2J1P6WNkS
rtwADuxCG/MOjwgRUEIjGG3FhaOBzaThiDSCil27G8Isc3LLjZwXsA6YYJmDjhTMmseoKQIFDx7R
oxHo2abnwygAlUj1mV6tWjlelEpRwfd6gzI7VeJQ/2vZoVaDUqigqftXpH0R46H9Kl++zUo4JnKq
9dGvv09RHFhcIUMe0NiFDyql0OJUHWywMc87TXRhhCRGiHAccvNZUR8JxpDTH38p9HEUFhxgMSAv
jbBjQge8PSXEC6uo0IsHA6gAAShmgCbffNtsQwIJifhRHX/TpUUiSijlUk8AqgQixSwdNBjCa7CF
oVggmEgCyRf01WcFCYvYUgB104k4YlK5HONEXXfpokYdMrXRAzMhmNINNNzB9p0T57AgyZckpKKP
GFNgw06ZWKR10jTw6MAmFWj4AJcQQkQQwSefvFeGCemMIQggeaJywSQ/wgHOAmJskQEfWqBlFBEH
1P/QaGY3QOpDZXA2+A6m7hl3IRQKGDCIAj6iwE8yGKC6xbJv8IHNHgACQQybN2QiTi5NwdlBpZdi
isd7vyanByOJ7CMGGRhgwE+qyy47DhnBPLDLEzLIAEQjBtChRmVPNWgpr+Be+Nc9icARww9TkIEu
DAsQ0O7DzGIQzD2QdDEJHTsIAROc3F7qWQncyHPPHN5QQAAG/vjzw8oKp8sPPxDH3O44/kwBQzLB
xBCMOTzzHEMMBMBARgJvZJBBEm/4k0ACKydMBgwYoKNNEjJXbTXE42Q9jtFIp8z0Dy1jQMA1AGzi
z9VoW7310V0znYDTGMQgwUDXLDBO2nhvoTXbbyRk/XXL+pxWkAT8UJ331WsbnbTSK8MggDZhCTOM
LQkcjvXeSPedAAw0nABWWARZIgEDfyTzxt15Z53BG1PEcEknrvgEelhZMDHKCTwI8EcQFHBBAAFc
gGPLHwLwcMIo12Qxu0ABAQA7
"""),
        'Controller': PhotoImage( data=r"""
R0lGODlhMAAwAPcAAAEBAWfNAYWFhcfHx+3t6/f390lJUaWlpfPz8/Hx72lpaZGRke/v77m5uc0B
AeHh4e/v7WNjY3t7e5eXlyMjI4mJidPT0+3t7f///09PT7Ozs/X19fHx8ZWTk8HBwX9/fwAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAAAALAAAAAAwADAA
Bwj/AAEIHEiwoMGDCBMqXMiwocOHECNKnEixosWLGAEIeMCxo8ePHwVkBGABg8mTKFOmtDByAIYN
MGPCRCCzQIENNzEMGOkBAwIKQIMKpYCgKAIHCDB4GNkAA4OnUJ9++CDhQ1QGFzA0GKkBA4GvYMOK
BYtBA1cNaNOqXcuWq8q3b81m7Cqzbk2bMMu6/Tl0qFEEAZLKxdj1KlSqVA3rnet1rOOwiwmznUzZ
LdzLJgdfpIv3pmebN2Pm1GyRbocNp1PLNMDaAM3Im1/alQk4gO28pCt2RdCBt+/eRg8IP1AUdmmf
f5MrL56bYlcOvaP7Xo6Ag3HdGDho3869u/YE1507t+3AgLz58ujPMwg/sTBUCAzgy49PH0LW5u0x
XFiwvz////5dcJ9bjxVIAHsSdUXAAgs2yOCDDn6FYEQaFGDgYxNCpEFfHHKIX4IDhCjiiCSS+CGF
FlCmogYpcnVABTDGKGOMAlRQYwUHnKjhAjX2aOOPN8LImgAL6PiQBhLMqCSNAThQgQRGOqRBBD1W
aaOVAggnQARRNqRBBxmEKeaYZIrZQZcMKbDiigqM5OabcMYp55x01ilnQAA7
"""),
        'Host': PhotoImage( data=r"""
R0lGODlhIAAYAPcAMf//////zP//mf//Zv//M///AP/M///MzP/M
mf/MZv/MM//MAP+Z//+ZzP+Zmf+ZZv+ZM/+ZAP9m//9mzP9mmf9m
Zv9mM/9mAP8z//8zzP8zmf8zZv8zM/8zAP8A//8AzP8Amf8AZv8A
M/8AAMz//8z/zMz/mcz/Zsz/M8z/AMzM/8zMzMzMmczMZszMM8zM
AMyZ/8yZzMyZmcyZZsyZM8yZAMxm/8xmzMxmmcxmZsxmM8xmAMwz
/8wzzMwzmcwzZswzM8wzAMwA/8wAzMwAmcwAZswAM8wAAJn//5n/
zJn/mZn/Zpn/M5n/AJnM/5nMzJnMmZnMZpnMM5nMAJmZ/5mZzJmZ
mZmZZpmZM5mZAJlm/5lmzJlmmZlmZplmM5lmAJkz/5kzzJkzmZkz
ZpkzM5kzAJkA/5kAzJkAmZkAZpkAM5kAAGb//2b/zGb/mWb/Zmb/
M2b/AGbM/2bMzGbMmWbMZmbMM2bMAGaZ/2aZzGaZmWaZZmaZM2aZ
AGZm/2ZmzGZmmWZmZmZmM2ZmAGYz/2YzzGYzmWYzZmYzM2YzAGYA
/2YAzGYAmWYAZmYAM2YAADP//zP/zDP/mTP/ZjP/MzP/ADPM/zPM
zDPMmTPMZjPMMzPMADOZ/zOZzDOZmTOZZjOZMzOZADNm/zNmzDNm
mTNmZjNmMzNmADMz/zMzzDMzmTMzZjMzMzMzADMA/zMAzDMAmTMA
ZjMAMzMAAAD//wD/zAD/mQD/ZgD/MwD/AADM/wDMzADMmQDMZgDM
MwDMAACZ/wCZzACZmQCZZgCZMwCZAABm/wBmzABmmQBmZgBmMwBm
AAAz/wAzzAAzmQAzZgAzMwAzAAAA/wAAzAAAmQAAZgAAM+4AAN0A
ALsAAKoAAIgAAHcAAFUAAEQAACIAABEAAADuAADdAAC7AACqAACI
AAB3AABVAABEAAAiAAARAAAA7gAA3QAAuwAAqgAAiAAAdwAAVQAA
RAAAIgAAEe7u7t3d3bu7u6qqqoiIiHd3d1VVVURERCIiIhEREQAA
ACH5BAEAAAAALAAAAAAgABgAAAiNAAH8G0iwoMGDCAcKTMiw4UBw
BPXVm0ixosWLFvVBHFjPoUeC9Tb+6/jRY0iQ/8iVbHiS40CVKxG2
HEkQZsyCM0mmvGkw50uePUV2tEnOZkyfQA8iTYpTKNOgKJ+C3AhO
p9SWVaVOfWj1KdauTL9q5UgVbFKsEjGqXVtP40NwcBnCjXtw7tx/
C8cSBBAQADs=
""" ),
        'OldSwitch': PhotoImage( data=r"""
R0lGODlhIAAYAPcAMf//////zP//mf//Zv//M///AP/M///MzP/M
mf/MZv/MM//MAP+Z//+ZzP+Zmf+ZZv+ZM/+ZAP9m//9mzP9mmf9m
Zv9mM/9mAP8z//8zzP8zmf8zZv8zM/8zAP8A//8AzP8Amf8AZv8A
M/8AAMz//8z/zMz/mcz/Zsz/M8z/AMzM/8zMzMzMmczMZszMM8zM
AMyZ/8yZzMyZmcyZZsyZM8yZAMxm/8xmzMxmmcxmZsxmM8xmAMwz
/8wzzMwzmcwzZswzM8wzAMwA/8wAzMwAmcwAZswAM8wAAJn//5n/
zJn/mZn/Zpn/M5n/AJnM/5nMzJnMmZnMZpnMM5nMAJmZ/5mZzJmZ
mZmZZpmZM5mZAJlm/5lmzJlmmZlmZplmM5lmAJkz/5kzzJkzmZkz
ZpkzM5kzAJkA/5kAzJkAmZkAZpkAM5kAAGb//2b/zGb/mWb/Zmb/
M2b/AGbM/2bMzGbMmWbMZmbMM2bMAGaZ/2aZzGaZmWaZZmaZM2aZ
AGZm/2ZmzGZmmWZmZmZmM2ZmAGYz/2YzzGYzmWYzZmYzM2YzAGYA
/2YAzGYAmWYAZmYAM2YAADP//zP/zDP/mTP/ZjP/MzP/ADPM/zPM
zDPMmTPMZjPMMzPMADOZ/zOZzDOZmTOZZjOZMzOZADNm/zNmzDNm
mTNmZjNmMzNmADMz/zMzzDMzmTMzZjMzMzMzADMA/zMAzDMAmTMA
ZjMAMzMAAAD//wD/zAD/mQD/ZgD/MwD/AADM/wDMzADMmQDMZgDM
MwDMAACZ/wCZzACZmQCZZgCZMwCZAABm/wBmzABmmQBmZgBmMwBm
AAAz/wAzzAAzmQAzZgAzMwAzAAAA/wAAzAAAmQAAZgAAM+4AAN0A
ALsAAKoAAIgAAHcAAFUAAEQAACIAABEAAADuAADdAAC7AACqAACI
AAB3AABVAABEAAAiAAARAAAA7gAA3QAAuwAAqgAAiAAAdwAAVQAA
RAAAIgAAEe7u7t3d3bu7u6qqqoiIiHd3d1VVVURERCIiIhEREQAA
ACH5BAEAAAAALAAAAAAgABgAAAhwAAEIHEiwoMGDCBMqXMiwocOH
ECNKnEixosWB3zJq3Mixo0eNAL7xG0mypMmTKPl9Cznyn8uWL/m5
/AeTpsyYI1eKlBnO5r+eLYHy9Ck0J8ubPmPOrMmUpM6UUKMa/Ui1
6saLWLNq3cq1q9evYB0GBAA7
""" ),
        'NetLink': PhotoImage( data=r"""
R0lGODlhFgAWAPcAMf//////zP//mf//Zv//M///AP/M///MzP/M
mf/MZv/MM//MAP+Z//+ZzP+Zmf+ZZv+ZM/+ZAP9m//9mzP9mmf9m
Zv9mM/9mAP8z//8zzP8zmf8zZv8zM/8zAP8A//8AzP8Amf8AZv8A
M/8AAMz//8z/zMz/mcz/Zsz/M8z/AMzM/8zMzMzMmczMZszMM8zM
AMyZ/8yZzMyZmcyZZsyZM8yZAMxm/8xmzMxmmcxmZsxmM8xmAMwz
/8wzzMwzmcwzZswzM8wzAMwA/8wAzMwAmcwAZswAM8wAAJn//5n/
zJn/mZn/Zpn/M5n/AJnM/5nMzJnMmZnMZpnMM5nMAJmZ/5mZzJmZ
mZmZZpmZM5mZAJlm/5lmzJlmmZlmZplmM5lmAJkz/5kzzJkzmZkz
ZpkzM5kzAJkA/5kAzJkAmZkAZpkAM5kAAGb//2b/zGb/mWb/Zmb/
M2b/AGbM/2bMzGbMmWbMZmbMM2bMAGaZ/2aZzGaZmWaZZmaZM2aZ
AGZm/2ZmzGZmmWZmZmZmM2ZmAGYz/2YzzGYzmWYzZmYzM2YzAGYA
/2YAzGYAmWYAZmYAM2YAADP//zP/zDP/mTP/ZjP/MzP/ADPM/zPM
zDPMmTPMZjPMMzPMADOZ/zOZzDOZmTOZZjOZMzOZADNm/zNmzDNm
mTNmZjNmMzNmADMz/zMzzDMzmTMzZjMzMzMzADMA/zMAzDMAmTMA
ZjMAMzMAAAD//wD/zAD/mQD/ZgD/MwD/AADM/wDMzADMmQDMZgDM
MwDMAACZ/wCZzACZmQCZZgCZMwCZAABm/wBmzABmmQBmZgBmMwBm
AAAz/wAzzAAzmQAzZgAzMwAzAAAA/wAAzAAAmQAAZgAAM+4AAN0A
ALsAAKoAAIgAAHcAAFUAAEQAACIAABEAAADuAADdAAC7AACqAACI
AAB3AABVAABEAAAiAAARAAAA7gAA3QAAuwAAqgAAiAAAdwAAVQAA
RAAAIgAAEe7u7t3d3bu7u6qqqoiIiHd3d1VVVURERCIiIhEREQAA
ACH5BAEAAAAALAAAAAAWABYAAAhIAAEIHEiwoEGBrhIeXEgwoUKG
Cx0+hGhQoiuKBy1irChxY0GNHgeCDAlgZEiTHlFuVImRJUWXEGEy
lBmxI8mSNknm1Dnx5sCAADs=
""" )
    }
def addDictOption( opts, choicesDict, default, name, helpStr=None ):
"""Convenience function to add choices dicts to OptionParser.
opts: OptionParser instance
choicesDict: dictionary of valid choices, must include default
default: default choice key
name: long option name
help: string"""
if default not in choicesDict:
raise Exception( 'Invalid default %s for choices dict: %s' %
( default, name ) )
if not helpStr:
helpStr = ( '|'.join( sorted( choicesDict.keys() ) ) +
'[,param=value...]' )
opts.add_option( '--' + name,
type='string',
default = default,
help = helpStr )
if __name__ == '__main__':
setLogLevel( 'info' )
app = MiniEdit()
### import topology if specified ###
app.parseArgs()
app.importTopo()
app.mainloop()
| 154,479 | 42.090656 | 254 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/tree1024.py
|
#!/usr/bin/python
"""
Create a 1024-host network, and run the CLI on it.
If this fails because of kernel limits, you may have
to adjust them, e.g. by adding entries to /etc/sysctl.conf
and running sysctl -p. Check util/sysctl_addon.
"""
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.node import OVSSwitch
from mininet.topolib import TreeNet
if __name__ == '__main__':
setLogLevel( 'info' )
network = TreeNet( depth=2, fanout=32, switch=OVSSwitch )
network.run( CLI, network )
| 522 | 26.526316 | 61 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/natnet.py
|
#!/usr/bin/python
"""
natnet.py: Example network with NATs
h0
|
s0
|
----------------
| |
nat1 nat2
| |
s1 s2
| |
h1 h2
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.nodelib import NAT
from mininet.log import setLogLevel
from mininet.cli import CLI
from mininet.util import irange
class InternetTopo(Topo):
"Single switch connected to n hosts."
def __init__(self, n=2, **opts):
Topo.__init__(self, **opts)
# set up inet switch
inetSwitch = self.addSwitch('s0')
# add inet host
inetHost = self.addHost('h0')
self.addLink(inetSwitch, inetHost)
# add local nets
for i in irange(1, n):
inetIntf = 'nat%d-eth0' % i
localIntf = 'nat%d-eth1' % i
localIP = '192.168.%d.1' % i
localSubnet = '192.168.%d.0/24' % i
natParams = { 'ip' : '%s/24' % localIP }
# add NAT to topology
nat = self.addNode('nat%d' % i, cls=NAT, subnet=localSubnet,
inetIntf=inetIntf, localIntf=localIntf)
switch = self.addSwitch('s%d' % i)
# connect NAT to inet and local switches
self.addLink(nat, inetSwitch, intfName1=inetIntf)
self.addLink(nat, switch, intfName1=localIntf, params1=natParams)
# add host and connect to local switch
host = self.addHost('h%d' % i,
ip='192.168.%d.100/24' % i,
defaultRoute='via %s' % localIP)
self.addLink(host, switch)
def run():
"Create network and run the CLI"
topo = InternetTopo()
net = Mininet(topo=topo)
net.start()
CLI(net)
net.stop()
if __name__ == '__main__':
setLogLevel('info')
run()
| 1,948 | 26.842857 | 77 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/linearbandwidth.py
|
#!/usr/bin/python
"""
Test bandwidth (using iperf) on linear networks of varying size,
using both kernel and user datapaths.
We construct a network of N hosts and N-1 switches, connected as follows:
h1 <-> s1 <-> s2 .. sN-1
| | |
h2 h3 hN
WARNING: by default, the reference controller only supports 16
switches, so this test WILL NOT WORK unless you have recompiled
your controller to support 100 switches (or more.)
In addition to testing the bandwidth across varying numbers
of switches, this example demonstrates:
- creating a custom topology, LinearTestTopo
- using the ping() and iperf() tests from Mininet()
- testing both the kernel and user switches
"""
from mininet.net import Mininet
from mininet.node import UserSwitch, OVSKernelSwitch, Controller
from mininet.topo import Topo
from mininet.log import lg
from mininet.util import irange, quietRun
from mininet.link import TCLink
from functools import partial
import sys
flush = sys.stdout.flush
class LinearTestTopo( Topo ):
"Topology for a string of N hosts and N-1 switches."
def __init__( self, N, **params ):
# Initialize topology
Topo.__init__( self, **params )
# Create switches and hosts
hosts = [ self.addHost( 'h%s' % h )
for h in irange( 1, N ) ]
switches = [ self.addSwitch( 's%s' % s )
for s in irange( 1, N - 1 ) ]
# Wire up switches
last = None
for switch in switches:
if last:
self.addLink( last, switch )
last = switch
# Wire up hosts
self.addLink( hosts[ 0 ], switches[ 0 ] )
for host, switch in zip( hosts[ 1: ], switches ):
self.addLink( host, switch )
def linearBandwidthTest( lengths ):
"Check bandwidth at various lengths along a switch chain."
results = {}
switchCount = max( lengths )
hostCount = switchCount + 1
switches = { 'reference user': UserSwitch,
'Open vSwitch kernel': OVSKernelSwitch }
# UserSwitch is horribly slow with recent kernels.
# We can reinstate it once its performance is fixed
del switches[ 'reference user' ]
topo = LinearTestTopo( hostCount )
# Select TCP Reno
output = quietRun( 'sysctl -w net.ipv4.tcp_congestion_control=reno' )
assert 'reno' in output
for datapath in switches.keys():
print "*** testing", datapath, "datapath"
Switch = switches[ datapath ]
results[ datapath ] = []
link = partial( TCLink, delay='1ms' )
net = Mininet( topo=topo, switch=Switch,
controller=Controller, waitConnected=True,
link=link )
net.start()
print "*** testing basic connectivity"
for n in lengths:
net.ping( [ net.hosts[ 0 ], net.hosts[ n ] ] )
print "*** testing bandwidth"
for n in lengths:
src, dst = net.hosts[ 0 ], net.hosts[ n ]
# Try to prime the pump to reduce PACKET_INs during test
# since the reference controller is reactive
src.cmd( 'telnet', dst.IP(), '5001' )
print "testing", src.name, "<->", dst.name,
bandwidth = net.iperf( [ src, dst ], seconds=10 )
print bandwidth
flush()
results[ datapath ] += [ ( n, bandwidth ) ]
net.stop()
for datapath in switches.keys():
print
print "*** Linear network results for", datapath, "datapath:"
print
result = results[ datapath ]
print "SwitchCount\tiperf Results"
for switchCount, bandwidth in result:
print switchCount, '\t\t',
print bandwidth[ 0 ], 'server, ', bandwidth[ 1 ], 'client'
print
print
if __name__ == '__main__':
lg.setLogLevel( 'info' )
sizes = [ 1, 10, 20, 40, 60, 80 ]
print "*** Running linearBandwidthTest", sizes
linearBandwidthTest( sizes )
| 3,988 | 30.409449 | 73 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/intfoptions.py
|
#!/usr/bin/python
'''
example of using various TCIntf options.
reconfigures a single interface using intf.config()
to use different traffic control commands to test
bandwidth, loss, and delay
'''
from mininet.net import Mininet
from mininet.log import setLogLevel, info
from mininet.link import TCLink
def intfOptions():
"run various traffic control commands on a single interface"
net = Mininet( autoStaticArp=True )
net.addController( 'c0' )
h1 = net.addHost( 'h1' )
h2 = net.addHost( 'h2' )
s1 = net.addSwitch( 's1' )
link1 = net.addLink( h1, s1, cls=TCLink )
net.addLink( h2, s1 )
net.start()
# flush out latency from reactive forwarding delay
net.pingAll()
info( '\n*** Configuring one intf with bandwidth of 5 Mb\n' )
link1.intf1.config( bw=5 )
info( '\n*** Running iperf to test\n' )
net.iperf()
info( '\n*** Configuring one intf with loss of 50%\n' )
link1.intf1.config( loss=50 )
info( '\n' )
net.iperf( ( h1, h2 ), l4Type='UDP' )
info( '\n*** Configuring one intf with delay of 15ms\n' )
link1.intf1.config( delay='15ms' )
info( '\n*** Run a ping to confirm delay\n' )
net.pingPairFull()
info( '\n*** Done testing\n' )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
intfOptions()
| 1,320 | 25.959184 | 65 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/sshd.py
|
#!/usr/bin/python
"""
Create a network and start sshd(8) on each host.
While something like rshd(8) would be lighter and faster,
(and perfectly adequate on an in-machine network)
the advantage of running sshd is that scripts can work
unchanged on mininet and hardware.
In addition to providing ssh access to hosts, this example
demonstrates:
- creating a convenience function to construct networks
- connecting the host network to the root namespace
- running server processes (sshd in this case) on hosts
"""
import sys
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import lg
from mininet.node import Node
from mininet.topolib import TreeTopo
from mininet.util import waitListening
def TreeNet( depth=1, fanout=2, **kwargs ):
"Convenience function for creating tree networks."
topo = TreeTopo( depth, fanout )
return Mininet( topo, **kwargs )
def connectToRootNS( network, switch, ip, routes ):
"""Connect hosts to root namespace via switch. Starts network.
network: Mininet() network object
switch: switch to connect to root namespace
ip: IP address for root namespace node
routes: host networks to route to"""
# Create a node in root namespace and link to switch 0
root = Node( 'root', inNamespace=False )
intf = network.addLink( root, switch ).intf1
root.setIP( ip, intf=intf )
# Start network that now includes link to root namespace
network.start()
# Add routes from root ns to hosts
for route in routes:
root.cmd( 'route add -net ' + route + ' dev ' + str( intf ) )
def sshd( network, cmd='/usr/sbin/sshd', opts='-D',
ip='10.123.123.1/32', routes=None, switch=None ):
"""Start a network, connect it to root ns, and run sshd on all hosts.
ip: root-eth0 IP address in root namespace (10.123.123.1/32)
routes: Mininet host networks to route to (10.0/24)
switch: Mininet switch to connect to root namespace (s1)"""
if not switch:
switch = network[ 's1' ] # switch to use
if not routes:
routes = [ '10.0.0.0/24' ]
connectToRootNS( network, switch, ip, routes )
for host in network.hosts:
host.cmd( cmd + ' ' + opts + '&' )
print "*** Waiting for ssh daemons to start"
for server in network.hosts:
waitListening( server=server, port=22, timeout=5 )
print
print "*** Hosts are running sshd at the following addresses:"
print
for host in network.hosts:
print host.name, host.IP()
print
print "*** Type 'exit' or control-D to shut down network"
CLI( network )
for host in network.hosts:
host.cmd( 'kill %' + cmd )
network.stop()
if __name__ == '__main__':
lg.setLogLevel( 'info')
net = TreeNet( depth=1, fanout=4 )
# get sshd args from the command line or use default args
# useDNS=no -u0 to avoid reverse DNS lookup timeout
argvopts = ' '.join( sys.argv[ 1: ] ) if len( sys.argv ) > 1 else (
'-D -o UseDNS=no -u0' )
sshd( net, opts=argvopts )
| 3,040 | 34.360465 | 73 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/controlnet.py
|
#!/usr/bin/python
"""
controlnet.py: Mininet with a custom control network
We create two Mininet() networks, a control network
and a data network, running four DataControllers on the
control network to control the data network.
Since we're using UserSwitch on the data network,
it should correctly fail over to a backup controller.
We also use a Mininet Facade to talk to both the
control and data networks from a single CLI.
"""
from functools import partial
from mininet.net import Mininet
from mininet.node import Controller, UserSwitch
from mininet.cli import CLI
from mininet.topo import Topo
from mininet.topolib import TreeTopo
from mininet.log import setLogLevel, info
# Some minor hacks
class DataController( Controller ):
"""Data Network Controller.
patched to avoid checkListening error and to delete intfs"""
def checkListening( self ):
"Ignore spurious error"
pass
def stop( self, *args, **kwargs ):
"Make sure intfs are deleted"
kwargs.update( deleteIntfs=True )
super( DataController, self ).stop( *args, **kwargs )
class MininetFacade( object ):
"""Mininet object facade that allows a single CLI to
talk to one or more networks"""
def __init__( self, net, *args, **kwargs ):
"""Create MininetFacade object.
net: Primary Mininet object
args: unnamed networks passed as arguments
kwargs: named networks passed as arguments"""
self.net = net
self.nets = [ net ] + list( args ) + kwargs.values()
self.nameToNet = kwargs
self.nameToNet['net'] = net
def __getattr__( self, name ):
"returns attribute from Primary Mininet object"
return getattr( self.net, name )
def __getitem__( self, key ):
"returns primary/named networks or node from any net"
#search kwargs for net named key
if key in self.nameToNet:
return self.nameToNet[ key ]
#search each net for node named key
for net in self.nets:
if key in net:
return net[ key ]
def __iter__( self ):
"Iterate through all nodes in all Mininet objects"
for net in self.nets:
for node in net:
yield node
def __len__( self ):
"returns aggregate number of nodes in all nets"
count = 0
for net in self.nets:
count += len(net)
return count
def __contains__( self, key ):
"returns True if node is a member of any net"
return key in self.keys()
def keys( self ):
"returns a list of all node names in all networks"
return list( self )
def values( self ):
"returns a list of all nodes in all networks"
return [ self[ key ] for key in self ]
def items( self ):
"returns (key,value) tuple list for every node in all networks"
return zip( self.keys(), self.values() )
# A real control network!
class ControlNetwork( Topo ):
"Control Network Topology"
def __init__( self, n, dataController=DataController, **kwargs ):
"""n: number of data network controller nodes
dataController: class for data network controllers"""
Topo.__init__( self, **kwargs )
# Connect everything to a single switch
cs0 = self.addSwitch( 'cs0' )
# Add hosts which will serve as data network controllers
for i in range( 0, n ):
c = self.addHost( 'c%s' % i, cls=dataController,
inNamespace=True )
self.addLink( c, cs0 )
# Connect switch to root namespace so that data network
# switches will be able to talk to us
root = self.addHost( 'root', inNamespace=False )
self.addLink( root, cs0 )
# Make it Happen!!
def run():
"Create control and data networks, and invoke the CLI"
info( '* Creating Control Network\n' )
ctopo = ControlNetwork( n=4, dataController=DataController )
cnet = Mininet( topo=ctopo, ipBase='192.168.123.0/24', controller=None )
info( '* Adding Control Network Controller\n')
cnet.addController( 'cc0', controller=Controller )
info( '* Starting Control Network\n')
cnet.start()
info( '* Creating Data Network\n' )
topo = TreeTopo( depth=2, fanout=2 )
# UserSwitch so we can easily test failover
sw = partial( UserSwitch, opts='--inactivity-probe=1 --max-backoff=1' )
net = Mininet( topo=topo, switch=sw, controller=None )
info( '* Adding Controllers to Data Network\n' )
for host in cnet.hosts:
if isinstance(host, Controller):
net.addController( host )
info( '* Starting Data Network\n')
net.start()
mn = MininetFacade( net, cnet=cnet )
CLI( mn )
info( '* Stopping Data Network\n' )
net.stop()
info( '* Stopping Control Network\n' )
cnet.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
run()
| 4,967 | 30.245283 | 76 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/cluster.py
|
#!/usr/bin/python
"""
cluster.py: prototyping/experimentation for distributed Mininet,
aka Mininet: Cluster Edition
Author: Bob Lantz
Core classes:
RemoteNode: a Node() running on a remote server
RemoteOVSSwitch(): an OVSSwitch() running on a remote server
RemoteLink: a Link() on a remote server
Tunnel: a Link() between a local Node() and a RemoteNode()
These are largely interoperable with local objects.
- One Mininet to rule them all
It is important that the same topologies, APIs, and CLI can be used
with minimal or no modification in both local and distributed environments.
- Multiple placement models
Placement should be as easy as possible. We should provide basic placement
support and also allow for explicit placement.
Questions:
What is the basic communication mechanism?
To start with? Probably a single multiplexed ssh connection between each
pair of mininet servers that needs to communicate.
How are tunnels created?
We have several options including ssh, GRE, OF capsulator, socat, VDE, l2tp,
etc.. It's not clear what the best one is. For now, we use ssh tunnels since
they are encrypted and semi-automatically shared. We will probably want to
support GRE as well because it's very easy to set up with OVS.
How are tunnels destroyed?
They are destroyed when the links are deleted in Mininet.stop()
How does RemoteNode.popen() work?
It opens a shared ssh connection to the remote server and attaches to
the namespace using mnexec -a -g.
Is there any value to using Paramiko vs. raw ssh?
Maybe, but it doesn't seem to support L2 tunneling.
Should we preflight the entire network, including all server-to-server
connections?
Yes! We don't yet do this with remote server-to-server connections yet.
Should we multiplex the link ssh connections?
Yes, this is done automatically with ControlMaster=auto.
Note on ssh and DNS:
Please add UseDNS: no to your /etc/ssh/sshd_config!!!
Things to do:
- asynchronous/pipelined/parallel startup
- ssh debugging/profiling
- make connections into real objects
- support for other tunneling schemes
- tests and benchmarks
- hifi support (e.g. delay compensation)
"""
from mininet.node import Node, Host, OVSSwitch, Controller
from mininet.link import Link, Intf
from mininet.net import Mininet
from mininet.topo import LinearTopo
from mininet.topolib import TreeTopo
from mininet.util import quietRun, errRun
from mininet.examples.clustercli import CLI
from mininet.log import setLogLevel, debug, info, error
from mininet.clean import addCleanupCallback
from signal import signal, SIGINT, SIG_IGN
from subprocess import Popen, PIPE, STDOUT
import os
from random import randrange
import sys
import re
from itertools import groupby
from operator import attrgetter
from distutils.version import StrictVersion
def findUser():
"Try to return logged-in (usually non-root) user"
return (
# If we're running sudo
os.environ.get( 'SUDO_USER', False ) or
# Logged-in user (if we have a tty)
( quietRun( 'who am i' ).split() or [ False ] )[ 0 ] or
# Give up and return effective user
quietRun( 'whoami' ).strip() )
class ClusterCleanup( object ):
"Cleanup callback"
inited = False
serveruser = {}
@classmethod
def add( cls, server, user='' ):
"Add an entry to server: user dict"
if not cls.inited:
addCleanupCallback( cls.cleanup )
if not user:
user = findUser()
cls.serveruser[ server ] = user
@classmethod
def cleanup( cls ):
"Clean up"
info( '*** Cleaning up cluster\n' )
for server, user in cls.serveruser.iteritems():
if server == 'localhost':
# Handled by mininet.clean.cleanup()
continue
else:
cmd = [ 'su', user, '-c',
'ssh %s@%s sudo mn -c' % ( user, server ) ]
info( cmd, '\n' )
info( quietRun( cmd ) )
# BL note: so little code is required for remote nodes,
# we will probably just want to update the main Node()
# class to enable it for remote access! However, there
# are a large number of potential failure conditions with
# remote nodes which we may want to detect and handle.
# Another interesting point is that we could put everything
# in a mix-in class and easily add cluster mode to 2.0.
class RemoteMixin( object ):
"A mix-in class to turn local nodes into remote nodes"
# ssh base command
# -q: don't print stupid diagnostic messages
# BatchMode yes: don't ask for password
# ForwardAgent yes: forward authentication credentials
sshbase = [ 'ssh', '-q',
'-o', 'BatchMode=yes',
'-o', 'ForwardAgent=yes', '-tt' ]
def __init__( self, name, server='localhost', user=None, serverIP=None,
controlPath=False, splitInit=False, **kwargs):
"""Instantiate a remote node
name: name of remote node
server: remote server (optional)
user: user on remote server (optional)
controlPath: specify shared ssh control path (optional)
splitInit: split initialization?
**kwargs: see Node()"""
# We connect to servers by IP address
self.server = server if server else 'localhost'
self.serverIP = ( serverIP if serverIP
else self.findServerIP( self.server ) )
self.user = user if user else findUser()
ClusterCleanup.add( server=server, user=user )
if controlPath is True:
# Set a default control path for shared SSH connections
controlPath = '/tmp/mn-%r@%h:%p'
self.controlPath = controlPath
self.splitInit = splitInit
if self.user and self.server != 'localhost':
self.dest = '%s@%s' % ( self.user, self.serverIP )
self.sshcmd = [ 'sudo', '-E', '-u', self.user ] + self.sshbase
if self.controlPath:
self.sshcmd += [ '-o', 'ControlPath=' + self.controlPath,
'-o', 'ControlMaster=auto',
'-o', 'ControlPersist=' + '1' ]
self.sshcmd += [ self.dest ]
self.isRemote = True
else:
self.dest = None
self.sshcmd = []
self.isRemote = False
# Satisfy pylint
self.shell, self.pid = None, None
super( RemoteMixin, self ).__init__( name, **kwargs )
# Determine IP address of local host
_ipMatchRegex = re.compile( r'\d+\.\d+\.\d+\.\d+' )
@classmethod
def findServerIP( cls, server ):
"Return our server's IP address"
# First, check for an IP address
ipmatch = cls._ipMatchRegex.findall( server )
if ipmatch:
return ipmatch[ 0 ]
# Otherwise, look up remote server
output = quietRun( 'getent ahostsv4 %s' % server )
ips = cls._ipMatchRegex.findall( output )
ip = ips[ 0 ] if ips else None
return ip
# Command support via shell process in namespace
def startShell( self, *args, **kwargs ):
"Start a shell process for running commands"
if self.isRemote:
kwargs.update( mnopts='-c' )
super( RemoteMixin, self ).startShell( *args, **kwargs )
# Optional split initialization
self.sendCmd( 'echo $$' )
if not self.splitInit:
self.finishInit()
def finishInit( self ):
"Wait for split initialization to complete"
self.pid = int( self.waitOutput() )
def rpopen( self, *cmd, **opts ):
"Return a Popen object on underlying server in root namespace"
params = { 'stdin': PIPE,
'stdout': PIPE,
'stderr': STDOUT,
'sudo': True }
params.update( opts )
return self._popen( *cmd, **params )
def rcmd( self, *cmd, **opts):
"""rcmd: run a command on underlying server
in root namespace
args: string or list of strings
returns: stdout and stderr"""
popen = self.rpopen( *cmd, **opts )
# print 'RCMD: POPEN:', popen
# These loops are tricky to get right.
# Once the process exits, we can read
# EOF twice if necessary.
result = ''
while True:
poll = popen.poll()
result += popen.stdout.read()
if poll is not None:
break
return result
@staticmethod
def _ignoreSignal():
"Detach from process group to ignore all signals"
os.setpgrp()
def _popen( self, cmd, sudo=True, tt=True, **params):
"""Spawn a process on a remote node
cmd: remote command to run (list)
**params: parameters to Popen()
returns: Popen() object"""
if type( cmd ) is str:
cmd = cmd.split()
if self.isRemote:
if sudo:
cmd = [ 'sudo', '-E' ] + cmd
if tt:
cmd = self.sshcmd + cmd
else:
# Hack: remove -tt
sshcmd = list( self.sshcmd )
sshcmd.remove( '-tt' )
cmd = sshcmd + cmd
else:
if self.user and not sudo:
# Drop privileges
cmd = [ 'sudo', '-E', '-u', self.user ] + cmd
params.update( preexec_fn=self._ignoreSignal )
debug( '_popen', cmd, '\n' )
popen = super( RemoteMixin, self )._popen( cmd, **params )
return popen
def popen( self, *args, **kwargs ):
"Override: disable -tt"
return super( RemoteMixin, self).popen( *args, tt=False, **kwargs )
def addIntf( self, *args, **kwargs ):
"Override: use RemoteLink.moveIntf"
kwargs.update( moveIntfFn=RemoteLink.moveIntf )
return super( RemoteMixin, self).addIntf( *args, **kwargs )
class RemoteNode( RemoteMixin, Node ):
"A node on a remote server"
pass
class RemoteHost( RemoteNode ):
"A RemoteHost is simply a RemoteNode"
pass
class RemoteOVSSwitch( RemoteMixin, OVSSwitch ):
"Remote instance of Open vSwitch"
OVSVersions = {}
def __init__( self, *args, **kwargs ):
# No batch startup yet
kwargs.update( batch=True )
super( RemoteOVSSwitch, self ).__init__( *args, **kwargs )
def isOldOVS( self ):
"Is remote switch using an old OVS version?"
cls = type( self )
if self.server not in cls.OVSVersions:
# pylint: disable=not-callable
vers = self.cmd( 'ovs-vsctl --version' )
# pylint: enable=not-callable
cls.OVSVersions[ self.server ] = re.findall(
r'\d+\.\d+', vers )[ 0 ]
return ( StrictVersion( cls.OVSVersions[ self.server ] ) <
StrictVersion( '1.10' ) )
@classmethod
def batchStartup( cls, switches, **_kwargs ):
"Start up switches in per-server batches"
key = attrgetter( 'server' )
for server, switchGroup in groupby( sorted( switches, key=key ), key ):
info( '(%s)' % server )
group = tuple( switchGroup )
switch = group[ 0 ]
OVSSwitch.batchStartup( group, run=switch.cmd )
return switches
@classmethod
def batchShutdown( cls, switches, **_kwargs ):
"Stop switches in per-server batches"
key = attrgetter( 'server' )
for server, switchGroup in groupby( sorted( switches, key=key ), key ):
info( '(%s)' % server )
group = tuple( switchGroup )
switch = group[ 0 ]
OVSSwitch.batchShutdown( group, run=switch.rcmd )
return switches
class RemoteLink( Link ):
"A RemoteLink is a link between nodes which may be on different servers"
def __init__( self, node1, node2, **kwargs ):
"""Initialize a RemoteLink
see Link() for parameters"""
# Create links on remote node
self.node1 = node1
self.node2 = node2
self.tunnel = None
kwargs.setdefault( 'params1', {} )
kwargs.setdefault( 'params2', {} )
self.cmd = None # satisfy pylint
Link.__init__( self, node1, node2, **kwargs )
def stop( self ):
"Stop this link"
if self.tunnel:
self.tunnel.terminate()
self.intf1.delete()
self.intf2.delete()
else:
Link.stop( self )
self.tunnel = None
def makeIntfPair( self, intfname1, intfname2, addr1=None, addr2=None,
node1=None, node2=None, deleteIntfs=True ):
"""Create pair of interfaces
intfname1: name of interface 1
intfname2: name of interface 2
(override this method [and possibly delete()]
to change link type)"""
node1 = self.node1 if node1 is None else node1
node2 = self.node2 if node2 is None else node2
server1 = getattr( node1, 'server', 'localhost' )
server2 = getattr( node2, 'server', 'localhost' )
if server1 == server2:
# Link within same server
return Link.makeIntfPair( intfname1, intfname2, addr1, addr2,
node1, node2, deleteIntfs=deleteIntfs )
# Otherwise, make a tunnel
self.tunnel = self.makeTunnel( node1, node2, intfname1, intfname2,
addr1, addr2 )
return self.tunnel
@staticmethod
def moveIntf( intf, node, printError=True ):
"""Move remote interface from root ns to node
intf: string, interface
dstNode: destination Node
srcNode: source Node or None (default) for root ns
printError: if true, print error"""
intf = str( intf )
cmd = 'ip link set %s netns %s' % ( intf, node.pid )
node.rcmd( cmd )
links = node.cmd( 'ip link show' )
if not ' %s:' % intf in links:
if printError:
error( '*** Error: RemoteLink.moveIntf: ' + intf +
' not successfully moved to ' + node.name + '\n' )
return False
return True
def makeTunnel( self, node1, node2, intfname1, intfname2,
addr1=None, addr2=None ):
"Make a tunnel across switches on different servers"
# We should never try to create a tunnel to ourselves!
assert node1.server != 'localhost' or node2.server != 'localhost'
# And we can't ssh into this server remotely as 'localhost',
# so try again swappping node1 and node2
if node2.server == 'localhost':
return self.makeTunnel( node2, node1, intfname2, intfname1,
addr2, addr1 )
# 1. Create tap interfaces
for node in node1, node2:
# For now we are hard-wiring tap9, which we will rename
cmd = 'ip tuntap add dev tap9 mode tap user ' + node.user
result = node.rcmd( cmd )
if result:
raise Exception( 'error creating tap9 on %s: %s' %
( node, result ) )
# 2. Create ssh tunnel between tap interfaces
# -n: close stdin
dest = '%s@%s' % ( node2.user, node2.serverIP )
cmd = [ 'ssh', '-n', '-o', 'Tunnel=Ethernet', '-w', '9:9',
dest, 'echo @' ]
self.cmd = cmd
tunnel = node1.rpopen( cmd, sudo=False )
# When we receive the character '@', it means that our
# tunnel should be set up
debug( 'Waiting for tunnel to come up...\n' )
ch = tunnel.stdout.read( 1 )
if ch != '@':
raise Exception( 'makeTunnel:\n',
'Tunnel setup failed for',
'%s:%s' % ( node1, node1.dest ), 'to',
'%s:%s\n' % ( node2, node2.dest ),
'command was:', cmd, '\n' )
# 3. Move interfaces if necessary
for node in node1, node2:
if not self.moveIntf( 'tap9', node ):
raise Exception( 'interface move failed on node %s' % node )
# 4. Rename tap interfaces to desired names
for node, intf, addr in ( ( node1, intfname1, addr1 ),
( node2, intfname2, addr2 ) ):
if not addr:
result = node.cmd( 'ip link set tap9 name', intf )
else:
result = node.cmd( 'ip link set tap9 name', intf,
'address', addr )
if result:
raise Exception( 'error renaming %s: %s' % ( intf, result ) )
return tunnel
def status( self ):
"Detailed representation of link"
if self.tunnel:
if self.tunnel.poll() is not None:
status = "Tunnel EXITED %s" % self.tunnel.returncode
else:
status = "Tunnel Running (%s: %s)" % (
self.tunnel.pid, self.cmd )
else:
status = "OK"
result = "%s %s" % ( Link.status( self ), status )
return result
# Some simple placement algorithms for MininetCluster
class Placer( object ):
"Node placement algorithm for MininetCluster"
def __init__( self, servers=None, nodes=None, hosts=None,
switches=None, controllers=None, links=None ):
"""Initialize placement object
servers: list of servers
nodes: list of all nodes
hosts: list of hosts
switches: list of switches
controllers: list of controllers
links: list of links
(all arguments are optional)
returns: server"""
self.servers = servers or []
self.nodes = nodes or []
self.hosts = hosts or []
self.switches = switches or []
self.controllers = controllers or []
self.links = links or []
def place( self, node ):
"Return server for a given node"
assert self, node # satisfy pylint
# Default placement: run locally
return 'localhost'
class RandomPlacer( Placer ):
"Random placement"
def place( self, nodename ):
"""Random placement function
nodename: node name"""
assert nodename # please pylint
# This may be slow with lots of servers
return self.servers[ randrange( 0, len( self.servers ) ) ]
class RoundRobinPlacer( Placer ):
"""Round-robin placement
Note this will usually result in cross-server links between
hosts and switches"""
def __init__( self, *args, **kwargs ):
Placer.__init__( self, *args, **kwargs )
self.next = 0
def place( self, nodename ):
"""Round-robin placement function
nodename: node name"""
assert nodename # please pylint
# This may be slow with lots of servers
server = self.servers[ self.next ]
self.next = ( self.next + 1 ) % len( self.servers )
return server
class SwitchBinPlacer( Placer ):
    """Place switches (and controllers) into evenly-sized bins,
       and attempt to co-locate hosts and switches"""
    def __init__( self, *args, **kwargs ):
        Placer.__init__( self, *args, **kwargs )
        # Easy lookup for servers and node sets
        self.servdict = dict( enumerate( self.servers ) )
        self.hset = frozenset( self.hosts )
        self.sset = frozenset( self.switches )
        self.cset = frozenset( self.controllers )
        # Server and switch placement indices
        # (computed once up front, so place() is a plain dict lookup)
        self.placement = self.calculatePlacement()
    @staticmethod
    def bin( nodes, servers ):
        """Distribute nodes evenly over servers.
           Returns a dict mapping each node to its assigned server."""
        # Calculate base bin size
        nlen = len( nodes )
        slen = len( servers )
        # Basic bin size
        quotient = int( nlen / slen )
        binsizes = { server: quotient for server in servers }
        # Distribute remainder: the first (nlen % slen) servers get one extra
        remainder = nlen % slen
        for server in servers[ 0 : remainder ]:
            binsizes[ server ] += 1
        # Create binsize[ server ] tickets for each server
        tickets = sum( [ binsizes[ server ] * [ server ]
                         for server in servers ], [] )
        # And assign one ticket to each node
        return { node: ticket for node, ticket in zip( nodes, tickets ) }
    def calculatePlacement( self ):
        "Pre-calculate node placement"
        placement = {}
        # Create host-switch connectivity map,
        # associating host with last switch that it's
        # connected to
        switchFor = {}
        for src, dst in self.links:
            if src in self.hset and dst in self.sset:
                switchFor[ src ] = dst
            if dst in self.hset and src in self.sset:
                switchFor[ dst ] = src
        # Place switches
        placement = self.bin( self.switches, self.servers )
        # Place controllers and merge into placement dict
        placement.update( self.bin( self.controllers, self.servers ) )
        # Co-locate hosts with their switches
        for h in self.hosts:
            if h in placement:
                # Host is already placed - leave it there
                continue
            if h in switchFor:
                # Put the host on the same server as its switch
                placement[ h ] = placement[ switchFor[ h ] ]
            else:
                raise Exception(
                        "SwitchBinPlacer: cannot place isolated host " + h )
        return placement
    def place( self, node ):
        """Simple placement algorithm:
           place switches into evenly sized bins,
           and place hosts near their switches"""
        return self.placement[ node ]
class HostSwitchBinPlacer( Placer ):
    """Place switches *and hosts* into evenly-sized bins
       Note that this will usually result in cross-server
       links between hosts and switches"""
    def __init__( self, *args, **kwargs ):
        Placer.__init__( self, *args, **kwargs )
        # Calculate bin sizes (at least 1 so division below is safe)
        scount = len( self.servers )
        self.hbin = max( int( len( self.hosts ) / scount ), 1 )
        self.sbin = max( int( len( self.switches ) / scount ), 1 )
        self.cbin = max( int( len( self.controllers ) / scount ), 1 )
        info( 'scount:', scount )
        info( 'bins:', self.hbin, self.sbin, self.cbin, '\n' )
        self.servdict = dict( enumerate( self.servers ) )
        self.hset = frozenset( self.hosts )
        self.sset = frozenset( self.switches )
        self.cset = frozenset( self.controllers )
        # Next placement index for hosts, switches and controllers
        self.hind, self.sind, self.cind = 0, 0, 0
    def place( self, nodename ):
        """Simple placement algorithm:
           place nodes into evenly sized bins"""
        # Place nodes into bins
        # NOTE(review): '/' here relies on Python 2 floor division of ints;
        # under Python 3 it yields a float and the servdict lookup would
        # raise KeyError -- confirm intended interpreter before porting
        if nodename in self.hset:
            server = self.servdict[ self.hind / self.hbin ]
            self.hind += 1
        elif nodename in self.sset:
            server = self.servdict[ self.sind / self.sbin ]
            self.sind += 1
        elif nodename in self.cset:
            server = self.servdict[ self.cind / self.cbin ]
            self.cind += 1
        else:
            # Unknown node type: fall back to the first server
            info( 'warning: unknown node', nodename )
            server = self.servdict[ 0 ]
        return server
# The MininetCluster class is not strictly necessary.
# However, it has several purposes:
# 1. To set up ssh connection sharing/multiplexing
# 2. To pre-flight the system so that everything is more likely to work
# 3. To allow connection/connectivity monitoring
# 4. To support pluggable placement algorithms
class MininetCluster( Mininet ):
    "Cluster-enhanced version of Mininet class"
    # Default ssh command
    # BatchMode yes: don't ask for password
    # ForwardAgent yes: forward authentication credentials
    sshcmd = [ 'ssh', '-o', 'BatchMode=yes', '-o', 'ForwardAgent=yes' ]
    def __init__( self, *args, **kwargs ):
        """servers: a list of servers to use (note: include
           localhost or None to use local system as well)
           user: user name for server ssh
           placement: Placer() subclass"""
        # Cluster-specific defaults; caller kwargs override them
        params = { 'host': RemoteHost,
                   'switch': RemoteOVSSwitch,
                   'link': RemoteLink,
                   'precheck': True }
        params.update( kwargs )
        # Pop cluster-only options before forwarding to Mininet.__init__()
        servers = params.pop( 'servers', [ 'localhost' ] )
        # Normalize None entries to 'localhost'
        servers = [ s if s else 'localhost' for s in servers ]
        self.servers = servers
        self.serverIP = params.pop( 'serverIP', {} )
        if not self.serverIP:
            self.serverIP = { server: RemoteMixin.findServerIP( server )
                              for server in self.servers }
        self.user = params.pop( 'user', findUser() )
        if params.pop( 'precheck' ):
            self.precheck()
        self.connections = {}
        self.placement = params.pop( 'placement', SwitchBinPlacer )
        # Make sure control directory exists
        self.cdir = os.environ[ 'HOME' ] + '/.ssh/mn'
        errRun( [ 'mkdir', '-p', self.cdir ] )
        Mininet.__init__( self, *args, **params )
    def popen( self, cmd ):
        "Popen() for server connections"
        assert self  # please pylint
        # Ignore SIGINT while spawning so a ^C aimed at the foreground
        # process doesn't kill the new connection; restore afterwards
        old = signal( SIGINT, SIG_IGN )
        conn = Popen( cmd, stdin=PIPE, stdout=PIPE, close_fds=True )
        signal( SIGINT, old )
        return conn
    def baddLink( self, *args, **kwargs ):
        "break addlink for testing"
        pass
    def precheck( self ):
        """Pre-check to make sure connection works and that
           we can call sudo without a password.
           Exits the program if any server fails the check."""
        result = 0
        info( '*** Checking servers\n' )
        for server in self.servers:
            ip = self.serverIP[ server ]
            # Local server needs no ssh check
            if not server or server == 'localhost':
                continue
            info( server, '' )
            dest = '%s@%s' % ( self.user, ip )
            cmd = [ 'sudo', '-E', '-u', self.user ]
            cmd += self.sshcmd + [ '-n', dest, 'sudo true' ]
            debug( ' '.join( cmd ), '\n' )
            _out, _err, code = errRun( cmd )
            if code != 0:
                error( '\nstartConnection: server connection check failed '
                       'to %s using command:\n%s\n'
                        % ( server, ' '.join( cmd ) ) )
            result |= code
        if result:
            error( '*** Server precheck failed.\n'
                   '*** Make sure that the above ssh command works'
                   ' correctly.\n'
                   '*** You may also need to run mn -c on all nodes, and/or\n'
                   '*** use sudo -E.\n' )
            sys.exit( 1 )
        info( '\n' )
    def modifiedaddHost( self, *args, **kwargs ):
        "Slightly modify addHost"
        assert self  # please pylint
        kwargs[ 'splitInit' ] = True
        # NOTE(review): Mininet.addHost is called without an explicit
        # self; assumes the instance arrives via *args -- confirm
        return Mininet.addHost( *args, **kwargs )
    def placeNodes( self ):
        """Place nodes on servers (if they don't have a server), and
           start shell processes"""
        if not self.servers or not self.topo:
            # No shirt, no shoes, no service
            return
        nodes = self.topo.nodes()
        placer = self.placement( servers=self.servers,
                                 nodes=self.topo.nodes(),
                                 hosts=self.topo.hosts(),
                                 switches=self.topo.switches(),
                                 links=self.topo.links() )
        for node in nodes:
            config = self.topo.nodeInfo( node )
            # keep local server name consistent accross nodes
            if 'server' in config.keys() and config[ 'server' ] is None:
                config[ 'server' ] = 'localhost'
            # Respect an explicit server; otherwise ask the placer
            server = config.setdefault( 'server', placer.place( node ) )
            if server:
                config.setdefault( 'serverIP', self.serverIP[ server ] )
            info( '%s:%s ' % ( node, server ) )
            key = ( None, server )
            _dest, cfile, _conn = self.connections.get(
                        key, ( None, None, None ) )
            if cfile:
                # Reuse an existing ssh control path for this server
                config.setdefault( 'controlPath', cfile )
    def addController( self, *args, **kwargs ):
        "Patch to update IP address to global IP address"
        controller = Mininet.addController( self, *args, **kwargs )
        # Update IP address for controller that may not be local
        if ( isinstance( controller, Controller)
             and controller.IP() == '127.0.0.1'
             and ' eth0:' in controller.cmd( 'ip link show' ) ):
            Intf( 'eth0', node=controller ).updateIP()
        return controller
    def buildFromTopo( self, *args, **kwargs ):
        "Start network"
        info( '*** Placing nodes\n' )
        self.placeNodes()
        info( '\n' )
        Mininet.buildFromTopo( self, *args, **kwargs )
def testNsTunnels():
    "Test tunnels between nodes in namespaces"
    net = Mininet( host=RemoteHost, link=RemoteLink )
    # One local host plus one host on a remote server,
    # joined by a single (tunneled) link
    local = net.addHost( 'h1' )
    faraway = net.addHost( 'h2', server='ubuntu2' )
    net.addLink( local, faraway )
    net.start()
    net.pingAll()
    net.stop()
# Manual topology creation with net.add*()
#
# This shows how node options may be used to manage
# cluster placement using the net.add*() API
def testRemoteNet( remote='ubuntu2' ):
    """Test remote Node classes
       remote: name of the remote server to place h2/s2 on"""
    print '*** Remote Node Test'
    net = Mininet( host=RemoteHost, switch=RemoteOVSSwitch,
                   link=RemoteLink )
    c0 = net.addController( 'c0' )
    # Make sure controller knows its non-loopback address
    Intf( 'eth0', node=c0 ).updateIP()
    print "*** Creating local h1"
    h1 = net.addHost( 'h1' )
    print "*** Creating remote h2"
    h2 = net.addHost( 'h2', server=remote )
    print "*** Creating local s1"
    s1 = net.addSwitch( 's1' )
    print "*** Creating remote s2"
    s2 = net.addSwitch( 's2', server=remote )
    print "*** Adding links"
    # h1 - s1 - s2 - h2, with the s1-s2 link crossing servers
    net.addLink( h1, s1 )
    net.addLink( s1, s2 )
    net.addLink( h2, s2 )
    net.start()
    print 'Mininet is running on', quietRun( 'hostname' ).strip()
    for node in c0, h1, h2, s1, s2:
        print 'Node', node, 'is running on', node.cmd( 'hostname' ).strip()
    net.pingAll()
    CLI( net )
    net.stop()
# High-level/Topo API example
#
# This shows how existing Mininet topologies may be used in cluster
# mode by creating node placement functions and a controller which
# can be accessed remotely. This implements a very compatible version
# of cluster edition with a minimum of code!
# Placement configuration for the HostPlacer/SwitchPlacer example below:
# nodes named here are created on remoteServer; all others stay local
remoteHosts = [ 'h2' ]
remoteSwitches = [ 's2' ]
remoteServer = 'ubuntu2'
def HostPlacer( name, *args, **params ):
    "Custom Host() constructor which places hosts on servers"
    if name not in remoteHosts:
        # Not listed as remote: ordinary local host
        return Host( name, *args, **params )
    # Listed as remote: pin the host to the configured server
    return RemoteHost( name, *args, server=remoteServer, **params )
def SwitchPlacer( name, *args, **params ):
    "Custom Switch() constructor which places switches on servers"
    if name not in remoteSwitches:
        # Unlisted switches still use RemoteOVSSwitch,
        # but on the default (local) server
        return RemoteOVSSwitch( name, *args, **params )
    # Listed switches go to the configured remote server
    return RemoteOVSSwitch( name, *args, server=remoteServer, **params )
def ClusterController( *args, **kwargs):
    "Custom Controller() constructor which updates its eth0 IP address"
    ctrl = Controller( *args, **kwargs )
    # Advertise a non-loopback address so cluster switches can connect
    Intf( 'eth0', node=ctrl ).updateIP()
    return ctrl
def testRemoteTopo():
    "Test remote Node classes using Mininet()/Topo() API"
    # Two-host linear topology with placement-aware constructors
    network = Mininet( topo=LinearTopo( 2 ), host=HostPlacer,
                       switch=SwitchPlacer, link=RemoteLink,
                       controller=ClusterController )
    network.start()
    network.pingAll()
    network.stop()
# Need to test backwards placement, where each host is on
# a server other than its switch!! But seriously we could just
# do random switch placement rather than completely random
# host placement.
def testRemoteSwitches():
    "Test with local hosts and remote switches"
    # Round-robin placement over a local and a remote server
    cluster = MininetCluster( topo=TreeTopo( depth=4, fanout=2 ),
                              servers=[ 'localhost', 'ubuntu2'],
                              placement=RoundRobinPlacer )
    cluster.start()
    cluster.pingAll()
    cluster.stop()
#
# For testing and demo purposes it would be nice to draw the
# network graph and color it based on server.
# The MininetCluster() class integrates pluggable placement
# functions, for maximum ease of use. MininetCluster() also
# pre-flights and multiplexes server connections.
def testMininetCluster():
    "Test MininetCluster()"
    # Depth-3/fanout-3 tree spread over two servers with
    # switch-bin placement
    cluster = MininetCluster( topo=TreeTopo( depth=3, fanout=3 ),
                              servers=[ 'localhost', 'ubuntu2' ],
                              placement=SwitchBinPlacer )
    cluster.start()
    cluster.pingAll()
    cluster.stop()
def signalTest():
    "Make sure hosts are robust to signals"
    h = RemoteHost( 'h0', server='ubuntu1' )
    # Send SIGINT to the host shell; a robust shell should survive it
    h.shell.send_signal( SIGINT )
    h.shell.poll()
    # returncode is None while the process is still alive
    if h.shell.returncode is None:
        print 'OK: ', h, 'has not exited'
    else:
        print 'FAILURE:', h, 'exited with code', h.shell.returncode
    h.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
# testRemoteTopo()
# testRemoteNet()
# testMininetCluster()
# testRemoteSwitches()
signalTest()
| 33,412 | 35.51694 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/popenpoll.py
|
#!/usr/bin/python
"Monitor multiple hosts using popen()/pmonitor()"
from mininet.net import Mininet
from mininet.topo import SingleSwitchTopo
from mininet.util import pmonitor
from time import time
from signal import SIGINT
def pmonitorTest( N=3, seconds=10 ):
    """Run pings and monitor multiple hosts using pmonitor
       N: number of hosts
       seconds: how long to monitor output before interrupting"""
    topo = SingleSwitchTopo( N )
    net = Mininet( topo )
    net.start()
    hosts = net.hosts
    print "Starting test..."
    # Every host (including the server itself) pings hosts[ 0 ]
    server = hosts[ 0 ]
    popens = {}
    for h in hosts:
        popens[ h ] = h.popen('ping', server.IP() )
    print "Monitoring output for", seconds, "seconds"
    endTime = time() + seconds
    # pmonitor yields ( host, line ) pairs as output arrives
    for h, line in pmonitor( popens, timeoutms=500 ):
        if h:
            print '<%s>: %s' % ( h.name, line ),
        if time() >= endTime:
            # Time's up: interrupt all pings (loop ends once they exit)
            for p in popens.values():
                p.send_signal( SIGINT )
    net.stop()
if __name__ == '__main__':
pmonitorTest()
| 932 | 26.441176 | 57 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/consoles.py
|
#!/usr/bin/python
"""
consoles.py: bring up a bunch of miniature consoles on a virtual network
This demo shows how to monitor a set of nodes by using
Node's monitor() and Tkinter's createfilehandler().
We monitor nodes in a couple of ways:
- First, each individual node is monitored, and its output is added
to its console window
- Second, each time a console window gets iperf output, it is parsed
and accumulated. Once we have output for all consoles, a bar is
added to the bandwidth graph.
The consoles also support limited interaction:
- Pressing "return" in a console will send a command to it
- Pressing the console's title button will open up an xterm
Bob Lantz, April 2010
"""
import re
from Tkinter import Frame, Button, Label, Text, Scrollbar, Canvas, Wm, READABLE
from mininet.log import setLogLevel
from mininet.topolib import TreeNet
from mininet.term import makeTerms, cleanUpScreens
from mininet.util import quietRun
class Console( Frame ):
    "A simple console on a host."

    def __init__( self, parent, net, node, height=10, width=32, title='Node' ):
        Frame.__init__( self, parent )
        self.net = net
        self.node = node
        self.prompt = node.name + '# '
        self.height, self.width, self.title = height, width, title
        # Initialize widget styles
        self.buttonStyle = { 'font': 'Monaco 7' }
        self.textStyle = {
            'font': 'Monaco 7',
            'bg': 'black',
            'fg': 'green',
            'width': self.width,
            'height': self.height,
            'relief': 'sunken',
            'insertbackground': 'green',
            'highlightcolor': 'green',
            'selectforeground': 'black',
            'selectbackground': 'green'
        }
        # Set up widgets
        self.text = self.makeWidgets( )
        self.bindEvents()
        # Use a dumb terminal so the node shell avoids control sequences
        self.sendCmd( 'export TERM=dumb' )
        # Optional callback invoked as outputHook( console, text )
        self.outputHook = None

    def makeWidgets( self ):
        "Make a label, a text area, and a scroll bar."

        def newTerm( net=self.net, node=self.node, title=self.title ):
            "Pop up a new terminal window for a node."
            net.terms += makeTerms( [ node ], title )
        # Title button opens an xterm for the node when pressed
        label = Button( self, text=self.node.name, command=newTerm,
                        **self.buttonStyle )
        label.pack( side='top', fill='x' )
        text = Text( self, wrap='word', **self.textStyle )
        ybar = Scrollbar( self, orient='vertical', width=7,
                          command=text.yview )
        text.configure( yscrollcommand=ybar.set )
        text.pack( side='left', expand=True, fill='both' )
        ybar.pack( side='right', fill='y' )
        return text

    def bindEvents( self ):
        "Bind keyboard and file events."
        # The text widget handles regular key presses, but we
        # use special handlers for the following:
        self.text.bind( '<Return>', self.handleReturn )
        self.text.bind( '<Control-c>', self.handleInt )
        self.text.bind( '<KeyPress>', self.handleKey )
        # This is not well-documented, but it is the correct
        # way to trigger a file event handler from Tk's
        # event loop!
        self.tk.createfilehandler( self.node.stdout, READABLE,
                                   self.handleReadable )

    # We're not a terminal (yet?), so we ignore the following
    # control characters other than [\b\n\r]
    ignoreChars = re.compile( r'[\x00-\x07\x09\x0b\x0c\x0e-\x1f]+' )

    def append( self, text ):
        "Append something to our text frame."
        text = self.ignoreChars.sub( '', text )
        self.text.insert( 'end', text )
        self.text.mark_set( 'insert', 'end' )
        self.text.see( 'insert' )
        outputHook = lambda x, y: True  # make pylint happier
        if self.outputHook:
            outputHook = self.outputHook
        outputHook( self, text )

    def handleKey( self, event ):
        "If it's an interactive command, send it to the node."
        char = event.char
        if self.node.waiting:
            self.node.write( char )

    def handleReturn( self, event ):
        "Handle a carriage return."
        cmd = self.text.get( 'insert linestart', 'insert lineend' )
        # Send it immediately, if "interactive" command
        if self.node.waiting:
            self.node.write( event.char )
            return
        # Otherwise send the whole line to the shell,
        # stripping a leading prompt if present
        pos = cmd.find( self.prompt )
        if pos >= 0:
            cmd = cmd[ pos + len( self.prompt ): ]
        self.sendCmd( cmd )

    # Callback ignores event
    def handleInt( self, _event=None ):
        "Handle control-c."
        self.node.sendInt()

    def sendCmd( self, cmd ):
        "Send a command to our node."
        # Only if the node isn't already busy with a command
        if not self.node.waiting:
            self.node.sendCmd( cmd )

    def handleReadable( self, _fds, timeoutms=None ):
        "Handle file readable event."
        data = self.node.monitor( timeoutms )
        self.append( data )
        if not self.node.waiting:
            # Print prompt
            self.append( self.prompt )

    def waiting( self ):
        "Are we waiting for output?"
        return self.node.waiting

    def waitOutput( self ):
        "Wait for any remaining output."
        while self.node.waiting:
            # A bit of a trade-off here...
            # NOTE(review): self is passed as the (ignored) _fds
            # argument of handleReadable -- confirm intentional
            self.handleReadable( self, timeoutms=1000)
            self.update()

    def clear( self ):
        "Clear all of our text."
        self.text.delete( '1.0', 'end' )
class Graph( Frame ):
    "Graph that we can add bars to over time."

    def __init__( self, parent=None, bg = 'white', gheight=200, gwidth=500,
                  barwidth=10, ymax=3.5,):
        Frame.__init__( self, parent )
        self.bg = bg
        self.gheight = gheight
        self.gwidth = gwidth
        self.barwidth = barwidth
        # ymax: top of the y axis (bars are scaled against this)
        self.ymax = float( ymax )
        # xpos: index of the next bar to draw
        self.xpos = 0
        # Create everything
        self.title, self.scale, self.graph = self.createWidgets()
        self.updateScrollRegions()
        self.yview( 'moveto', '1.0' )

    def createScale( self ):
        "Create a and return a new canvas with scale markers."
        height = float( self.gheight )
        width = 25
        ymax = self.ymax
        scale = Canvas( self, width=width, height=height,
                        background=self.bg )
        opts = { 'fill': 'red' }
        # Draw scale line
        scale.create_line( width - 1, height, width - 1, 0, **opts )
        # Draw ticks and numbers
        for y in range( 0, int( ymax + 1 ) ):
            # Canvas y axis grows downward, so invert
            ypos = height * (1 - float( y ) / ymax )
            scale.create_line( width, ypos, width - 10, ypos, **opts )
            scale.create_text( 10, ypos, text=str( y ), **opts )
        return scale

    def updateScrollRegions( self ):
        "Update graph and scale scroll regions."
        ofs = 20
        height = self.gheight + ofs
        self.graph.configure( scrollregion=( 0, -ofs,
                              self.xpos * self.barwidth, height ) )
        self.scale.configure( scrollregion=( 0, -ofs, 0, height ) )

    def yview( self, *args ):
        "Scroll both scale and graph."
        self.graph.yview( *args )
        self.scale.yview( *args )

    def createWidgets( self ):
        "Create initial widget set."
        # Objects
        title = Label( self, text='Bandwidth (Gb/s)', bg=self.bg )
        width = self.gwidth
        height = self.gheight
        scale = self.createScale()
        graph = Canvas( self, width=width, height=height, background=self.bg)
        xbar = Scrollbar( self, orient='horizontal', command=graph.xview )
        ybar = Scrollbar( self, orient='vertical', command=self.yview )
        graph.configure( xscrollcommand=xbar.set, yscrollcommand=ybar.set,
                         scrollregion=(0, 0, width, height ) )
        scale.configure( yscrollcommand=ybar.set )
        # Layout: title on top; scale | graph | ybar; xbar below
        title.grid( row=0, columnspan=3, sticky='new')
        scale.grid( row=1, column=0, sticky='nsew' )
        graph.grid( row=1, column=1, sticky='nsew' )
        ybar.grid( row=1, column=2, sticky='ns' )
        xbar.grid( row=2, column=0, columnspan=2, sticky='ew' )
        self.rowconfigure( 1, weight=1 )
        self.columnconfigure( 1, weight=1 )
        return title, scale, graph

    def addBar( self, yval ):
        "Add a new bar to our graph."
        percent = yval / self.ymax
        c = self.graph
        x0 = self.xpos * self.barwidth
        x1 = x0 + self.barwidth
        y0 = self.gheight
        y1 = ( 1 - percent ) * self.gheight
        c.create_rectangle( x0, y0, x1, y1, fill='green' )
        self.xpos += 1
        self.updateScrollRegions()
        # Keep the newest bar in view
        self.graph.xview( 'moveto', '1.0' )

    def clear( self ):
        "Clear graph contents."
        self.graph.delete( 'all' )
        self.xpos = 0

    def test( self ):
        "Add a bar for testing purposes."
        ms = 1000
        if self.xpos < 10:
            # NOTE(review): xpos / 10 is Python 2 floor division, so
            # every test bar has height 0 -- confirm intended
            self.addBar( self.xpos / 10 * self.ymax )
            self.after( ms, self.test )

    def setTitle( self, text ):
        "Set graph title"
        self.title.configure( text=text, font='Helvetica 9 bold' )
class ConsoleApp( Frame ):
    "Simple Tk consoles for Mininet."

    menuStyle = { 'font': 'Geneva 7 bold' }

    def __init__( self, net, parent=None, width=4 ):
        """net: the Mininet network to attach consoles to
           width: number of console columns per group"""
        Frame.__init__( self, parent )
        self.top = self.winfo_toplevel()
        self.top.title( 'Mininet' )
        self.net = net
        self.menubar = self.createMenuBar()
        cframe = self.cframe = Frame( self )
        self.consoles = {}  # consoles themselves
        titles = {
            'hosts': 'Host',
            'switches': 'Switch',
            'controllers': 'Controller'
        }
        # One console group per node category
        for name in titles:
            nodes = getattr( net, name )
            frame, consoles = self.createConsoles(
                cframe, nodes, width, titles[ name ] )
            self.consoles[ name ] = Object( frame=frame, consoles=consoles )
        self.selected = None
        self.select( 'hosts' )
        self.cframe.pack( expand=True, fill='both' )
        cleanUpScreens()
        # Close window gracefully
        Wm.wm_protocol( self.top, name='WM_DELETE_WINDOW', func=self.quit )
        # Initialize graph
        graph = Graph( cframe )
        self.consoles[ 'graph' ] = Object( frame=graph, consoles=[ graph ] )
        self.graph = graph
        self.graphVisible = False
        # Iperf bandwidth accumulation state (see updateGraph)
        self.updates = 0
        self.hostCount = len( self.consoles[ 'hosts' ].consoles )
        self.bw = 0
        self.pack( expand=True, fill='both' )

    def updateGraph( self, _console, output ):
        "Update our graph."
        # Parse an iperf bandwidth report line
        m = re.search( r'(\d+.?\d*) ([KMG]?bits)/sec', output )
        if not m:
            return
        val, units = float( m.group( 1 ) ), m.group( 2 )
        # convert to Gbps
        if units[0] == 'M':
            val *= 10 ** -3
        elif units[0] == 'K':
            val *= 10 ** -6
        elif units[0] == 'b':
            val *= 10 ** -9
        self.updates += 1
        self.bw += val
        # Once every host has reported, add one aggregate bar
        if self.updates >= self.hostCount:
            self.graph.addBar( self.bw )
            self.bw = 0
            self.updates = 0

    def setOutputHook( self, fn=None, consoles=None ):
        "Register fn as output hook [on specific consoles.]"
        if consoles is None:
            consoles = self.consoles[ 'hosts' ].consoles
        for console in consoles:
            console.outputHook = fn

    def createConsoles( self, parent, nodes, width, title ):
        "Create a grid of consoles in a frame."
        f = Frame( parent )
        # Create consoles
        consoles = []
        index = 0
        for node in nodes:
            console = Console( f, self.net, node, title=title )
            consoles.append( console )
            # NOTE(review): index / width relies on Python 2 floor
            # division -- confirm interpreter before porting
            row = index / width
            column = index % width
            console.grid( row=row, column=column, sticky='nsew' )
            index += 1
            f.rowconfigure( row, weight=1 )
            f.columnconfigure( column, weight=1 )
        return f, consoles

    def select( self, groupName ):
        "Select a group of consoles to display."
        if self.selected is not None:
            self.selected.frame.pack_forget()
        self.selected = self.consoles[ groupName ]
        self.selected.frame.pack( expand=True, fill='both' )

    def createMenuBar( self ):
        "Create and return a menu (really button) bar."
        f = Frame( self )
        buttons = [
            ( 'Hosts', lambda: self.select( 'hosts' ) ),
            ( 'Switches', lambda: self.select( 'switches' ) ),
            ( 'Controllers', lambda: self.select( 'controllers' ) ),
            ( 'Graph', lambda: self.select( 'graph' ) ),
            ( 'Ping', self.ping ),
            ( 'Iperf', self.iperf ),
            ( 'Interrupt', self.stop ),
            ( 'Clear', self.clear ),
            ( 'Quit', self.quit )
        ]
        for name, cmd in buttons:
            b = Button( f, text=name, command=cmd, **self.menuStyle )
            b.pack( side='left' )
        f.pack( padx=4, pady=4, fill='x' )
        return f

    def clear( self ):
        "Clear selection."
        for console in self.selected.consoles:
            console.clear()

    def waiting( self, consoles=None ):
        "Are any of our hosts waiting for output?"
        if consoles is None:
            consoles = self.consoles[ 'hosts' ].consoles
        for console in consoles:
            if console.waiting():
                return True
        return False

    def ping( self ):
        "Tell each host to ping the next one."
        consoles = self.consoles[ 'hosts' ].consoles
        # Don't start if any host is still busy
        if self.waiting( consoles ):
            return
        count = len( consoles )
        i = 0
        # Each host pings its successor (ring order)
        for console in consoles:
            i = ( i + 1 ) % count
            ip = consoles[ i ].node.IP()
            console.sendCmd( 'ping ' + ip )

    def iperf( self ):
        "Tell each host to iperf to the next one."
        consoles = self.consoles[ 'hosts' ].consoles
        if self.waiting( consoles ):
            return
        count = len( consoles )
        self.setOutputHook( self.updateGraph )
        for console in consoles:
            # Sometimes iperf -sD doesn't return,
            # so we run it in the background instead
            console.node.cmd( 'iperf -s &' )
        i = 0
        # Each host measures bandwidth to its successor (ring order)
        for console in consoles:
            i = ( i + 1 ) % count
            ip = consoles[ i ].node.IP()
            console.sendCmd( 'iperf -t 99999 -i 1 -c ' + ip )

    def stop( self, wait=True ):
        "Interrupt all hosts."
        consoles = self.consoles[ 'hosts' ].consoles
        for console in consoles:
            console.handleInt()
        if wait:
            for console in consoles:
                console.waitOutput()
        self.setOutputHook( None )
        # Shut down any iperfs that might still be running
        quietRun( 'killall -9 iperf' )

    def quit( self ):
        "Stop everything and quit."
        self.stop( wait=False)
        Frame.quit( self )
# Make it easier to construct and assign objects
def assign( obj, **kwargs ):
    "Set a bunch of fields in an object."
    # Bulk-assign the keyword arguments as attributes
    obj.__dict__.update( **kwargs )
class Object( object ):
    "Generic object you can stuff junk into."
    def __init__( self, **kwargs ):
        # Delegate so construction and later field-setting
        # share one code path
        assign( self, **kwargs )
if __name__ == '__main__':
setLogLevel( 'info' )
network = TreeNet( depth=2, fanout=4 )
network.start()
app = ConsoleApp( network, width=4 )
app.mainloop()
network.stop()
| 15,612 | 32.432548 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/nat.py
|
#!/usr/bin/python
"""
Example to create a Mininet topology and connect it to the internet via NAT
"""
from mininet.cli import CLI
from mininet.log import lg
from mininet.topolib import TreeNet
if __name__ == '__main__':
    lg.setLogLevel( 'info')
    # Small tree network: one switch, four hosts
    net = TreeNet( depth=1, fanout=4 )
    # Add NAT connectivity
    net.addNAT().configDefault()
    net.start()
    print "*** Hosts are running and should have internet connectivity"
    print "*** Type 'exit' or control-D to shut down network"
    CLI( net )
    # Shut down NAT
    net.stop()
| 550 | 24.045455 | 75 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/mobility.py
|
#!/usr/bin/python
"""
Simple example of Mobility with Mininet
(aka enough rope to hang yourself.)
We move a host from s1 to s2, s2 to s3, and then back to s1.
Gotchas:
The reference controller doesn't support mobility, so we need to
manually flush the switch flow tables!
Good luck!
to-do:
- think about wifi/hub behavior
- think about clearing last hop - why doesn't that work?
"""
from mininet.net import Mininet
from mininet.node import OVSSwitch
from mininet.topo import LinearTopo
from mininet.log import output, warn
from random import randint
class MobilitySwitch( OVSSwitch ):
    "Switch that can reattach and rename interfaces"

    def delIntf( self, intf ):
        "Remove (and detach) an interface"
        # Remove all three bookkeeping entries for the interface
        port = self.ports[ intf ]
        del self.ports[ intf ]
        del self.intfs[ port ]
        del self.nameToIntf[ intf.name ]

    def addIntf( self, intf, rename=False, **kwargs ):
        "Add (and reparent) an interface"
        OVSSwitch.addIntf( self, intf, **kwargs )
        # Reparent the interface to this switch
        intf.node = self
        if rename:
            self.renameIntf( intf )

    def attach( self, intf ):
        "Attach an interface and set its port"
        port = self.ports[ intf ]
        if port:
            if self.isOldOVS():
                # Old OVS can't request a specific OF port number
                self.cmd( 'ovs-vsctl add-port', self, intf )
            else:
                self.cmd( 'ovs-vsctl add-port', self, intf,
                          '-- set Interface', intf,
                          'ofport_request=%s' % port )
            self.validatePort( intf )

    def validatePort( self, intf ):
        "Validate intf's OF port number"
        ofport = int( self.cmd( 'ovs-vsctl get Interface', intf,
                                'ofport' ) )
        # Warn (don't fail) if OVS assigned a different port
        if ofport != self.ports[ intf ]:
            warn( 'WARNING: ofport for', intf, 'is actually', ofport,
                  '\n' )

    def renameIntf( self, intf, newname='' ):
        "Rename an interface (to its canonical name)"
        # Interface must be down while its kernel name changes
        intf.ifconfig( 'down' )
        if not newname:
            newname = '%s-eth%d' % ( self.name, self.ports[ intf ] )
        intf.cmd( 'ip link set', intf, 'name', newname )
        del self.nameToIntf[ intf.name ]
        intf.name = newname
        self.nameToIntf[ intf.name ] = intf
        intf.ifconfig( 'up' )

    def moveIntf( self, intf, switch, port=None, rename=True ):
        "Move one of our interfaces to another switch"
        # Detach from this switch, then re-add/attach on the other
        self.detach( intf )
        self.delIntf( intf )
        switch.addIntf( intf, port=port, rename=rename )
        switch.attach( intf )
def printConnections( switches ):
    "Compactly print connected nodes to each switch"
    for sw in switches:
        output( '%s: ' % sw )
        for intf in sw.intfList():
            link = intf.link
            if not link:
                continue
            # The link endpoint that is not this switch
            peer = link.intf2 if link.intf1.node == sw else link.intf1
            output( '%s(%s) ' % ( peer.node, sw.ports[ intf ] ) )
        output( '\n' )
def moveHost( host, oldSwitch, newSwitch, newPort=None ):
    "Move a host from old switch to new switch"
    # First (host-side, switch-side) interface pair on the old link
    connections = host.connectionsTo( oldSwitch )
    hintf, sintf = connections[ 0 ]
    oldSwitch.moveIntf( sintf, newSwitch, port=newPort )
    return hintf, sintf
def mobilityTest():
    "A simple test of mobility"
    print '* Simple mobility test'
    net = Mininet( topo=LinearTopo( 3 ), switch=MobilitySwitch )
    print '* Starting network:'
    net.start()
    printConnections( net.switches )
    print '* Testing network'
    net.pingAll()
    print '* Identifying switch interface for h1'
    h1, old = net.get( 'h1', 's1' )
    # Move h1: s1 -> s2, s2 -> s3, s3 -> s1
    for s in 2, 3, 1:
        new = net[ 's%d' % s ]
        port = randint( 10, 20 )
        print '* Moving', h1, 'from', old, 'to', new, 'port', port
        hintf, sintf = moveHost( h1, old, new, newPort=port )
        print '*', hintf, 'is now connected to', sintf
        # The reference controller doesn't handle mobility,
        # so flush stale flows manually
        print '* Clearing out old flows'
        for sw in net.switches:
            sw.dpctl( 'del-flows' )
        print '* New network:'
        printConnections( net.switches )
        print '* Testing connectivity:'
        net.pingAll()
        old = new
    net.stop()
if __name__ == '__main__':
mobilityTest()
| 4,198 | 30.103704 | 71 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/multilink.py
|
#!/usr/bin/python
"""
This is a simple example that demonstrates multiple links
between nodes.
"""
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.net import Mininet
from mininet.topo import Topo
def runMultiLink():
    "Create and run multiple link network"
    # Two hosts joined to one switch by parallel links
    net = Mininet( topo=simpleMultiLinkTopo( n=2 ) )
    net.start()
    CLI( net )
    net.stop()
class simpleMultiLinkTopo( Topo ):
    "Simple topology with multiple links"

    def __init__( self, n, **kwargs ):
        """n: number of parallel links from each host to the switch"""
        Topo.__init__( self, **kwargs )
        h1 = self.addHost( 'h1' )
        h2 = self.addHost( 'h2' )
        s1 = self.addSwitch( 's1' )
        # Connect each host to the switch n times
        for _ in range( n ):
            self.addLink( s1, h1 )
            self.addLink( s1, h2 )
if __name__ == '__main__':
setLogLevel( 'info' )
runMultiLink()
| 834 | 21.567568 | 59 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/scratchnetuser.py
|
#!/usr/bin/python
"""
Build a simple network from scratch, using mininet primitives.
This is more complicated than using the higher-level classes,
but it exposes the configuration details and allows customization.
For most tasks, the higher-level API will be preferable.
This version uses the user datapath and an explicit control network.
"""
from mininet.net import Mininet
from mininet.node import Node
from mininet.link import Link
from mininet.log import setLogLevel, info
def linkIntfs( node1, node2 ):
    "Create link from node1 to node2 and return intfs"
    # Build the link, then hand back its two endpoints
    created = Link( node1, node2 )
    return created.intf1, created.intf2
def scratchNetUser( cname='controller', cargs='ptcp:' ):
    """Create network from scratch using user switch.
       cname: controller executable name
       cargs: controller arguments"""
    # It's not strictly necessary for the controller and switches
    # to be in separate namespaces. For performance, they probably
    # should be in the root namespace. However, it's interesting to
    # see how they could work even if they are in separate namespaces.
    info( '*** Creating Network\n' )
    controller = Node( 'c0' )
    switch = Node( 's0')
    h0 = Node( 'h0' )
    h1 = Node( 'h1' )
    # Control link plus one link per host
    cintf, sintf = linkIntfs( controller, switch )
    h0intf, sintf1 = linkIntfs( h0, switch )
    h1intf, sintf2 = linkIntfs( h1, switch )
    info( '*** Configuring control network\n' )
    controller.setIP( '10.0.123.1/24', intf=cintf )
    switch.setIP( '10.0.123.2/24', intf=sintf)
    info( '*** Configuring hosts\n' )
    h0.setIP( '192.168.123.1/24', intf=h0intf )
    h1.setIP( '192.168.123.2/24', intf=h1intf )
    info( '*** Network state:\n' )
    for node in controller, switch, h0, h1:
        info( str( node ) + '\n' )
    info( '*** Starting controller and user datapath\n' )
    controller.cmd( cname + ' ' + cargs + '&' )
    switch.cmd( 'ifconfig lo 127.0.0.1' )
    # NOTE(review): 'for i in sintf1, sintf2' inside a list
    # comprehension is Python 2-only syntax
    intfs = [ str( i ) for i in sintf1, sintf2 ]
    switch.cmd( 'ofdatapath -i ' + ','.join( intfs ) + ' ptcp: &' )
    switch.cmd( 'ofprotocol tcp:' + controller.IP() + ' tcp:localhost &' )
    info( '*** Running test\n' )
    h0.cmdPrint( 'ping -c1 ' + h1.IP() )
    info( '*** Stopping network\n' )
    controller.cmd( 'kill %' + cname )
    switch.cmd( 'kill %ofdatapath' )
    switch.cmd( 'kill %ofprotocol' )
    switch.deleteIntfs()
    info( '\n' )
if __name__ == '__main__':
setLogLevel( 'info' )
info( '*** Scratch network demo (user datapath)\n' )
Mininet.init()
scratchNetUser()
| 2,455 | 32.189189 | 74 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/simpleperf.py
|
#!/usr/bin/python
"""
Simple example of setting network and CPU parameters
NOTE: link params limit BW, add latency, and loss.
There is a high chance that pings WILL fail and that
iperf will hang indefinitely if the TCP handshake fails
to complete.
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel
from sys import argv
class SingleSwitchTopo(Topo):
    "Single switch connected to n hosts."

    def __init__(self, n=2, lossy=True, **opts):
        """n: number of hosts
           lossy: add 10% packet loss to each link?"""
        Topo.__init__(self, **opts)
        switch = self.addSwitch('s1')
        for h in range(n):
            # Each host gets 50%/n of system CPU
            host = self.addHost('h%s' % (h + 1),
                                cpu=.5 / n)
            # 10 Mbps, 5ms delay; 10% packet loss when lossy, else none.
            # (Merged the previously duplicated branches that differed
            # only in the loss value.)
            self.addLink(host, switch, bw=10, delay='5ms',
                         loss=10 if lossy else 0, use_htb=True)
def perfTest( lossy=True ):
"Create network and run simple performance test"
topo = SingleSwitchTopo( n=4, lossy=lossy )
net = Mininet( topo=topo,
host=CPULimitedHost, link=TCLink,
autoStaticArp=True )
net.start()
print "Dumping host connections"
dumpNodeConnections(net.hosts)
print "Testing bandwidth between h1 and h4"
h1, h4 = net.getNodeByName('h1', 'h4')
net.iperf( ( h1, h4 ), l4Type='UDP' )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
# Prevent test_simpleperf from failing due to packet loss
perfTest( lossy=( 'testmode' not in argv ) )
| 1,888 | 31.568966 | 71 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/vlanhost.py
|
#!/usr/bin/env python
"""
vlanhost.py: Host subclass that uses a VLAN tag for the default interface.
Dependencies:
This class depends on the "vlan" package
$ sudo apt-get install vlan
Usage (example uses VLAN ID=1000):
From the command line:
sudo mn --custom vlanhost.py --host vlan,vlan=1000
From a script (see exampleUsage function below):
from functools import partial
from vlanhost import VLANHost
....
host = partial( VLANHost, vlan=1000 )
net = Mininet( host=host, ... )
Directly running this script:
sudo python vlanhost.py 1000
"""
from mininet.node import Host
from mininet.topo import Topo
from mininet.util import quietRun
from mininet.log import error
class VLANHost( Host ):
"Host connected to VLAN interface"
def config( self, vlan=100, **params ):
"""Configure VLANHost according to (optional) parameters:
vlan: VLAN ID for default interface"""
r = super( VLANHost, self ).config( **params )
intf = self.defaultIntf()
# remove IP from default, "physical" interface
self.cmd( 'ifconfig %s inet 0' % intf )
# create VLAN interface
self.cmd( 'vconfig add %s %d' % ( intf, vlan ) )
# assign the host's IP to the VLAN interface
self.cmd( 'ifconfig %s.%d inet %s' % ( intf, vlan, params['ip'] ) )
# update the intf name and host's intf map
newName = '%s.%d' % ( intf, vlan )
# update the (Mininet) interface to refer to VLAN interface name
intf.name = newName
# add VLAN interface to host's name to intf map
self.nameToIntf[ newName ] = intf
return r
hosts = { 'vlan': VLANHost }
def exampleAllHosts( vlan ):
"""Simple example of how VLANHost can be used in a script"""
# This is where the magic happens...
host = partial( VLANHost, vlan=vlan )
# vlan (type: int): VLAN ID to be used by all hosts
# Start a basic network using our VLANHost
topo = SingleSwitchTopo( k=2 )
net = Mininet( host=host, topo=topo )
net.start()
CLI( net )
net.stop()
# pylint: disable=arguments-differ
class VLANStarTopo( Topo ):
"""Example topology that uses host in multiple VLANs
The topology has a single switch. There are k VLANs with
n hosts in each, all connected to the single switch. There
are also n hosts that are not in any VLAN, also connected to
the switch."""
def build( self, k=2, n=2, vlanBase=100 ):
s1 = self.addSwitch( 's1' )
for i in range( k ):
vlan = vlanBase + i
for j in range(n):
name = 'h%d-%d' % ( j+1, vlan )
h = self.addHost( name, cls=VLANHost, vlan=vlan )
self.addLink( h, s1 )
for j in range( n ):
h = self.addHost( 'h%d' % (j+1) )
self.addLink( h, s1 )
def exampleCustomTags():
"""Simple example that exercises VLANStarTopo"""
net = Mininet( topo=VLANStarTopo() )
net.start()
CLI( net )
net.stop()
if __name__ == '__main__':
import sys
from functools import partial
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.topo import SingleSwitchTopo
from mininet.log import setLogLevel
setLogLevel( 'info' )
if not quietRun( 'which vconfig' ):
error( "Cannot find command 'vconfig'\nThe package",
"'vlan' is required in Ubuntu or Debian,",
"or 'vconfig' in Fedora\n" )
exit()
if len( sys.argv ) >= 2:
exampleAllHosts( vlan=int( sys.argv[ 1 ] ) )
else:
exampleCustomTags()
| 3,679 | 28.44 | 75 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/treeping64.py
|
#!/usr/bin/python
"Create a 64-node tree network, and test connectivity using ping."
from mininet.log import setLogLevel
from mininet.node import UserSwitch, OVSKernelSwitch # , KernelSwitch
from mininet.topolib import TreeNet
def treePing64():
"Run ping test on 64-node tree networks."
results = {}
switches = { # 'reference kernel': KernelSwitch,
'reference user': UserSwitch,
'Open vSwitch kernel': OVSKernelSwitch }
for name in switches:
print "*** Testing", name, "datapath"
switch = switches[ name ]
network = TreeNet( depth=2, fanout=8, switch=switch )
result = network.run( network.pingAll )
results[ name ] = result
print
print "*** Tree network ping results:"
for name in switches:
print "%s: %d%% packet loss" % ( name, results[ name ] )
print
if __name__ == '__main__':
setLogLevel( 'info' )
treePing64()
| 950 | 27.818182 | 70 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/numberedports.py
|
#!/usr/bin/python
"""
Create a network with 5 hosts, numbered 1-4 and 9.
Validate that the port numbers match to the interface name,
and that the ovs ports match the mininet ports.
"""
from mininet.net import Mininet
from mininet.node import Controller
from mininet.log import setLogLevel, info, warn
def validatePort( switch, intf ):
"Validate intf's OF port number"
ofport = int( switch.cmd( 'ovs-vsctl get Interface', intf,
'ofport' ) )
if ofport != switch.ports[ intf ]:
warn( 'WARNING: ofport for', intf, 'is actually', ofport, '\n' )
return 0
else:
return 1
def testPortNumbering():
"""Test port numbering:
Create a network with 5 hosts (using Mininet's
mid-level API) and check that implicit and
explicit port numbering works as expected."""
net = Mininet( controller=Controller )
info( '*** Adding controller\n' )
net.addController( 'c0' )
info( '*** Adding hosts\n' )
h1 = net.addHost( 'h1', ip='10.0.0.1' )
h2 = net.addHost( 'h2', ip='10.0.0.2' )
h3 = net.addHost( 'h3', ip='10.0.0.3' )
h4 = net.addHost( 'h4', ip='10.0.0.4' )
h5 = net.addHost( 'h5', ip='10.0.0.5' )
info( '*** Adding switch\n' )
s1 = net.addSwitch( 's1' )
info( '*** Creating links\n' )
# host 1-4 connect to ports 1-4 on the switch
net.addLink( h1, s1 )
net.addLink( h2, s1 )
net.addLink( h3, s1 )
net.addLink( h4, s1 )
# specify a different port to connect host 5 to on the switch.
net.addLink( h5, s1, port1=1, port2= 9)
info( '*** Starting network\n' )
net.start()
# print the interfaces and their port numbers
info( '\n*** printing and validating the ports '
'running on each interface\n' )
for intfs in s1.intfList():
if not intfs.name == "lo":
info( intfs, ': ', s1.ports[intfs],
'\n' )
info( 'Validating that', intfs,
'is actually on port', s1.ports[intfs], '... ' )
if validatePort( s1, intfs ):
info( 'Validated.\n' )
print '\n'
# test the network with pingall
net.pingAll()
print '\n'
info( '*** Stopping network' )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
testPortNumbering()
| 2,330 | 28.1375 | 72 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/controllers2.py
|
#!/usr/bin/python
"""
This example creates a multi-controller network from semi-scratch by
using the net.add*() API and manually starting the switches and controllers.
This is the "mid-level" API, which is an alternative to the "high-level"
Topo() API which supports parametrized topology classes.
Note that one could also create a custom switch class and pass it into
the Mininet() constructor.
"""
from mininet.net import Mininet
from mininet.node import Controller, OVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel
def multiControllerNet():
"Create a network from semi-scratch with multiple controllers."
net = Mininet( controller=Controller, switch=OVSSwitch )
print "*** Creating (reference) controllers"
c1 = net.addController( 'c1', port=6633 )
c2 = net.addController( 'c2', port=6634 )
print "*** Creating switches"
s1 = net.addSwitch( 's1' )
s2 = net.addSwitch( 's2' )
print "*** Creating hosts"
hosts1 = [ net.addHost( 'h%d' % n ) for n in 3, 4 ]
hosts2 = [ net.addHost( 'h%d' % n ) for n in 5, 6 ]
print "*** Creating links"
for h in hosts1:
net.addLink( s1, h )
for h in hosts2:
net.addLink( s2, h )
net.addLink( s1, s2 )
print "*** Starting network"
net.build()
c1.start()
c2.start()
s1.start( [ c1 ] )
s2.start( [ c2 ] )
print "*** Testing network"
net.pingAll()
print "*** Running CLI"
CLI( net )
print "*** Stopping network"
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' ) # for CLI output
multiControllerNet()
| 1,612 | 25.016129 | 76 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/bind.py
|
#!/usr/bin/python
"""
bind.py: Bind mount example
This creates hosts with private directories that the user specifies.
These hosts may have persistent directories that will be available
across multiple mininet session, or temporary directories that will
only last for one mininet session. To specify a persistent
directory, add a tuple to a list of private directories:
[ ( 'directory to be mounted on', 'directory to be mounted' ) ]
String expansion may be used to create a directory template for
each host. To do this, add a %(name)s in place of the host name
when creating your list of directories:
[ ( '/var/run', '/tmp/%(name)s/var/run' ) ]
If no persistent directory is specified, the directories will default
to temporary private directories. To do this, simply create a list of
directories to be made private. A tmpfs will then be mounted on them.
You may use both temporary and persistent directories at the same
time. In the following privateDirs string, each host will have a
persistent directory in the root filesystem at
"/tmp/(hostname)/var/run" mounted on "/var/run". Each host will also
have a temporary private directory mounted on "/var/log".
[ ( '/var/run', '/tmp/%(name)s/var/run' ), '/var/log' ]
This example has both persistent directories mounted on '/var/log'
and '/var/run'. It also has a temporary private directory mounted
on '/var/mn'
"""
from mininet.net import Mininet
from mininet.node import Host
from mininet.cli import CLI
from mininet.topo import SingleSwitchTopo
from mininet.log import setLogLevel, info
from functools import partial
# Sample usage
def testHostWithPrivateDirs():
"Test bind mounts"
topo = SingleSwitchTopo( 10 )
privateDirs = [ ( '/var/log', '/tmp/%(name)s/var/log' ),
( '/var/run', '/tmp/%(name)s/var/run' ),
'/var/mn' ]
host = partial( Host,
privateDirs=privateDirs )
net = Mininet( topo=topo, host=host )
net.start()
directories = [ directory[ 0 ] if isinstance( directory, tuple )
else directory for directory in privateDirs ]
info( 'Private Directories:', directories, '\n' )
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
testHostWithPrivateDirs()
info( 'Done.\n')
| 2,310 | 32.985294 | 69 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/clustercli.py
|
#!/usr/bin/python
"CLI for Mininet Cluster Edition prototype demo"
from mininet.cli import CLI
from mininet.log import output, error
# pylint: disable=global-statement
nx, graphviz_layout, plt = None, None, None # Will be imported on demand
class ClusterCLI( CLI ):
"CLI with additional commands for Cluster Edition demo"
@staticmethod
def colorsFor( seq ):
"Return a list of background colors for a sequence"
colors = [ 'red', 'lightgreen', 'cyan', 'yellow', 'orange',
'magenta', 'pink', 'grey', 'brown',
'white' ]
slen, clen = len( seq ), len( colors )
reps = max( 1, slen / clen )
colors = colors * reps
colors = colors[ 0 : slen ]
return colors
def do_plot( self, _line ):
"Plot topology colored by node placement"
# Import networkx if needed
global nx, plt
if not nx:
try:
# pylint: disable=import-error
import networkx
nx = networkx # satisfy pylint
from matplotlib import pyplot
plt = pyplot # satisfiy pylint
import pygraphviz
assert pygraphviz # silence pyflakes
# pylint: enable=import-error
except ImportError:
error( 'plot requires networkx, matplotlib and pygraphviz - '
'please install them and try again\n' )
return
# Make a networkx Graph
g = nx.Graph()
mn = self.mn
servers, hosts, switches = mn.servers, mn.hosts, mn.switches
nodes = hosts + switches
g.add_nodes_from( nodes )
links = [ ( link.intf1.node, link.intf2.node )
for link in self.mn.links ]
g.add_edges_from( links )
# Pick some shapes and colors
# shapes = hlen * [ 's' ] + slen * [ 'o' ]
color = dict( zip( servers, self.colorsFor( servers ) ) )
# Plot it!
pos = nx.graphviz_layout( g )
opts = { 'ax': None, 'font_weight': 'bold',
'width': 2, 'edge_color': 'darkblue' }
hcolors = [ color[ getattr( h, 'server', 'localhost' ) ]
for h in hosts ]
scolors = [ color[ getattr( s, 'server', 'localhost' ) ]
for s in switches ]
nx.draw_networkx( g, pos=pos, nodelist=hosts, node_size=800,
label='host', node_color=hcolors, node_shape='s',
**opts )
nx.draw_networkx( g, pos=pos, nodelist=switches, node_size=1000,
node_color=scolors, node_shape='o', **opts )
# Get rid of axes, add title, and show
fig = plt.gcf()
ax = plt.gca()
ax.get_xaxis().set_visible( False )
ax.get_yaxis().set_visible( False )
fig.canvas.set_window_title( 'Mininet')
plt.title( 'Node Placement', fontweight='bold' )
plt.show()
def do_status( self, _line ):
"Report on node shell status"
nodes = self.mn.hosts + self.mn.switches
for node in nodes:
node.shell.poll()
exited = [ node for node in nodes
if node.shell.returncode is not None ]
if exited:
for node in exited:
output( '%s has exited with code %d\n'
% ( node, node.shell.returncode ) )
else:
output( 'All nodes are still running.\n' )
def do_placement( self, _line ):
"Describe node placement"
mn = self.mn
nodes = mn.hosts + mn.switches + mn.controllers
for server in mn.servers:
names = [ n.name for n in nodes if hasattr( n, 'server' )
and n.server == server ]
output( '%s: %s\n' % ( server, ' '.join( names ) ) )
| 3,875 | 37.376238 | 77 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/README.md
|
Mininet Examples
========================================================
These examples are intended to help you get started using
Mininet's Python API.
========================================================
#### baresshd.py:
This example uses Mininet's medium-level API to create an sshd
process running in a namespace. Doesn't use OpenFlow.
#### bind.py:
This example shows how you can create private directories for each
node in a Mininet topology.
#### cluster.py:
This example contains all of the code for experimental cluster
edition. Remote classes and MininetCluster can be imported from
here to create a topology with nodes on remote machines.
#### clusterSanity.py:
This example runs cluster edition locally as a sanity check to test
basic functionality.
#### clustercli.py:
This example contains a CLI for experimental cluster edition.
#### clusterdemo.py:
This example is a basic demo of cluster edition on 3 servers with
a tree topology of depth 3 and fanout 3.
#### consoles.py:
This example creates a grid of console windows, one for each node,
and allows interaction with and monitoring of each console, including
graphical monitoring.
#### controllers.py:
This example creates a network with multiple controllers, by
using a custom `Switch()` subclass.
#### controllers2.py:
This example creates a network with multiple controllers by
creating an empty network, adding nodes to it, and manually
starting the switches.
#### controlnet.py:
This examples shows how you can model the control network as well
as the data network, by actually creating two Mininet objects.
#### cpu.py:
This example tests iperf bandwidth for varying CPU limits.
#### emptynet.py:
This example demonstrates creating an empty network (i.e. with no
topology object) and adding nodes to it.
#### hwintf.py:
This example shows how to add an interface (for example a real
hardware interface) to a network after the network is created.
#### intfoptions.py:
This example reconfigures a TCIntf during runtime with different
traffic control commands to test bandwidth, loss, and delay.
#### limit.py:
This example shows how to use link and CPU limits.
#### linearbandwidth.py:
This example shows how to create a custom topology programatically
by subclassing Topo, and how to run a series of tests on it.
#### linuxrouter.py:
This example shows how to create and configure a router in Mininet
that uses Linux IP forwarding.
#### miniedit.py:
This example demonstrates creating a network via a graphical editor.
#### mobility.py:
This example demonstrates detaching an interface from one switch and
attaching it another as a basic way to move a host around a network.
#### multiLink.py:
This example demonstrates the creation of multiple links between
nodes using a custom Topology class.
#### multiping.py:
This example demonstrates one method for
monitoring output from multiple hosts, using `node.monitor()`.
#### multipoll.py:
This example demonstrates monitoring output files from multiple hosts.
#### multitest.py:
This example creates a network and runs multiple tests on it.
#### nat.py:
This example shows how to connect a Mininet network to the Internet
using NAT. It also answers the eternal question "why can't I ping
`google.com`?"
#### natnet.py:
This example demonstrates how to create a network using a NAT node
to connect hosts to the internet.
#### numberedports.py:
This example verifies the mininet ofport numbers match up to the ovs port numbers.
It also verifies that the port numbers match up to the interface numbers
#### popen.py:
This example monitors a number of hosts using `host.popen()` and
`pmonitor()`.
#### popenpoll.py:
This example demonstrates monitoring output from multiple hosts using
the `node.popen()` interface (which returns `Popen` objects) and `pmonitor()`.
#### scratchnet.py, scratchnetuser.py:
These two examples demonstrate how to create a network by using the lowest-
level Mininet functions. Generally the higher-level API is easier to use,
but scratchnet shows what is going on behind the scenes.
#### simpleperf.py:
A simple example of configuring network and CPU bandwidth limits.
#### sshd.py:
This example shows how to run an `sshd` process in each host, allowing
you to log in via `ssh`. This requires connecting the Mininet data network
to an interface in the root namespace (generaly the control network
already lives in the root namespace, so it does not need to be explicitly
connected.)
#### tree1024.py:
This example attempts to create a 1024-host network, and then runs the
CLI on it. It may run into scalability limits, depending on available
memory and `sysctl` configuration (see `INSTALL`.)
#### treeping64.py:
This example creates a 64-host tree network, and attempts to check full
connectivity using `ping`, for different switch/datapath types.
#### vlanhost.py:
An example of how to subclass Host to use a VLAN on its primary interface.
| 4,965 | 26.588889 | 82 |
md
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/baresshd.py
|
#!/usr/bin/python
"This example doesn't use OpenFlow, but attempts to run sshd in a namespace."
import sys
from mininet.node import Host
from mininet.util import ensureRoot, waitListening
ensureRoot()
timeout = 5
print "*** Creating nodes"
h1 = Host( 'h1' )
root = Host( 'root', inNamespace=False )
print "*** Creating links"
h1.linkTo( root )
print h1
print "*** Configuring nodes"
h1.setIP( '10.0.0.1', 8 )
root.setIP( '10.0.0.2', 8 )
print "*** Creating banner file"
f = open( '/tmp/%s.banner' % h1.name, 'w' )
f.write( 'Welcome to %s at %s\n' % ( h1.name, h1.IP() ) )
f.close()
print "*** Running sshd"
cmd = '/usr/sbin/sshd -o UseDNS=no -u0 -o "Banner /tmp/%s.banner"' % h1.name
# add arguments from the command line
if len( sys.argv ) > 1:
cmd += ' ' + ' '.join( sys.argv[ 1: ] )
h1.cmd( cmd )
listening = waitListening( server=h1, port=22, timeout=timeout )
if listening:
print "*** You may now ssh into", h1.name, "at", h1.IP()
else:
print ( "*** Warning: after %s seconds, %s is not listening on port 22"
% ( timeout, h1.name ) )
| 1,074 | 23.431818 | 77 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/scratchnet.py
|
#!/usr/bin/python
"""
Build a simple network from scratch, using mininet primitives.
This is more complicated than using the higher-level classes,
but it exposes the configuration details and allows customization.
For most tasks, the higher-level API will be preferable.
"""
from mininet.net import Mininet
from mininet.node import Node
from mininet.link import Link
from mininet.log import setLogLevel, info
from mininet.util import quietRun
from time import sleep
def scratchNet( cname='controller', cargs='-v ptcp:' ):
"Create network from scratch using Open vSwitch."
info( "*** Creating nodes\n" )
controller = Node( 'c0', inNamespace=False )
switch = Node( 's0', inNamespace=False )
h0 = Node( 'h0' )
h1 = Node( 'h1' )
info( "*** Creating links\n" )
Link( h0, switch )
Link( h1, switch )
info( "*** Configuring hosts\n" )
h0.setIP( '192.168.123.1/24' )
h1.setIP( '192.168.123.2/24' )
info( str( h0 ) + '\n' )
info( str( h1 ) + '\n' )
info( "*** Starting network using Open vSwitch\n" )
controller.cmd( cname + ' ' + cargs + '&' )
switch.cmd( 'ovs-vsctl del-br dp0' )
switch.cmd( 'ovs-vsctl add-br dp0' )
for intf in switch.intfs.values():
print switch.cmd( 'ovs-vsctl add-port dp0 %s' % intf )
# Note: controller and switch are in root namespace, and we
# can connect via loopback interface
switch.cmd( 'ovs-vsctl set-controller dp0 tcp:127.0.0.1:6633' )
info( '*** Waiting for switch to connect to controller' )
while 'is_connected' not in quietRun( 'ovs-vsctl show' ):
sleep( 1 )
info( '.' )
info( '\n' )
info( "*** Running test\n" )
h0.cmdPrint( 'ping -c1 ' + h1.IP() )
info( "*** Stopping network\n" )
controller.cmd( 'kill %' + cname )
switch.cmd( 'ovs-vsctl del-br dp0' )
switch.deleteIntfs()
info( '\n' )
if __name__ == '__main__':
setLogLevel( 'info' )
info( '*** Scratch network demo (kernel datapath)\n' )
Mininet.init()
scratchNet()
| 2,032 | 28.463768 | 67 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/linuxrouter.py
|
#!/usr/bin/python
"""
linuxrouter.py: Example network with Linux IP router
This example converts a Node into a router using IP forwarding
already built into Linux.
The example topology creates a router and three IP subnets:
- 192.168.1.0/24 (r0-eth1, IP: 192.168.1.1)
- 172.16.0.0/12 (r0-eth2, IP: 172.16.0.1)
- 10.0.0.0/8 (r0-eth3, IP: 10.0.0.1)
Each subnet consists of a single host connected to
a single switch:
r0-eth1 - s1-eth1 - h1-eth0 (IP: 192.168.1.100)
r0-eth2 - s2-eth1 - h2-eth0 (IP: 172.16.0.100)
r0-eth3 - s3-eth1 - h3-eth0 (IP: 10.0.0.100)
The example relies on default routing entries that are
automatically created for each router interface, as well
as 'defaultRoute' parameters for the host interfaces.
Additional routes may be added to the router or hosts by
executing 'ip route' or 'route' commands on the router or hosts.
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import Node
from mininet.log import setLogLevel, info
from mininet.cli import CLI
class LinuxRouter( Node ):
"A Node with IP forwarding enabled."
def config( self, **params ):
super( LinuxRouter, self).config( **params )
# Enable forwarding on the router
self.cmd( 'sysctl net.ipv4.ip_forward=1' )
def terminate( self ):
self.cmd( 'sysctl net.ipv4.ip_forward=0' )
super( LinuxRouter, self ).terminate()
class NetworkTopo( Topo ):
"A LinuxRouter connecting three IP subnets"
def build( self, **_opts ):
defaultIP = '192.168.1.1/24' # IP address for r0-eth1
router = self.addNode( 'r0', cls=LinuxRouter, ip=defaultIP )
s1, s2, s3 = [ self.addSwitch( s ) for s in 's1', 's2', 's3' ]
self.addLink( s1, router, intfName2='r0-eth1',
params2={ 'ip' : defaultIP } ) # for clarity
self.addLink( s2, router, intfName2='r0-eth2',
params2={ 'ip' : '172.16.0.1/12' } )
self.addLink( s3, router, intfName2='r0-eth3',
params2={ 'ip' : '10.0.0.1/8' } )
h1 = self.addHost( 'h1', ip='192.168.1.100/24',
defaultRoute='via 192.168.1.1' )
h2 = self.addHost( 'h2', ip='172.16.0.100/12',
defaultRoute='via 172.16.0.1' )
h3 = self.addHost( 'h3', ip='10.0.0.100/8',
defaultRoute='via 10.0.0.1' )
for h, s in [ (h1, s1), (h2, s2), (h3, s3) ]:
self.addLink( h, s )
def run():
"Test linux router"
topo = NetworkTopo()
net = Mininet( topo=topo ) # controller is used by s1-s3
net.start()
info( '*** Routing Table on Router:\n' )
print net[ 'r0' ].cmd( 'route' )
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
run()
| 2,826 | 30.411111 | 70 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/controllers.py
|
#!/usr/bin/python
"""
Create a network where different switches are connected to
different controllers, by creating a custom Switch() subclass.
"""
from mininet.net import Mininet
from mininet.node import OVSSwitch, Controller, RemoteController
from mininet.topolib import TreeTopo
from mininet.log import setLogLevel
from mininet.cli import CLI
setLogLevel( 'info' )
# Two local and one "external" controller (which is actually c0)
# Ignore the warning message that the remote isn't (yet) running
c0 = Controller( 'c0', port=6633 )
c1 = Controller( 'c1', port=6634 )
c2 = RemoteController( 'c2', ip='127.0.0.1', port=6633 )
cmap = { 's1': c0, 's2': c1, 's3': c2 }
class MultiSwitch( OVSSwitch ):
"Custom Switch() subclass that connects to different controllers"
def start( self, controllers ):
return OVSSwitch.start( self, [ cmap[ self.name ] ] )
topo = TreeTopo( depth=2, fanout=2 )
net = Mininet( topo=topo, switch=MultiSwitch, build=False )
for c in [ c0, c1 ]:
net.addController(c)
net.build()
net.start()
CLI( net )
net.stop()
| 1,061 | 27.702703 | 69 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/__init__.py
|
"""
Mininet Examples
See README for details
"""
| 48 | 8.8 | 22 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/limit.py
|
#!/usr/bin/python
"""
limit.py: example of using link and CPU limits
"""
from mininet.net import Mininet
from mininet.link import TCIntf
from mininet.node import CPULimitedHost
from mininet.topolib import TreeTopo
from mininet.util import custom, quietRun
from mininet.log import setLogLevel, info
def testLinkLimit( net, bw ):
"Run bandwidth limit test"
info( '*** Testing network %.2f Mbps bandwidth limit\n' % bw )
net.iperf()
def limit( bw=10, cpu=.1 ):
"""Example/test of link and CPU bandwidth limits
bw: interface bandwidth limit in Mbps
cpu: cpu limit as fraction of overall CPU time"""
intf = custom( TCIntf, bw=bw )
myTopo = TreeTopo( depth=1, fanout=2 )
for sched in 'rt', 'cfs':
info( '*** Testing with', sched, 'bandwidth limiting\n' )
if sched == 'rt':
release = quietRun( 'uname -r' ).strip('\r\n')
output = quietRun( 'grep CONFIG_RT_GROUP_SCHED /boot/config-%s'
% release )
if output == '# CONFIG_RT_GROUP_SCHED is not set\n':
info( '*** RT Scheduler is not enabled in your kernel. '
'Skipping this test\n' )
continue
host = custom( CPULimitedHost, sched=sched, cpu=cpu )
net = Mininet( topo=myTopo, intf=intf, host=host )
net.start()
testLinkLimit( net, bw=bw )
net.runCpuLimitTest( cpu=cpu )
net.stop()
def verySimpleLimit( bw=150 ):
"Absurdly simple limiting test"
intf = custom( TCIntf, bw=bw )
net = Mininet( intf=intf )
h1, h2 = net.addHost( 'h1' ), net.addHost( 'h2' )
net.addLink( h1, h2 )
net.start()
net.pingAll()
net.iperf()
h1.cmdPrint( 'tc -s qdisc ls dev', h1.defaultIntf() )
h2.cmdPrint( 'tc -d class show dev', h2.defaultIntf() )
h1.cmdPrint( 'tc -s qdisc ls dev', h1.defaultIntf() )
h2.cmdPrint( 'tc -d class show dev', h2.defaultIntf() )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
limit()
| 2,034 | 32.360656 | 75 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/popen.py
|
#!/usr/bin/python
"""
This example monitors a number of hosts using host.popen() and
pmonitor()
"""
from mininet.net import Mininet
from mininet.node import CPULimitedHost
from mininet.topo import SingleSwitchTopo
from mininet.log import setLogLevel
from mininet.util import custom, pmonitor
def monitorhosts( hosts=5, sched='cfs' ):
"Start a bunch of pings and monitor them using popen"
mytopo = SingleSwitchTopo( hosts )
cpu = .5 / hosts
myhost = custom( CPULimitedHost, cpu=cpu, sched=sched )
net = Mininet( topo=mytopo, host=myhost )
net.start()
# Start a bunch of pings
popens = {}
last = net.hosts[ -1 ]
for host in net.hosts:
popens[ host ] = host.popen( "ping -c5 %s" % last.IP() )
last = host
# Monitor them and print output
for host, line in pmonitor( popens ):
if host:
print "<%s>: %s" % ( host.name, line.strip() )
# Done
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
monitorhosts( hosts=5 )
| 1,023 | 26.675676 | 64 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/emptynet.py
|
#!/usr/bin/python
"""
This example shows how to create an empty Mininet object
(without a topology object) and add nodes to it manually.
"""
from mininet.net import Mininet
from mininet.node import Controller
from mininet.cli import CLI
from mininet.log import setLogLevel, info
def emptyNet():
"Create an empty network and add nodes to it."
net = Mininet( controller=Controller )
info( '*** Adding controller\n' )
net.addController( 'c0' )
info( '*** Adding hosts\n' )
h1 = net.addHost( 'h1', ip='10.0.0.1' )
h2 = net.addHost( 'h2', ip='10.0.0.2' )
info( '*** Adding switch\n' )
s3 = net.addSwitch( 's3' )
info( '*** Creating links\n' )
net.addLink( h1, s3 )
net.addLink( h2, s3 )
info( '*** Starting network\n')
net.start()
info( '*** Running CLI\n' )
CLI( net )
info( '*** Stopping network' )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
emptyNet()
| 960 | 20.355556 | 57 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/cpu.py
|
#!/usr/bin/python
"""
cpu.py: test iperf bandwidth for varying cpu limits
"""
from mininet.net import Mininet
from mininet.node import CPULimitedHost
from mininet.topolib import TreeTopo
from mininet.util import custom, waitListening
from mininet.log import setLogLevel, info
def bwtest( cpuLimits, period_us=100000, seconds=5 ):
"""Example/test of link and CPU bandwidth limits
cpu: cpu limit as fraction of overall CPU time"""
topo = TreeTopo( depth=1, fanout=2 )
results = {}
for sched in 'rt', 'cfs':
print '*** Testing with', sched, 'bandwidth limiting'
for cpu in cpuLimits:
host = custom( CPULimitedHost, sched=sched,
period_us=period_us,
cpu=cpu )
try:
net = Mininet( topo=topo, host=host )
# pylint: disable=bare-except
except:
info( '*** Skipping host %s\n' % sched )
break
net.start()
net.pingAll()
hosts = [ net.getNodeByName( h ) for h in topo.hosts() ]
client, server = hosts[ 0 ], hosts[ -1 ]
server.cmd( 'iperf -s -p 5001 &' )
waitListening( client, server, 5001 )
result = client.cmd( 'iperf -yc -t %s -c %s' % (
seconds, server.IP() ) ).split( ',' )
bps = float( result[ -1 ] )
server.cmdPrint( 'kill %iperf' )
net.stop()
updated = results.get( sched, [] )
updated += [ ( cpu, bps ) ]
results[ sched ] = updated
return results
def dump( results ):
"Dump results"
fmt = '%s\t%s\t%s'
print
print fmt % ( 'sched', 'cpu', 'client MB/s' )
print
for sched in sorted( results.keys() ):
entries = results[ sched ]
for cpu, bps in entries:
pct = '%.2f%%' % ( cpu * 100 )
mbps = bps / 1e6
print fmt % ( sched, pct, mbps )
if __name__ == '__main__':
setLogLevel( 'info' )
limits = [ .45, .4, .3, .2, .1 ]
out = bwtest( limits )
dump( out )
| 2,119 | 27.648649 | 68 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/docker_ndn.py
|
#!/usr/bin/python
"""
"""
from mininet.net import Containernet
from mininet.node import Controller, Docker, OVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import TCLink, Link
from mininet.util import dumpNodeConnections
import fnss
import time
import re
from IPython import embed
class NDNContainernet( Containernet ):
    """
    A Containernet whose buildFromTopo creates every topology host as a
    Docker container (via addDocker) instead of a plain Mininet host.
    Otherwise follows Mininet's standard build sequence.
    """
    def __init__(self, **params):
        # Delegate all setup to Containernet (which extends Mininet).
        Containernet.__init__(self, **params)
    def buildFromTopo( self, topo=None ):
        """Build the network from a topology object.

        topo: a Mininet Topo-like object providing hosts(), switches(),
              links() and nodeInfo().
        At the end of this function, everything should be connected and up.
        """
        # Possibly we should clean up here and/or validate
        # the topo
        if self.cleanup:
            pass
        info( '*** Creating network\n' )
        if not self.controllers and self.controller:
            # Add a default controller
            info( '*** Adding controller\n' )
            classes = self.controller
            if not isinstance( classes, list ):
                classes = [ classes ]
            for i, cls in enumerate( classes ):
                # Allow Controller objects because nobody understands partial()
                if isinstance( cls, Controller ):
                    self.addController( cls )
                else:
                    self.addController( 'c%d' % i, cls )
        # Key difference from Mininet.buildFromTopo: hosts become Docker
        # containers, configured from the per-node info set on the topo.
        info( '*** Adding docker hosts:\n' )
        for hostName in topo.hosts():
            self.addDocker( hostName, **topo.nodeInfo( hostName ))
            info( hostName + ' ' )
        info( '\n*** Adding switches:\n' )
        for switchName in topo.switches():
            # A bit ugly: add batch parameter if appropriate
            params = topo.nodeInfo( switchName)
            cls = params.get( 'cls', self.switch )
            #if hasattr( cls, 'batchStartup' ):
            #    params.setdefault( 'batch', True )
            self.addSwitch( switchName, **params )
            info( switchName + ' ' )
        info( '\n*** Adding links:\n' )
        for srcName, dstName, params in topo.links(
                sort=True, withInfo=True ):
            self.addLink( **params )
            info( '(%s, %s) ' % ( srcName, dstName ) )
        info( '\n' )
def topology():
    """Build an NDN testbed from the AttMpls topology-zoo graph.

    Parses AttMpls.gml with fnss, turns it into a Mininet topo whose hosts
    are NDN Docker containers, starts the network, waits for the repos to
    finish inserting data, then drops into the Mininet CLI.
    NOTE(review): `d1` and `d2` are referenced below (addNDNRoute,
    checkRepoNGInitDone, listFilesInRepo) but never defined -- the addDocker
    lines that created them are commented out, so this will raise NameError
    at runtime. Also uses Python 2 `print` statements.
    """
    fnss_topology = fnss.parse_topology_zoo('AttMpls.gml')
    #fnss.two_tier_topology(1, 2, 2)
    "Create a network with some docker containers acting as hosts."
    # Set link attributes
    # https://fnss.github.io/doc/core/apidoc/fnss.functions.html
    #fnss.set_capacities_constant(fnss_topology, 10, 'Mbps')
    #fnss.set_delays_constant(fnss_topology, 2, 'ms')
    #fnss.set_buffer_sizes_constant(fnss_topology, 50, 'packets')
    fnss.set_delays_geo_distance(fnss_topology, specific_delay=fnss.PROPAGATION_DELAY_FIBER)
    mn_topo = fnss.to_mininet(fnss_topology, relabel_nodes=True)
    # Mark every host so NDNContainernet.buildFromTopo starts it as an
    # NDN Docker container running /ndn-entrypoint.sh.
    for node in mn_topo.hosts():
        mn_topo.setNodeInfo(node, {
                            "dcmd" : ["/bin/bash", "/ndn-entrypoint.sh"],
                            "dimage" : "ndnrepo_ndn:latest",
                            "privileged" : True,
                            "cls" : Docker
                            } )
        #mn_topo.setNodeInfo(node, "privileged", True )
        #mn_topo.setNodeInfo(node, "dimage", "ndnrepo_ndn:latest" )
        #node.dcmd=["/bin/bash", "/ndn-entrypoint.sh"]
        # = Docker('{0}'.format(node), ip='10.0.0.{0}'.format(node), , privileged=True, dimage="ndnrepo_ndn:latest")
        #node.type='host'
        #print node
        #nodes.append(node)
    net = NDNContainernet(topo=mn_topo, link=TCLink, controller=Controller)
    dumpNodeConnections(net.hosts)
    dumpNodeConnections(net.switches)
    fnss_topology.edges()
    info('*** Starting network\n')
    net.start()
    # Drops into an interactive IPython shell for manual inspection.
    embed()
    #TODO Add NDN Links for all
    #fnss_topology.edges()
    #[(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]
    #addNDNRoute(d1, d2)
    #net = Containernet(controller=Controller)
    info('*** Adding controller\n')
    #net.addController('c0')
    info('*** Adding docker containers\n')
    #d1 = net.addDocker('d1', ip='10.0.0.251', dcmd=["/bin/bash", "/ndn-entrypoint.sh"], privileged=True, dimage="ndnrepo_ndn:latest")
    #d2 = net.addDocker('d2', ip='10.0.0.250', dcmd=["/bin/bash", "/ndn-entrypoint.sh"], privileged=True, dimage="ndnrepo_ndn:latest")
    #s1 = net.addSwitch('s1')
    info('*** Creating links\n')
    #net.addLink(d1, s1)
    #net.addLink(s1, d2)
    time.sleep(5)
    # BUG(review): d1/d2 are undefined here (see docstring) -- NameError.
    print addNDNRoute(d1, d2)
    #TODO create file when inserting is done
    # Poll until both repos report the expected data count (see
    # checkRepoNGInitDone below).
    while not (checkRepoNGInitDone(d1) and checkRepoNGInitDone(d2)):
        time.sleep(5)
    print listFilesInRepo(d1)
    print listFilesInRepo(d2)
    info('*** Running CLI\n')
    dumpNodeConnections(net.hosts)
    CLI(net)
    info('*** Stopping network')
    net.stop()
def addNDNRoute(source, dest, cost=1):
    """Register an NDN broadcast route from source toward dest via nfdc.

    source, dest: Mininet/Containernet nodes (source runs the command,
    dest supplies the target IP). cost: route cost passed to nfdc.
    Returns the command output from source.cmd().
    """
    command = "nfdc register -c {0} ndn:/ndn/broadcast/ndnfs tcp://{1}".format(cost, dest.IP())
    return command and source.cmd(command)
def listFilesInRepo(node):
    """Return the output of repo-ng-ls on the given node (its repo listing)."""
    listing = node.cmd('repo-ng-ls')
    return listing
def listLogs(node):
    """Return the tail of the main NDN log files on ``node``.

    node: a host whose ``cmd`` runs a shell command and returns its output.
    Returns the combined ``tail`` output (repo-ng, NFD, and repo-copy logs)
    as a string.

    Note: the original declared five additional log paths (nfd-error,
    copy-repo-error, ndnfs, ndnfs-server, repong-error) that were never
    used; they have been removed as dead locals.
    """
    REPO_NG_LOG = "/var/log/repong.log"
    NFD_LOG = "/var/log/nfd.out"
    NDN_COPY_LOG = "/var/log/copy-repo.log"
    return node.cmd('tail {0} {1} {2}'.format(REPO_NG_LOG, NFD_LOG, NDN_COPY_LOG))
"""
/ndn/broadcast/ndnfs/videos/DashJsMediaFile/ElephantsDream_H264BPL30_0100.264.dash/%FD%00%00%01%5D%A3O%EF1/%00%192/sha256digest=981f889303b396519c1c1dc01aa472ab4714e70f0a283aba0679b59583fead17
/ndn/broadcast/ndnfs/videos/DashJsMediaFile/ElephantsDream_H264BPL30_0100.264.dash/%FD%00%00%01%5D%A3O%EF1/%00%193/sha256digest=2a4ee843e3ac3b21f341de4ec45b33eaaef49dbb7567edea32ea60f14a44a62d
/ndn/broadcast/ndnfs/videos/DashJsMediaFile/ElephantsDream_H264BPL30_0100.264.dash/%FD%00%00%01%5D%A3O%EF1/%00%194/sha256digest=d30753f707f5a983bbad98b0c37ba6ddc29da0dfb2068eb40b61ad78ee9b06fb
/ndn/broadcast/ndnfs/videos/DashJsMediaFile/ElephantsDream_H264BPL30_0100.264.dash/%FD%00%00%01%5D%A3O%EF1/%00%195/sha256digest=7e17adb27923a9af8677a09e46217a73ac1d58def212073ab09f482dcc6e163c
Total number of data = 72910
For TOS Dataset and Repo-Ng Total data should be
Total number of data = 39541
"""
def checkRepoNGInitDone(node):
    """Check whether repo-ng on ``node`` has finished inserting all data.

    Runs ``repo-ng-ls`` and parses the trailing "Total number of data = N"
    line; returns True iff N equals the expected total of 88460 for this
    dataset (other datasets have different totals -- see the module notes
    above). Otherwise prints the current count and returns False.

    Fixes: the bare ``except:`` (which swallowed even KeyboardInterrupt)
    is narrowed to IndexError, the only failure mode of the lookup; the
    Python-2-only ``print status`` statement is written in the
    parenthesized single-argument form, which behaves identically under
    Python 2 and is valid Python 3.
    """
    output = node.cmd('repo-ng-ls')
    pattern = re.compile(r'^Total number of data = +(\d+)', re.M)
    try:
        status = re.findall(pattern, output)[0]
    except IndexError:
        # No "Total number of data" line yet -- repo not ready.
        status = 0
    if int(status) == 88460:
        return True
    else:
        print(status)
        return False
def listFiles(node):
    """Print a recursive listing of /videos/ on the given node.

    node: a host whose ``cmd`` runs a shell command and returns its output.

    Fix: the Python-2-only ``print x`` statement is written as ``print(x)``,
    which prints identically under Python 2 (single parenthesized argument)
    and is valid Python 3.
    """
    print(node.cmd("find /videos/"))
def getFile(node, path="/ndn/broadcast/ndnfs/videos/DashJsMediaFile/NDN.mpd"):
    """Fetch ``path`` over NDN via ndngetfile on ``node`` and print the output."""
    command = "ndngetfile {0}".format(path)
    output = node.cmd(command)
    print(output)
if __name__ == '__main__':
    # Entry point: verbose logging, then build and run the NDN testbed.
    setLogLevel('info')
    topology()
| 7,197 | 32.170507 | 192 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/hwintf.py
|
#!/usr/bin/python
"""
This example shows how to add an interface (for example a real
hardware interface) to a network after the network is created.
"""
import re
import sys
from mininet.cli import CLI
from mininet.log import setLogLevel, info, error
from mininet.net import Mininet
from mininet.link import Intf
from mininet.topolib import TreeTopo
from mininet.util import quietRun
def checkIntf( intf ):
    """Abort the program unless interface ``intf`` exists and has no IP.

    Uses ifconfig output: missing output means the interface does not
    exist; any dotted-quad in the output means it is already configured
    (and probably in use). Exits with status 1 in either case.
    """
    output = quietRun( 'ifconfig %s 2>/dev/null' % intf, shell=True )
    if not output:
        error( 'Error:', intf, 'does not exist!\n' )
        exit( 1 )
    addresses = re.findall( r'\d+\.\d+\.\d+\.\d+', output )
    if addresses:
        error( 'Error:', intf, 'has an IP address,'
               'and is probably in use!\n' )
        exit( 1 )
if __name__ == '__main__':
    setLogLevel( 'info' )
    # try to get hw intf from the command line; by default, use eth1
    intfName = sys.argv[ 1 ] if len( sys.argv ) > 1 else 'eth1'
    info( '*** Connecting to hw intf: %s' % intfName )
    info( '*** Checking', intfName, '\n' )
    # Aborts (exit 1) if the interface is missing or already configured.
    checkIntf( intfName )
    info( '*** Creating network\n' )
    net = Mininet( topo=TreeTopo( depth=1, fanout=2 ) )
    # Attach the hardware interface to the first (root) switch.
    switch = net.switches[ 0 ]
    info( '*** Adding hardware interface', intfName, 'to switch',
          switch.name, '\n' )
    _intf = Intf( intfName, node=switch )
    info( '*** Note: you may need to reconfigure the interfaces for '
          'the Mininet hosts:\n', net.hosts, '\n' )
    net.start()
    CLI( net )
    net.stop()
| 1,549 | 27.703704 | 69 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/dockerhosts.py
|
#!/usr/bin/python
"""
This example shows how to create a simple network and
how to create docker containers (based on existing images)
to it.
"""
from mininet.net import Containernet
from mininet.node import Controller, Docker, OVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import TCLink, Link
def topology():
    """Create a network mixing plain hosts and Docker containers.

    Demonstrates Containernet features: addDocker with CPU limits
    (cpu_period/cpu_quota/cpu_shares), volume mounts, multiple interfaces
    per container, extended ping with manualdestip, and adding a container
    at runtime. Starts the CLI, then tears the network down.
    """
    net = Containernet(controller=Controller)
    info('*** Adding controller\n')
    net.addController('c0')
    info('*** Adding hosts\n')
    h1 = net.addHost('h1')
    h2 = net.addHost('h2')
    info('*** Adding docker containers\n')
    d1 = net.addDocker('d1', ip='10.0.0.251', dimage="ubuntu:trusty")
    # d2 is CPU-limited: quota/period = 25000/50000 = half a CPU.
    d2 = net.addDocker('d2', ip='10.0.0.252', dimage="ubuntu:trusty", cpu_period=50000, cpu_quota=25000)
    d3 = net.addHost(
        'd3', ip='11.0.0.253', cls=Docker, dimage="ubuntu:trusty", cpu_shares=20)
    # NOTE(review): d5 (with the host-root volume mount) is created but
    # never linked to any switch below.
    d5 = net.addDocker('d5', dimage="ubuntu:trusty", volumes=["/:/mnt/vol1:rw"])
    info('*** Adding switch\n')
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2', cls=OVSSwitch)
    s3 = net.addSwitch('s3')
    info('*** Creating links\n')
    net.addLink(h1, s1)
    net.addLink(s1, d1)
    net.addLink(h2, s2)
    net.addLink(d2, s2)
    net.addLink(s1, s2)
    #net.addLink(s1, s2, cls=TCLink, delay="100ms", bw=1, loss=10)
    # try to add a second interface to a docker container
    net.addLink(d2, s3, params1={"ip": "11.0.0.254/8"})
    net.addLink(d3, s3)
    info('*** Starting network\n')
    net.start()
    net.ping([d1, d2])
    # our extended ping functionality
    net.ping([d1], manualdestip="10.0.0.252")
    net.ping([d2, d3], manualdestip="11.0.0.254")
    info('*** Dynamically add a container at runtime\n')
    d4 = net.addDocker('d4', dimage="ubuntu:trusty")
    # we have to specify a manual ip when we add a link at runtime
    net.addLink(d4, s1, params1={"ip": "10.0.0.254/8"})
    # other options to do this
    #d4.defaultIntf().ifconfig("10.0.0.254 up")
    #d4.setIP("10.0.0.254")
    net.ping([d1], manualdestip="10.0.0.254")
    info('*** Running CLI\n')
    CLI(net)
    info('*** Stopping network')
    net.stop()
if __name__ == '__main__':
    # Entry point: verbose logging, then run the Docker-hosts demo.
    setLogLevel('info')
    topology()
| 2,290 | 27.6375 | 104 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/multitest.py
|
#!/usr/bin/python
"""
This example shows how to create a network and run multiple tests.
For a more complicated test example, see udpbwtest.py.
"""
from mininet.cli import CLI
from mininet.log import lg, info
from mininet.net import Mininet
from mininet.node import OVSKernelSwitch
from mininet.topolib import TreeTopo
def ifconfigTest( net ):
    """Log the ifconfig output of every host in ``net``."""
    for node in net.hosts:
        info( node.cmd( 'ifconfig' ) )
if __name__ == '__main__':
    lg.setLogLevel( 'info' )
    info( "*** Initializing Mininet and kernel modules\n" )
    # Load/verify the Open vSwitch kernel modules before building.
    OVSKernelSwitch.setup()
    info( "*** Creating network\n" )
    network = Mininet( TreeTopo( depth=2, fanout=2 ), switch=OVSKernelSwitch )
    info( "*** Starting network\n" )
    network.start()
    info( "*** Running ping test\n" )
    network.pingAll()
    info( "*** Running ifconfig test\n" )
    ifconfigTest( network )
    info( "*** Starting CLI (type 'exit' to exit)\n" )
    CLI( network )
    info( "*** Stopping network\n" )
    network.stop()
| 1,049 | 28.166667 | 78 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/clusterdemo.py
|
#!/usr/bin/python
"clusterdemo.py: demo of Mininet Cluster Edition prototype"
from mininet.examples.cluster import MininetCluster, SwitchBinPlacer
from mininet.topolib import TreeTopo
from mininet.log import setLogLevel
from mininet.examples.clustercli import ClusterCLI as CLI
def demo():
    """Simple demo of Cluster Mode: a depth-3/fanout-3 tree spread across
    three servers with switch-bin placement, then an interactive CLI."""
    serverNames = [ 'localhost', 'ubuntu2', 'ubuntu3' ]
    tree = TreeTopo( depth=3, fanout=3 )
    cluster = MininetCluster( topo=tree, servers=serverNames,
                              placement=SwitchBinPlacer )
    cluster.start()
    CLI( cluster )
    cluster.stop()
if __name__ == '__main__':
    # Entry point: verbose logging, then run the cluster demo.
    setLogLevel( 'info' )
    demo()
| 639 | 26.826087 | 68 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/multipoll.py
|
#!/usr/bin/python
"""
Simple example of sending output to multiple files and
monitoring them
"""
from mininet.topo import SingleSwitchTopo
from mininet.net import Mininet
from mininet.log import setLogLevel
from time import time
from select import poll, POLLIN
from subprocess import Popen, PIPE
def monitorFiles( outfiles, seconds, timeoutms ):
    """Tail multiple output files at once for a fixed duration.

    Generator yielding (host, line) pairs as lines appear, or (None, '')
    on each poll timeout. Spawns one ``tail -f`` per file and polls their
    stdout fds; terminates the tails when ``seconds`` have elapsed.

    outfiles: dict mapping host -> path of that host's output file.
    seconds: total monitoring duration.
    timeoutms: per-poll timeout in milliseconds.
    NOTE(review): dict.iteritems() is Python 2 only.
    """
    devnull = open( '/dev/null', 'w' )
    tails, fdToFile, fdToHost = {}, {}, {}
    for h, outfile in outfiles.iteritems():
        tail = Popen( [ 'tail', '-f', outfile ],
                      stdout=PIPE, stderr=devnull )
        fd = tail.stdout.fileno()
        tails[ h ] = tail
        fdToFile[ fd ] = tail.stdout
        fdToHost[ fd ] = h
    # Prepare to poll output files
    readable = poll()
    for t in tails.values():
        readable.register( t.stdout.fileno(), POLLIN )
    # Run until a set number of seconds have elapsed
    endTime = time() + seconds
    while time() < endTime:
        fdlist = readable.poll(timeoutms)
        if fdlist:
            for fd, _flags in fdlist:
                f = fdToFile[ fd ]
                host = fdToHost[ fd ]
                # Wait for a line of output
                line = f.readline().strip()
                yield host, line
        else:
            # If we timed out, return nothing
            yield None, ''
    # Clean up the tail subprocesses once the monitoring window closes.
    for t in tails.values():
        t.terminate()
    devnull.close()  # Not really necessary
def monitorTest( N=3, seconds=3 ):
    """Start pings from every host to the first host and monitor the output.

    N: number of hosts on a single switch.
    seconds: how long to watch the per-host output files.
    Each host pings hosts[0], redirecting stdout/stderr to /tmp files,
    which monitorFiles() then tails and prints.
    NOTE(review): uses Python 2 `print` statements.
    """
    topo = SingleSwitchTopo( N )
    net = Mininet( topo )
    net.start()
    hosts = net.hosts
    print "Starting test..."
    server = hosts[ 0 ]
    outfiles, errfiles = {}, {}
    for h in hosts:
        # Create and/or erase output files
        outfiles[ h ] = '/tmp/%s.out' % h.name
        errfiles[ h ] = '/tmp/%s.err' % h.name
        h.cmd( 'echo >', outfiles[ h ] )
        h.cmd( 'echo >', errfiles[ h ] )
        # Start pings
        h.cmdPrint('ping', server.IP(),
                   '>', outfiles[ h ],
                   '2>', errfiles[ h ],
                   '&' )
    print "Monitoring output for", seconds, "seconds"
    for h, line in monitorFiles( outfiles, seconds, timeoutms=500 ):
        # h is None when a poll interval elapsed with no output.
        if h:
            print '%s: %s' % ( h.name, line )
    # Kill the background pings started above.
    for h in hosts:
        h.cmd('kill %ping')
    net.stop()
if __name__ == '__main__':
    # Entry point: verbose logging, then run the multi-file monitor demo.
    setLogLevel('info')
    monitorTest()
| 2,469 | 29.121951 | 68 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/multiping.py
|
#!/usr/bin/python
"""
multiping.py: monitor multiple sets of hosts using ping
This demonstrates how one may send a simple shell script to
multiple hosts and monitor their output interactively for a period=
of time.
"""
from mininet.net import Mininet
from mininet.node import Node
from mininet.topo import SingleSwitchTopo
from mininet.log import setLogLevel
from select import poll, POLLIN
from time import time
def chunks( l, n ):
    """Split sequence l into consecutive slices of length n.

    The final slice may be shorter when len(l) is not a multiple of n.
    Works on any sliceable sequence (lists, strings, ...).
    """
    result = []
    for start in range( 0, len( l ), n ):
        result.append( l[ start: start + n ] )
    return result
def startpings( host, targetips ):
    """Launch a background shell loop on ``host`` that pings each target.

    host: node providing .name, .IP() and .cmd().
    targetips: iterable of IP address strings to ping round-robin,
    one ping per second per pass, forever (until killed).
    """
    iplist = ' '.join( targetips )
    # Endless shell loop: one 1-second ping per target, then repeat.
    loop = ( 'while true; do '
             ' for ip in %s; do ' % iplist +
             ' echo -n %s "->" $ip ' % host.IP() +
             ' `ping -c1 -w 1 $ip | grep packets` ;'
             ' sleep 1;'
             ' done; '
             'done &' )
    print( '*** Host %s (%s) will be pinging ips: %s' %
           ( host.name, host.IP(), iplist ) )
    host.cmd( loop )
def multiping( netsize, chunksize, seconds):
    """Ping within subsets of hosts and print their interleaved output.

    netsize: number of hosts on a single switch.
    chunksize: hosts per subnet chunk; each host pings its chunk-mates
    (plus one bogus address to generate packet loss).
    seconds: how long to poll host stdout for ping reports.
    NOTE(review): uses a Python 2 `print` statement in the monitor loop.
    """
    # Create network and identify subnets
    topo = SingleSwitchTopo( netsize )
    net = Mininet( topo=topo )
    net.start()
    hosts = net.hosts
    subnets = chunks( hosts, chunksize )
    # Create polling object
    fds = [ host.stdout.fileno() for host in hosts ]
    poller = poll()
    for fd in fds:
        poller.register( fd, POLLIN )
    # Start pings
    for subnet in subnets:
        ips = [ host.IP() for host in subnet ]
        # adding bogus to generate packet loss
        ips.append( '10.0.0.200' )
        for host in subnet:
            startpings( host, ips )
    # Monitor output
    endTime = time() + seconds
    while time() < endTime:
        readable = poller.poll(1000)
        for fd, _mask in readable:
            # Map the ready fd back to its Mininet node to label the line.
            node = Node.outToNode[ fd ]
            print '%s:' % node.name, node.monitor().strip()
    # Stop pings
    for host in hosts:
        host.cmd( 'kill %while' )
    net.stop()
if __name__ == '__main__':
    # Entry point: 20 hosts pinging in chunks of 4 for 10 seconds.
    setLogLevel( 'info' )
    multiping( netsize=20, chunksize=4, seconds=10 )
| 2,235 | 25.619048 | 67 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/examples/clusterSanity.py
|
#!/usr/bin/env python
'''
A sanity check for cluster edition
'''
from mininet.examples.cluster import MininetCluster
from mininet.log import setLogLevel
from mininet.examples.clustercli import ClusterCLI as CLI
from mininet.topo import SingleSwitchTopo
def clusterSanity():
    """Sanity check for cluster mode: bring up a single-switch topology
    under MininetCluster, offer the CLI, then shut down."""
    singleSwitch = SingleSwitchTopo()
    cluster = MininetCluster( topo=singleSwitch )
    cluster.start()
    CLI( cluster )
    cluster.stop()
if __name__ == '__main__':
    # Entry point: verbose logging, then run the cluster sanity check.
    setLogLevel( 'info' )
    clusterSanity()
| 501 | 20.826087 | 57 |
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.