repo_name (string, lengths 7-92) | path (string, lengths 5-149) | copies (string, lengths 1-3) | size (string, lengths 4-6) | content (string, lengths 911-693k) | license (string, 15 classes)
---|---|---|---|---|---|
jpmpentwater/cvxpy | examples/expr_trees/1D_convolution.py | 12 | 1453 | #!/usr/bin/env python
from cvxpy import *
import numpy as np
import random
from math import pi, sqrt, exp
def gauss(n=11,sigma=1):
r = range(-int(n/2),int(n/2)+1)
return [1 / (sigma * sqrt(2*pi)) * exp(-float(x)**2/(2*sigma**2)) for x in r]
np.random.seed(5)
random.seed(5)
DENSITY = 0.008
n = 1000
x = Variable(n)
# Create sparse signal.
signal = np.zeros(n)
nnz = 0
for i in range(n):
if random.random() < DENSITY:
signal[i] = random.uniform(0, 100)
nnz += 1
# Gaussian kernel.
m = 1001
kernel = gauss(m, m/10)
# Noisy signal.
std = 1
noise = np.random.normal(scale=std, size=n+m-1)
noisy_signal = conv(kernel, signal) #+ noise
gamma = Parameter(sign="positive")
fit = norm(conv(kernel, x) - noisy_signal, 2)
regularization = norm(x, 1)
constraints = [x >= 0]
gamma.value = 0.06
prob = Problem(Minimize(fit), constraints)
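# Note: gamma and the l1 regularization term defined above are not used in this
# objective; a regularized variant would presumably read
#   prob = Problem(Minimize(fit + gamma * regularization), constraints)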
solver_options = {"NORMALIZE": True, "MAX_ITERS": 2500,
"EPS":1e-3}
result = prob.solve(solver=SCS,
verbose=True,
NORMALIZE=True,
MAX_ITERS=2500)
# Get problem matrix.
data, dims = prob.get_problem_data(solver=SCS)
# Plot result and fit.
import matplotlib.pyplot as plt
plt.plot(range(n), signal, label="true signal")
plt.plot(range(n), np.asarray(noisy_signal.value[:n, 0]), label="noisy convolution")
plt.plot(range(n), np.asarray(x.value[:,0]), label="recovered signal")
plt.legend(loc='upper right')
plt.show()
| gpl-3.0 |
shyamalschandra/scikit-learn | examples/decomposition/plot_image_denoising.py | 181 | 5819 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image using online :ref:`DictionaryLearning` together with
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is to look
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is, in addition, closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
huggingface/pytorch-transformers | src/transformers/utils/versions.py | 1 | 4611 | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for working with package versions
"""
import operator
import re
import sys
from typing import Optional
from packaging import version
# The package importlib_metadata is in a different place, depending on the python version.
if sys.version_info < (3, 8):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
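# e.g. ops[">="](version.parse("4.27.1"), version.parse("4.27")) evaluates to True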
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
if got_ver is None:
raise ValueError("got_ver is None")
if want_ver is None:
raise ValueError("want_ver is None")
if not ops[op](version.parse(got_ver), version.parse(want_ver)):
raise ImportError(
f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
)
def require_version(requirement: str, hint: Optional[str] = None) -> None:
"""
Perform a runtime check of the dependency versions, using the exact same syntax used by pip.
The installed module version comes from the `site-packages` dir via `importlib_metadata`.
Args:
requirement (:obj:`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
hint (:obj:`str`, `optional`): what suggestion to print in case of requirements not being met
Example::
require_version("pandas>1.1.2")
require_version("numpy>1.18.5", "this is important to have for whatever reason")
"""
hint = f"\n{hint}" if hint is not None else ""
# non-versioned check
if re.match(r"^[\w_\-\d]+$", requirement):
pkg, op, want_ver = requirement, None, None
else:
match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
if not match:
raise ValueError(
f"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
)
pkg, want_full = match[0]
want_range = want_full.split(",") # there could be multiple requirements
wanted = {}
for w in want_range:
match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
if not match:
raise ValueError(
f"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
)
op, want_ver = match[0]
wanted[op] = want_ver
if op not in ops:
raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")
# special case
if pkg == "python":
got_ver = ".".join([str(x) for x in sys.version_info[:3]])
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
return
# check if any version is installed
try:
got_ver = importlib_metadata.version(pkg)
except importlib_metadata.PackageNotFoundError:
raise importlib_metadata.PackageNotFoundError(
f"The '{requirement}' distribution was not found and is required by this application. {hint}"
)
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
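# Note: a ranged requirement such as "tokenizers>=0.10.1,<0.11" is split on "," above,
# and every clause is checked independently against the installed version.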
def require_version_core(requirement):
"""require_version wrapper which emits a core-specific hint on failure"""
hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git master"
return require_version(requirement, hint)
def require_version_examples(requirement):
"""require_version wrapper which emits examples-specific hint on failure"""
hint = "Try: pip install -r examples/requirements.txt"
return require_version(requirement, hint)
| apache-2.0 |
shangwuhencc/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
                   precision    recall  f1-score   support
     Ariel Sharon       0.67      0.92      0.77        13
     Colin Powell       0.75      0.78      0.76        60
  Donald Rumsfeld       0.78      0.67      0.72        27
    George W Bush       0.86      0.86      0.86       146
Gerhard Schroeder       0.76      0.76      0.76        25
      Hugo Chavez       0.67      0.67      0.67        15
       Tony Blair       0.81      0.69      0.75        36
      avg / total       0.80      0.80      0.80       322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
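# each principal component, reshaped to the image dimensions (h, w), can be displayed as an "eigenface" in the gallery below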
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
d-mittal/pystruct | pystruct/models/latent_graph_crf.py | 3 | 8415 | import numbers
import numpy as np
from scipy import sparse
from sklearn.cluster import KMeans
from . import GraphCRF
from ..inference import inference_dispatch
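# kmeans_init: initialize latent states by clustering, separately for each observable
# label, the concatenation of node features and (normalized) neighbor label counts;
# the resulting cluster ids are offset into the global latent-state numbering.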
def kmeans_init(X, Y, all_edges, n_labels, n_states_per_label,
symmetric=True):
all_feats = []
# iterate over samples
for x, y, edges in zip(X, Y, all_edges):
# first, get neighbor counts from nodes
n_nodes = x.shape[0]
labels_one_hot = np.zeros((n_nodes, n_labels), dtype=np.int)
y = y.ravel()
gx = np.ogrid[:n_nodes]
labels_one_hot[gx, y] = 1
size = np.prod(y.shape)
graphs = [sparse.coo_matrix((np.ones(e.shape[0]), e.T), (size, size))
for e in edges]
if symmetric:
directions = [g + g.T for g in graphs]
else:
directions = [T for g in graphs for T in [g, g.T]]
neighbors = [s * labels_one_hot.reshape(size, -1) for s in directions]
neighbors = np.hstack(neighbors)
# normalize (for borders)
neighbors /= np.maximum(neighbors.sum(axis=1)[:, np.newaxis], 1)
# add unaries
features = np.hstack([x, neighbors])
all_feats.append(features)
all_feats_stacked = np.vstack(all_feats)
Y_stacked = np.hstack(Y).ravel()
# for each state, run k-means over whole dataset
H = [np.zeros(y.shape, dtype=np.int) for y in Y]
label_indices = np.hstack([0, np.cumsum(n_states_per_label)])
for label in np.unique(Y_stacked):
try:
km = KMeans(n_clusters=n_states_per_label[label])
except TypeError:
# for old versions :-/
km = KMeans(k=n_states_per_label[label])
indicator = Y_stacked == label
f = all_feats_stacked[indicator]
km.fit(f)
for feats_sample, y, h in zip(all_feats, Y, H):
indicator_sample = y.ravel() == label
if np.any(indicator_sample):
pred = km.predict(feats_sample[indicator_sample]).astype(np.int)
h.ravel()[indicator_sample] = pred + label_indices[label]
return H
class LatentGraphCRF(GraphCRF):
"""CRF with latent states for variables.
This is also called "hidden dynamics CRF".
For each output variable there is an additional variable which
can encode additional states and interactions.
Parameters
----------
n_labels : int
Number of states of output variables.
n_features : int or None (default=None).
Number of input features per input variable.
``None`` means it is equal to ``n_labels``.
n_states_per_label : int or list (default=2)
Number of latent states associated with each observable state.
Can be either an integer, which means the same number
of hidden states will be used for all observable states, or a list
of integers of length ``n_labels``.
inference_method : string, default="ad3"
Function to call to do inference and loss-augmented inference.
Possible values are:
- 'max-product' for max-product belief propagation.
Recommended for chains and trees. Loopy belief propagation is used for general graphs.
- 'lp' for Linear Programming relaxation using cvxopt.
- 'ad3' for AD3 dual decomposition.
- 'qpbo' for QPBO + alpha expansion.
- 'ogm' for OpenGM inference algorithms.
"""
def __init__(self, n_labels=None, n_features=None, n_states_per_label=2,
inference_method=None):
self.n_labels = n_labels
self.n_states_per_label = n_states_per_label
GraphCRF.__init__(self, n_states=None, n_features=n_features,
inference_method=inference_method)
def _set_size_joint_feature(self):
if None in [self.n_features, self.n_labels]:
return
if isinstance(self.n_states_per_label, numbers.Integral):
# same for all labels
n_states_per_label = np.array([
self.n_states_per_label for i in range(self.n_labels)])
else:
n_states_per_label = np.array(self.n_states_per_label)
if len(n_states_per_label) != self.n_labels:
raise ValueError("states_per_label must be integer"
"or array-like of length n_labels. Got %s"
% str(n_states_per_label))
self.n_states_per_label = n_states_per_label
self.n_states = np.sum(n_states_per_label)
# compute mapping from latent states to labels
ranges = np.cumsum(n_states_per_label)
states_map = np.zeros(self.n_states, dtype=np.int)
for l in range(1, self.n_labels):
states_map[ranges[l - 1]: ranges[l]] = l
self._states_map = states_map
GraphCRF._set_size_joint_feature(self)
def initialize(self, X, Y):
n_features = X[0][0].shape[1]
if self.n_features is None:
self.n_features = n_features
elif self.n_features != n_features:
raise ValueError("Expected %d features, got %d"
% (self.n_features, n_features))
n_labels = len(np.unique(np.hstack([y.ravel() for y in Y])))
if self.n_labels is None:
self.n_labels = n_labels
elif self.n_labels != n_labels:
raise ValueError("Expected %d states, got %d"
% (self.n_labels, n_labels))
self._set_size_joint_feature()
self._set_class_weight()
def label_from_latent(self, h):
return self._states_map[h]
def init_latent(self, X, Y):
# treat all edges the same
edges = [[self._get_edges(x)] for x in X]
features = np.array([self._get_features(x) for x in X])
return kmeans_init(features, Y, edges, n_labels=self.n_labels,
n_states_per_label=self.n_states_per_label)
def loss_augmented_inference(self, x, h, w, relaxed=False,
return_energy=False):
self.inference_calls += 1
self._check_size_w(w)
unary_potentials = self._get_unary_potentials(x, w)
pairwise_potentials = self._get_pairwise_potentials(x, w)
edges = self._get_edges(x)
# do loss-augmentation
for l in np.arange(self.n_states):
# for each class, decrement features
# for loss-augmentation
unary_potentials[self.label_from_latent(h)
!= self.label_from_latent(l), l] += 1.
return inference_dispatch(unary_potentials, pairwise_potentials, edges,
self.inference_method, relaxed=relaxed,
return_energy=return_energy)
def latent(self, x, y, w):
unary_potentials = self._get_unary_potentials(x, w)
# forbid h that is incompatible with y
# by modifying unary params
other_states = self._states_map != y[:, np.newaxis]
max_entry = np.maximum(np.max(unary_potentials), 1)
unary_potentials[other_states] = -1e2 * max_entry
pairwise_potentials = self._get_pairwise_potentials(x, w)
edges = self._get_edges(x)
h = inference_dispatch(unary_potentials, pairwise_potentials, edges,
self.inference_method, relaxed=False)
if (self.label_from_latent(h) != y).any():
print("inconsistent h and y")
h = np.hstack([0, np.cumsum(self.n_states_per_label)])[y]
return h
def loss(self, h, h_hat):
if isinstance(h_hat, tuple):
return self.continuous_loss(h, h_hat[0])
return GraphCRF.loss(self, self.label_from_latent(h),
self.label_from_latent(h_hat))
def continuous_loss(self, y, y_hat):
# continuous version of the loss
# y_hat is the result of linear programming
y_hat_org = np.zeros((y_hat.shape[0], self.n_labels))
for s in range(self.n_states):
y_hat_org[:, self._states_map[s]] += y_hat[:, s]
y_org = self.label_from_latent(y)
return GraphCRF.continuous_loss(self, y_org, y_hat_org)
def base_loss(self, y, y_hat):
if isinstance(y_hat, tuple):
return GraphCRF.continuous_loss(self, y, y_hat)
return GraphCRF.loss(self, y, y_hat)
| bsd-2-clause |
ABoothInTheWild/baseball-research | NBA/NBA_17/nba2017SeasonResults.py | 1 | 2218 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 07 14:28:16 2018
@author: Alexander
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 15:01:09 2018
@author: abooth
"""
from xmlstats import xmlstats
import numpy as np
import pandas as pd
access_token = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
user_agent = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
stats = xmlstats.Xmlstats(access_token, user_agent)
datetest = "20171017"
standingsOnDate = stats.standings(date=datetest, sport="nba")
sum([standing.won for standing in standingsOnDate.standing]) #3
teamIds = np.sort(np.array([standing.team_id for standing in standingsOnDate.standing]))
teamWins = []
teamLosses = []
for teamId in teamIds:
teamWins.extend([standing.won for standing in standingsOnDate.standing if standing.team_id == teamId])
teamLosses.extend([standing.lost for standing in standingsOnDate.standing if standing.team_id == teamId])
df = pd.DataFrame(np.column_stack([teamIds,teamWins, teamLosses]),
columns = ['xmlstatsTeamId','Wins'+datetest, 'Losses'+datetest])
from datetime import timedelta, date
import time
#https://stackoverflow.com/questions/1060279/iterating-through-a-range-of-dates-in-python
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
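# Note: end_date is exclusive, e.g. daterange(date(2017, 10, 17), date(2017, 10, 19)) yields Oct 17 and Oct 18 only.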
seasonResults = pd.DataFrame({'xmlstatsTeamId':teamIds})
start_date = date(2017, 10, 17)
end_date = date(2018, 4, 12)
for single_date in daterange(start_date, end_date):
date_format = single_date.strftime("%Y%m%d")
standingsOnDate = stats.standings(date=date_format, sport="nba")
teamWins = []
teamLosses = []
for teamId in teamIds:
teamWins.extend([standing.won for standing in standingsOnDate.standing if standing.team_id == teamId])
teamLosses.extend([standing.lost for standing in standingsOnDate.standing if standing.team_id == teamId])
seasonResults["Wins_"+date_format] = teamWins
seasonResults["Losses_"+date_format] = teamLosses
print(date_format)
time.sleep(12) #6 requests a minute
seasonResults.to_csv("nba2017SeasonResults.csv", index=False)
| gpl-3.0 |
latticelabs/Mitty | setup.py | 1 | 2920 | from setuptools import setup, find_packages
__version__ = eval(open('mitty/version.py').read().split('=')[1])
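# Assumes mitty/version.py contains a single assignment of the form __version__ = '1.0.0' (illustrative value).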
setup(
name='mitty',
version=__version__,
description='Simulator for genomic data',
author='Seven Bridges Genomics',
author_email='[email protected]',
packages=find_packages(include=['mitty*']),
include_package_data=True,
entry_points={
# Register the built in plugins
'mitty.plugins.sfs': ['double_exp = mitty.plugins.site_frequency.double_exp'],
'mitty.plugins.variants': ['snp = mitty.plugins.variants.snp_plugin',
'delete = mitty.plugins.variants.delete_plugin',
'uniformdel = mitty.plugins.variants.uniform_deletions',
'uniformins = mitty.plugins.variants.uniform_insertions',
'insert = mitty.plugins.variants.insert_plugin',
#'inversion = mitty.plugins.variants.inversion_plugin',
#'low_entropy_insert = mitty.plugins.variants.low_entropy_insert_plugin'
],
'mitty.plugins.population': ['standard = mitty.plugins.population.standard',
'vn = mitty.plugins.population.vn'],
'mitty.plugins.reads': ['simple_sequential = mitty.plugins.reads.simple_sequential_plugin',
'simple_illumina = mitty.plugins.reads.simple_illumina_plugin'],
# Command line scripts
'console_scripts': ['genomes = mitty.genomes:cli',
'reads = mitty.reads:cli',
'perfectbam = mitty.benchmarking.perfectbam:cli',
'badbams = mitty.benchmarking.badbams:cli',
'alindel = mitty.benchmarking.indel_alignment_accuracy:cli',
'benchsummary = mitty.benchmarking.benchmark_summary:cli',
'vcf2pop = mitty.lib.vcf2pop:cli',
'bam2tfq = mitty.benchmarking.convert_bam_to_truth_fastq:cli',
'alindel_plot = mitty.benchmarking.indel_alignment_accuracy_plot:cli',
'misplot = mitty.benchmarking.misalignment_plot:cli',
'acubam = mitty.benchmarking.bam_accuracy:cli',
'migratedb = mitty.util.db_migrate:cli',
'plot_gc_bias = mitty.util.plot_gc_bias:cli',
'splitta = mitty.util.splitta:cli',
'kmers = mitty.util.kmers:cli',
'pybwa = mitty.util.pybwa:cli']
},
install_requires=[
'cython',
'setuptools>=11.0.0',
'numpy>=1.9.0',
'docopt>=0.6.2',
'click>=3.3',
'pysam>=0.8.1',
'h5py>=2.5.0',
'matplotlib>=1.3.0',
'scipy'
],
)
| gpl-2.0 |
justinfinkle/pydiffexp | scripts/osmo_yeast_prep.py | 1 | 2736 | import sys
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def parse_title(title, split_str=" "):
"""
Parse the title of GSE13100 into usable metadata. Should work with pandas apply()
Args:
title:
split_str:
Returns:
"""
split = title.split(split_str)
meta = []
if len(split) == 2:
meta = [split[0], "NA", "NA", split[1]]
elif len(split) == 8:
meta = ['MUT', split[4], int(split[5].replace('t', "")), split[-1]]
elif len(split) == 6:
meta = ['WT', split[2], int(split[3].replace('t', "")), split[-1]]
return pd.Series(meta, index=['condition', 'rna_type', 'time', 'rep'])
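# mi_to_array: decode a pandas MultiIndex into a 2-D array of its level values (one row per frame column).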
def mi_to_array(mi):
labels = np.array(mi.labels)
x = np.array([lev[labels[ii]].values.tolist() for ii, lev in enumerate(mi.levels)])
return x.T
if __name__ == '__main__':
# Change output for easier reading
pd.set_option('display.width', 200)
# Parse R GEOQuery data into a pandas multiindex
data = pd.read_csv("../data/GSE13100/GSE13100_BgCorrected_data.csv", index_col=0)
row_info = pd.read_csv("../data/GSE13100/GSE13100_BgCorrected_rowinfo.csv").fillna("NA")
col_info = pd.read_csv("../data/GSE13100/GSE13100_BgCorrected_colinfo.csv")
# Compile the experiment information
exp_info = col_info.title.apply(parse_title)
exp_info.insert(0, 'geo', col_info.geo_accession.values)
# Make sure the order matches the data
data.sort_index(axis=1, ascending=True, inplace=True)
exp_info.sort_values('geo', ascending=True, inplace=True)
if not np.array_equal(data.columns.values, exp_info.geo.values):
warnings.warn('Data columns and experimental info are not equal. Values may not match labels')
# Make the columns a multiindex
data.columns = pd.MultiIndex.from_arrays(exp_info.values.T.tolist(), names=exp_info.columns.values)
data.sort_index(axis=1, inplace=True)
# Select only the RA data and quantile normalize it. Log2 transform will happen in pydiffexp pipeline
idx = pd.IndexSlice
mRNA = data.loc[:, idx[:, :, 'TR', :, :]]
mRNA.to_pickle("../data/GSE13100/bgcorrected_GSE13100_TR_data.pkl")
sys.exit()
# Plot the distributions of the RA abundance
info_idx = mi_to_array(mRNA.columns)
fig, ax = plt.subplots(6, 7)
ax_list = ax.flatten()
for ii in range(mRNA.shape[1]):
color = 'red' if info_idx[ii, 1] == 'MUT' else 'blue'
title = 'time: {}, rep: {}'.format(info_idx[ii, 3], info_idx[ii, 4])
ax_list[ii].hist(np.log2(mRNA.values[:, ii]+1), color=color)
ax_list[ii].set_title(title)
plt.show()
# Pickle Data
mRNA.to_pickle('../data/GSE13100/log2_bgcorrected_GSE13100_TR_data.pkl')
| gpl-3.0 |
Tong-Chen/scikit-learn | sklearn/manifold/tests/test_isomap.py | 31 | 3991 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
matthewwardrop/formulaic | benchmarks/plot.py | 1 | 1418 | import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv(os.path.join(os.path.dirname(__file__), 'benchmarks.csv')).sort_values('mean')
def grouped_barplot(df, cat, subcat, val, err, subcats=None, **kwargs):
# based on https://stackoverflow.com/a/42033734
categories = df[cat].unique()
x = np.arange(len(categories))
subcats = subcats or df[subcat].unique()
offsets = (np.arange(len(subcats)) - np.arange(len(subcats)).mean()) / (len(subcats) + 1.)
width = np.diff(offsets).mean()
for i, gr in enumerate(subcats):
dfg = df[df[subcat] == gr]
plt.bar(x + offsets[i], dfg[val].values, width=width,
label="{}".format(gr), yerr=dfg[err].values, capsize=6, **kwargs)
plt.xlabel(cat)
plt.ylabel(val)
plt.xticks(x, categories)
plt.legend(title=subcat, loc='center left', bbox_to_anchor=(1, 0.5))
def plot_benchmarks(toolings=None):
plt.figure(dpi=120, figsize=(10, 5))
grouped_barplot(data, cat='formula', subcat='tooling', val='mean', err='stderr', subcats=toolings, log=True)
plt.ylim(1e-2, None)
plt.grid()
plt.gca().set_axisbelow(True)
plt.ylabel("Mean Time (s)")
plt.xlabel("Formula")
plt.tight_layout()
plot_benchmarks(toolings=['formulaic', 'R', 'patsy', 'formulaic_sparse', 'R_sparse'])
plt.savefig(os.path.join(os.path.dirname(__file__), 'benchmarks.png'))
| mit |
ashhher3/scikit-learn | examples/text/document_clustering.py | 31 | 8036 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
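# number of distinct target labels in the selected categories; reused below as the number of clusters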
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
lsa = make_pipeline(svd, Normalizer(copy=False))
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not (opts.n_components or opts.use_hashing):
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
MartinDelzant/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have the following installed:
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
meduz/NeuroTools | examples/matlab_vs_python/smallnet_acml.py | 3 | 4164 | # Created by Eugene M. Izhikevich, 2003 Modified by S. Fusi 2007
# Ported to Python by Eilif Muller, 2008.
#
# Notes:
#
# Requires matplotlib,ipython,numpy>=1.0.3
# On a debian/ubuntu based system:
# $ apt-get install python-matplotlib python-numpy ipython
#
# Start ipython with threaded plotting support:
# $ ipython -pylab
#
# At the resulting prompt, run the file by:
# In [1]: execfile('smallnet.py')
# Modules required
import numpy
import numpy.random as random
import acml_rng
# Bug fix for numpy version 1.0.4
numpy.lib.function_base.any = numpy.any
# For measuring performance
import time
t1 = time.time()
# Excitatory and inhibitory neuron counts
Ne = 1000
Ni = 4
N = Ne+Ni
# Synaptic couplings
Je = 250.0/Ne
Ji = 0.0
# reset depolarization (mV)
reset = 0.0
# refractory period (ms)
refr = 2.5
# Synaptic couplings (mV)
S = numpy.zeros((N,N))
S[:,:Ne] = Je*random.uniform(size=(N,Ne))
S[:,Ne:] = -Ji*random.uniform(size=(N,Ni))
# Connectivity
S[:,:Ne][random.uniform(size=(N,Ne))-0.9<=0.0]=0.0
S[:,Ne:][random.uniform(size=(N,Ni))-0.9<=0.0]=0.0
# (mV/ms) (lambda is a python keyword)
leak = 5.0
dt = 0.05
sdt = numpy.sqrt(dt)
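# sqrt(dt) scales the stochastic input term in the Euler-Maruyama voltage update below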
# Statistics of the background external current
mb = 3.0; sb = 4.0
mue = mb; sigmae=sb
sigmai = 0.0
# State variable v, initial value of 0
v = numpy.zeros(N)
# Refractory period state variable
r = numpy.zeros(N)
# Spike timings in a list
firings = []
spikes = [[] for _ in range(N)]  # independent per-neuron spike lists (avoid aliasing a single list)
print 'mu(nu=5Hz)=%f' % (mb+Ne*Je*.015-leak,)
print 'mu(nu=100Hz)=%f' % (mb+Ne*Je*.1-leak,)
# total duration of the simulation (ms)
duration = 400.0
t = numpy.arange(0.0,400.0,dt)
vt = numpy.zeros_like(t)
t2 = time.time()
print 'Elapsed time is ', str(t2-t1), ' seconds.'
t1 = time.time()
for i,ti in enumerate(t):
# time for a strong external input
if ti>150.0:
mue = 6.5
sigmae = 7.5
# time to restore the initial statistics of the external current
if ti>300.0:
mue = mb
sigmae = sb
Iext = acml_rng.normal(1.0,N)
Iext[:Ne]*=sigmae
Iext[Ne:]*=sigmai
# Which neurons fired?
fired = numpy.nonzero(v>=20.0)[0]
if len(fired)>0:
# Save mean firing rate of the excitatory neurons
v[fired] = reset
r[fired] = refr
# Append spikes to the spike list
for n in fired:
# Spikes are stored by a (neuron, time) pair
# For easy plotting later
firings.append((n,ti))
# and as a list for each neuron
spikes[n].append(ti)
aux = v-dt*(leak-mue)+numpy.sum(S[:,fired],1)+sdt*Iext
else:
aux = v-dt*(leak-mue)+sdt*Iext;
# Neurons not in the refractory period
nr = numpy.nonzero(r<=0)[0]
# Bound voltages above 0.0
v[nr] = numpy.where(aux[nr]>=0.0,aux[nr],0.0)
# Progress refractory variable
nr = numpy.nonzero(r>0)[0]
r[nr]-=dt
# record the voltage trace of the zeroeth neuron
vt[i] = v[0]
t2 = time.time()
print 'Elapsed time is ', str(t2-t1), ' seconds.'
# -------------------------------------------------------------------------
# Plot everything
# -------------------------------------------------------------------------
def myplot():
global firings
t1 = time.time()
figure()
# Membrane potential trace of the zeroeth neuron
subplot(3,1,1)
vt[vt>=20.0]=65.0
plot(t,vt)
ylabel(r'$V-V_{rest}\ \left[\rm{mV}\right]$')
# Raster plot of the spikes of the network
subplot(3,1,2)
myfirings = array(firings)
myfirings_100 = myfirings[myfirings[:,0]<min(100,Ne)]
plot(myfirings_100[:,1],myfirings_100[:,0],'.')
axis([0, duration, 0, min(100,Ne)])
ylabel('Neuron index')
# Mean firing rate of the excitatory population as a function of time
subplot(3,1,3)
# 1 ms resultion of rate histogram
dx = 1.0
x = arange(0,duration,dx)
myfirings_Ne = myfirings[myfirings[:,0]<Ne]
mean_fe,x = numpy.histogram(myfirings_Ne[:,1],x)
plot(x,mean_fe/dx/Ne*1000.0,ls='steps')
ylabel('Hz')
xlabel('time [ms]')
t2 = time.time()
print 'Finished. Elapsed', str(t2-t1), ' seconds.'
#myplot()
| gpl-2.0 |
mmechelke/bayesian_xfel | bxfel/core/structure_factor.py | 1 | 18608 |
import numpy as np
import scipy
import re
import os
import hashlib
import csb
from csb.bio.io.wwpdb import StructureParser
def chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
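# e.g. list(chunks(range(5), 2)) -> [[0, 1], [2, 3], [4]]  (Python 2 range returns a list)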
class ScatteringFactor(object):
"""
Calculates the density in reciprocal space as
F(s) = sum_m f_m(s) exp(-B_m s**2 / 4) exp(i*2pi*s*r)
where f_m(s) is approximated by four Gaussian distributions
and exp(-B_m s**2 / 4) are the thermal fluctuations
g_m(s) = f_m(s) * exp(-B_m s**2 / 4) are precomputed
"""
def __init__(self, structure=None):
if structure is None:
self._atoms = list()
self._bfactor = list()
self._seq = list()
self._elements = list()
else:
self._structure = structure
# For now only non hydrogen atoms
# TODO use hydrogens as well
self._atoms = []
for chain in structure:
for residue in structure[chain]:
for atom in residue:
a = residue[atom]
if not a.name.startswith("H"):
self._atoms.append(residue[atom])
self._seq = []
self._bfactor = []
self._elements = []
for atom in self._atoms:
self._seq.append(atom.element.name)
self._elements.append(atom.element.name)
if atom._bfactor is None:
self._bfactor.append(1.)
else:
self._bfactor.append(atom._bfactor)
self._seq = np.array(self._seq)
self._elements = set(self._elements)
self._bfactor = np.clip(self._bfactor, 1., 100.)
self._atom_type_params = {}
self._read_sf(fn=os.path.expanduser("~/projects/xfel/py/xfel/core/atomsf.lib"))
@classmethod
def from_isd(cls, universe):
obj = cls()
atoms = universe.atoms
for atom in atoms:
element = str(atom.properties['element'].name)
obj._elements.append(element)
obj._atoms.append(atom)
obj._seq.append(element)
try:
obj._bfactor.append(max(1.,atom.properties['bfactor']))
except KeyError:
obj._bfactor.append(1.)
obj._seq = np.array(obj._seq)
obj._bfactor = np.array(obj._bfactor)
obj._elements = set(obj._elements)
obj._bfactor = np.clip(obj._bfactor, 1., 100.)
return obj
def _read_sf(self, fn):
"""
Reads the coefficients for the analytical approximation
to the scattering factors from the CCP4 database
"""
float_pattern = '[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?'
atom_pattern = '[A-Za-z]'
atom_pattern = '[A-Za-z0-9-+]+'
line_pattern = ("({0})\s+({1})"
"\s+({1})\s+({1})"
"\s+({1})\s+({1})"
"\s+({1})\s+({1})"
"\s+({1})\s+({1})").format(atom_pattern,float_pattern)
regex = re.compile(line_pattern)
with open(fn) as file_handle:
for line in file_handle:
if line.startswith("#"):
continue
m = regex.match(line)
atom_name = m.groups()[0]
a1, a2, a3, a4 = m.groups()[1], m.groups()[3], m.groups()[5], m.groups()[7]
b1, b2, b3, b4 = m.groups()[2], m.groups()[4], m.groups()[6], m.groups()[8]
c = m.groups()[9]
a = np.array([a1,a2,a3,a4],np.double)
b = np.array([b1,b2,b3,b4],np.double)
self._atom_type_params[atom_name] = (a,b,float(c))
def _calculate_gm(self, hkl):
"""
calculates the product of the scattering factors and
the Debye-Waller factors
"""
f = np.zeros((len(self._atoms), hkl.shape[0]))
seq = self._seq
bfactor = self._bfactor
s_tols = 0.25 * (hkl**2).sum(-1)
for atom_type in self._elements:
a,b,c = self._atom_type_params[atom_type]
indices = np.where(seq==atom_type)[0]
fx = c + np.dot(np.exp(np.outer(-s_tols,b)),a)
f[indices,:] = fx[:]
f *= np.exp(np.outer(-bfactor,s_tols))
return f
def _calculate_gm_grad(self, hkl):
"""
calculates the gradient of the scattering factor and
Debye-Waller factor
"""
seq = np.array([a.element.name for a in self._atoms])
f = np.zeros((len(self._atoms), hkl.shape[0]))
dfg = np.zeros((len(self._atoms), hkl.shape[0], 3))
bfactors = np.array([a.bfactor for a in self._atoms])
bfactors = np.clip(bfactors, 1., 100.)
s_tols = 0.25 * (hkl**2).sum(-1)
for atom_type in self._elements:
a,b,c = self._atom_type_params[atom_type]
indices = np.where(seq==atom_type)[0]
bfactor = bfactors[indices]
g = np.exp(np.outer(-s_tols,b))
sf = np.dot(g, a) + c
gsf = np.sum(g * a[np.newaxis,:] * b[np.newaxis,:] * -0.5, -1)
dwf = np.exp(-np.outer(bfactor, s_tols))
gdwf = dwf * (bfactor * - 0.5)[:,np.newaxis]
grad = sf * gdwf + gsf * dwf
f[indices,:] = dwf * sf
dfg[indices,:,:] = grad[:,:,np.newaxis] * hkl
return dfg, f
def _calculate_scattering_factors(self, hkl):
"""
creates an approximation of the density in reciprocal space by
four Gaussians and returns the corresponding scattering factors
"""
seq = self._seq
bfactor = self._bfactor
f = np.zeros((len(self._atoms), hkl.shape[0]))
s_tols = 0.25 * (hkl**2).sum(-1)
for atom_type in self._elements:
a,b,c = self._atom_type_params[atom_type]
indices = np.where(seq==atom_type)[0]
fx = c + np.dot(np.exp(np.outer(-s_tols,b)),a)
f[indices,:] = fx[:]
return f
def _calculate_debyewaller_factors(self, hkl):
"""
"""
b = np.array(self._bfactor)
s_tols = 0.25 * (hkl**2).sum(-1)
t = np.exp(np.outer(-b,s_tols))
return t
def grad_s(self, X, hkl):
"""
Gradient with respect to the reciprocal space coordinates
@param X: atomic positions
@param hkl: reciprocal space positions
"""
seq = np.array([atom.element.name for atom in self._atoms])
bfactor = np.array([atom.bfactor for atom in self._atoms])
bfactor = np.clip(bfactor, 1., 100.)
s_tols = 0.25 * (hkl**2).sum(-1)
dw_factors = np.exp(np.outer(-bfactor, s_tols))
def grad_hkl(self, X, hkl):
seq = self._seq
bfactor = self._bfactor
bfactor = np.clip(bfactor, 1., 100.)
dg = np.zeros((len(self._atoms), hkl.shape[0], hkl.shape[1]))
g = np.zeros((len(self._atoms), hkl.shape[0]))
s_tols = 0.25 * (hkl**2).sum(-1)
dw_factors = np.exp(np.outer(-bfactor, s_tols))
ddw_factors = bfactor[:,np.newaxis] * dw_factors
for atom_type in self._elements:
a,b,c = self._atom_type_params[atom_type]
indices = np.where(seq==atom_type)[0]
inner_exp = np.exp(np.outer(-s_tols,b))
sf = np.dot(inner_exp, a) + c
dsf = np.dot(inner_exp, a*b)
gx = dsf * dw_factors[indices] + sf * ddw_factors[indices]
g[indices,:] = sf[:] * dw_factors[indices]
a = np.einsum('ab,bc->abc',gx, -0.5*hkl)
dg[indices,:,:] = a
phase = np.dot((2 * np.pi * X),hkl.T)
fx= np.sum(g * np.exp(1j * phase),0)
g2 = np.einsum('ba,bc->bac',g , 2 * np.pi * 1j *X)
dfx = np.einsum("abc,ab->bc",dg + g2,np.exp(1j * phase))
return dfx, fx
def calculate_structure_factors(self, X, hkl):
"""
TODO do this calculation in chunks to save space
"""
F = np.zeros(hkl.shape[0], dtype=np.complex128)
lim = hkl.shape[0]
step = 512
for i in range(0,lim,step):
_hkl = hkl[i:i+step]
f = self._calculate_scattering_factors(_hkl)
f *= self._calculate_debyewaller_factors(_hkl)
phase = np.dot((2 * np.pi * X),_hkl.T)
F[i:i+step] = np.sum(f * np.exp(1j * phase),0)
return F
def calculate_structure_factor_gradient(self, X, hkl):
"""
calculates the gradient of the fourier density
with respect to the atomic coordinates
"""
G = np.zeros(hkl.shape, dtype=np.complex128)
lim = hkl.shape[0]
F = np.zeros(hkl.shape[0], dtype=np.complex128)
step = 512
for i in range(0, lim, step):
_hkl = hkl[i:i+step]
dfg, f = self._calculate_gm_grad(_hkl)
phase = np.exp(1j * np.dot((2 * np.pi * X), _hkl.T))
gphase = phase[:, :, np.newaxis] *\
1j * 2 * np.pi * X[:, np.newaxis, :]
grad = dfg * phase[:, :, np.newaxis]
grad += f[:, :, np.newaxis] * gphase
F[i: i+step] = np.sum(f * phase, 0)
G[i: i+step, :] = np.sum(grad, 0)
return G, F
def calculate_structure_factor_gradient2(self, X):
"""
calculates the gradient of the fourier density
with respect to the atomic coordinates
"""
g_m = self._calculate_scattering_factors(self._hkl)
g_m *= self._calculate_debyewaller_factors(self._hkl)
phase = np.dot((2 * np.pi * X),self._hkl.T)
fx = (g_m *1j * 2 * np.pi * np.exp(1j * phase))
dF_dx = np.array([np.multiply.outer(s,fx_s) for s,fx_s in
zip(fx.T,self._hkl)])
return dF_dx
def calculate_intensity_gradient(self, X):
"""
calculates the gradient of the intensity with respect to the atomic coordinates dI/dx
"""
g_m = self._calculate_scattering_factors(self._hkl)
g_m *= self._calculate_debyewaller_factors(self._hkl)
phase = np.dot((2 * np.pi * X),self._hkl.T)
F = np.sum(g_m * np.exp(1j * phase),0)
fx = (g_m *1j * 2 * np.pi * np.exp(1j * phase))
dF_dx = np.array([np.multiply.outer(s,fx_s) for s,fx_s in zip(fx.T,self._hkl)])
dI_dx = np.conj(F[:,np.newaxis,np.newaxis]) * dF_dx + F[:,np.newaxis,np.newaxis] * np.conj(dF_dx)
return dI_dx
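# A minimal usage sketch for ScatteringFactor (mirrors the __main__ block
# further below; assumes a parsed CSB structure object and an hkl grid):
#
#     sf = ScatteringFactor(structure)
#     X = np.array([a.vector for a in sf._atoms])
#     X -= X.mean(0)
#     F = sf.calculate_structure_factors(X, hkl)              # complex F(hkl)
#     G, F2 = sf.calculate_structure_factor_gradient(X, hkl)  # dF/dhkl and F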
class Correlations(object):
def __init__(self, angles, nbins):
self._bin_angles(angles, nbins)
def _bin_angles(self, angles, nbins):
pass
def calculate_from_density(self, rho):
pass
class OnePhotonCorrelations(Correlations):
def _bin_angles(self, angles, nbins):
d = np.sqrt(np.sum(angles**2,-1))
lower = d.min()
upper = d.max()
axes = np.linspace(lower, upper, nbins)
indices = np.argsort(d)
bins = [[] for x in xrange(nbins)]
j = 0
for i in range(0,axes.shape[0]):
right_edge = axes[i]
print right_edge, i
while d[indices[j]] < right_edge:
bins[i-1].append(indices[j])
j += 1
bins[-1] = indices[j:].tolist()
self._axes = axes
self._bins = bins
def calculate_from_density(self, rho):
I = np.asarray([np.sum(rho.take(bin))
for bin in self._bins])
return I
class CachedScatteringFactor(ScatteringFactor):
def __init__(self, structure):
super(CachedScatteringFactor,self).__init__(structure)
self._f = None
def calculate_structure_factors(self, X, hkl):
if self._f is None:
print "calc f"
self._f = self._calculate_scattering_factors(hkl)
self._f *= self._calculate_debyewaller_factors(hkl)
else:
print "using cached f"
phase = np.dot((-2 * np.pi * X),hkl.T)
F = np.sum(self._f * np.exp(1j * phase),0)
return F
class SphericalSection(object):
def get(self,
n_points=20, radius=1.0,
polar_min=0., polar_max=np.pi,
azimut_min=0., azimut_max=2*np.pi):
theta = np.linspace(polar_min,polar_max, n_points)
phi = np.linspace(azimut_min, azimut_max, n_points)
x = np.outer(radius*np.sin(theta), np.cos(phi))
y = np.outer(radius*np.sin(theta), np.sin(phi))
z = np.outer(radius*np.cos(theta), np.ones(n_points))
return [x,y,z]
class EwaldSphereProjection(object):
def get_indices(self, wavelength, x,y,z):
"""
        projects detector points onto an Ewald sphere
x, y, z are the pixel coordinates
x, y, z are all M x N matrices, where M x N is the detector size.
It is assumed that the detector is perpendicular to the Z-axis
"""
d = np.sqrt(x**2 + y**2 + z**2)
h = 1/wavelength * (x/d)
k = 1/wavelength * (y/d)
l = 1/wavelength * (z/d)
return h,k,l
def project(self, structure_factor, angle):
pass
if __name__ == "__main__":
import matplotlib
matplotlib.interactive(True)
import time
import os
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import pylab
from pylab import *
from csb.bio.io.wwpdb import StructureParser
from csb.bio.io.wwpdb import get
from xfel.core.density import Density
#structure = get("1L2Y")
#structure = StructureParser(os.path.expanduser("~/data/pdb/caffeine2.pdb")).parse()
#fn = os.path.expanduser("~/gsh.pdb")
structure = StructureParser(os.path.expanduser("~/projects/xfel/data/GTT_short.pdb")).parse()
x = np.linspace(-1.,1.,11)
h, k, l = np.meshgrid(x,x,x)
hkl = np.vstack([item.ravel() for item in [h,k,l]]).T
hkl = np.ascontiguousarray(hkl)
bf = np.random.random()
def bfactors(hkl, bf):
return np.exp(-0.25 * bf * (hkl**2).sum(-1))
def bfactor_grad(hkl):
return np.exp(-0.25 * bf * (hkl**2).sum(-1))[:,np.newaxis] * -0.5 * hkl * bf
a = np.random.random(4,)
b = np.random.random(4,)
c = 0.3
def sf(hkl,a,b,c):
s_tols = -0.25 * (hkl**2).sum(-1)
inner_exp = np.exp(np.outer(-s_tols,b))
sf = np.dot(inner_exp, a) + c
return sf
def sf_grad(hkl, a, b, c):
s_tols = -0.25 * (hkl**2).sum(-1)
sf = np.exp(np.outer(-s_tols,b)) * a[np.newaxis,:] * b[np.newaxis,:] * 0.5
return sf.sum(-1)[:,np.newaxis] * hkl
def gm(hkl, a, b, c, bf):
s_tols = -0.25 * (hkl**2).sum(-1)
inner_exp = np.exp(np.outer(-s_tols,b))
sf = np.dot(inner_exp, a) + c
bf = np.exp(bf * s_tols)
return sf * bf
def gm_grad(hkl, a, b, c, bf):
s_tols = -0.25 * (hkl**2).sum(-1)
g = np.exp(np.outer(-s_tols,b))
sf = np.dot(g, a) + c
gsf = np.sum(g * a[np.newaxis,:] * b[np.newaxis,:] * 0.5, -1)
bb = np.exp(bf * s_tols)
gb = bb * bf * - 0.5
grad = sf * gb + gsf * bb
return grad[:,np.newaxis] * hkl
sf = ScatteringFactor(structure)
X = np.array([a.vector for a in sf._atoms])
X -= X.mean(0)
if False:
n = 10
X = X[:n]
sf._seq = sf._seq[:n]
sf._elements = ['N', 'C']
sf._atoms = sf._atoms[:n]
sf._bfactor = sf._bfactor[:n]
dgm, f1 = sf._calculate_gm_grad(hkl)
f = sf._calculate_scattering_factors(hkl)
f *= sf._calculate_debyewaller_factors(hkl)
scatter(f.real.ravel(), f1.real.ravel())
dgm2 = dgm * 0.0
eps = 1e-7
for i in range(3):
hkl[:, i] += eps
fprime = sf._calculate_scattering_factors(hkl)
fprime *= sf._calculate_debyewaller_factors(hkl)
dgm2[:, :, i] = (fprime - f)/eps
hkl[:, i] -= eps
figure()
scatter(dgm.real.ravel(), dgm2.real.ravel())
G, FF = sf.calculate_structure_factor_gradient(X, hkl)
G2 = G * 0.0
F = sf.calculate_structure_factors(X, hkl)
eps = 1e-7
for i in range(3):
hkl[:,i] += eps
G2[:,i] = (sf.calculate_structure_factors(X, hkl) - F)/eps
hkl[:,i] -= eps
figure()
scatter(G.real.ravel(), G2.real.ravel())
scatter(G.imag.ravel(), G2.imag.ravel())
figure()
scatter(F.real.ravel(), FF.real.ravel())
show()
t0 = time.time()
G, FF = sf.calculate_structure_factor_gradient(X, hkl)
print "hkl gradient: {} \n".format(time.time() - t0)
t0 = time.time()
g = sf.grad_hkl(X, hkl)
print "X gradient: {} \n".format(time.time() - t0)
raise
sf = ScatteringFactor(structure)
sf._hkl = hkl
X = np.array([a.vector for a in sf._atoms])
X -= X.mean(0)
g,g2 = sf.grad_hkl(X, hkl)
F = sf.calculate_structure_factors(X,hkl)
gi= sf.calculate_intensity_gradient(X)
raise
F = F.reshape(h.shape)
rho = np.fft.fftshift(np.abs(np.fft.ifftn(F,[250,250,250])))
grid = Density.from_voxels(np.abs(F)**2,1.)
grid.write_gaussian(os.path.expanduser("~/mr.cube"))
raise
grid = Density.from_voxels(rho,1.)
grid.write_gaussian(os.path.expanduser("~/mr2.cube"))
raise
if True:
fig = pylab.figure()
ax = fig.add_subplot(131)
xi, yi= np.mgrid[0:500:1,0:500:1]
ax.contour(xi,yi, rho.sum(0), 30)
pylab.show()
ax = fig.add_subplot(132)
xi, yi= np.mgrid[0:500:1,0:500:1]
ax.contour(xi,yi, rho.sum(1), 30)
pylab.show()
ax = fig.add_subplot(133)
xi, yi= np.mgrid[0:500:1,0:500:1]
ax.contour(xi,yi, rho.sum(2), 30)
pylab.show()
raise
from mayavi import mlab
xi, yi, zi = np.mgrid[0:500:1,0:500:1,0:500:1]
obj = mlab.contour3d(rho, contours=10, transparent=True)
mlab.show()
from mayavi import mlab
obj = mlab.contour3d(np.abs(F), contours=10, transparent=True)
mlab.show()
raise
for ii in range(0,F.shape[0],25):
fig = pylab.figure()
ax = fig.add_subplot(111)
xi, yi= np.mgrid[0:500:1,0:500:1]
ax.contour(xi,yi,rho[ii,:,:], 30)
pylab.show()
I = np.abs(F)**2
fig = pylab.figure()
ax = fig.add_subplot(111)
nx, ny, nz = I.shape
xi, yi= np.mgrid[0:nx:1,0:ny:1]
ax.contour(xi,yi, I.sum(2), 15)
| mit |
bioinformatics-centre/AsmVar | src/AsmvarVarScore/FeatureToScore2.py | 2 | 12476 | """
========================================================
Statistics of the SV calls after the AGE process
========================================================
Author: Shujia Huang & Siyang Liu
Date : 2014-03-07 0idx:54:15
"""
import sys
import re
import os
import string
import numpy as np
import matplotlib.pyplot as plt
def DrawFig(figureFile, distance, properDepth, imProperDepth, nr, aa, bb, mscore, misprob, aveIden, inbCoe):
fig = plt.figure(num=None, figsize=(16, 30), facecolor='w', edgecolor='k')
title = ['Distance distribution', 'NRatio', 'Perfect Depth', 'Imperfect depth', '', '', '']
ylabel = ['The position of breakpoint', 'N Ratio of varints', \
'Perfect Depth', 'Both ImPerfect Depth', 'InbreedCoefficient', \
'Map score', 'Mismapping Probability' , 'Average Identity', \
'ProperReadDepth', 'ImProperReadDepth']
al = 0.5
for i, data in enumerate ([distance, nr, aa, bb, inbCoe, mscore, misprob, aveIden, properDepth, imProperDepth ]):
plt.subplot(10,2,2 * i + 1)
#plt.title(title[i], fontsize=16)
P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=al, linewidths = 0.1, label = 'Negative(%d)'%len(data[:,1][N])) # Negative
plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=al, linewidths = 0.1, label = 'Positive(%d)'%len(data[:,1][P])) # Positive
plt.scatter(data[:,1][X], data[:,2][X], marker='*', c = 'Y', alpha=al, linewidths = 0.1, label = 'Positive->Negative(%d)' % len(data[:,1][X])) # Positive->Negative
plt.legend(loc='upper right')
plt.xlim(-10, 50)
if i == 9: plt.xlabel('Score', fontsize=16)
plt.ylabel(ylabel[i], fontsize=16)
plt.subplot(10, 2, 2*i + 2)
NEW = data[:,0] == 0
good = data[:,1][NEW] >= VQ_CUTOFF
bad = data[:,1][NEW] < VQ_CUTOFF
plt.scatter(data[:,1][NEW][bad], data[:,2][NEW][bad], marker='o', c = 'm', alpha=al, linewidths = 0.1, label = 'bad(%d)' % len(data[:,1][NEW][bad])) # bad
plt.scatter(data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=al, linewidths = 0.1, label = 'good(%d)' % len(data[:,1][NEW][good])) # good
plt.xlim(-3, 30)
plt.legend(loc='upper right')
if i == 9: plt.xlabel('Score', fontsize=16)
fig.savefig(figureFile + '.png')
#fig.savefig(figureFile + '.pdf')
def DrawPhredScale (figureFile, phredScal):
fig = plt.figure()
ylabel = ['Phred Scale']
for i, data in enumerate ([phredScal ]):
plt.subplot(2, 1, 2 * i + 1)
P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=0.5, linewidths = 0, label = 'Negative(%d)'%len(data[:,1][N])) # Negative
plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=0.5, linewidths = 0, label = 'Positive(%d)'%len(data[:,1][P])) # Positive
plt.scatter(data[:,1][X], data[:,2][X], marker='o', c = 'Y', alpha=0.5, linewidths = 0, label = 'Positive->Negative(%d)' % len(data[:,1][X])) # Positive->Negative
plt.legend(loc='upper left')
plt.ylabel(ylabel[i], fontsize=16)
plt.subplot(2, 1, 2*i + 2)
NEW = data[:,0] == 0
good = data[:,1][NEW] >= VQ_CUTOFF
bad = data[:,1][NEW] < VQ_CUTOFF
plt.scatter(data[:,1][NEW][bad] , data[:,2][NEW][bad] , marker='o', c = 'm', alpha=0.5, linewidths = 0, label = 'bad(%d)' % len(data[:,1][NEW][bad])) # bad
plt.scatter(data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=0.5, linewidths = 0, label = 'good(%d)' % len(data[:,1][NEW][good])) # good
plt.legend(loc='upper left')
plt.xlabel('Score' , fontsize=16)
plt.ylabel(ylabel[i], fontsize=16)
fig.savefig(figureFile + '.png')
#fig.savefig(figureFile + '.pdf')
def Accum (data, isBig = False):
tmpD= data
k = sorted(tmpD.keys(), key = lambda d: float(d))
dat = []
for i in range(len(k)):
if isBig:
for j in range(i,len(k)): tmpD[k[i]][1] += tmpD[k[j]][0]
else:
for j in range(i+1): tmpD[k[i]][1] += tmpD[k[j]][0]
dat.append([float(k[i]), float(tmpD[k[i]][0]), float(tmpD[k[i]][1]) ])
return dat
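# Worked example for Accum (a sketch): with
#     data = {'1': [2, 0], '2': [3, 0], '3': [1, 0]}
# Accum(data) returns [[1.0, 2.0, 2.0], [2.0, 3.0, 5.0], [3.0, 1.0, 6.0]],
# i.e. each key with its own count plus the running (left-to-right) cumulative
# count; with isBig=True the accumulation runs from the largest key downwards.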
def SampleFaLen (faLenFile):
if faLenFile[-3:] == '.gz': I = os.popen('gzip -dc %s' % faLenFile)
else : I = open(faLenFile)
data = {}
while 1:
lines = I.readlines (100000)
if not lines: break
for line in lines:
col = line.strip('\n').split()
data[col[0]] = string.atoi(col[1])
I.close()
return data
def LoadFaLen (faLenLstFile):
data = {}
I = open (faLenLstFile)
for line in I.readlines():
        if len(line.strip('\n').split()) != 2: raise ValueError('[ERROR] The format of the Fa length list may not be right. It should be: "sampleId faLenFile", but found', line)
sampleId, fileName = line.strip('\n').split()
if sampleId not in data: data[sampleId] = {}
data[sampleId] = SampleFaLen(fileName)
I.close()
return data
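# Input layout assumed by the two loaders above: the list file passed to
# LoadFaLen has one "sampleId faLenFile" pair per line, and each faLenFile
# (optionally gzip-compressed) has one "scaffoldName length" pair per line.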
def main (argv):
qFaLen = LoadFaLen(argv[1])
figPrefix = 'test'
if len(argv) > 2: figPrefix = argv[2]
if argv[0][-3:] == '.gz':
I = os.popen('gzip -dc %s' % argv[0])
else:
I = open (argv[0])
s, annotations, mark = set(), [], []
    print '#Flag\tVQ\tInbCoeff\tPosition\tProperDepth\tImProperDepth\tN-Ratio\tAlternatePerfect\tBothImperfect'
while 1: # VCF format
lines = I.readlines(100000)
if not lines: break
for line in lines:
col = line.strip('\n').split()
if re.search(r'^#CHROM', line): col2sam = { i+9:sam for i,sam in enumerate(col[9:]) }
if re.search(r'^#', line): continue
key = col[0] + ':' + col[1]
if key in s: continue
s.add(key)
#if re.search(r'^PASS', col[6]): continue
#if not re.search(r'_TRAIN_SITE', col[7]): continue
#if not re.search(r'^PASS', col[6]): continue
isbad = False
for i, sample in enumerate (col[9:]):
if re.search(r'NULL', sample): isbad = True
if isbad: continue
fmat = { k:i for i,k in enumerate(col[8].split(':')) }
if 'VS' not in fmat or 'QR' not in fmat: continue
if 'AGE' not in fmat: continue
if len(annotations) == 0: annotations = [[] for _ in col[9:] ]
vcfinfo = { d.split('=')[0]: d.split('=')[1] for d in col[7].split(';') if len(d.split('=')) == 2 }
vq = string.atof(vcfinfo['VQ'])
inb = string.atof(vcfinfo['InbCoeff'])
if ('POSITIVE_TRAIN_SITE' in col[7]) and ('NEGATIVE_TRAIN_SITE' in col[7]):
mark.append([3, vq, inb])
elif 'POSITIVE_TRAIN_SITE' in col[7]:
mark.append([1, vq, inb])
elif 'NEGATIVE_TRAIN_SITE' in col[7]:
mark.append([2, vq, inb])
else:
mark.append([0, vq, inb])
# GT:AA:AE:FN:MIP:MS:QR:RR:VS:VT
for i, sample in enumerate (col[9:]):
sampleId = col2sam[9+i]
field = sample.split(':')
if sample == './.' or len(field) < fmat['QR'] + 1 or field[fmat['QR']].split(',')[-1] == '.' or field[fmat['AS']] == '.':
annotations[i].append([0, 0, 0, 0, 0, 0, 0, 0, 0])
continue
qr = field[fmat['QR']].split(',')[-1]
qregion = np.array(qr.split('-'))
if len(qregion) > 3: qId = qregion[0] + '-' + qregion[1]
else : qId = qregion[0]
qSta = string.atoi(qregion[-2])
qEnd = string.atoi(qregion[-1])
if sampleId not in qFaLen:
                raise ValueError ('[ERROR] The sample name %s (in vcf) is not in the Fa length list.' % sampleId)
if qId not in qFaLen[sampleId]:
                raise ValueError ('[ERROR] %s is not found in the Fa length file of sample %s.' % (qId, sampleId))
qSta= int(qSta * 100 / qFaLen[sampleId][qId] + 0.5)
qEnd= int(qEnd * 100 / qFaLen[sampleId][qId] + 0.5)
if qSta > 100 or qEnd > 100:
raise ValueError ('[ERROR] Query size Overflow! sample: %s; scaffold: %s' % (sampleId, qId))
leg = qSta
if 100 - qEnd < qSta: leg = qEnd
nn = string.atof(sample.split(':')[fmat['NR']])
n = round(1000 * nn) / 10.0 # N ratio
alt = string.atoi(sample.split(':')[fmat['AA']].split(',')[1]) # Alternate perfect
bot = string.atoi(sample.split(':')[fmat['AA']].split(',')[3]) # Both imperfect
pro, ipr = [0,0]
ms = string.atoi(sample.split(':')[fmat['AS']]) # Mapping score
mip = string.atof(sample.split(':')[fmat['MS']]) # Mismapping probability
if sample.split(':')[fmat['AGE']] != '.':
aveI = string.atoi(sample.split(':')[fmat['AGE']].split(',')[3]) # ave_iden in AGE
else:
aveI = 0
annotations[i].append([leg, n, alt, bot, pro, ipr, ms, mip, aveI])
I.close()
print >> sys.stderr, '# Number of Positions: %d' % len(mark)
if len(mark) != len(annotations[0]):
        raise ValueError ('[ERROR] Sizes do not match: mark=%d, annotations=%d!' % (len(mark), len(annotations[0])))
annotations = np.array(annotations);
sampleNum = len(annotations)
data, distance, properDepth, imProperDepth, nr, aa, bb, mscore, misprob, aveIden = [],[],[],[],[],[],[],[],[],[]
inbreedCoe, phredScal = [], []
for i in range(len(annotations[0])):
anno = np.array([annotations[s][i] for s in range(sampleNum) if len(annotations[s][i][annotations[s][i]!=0]) > 0 ]) # each person in the same position
score = np.array([annotations[s][i][-3] for s in range(sampleNum) if annotations[s][i][-3] > 0 ])
msprob = np.array([annotations[s][i][-2] for s in range(sampleNum) if annotations[s][i][-3] > 0 ])
phred = -10 * np.log10(1.0 - score.sum() / np.sum(score/(1.0 - msprob))) # Phred scale
if len(anno) == 0: continue
leg, n, alt, bot, pro,ipr, ms, mip, aveI = np.median(anno, axis=0)
distance.append ([mark[i][0], mark[i][1], leg ])
properDepth.append ([mark[i][0], mark[i][1], pro ])
imProperDepth.append ([mark[i][0], mark[i][1], ipr ])
nr.append ([mark[i][0], mark[i][1], n ])
aa.append ([mark[i][0], mark[i][1], alt ])
bb.append ([mark[i][0], mark[i][1], bot ])
mscore.append ([mark[i][0], mark[i][1], ms ])
misprob.append ([mark[i][0], mark[i][1], mip ])
aveIden.append ([mark[i][0], mark[i][1], aveI])
phredScal.append ([mark[i][0], mark[i][1], phred])
inbreedCoe.append ([mark[i][0], mark[i][1], mark[i][2]])
data.append([leg, alt, pro, ipr, n, bot])
print mark[i][0], mark[i][1], mark[i][2], '\t', leg, '\t', pro, '\t', ipr,'\t', n, '\t', alt, '\t', bot
data = np.array(data)
    print >> sys.stderr, '\nPosition\tAlternatePerfect\tProperDepth\tImProperDepth\tNRatio\tBothImperfect'
print >> sys.stderr, 'Means: ', data.mean(axis=0), '\nstd : ', data.std(axis=0), '\nMedian: ', np.median(data, axis=0)
print >> sys.stderr, '25 Percentile:', np.percentile(data, 25,axis=0), '\n50 Percentile:', np.percentile(data, 50,axis=0), '\n75 Percentile:', np.percentile(data, 75,axis=0)
DrawFig(figPrefix, \
np.array (distance ), \
np.array (properDepth ), \
np.array (imProperDepth), \
np.array (nr ), \
np.array (aa ), \
np.array (bb ), \
np.array (mscore ), \
np.array (misprob ), \
np.array (aveIden ), \
np.array (inbreedCoe ) )
DrawPhredScale (figPrefix + '.phred', np.array(phredScal))
if __name__ == '__main__':
VQ_CUTOFF = 3.0
main(sys.argv[1:])
| mit |
camallen/aggregation | experimental/condor/animal_EM.py | 2 | 7334 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import cPickle as pickle
import bisect
import csv
import matplotlib.pyplot as plt
import random
import math
import urllib
import matplotlib.cbook as cbook
def index(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect.bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError
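# For example, index([1, 3, 5], 3) returns 1, while index([1, 3, 5], 4) raises
# ValueError because only exact matches are accepted.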
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/classifier")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
sys.path.append("/home/greg/github/reduction/experimental/classifier")
#from divisiveDBSCAN import DivisiveDBSCAN
from divisiveDBSCAN_multi import DivisiveDBSCAN
from divisiveKmeans import DivisiveKmeans
from iterativeEM import IterativeEM
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
client = pymongo.MongoClient()
db = client['condor_2014-11-23']
classification_collection = db["condor_classifications"]
subject_collection = db["condor_subjects"]
big_userList = []
big_subjectList = []
animal_count = 0
f = open(base_directory+"/Databases/condor_ibcc.csv","wb")
f.write("a,b,c\n")
alreadyDone = []
animals_in_image = {}
animal_index = -1
global_user_list = []
animal_to_image = []
zooniverse_list = []
condor_votes = {}
animal_votes = {}
#subject_vote = {}
results = []
to_sample_from = list(subject_collection.find({"state":"complete"}))
to_sample_from2 = list(subject_collection.find({"classification_count":1,"state":"active"}))
votes = []
sample = random.sample(to_sample_from,100)
#sample.extend(random.sample(to_sample_from2,1000))
# for subject_index,subject in enumerate(sample):
# print "== " + str(subject_index)
# zooniverse_id = subject["zooniverse_id"]
# for user_index,classification in enumerate(classification_collection.find({"subjects.zooniverse_id":zooniverse_id})):
# if "user_name" in classification:
# user = classification["user_name"]
# else:
# user = classification["user_ip"]
#
# try:
# tt = index(big_userList,user)
# except ValueError:
# bisect.insort(big_userList,user)
for subject_index,subject in enumerate(sample):
print subject_index
zooniverse_id = subject["zooniverse_id"]
annotation_list = []
user_list = []
animal_list = []
#local_users = []
for user_index,classification in enumerate(classification_collection.find({"subjects.zooniverse_id":zooniverse_id})):
try:
mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
markings = classification["annotations"][mark_index].values()[0]
if "user_name" in classification:
user = classification["user_name"]
else:
user = classification["user_ip"]
found_condor = False
for animal in markings.values():
scale = 1.875
x = scale*float(animal["x"])
y = scale*float(animal["y"])
animal_type = animal["animal"]
if not(animal_type in ["carcassOrScale","carcass"]):
annotation_list.append((x,y))
#print annotation_list
user_list.append(user)
animal_list.append(animal_type)
if not(user in global_user_list):
global_user_list.append(user)
#local_users.append(user)
if animal_type == "condor":
found_condor = True
except (ValueError,KeyError):
pass
    #if there were any markings on the image, use divisive kmeans to cluster the points so that each
    #cluster represents a single animal
if annotation_list != []:
user_identified,clusters = DivisiveKmeans(3).fit2(annotation_list,user_list,debug=True)
#fix split clusters if necessary
if user_identified != []:
user_identified,clusters = DivisiveKmeans(3).__fix__(user_identified,clusters,annotation_list,user_list,200)
for center,c in zip(user_identified,clusters):
animal_index += 1
#animal_votes.append([])
animal_to_image.append(zooniverse_id)
if not(zooniverse_id in animals_in_image):
animals_in_image[zooniverse_id] = [animal_index]
else:
animals_in_image[zooniverse_id].append(animal_index)
results.append((zooniverse_id,center))
for pt in c:
pt_index = annotation_list.index(pt)
user_index = global_user_list.index(user_list[pt_index])
animal_type = animal_list[annotation_list.index(pt)]
if animal_type == "condor":
votes.append((user_index,animal_index,1))
if not(animal_index in animal_votes):
animal_votes[animal_index] = [1]
else:
animal_votes[animal_index].append(1)
else:
votes.append((user_index,animal_index,0))
if not(animal_index in animal_votes):
animal_votes[animal_index] = [0]
else:
animal_votes[animal_index].append(0)
print "=====---"
#print votes
classify = IterativeEM()
classify.__classify__(votes)
most_likely = classify.__getMostLikely__()
estimates = classify.__getEstimates__()
X = []
Y = []
X2 = []
Y2 = []
#for subject_index,zooniverse_id in enumerate(big_subjectList):
for ii in range(animal_index):
x = np.mean(animal_votes[ii])
y = estimates[ii][1]
X.append(x)
Y.append(y)
if math.fabs(x-y) > 0.3:
zooniverse_id,(centerX,centerY) = results[ii]
print x,y
subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
url = subject["location"]["standard"]
slash_index = url.rfind("/")
object_id = url[slash_index+1:]
if not(os.path.isfile(base_directory+"/Databases/condors/images/"+object_id)):
urllib.urlretrieve (url, base_directory+"/Databases/condors/images/"+object_id)
image_file = cbook.get_sample_data(base_directory+"/Databases/condors/images/"+object_id)
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
plt.plot([centerX,],[centerY,],'o')
plt.show()
# #if ((x < 0.5) and (y > 0.5)) or ((x > 0.5) and (y < 0.5)):
# subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
# print x,y
# print subject["location"]["standard"]
# #print most_likely[subject_index],estimates[subject_index],np.mean(subject_vote[zooniverse_id])
#else:
# print estimates[subject_index],0
plt.plot(X,Y,'.',color="blue")
plt.plot(X2,Y2,'.',color="red")
plt.xlim((-0.05,1.05))
plt.ylim((-0.05,1.05))
plt.show() | apache-2.0 |
Titan-C/scikit-learn | examples/linear_model/plot_ols.py | 74 | 2047 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# Make predictions using the testing set
diabetes_y_pred = regr.predict(diabetes_X_test)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% mean_squared_error(diabetes_y_test, diabetes_y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(diabetes_y_test, diabetes_y_pred))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, diabetes_y_pred, color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
sillvan/hyperspy | doc/user_guide/conf.py | 2 | 9753 | # -*- coding: utf-8 -*-
#
# HyperSpy User Guide documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 29 15:14:48 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.append('../../')
sys.path.append(os.path.abspath('../sphinxext'))
from hyperspy import Release
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'gen_rst',
'numpydoc',
'matplotlib.sphinxext.only_directives',
'sphinx.ext.intersphinx',
'sphinx.ext.pngmath',
'sphinx.ext.autosummary',
'ipython_console_highlighting'] # , 'rst2pdf.pdfbuilder']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'HyperSpy User Guide [Draft]'
copyright = u'2011-2013, The HyperSpy Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = Release.version
# The full version, including alpha/beta/rc tags.
release = Release.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "HyperSpy User Guide v%s" % Release.version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'HyperSpyUserGuidedoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'HyperSpyUserGuide.tex', u'HyperSpy User Guide',
u'The HyperSpy Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/hyperspy_logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hyperspyuserguide', u'HyperSpy User Guide Documentation',
[u'The HyperSpy Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'HyperSpyUserGuide', u'HyperSpy User Guide Documentation',
u'The HyperSpy Developers', 'HyperSpyUserGuide', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'HyperSpy User Guide'
epub_author = u'The HyperSpy Developers'
epub_publisher = u'The HyperSpy Developers'
epub_copyright = u'2011-2013, The HyperSpy Developers'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'hyperspyweb': ('http://hyperspy.org/', None)}
| gpl-3.0 |
danmackinlay/AutoGP | experiments/sarcos.py | 2 | 3138 | import os
import subprocess
import sklearn.cluster
import numpy as np
import autogp
from autogp import likelihoods
from autogp import kernels
import tensorflow as tf
from autogp import datasets
from autogp import losses
from autogp import util
import pandas
import scipy.io as sio
DATA_DIR = "experiments/data/"
TRAIN_PATH = DATA_DIR + "sarcos_inv.mat"
TEST_PATH = DATA_DIR + "sarcos_inv_test"
def init_z(train_inputs, num_inducing):
# Initialize inducing points using clustering.
mini_batch = sklearn.cluster.MiniBatchKMeans(num_inducing)
cluster_indices = mini_batch.fit_predict(train_inputs)
inducing_locations = mini_batch.cluster_centers_
return inducing_locations
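# init_z returns a (num_inducing, n_features) array of k-means cluster centres;
# it is used below to seed the inducing inputs Z of the sparse GP.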
def get_sarcos_data():
print "Getting sarcos data ..."
os.chdir('experiments/data')
subprocess.call(["./get_sarcos_data.sh"])
os.chdir("../../")
print "done"
def sarcos_all_joints_data():
"""
Loads and returns data of SARCOS dataset for all joints.
Returns
-------
    data : dict
        A dictionary containing ``train_inputs``, ``train_outputs``,
        ``test_inputs``, ``test_outputs``, and ``id``.
"""
train = sio.loadmat(TRAIN_PATH)['sarcos_inv']
test = sio.loadmat(TEST_PATH)['sarcos_inv_test']
return{
'train_inputs': train[:, :21],
'train_outputs': train[:, 21:],
'test_inputs': test[:, :21],
'test_outputs': test[:, 21:],
'id': 0
}
if __name__ == '__main__':
FLAGS = util.util.get_flags()
BATCH_SIZE = FLAGS.batch_size
LEARNING_RATE = FLAGS.learning_rate
DISPLAY_STEP = FLAGS.display_step
EPOCHS = FLAGS.n_epochs
NUM_SAMPLES = FLAGS.mc_train
NUM_INDUCING = FLAGS.n_inducing
IS_ARD = FLAGS.is_ard
    if os.path.exists(TRAIN_PATH) is False:  # data file does not exist, download the data
get_sarcos_data()
d = sarcos_all_joints_data()
data = datasets.DataSet(d['train_inputs'].astype(np.float32), d['train_outputs'].astype(np.float32))
test = datasets.DataSet(d['test_inputs'].astype(np.float32), d['test_outputs'].astype(np.float32))
# Setup initial values for the model.
likelihood = likelihoods.RegressionNetwork(7, 0.1)
kern = [kernels.RadialBasis(data.X.shape[1], lengthscale=8.0, input_scaling = IS_ARD) for i in range(8)]
# kern = [kernels.ArcCosine(data.X.shape[1], 1, 3, 5.0, 1.0, input_scaling=True) for i in range(10)]
Z = init_z(data.X, NUM_INDUCING)
m = autogp.GaussianProcess(likelihood, kern, Z, num_samples=NUM_SAMPLES)
# setting up loss to be reported during training
error_rate = None #losses.StandardizedMeanSqError(d['train_outputs'].astype(np.float32), data.Dout)
import time
o = tf.train.RMSPropOptimizer(LEARNING_RATE)
start = time.time()
m.fit(data, o, loo_steps=0, var_steps=50, epochs = EPOCHS, batch_size = BATCH_SIZE, display_step=DISPLAY_STEP, test = test,
loss = error_rate )
print time.time() - start
ypred = m.predict(test.X)[0]
print("Final " + error_rate.get_name() + "=" + "%.4f" % error_rate.eval(test.Y, ypred))
| apache-2.0 |
wkfwkf/statsmodels | statsmodels/distributions/mixture_rvs.py | 27 | 9592 | from statsmodels.compat.python import range
import numpy as np
def _make_index(prob,size):
"""
Returns a boolean index for given probabilities.
Notes
    -----
prob = [.75,.25] means that there is a 75% chance of the first column
being True and a 25% chance of the second column being True. The
columns are mutually exclusive.
"""
rv = np.random.uniform(size=(size,1))
cumprob = np.cumsum(prob)
return np.logical_and(np.r_[0,cumprob[:-1]] <= rv, rv < cumprob)
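# Illustrative sketch (the exact rows depend on the random draw):
# _make_index([.75, .25], 4) could return
#     array([[ True, False],
#            [False,  True],
#            [ True, False],
#            [ True, False]])
# i.e. exactly one True per row, with column 0 chosen roughly 75% of the time.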
def mixture_rvs(prob, size, dist, kwargs=None):
"""
Sample from a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
    >>> Y = mixture_rvs(prob, 5000, dist=[stats.norm, stats.norm],
    ...                 kwargs=(dict(loc=-1,scale=.5), dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty(size)
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
sample[sample_idx] = dist[i].rvs(*args, **dict(loc=loc,scale=scale,
size=sample_size))
return sample
class MixtureDistribution(object):
    '''Univariate mixture distribution.

    Handles only the simple case (unbounded support) for now and does not yet
    inherit from scipy.stats.distributions. It adds pdf and cdf on top of
    mixture_rvs, with some restrictions on broadcasting. The class holds no
    state; all arguments are passed to each method.
    '''
#def __init__(self, prob, size, dist, kwargs=None):
def rvs(self, prob, size, dist, kwargs=None):
return mixture_rvs(prob, size, dist, kwargs=kwargs)
def pdf(self, x, prob, dist, kwargs=None):
"""
        pdf of a mixture of distributions.
Parameters
----------
        x : array-like
            The points at which the pdf is evaluated.
        prob : array-like
            Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
        Say we want the density of a mixture of normals with two
        distributions norm(-1,.5) and norm(1,.5), where the first component
        has weight .75 and the second has weight .25.
>>> from scipy import stats
>>> prob = [.75,.25]
        >>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm],
        ...                 kwargs=(dict(loc=-1,scale=.5), dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
pdf_ = prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
else:
pdf_ += prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
return pdf_
def cdf(self, x, prob, dist, kwargs=None):
"""
cdf of a mixture of distributions.
Parameters
----------
        x : array-like
            The points at which the cdf is evaluated.
        prob : array-like
            Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
        Say we want the cumulative distribution of a mixture of normals with
        two distributions norm(-1,.5) and norm(1,.5), where the first component
        has weight .75 and the second has weight .25.
>>> from scipy import stats
>>> prob = [.75,.25]
        >>> Y = mixture.cdf(x, prob, dist=[stats.norm, stats.norm],
        ...                 kwargs=(dict(loc=-1,scale=.5), dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
cdf_ = prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
else:
cdf_ += prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
return cdf_
def mv_mixture_rvs(prob, size, dist, nvars, **kwargs):
"""
Sample from a mixture of multivariate distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions instances with callable method rvs.
    nvars : int
dimension of the multivariate distribution, could be inferred instead
kwargs : tuple of dicts, optional
ignored
Examples
--------
Say we want 2000 random variables from mixture of normals with two
multivariate normal distributions, and we want to sample from the
first with probability .4 and the second with probability .6.
import statsmodels.sandbox.distributions.mv_normal as mvd
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
mu2 = np.array([4, 2.0, 2.0])
mvn3 = mvd.MVNormal(mu, cov3)
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty((size, nvars))
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
#loc = kwargs[i].get('loc',0)
#scale = kwargs[i].get('scale',1)
#args = kwargs[i].get('args',())
# use int to avoid numpy bug with np.random.multivariate_normal
sample[sample_idx] = dist[i].rvs(size=int(sample_size))
return sample
if __name__ == '__main__':
from scipy import stats
obs_dist = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.beta],
kwargs=(dict(loc=-1,scale=.5),dict(loc=1,scale=1,args=(1,.5))))
nobs = 10000
mix = MixtureDistribution()
## mrvs = mixture_rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
## kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.75)))
mix_kwds = (dict(loc=-1,scale=.25),dict(loc=1,scale=.75))
mrvs = mix.rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
grid = np.linspace(-4,4, 100)
mpdf = mix.pdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
mcdf = mix.cdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
doplot = 1
if doplot:
import matplotlib.pyplot as plt
plt.figure()
plt.hist(mrvs, bins=50, normed=True, color='red')
plt.title('histogram of sample and pdf')
plt.plot(grid, mpdf, lw=2, color='black')
plt.figure()
plt.hist(mrvs, bins=50, normed=True, cumulative=True, color='red')
        plt.title('cumulative histogram of sample and cdf')
plt.plot(grid, mcdf, lw=2, color='black')
plt.show()
| bsd-3-clause |
btabibian/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 102 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features that are selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
ssaeger/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation be done on a copy. Let the default value
to True unless you don't care about side effects
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
kaichogami/scikit-learn | sklearn/manifold/t_sne.py | 7 | 34867 | # Author: Alexander Fabisch -- <[email protected]>
# Author: Christopher Moody <[email protected]>
# Author: Nick Travers <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..neighbors import BallTree
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..utils.fixes import astype
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
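    # Symmetrise the conditional probabilities (p_ij proportional to
    # p_j|i + p_i|j) and renormalise so that the joint distribution sums to one.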
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
    This method is approximately equal to _joint_probabilities. The latter
    is O(N**2), but limiting the joint probability to the nearest neighbors
    improves this substantially to O(uN).
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
neighbors = astype(neighbors, np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
m = "All probabilities should be finite"
assert np.all(np.isfinite(conditional_P)), m
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
assert np.all(np.abs(P) <= 1.0)
return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(skip_num_points, n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
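def _kl_divergence_example():
    # Illustrative sketch (not from the original library): evaluates the
    # exact-method objective on a tiny random embedding to show the expected
    # argument shapes. All values are arbitrary assumptions.
    rng = np.random.RandomState(0)
    n_samples, n_components = 6, 2
    X = rng.randn(n_samples, 4)
    D = pairwise_distances(X, squared=True)
    P = _joint_probabilities(D, desired_perplexity=3.0, verbose=0)
    params = 1e-4 * rng.randn(n_samples * n_components)
    kl, grad = _kl_divergence(params, P, degrees_of_freedom=1.0,
                              n_samples=n_samples, n_components=n_components)
    # One gradient entry per embedding coordinate.
    assert grad.shape == (n_samples * n_components,)
    return kl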
def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples,
n_components):
"""t-SNE objective function: the absolute error of the
KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
    neighbors : array (n_samples, K)
        The neighbors are not actually required to calculate the
        divergence; the argument is only here to match the signature of
        the gradient function.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if len(P.shape) == 2:
P = squareform(P)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
return kl_divergence
def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples,
n_components, angle=0.5, skip_num_points=0,
verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors: int64 array, shape (n_samples, K)
Array with element [i, j] giving the index for the jth
closest neighbor to point i.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angles below 0.2 quickly increase
        computation time and angles above 0.8 quickly increase the error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = astype(params, np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
neighbors = astype(neighbors, np.int64, copy=False)
if len(P.shape) == 1:
sP = squareform(P).astype(np.float32)
else:
sP = P.astype(np.float32)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(sP, X_embedded, neighbors,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
n_iter_check=1, n_iter_without_progress=50,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
    objective_error : function or callable
        Should return the cost for a given parameter vector.
    n_iter_without_progress : int, optional (default: 50)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
if new_error is None:
new_error = objective_error(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
if verbose >= 2:
m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
print(m % (i + 1, error, grad_norm))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if error_diff <= min_error_diff:
if verbose >= 2:
m = "[t-SNE] Iteration %d: error difference %f. Finished."
print(m % (i + 1, error_diff))
break
if new_error is not None:
error = new_error
return p, error, i
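def _gradient_descent_example():
    # Illustrative sketch (not from the original library): minimizes a simple
    # quadratic to show the expected objective signature (cost, gradient) and
    # the return values of _gradient_descent. Step sizes are arbitrary.
    def quadratic(p):
        return np.sum(p ** 2), 2.0 * p
    p0 = np.array([1.0, -2.0, 3.0])
    p, error, it = _gradient_descent(quadratic, p0, it=0, n_iter=100,
                                     learning_rate=0.1, momentum=0.5)
    return p, error, it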
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
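def _trustworthiness_example():
    # Illustrative sketch (not from the original library): scores a trivial
    # 2-D "embedding" of random data. The data are arbitrary; the score lies
    # in [0, 1] per the docstring above.
    rng = np.random.RandomState(0)
    X = rng.randn(30, 10)
    X_embedded = X[:, :2]  # keep the first two columns as the embedding
    return trustworthiness(X, X_embedded, n_neighbors=5)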
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angles below 0.2 quickly increase
        computation time and angles above 0.8 quickly increase the error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> np.set_printoptions(suppress=True)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 0.00017599, 0.00003993],
[ 0.00009891, 0.00021913],
[ 0.00018554, -0.00009357],
[ 0.00009528, -0.00001407]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
if init not in ["pca", "random"] or isinstance(init, np.ndarray):
msg = "'init' must be 'pca', 'random' or a NumPy array"
raise ValueError(msg)
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
self.embedding_ = None
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
        It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise to use a
        dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that when
            method='barnes_hut', X cannot be a sparse array and if need be
            will be converted to a 32 bit float array. Method='exact' allows
            sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if not np.all(distances >= 0):
raise ValueError("All distances should be positive, either "
"the metric or precomputed distances given "
"as X are not correct")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
# the number of nearest neighbors to find
k = min(n_samples - 1, int(3. * self.perplexity + 1))
neighbors_nn = None
if self.method == 'barnes_hut':
if self.verbose:
print("[t-SNE] Computing %i nearest neighbors..." % k)
if self.metric == 'precomputed':
# Use the precomputed distances to find
# the k nearest neighbors and their distances
neighbors_nn = np.argsort(distances, axis=1)[:, :k]
else:
# Find the nearest neighbors for every point
bt = BallTree(X)
# LvdM uses 3 * perplexity as the number of neighbors
# And we add one to not count the data point itself
# In the event that we have very small # of points
# set the neighbors to n - 1
distances_nn, neighbors_nn = bt.query(X, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
P = _joint_probabilities_nn(distances, neighbors_nn,
self.perplexity, self.verbose)
else:
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be zero or positive"
        assert np.all(P <= 1), ("All probabilities should be less "
                                "than or equal to one")
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
def _tsne(self, P, degrees_of_freedom, n_samples, random_state,
X_embedded=None, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
opt_args = {}
opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0,
"learning_rate": self.learning_rate,
"verbose": self.verbose, "n_iter_check": 25,
"kwargs": dict(skip_num_points=skip_num_points)}
if self.method == 'barnes_hut':
m = "Must provide an array of neighbors to use Barnes-Hut"
assert neighbors is not None, m
obj_func = _kl_divergence_bh
objective_error = _kl_divergence_error
sP = squareform(P).astype(np.float32)
neighbors = neighbors.astype(np.int64)
args = [sP, neighbors, degrees_of_freedom, n_samples,
self.n_components]
opt_args['args'] = args
opt_args['min_grad_norm'] = 1e-3
opt_args['n_iter_without_progress'] = 30
# Don't always calculate the cost since that calculation
# can be nearly as expensive as the gradient
opt_args['objective_error'] = objective_error
opt_args['kwargs']['angle'] = self.angle
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
opt_args['args'] = [P, degrees_of_freedom, n_samples,
self.n_components]
opt_args['min_error_diff'] = 0.0
opt_args['min_grad_norm'] = 0.0
# Early exaggeration
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
opt_args['n_iter'] = 100
opt_args['momentum'] = 0.8
opt_args['it'] = it + 1
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Save the final number of iterations
self.n_iter_final = it
# Final optimization
P /= self.early_exaggeration
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
"""
self.fit_transform(X)
return self
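def _tsne_exact_example():
    # Illustrative sketch (not from the original library): embeds a tiny
    # random dataset with the exact gradient instead of the default
    # Barnes-Hut approximation. Dataset size and perplexity are arbitrary.
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    model = TSNE(n_components=2, perplexity=5.0, method='exact',
                 random_state=0)
    X_embedded = model.fit_transform(X)
    return X_embedded.shape  # (20, 2)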
| bsd-3-clause |
ricardog/raster-project | projections/r2py/lm.py | 1 | 1635 | from rpy2.robjects import Formula
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
class LM(object):
'''Class for fitting (simple) linear models using rpy2. When extracting
the coefficients for a model (lmerMod or glmerMod) that uses orthogonal
polynomials (poly in R syntax), it is necessary to fit a linear model
that maps from the original data to the fitted polynomial. The mermod
  class uses this class to fit such linear models.
'''
def __init__(self, formula=None, response=None, predictors=None):
self.__stats = importr('stats')
self.formula = formula
self.response = response
self.predictors = predictors
self._coefs = None
@property
def formula(self):
return self._formula
@formula.setter
def formula(self, f):
self._formula = Formula(f)
self.env = self._formula.environment
def fit(self):
'''Fit the linear model and extract the coefficients.
    FIXME: This function assumes the model has a single predictor variable
    (x), which may appear multiple times with different exponents. That is,
    the equation must be of the form
y ~ x + I(x^2) + I(x^3)
'''
if self.formula is None or self.response is None or self.predictors is None:
raise RuntimeError('set formula, response, and predictor variables')
## FIXME: This is a quick and dirty hack.
self.env['y'] = self.response
self.env['x'] = self.predictors.loc[:, 'x']
fit = self.__stats.lm(self.formula)
self._coefs = pandas2ri.ri2py(fit.rx('coefficients')[0])
@property
def coefs(self):
    if self._coefs is None:
self.fit()
return self._coefs
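def _lm_example():
  # Illustrative sketch (not from the original project): fits a cubic in x
  # with the LM wrapper above. It assumes a working R/rpy2 setup plus
  # whatever numpy/pandas converters the surrounding project activates; the
  # data below are made up.
  import numpy as np
  import pandas as pd
  x = np.linspace(-1, 1, 50)
  y = 1.0 + 2.0 * x - 0.5 * x ** 2
  predictors = pd.DataFrame({'x': x})
  model = LM('y ~ x + I(x^2) + I(x^3)', response=y, predictors=predictors)
  return model.coefs  # intercept followed by the polynomial terms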
| apache-2.0 |
prheenan/prhUtil | python/IgorUtil.py | 2 | 8803 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
# import the patrick-specific utilities
import GenUtilities as pGenUtil
import PlotUtilities as pPlotUtil
import CheckpointUtilities as pCheckUtil
from scipy.signal import savgol_filter
DEF_FILTER_CONST = 0.005 # 0.5%
BASE_GROUP = "/Volumes/group/4Patrick/"
SUBDIR_BINARIES = "PRH_AFM_Databases/BinaryFilesTimeSeparationForce/"
def getDatabaseFolder():
"""
Returns the location of the database binary folder location
Args:
None
Returns:
Where the database is, as a string.
"""
# XXX TODO: right now assumes mac-style mounting...
return BASE_GROUP + SUBDIR_BINARIES
def getDatabaseFile(fileName,extension=".hdf"):
"""
    Returns the absolute path to a previously-saved file with the given
    filename. The path is *not* guaranteed to exist if the file hasn't been
    saved already.
Args:
fileName: the name of the file (usually according to the "TraceData"
table, field "FileTimSepFor")
        extension: the required extension
Returns:
Where the file is located, an absolute path. Doesn't guarantee the file
*does* exist, just that *if* it does, it would be there.
"""
fileWithExt = pGenUtil.ensureEnds(fileName,extension)
return getDatabaseFolder() + fileWithExt
def DemoDir():
'''
:return: the absolute path to the demo directory
'''
return BASE_GROUP + "DemoData/IgorDemos/"
# all demo directories should have an input and output directory
def GetDemoInOut(demoName,baseDir=DemoDir(),raiseOnError=True):
"""
Returns the demo input and output directories, given a path baseDir and
    name demoName. Requires files to exist at "<baseDir><demoName>". If
encountering an error (e.g. permissions, something isn't mounted), raises
an error.
Args:
demoName: The name of the demo. Assumed to be the subdir under "basedir"
we want to use
baseDir: the base directory. Input and output directories are
"<baseDir><demoName>Input/" and "<baseDir><demoName>Output/", resp.
        raiseOnError : if true, raises an error on an OS error; otherwise,
        just prints a warning that something went wrong.
Returns:
tuple of <inputDir>,<outputDir>
"""
fullBase = baseDir + demoName
inputV = pGenUtil.getSanitaryPath(fullBase + "/Input/")
outputV = pGenUtil.getSanitaryPath(fullBase + "/Output/")
try:
pGenUtil.ensureDirExists(inputV)
pGenUtil.ensureDirExists(outputV)
except OSError as e:
if (raiseOnError):
raise(e)
print("Warning, couldn't open demo directories based in " + fullBase +
". Most likely, not connected to JILA network")
return inputV,outputV
def DemoJilaOrLocal(demoName,localPath):
"""
Looks for the demo dir in the default (jila-hosted) space. If nothing is
found, looks in the paths specified by localpath (where it puts input
and output directories according to its name)
Args:
demoName: see GetDemoInOut
localPath: equivalent of baseDir in GetDemoInOut. Where we put the input and Output directories for the unit test if JILA can't be found.
Returns:
tuple of <inputDir>,<outputDir>
"""
inDir,outDir = GetDemoInOut(demoName,raiseOnError=False)
if (not pGenUtil.dirExists(inDir)):
print("Warning: Couldn't connect to JILA's Network. Using local data.")
# get "sanitary paths" which as OS-indepdent (in theory..)
localPath = pGenUtil.ensureEnds(localPath,"/")
inDir = pGenUtil.getSanitaryPath(localPath)
outDir = pGenUtil.getSanitaryPath(localPath + "Output" + demoName +"/")
pGenUtil.ensureDirExists(outDir)
if (not pGenUtil.dirExists(inDir)):
# whoops...
raise IOError("Demo Directory {:s} not found anywhere.".\
format(inDir))
return inDir,outDir
# read a txt or similarly formatted file
def readIgorWave(mFile,skip_header=3,skip_footer=1,comments="X "):
data = np.genfromtxt(mFile,comments=comments,skip_header=skip_header,
skip_footer=skip_footer)
return data
def savitskyFilter(inData,nSmooth = None,degree=2):
if (nSmooth is None):
nSmooth = int(len(inData)/200)
# POST: have an nSmooth
if (nSmooth % 2 == 0):
# must be odd
nSmooth += 1
# get the filtered version of the data
return savgol_filter(inData,nSmooth,degree)
def SplitIntoApproachAndRetract(sep,force,sepToSplit=None):
'''
Given a full force/sep curve, returns the approach/retract
according to before/after sepToSplit, cutting out the surface (assumed
at minimm separation )
:param sep: the separation, units not important. minimum is surface
:param force: the force, units not important
    :param sepToSplit: the separation where we think the surface is. Same units
as sep
'''
# find where sep is closest to sepToSplit before/after minIdx (surface)
if (sepToSplit is None):
sepToSplit = np.min(sep)
surfIdx = np.argmin(sep)
sepAppr = sep[:surfIdx]
sepRetr = sep[surfIdx:]
apprIdx = np.argmin(np.abs(sepAppr-sepToSplit))
retrIdx = surfIdx + np.argmin(np.abs(sepRetr-sepToSplit))
forceAppr = force[:apprIdx]
forceRetr = force[retrIdx:]
sepAppr = sep[:apprIdx]
sepRetr = sep[retrIdx:]
return sepAppr,sepRetr,forceAppr,forceRetr
def NormalizeSepForce(sep,force,surfIdx=None,normalizeSep=True,
normalizeFor=True,sensibleUnits=True):
if (sensibleUnits):
sepUnits = sep * 1e9
forceUnits = force * 1e12
else:
sepUnits = sep
forceUnits= force
if (surfIdx is None):
surfIdx = np.argmin(sep)
if (normalizeSep):
sepUnits -= sepUnits[surfIdx]
if (normalizeFor):
# reverse: sort low to high
sortIdx = np.argsort(sep)[::-1]
# get the percentage of points we want
percent = 0.05
nPoints = int(percent*sortIdx.size)
idxForMedian = sortIdx[:nPoints]
# get the median force at these indices
forceMedUnits = np.median(forceUnits[idxForMedian])
# correct the force
forceUnits -= forceMedUnits
# multiply it by -1 (flip)
forceUnits *= -1
return sepUnits,forceUnits
# plot a force extension curve with approach and retract
def PlotFec(sep,force,surfIdx = None,normalizeSep=True,normalizeFor=True,
filterN=None,sensibleUnits=True):
"""
Plot a force extension curve
:param sep: The separation in meters
    :param force: The force in Newtons
:param surfIdx: The index between approach and retract. if not present,
intuits approximate index from minmmum Sep
:param normalizeSep: If true, then zeros sep to its minimum
    :param normalizeFor: If true, then zeros force to the median-filtered last
    5% of data, by separation (presumably, already detached)
    :param filterN: Plots the raw data in grey, and filters
    the force to the number of points given. If None, assumes a default % of curve
:param sensibleUnits: Plots in nm and pN, defaults to true
"""
if (surfIdx is None):
surfIdx = np.argmin(sep)
sepUnits,forceUnits = NormalizeSepForce(sep,force,surfIdx,normalizeSep,
normalizeFor,sensibleUnits)
if (filterN is None):
filterN = int(np.ceil(DEF_FILTER_CONST*sepUnits.size))
# POST: go ahead and normalize/color
sepAppr = sepUnits[:surfIdx]
sepRetr = sepUnits[surfIdx:]
forceAppr = forceUnits[:surfIdx]
forceRetr = forceUnits[surfIdx:]
PlotFilteredSepForce(sepAppr,forceAppr,filterN=filterN,color='r',
label="Approach")
PlotFilteredSepForce(sepRetr,forceRetr,filterN=filterN,color='b',
label="Retract")
plt.xlim([min(sepUnits),max(sepUnits)])
pPlotUtil.lazyLabel("Separation [nm]","Force [pN]","Force Extension Curve")
return sepUnits,forceUnits
def filterForce(force,filterN=None):
if (filterN is None):
filterN = int(np.ceil(DEF_FILTER_CONST*force.size))
return savitskyFilter(force,filterN)
def PlotFilteredSepForce(sep,force,filterN=None,labelRaw=None,
linewidthFilt=2.0,color='r',**kwargs):
forceFilt =filterForce(force,filterN)
plt.plot(sep,forceFilt,color=color,lw=linewidthFilt,**kwargs)
# plot the raw data as grey
plt.plot(sep,force,color='k',label=labelRaw,alpha=0.3)
return forceFilt
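def _igor_util_example(wave_file="example_wave.txt"):
    # Illustrative sketch (not from the original project): loads an exported
    # Igor wave and plots it as a force-extension curve. The file name and
    # the column layout (time, separation, force) are assumptions.
    data = readIgorWave(wave_file)
    sep, force = data[:, 1], data[:, 2]
    sepUnits, forceUnits = PlotFec(sep, force)
    plt.show()
    return sepUnits, forceUnits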
| gpl-2.0 |
Myasuka/scikit-learn | setup.py | 143 | 7364 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def is_scipy_installed():
try:
import scipy
except ImportError:
return False
return True
def is_numpy_installed():
try:
import numpy
except ImportError:
return False
return True
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
if is_numpy_installed() is False:
raise ImportError("Numerical Python (NumPy) is not installed.\n"
"scikit-learn requires NumPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if is_scipy_installed() is False:
raise ImportError("Scientific Python (SciPy) is not installed.\n"
"scikit-learn requires SciPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
mathhun/scipy_2015_sklearn_tutorial | notebooks/figures/plot_rbf_svm_parameters.py | 19 | 2018 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import make_blobs
from .plot_2d_separator import plot_2d_separator
def make_handcrafted_dataset():
# a carefully hand-designed dataset lol
X, y = make_blobs(centers=2, random_state=4, n_samples=30)
y[np.array([7, 27])] = 0
mask = np.ones(len(X), dtype=np.bool)
mask[np.array([0, 1, 5, 26])] = 0
X, y = X[mask], y[mask]
return X, y
def plot_rbf_svm_parameters():
X, y = make_handcrafted_dataset()
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
for ax, C in zip(axes, [1e0, 5, 10, 100]):
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
svm = SVC(kernel='rbf', C=C).fit(X, y)
plot_2d_separator(svm, X, ax=ax, eps=.5)
ax.set_title("C = %f" % C)
fig, axes = plt.subplots(1, 4, figsize=(15, 3))
for ax, gamma in zip(axes, [0.1, .5, 1, 10]):
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
svm = SVC(gamma=gamma, kernel='rbf', C=1).fit(X, y)
plot_2d_separator(svm, X, ax=ax, eps=.5)
ax.set_title("gamma = %f" % gamma)
def plot_svm(log_C, log_gamma):
X, y = make_handcrafted_dataset()
C = 10. ** log_C
gamma = 10. ** log_gamma
svm = SVC(kernel='rbf', C=C, gamma=gamma).fit(X, y)
ax = plt.gca()
plot_2d_separator(svm, X, ax=ax, eps=.5)
# plot data
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
# plot support vectors
sv = svm.support_vectors_
ax.scatter(sv[:, 0], sv[:, 1], s=230, facecolors='none', zorder=10, linewidth=3)
ax.set_title("C = %.4f gamma = %.4f" % (C, gamma))
def plot_svm_interactive():
from IPython.html.widgets import interactive, FloatSlider
C_slider = FloatSlider(min=-3, max=3, step=.1, value=0, readout=False)
gamma_slider = FloatSlider(min=-2, max=2, step=.1, value=0, readout=False)
return interactive(plot_svm, log_C=C_slider, log_gamma=gamma_slider)
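def plot_svm_static_example():
    # Illustrative sketch (not from the original tutorial): renders a single
    # decision surface for one (C, gamma) pair without the interactive
    # slider widgets. The chosen exponents are arbitrary.
    plot_svm(log_C=0.0, log_gamma=0.0)
    plt.show()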
| cc0-1.0 |
bcaine/maddux | maddux/environment.py | 1 | 6599 | """
Our experiment environment.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
GRAVITY = -9.81
class Environment:
def __init__(self, dimensions=None, dynamic_objects=None,
static_objects=None, robot=None):
"""An environment to run experiments in
:param dimensions: (Optional) The dimensions of env
:type dimensions: 1x3 numpy.array or None
:param dynamic_objects: (Optional) A list of objects that can move
:type dynamic_objects: list of maddux.objects.DynamicObject or None
:param static_objects: (Optional) A list of stationary objects
:type static_objects: list of maddux.objects.StaticObject or None
:param robot: (Optional) A robot to simulate
:type robot: maddux.robot.Arm or None
:rtype: None
"""
if dimensions is not None:
self.dimensions = np.array(dimensions)
else:
self.dimensions = np.array([10.0, 10.0, 100.0])
self.dynamic_objects = dynamic_objects if dynamic_objects else []
self.static_objects = static_objects if static_objects else []
self.robot = robot
def run(self, duration):
"""Run for a certain duration
:param duration: duration to run environment in seconds
:type duration: integer
:rtype: None
"""
duration_ms = int(duration * 1000)
for _ in xrange(duration_ms):
map(lambda obj: obj.step(), self.dynamic_objects)
if self.collision():
break
def animate(self, duration=None, save_path=None):
"""Animates the running of the program
:param duration: (Optional) Duration of animation in seconds
:type duration: int or None
:param save_path: (Optional) Path to save mp4 in instead of displaying
:type save_path: String or None
:rtype: None
"""
fps = 15
dynamic_iter_per_frame = 10 * fps
if duration is None:
if self.robot is None:
# Sensible Default
frames = fps * 5
else:
frames = len(self.robot.qs)
else:
frames = int(fps * duration)
def update(i):
ax.clear()
for _ in xrange(dynamic_iter_per_frame):
map(lambda obj: obj.step(), self.dynamic_objects)
# Check for collisions
self.collision()
if self.robot is not None:
next_q = self.robot.qs[i]
self.robot.update_angles(next_q)
self.plot(ax=ax, show=False)
fig = plt.figure(figsize=(8, 8))
ax = Axes3D(fig)
self.plot(ax=ax, show=False)
# If we don't assign its return to something, it doesn't run.
# Seems like really weird behavior..
ani = animation.FuncAnimation(fig, update, frames=frames, blit=False)
if save_path is None:
plt.show()
else:
Writer = animation.writers['ffmpeg']
writer = Writer(
fps=fps, metadata=dict(
artist='Maddux'), bitrate=1800)
ani.save(save_path, writer=writer)
def hypothetical_landing_position(self):
"""Find the position that the ball would land (or hit a wall)
:returns: Position (x, y, z) of hypothetical landing position of a
thrown object based on end effector velocity.
:rtype: numpy.ndarray or None
"""
pos = self.robot.end_effector_position().copy()
# Only need linear velocity
v = self.robot.end_effector_velocity()[0:3]
for t in np.linspace(0, 15, 5000):
# Check if it hit a target
for static in self.static_objects:
if static.is_hit(pos):
return pos.copy()
# Or a wall
for i in range(len(pos)):
in_negative_space = pos[i] <= 0
past_boundary = pos[i] >= self.dimensions[i]
if in_negative_space or past_boundary:
return pos.copy()
# Otherwise step forward
v[2] += t * GRAVITY
pos += t * v
# If we never hit anything (which is completely impossible (TM))
# return None
return None
def collision(self):
"""Check if any dynamic objects collide with any static
objects or walls.
:return: Whether there was a collision
:rtype: bool
"""
for dynamic in self.dynamic_objects:
if dynamic.attached:
continue
for static in self.static_objects:
if static.is_hit(dynamic.position):
dynamic.attach()
return True
for i in range(len(dynamic.position)):
in_negative_space = dynamic.position[i] <= 0
past_boundary = (dynamic.position[i] >=
self.dimensions[i])
if in_negative_space or past_boundary:
dynamic.attach()
return True
return False
def plot(self, ax=None, show=True):
"""Plot throw trajectory and ball
:param ax: Current axis if a figure already exists
:type ax: matplotlib.axes
:param show: (Default: True) Whether to show the figure
:type show: bool
:rtype: None
"""
if ax is None:
fig = plt.figure(figsize=(12, 12))
ax = Axes3D(fig)
# Set the limits to be environment ranges
ax.set_xlim([0, self.dimensions[0]])
ax.set_ylim([0, self.dimensions[1]])
if self.dynamic_objects:
zmax = max([o.positions[:, 2].max()
for o in self.dynamic_objects])
else:
zmax = 10
ax.set_zlim([0, max(10, zmax)])
# And set our labels
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
for dynamic in self.dynamic_objects:
# Plot Trajectory
ax.plot(dynamic.positions[:, 0], dynamic.positions[:, 1],
dynamic.positions[:, 2], 'r--', label='Trajectory')
# Plot objects
map(lambda obj: obj.plot(ax), self.dynamic_objects)
map(lambda obj: obj.plot(ax), self.static_objects)
if self.robot:
self.robot.plot(ax)
if show:
plt.show()
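def _environment_example():
    """Illustrative sketch (not from the original package): builds a small,
    empty environment and steps it briefly. Adding balls, obstacles, or a
    robot arm requires constructors from maddux.objects / maddux.robots,
    which are omitted here.
    """
    env = Environment(dimensions=[10.0, 10.0, 20.0])
    env.run(duration=0.1)  # nothing moves, but this exercises the step loop
    return env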
| mit |
matpalm/malmomo | viz_advantage_surface.py | 1 | 3160 | #!/usr/bin/env python
# hacktasic viz of the quadratic surface of advantage around the max output
# for a couple of clear block on right / left / center cases
import agents
import argparse
import base_network
import Image
import numpy as np
import models
import sys
import tensorflow as tf
import replay_memory
import util
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
np.set_printoptions(precision=5, threshold=10000, suppress=True, linewidth=10000)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--width', type=int, default=160, help="render width")
parser.add_argument('--height', type=int, default=120, help="render height")
agents.add_opts(parser)
models.add_opts(parser)
replay_memory.add_opts(parser)
util.add_opts(parser)
opts = parser.parse_args()
#opts.ckpt_dir = "runs/14/d/ckpts" # last known good
print >>sys.stderr, "OPTS", opts
# init our rl_agent
agent_cstr = eval("agents.NafAgent")
agent = agent_cstr(opts)
an = agent.network
# prepare three plots; one for each of block on left, in center, or on right
fig = plt.figure(figsize=plt.figaspect(0.3))
plt.title(opts.ckpt_dir)
R = np.arange(-1, 1.25, 0.25)
X, Y = np.meshgrid(R, R)
for plot_idx, (img_file, desc) in enumerate([("runs/14/d/imgs/ep_00007/e0000.png", "on left"),
("runs/14/d/imgs/ep_00007/e0019.png", "center"),
("runs/14/d/imgs/ep_00007/e0034.png", "on right")]):
print "calculating for", desc, "..."
# slurp in bitmap
img = Image.open(img_file)
img = np.array(img)[:,:,:3]
# collect q-value for all x, y values in one hit
all_x_y_pairs = np.stack(zip(np.ravel(X), np.ravel(Y)))
img_repeated = [img] * all_x_y_pairs.shape[0]
q_values = agent.sess.run(an.q_value,
feed_dict={an.input_state: img_repeated,
an.input_action: all_x_y_pairs,
base_network.FLIP_HORIZONTALLY: False})
Z = q_values.reshape(X.shape)
# plot as surface
ax = fig.add_subplot(1,3,plot_idx+1, projection='3d')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, color='b', cmap=cm.coolwarm, linewidth=1)
ax.set_title(desc)
ax.set_xlabel("turn")
ax.set_ylabel("move")
ax.set_zlabel("q")
# include single vertical line where q was maximised (according to output_action)
output = agent.sess.run(an.output_action,
feed_dict={an.input_state: [img],
base_network.FLIP_HORIZONTALLY: False})
turn, move = np.squeeze(output)
q_value = agent.sess.run(an.q_value,
feed_dict={an.input_state: [img],
an.input_action: [[turn, move]],
base_network.FLIP_HORIZONTALLY: False})
print "turn", turn, "move", move, "=> q", np.squeeze(q_value), "Zmin=", np.min(Z), "Zmax=", np.max(Z)
ax.plot([turn, turn], [move, move], [np.min(Z), np.max(Z)], linewidth=5)
# render
plt.savefig("/tmp/test.png")
plt.show()
| mit |
joshzarrabi/e-mission-server | emission/analysis/plotting/leaflet_osm/our_plotter.py | 1 | 14864 | import pandas as pd
import folium.folium as folium
import itertools
import numpy as np
import logging
import geojson as gj
import copy
import attrdict as ad
from functional import seq
# import emission.analysis.classification.cleaning.location_smoothing as ls
import bson.json_util as bju
import emission.storage.decorations.location_queries as lq
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.place_queries as esdp
import emission.storage.decorations.stop_queries as esds
import emission.storage.decorations.section_queries as esdsc
import emission.storage.timeseries.abstract_timeseries as esta
import emission.core.wrapper.stop as ecws
import emission.core.wrapper.section as ecwsc
import emission.analysis.plotting.geojson.geojson_feature_converter as gfc
import emission.analysis.plotting.leaflet_osm.folium_geojson_plugin as fgjp
import emission.net.usercache.abstract_usercache as enua
import emission.net.api.usercache as enau
all_color_list = ['black', 'brown', 'blue', 'chocolate', 'cyan', 'fuschia', 'green', 'lime', 'magenta', 'navy', 'pink', 'purple', 'red', 'snow', 'yellow']
sel_color_list = ['black', 'blue', 'chocolate', 'cyan', 'fuschia', 'green', 'lime', 'magenta', 'pink', 'purple', 'red', 'yellow']
def df_to_string_list(df):
"""
Convert the input df into a list of strings, suitable for using as popups in a map.
This is a utility function.
"""
# print "Converting df with size %s to string list" % df.shape[0]
array_list = df.to_dict(orient='records')
return [str(line) for line in array_list]
def get_maps_for_range(user_id, start_ts, end_ts):
map_list = []
geojson_list = gfc.get_geojson_for_ts(user_id, start_ts, end_ts)
return get_maps_for_geojson_list(geojson_list)
def get_maps_for_usercache(user_id):
data_to_phone = seq(enau.sync_server_to_phone(user_id))
logging.debug("Before pipeline, trips to phone list has length %d" % len(data_to_phone.to_list()))
logging.debug("keys are %s" % data_to_phone.map(lambda e: ad.AttrDict(e).metadata.key))
trips_to_phone = data_to_phone.map(lambda e: ad.AttrDict(e))\
.filter(lambda e: e.metadata.key.startswith("diary/trips")) \
.map(lambda e: e.data)
logging.debug("After pipeline, trips to phone list has length %d" % len(trips_to_phone.to_list()))
# logging.debug("trips_to_phone = %s" % trips_to_phone)
maps_for_day = []
for day in trips_to_phone:
maps_for_day.append(get_maps_for_geojson_list(day))
return maps_for_day
def get_maps_for_geojson_list(trip_geojson_list):
map_list = []
for trip_doc in trip_geojson_list:
# logging.debug(trip_doc)
trip_geojson = ad.AttrDict(trip_doc)
logging.debug("centering based on start = %s, end = %s " % (trip_geojson.features[0], trip_geojson.features[1]))
flipped_midpoint = lambda(p1, p2): [(p1.coordinates[1] + p2.coordinates[1])/2,
(p1.coordinates[0] + p2.coordinates[0])/2]
curr_map = folium.Map(flipped_midpoint((trip_geojson.features[0].geometry,
trip_geojson.features[1].geometry)))
curr_plugin = fgjp.FoliumGeojsonPlugin(dict(trip_geojson))
curr_map.add_plugin(curr_plugin)
map_list.append(curr_map)
return map_list
def flipped(coord):
return (coord[1], coord[0])
def get_center_for_map(coords):
# logging.debug(trip_geojson)
midpoint = lambda(p1, p2): [(p1[0] + p2[0])/2,
(p1[1] + p2[1])/2]
if len(coords) == 0:
return None
if len(coords) == 1:
        return flipped(coords[0])
if len(coords) > 0:
logging.debug("Getting midpoint of %s and %s" % (coords[0], coords[-1]))
return flipped(midpoint((coords[0], coords[-1])))
def get_maps_for_geojson_unsectioned(feature_list):
map_list = []
for feature in feature_list:
# logging.debug("Getting map for feature %s" % bju.dumps(feature))
feature_coords = list(get_coords(feature))
# feature_coords = list(gj.utils.coords(feature))
curr_map = folium.Map(get_center_for_map(feature_coords))
curr_plugin = fgjp.FoliumGeojsonPlugin(dict(feature))
curr_map.add_plugin(curr_plugin)
map_list.append(curr_map)
return map_list
def get_coords(feature):
# logging.debug("Getting coordinates for feature %s" % bju.dumps(feature))
if feature["type"] == "FeatureCollection":
retVal = []
for f in feature["features"]:
retVal.extend(get_coords(f))
return retVal
else:
return gj.utils.coords(feature)
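def get_maps_for_geojson_example():
    # Illustrative sketch (not from the original project): renders a single
    # made-up LineString feature with the unsectioned helper above. The
    # coordinates are arbitrary (lon, lat) pairs.
    feature = gj.Feature(geometry=gj.LineString([(-122.40, 37.77),
                                                 (-122.41, 37.78)]))
    maps = get_maps_for_geojson_unsectioned([feature])
    return maps[0]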
def get_maps_for_range_old(user_id, start_ts, end_ts):
# First, get the timeline for that range.
ts = esta.TimeSeries.get_time_series(user_id)
trip_list = esdt.get_trips(user_id, enua.UserCache.TimeQuery("start_ts", start_ts, end_ts))
# TODO: Should the timeline support random access as well?
# If it did, we wouldn't need this additional map
# I think that it would be good to support a doubly linked list, i.e. prev and next in addition
# to the iteration interface
place_list = esdp.get_places(user_id, enua.UserCache.TimeQuery("exit_ts", start_ts, end_ts))
place_list = place_list + (esdp.get_places(user_id, enua.UserCache.TimeQuery("enter_ts", start_ts, end_ts)))
place_map = dict([(p.get_id(), p) for p in place_list])
map_list = []
flipped_midpoint = lambda(p1, p2): [(p1.coordinates[1] + p2.coordinates[1])/2,
(p1.coordinates[0] + p2.coordinates[0])/2]
for i, trip in enumerate(trip_list):
logging.debug("-" * 20 + trip.start_fmt_time + "=>" + trip.end_fmt_time
+ "(" + str(trip.end_ts - trip.start_ts) + ")")
if (len(esdt.get_sections_for_trip(user_id, trip.get_id())) == 0 and
len(esdt.get_stops_for_trip(user_id, trip.get_id())) == 0):
logging.debug("Skipping trip because it has no stops and no sections")
continue
start_point = gj.GeoJSON.to_instance(trip.start_loc)
end_point = gj.GeoJSON.to_instance(trip.end_loc)
curr_map = folium.Map(flipped_midpoint((start_point, end_point)))
map_list.append(curr_map)
logging.debug("About to display places %s and %s" % (trip.start_place, trip.end_place))
update_place(curr_map, trip.start_place, place_map, marker_color='green')
update_place(curr_map, trip.end_place, place_map, marker_color='red')
# TODO: Should get_timeline_for_trip work on a trip_id or on a trip object
# it seems stupid to convert trip object -> id -> trip object
curr_trip_timeline = esdt.get_timeline_for_trip(user_id, trip.get_id())
for i, trip_element in enumerate(curr_trip_timeline):
# logging.debug("Examining element %s of type %s" % (trip_element, type(trip_element)))
if type(trip_element) == ecws.Stop:
time_query = esds.get_time_query_for_stop(trip_element.get_id())
logging.debug("time_query for stop %s = %s" % (trip_element, time_query))
stop_points_df = ts.get_data_df("background/filtered_location", time_query)
# logging.debug("stop_points_df.head() = %s" % stop_points_df.head())
if len(stop_points_df) > 0:
update_line(curr_map, stop_points_df, line_color = sel_color_list[-1],
popup="%s -> %s" % (trip_element.enter_fmt_time, trip_element.exit_fmt_time))
else:
assert(type(trip_element) == ecwsc.Section)
time_query = esdsc.get_time_query_for_section(trip_element.get_id())
logging.debug("time_query for section %s = %s" %
(trip_element, "[%s,%s,%s]" % (time_query.timeType, time_query.startTs, time_query.endTs)))
section_points_df = ts.get_data_df("background/filtered_location", time_query)
logging.debug("section_points_df.tail() = %s" % section_points_df.tail())
if len(section_points_df) > 0:
update_line(curr_map, section_points_df, line_color = sel_color_list[trip_element.sensed_mode.value],
popup="%s (%s -> %s)" % (trip_element.sensed_mode, trip_element.start_fmt_time,
trip_element.end_fmt_time))
else:
logging.warn("found no points for section %s" % trip_element)
return map_list
def update_place(curr_map, place_id, place_map, marker_color='blue'):
if place_id is not None and place_id in place_map:
place = place_map[place_id]
logging.debug("Retrieved place %s" % place)
if hasattr(place, "location"):
coords = copy.copy(place.location.coordinates)
coords.reverse()
logging.debug("Displaying place at %s" % coords)
curr_map.simple_marker(location=coords, popup=str(place), marker_color=marker_color)
else:
logging.debug("starting place has no location, skipping")
else:
logging.warn("place not mapped because place_id = %s and place_id in place_map = %s" % (place_id, place_id in place_map))
def update_line(currMap, line_points, line_color = None, popup=None):
currMap.div_markers(line_points[['latitude', 'longitude']].as_matrix().tolist(),
df_to_string_list(line_points), marker_size=5)
currMap.line(line_points[['latitude', 'longitude']].as_matrix().tolist(),
line_color = line_color,
popup = popup)
##########################
# Everything below this line is from the time when we were evaluating
# segmentation and can potentially be deleted. It is also likely to have bitrotted.
# Let's hold off a bit on that until we have the replacement, though
##########################
def get_map_list(df, potential_splits):
mapList = []
potential_splits_list = list(potential_splits)
for start, end in zip(potential_splits_list, potential_splits_list[1:]):
trip = df[start:end]
print "Considering trip from %s to %s because start = %d and end = %d" % (df.formatted_time.loc[start], df.formatted_time.loc[end], start, end)
if end - start < 4:
# If there are only 3 entries, that means that there is only one
# point other than the start and the end, bail
print "Ignoring trip from %s to %s because start = %d and end = %d" % (df.formatted_time.loc[start], df.formatted_time.loc[end], start, end)
continue
mapList.append(get_map(trip))
return mapList
def get_map_list_after_segmentation(section_map, outlier_algo = None, filter_algo = None):
mapList = []
for trip, section_list in section_map:
logging.debug("%s %s -> %s %s" % ("=" * 20, trip.start_time, trip.end_time, "=" * 20))
trip_df = lq.get_points_for_section(trip)
curr_map = folium.Map([trip_df.mLatitude.mean(), trip_df.mLongitude.mean()])
last_section_end = None
for (i, section) in enumerate(section_list):
logging.debug("%s %s: %s -> %s %s" %
("-" * 20, i, section.start_time, section.end_time, "-" * 20))
raw_section_df = trip_df[np.logical_and(trip_df.mTime >= section.start_ts,
trip_df.mTime <= section.end_ts)]
section_df = ls.filter_points(raw_section_df, outlier_algo, filter_algo)
if section_df.shape[0] == 0:
logging.info("Found empty df! skipping...")
continue
logging.debug("for section %s, section_df.shape = %s, formatted_time.head() = %s" %
(section, section_df.shape, section_df["formatted_time"].head()))
update_map(curr_map, section_df, line_color = sel_color_list[section.activity.value],
popup = "%s" % (section.activity))
if section_df.shape[0] > 0:
curr_section_start = section_df.iloc[0]
if i != 0 and last_section_end is not None:
# We want to join this to the previous section.
curr_map.line([[last_section_end.mLatitude, last_section_end.mLongitude],
[curr_section_start.mLatitude, curr_section_start.mLongitude]],
line_color = sel_color_list[-1],
popup = "%s -> %s" % (section_list[i-1].activity, section.activity))
last_section_end = section_df.iloc[-1]
mapList.append(curr_map)
return mapList
def get_map(section_points, line_color = None, popup=None):
currMap = folium.Map([section_points.mLatitude.mean(), section_points.mLongitude.mean()])
update_map(currMap, section_points, line_color, popup)
return currMap
def update_map(currMap, section_points, line_color = None, popup=None):
currMap.div_markers(section_points[['mLatitude', 'mLongitude']].as_matrix().tolist(),
df_to_string_list(section_points), marker_size=5)
currMap.line(section_points[['mLatitude', 'mLongitude']].as_matrix().tolist(),
line_color = line_color,
popup = popup)
def evaluate_filtering(section_list, outlier_algos, filtering_algos):
"""
TODO: Is this the best place for this? If not, what is?
It almost seems like we need to have a separate evaluation module that is
separate from the plotting and the calculation modules.
But then, what is the purpose of this module?
"""
nCols = 2 + len(outlier_algos) * len(filtering_algos)
nRows = len(section_list)
map_list = []
for section in section_list:
curr_compare_list = []
section_df = ls.get_section_points(section)
curr_compare_list.append(get_map(section_df))
curr_compare_list.append(get_map(ls.filter_points(section_df, None, None)))
for (oa, fa) in itertools.product(outlier_algos, filtering_algos):
curr_filtered_df = ls.filter_points(section_df, oa, fa)
print ("After filtering with %s, %s, size is %s" % (oa, fa, curr_filtered_df.shape))
if "activity" in section:
curr_compare_list.append(get_map(curr_filtered_df,
line_color = sel_color_list[section.activity.value],
popup = "%s" % (section.activity)))
else:
curr_compare_list.append(get_map(curr_filtered_df))
assert(len(curr_compare_list) == nCols)
map_list.append(curr_compare_list)
assert(len(map_list) == nRows)
return map_list
| bsd-3-clause |
AVGInnovationLabs/DoNotSnap | train.py | 1 | 4886 | import cv2
import sys
import pickle
import numpy as np
import matplotlib.pyplot as plt
from AffineInvariantFeatures import AffineInvariant
from TemplateMatcher import TemplateMatch, Templates
from PIL import Image
from itertools import izip_longest
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report, roc_curve, auc, confusion_matrix, accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.tree import export_graphviz, DecisionTreeClassifier
def line_count(filename):
with open(filename) as data:
return sum(1 for line in data)
def read_image(filename):
return np.array(Image.open(filename.strip('\n')).convert('L'), np.uint8)
def read_file(filename, limit=0):
n = 0
lines = line_count(filename)
with open(filename) as data:
while True:
line = next(data, None)
if not line or (limit and n >= limit):
break
n += 1
print '\r%s %d/%d' % (filename, n, limit or lines),
try:
yield read_image(line)
except:
continue
def get_templates():
return np.array(list(read_file('templates.txt')))
def get_images(limit=0):
positive = read_file('positive.txt', limit / 2 if limit else 0)
negative = read_file('negative.txt', limit / 2 if limit else 0)
for p, n in izip_longest(positive, negative):
if p is not None:
yield (1, p)
if n is not None:
yield (0, n)
def get_dataset(limit):
return map(np.asarray, zip(*get_images(limit)))
def plot_roc(fpr, tpr, roc_auc):
# Plot all ROC curves
plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Affine Invariant SURF + Decision Tree Classifier')
plt.legend(loc='lower right')
plt.show()
def plot_importance(feature_count, importances, indices):
plt.figure()
plt.title('Feature importances')
plt.bar(range(feature_count), importances[indices], color='r', align='center')
plt.xticks(range(feature_count), indices)
plt.xlim([-1, feature_count])
plt.show()
def main(name, dataset_size):
templates = get_templates()
print 'templates: %d' % len(templates)
labels, samples = get_dataset(dataset_size)
print 'samples: %d' % len(samples)
extractor = cv2.FeatureDetector_create('SURF')
detector = cv2.DescriptorExtractor_create('SURF')
print 'applying affine invariant transform'
affine = AffineInvariant(extractor, detector)
templates = affine.transform(templates)
samples = affine.transform(samples)
model = Pipeline([
('match', TemplateMatch(Templates(templates))), # XXX: hack to bypass cloning error
# ('reduce_dim', PCA(n_components = 12 * 6))
])
samples = model.fit_transform(samples)
rng = np.random.RandomState()
X_train, X_test, y_train, y_test = train_test_split(samples, labels, test_size=0.5, random_state=rng)
print 'train: %d, test: %d' % (len(X_train), len(X_test))
params = dict(
min_samples_split = [5, 6, 7, 8, 9, 10],
min_samples_leaf = [3, 4, 5, 6, 7],
max_leaf_nodes = [10, 9, 8, 7, 6],
class_weight = [{1: w} for w in [10, 8, 4, 2, 1]]
)
tree = DecisionTreeClassifier(max_depth=4, random_state=rng)
cvmodel = GridSearchCV(tree, params, cv=10, n_jobs=cv2.getNumberOfCPUs())
cvmodel.fit(X_train, y_train)
print 'grid scores'
for params, mean_score, scores in cvmodel.grid_scores_:
print '%0.3f (+/-%0.03f) for %r' % (mean_score, scores.std() * 2, params)
print 'best parameters'
print cvmodel.best_params_
importances = cvmodel.best_estimator_.feature_importances_
indices = np.argsort(importances)[::-1]
plot_importance(6, importances, indices)
y_pred = cvmodel.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print 'accuracy: %f' % accuracy
print classification_report(y_test, y_pred)
print confusion_matrix(y_test, y_pred)
y_score = cvmodel.predict_proba(X_test)[:, 1]
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
plot_roc(fpr, tpr, roc_auc)
export_graphviz(cvmodel.best_estimator_, out_file=name + '.dot', class_names=['background', 'badge'], filled=True, rounded=True, special_characters=True)
pickle.dump(dict(params=params, pipe=model, model=cvmodel.best_estimator_), open(name + '.pkl', 'wb'))
if __name__ == '__main__':
name = sys.argv[1] if len(sys.argv) >= 2 else 'classifier'
dataset_size = int(sys.argv[2]) if len(sys.argv) >= 3 else 0
main(name, dataset_size)
| gpl-3.0 |
hstau/covar-cryo | covariance/rotatefill.py | 1 | 1524 | '''function [out] = imrotateFill(inp, angle)
% function [out] = imrotateFill(inp)
% Rotates a 2D image counterclockwise by angle in degrees
% Output image has the same dimension as input.
% Undefined regions are filled in by repeating the original image
% Note: input images must be square
%
% Copyright (c) UWM, Peter Schwander Mar. 20, 2014
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
version = 'imrotateFill, V0.9';
Ported to python. Hstau Liao Oct. 2016
'''
import numpy as np
import logging, sys
import math
from scipy.ndimage.interpolation import rotate
import matplotlib.pyplot as plt
def op(input, angle, visual=False):
nPix = input.shape[0]
inpRep = np.tile(input, (3, 3))
outRep = rotate(inpRep, angle, reshape=False)
out = outRep[nPix:2 * nPix, nPix:2 * nPix]
if visual:
plt.subplot(2, 2, 1)
plt.imshow(input,cmap = plt.get_cmap('gray'))
plt.title('Input')
plt.subplot(2, 2, 2)
plt.imshow(out, cmap=plt.get_cmap('gray'))
plt.title('Output')
plt.subplot(2, 2, 3)
plt.imshow(inpRep, cmap=plt.get_cmap('gray'))
plt.title('Input 3x3')
plt.subplot(2, 2, 4)
plt.imshow(outRep, cmap=plt.get_cmap('gray'))
plt.title('Output 3x3')
plt.show()
return out
if __name__ == '__main__':
# tested using a 6x6 image
img = np.loadtxt(sys.argv[1])
ang = float(sys.argv[2]) # in degrees
    visual = sys.argv[3].lower() in ('1', 'true', 'yes')  # bool() of a non-empty string is always True
result = op(img,ang,visual)
| gpl-2.0 |
mne-tools/mne-tools.github.io | 0.13/_downloads/plot_compute_raw_data_spectrum.py | 8 | 3431 | """
==================================================
Compute the power spectral density of raw data
==================================================
This script shows how to compute the power spectral density (PSD)
of measurements on a raw dataset. It also shows the effect of applying SSP
to the data to reduce ECG and EOG artifacts.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io, read_proj, read_selection
from mne.datasets import sample
from mne.time_frequency import psd_multitaper
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog-proj.fif'
tmin, tmax = 0, 60 # use the first 60s of data
# Setup for reading the raw data (to save memory, crop before loading)
raw = io.read_raw_fif(raw_fname).crop(tmin, tmax).load_data()
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Add SSP projection vectors to reduce EOG and ECG artifacts
projs = read_proj(proj_fname)
raw.add_proj(projs, remove_existing=True)
fmin, fmax = 2, 300 # look at frequencies between 2 and 300Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
# Let's first check out all channel types
raw.plot_psd(area_mode='range', tmax=10.0, show=False)
# Now let's focus on a smaller subset:
# Pick MEG magnetometers in the Left-temporal region
selection = read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
stim=False, exclude='bads', selection=selection)
# Let's just look at the first few channels for demonstration purposes
picks = picks[:4]
plt.figure()
ax = plt.axes()
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=False, ax=ax, color=(0, 0, 1), picks=picks,
show=False)
# And now do the same with SSP applied
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(0, 1, 0), picks=picks,
show=False)
# And now do the same with SSP + notch filtering
# Pick all channels for notch since the SSP projection mixes channels together
raw.notch_filter(np.arange(60, 241, 60), n_jobs=1)
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(1, 0, 0), picks=picks,
show=False)
ax.set_title('Four left-temporal magnetometers')
plt.legend(['Without SSP', 'With SSP', 'SSP + Notch'])
# Alternatively, you may also create PSDs from Raw objects with ``psd_*``
f, ax = plt.subplots()
psds, freqs = psd_multitaper(raw, low_bias=True, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, proj=True, picks=picks,
n_jobs=1)
psds = 10 * np.log10(psds)
psds_mean = psds.mean(0)
psds_std = psds.std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD', xlabel='Frequency',
ylabel='Power Spectral Density (dB)')
plt.show()
| bsd-3-clause |
ashhher3/seaborn | seaborn/tests/test_utils.py | 11 | 11338 | """Tests for plotting utilities."""
import warnings
import tempfile
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nose
import nose.tools as nt
from nose.tools import assert_equal, raises
import numpy.testing as npt
import pandas.util.testing as pdt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
from pandas.util.testing import network
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = None
from . import PlotTestCase
from .. import utils, rcmod
from ..utils import get_dataset_names, load_dataset
a_norm = np.random.randn(100)
def test_pmf_hist_basics():
"""Test the function to return barplot args for pmf hist."""
out = utils.pmf_hist(a_norm)
assert_equal(len(out), 3)
x, h, w = out
assert_equal(len(x), len(h))
# Test simple case
a = np.arange(10)
x, h, w = utils.pmf_hist(a, 10)
nose.tools.assert_true(np.all(h == h[0]))
def test_pmf_hist_widths():
"""Test histogram width is correct."""
x, h, w = utils.pmf_hist(a_norm)
assert_equal(x[1] - x[0], w)
def test_pmf_hist_normalization():
"""Test that output data behaves like a PMF."""
x, h, w = utils.pmf_hist(a_norm)
nose.tools.assert_almost_equal(sum(h), 1)
nose.tools.assert_less_equal(h.max(), 1)
def test_pmf_hist_bins():
"""Test bin specification."""
x, h, w = utils.pmf_hist(a_norm, 20)
assert_equal(len(x), 20)
def test_ci_to_errsize():
"""Test behavior of ci_to_errsize."""
cis = [[.5, .5],
[1.25, 1.5]]
heights = [1, 1.5]
actual_errsize = np.array([[.5, 1],
[.25, 0]])
test_errsize = utils.ci_to_errsize(cis, heights)
npt.assert_array_equal(actual_errsize, test_errsize)
def test_desaturate():
"""Test color desaturation."""
out1 = utils.desaturate("red", .5)
assert_equal(out1, (.75, .25, .25))
out2 = utils.desaturate("#00FF00", .5)
assert_equal(out2, (.25, .75, .25))
out3 = utils.desaturate((0, 0, 1), .5)
assert_equal(out3, (.25, .25, .75))
out4 = utils.desaturate("red", .5)
assert_equal(out4, (.75, .25, .25))
@raises(ValueError)
def test_desaturation_prop():
"""Test that pct outside of [0, 1] raises exception."""
utils.desaturate("blue", 50)
def test_saturate():
"""Test performance of saturation function."""
out = utils.saturate((.75, .25, .25))
assert_equal(out, (1, 0, 0))
def test_iqr():
"""Test the IQR function."""
a = np.arange(5)
iqr = utils.iqr(a)
assert_equal(iqr, 2)
class TestSpineUtils(PlotTestCase):
sides = ["left", "right", "bottom", "top"]
outer_sides = ["top", "right"]
inner_sides = ["left", "bottom"]
offset = 10
original_position = ("outward", 0)
offset_position = ("outward", offset)
def test_despine(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine()
for side in self.outer_sides:
nt.assert_true(~ax.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine(**dict(zip(self.sides, [True] * 4)))
for side in self.sides:
nt.assert_true(~ax.spines[side].get_visible())
def test_despine_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(ax=ax2)
for side in self.sides:
nt.assert_true(ax1.spines[side].get_visible())
for side in self.outer_sides:
nt.assert_true(~ax2.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax2.spines[side].get_visible())
def test_despine_with_offset(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.despine(ax=ax, offset=self.offset)
for side in self.sides:
is_visible = ax.spines[side].get_visible()
new_position = ax.spines[side].get_position()
if is_visible:
nt.assert_equal(new_position, self.offset_position)
else:
nt.assert_equal(new_position, self.original_position)
def test_despine_with_offset_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
if ax2.spines[side].get_visible():
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
else:
nt.assert_equal(ax2.spines[side].get_position(),
self.original_position)
def test_despine_trim_spines(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_xlim(.75, 3.25)
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
def test_despine_trim_inverted(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_ylim(.85, 3.15)
ax.invert_yaxis()
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
def test_despine_trim_noticks(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_yticks([])
utils.despine(trim=True)
nt.assert_equal(ax.get_yticks().size, 0)
def test_offset_spines_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
utils.offset_spines(offset=self.offset)
nt.assert_true('deprecated' in str(w[0].message))
nt.assert_true(issubclass(w[0].category, UserWarning))
def test_offset_spines(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.offset_spines(offset=self.offset)
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.offset_position)
def test_offset_spines_specific_axes(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, (ax1, ax2) = plt.subplots(2, 1)
utils.offset_spines(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
def test_ticklabels_overlap():
rcmod.set()
f, ax = plt.subplots(figsize=(2, 2))
f.tight_layout() # This gets the Agg renderer working
assert not utils.axis_ticklabels_overlap(ax.get_xticklabels())
big_strings = "abcdefgh", "ijklmnop"
ax.set_xlim(-.5, 1.5)
ax.set_xticks([0, 1])
ax.set_xticklabels(big_strings)
assert utils.axis_ticklabels_overlap(ax.get_xticklabels())
x, y = utils.axes_ticklabels_overlap(ax)
assert x
assert not y
def test_categorical_order():
x = ["a", "c", "c", "b", "a", "d"]
y = [3, 2, 5, 1, 4]
order = ["a", "b", "c", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(x, order)
nt.assert_equal(out, order)
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
out = utils.categorical_order(np.array(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(pd.Series(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(y)
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(np.array(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(pd.Series(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
if pandas_has_categoricals:
x = pd.Categorical(x, order)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.categories))
x = pd.Series(x)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.cat.categories))
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
x = ["a", np.nan, "c", "c", "b", "a", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
if LooseVersion(pd.__version__) >= "0.15":
def check_load_dataset(name):
ds = load_dataset(name, cache=False)
assert(isinstance(ds, pd.DataFrame))
def check_load_cached_dataset(name):
    # Test the caching using a temporary directory.
# With Python 3.2+, we could use the tempfile.TemporaryDirectory()
# context manager instead of this try...finally statement
tmpdir = tempfile.mkdtemp()
try:
# download and cache
ds = load_dataset(name, cache=True, data_home=tmpdir)
# use cached version
ds2 = load_dataset(name, cache=True, data_home=tmpdir)
pdt.assert_frame_equal(ds, ds2)
finally:
shutil.rmtree(tmpdir)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_get_dataset_names():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
names = get_dataset_names()
assert(len(names) > 0)
assert(u"titanic" in names)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
        # does not take effect, so we need to call it explicitly
# yield check_load_dataset, name
check_load_dataset(name)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_cached_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
        # does not take effect, so we need to call it explicitly
# yield check_load_dataset, name
check_load_cached_dataset(name)
| bsd-3-clause |
parekhmitchell/Machine-Learning | Machine Learning A-Z Template Folder/Part 2 - Regression/Section 8 - Decision Tree Regression/regression_template.py | 22 | 1424 | # Regression Template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting the Regression Model to the dataset
# Create your regressor here
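# One possible regressor (a sketch so the template runs end to end; swap in any
# other scikit-learn regressor you prefer -- a decision tree matches this
# section's topic):
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)
regressor.fit(X, y)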
# Predicting a new result
y_pred = regressor.predict(6.5)
# Visualising the Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, regressor.predict(X), color = 'blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Regression results (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show() | mit |
jhuapl-boss/intern | examples/dvid/general_test.py | 1 | 3757 | import intern
from intern.remote.dvid import DVIDRemote
from intern.resource.dvid.resource import DataInstanceResource
from intern.resource.dvid.resource import RepositoryResource
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
########### NOTE ###########
# This test requires an accessible DVID instance
# DVID Data fetch:
dvid = DVIDRemote({"protocol": "http", "host": "localhost:8001",})
DATA_INSTANCE = "ex_EM"
ALIAS = "Test_alias"
########### Test Project API ###########
## Create DataInstanceResource and force the creation of a RepositoryResource
instance_setup_em = DataInstanceResource(
DATA_INSTANCE, None, "uint8blk", ALIAS, "Example channel.", datatype="uint8"
)
# Get the channel and create a project
instance_actual_repo = dvid.create_project(instance_setup_em)
print("Repo UUID:" + instance_actual_repo)
# Create an instance within given repo(UUID)
instance_setup_anno = DataInstanceResource(
DATA_INSTANCE + "_the_second",
instance_actual_repo,
"uint8blk",
ALIAS,
"Example channel.",
datatype="uint8",
)
instance_actual_anno_uuid = dvid.create_project(instance_setup_anno)
print("Data Instance UUID: {}".format(instance_actual_anno_uuid))
# Create a dummy repo with the Repository Resource for deletion
instance_setup_em_delete = RepositoryResource(None, "Test_for_deletion")
instance_actual_em_delete_uuid = dvid.create_project(instance_setup_em_delete)
instance_actual_em_delete = dvid.delete_project(instance_setup_em_delete)
print("Successfully deleted Repo project: {}".format(instance_actual_em_delete_uuid))
# Delete the data instance of a repo
instance_setup_em_delete = DataInstanceResource(
DATA_INSTANCE, None, "uint8blk", ALIAS, "Example channel.", datatype="uint8"
)
instance_actual_em_delete_uuid = dvid.create_project(instance_setup_em_delete)
dvid.delete_project(dvid.get_instance(instance_actual_em_delete_uuid, DATA_INSTANCE))
print(
"Successfully deleted data instance project: {}".format(
instance_actual_em_delete_uuid
)
)
########### Test Versioning API ###########
# Set up a new project with a channel
instance_setup_merge = DataInstanceResource(
DATA_INSTANCE + "_the_second",
None,
"uint8blk",
"Mege_repo",
"Example channel.",
datatype="uint8",
)
chan_actual_parent1 = dvid.create_project(instance_setup_merge)
print("\nParent1 UUID: " + chan_actual_parent1)
commit_1 = dvid.commit(chan_actual_parent1, note="Test the commit")
branch_1 = dvid.branch(chan_actual_parent1, note="Test the versioning system once")
branch_2 = dvid.branch(chan_actual_parent1, note="Test the versioning system twice")
print("Created branches {} and {} from Parent1".format(branch_1, branch_2))
########### Test Metadata API ###########
# Set up a new project with a channel
print(dvid.get_info(instance_setup_merge))
print(dvid.get_server_info())
print(dvid.get_server_compiled_types())
dvid.server_reload_metadata()
########### Test Voluming API ###########
#
# Prepare the data
img = Image.open("<somedir>/*.png")
data_tile = np.asarray(img)
print(data_tile.shape)
data_tile = np.expand_dims(data_tile, axis=0)
data_tile = data_tile.copy(order="C")
# Create the project
instance_setup_up = DataInstanceResource(
DATA_INSTANCE + "_the_second",
None,
"imagetile",
"Upload Test",
"Example channel.",
datatype="uint8",
)
chan_actual_up = dvid.create_project(instance_setup_up)
# Create the cutout
dvid.create_cutout(instance_setup_up, 0, [0, 454], [0, 480], [0, 1], data_tile)
print("Create cutout successful")
# Get the cutout
got_cutout = dvid.get_cutout(instance_setup_up, 0, [0, 454], [0, 480], [0, 1])
# Check for equality
if (got_cutout == data_tile).all():
print("Both tiles equate")
| apache-2.0 |
dalejung/naginpy | naginpy/special_eval/tests/test_manifest.py | 1 | 15685 | import ast
from unittest import TestCase
from textwrap import dedent
import pandas as pd
import numpy as np
from numpy.testing import assert_almost_equal
import nose.tools as nt
from asttools import (
ast_equal
)
from ..manifest import (
Expression,
Manifest,
_manifest
)
from ..exec_context import (
ContextObject,
SourceObject,
ExecutionContext,
get_source_key
)
from .common import ArangeSource
def grab_expression_from_assign(code):
node = code.body[0].value
expr = ast.Expression(lineno=0, col_offset=0, body=node)
return expr
class TestExpression(TestCase):
def test_expression(self):
source = """
arr = np.arange(20)
res = np.sum(arr)
"""
source = dedent(source)
lines = source.strip().split('\n')
load_names = [['np'], ['np', 'arr']]
for i, line in enumerate(lines):
code = ast.parse(line, '<>', 'exec')
# expression must be evaluable, assignments are not
with nt.assert_raises(Exception):
Expression(code.body[0])
extracted_expr = grab_expression_from_assign(code)
# skip the assign
base_expr = ast.parse(line.split('=')[1].strip(), mode='eval')
exp1 = Expression(extracted_expr)
exp2 = Expression(base_expr)
nt.assert_equal(exp1, exp2)
nt.assert_is_not(exp1, exp2)
nt.assert_count_equal(exp1.load_names(), load_names[i])
def test_single_line(self):
""" Expressoins can only be single line """
source = """
np.arange(20)
np.sum(arr)
"""
source = dedent(source)
code = ast.parse(source)
# expression must be single line
with nt.assert_raises(Exception):
Expression(code)
# single line still works
Expression(code.body[0])
Expression(code.body[1])
def test_expression_conversion(self):
"""
So I'm not 100% sure on converting all code into ast.Expressions.
Right now it is what I'm doing, so might as well explicitly test?
"""
source = """
np.arange(20)
np.sum(arr)
"""
source = dedent(source)
code = ast.parse(source)
expr1 = Expression(code.body[0])
nt.assert_is_instance(expr1.code, ast.Expression)
expr2 = Expression(code.body[1])
nt.assert_is_instance(expr2.code, ast.Expression)
expr3 = Expression("np.arange(15)")
nt.assert_is_instance(expr3.code, ast.Expression)
def test_key(self):
""" stable hash key """
source = """
np.arange(20)
np.sum(arr)
"""
source = dedent(source)
code = ast.parse(source)
expr1 = Expression(code.body[0])
expr2 = Expression(code.body[1])
import binascii
# changed key to return str, same hash just different rep
correct1 = b'}\xff\x1c\x0er\xe8k3\x84\x96R\x98\x9a\xa4\xe0i'
correct1 = binascii.b2a_hex(correct1).decode('utf-8')
correct2 = b'\xd6\x88\x08\xa2\xd0\x01\xa4\xc6\xabb\x1aTj\xce\x98\x18'
correct2 = binascii.b2a_hex(correct2).decode('utf-8')
# keys are stable and should not change between lifecycles
nt.assert_equal(expr1.key, correct1)
nt.assert_equal(expr2.key, correct2)
# key also works for equals
nt.assert_equal(expr1, correct1)
nt.assert_equal(expr2, correct2)
def test_copy(self):
"""
test copy
"""
source = """
np.arange(20)
"""
source = dedent(source)
code = ast.parse(source)
expr1 = Expression(code.body[0])
expr2 = expr1.copy()
# equivalent value
nt.assert_true(ast_equal(expr1.code, expr2.code))
# but not the same
nt.assert_is_not(expr1.code, expr2.code)
nt.assert_is_not(expr1.code.body, expr2.code.body)
# mutability
nt.assert_false(expr2.mutable)
expr3 = expr1.copy(mutable=True)
nt.assert_true(expr3.mutable)
def test_mutability(self):
""" test immutability """
source = """
np.arange(20)
"""
source = dedent(source)
code = ast.parse(source)
new_num = ast.Num(n=3)
expr1 = Expression(code.body[0])
with nt.assert_raises_regexp(Exception, "This expression is not mutable"):
expr1.replace(new_num, expr1.code.body, 'args', 0)
expr2 = expr1.copy(mutable=True)
old_key = expr2.key
expr2.replace(new_num, expr2.code.body, 'args', 0)
nt.assert_not_equal(expr2.key, old_key)
# expr2 was changed
nt.assert_false(ast_equal(expr1.code, expr2.code))
nt.assert_equal(expr2.get_source(), 'np.arange(3)')
class TestManifest(TestCase):
def test_eval(self):
source = "d * string_test"
context = {
'd': 13,
'string_test': 'string_test'
}
expr = Expression(source)
exec_context = ExecutionContext(context)
manifest = Manifest(expr, exec_context)
nt.assert_equal(manifest.eval(), 'string_test' * 13)
def test_equals(self):
source = "d * string_test"
context = {
'd': 13,
'string_test': 'string_test'
}
expr = Expression(source)
exec_context = ExecutionContext(context)
manifest = Manifest(expr, exec_context)
manifest2 = Manifest(expr, exec_context)
nt.assert_equal(manifest, manifest2)
# change expression
expr3 = Expression("d * string_test * 2")
manifest3 = Manifest(expr3, exec_context)
nt.assert_not_equal(manifest, manifest3)
# change context
context4 = {
'd': 11,
'string_test': 'string_test'
}
exec_context4 = ExecutionContext(context4)
manifest4 = Manifest(expr, exec_context4)
nt.assert_not_equal(manifest, manifest4)
def test_nested_eval(self):
"""
d * (1 + arr + arr2[10:])
which is really two manifests
arr_manifest = (1 + arr + arr2[10:])
        manifest = (d * (arr_manifest))
"""
arr_source = "1 + arr + arr2[10:]"
aranger = ArangeSource()
arr_context = {
'arr': SourceObject(aranger, 10),
'arr2': SourceObject(aranger, 20),
}
arr_expr = Expression(arr_source)
arr_exec_context = ExecutionContext.from_ns(arr_context)
arr_manifest = Manifest(arr_expr, arr_exec_context)
source = "d * arr"
context = {
'd': 13,
'arr': arr_manifest
}
expr = Expression(source)
exec_context = ExecutionContext.from_ns(context)
manifest = Manifest(expr, exec_context)
correct = 13 * (1 + np.arange(10) + np.arange(20)[10:])
# up till this point, everything is lazy
nt.assert_equal(len(aranger.cache), 0)
assert_almost_equal(correct, manifest.eval())
nt.assert_equal(len(aranger.cache), 2)
def test_hashable(self):
source = "d * string_test"
context = {
'd': 13,
'string_test': 'string_test'
}
expr = Expression(source)
exec_context = ExecutionContext(context)
manifest = Manifest(expr, exec_context)
d = {}
d[manifest] = manifest #hashable
key = tuple([manifest.expression, manifest.context])
# test key
nt.assert_in(key, d)
# a feature is being able to check expression.key for cases
# where we don't have the source and just the stable key
stable_key = tuple([expr.key, manifest.context])
nt.assert_in(stable_key, d)
def test_stateless(self):
"""
stateless-ness of Manifest depends on context
"""
source = "d * string_test"
context = {
'd': 13,
'string_test': 'string_test'
}
expr = Expression(source)
exec_context = ExecutionContext(context)
manifest = Manifest(expr, exec_context)
nt.assert_equal(manifest.stateless, True)
context = {
'd': 13,
'string_test': object(),
}
expr = Expression(source)
exec_context = ExecutionContext.from_ns(context)
manifest = Manifest(expr, exec_context)
nt.assert_equal(manifest.stateless, False)
def test_fragment():
"""
This is a failing test atm. What I want is the ability to take two manifest
and see whether one is within the other.
    A couple of notes. The sub-expression itself would obviously need to
    match. With each sub-expression, you can have a subset of execution
    contexts. It is that subset that needs to match.
Manifest 1:
Expression:
arr1 + np.log(arr2)
ExecutionContext:
arr1 = np.random(10)
arr2 = np.arange(10)
Manifest 2:
Expression:
np.log(arr1)
ExecutionContext:
arr1 = np.arange(10)
    Here Manifest 2 should be considered a subset of Manifest 1, provided
    that np.arange is wrapped to be stateless.
    Now, currently our hash is done via the string repr. Since `arr2` in
    Manifest 1 is `arr1` in Manifest 2, we currently wouldn't match.
So we'd need to match the load name by value and not by name. I suppose
one could have a modified ast_source that replaced load names with pos
IDs.
"""
c = 1
df = pd.DataFrame(np.random.randn(30, 3), columns=['a', 'bob', 'c'])
source = """pd.core.window.Rolling(np.log(df + 10), 5, min_periods=c).sum()"""
ns = locals()
ns.update({k:v for k, v in globals().items() if k not in ns})
manifest = _manifest(source, ns)
sub_mf = _manifest("np.log(df+10)", ns.copy())
nt.assert_in(sub_mf, manifest)
# new dataframe, does effect contains
ns['df'] = pd.DataFrame(np.random.randn(30, 3), columns=['a', 'bob', 'c'])
sub_mf = _manifest("np.log(df+10)", ns.copy())
nt.assert_not_in(sub_mf, manifest)
    # c is changed but not part of fragment, so doesn't affect contains
ns['c'] = 3
manifest = _manifest(source, ns)
sub_mf = _manifest("np.log(df+10)", ns.copy())
nt.assert_in(sub_mf, manifest)
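# A minimal sketch (an assumption, not an existing naginpy helper) of the idea
# raised in the docstring above: hash load names by the order in which they
# appear rather than by name, so `np.log(arr2)` in one expression can line up
# with `np.log(arr1)` in another when the underlying values match.
class _LoadNameCanonicalizer(ast.NodeTransformer):
    """Rewrite Load-context names to positional placeholders (_pos0, _pos1, ...)."""
    def __init__(self):
        self._positions = {}
    def visit_Name(self, node):
        if isinstance(node.ctx, ast.Load):
            # same name always maps to the same position within one expression
            pos = self._positions.setdefault(node.id, len(self._positions))
            return ast.copy_location(ast.Name(id='_pos%d' % pos, ctx=ast.Load()), node)
        return node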
def test_fragment_var_name():
"""
This should match even though the variable names are different.
"""
c = 1
df = pd.DataFrame(np.random.randn(30, 3), columns=['a', 'bob', 'c'])
source = """pd.core.window.Rolling(np.log(df + 10), 5, min_periods=c).sum()"""
ns = locals()
ns.update({k:v for k, v in globals().items() if k not in ns})
manifest = _manifest(source, ns)
# use blah instead of df. same code.
ns['blah'] = ns['df']
sub_mf = _manifest("np.log(blah+10)", ns)
nt.assert_in(sub_mf, manifest)
# now change blah to be a differnt value
ns['blah'] = 1
sub_mf = _manifest("np.log(blah+10)", ns)
nt.assert_not_in(sub_mf, manifest)
def test_fragment_order_of_ops():
"""
So, in a pure math sense, you should be able to
do this replacement:
E1 = a + b + a + (a + b)
S = b + a + (a + b)
E3 = a + S
E1 == E3
    But since in Python, the order of operations matters, you can't just
    treat that as a subset. (a + b) is not always the same as (b + a).
Dumb example:
class Bob:
def __add__(self, other):
return other
a = Bob()
b = Bob()
nt.assert_not_equal(a + b, b + a)
"""
# TODO, is there a way to subset when dealing with types where operations
# are commutative?
ns = {'a': 1, 'b': 2}
manifest = _manifest("a + b + a + (a + b)", ns)
manifest2 = _manifest("b + a + (a + b)", ns)
nt.assert_not_in(manifest2, manifest)
def test_manifest_partial():
"""
Mechanism where the take a Manifest and supply a partial value via
another Manifest.
"""
ns = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
parent = _manifest("a + (c + d)", ns)
sub = _manifest("(x + y)", {'x': 3, 'y': 4})
# note we are purposely giving wrong answer
items = {sub: 3}
test = parent.eval_with(items, ignore_var_names=True)
nt.assert_equal(test, 4)
# parent unaffected
nt.assert_equal(parent.eval(), 8)
    # sub also unaffected
nt.assert_equal(sub.eval(), 7)
def test_manifest_partial_multi():
ns = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
parent = _manifest("a + (c + d) + (a + b)", ns)
# we are expecting these to match by execution context
sub = _manifest("(x + y)", {'x': 3, 'y': 4})
sub2 = _manifest("(x + y)", {'x': 1, 'y': 2})
items = {sub: sub.eval(), sub2: sub2.eval()}
# this errors since we don't multi match on the ast_contains
test = parent.eval_with(items, ignore_var_names=True)
nt.assert_equal(test, 11)
items = {sub: sub, sub2: sub2}
test = parent.eval_with(items, ignore_var_names=True)
nt.assert_equal(test, 11)
# pass in only manifest
test = parent.eval_with([sub, sub2], ignore_var_names=True)
nt.assert_equal(test, 11)
def test_eval_with_execution_count():
class Value:
""" value that keeps track of when it is used in ops """
def __init__(self, value):
self.value = value
self.op_count = 0
def get_obj(self):
return self.value
def __add__(self, other):
self.op_count += 1
return self.value + other
def __radd__(self, other):
self.op_count += 1
return self.value + other
ns = {
'a': Value(1),
'b': Value(2),
'c': Value(3),
'd': Value(4),
'e': Value(5)
}
parent = _manifest("e + (c + d) + (a + b)", ns)
# we are expecting these to match by execution context
sub = _manifest("(a + b)", ns)
sub2 = _manifest("(c + d)", ns)
items = {sub: sub.eval(), sub2: sub2.eval()}
# a through d should have been used
nt.assert_equal(ns['a'].op_count, 1)
nt.assert_equal(ns['b'].op_count, 1)
nt.assert_equal(ns['c'].op_count, 1)
nt.assert_equal(ns['d'].op_count, 1)
nt.assert_equal(ns['e'].op_count, 0)
res = parent.eval_with(items)
nt.assert_equal(res, 1+2+3+4+5)
# make sure we did not use the Value again
nt.assert_equal(ns['a'].op_count, 1)
nt.assert_equal(ns['b'].op_count, 1)
nt.assert_equal(ns['c'].op_count, 1)
nt.assert_equal(ns['d'].op_count, 1)
nt.assert_equal(ns['e'].op_count, 1) # gets used
# normal non partial eval
res = parent.eval()
nt.assert_equal(res, 1+2+3+4+5)
# since we did a full eval, everything got run again
nt.assert_equal(ns['a'].op_count, 2)
nt.assert_equal(ns['b'].op_count, 2)
nt.assert_equal(ns['c'].op_count, 2)
nt.assert_equal(ns['d'].op_count, 2)
nt.assert_equal(ns['e'].op_count, 2) # gets used
def test_expanded_multi_nested_partial():
# we are expecting these to match by execution context
ns = {'test1':0, 'test2': 1}
leaf = _manifest("(test1 + test2)", ns)
ns = {'x':1, 'y': leaf}
xy = _manifest("(x + y)", ns)
ns = {'a': 1, 'b': xy}
sub = _manifest("(a + b)", ns)
parent_ns = {'e': 3, 'a': sub}
parent = _manifest("e + a", parent_ns)
expanded = parent.expand()
nt.assert_count_equal(expanded.context.keys(),
['a', 'e', 'x', 'test1', 'test2'])
nt.assert_equal(expanded.expression.get_source(),
"(e + (a + (x + (test1 + test2))))")
nt.assert_equal(expanded.eval(), 6)
| mit |
erscott/Wellderly | SWGR_v1.0/masterVar_chr_split.py | 1 | 3344 | '''
Splits Complete Genomics masterVar files into chromosome specific masterVar
files when given an input file path and an output directory path.
e.g. >python masterVar_chr_split.py -i /path/to/masterVar.tsv.bz2 -o /path/to/output_dir/
Python package dependencies:
pandas, numpy
python 2.7 for argparse module
'''
import pandas as pd
import os, sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', type=str,help='Specifies the input file, /path/to/CG_data/masterVar.tsv.bz2')
parser.add_argument('-o', type=str,help='Specifies the output directory, e.g. /path/to/CG_data/chromosome/')
def chr_split_mastervar(f_path, target_path):
#Get header for masterVar
header = os.popen('bzcat ' + f_path+ ' | head -100 | grep chromosome -n').readlines()
#Creating Reader object for iterating through NA12878 CG masterVar file
skip_rows = int(header[0].split(":")[0]) -1
    mastervar_headings = os.popen('head -' + str(skip_rows) + ' ' + f_path).readlines() #space needed between the line count and the file path
#Creating pandas dataframe with chunksize 200,000 lines
chunk = pd.read_table(f_path, chunksize=200000, sep="\t", skiprows=skip_rows,compression='bz2',dtype=object)
chunk.columns = header[0].rstrip('\n').split(":")[1].split("\t") #relabeling columns
prev_chr = 'chr1' #tracking chromosome position
prev_target_write_file = None
for mastervar in chunk: #iterate through mastervar file
for current_chrom,chr_df in mastervar.groupby(['chromosome']): #split dataframe by chromosome for writing
#check for increment to new chromosome
if prev_chr != current_chrom:
os.system('bzip2 ' + prev_target_write_file) #compress last chromosome file
prev_chr = current_chrom
#specifying output file path and chromosome-specific name
file_name = f_path.split("/")[-1].rstrip(".tsv.bz2") #getting file prefix
target_path = target_path.rstrip("/")+"/" #ensuring target path ends with fwd slash
write_target_file_path = target_path +file_name + "_" + current_chrom +".tsv" #specify target directory and chrom file name
#print write_target_file_path
if len(os.popen('find '+ write_target_file_path + '').readlines()) == 0: #checking for output file
os.system('bzcat '+ f_path + '| head -' + str(skip_rows) + " > " +write_target_file_path) #writing header if no output file found
chr_df.to_csv(write_target_file_path, sep="\t", index=False, mode='a') #writing chromosome specific variants to output file
else: #Suppress header if target file found
chr_df.to_csv(write_target_file_path, sep="\t", index=False, mode='a', header=False) #writing chromosome specifc variants to output file w/o header
prev_target_write_file = write_target_file_path #increment to current write_target_file_path
return 'complete'
opts = parser.parse_known_args()
f_path, target_path = opts[0].i, opts[0].o
assert f_path.split(".")[-2:] == ['tsv','bz2'], "expecting masterVar input file suffix .tsv.bz2"
test = chr_split_mastervar(f_path, target_path)
if test == 'complete':
print 'All chromosomes processed'
| bsd-3-clause |
DGrady/pandas | pandas/io/date_converters.py | 10 | 1827 | """This module is designed for community supported date conversion functions"""
from pandas.compat import range, map
import numpy as np
import pandas._libs.lib as lib
def parse_date_time(date_col, time_col):
date_col = _maybe_cast(date_col)
time_col = _maybe_cast(time_col)
return lib.try_parse_date_and_time(date_col, time_col)
def parse_date_fields(year_col, month_col, day_col):
year_col = _maybe_cast(year_col)
month_col = _maybe_cast(month_col)
day_col = _maybe_cast(day_col)
return lib.try_parse_year_month_day(year_col, month_col, day_col)
def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col,
second_col):
year_col = _maybe_cast(year_col)
month_col = _maybe_cast(month_col)
day_col = _maybe_cast(day_col)
hour_col = _maybe_cast(hour_col)
minute_col = _maybe_cast(minute_col)
second_col = _maybe_cast(second_col)
return lib.try_parse_datetime_components(year_col, month_col, day_col,
hour_col, minute_col, second_col)
def generic_parser(parse_func, *cols):
N = _check_columns(cols)
results = np.empty(N, dtype=object)
for i in range(N):
args = [c[i] for c in cols]
results[i] = parse_func(*args)
return results
def _maybe_cast(arr):
if not arr.dtype.type == np.object_:
arr = np.array(arr, dtype=object)
return arr
def _check_columns(cols):
if not len(cols):
raise AssertionError("There must be at least 1 column")
head, tail = cols[0], cols[1:]
N = len(head)
for i, n in enumerate(map(len, tail)):
if n != N:
raise AssertionError('All columns must have the same length: {0}; '
'column {1} has length {2}'.format(N, i, n))
return N
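# Typical usage (illustrative only; the file name and column names below are
# assumptions): these converters are meant to be handed to read_csv, e.g.
#
#   import pandas as pd
#   from pandas.io.date_converters import parse_date_time
#
#   df = pd.read_csv('data.csv',
#                    parse_dates={'datetime': ['date', 'time']},
#                    date_parser=parse_date_time)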
| bsd-3-clause |
aajtodd/zipline | zipline/algorithm.py | 4 | 46969 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
import warnings
import pytz
import pandas as pd
import numpy as np
from datetime import datetime
from itertools import groupby, chain
from six.moves import filter
from six import (
exec_,
iteritems,
itervalues,
string_types,
)
from operator import attrgetter
from zipline.errors import (
AddTermPostInit,
OrderDuringInitialize,
OverrideCommissionPostInit,
OverrideSlippagePostInit,
RegisterAccountControlPostInit,
RegisterTradingControlPostInit,
UnsupportedCommissionModel,
UnsupportedOrderParameters,
UnsupportedSlippageModel,
)
from zipline.finance.trading import TradingEnvironment
from zipline.finance.blotter import Blotter
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance.controls import (
LongOnly,
MaxOrderCount,
MaxOrderSize,
MaxPositionSize,
MaxLeverage,
RestrictedListOrder
)
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.finance.performance import PerformanceTracker
from zipline.finance.slippage import (
VolumeShareSlippage,
SlippageModel,
transact_partial
)
from zipline.assets import Asset, Future
from zipline.assets.futures import FutureChain
from zipline.gens.composites import date_sorted_sources
from zipline.gens.tradesimulation import AlgorithmSimulator
from zipline.modelling.engine import (
NoOpFFCEngine,
SimpleFFCEngine,
)
from zipline.sources import DataFrameSource, DataPanelSource
from zipline.utils.api_support import (
api_method,
require_not_initialized,
ZiplineAPI,
)
import zipline.utils.events
from zipline.utils.events import (
EventManager,
make_eventrule,
DateRuleFactory,
TimeRuleFactory,
)
from zipline.utils.factory import create_simulation_parameters
from zipline.utils.math_utils import tolerant_equals
import zipline.protocol
from zipline.protocol import Event
from zipline.history import HistorySpec
from zipline.history.history_container import HistoryContainer
DEFAULT_CAPITAL_BASE = float("1.0e5")
class TradingAlgorithm(object):
"""
Base class for trading algorithms. Inherit and overload
initialize() and handle_data(data).
A new algorithm could look like this:
```
from zipline.api import order, symbol
def initialize(context):
context.sid = symbol('AAPL')
context.amount = 100
def handle_data(context, data):
sid = context.sid
amount = context.amount
order(sid, amount)
```
To then to run this algorithm pass these functions to
TradingAlgorithm:
my_algo = TradingAlgorithm(initialize, handle_data)
stats = my_algo.run(data)
"""
def __init__(self, *args, **kwargs):
"""Initialize sids and other state variables.
:Arguments:
:Optional:
initialize : function
Function that is called with a single
                argument at the beginning of the simulation.
handle_data : function
Function that is called with 2 arguments
(context and data) on every bar.
script : str
Algoscript that contains initialize and
handle_data function definition.
data_frequency : {'daily', 'minute'}
The duration of the bars.
capital_base : float <default: 1.0e5>
How much capital to start with.
instant_fill : bool <default: False>
Whether to fill orders immediately or on next bar.
asset_finder : An AssetFinder object
A new AssetFinder object to be used in this TradingEnvironment
asset_metadata: can be either:
- dict
- pandas.DataFrame
- object with 'read' property
If dict is provided, it must have the following structure:
* keys are the identifiers
* values are dicts containing the metadata, with the metadata
field name as the key
If pandas.DataFrame is provided, it must have the
following structure:
* column names must be the metadata fields
* index must be the different asset identifiers
* array contents should be the metadata value
If an object with a 'read' property is provided, 'read' must
return rows containing at least one of 'sid' or 'symbol' along
with the other metadata fields.
identifiers : List
Any asset identifiers that are not provided in the
asset_metadata, but will be traded by this TradingAlgorithm
"""
self.sources = []
# List of trading controls to be used to validate orders.
self.trading_controls = []
# List of account controls to be checked on each bar.
self.account_controls = []
self._recorded_vars = {}
self.namespace = kwargs.get('namespace', {})
self._platform = kwargs.pop('platform', 'zipline')
self.logger = None
self.benchmark_return_source = None
# default components for transact
self.slippage = VolumeShareSlippage()
self.commission = PerShare()
self.instant_fill = kwargs.pop('instant_fill', False)
# set the capital base
self.capital_base = kwargs.pop('capital_base', DEFAULT_CAPITAL_BASE)
self.sim_params = kwargs.pop('sim_params', None)
if self.sim_params is None:
self.sim_params = create_simulation_parameters(
capital_base=self.capital_base,
start=kwargs.pop('start', None),
end=kwargs.pop('end', None)
)
self.perf_tracker = PerformanceTracker(self.sim_params)
# Update the TradingEnvironment with the provided asset metadata
self.trading_environment = kwargs.pop('env',
TradingEnvironment.instance())
self.trading_environment.update_asset_finder(
asset_finder=kwargs.pop('asset_finder', None),
asset_metadata=kwargs.pop('asset_metadata', None),
identifiers=kwargs.pop('identifiers', None)
)
# Pull in the environment's new AssetFinder for quick reference
self.asset_finder = self.trading_environment.asset_finder
self.init_engine(kwargs.pop('ffc_loader', None))
# Maps from name to Term
self._filters = {}
self._factors = {}
self._classifiers = {}
self.blotter = kwargs.pop('blotter', None)
if not self.blotter:
self.blotter = Blotter()
        # Set the dt initially to the period start by forcing it to change
self.on_dt_changed(self.sim_params.period_start)
self.portfolio_needs_update = True
self.account_needs_update = True
self.performance_needs_update = True
self._portfolio = None
self._account = None
self.history_container_class = kwargs.pop(
'history_container_class', HistoryContainer,
)
self.history_container = None
self.history_specs = {}
# If string is passed in, execute and get reference to
# functions.
self.algoscript = kwargs.pop('script', None)
self._initialize = None
self._before_trading_start = None
self._analyze = None
self.event_manager = EventManager()
if self.algoscript is not None:
filename = kwargs.pop('algo_filename', None)
if filename is None:
filename = '<string>'
code = compile(self.algoscript, filename, 'exec')
exec_(code, self.namespace)
self._initialize = self.namespace.get('initialize')
if 'handle_data' not in self.namespace:
raise ValueError('You must define a handle_data function.')
else:
self._handle_data = self.namespace['handle_data']
self._before_trading_start = \
self.namespace.get('before_trading_start')
# Optional analyze function, gets called after run
self._analyze = self.namespace.get('analyze')
elif kwargs.get('initialize') and kwargs.get('handle_data'):
if self.algoscript is not None:
raise ValueError('You can not set script and \
initialize/handle_data.')
self._initialize = kwargs.pop('initialize')
self._handle_data = kwargs.pop('handle_data')
self._before_trading_start = kwargs.pop('before_trading_start',
None)
self.event_manager.add_event(
zipline.utils.events.Event(
zipline.utils.events.Always(),
# We pass handle_data.__func__ to get the unbound method.
# We will explicitly pass the algorithm to bind it again.
self.handle_data.__func__,
),
prepend=True,
)
# If method not defined, NOOP
if self._initialize is None:
self._initialize = lambda x: None
# Alternative way of setting data_frequency for backwards
# compatibility.
if 'data_frequency' in kwargs:
self.data_frequency = kwargs.pop('data_frequency')
self._most_recent_data = None
# Prepare the algo for initialization
self.initialized = False
self.initialize_args = args
self.initialize_kwargs = kwargs
def init_engine(self, loader):
"""
Construct and save an FFCEngine from loader.
If loader is None, constructs a NoOpFFCEngine.
"""
if loader is not None:
self.engine = SimpleFFCEngine(
loader,
self.trading_environment.trading_days,
self.asset_finder,
)
else:
self.engine = NoOpFFCEngine()
def initialize(self, *args, **kwargs):
"""
Call self._initialize with `self` made available to Zipline API
functions.
"""
with ZiplineAPI(self):
self._initialize(self)
def before_trading_start(self, data):
if self._before_trading_start is None:
return
self._before_trading_start(self, data)
def handle_data(self, data):
self._most_recent_data = data
if self.history_container:
self.history_container.update(data, self.datetime)
self._handle_data(self, data)
# Unlike trading controls which remain constant unless placing an
# order, account controls can change each bar. Thus, must check
# every bar no matter if the algorithm places an order or not.
self.validate_account_controls()
def analyze(self, perf):
if self._analyze is None:
return
with ZiplineAPI(self):
self._analyze(self, perf)
def __repr__(self):
"""
N.B. this does not yet represent a string that can be used
to instantiate an exact copy of an algorithm.
However, it is getting close, and provides some value as something
that can be inspected interactively.
"""
return """
{class_name}(
capital_base={capital_base}
sim_params={sim_params},
initialized={initialized},
slippage={slippage},
commission={commission},
blotter={blotter},
recorded_vars={recorded_vars})
""".strip().format(class_name=self.__class__.__name__,
capital_base=self.capital_base,
sim_params=repr(self.sim_params),
initialized=self.initialized,
slippage=repr(self.slippage),
commission=repr(self.commission),
blotter=repr(self.blotter),
recorded_vars=repr(self.recorded_vars))
def _create_data_generator(self, source_filter, sim_params=None):
"""
Create a merged data generator using the sources attached to this
algorithm.
::source_filter:: is a method that receives events in date
sorted order, and returns True for those events that should be
processed by the zipline, and False for those that should be
skipped.
"""
if sim_params is None:
sim_params = self.sim_params
if self.benchmark_return_source is None:
if sim_params.data_frequency == 'minute' or \
sim_params.emission_rate == 'minute':
def update_time(date):
return self.trading_environment.get_open_and_close(date)[1]
else:
def update_time(date):
return date
benchmark_return_source = [
Event({'dt': update_time(dt),
'returns': ret,
'type': zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
'source_id': 'benchmarks'})
for dt, ret in
self.trading_environment.benchmark_returns.iteritems()
if dt.date() >= sim_params.period_start.date() and
dt.date() <= sim_params.period_end.date()
]
else:
benchmark_return_source = self.benchmark_return_source
date_sorted = date_sorted_sources(*self.sources)
if source_filter:
date_sorted = filter(source_filter, date_sorted)
with_benchmarks = date_sorted_sources(benchmark_return_source,
date_sorted)
# Group together events with the same dt field. This depends on the
# events already being sorted.
return groupby(with_benchmarks, attrgetter('dt'))
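# Illustrative sketch (not part of zipline itself): ``source_filter`` is just a
# predicate over the date-sorted events described above.  Assuming events expose
# a ``sid`` attribute, as zipline source events generally do, a minimal filter
# could look like this:

def keep_only_sid_24(event):
    """Keep events belonging to sid 24; drop everything else."""
    return getattr(event, 'sid', None) == 24

# It would then be applied exactly as in the method above, i.e.
# ``filter(keep_only_sid_24, date_sorted)``.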
def _create_generator(self, sim_params, source_filter=None):
"""
Create a basic generator setup using the sources to this algorithm.
::source_filter:: is a method that receives events in date
sorted order, and returns True for those events that should be
processed by the zipline, and False for those that should be
skipped.
"""
if not self.initialized:
self.initialize(*self.initialize_args, **self.initialize_kwargs)
self.initialized = True
if self.perf_tracker is None:
# HACK: When running with the `run` method, we set perf_tracker to
# None so that it will be overwritten here.
self.perf_tracker = PerformanceTracker(sim_params)
self.portfolio_needs_update = True
self.account_needs_update = True
self.performance_needs_update = True
self.data_gen = self._create_data_generator(source_filter, sim_params)
self.trading_client = AlgorithmSimulator(self, sim_params)
transact_method = transact_partial(self.slippage, self.commission)
self.set_transact(transact_method)
return self.trading_client.transform(self.data_gen)
def get_generator(self):
"""
Override this method to add new logic to the construction
of the generator. Overrides can use the _create_generator
method to get a standard construction generator.
"""
return self._create_generator(self.sim_params)
# TODO: make a new subclass, e.g. BatchAlgorithm, and move
# the run method to the subclass, and refactor to put the
# generator creation logic into get_generator.
def run(self, source, overwrite_sim_params=True,
benchmark_return_source=None):
"""Run the algorithm.
:Arguments:
source : can be either:
- pandas.DataFrame
- zipline source
- list of sources
If pandas.DataFrame is provided, it must have the
following structure:
* column names must be the different asset identifiers
* index must be DatetimeIndex
* array contents should be price info.
:Returns:
daily_stats : pandas.DataFrame
Daily performance metrics such as returns, alpha etc.
"""
# Ensure that source is a DataSource object
if isinstance(source, list):
if overwrite_sim_params:
warnings.warn("""List of sources passed, will not attempt to extract start and end
dates. Make sure to set the correct fields in sim_params passed to
__init__().""", UserWarning)
overwrite_sim_params = False
elif isinstance(source, pd.DataFrame):
# if DataFrame provided, map columns to sids and wrap
# in DataFrameSource
copy_frame = source.copy()
copy_frame.columns = \
self.asset_finder.map_identifier_index_to_sids(
source.columns, source.index[0]
)
source = DataFrameSource(copy_frame)
elif isinstance(source, pd.Panel):
# If Panel provided, map items to sids and wrap
# in DataPanelSource
copy_panel = source.copy()
copy_panel.items = self.asset_finder.map_identifier_index_to_sids(
source.items, source.major_axis[0]
)
source = DataPanelSource(copy_panel)
if isinstance(source, list):
self.set_sources(source)
else:
self.set_sources([source])
# Override sim_params if params are provided by the source.
if overwrite_sim_params:
if hasattr(source, 'start'):
self.sim_params.period_start = source.start
if hasattr(source, 'end'):
self.sim_params.period_end = source.end
# Changing period_start and period_close might require updating
# of first_open and last_close.
self.sim_params._update_internal()
# The sids field of the source is the reference for the universe at
# the start of the run
self._current_universe = set()
for source in self.sources:
for sid in source.sids:
self._current_universe.add(sid)
# Check that all sids from the source are accounted for in
# the AssetFinder. This retrieve call will raise an exception if the
# sid is not found.
for sid in self._current_universe:
self.asset_finder.retrieve_asset(sid)
# force a reset of the performance tracker, in case
# this is a repeat run of the algorithm.
self.perf_tracker = None
# create zipline
self.gen = self._create_generator(self.sim_params)
# Create history containers
if self.history_specs:
self.history_container = self.history_container_class(
self.history_specs,
self.current_universe(),
self.sim_params.first_open,
self.sim_params.data_frequency,
)
# loop through simulated_trading, each iteration returns a
# perf dictionary
perfs = []
for perf in self.gen:
perfs.append(perf)
# convert perf dict to pandas dataframe
daily_stats = self._create_daily_stats(perfs)
self.analyze(daily_stats)
return daily_stats
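# Illustrative sketch (not part of zipline itself; ``my_algo`` is a hypothetical
# TradingAlgorithm instance): a price source in the DataFrame form described in
# the docstring above.  Columns are asset identifiers, the index is a
# DatetimeIndex, and the values are prices.

import pandas as pd

prices = pd.DataFrame(
    {'AAPL': [100.0, 101.2, 102.5],
     'MSFT': [40.0, 40.4, 39.9]},
    index=pd.date_range('2014-01-02', periods=3, tz='UTC'),
)
# perf = my_algo.run(prices)   # daily performance DataFrame, as documented above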
def _create_daily_stats(self, perfs):
# create daily and cumulative stats dataframe
daily_perfs = []
# TODO: the loop here could overwrite expected properties
# of daily_perf. Could potentially raise or log a
# warning.
for perf in perfs:
if 'daily_perf' in perf:
perf['daily_perf'].update(
perf['daily_perf'].pop('recorded_vars')
)
perf['daily_perf'].update(perf['cumulative_risk_metrics'])
daily_perfs.append(perf['daily_perf'])
else:
self.risk_report = perf
daily_dts = [np.datetime64(perf['period_close'], utc=True)
for perf in daily_perfs]
daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)
return daily_stats
@api_method
def add_transform(self, transform, days=None):
"""
Ensures that the history container will have enough size to service
a simple transform.
:Arguments:
transform : string
The transform to add. Must be an element of:
{'mavg', 'stddev', 'vwap', 'returns'}.
days : int <default=None>
The maximum number of days you will want for this transform.
This is not needed for 'returns'.
"""
if transform not in {'mavg', 'stddev', 'vwap', 'returns'}:
raise ValueError('Invalid transform')
if transform == 'returns':
if days is not None:
raise ValueError('returns does not use days')
self.add_history(2, '1d', 'price')
return
elif days is None:
raise ValueError('no number of days specified')
if self.sim_params.data_frequency == 'daily':
mult = 1
freq = '1d'
else:
mult = 390
freq = '1m'
bars = mult * days
self.add_history(bars, freq, 'price')
if transform == 'vwap':
self.add_history(bars, freq, 'volume')
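# Worked example (illustrative, not part of zipline itself): the bar count
# requested above is just ``days`` scaled by bars per day, 1 in daily mode and
# 390 minute bars in minute mode.  So 'vwap' with days=3 in minute mode
# registers history for 3 * 390 = 1170 price bars and 1170 volume bars.
bars_per_day = {'daily': 1, 'minute': 390}
assert bars_per_day['minute'] * 3 == 1170
assert bars_per_day['daily'] * 3 == 3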
@api_method
def get_environment(self, field='platform'):
env = {
'arena': self.sim_params.arena,
'data_frequency': self.sim_params.data_frequency,
'start': self.sim_params.first_open,
'end': self.sim_params.last_close,
'capital_base': self.sim_params.capital_base,
'platform': self._platform
}
if field == '*':
return env
else:
return env[field]
def add_event(self, rule=None, callback=None):
"""
Adds an event to the algorithm's EventManager.
"""
self.event_manager.add_event(
zipline.utils.events.Event(rule, callback),
)
@api_method
def schedule_function(self,
func,
date_rule=None,
time_rule=None,
half_days=True):
"""
Schedules a function to be called with some timed rules.
"""
date_rule = date_rule or DateRuleFactory.every_day()
time_rule = ((time_rule or TimeRuleFactory.market_open())
if self.sim_params.data_frequency == 'minute' else
# If we are in daily mode the time_rule is ignored.
zipline.utils.events.Always())
self.add_event(
make_eventrule(date_rule, time_rule, half_days),
func,
)
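# Illustrative usage (not part of zipline itself; the callback name is made up):
# a scheduled function receives the algorithm context and the current bar data.

def rebalance(context, data):
    """Example callback body; real logic would place orders here."""
    pass

# Registration would pair it with the same factories referenced above, e.g.
# self.schedule_function(rebalance,
#                        date_rule=DateRuleFactory.every_day(),
#                        time_rule=TimeRuleFactory.market_open())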
@api_method
def record(self, *args, **kwargs):
"""
Track and record local variables (i.e. attributes) each day.
"""
# Make 2 objects both referencing the same iterator
args = [iter(args)] * 2
# Zip generates list entries by calling `next` on each iterator it
# receives. In this case the two iterators are the same object, so the
# call to next on args[0] will also advance args[1], resulting in zip
# returning (a,b) (c,d) (e,f) rather than (a,a) (b,b) (c,c) etc.
positionals = zip(*args)
for name, value in chain(positionals, iteritems(kwargs)):
self._recorded_vars[name] = value
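# Demonstration of the pairing trick used in ``record`` above (standalone
# sketch, not part of zipline itself): zipping two references to the same
# iterator consumes it two items at a time, turning a flat
# (name, value, name, value) argument list into (name, value) pairs.
flat_args = ('price', 101.5, 'leverage', 1.2)
paired = list(zip(*([iter(flat_args)] * 2)))
assert paired == [('price', 101.5), ('leverage', 1.2)]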
@api_method
def symbol(self, symbol_str):
"""
Default symbol lookup for any source that directly maps the
symbol to the Asset (e.g. yahoo finance).
"""
return self.asset_finder.lookup_symbol_resolve_multiple(
symbol_str,
as_of_date=self.sim_params.period_end
)
@api_method
def symbols(self, *args):
"""
Default symbols lookup for any source that directly maps the
symbol to the Asset (e.g. yahoo finance).
"""
return [self.symbol(identifier) for identifier in args]
@api_method
def sid(self, a_sid):
"""
Default sid lookup for any source that directly maps the integer sid
to the Asset.
"""
return self.asset_finder.retrieve_asset(a_sid)
@api_method
def future_chain(self, root_symbol, as_of_date=None):
""" Look up a future chain with the specified parameters.
Parameters
----------
root_symbol : str
The root symbol of a future chain.
as_of_date : datetime.datetime or pandas.Timestamp or str, optional
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this date is
the primary contract, etc.
Returns
-------
FutureChain
The future chain matching the specified parameters.
Raises
------
RootSymbolNotFound
If a future chain could not be found for the given root symbol.
"""
return FutureChain(
asset_finder=self.asset_finder,
get_datetime=self.get_datetime,
root_symbol=root_symbol.upper(),
as_of_date=as_of_date
)
def _calculate_order_value_amount(self, asset, value):
"""
Calculates how many shares/contracts to order based on the type of
asset being ordered.
"""
last_price = self.trading_client.current_data[asset].price
if tolerant_equals(last_price, 0):
zero_message = "Price of 0 for {psid}; can't infer value".format(
psid=asset
)
if self.logger:
self.logger.debug(zero_message)
# Don't place any order
return 0
if isinstance(asset, Future):
value_multiplier = asset.contract_multiplier
else:
value_multiplier = 1
return value / (last_price * value_multiplier)
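# Worked example (illustrative, not part of zipline itself): the share count
# above is value / (last_price * multiplier), where the multiplier is 1 for
# equities and the contract multiplier for futures.
last_price = 25.0
assert 10000 / (last_price * 1) == 400.0   # equity: 400 shares for $10,000
assert 10000 / (last_price * 50) == 8.0    # future with a 50x multiplier: 8 contracts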
@api_method
def order(self, sid, amount,
limit_price=None,
stop_price=None,
style=None):
"""
Place an order using the specified parameters.
"""
def round_if_near_integer(a, epsilon=1e-4):
"""
Round a to the nearest integer if that integer is within an epsilon
of a.
"""
if abs(a - round(a)) <= epsilon:
return round(a)
else:
return a
# Truncate to the integer share count that's either within .0001 of
# amount or closer to zero.
# E.g. 3.9999 -> 4.0; 5.5 -> 5.0; -5.5 -> -5.0
amount = int(round_if_near_integer(amount))
# Raises a ZiplineError if invalid parameters are detected.
self.validate_order_params(sid,
amount,
limit_price,
stop_price,
style)
# Convert deprecated limit_price and stop_price parameters to use
# ExecutionStyle objects.
style = self.__convert_order_params_for_blotter(limit_price,
stop_price,
style)
return self.blotter.order(sid, amount, style)
def validate_order_params(self,
asset,
amount,
limit_price,
stop_price,
style):
"""
Helper method for validating parameters to the order API function.
Raises an UnsupportedOrderParameters if invalid arguments are found.
"""
if not self.initialized:
raise OrderDuringInitialize(
msg="order() can only be called from within handle_data()"
)
if style:
if limit_price:
raise UnsupportedOrderParameters(
msg="Passing both limit_price and style is not supported."
)
if stop_price:
raise UnsupportedOrderParameters(
msg="Passing both stop_price and style is not supported."
)
if not isinstance(asset, Asset):
raise UnsupportedOrderParameters(
msg="Passing non-Asset argument to 'order()' is not supported."
" Use 'sid()' or 'symbol()' methods to look up an Asset."
)
for control in self.trading_controls:
control.validate(asset,
amount,
self.updated_portfolio(),
self.get_datetime(),
self.trading_client.current_data)
@staticmethod
def __convert_order_params_for_blotter(limit_price, stop_price, style):
"""
Helper method for converting deprecated limit_price and stop_price
arguments into ExecutionStyle instances.
This function assumes that either style == None or (limit_price,
stop_price) == (None, None).
"""
# TODO_SS: DeprecationWarning for usage of limit_price and stop_price.
if style:
assert (limit_price, stop_price) == (None, None)
return style
if limit_price and stop_price:
return StopLimitOrder(limit_price, stop_price)
if limit_price:
return LimitOrder(limit_price)
if stop_price:
return StopOrder(stop_price)
else:
return MarketOrder()
@api_method
def order_value(self, sid, value,
limit_price=None, stop_price=None, style=None):
"""
Place an order by desired value rather than desired number of shares.
If the requested sid is found in the universe, the requested value is
divided by its price to imply the number of shares to transact.
If the Asset being ordered is a Future, the 'value' calculated
is actually the exposure, as Futures have no 'value'.
value > 0 :: Buy/Cover
value < 0 :: Sell/Short
Market order: order(sid, value)
Limit order: order(sid, value, limit_price)
Stop order: order(sid, value, None, stop_price)
StopLimit order: order(sid, value, limit_price, stop_price)
"""
amount = self._calculate_order_value_amount(sid, value)
return self.order(sid, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@property
def recorded_vars(self):
return copy(self._recorded_vars)
@property
def portfolio(self):
return self.updated_portfolio()
def updated_portfolio(self):
if self.portfolio_needs_update:
self._portfolio = \
self.perf_tracker.get_portfolio(self.performance_needs_update)
self.portfolio_needs_update = False
self.performance_needs_update = False
return self._portfolio
@property
def account(self):
return self.updated_account()
def updated_account(self):
if self.account_needs_update:
self._account = \
self.perf_tracker.get_account(self.performance_needs_update)
self.account_needs_update = False
self.performance_needs_update = False
return self._account
def set_logger(self, logger):
self.logger = logger
def on_dt_changed(self, dt):
"""
Callback triggered by the simulation loop whenever the current dt
changes.
Any logic that should happen exactly once at the start of each datetime
group should happen here.
"""
assert isinstance(dt, datetime), \
"Attempt to set algorithm's current time with non-datetime"
assert dt.tzinfo == pytz.utc, \
"Algorithm expects a utc datetime"
self.datetime = dt
self.perf_tracker.set_date(dt)
self.blotter.set_date(dt)
@api_method
def get_datetime(self, tz=None):
"""
Returns the simulation datetime.
"""
dt = self.datetime
assert dt.tzinfo == pytz.utc, "Algorithm should have a utc datetime"
if tz is not None:
# Convert to the given timezone passed as a string or tzinfo.
if isinstance(tz, string_types):
tz = pytz.timezone(tz)
dt = dt.astimezone(tz)
return dt # datetime.datetime objects are immutable.
def set_transact(self, transact):
"""
Set the method that will be called to create a
transaction from open orders and trade events.
"""
self.blotter.transact = transact
def update_dividends(self, dividend_frame):
"""
Set DataFrame used to process dividends. DataFrame columns should
contain at least the entries in zp.DIVIDEND_FIELDS.
"""
self.perf_tracker.update_dividends(dividend_frame)
@api_method
def set_slippage(self, slippage):
if not isinstance(slippage, SlippageModel):
raise UnsupportedSlippageModel()
if self.initialized:
raise OverrideSlippagePostInit()
self.slippage = slippage
@api_method
def set_commission(self, commission):
if not isinstance(commission, (PerShare, PerTrade, PerDollar)):
raise UnsupportedCommissionModel()
if self.initialized:
raise OverrideCommissionPostInit()
self.commission = commission
def set_sources(self, sources):
assert isinstance(sources, list)
self.sources = sources
# Retained for backwards compatibility
@property
def data_frequency(self):
return self.sim_params.data_frequency
@data_frequency.setter
def data_frequency(self, value):
assert value in ('daily', 'minute')
self.sim_params.data_frequency = value
@api_method
def order_percent(self, sid, percent,
limit_price=None, stop_price=None, style=None):
"""
Place an order in the specified asset corresponding to the given
percent of the current portfolio value.
Note that percent must be expressed as a decimal (0.50 means 50%).
"""
value = self.portfolio.portfolio_value * percent
return self.order_value(sid, value,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@api_method
def order_target(self, sid, target,
limit_price=None, stop_price=None, style=None):
"""
Place an order to adjust a position to a target number of shares. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target number of shares and the
current number of shares.
"""
if sid in self.portfolio.positions:
current_position = self.portfolio.positions[sid].amount
req_shares = target - current_position
return self.order(sid, req_shares,
limit_price=limit_price,
stop_price=stop_price,
style=style)
else:
return self.order(sid, target,
limit_price=limit_price,
stop_price=stop_price,
style=style)
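# Worked example (illustrative, not part of zipline itself): ``order_target``
# only orders the difference between the target and the current position.
current_shares = 300
assert 500 - current_shares == 200    # target 500: buy 200
assert 100 - current_shares == -200   # target 100: sell 200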
@api_method
def order_target_value(self, sid, target,
limit_price=None, stop_price=None, style=None):
"""
Place an order to adjust a position to a target value. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target value and the
current value.
If the Asset being ordered is a Future, the 'target value' calculated
is actually the target exposure, as Futures have no 'value'.
"""
target_amount = self._calculate_order_value_amount(sid, target)
return self.order_target(sid, target_amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@api_method
def order_target_percent(self, sid, target,
limit_price=None, stop_price=None, style=None):
"""
Place an order to adjust a position to a target percent of the
current portfolio value. If the position doesn't already exist, this is
equivalent to placing a new order. If the position does exist, this is
equivalent to placing an order for the difference between the target
percent and the current percent.
Note that target must be expressed as a decimal (0.50 means 50%).
"""
target_value = self.portfolio.portfolio_value * target
return self.order_target_value(sid, target_value,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@api_method
def get_open_orders(self, sid=None):
if sid is None:
return {
key: [order.to_api_obj() for order in orders]
for key, orders in iteritems(self.blotter.open_orders)
if orders
}
if sid in self.blotter.open_orders:
orders = self.blotter.open_orders[sid]
return [order.to_api_obj() for order in orders]
return []
@api_method
def get_order(self, order_id):
if order_id in self.blotter.orders:
return self.blotter.orders[order_id].to_api_obj()
@api_method
def cancel_order(self, order_param):
order_id = order_param
if isinstance(order_param, zipline.protocol.Order):
order_id = order_param.id
self.blotter.cancel(order_id)
@api_method
def add_history(self, bar_count, frequency, field, ffill=True):
data_frequency = self.sim_params.data_frequency
history_spec = HistorySpec(bar_count, frequency, field, ffill,
data_frequency=data_frequency)
self.history_specs[history_spec.key_str] = history_spec
if self.initialized:
if self.history_container:
self.history_container.ensure_spec(
history_spec, self.datetime, self._most_recent_data,
)
else:
self.history_container = self.history_container_class(
self.history_specs,
self.current_universe(),
self.sim_params.first_open,
self.sim_params.data_frequency,
)
def get_history_spec(self, bar_count, frequency, field, ffill):
spec_key = HistorySpec.spec_key(bar_count, frequency, field, ffill)
if spec_key not in self.history_specs:
data_freq = self.sim_params.data_frequency
spec = HistorySpec(
bar_count,
frequency,
field,
ffill,
data_frequency=data_freq,
)
self.history_specs[spec_key] = spec
if not self.history_container:
self.history_container = self.history_container_class(
self.history_specs,
self.current_universe(),
self.datetime,
self.sim_params.data_frequency,
bar_data=self._most_recent_data,
)
self.history_container.ensure_spec(
spec, self.datetime, self._most_recent_data,
)
return self.history_specs[spec_key]
@api_method
def history(self, bar_count, frequency, field, ffill=True):
history_spec = self.get_history_spec(
bar_count,
frequency,
field,
ffill,
)
return self.history_container.get_history(history_spec, self.datetime)
####################
# Account Controls #
####################
def register_account_control(self, control):
"""
Register a new AccountControl to be checked on each bar.
"""
if self.initialized:
raise RegisterAccountControlPostInit()
self.account_controls.append(control)
def validate_account_controls(self):
for control in self.account_controls:
control.validate(self.updated_portfolio(),
self.updated_account(),
self.get_datetime(),
self.trading_client.current_data)
@api_method
def set_max_leverage(self, max_leverage=None):
"""
Set a limit on the maximum leverage of the algorithm.
"""
control = MaxLeverage(max_leverage)
self.register_account_control(control)
####################
# Trading Controls #
####################
def register_trading_control(self, control):
"""
Register a new TradingControl to be checked prior to order calls.
"""
if self.initialized:
raise RegisterTradingControlPostInit()
self.trading_controls.append(control)
@api_method
def set_max_position_size(self,
sid=None,
max_shares=None,
max_notional=None):
"""
Set a limit on the number of shares and/or dollar value held for the
given sid. Limits are treated as absolute values and are enforced at
the time that the algo attempts to place an order for sid. This means
that it's possible to end up with more than the max number of shares
due to splits/dividends, and more than the max notional due to price
improvement.
If an algorithm attempts to place an order that would result in
increasing the absolute value of shares/dollar value exceeding one of
these limits, raise a TradingControlException.
"""
control = MaxPositionSize(asset=sid,
max_shares=max_shares,
max_notional=max_notional)
self.register_trading_control(control)
@api_method
def set_max_order_size(self, sid=None, max_shares=None, max_notional=None):
"""
Set a limit on the number of shares and/or dollar value of any single
order placed for sid. Limits are treated as absolute values and are
enforced at the time that the algo attempts to place an order for sid.
If an algorithm attempts to place an order that would result in
exceeding one of these limits, raise a TradingControlException.
"""
control = MaxOrderSize(asset=sid,
max_shares=max_shares,
max_notional=max_notional)
self.register_trading_control(control)
@api_method
def set_max_order_count(self, max_count):
"""
Set a limit on the number of orders that can be placed within the given
time interval.
"""
control = MaxOrderCount(max_count)
self.register_trading_control(control)
@api_method
def set_do_not_order_list(self, restricted_list):
"""
Set a restriction on which sids can be ordered.
"""
control = RestrictedListOrder(restricted_list)
self.register_trading_control(control)
@api_method
def set_long_only(self):
"""
Set a rule specifying that this algorithm cannot take short positions.
"""
self.register_trading_control(LongOnly())
###########
# FFC API #
###########
@api_method
@require_not_initialized(AddTermPostInit())
def add_factor(self, factor, name):
if name in self._factors:
raise ValueError("Name %r is already a factor!" % name)
self._factors[name] = factor
@api_method
@require_not_initialized(AddTermPostInit())
def add_filter(self, filter):
name = "anon_filter_%d" % len(self._filters)
self._filters[name] = filter
# Note: add_classifier is not yet implemented since you can't do anything
# useful with classifiers yet.
def _all_terms(self):
# Merge all three dicts.
return dict(
chain.from_iterable(
iteritems(terms)
for terms in (self._filters, self._factors, self._classifiers)
)
)
def compute_factor_matrix(self, start_date):
"""
Compute a factor matrix starting at start_date.
"""
days = self.trading_environment.trading_days
start_date_loc = days.get_loc(start_date)
sim_end = self.sim_params.last_close.normalize()
end_loc = min(start_date_loc + 252, days.get_loc(sim_end))
end_date = days[end_loc]
return self.engine.factor_matrix(
self._all_terms(),
start_date,
end_date,
), end_date
def current_universe(self):
return self._current_universe
@classmethod
def all_api_methods(cls):
"""
Return a list of all the TradingAlgorithm API methods.
"""
return [
fn for fn in itervalues(vars(cls))
if getattr(fn, 'is_api_method', False)
]
| apache-2.0 |
Og192/Python | machine-learning-algorithms/memoryNN/memNN_ExactTest.py | 2 | 7973 | import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
corpusSize = 1977#2358
testDataSize = 49
testMaxLength = 82
batchSize = 1
vectorLength = 50
sentMaxLength = 82
hopNumber = 3
classNumber = 4
num_epoches = 2000
weightDecay = 0.001
trainDatasetPath = "/home/laboratory/memoryCorpus/train/"
testDatasetPath = "/home/laboratory/memoryCorpus/test/"
resultOutput = '/home/laboratory/memoryCorpus/result/'
if not os.path.exists(resultOutput):
os.makedirs(resultOutput)
def loadData(datasetPath, shape, sentMaxLength):
print("load " + datasetPath)
datasets = np.loadtxt(datasetPath, np.float)
datasets = np.reshape(datasets, shape)
return datasets
atten = np.loadtxt("atten", np.float,delimiter= ',').reshape((1, 2 * vectorLength))
atten_b = -0.002059555053710938
flinearLayer_W = np.loadtxt("linearLayer_W", np.float,delimiter= ',').reshape((vectorLength, vectorLength))
flinearLayer_b = np.loadtxt("linearLayer_b", np.float,delimiter= ',').reshape((vectorLength, 1))
fsoftmaxLayer_W = np.loadtxt("softmaxLayer_W", np.float,delimiter= ',').reshape((classNumber, vectorLength))
fsoftmaxLayer_b = np.loadtxt("softmaxLayer_b", np.float,delimiter= ',').reshape((classNumber, 1))
def shuffleDatasets(datasets, orders):
shuffleDatasets = np.zeros(datasets.shape)
index = 0
for i in orders:
shuffleDatasets[index] = datasets[i]
index += 1
del datasets
return shuffleDatasets
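# Illustrative note (standalone sketch, not part of the original script):
# shuffleDatasets reorders the first axis by the permutation in ``orders``;
# numpy fancy indexing does the same thing in one step as ``datasets[orders]``.
_demo = np.arange(6).reshape(3, 2)
_perm = np.array([2, 0, 1])
assert np.array_equal(_demo[_perm], np.array([[4, 5], [0, 1], [2, 3]]))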
def generateData(datasetPath,corpusSize, sentMaxLength):
contxtWordsDir = datasetPath + 'contxtWords'
aspectWordsDir = datasetPath + 'aspectWords'
labelsDir = datasetPath + 'labels'
positionsDir = datasetPath + 'positions'
sentLengthsDir = datasetPath + 'sentLengths'
maskDir = datasetPath + 'mask'
contxtWords = loadData(contxtWordsDir, (corpusSize, vectorLength, sentMaxLength), sentMaxLength)
aspectWords = loadData(aspectWordsDir, (corpusSize, vectorLength, 1), sentMaxLength)
labels = loadData(labelsDir, (corpusSize, classNumber, 1), sentMaxLength)
position = loadData(positionsDir, (corpusSize, 1, sentMaxLength), sentMaxLength)
sentLength = loadData(sentLengthsDir, (corpusSize, 1, 1), sentMaxLength)
mask = loadData(maskDir, (corpusSize, 1, sentMaxLength), sentMaxLength)
return (contxtWords, aspectWords, labels, position, sentLength, mask)
def plot(loss_list):
plt.cla()
plt.plot(loss_list)
plt.draw()
plt.pause(0.0001)
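# Illustrative sketch (standalone, not part of the original script): the
# location weighting Vi used inside the hop loop below, evaluated with plain
# numpy for a toy 5-word sentence.  The weight shrinks as the distance between
# a context word and the aspect word grows; the hopNumber / vectorLength factor
# is the same one that appears in Vi.
demo_positions = np.arange(1.0, 6.0).reshape(1, 5)   # distances 1..5
demo_sent_len = 5.0
demo_vi = (1.0 - demo_positions / demo_sent_len
           - (hopNumber / vectorLength) * (1.0 - 2.0 * demo_positions / demo_sent_len))
# demo_vi decreases monotonically across the five positions.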
contxtWords_placeholder = tf.placeholder(tf.float32, [vectorLength, None], name="contxtWords")#
aspectWords_placeholder = tf.placeholder(tf.float32, [vectorLength, 1], name="aspectWords")
labels_placeholder = tf.placeholder(tf.float32, [classNumber, 1], name="labels")
position_placeholder = tf.placeholder(tf.float32, [1, None], name="position")#
sentLength_placeholder = tf.placeholder(tf.float32, [1, 1], name="sentLength")
mask_placeholder = tf.placeholder(tf.float32, [1, None], name="mask")
attention_W = tf.Variable(atten, dtype = tf.float32, name="attention_W")
attention_b = tf.Variable(atten_b, dtype = tf.float32, name="attention_b")
linearLayer_W = tf.Variable(flinearLayer_W , dtype=tf.float32, name="linearLayer_W")
linearLayer_b = tf.Variable(flinearLayer_b , dtype = tf.float32, name="linearLayer_b")
softmaxLayer_W = tf.Variable(fsoftmaxLayer_W, dtype= tf.float32, name="softmaxLayer_W")
softmaxLayer_b = tf.Variable(fsoftmaxLayer_b, dtype= tf.float32, name="softmaxLayer_b")
vaspect = aspectWords_placeholder
for i in range(hopNumber):
Vi = 1.0 - position_placeholder / sentLength_placeholder - (hopNumber / vectorLength) * (1.0 - 2.0 * (position_placeholder / sentLength_placeholder))
Mi = Vi * contxtWords_placeholder
expanded_vaspect = vaspect
for j in range(sentMaxLength - 1):
expanded_vaspect = tf.concat(1, [expanded_vaspect, vaspect])
attentionInputs = tf.concat(0, [Mi, expanded_vaspect])
gi = tf.tanh(tf.matmul(attention_W, attentionInputs) + attention_b) + mask_placeholder
alpha = tf.nn.softmax(gi)
linearLayerOut = tf.matmul(linearLayer_W, vaspect) + linearLayer_b
vaspect = tf.reduce_sum(alpha * Mi, 1, True) + linearLayerOut
linearLayerOut = tf.matmul(softmaxLayer_W, vaspect) + softmaxLayer_b
# regu = tf.reduce_sum(attention_W * attention_W)
# regu += tf.reduce_sum(attention_b * attention_b)
# regu += tf.reduce_sum(linearLayer_W * linearLayer_W)
# regu += tf.reduce_sum(linearLayer_b * linearLayer_b)
# regu += tf.reduce_sum(softmaxLayer_W * softmaxLayer_W)
# regu += tf.reduce_sum(softmaxLayer_b * softmaxLayer_b)
# regu = weightDecay * regu
calssification = tf.nn.softmax(linearLayerOut - tf.reduce_max(linearLayerOut), dim=0)
total_loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(linearLayerOut - tf.reduce_max(linearLayerOut), labels_placeholder, dim=0))
ada = tf.train.AdagradOptimizer(0.01)# 0.3 for hopNumber = 1
train_step = ada.minimize(total_loss)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
loss_list = []
contxtWords, aspectWords, labels, position, sentLength, mask = generateData(trainDatasetPath, corpusSize, sentMaxLength)
contxtWordsT,aspectWordsT,labelsT,positionT,sentLengthT, maskT = generateData(testDatasetPath, testDataSize, testMaxLength)
for epoch_idx in range(num_epoches):
results = []
sum_loss= 0.0
print("New data, epoch", epoch_idx)
orders = np.arange(corpusSize)
np.random.shuffle(orders)
contxtWords = shuffleDatasets(contxtWords, orders)
aspectWords = shuffleDatasets(aspectWords, orders)
labels = shuffleDatasets(labels, orders)
position = shuffleDatasets(position, orders)
sentLength = shuffleDatasets(sentLength, orders)
mask = shuffleDatasets(mask, orders)
count = 0
correct = 0
for i in range(corpusSize):
_calssification, _total_loss, _train_step, _attention_W = sess.run(
[calssification, total_loss, train_step, attention_W],
feed_dict=
{
contxtWords_placeholder:contxtWords[i],
aspectWords_placeholder:aspectWords[i],
labels_placeholder :labels[i],
position_placeholder :position[i],
sentLength_placeholder :sentLength[i],
mask_placeholder :mask[i]
}
)
sum_loss += _total_loss
if np.argmax(_calssification.reshape(4)) == np.argmax(labels[i]):
correct += 1.0
count += 1
# print(_attention_W)
# print(sentLength[i])
print("Iteration", epoch_idx, "Loss", sum_loss / (corpusSize * 2), "train_step", _train_step, "Accuracy: ", float(correct / count))
loss_list.append(sum_loss / (corpusSize * 2))
plot(loss_list)
count = 0
correct = 0
for i in range(testDataSize):
_calssification = sess.run(
calssification,
feed_dict=
{
contxtWords_placeholder:contxtWordsT[i],
aspectWords_placeholder:aspectWordsT[i],
labels_placeholder :labelsT[i],
position_placeholder :positionT[i],
sentLength_placeholder :sentLengthT[i],
mask_placeholder :maskT[i]
}
)
results.append(_calssification.reshape(4))
if np.argmax(_calssification.reshape(4)) == np.argmax(labelsT[i]):
correct += 1.0
count += 1
print("test Accuracy: ", float(correct / count))
np.savetxt(resultOutput + "predict_" + str(epoch_idx) + ".txt", np.asarray(results, dtype=np.float32), fmt='%.5f',delimiter=' ') | gpl-2.0 |
moreati/numpy | numpy/lib/npyio.py | 35 | 71412 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
x.lower()
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
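# Illustrative example (not part of numpy itself): a few converters as selected
# above, applied to byte strings the way ``loadtxt`` feeds them in.  Float
# dtypes use ``floatconv`` (which also accepts C99 hex floats); integer dtypes
# are parsed via ``int(float(x))``.
_float_conv = _getconv(np.dtype(np.float64))
assert _float_conv(b'1.5') == 1.5
assert _float_conv(b'0x1.8p1') == 3.0        # the float.fromhex path
_int_conv = _getconv(np.dtype(np.int32))
assert _int_conv(b'7.0') == 7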
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
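# Illustrative usage sketch for the structured-dtype path handled by
# `flatten_dtype`/`pack_items` above (the buffer contents are hypothetical,
# and field byte sizes depend on the platform):
#
#     >>> from io import BytesIO
#     >>> buf = BytesIO(b"1 2.5\n3 4.5\n")
#     >>> np.loadtxt(buf, dtype=[('idx', int), ('val', float)])
#     array([(1, 2.5), (3, 4.5)],
#           dtype=[('idx', '<i8'), ('val', '<f8')])
#
# Each parsed row is converted column by column and then packed into a tuple
# matching the (possibly nested) structured dtype.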
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
    # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
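# Illustrative usage sketch for the `fmt` handling above (file names are
# hypothetical):
#
#     >>> a = np.array([[1.5, 2.0], [3.25, 4.0]])
#     >>> np.savetxt('a.txt', a, fmt='%.2f')             # single specifier, reused per column
#     >>> np.savetxt('a.txt', a, fmt=['%.2f', '%.1f'])   # one specifier per column
#     >>> np.savetxt('a.txt', a, fmt='x=%.2f y=%.2f')    # full row format, `delimiter` ignored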
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
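# Note on the single-group branch above: when the regular expression contains
# just one group, `findall` returns a list of strings rather than tuples, so
# the data are first read with the plain field dtype and then reinterpreted as
# a one-field structured array. Hypothetical sketch:
#
#     >>> # np.fromregex(f, r"(\d+)", [('num', np.int64)])
#     >>> # -> array([(1312,), (1534,), (444,)], dtype=[('num', '<i8')])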
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, list of str, generator
File, filename, list, or generator to read. If the filename
        extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
      there must not be any header in the file (else a ValueError
      exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
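# Illustrative usage sketch for the missing/filling machinery above (the data
# below are hypothetical):
#
#     >>> from io import BytesIO
#     >>> s = BytesIO(b"1,,3\n4,5,\n")
#     >>> np.genfromtxt(s, delimiter=',', filling_values=-1)
#     array([[ 1., -1.,  3.],
#            [ 4.,  5., -1.]])
#
# Empty fields are matched against `missing_values` (the empty string by
# default) and replaced by the corresponding `filling_values` entry.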
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
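# Illustrative usage sketch (hypothetical file contents): for a CSV that starts
# with a header row such as "Name,Value", recfromcsv lower-cases the names, so
# the result can be accessed as r['name'] / r['value'] (or r.name / r.value).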
| bsd-3-clause |
leesavide/pythonista-docs | Documentation/matplotlib/examples/old_animation/histogram_tkagg.py | 3 | 1847 | """
This example shows how to use a path patch to draw a bunch of
rectangles for an animated histogram
"""
import numpy as np
import matplotlib
matplotlib.use('TkAgg') # do this before importing pylab
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
fig, ax = plt.subplots()
# histogram our data with numpy
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
nrects = len(left)
# here comes the tricky part -- we have to set up the vertex and path
# codes arrays using moveto, lineto and closepoly
# for each rect: 1 for the MOVETO, 3 for the LINETO, 1 for the
# CLOSEPOLY; the vert for the closepoly is ignored but we still need
# it to keep the codes aligned with the vertices
nverts = nrects*(1+3+1)
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * path.Path.LINETO
codes[0::5] = path.Path.MOVETO
codes[4::5] = path.Path.CLOSEPOLY
verts[0::5,0] = left
verts[0::5,1] = bottom
verts[1::5,0] = left
verts[1::5,1] = top
verts[2::5,0] = right
verts[2::5,1] = top
verts[3::5,0] = right
verts[3::5,1] = bottom
barpath = path.Path(verts, codes)
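# Each rectangle therefore occupies five consecutive rows of `verts`/`codes`:
# MOVETO bottom-left, LINETO top-left, LINETO top-right, LINETO bottom-right,
# and a final CLOSEPOLY whose vertex value is ignored.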
patch = patches.PathPatch(barpath, facecolor='green', edgecolor='yellow', alpha=0.5)
ax.add_patch(patch)
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())
def animate():
if animate.cnt>=100:
return
animate.cnt += 1
# simulate new data coming in
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
top = bottom + n
verts[1::5,1] = top
verts[2::5,1] = top
fig.canvas.draw()
fig.canvas.manager.window.after(100, animate)
animate.cnt = 0
fig.canvas.manager.window.after(100, animate)
plt.show()
| apache-2.0 |
lin-credible/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of run (with randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
whn09/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 30 | 2039 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
def optimizer_exp_decay():
global_step = tf.contrib.framework.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
return tf.train.AdagradOptimizer(learning_rate=learning_rate)
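# With these arguments, tf.train.exponential_decay evaluates (approximately)
# 0.1 * 0.001 ** (global_step / 100.0), so the learning rate decays smoothly
# from 0.1 as training progresses; Adagrad then consumes that schedule.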
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
optimizer=optimizer_exp_decay)
classifier.fit(x_train, y_train, steps=800)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
zhuangjun1981/retinotopic_mapping | retinotopic_mapping/tools/PlottingTools.py | 1 | 15373 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 31 11:07:20 2014
@author: junz
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.colors as col
import scipy.ndimage as ni
import ImageAnalysis as ia
try:
import skimage.external.tifffile as tf
except ImportError:
import tifffile as tf
try:
    import cv2
except ImportError as e:
    print e
def get_rgb(colorStr):
"""
get R,G,B int value from a hex color string
"""
return int(colorStr[1:3], 16), int(colorStr[3:5], 16), int(colorStr[5:7], 16)
def get_color_str(R, G, B):
"""
get hex color string from R,G,B value (integer with uint8 format)
"""
    if not (isinstance(R, (int, long)) and isinstance(G, (int, long)) and isinstance(B, (int, long))):
        raise TypeError, 'Input R, G and B should be integer!'
    if not ((0 <= R <= 255) and (0 <= G <= 255) and (
            0 <= B <= 255)): raise ValueError, 'Input R, G and B should be between 0 and 255!'
return '#' + ''.join(map(chr, (R, G, B))).encode('hex')
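# A quick illustrative check of the two helpers above:
#
#     >>> get_rgb('#ff8000')
#     (255, 128, 0)
#     >>> get_color_str(255, 128, 0)
#     '#ff8000'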
def binary_2_rgba(img, foregroundColor='#ff0000', backgroundColor='#000000', foregroundAlpha=255, backgroundAlpha=0):
"""
generate display image in (RGBA).(np.uint8) format which can be displayed by imshow
    :param img: input image, should be a binary array (np.bool or np.(u)int)
:param foregroundColor: color for 1 in the array, RGB str, i.e. '#ff0000'
:param backgroundColor: color for 0 in the array, RGB str, i.e. '#ff00ff'
:param foregroundAlpha: alpha for 1 in the array, int, 0-255
    :param backgroundAlpha: alpha for 0 in the array, int, 0-255
:return: displayImg, (RGBA).(np.uint8) format, ready for imshow
"""
if img.dtype == np.bool:
pass
elif issubclass(img.dtype.type, np.integer):
if np.amin(img) < 0 or np.amax(img) > 1: raise ValueError, 'Values of input image should be either 0 or 1.'
else:
raise TypeError, 'Data type of input image should be either np.bool or integer.'
if type(foregroundAlpha) is int:
if foregroundAlpha < 0 or foregroundAlpha > 255: raise ValueError, 'Value of foreGroundAlpha should be between 0 and 255.'
else:
raise TypeError, 'Data type of foreGroundAlpha should be integer.'
if type(backgroundAlpha) is int:
if backgroundAlpha < 0 or backgroundAlpha > 255: raise ValueError, 'Value of backGroundAlpha should be between 0 and 255.'
else:
raise TypeError, 'Data type of backGroundAlpha should be integer.'
fR, fG, fB = get_rgb(foregroundColor)
bR, bG, bB = get_rgb(backgroundColor)
displayImg = np.zeros((img.shape[0], img.shape[1], 4)).astype(np.uint8)
displayImg[img == 1] = np.array([fR, fG, fB, foregroundAlpha]).astype(np.uint8)
displayImg[img == 0] = np.array([bR, bG, bB, backgroundAlpha]).astype(np.uint8)
return displayImg
def scalar_2_rgba(img, color='#ff0000'):
"""
generate display a image in (RGBA).(np.uint8) format which can be displayed by imshow
alpha is defined by values in the img
:param img: input image
:param alphaMatrix: matrix of alpha
:param foreGroundColor: color for 1 in the array, RGB str, i.e. '#ff0000'
:return: displayImg, (RGBA).(np.uint8) format, ready for imshow
"""
R, G, B = get_rgb(color)
RMatrix = (R * ia.array_nor(img.astype(np.float32))).astype(np.uint8)
GMatrix = (G * ia.array_nor(img.astype(np.float32))).astype(np.uint8)
BMatrix = (B * ia.array_nor(img.astype(np.float32))).astype(np.uint8)
alphaMatrix = (ia.array_nor(img.astype(np.float32)) * 255).astype(np.uint8)
displayImg = np.zeros((img.shape[0], img.shape[1], 4)).astype(np.uint8)
    displayImg[:, :, 0] = RMatrix
    displayImg[:, :, 1] = GMatrix
    displayImg[:, :, 2] = BMatrix
displayImg[:, :, 3] = alphaMatrix
return displayImg
def bar_graph(left,
height,
error,
errorDir='both', # 'both', 'positive' or 'negative'
width=0.1,
plotAxis=None,
lw=3,
faceColor='#000000',
edgeColor='none',
capSize=10,
label=None
):
"""
plot a single bar with error bar
"""
if not plotAxis:
f = plt.figure()
plotAxis = f.add_subplot(111)
if errorDir == 'both':
yerr = error
elif errorDir == 'positive':
yerr = [[0], [error]]
elif errorDir == 'negative':
yerr = [[error], [0]]
plotAxis.errorbar(left + width / 2,
height,
yerr=yerr,
lw=lw,
capsize=capSize,
capthick=lw,
color=edgeColor)
plotAxis.bar(left,
height,
width=width,
color=faceColor,
edgecolor=edgeColor,
lw=lw,
label=label)
return plotAxis
def random_color(numOfColor=10):
"""
    generate a list of random colors
"""
numOfColor = int(numOfColor)
colors = []
Cmatrix = (np.random.rand(numOfColor, 3) * 255).astype(np.uint8)
for i in range(numOfColor):
r = hex(Cmatrix[i][0]).split('x')[1]
if len(r) == 1:
r = '0' + r
g = hex(Cmatrix[i][1]).split('x')[1]
if len(g) == 1:
g = '0' + g
b = hex(Cmatrix[i][2]).split('x')[1]
if len(b) == 1:
b = '0' + b
colors.append('#' + r + g + b)
return colors
def show_movie(path, # tif file path or numpy arrary of the movie
mode='raw', # 'raw', 'dF' or 'dFoverF'
baselinePic=None, # picuture of baseline
baselineType='mean', # way to calculate baseline
cmap='gray'):
"""
plot tf movie in the way defined by mode
"""
if isinstance(path, str):
rawMov = tf.imread(path)
elif isinstance(path, np.ndarray):
rawMov = path
if mode == 'raw':
mov = rawMov
else:
_, dFMov, dFoverFMov = ia.normalize_movie(rawMov,
baselinePic=baselinePic,
baselineType=baselineType)
if mode == 'dF':
mov = dFMov
elif mode == 'dFoverF':
mov = dFoverFMov
else:
raise LookupError, 'The "mode" should be "raw", "dF" or "dFoverF"!'
if isinstance(path, str):
tf.imshow(mov,
cmap=cmap,
vmax=np.amax(mov),
vmin=np.amin(mov),
title=mode + ' movie of ' + path)
elif isinstance(path, np.ndarray):
tf.imshow(mov,
cmap=cmap,
vmax=np.amax(mov),
vmin=np.amin(mov),
title=mode + ' Movie')
return mov
def standalone_color_bar(vmin, vmax, cmap, sectionNum=10):
"""
plot a stand alone color bar.
"""
a = np.array([[vmin, vmax]])
plt.figure(figsize=(0.1, 9))
img = plt.imshow(a, cmap=cmap, vmin=vmin, vmax=vmax)
plt.gca().set_visible(False)
cbar = plt.colorbar()
cbar.set_ticks(np.linspace(vmin, vmax, num=sectionNum + 1))
def alpha_blending(image, alphaData, vmin, vmax, cmap='Paired', sectionNum=10, background=-1, interpolation='nearest',
isSave=False, savePath=None):
"""
Generate image with transparency weighted by another matrix.
    Plot numpy array 'image' with colormap 'cmap', and define the transparency
    of each pixel by the value in another numpy array 'alphaData'.
All the elements in alphaData should be non-negative.
"""
if image.shape != alphaData.shape:
raise LookupError, '"image" and "alphaData" should have same shape!!'
if np.amin(alphaData) < 0:
raise ValueError, 'All the elements in alphaData should be bigger than zero.'
# normalize image
image[image > vmax] = vmax
image[image < vmin] = vmin
image = (image - vmin) / (vmax - vmin)
# get colored image of image
exec ('colorImage = cm.' + cmap + '(image)')
# normalize alphadata
alphaDataNor = alphaData / np.amax(alphaData)
alphaDataNor = np.sqrt(alphaDataNor)
colorImage[:, :, 3] = alphaDataNor
# plt.figure()
# plot dummy figure for colorbar
a = np.array([[vmin, vmax]])
plt.imshow(a, cmap=cmap, vmin=vmin, vmax=vmax, alpha=0)
# plt.gca().set_visible(False)
cbar = plt.colorbar()
cbar.set_ticks(np.linspace(vmin, vmax, num=sectionNum + 1))
cbar.set_alpha(1)
cbar.draw_all()
# generate black background
b = np.array(colorImage)
b[:] = background
b[:, :, 3] = 1
plt.imshow(b, cmap='gray')
# plot map
plt.imshow(colorImage, interpolation=interpolation)
return colorImage
def plot_mask(mask, plotAxis=None, color='#ff0000', zoom=1, borderWidth=None, closingIteration=None):
"""
plot mask borders in a given color
"""
if not plotAxis:
f = plt.figure()
plotAxis = f.add_subplot(111)
cmap1 = col.ListedColormap(color, 'temp')
cm.register_cmap(cmap=cmap1)
if zoom != 1:
mask = ni.interpolation.zoom(mask, zoom, order=0)
mask2 = mask.astype(np.float32)
mask2[np.invert(np.isnan(mask2))] = 1.
mask2[np.isnan(mask2)] = 0.
struc = ni.generate_binary_structure(2, 2)
if borderWidth:
border = mask2 - ni.binary_erosion(mask2, struc, iterations=borderWidth).astype(np.float32)
else:
border = mask2 - ni.binary_erosion(mask2, struc).astype(np.float32)
if closingIteration:
border = ni.binary_closing(border, iterations=closingIteration).astype(np.float32)
border[border == 0] = np.nan
currfig = plotAxis.imshow(border, cmap='temp', interpolation='nearest')
return currfig
def plot_mask_borders(mask, plotAxis=None, color='#ff0000', zoom=1, borderWidth=2, closingIteration=None, **kwargs):
"""
plot mask (ROI) borders by using pyplot.contour function. all the 0s and Nans in the input mask will be considered
as background, and non-zero, non-nan pixel will be considered in ROI.
"""
if not plotAxis:
f = plt.figure()
plotAxis = f.add_subplot(111)
plotingMask = np.ones(mask.shape, dtype=np.uint8)
plotingMask[np.logical_or(np.isnan(mask), mask == 0)] = 0
if zoom != 1:
plotingMask = cv2.resize(plotingMask.astype(np.float),
dsize=(int(plotingMask.shape[1] * zoom), int(plotingMask.shape[0] * zoom)))
plotingMask[plotingMask < 0.5] = 0
plotingMask[plotingMask >= 0.5] = 1
plotingMask = plotingMask.astype(np.uint8)
if closingIteration is not None:
plotingMask = ni.binary_closing(plotingMask, iterations=closingIteration).astype(np.uint8)
plotingMask = ni.binary_erosion(plotingMask, iterations=borderWidth)
currfig = plotAxis.contour(plotingMask, levels=[0.5], colors=color, linewidths=borderWidth, **kwargs)
# put y axis in decreasing order
y_lim = list(plotAxis.get_ylim())
y_lim.sort()
plotAxis.set_ylim(y_lim[::-1])
plotAxis.set_aspect('equal')
return currfig
def grid_axis(rowNum, columnNum, totalPlotNum, **kwarg):
"""
return figure handles and axis handels for multiple subplots and figures
"""
figureNum = totalPlotNum // (rowNum * columnNum) + 1
figureHandles = []
for i in range(figureNum):
f = plt.figure(**kwarg)
figureHandles.append(f)
axisHandles = []
for i in range(totalPlotNum):
currFig = figureHandles[i // (rowNum * columnNum)]
currIndex = i % (rowNum * columnNum)
currAxis = currFig.add_subplot(rowNum, columnNum, currIndex + 1)
axisHandles.append(currAxis)
return figureHandles, axisHandles
def tile_axis(f, rowNum, columnNum, topDownMargin=0.05, leftRightMargin=0.05, rowSpacing=0.05, columnSpacing=0.05):
if 2 * topDownMargin + (
(rowNum - 1) * rowSpacing) >= 1: raise ValueError, 'Top down margin or row spacing are too big!'
if 2 * leftRightMargin + (
(columnNum - 1) * columnSpacing) >= 1: raise ValueError, 'Left right margin or column spacing are too big!'
height = (1 - (2 * topDownMargin) - (rowNum - 1) * rowSpacing) / rowNum
width = (1 - (2 * leftRightMargin) - (columnNum - 1) * columnSpacing) / columnNum
xStarts = np.arange(leftRightMargin, 1 - leftRightMargin, (width + columnSpacing))
yStarts = np.arange(topDownMargin, 1 - topDownMargin, (height + rowSpacing))[::-1]
axisList = [[f.add_axes([xStart, yStart, width, height]) for xStart in xStarts] for yStart in yStarts]
return axisList
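# Illustrative usage sketch (assumes a matplotlib figure is available):
#
#     >>> f = plt.figure()
#     >>> axes = tile_axis(f, rowNum=2, columnNum=3)
#     >>> axes[0][2].plot([0, 1], [0, 1])   # top-right panel of the 2x3 grid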
def save_figure_without_borders(f,
savePath,
removeSuperTitle=True,
**kwargs):
"""
remove borders of a figure
"""
f.gca().get_xaxis().set_visible(False)
f.gca().get_yaxis().set_visible(False)
f.gca().set_title('')
if removeSuperTitle:
f.suptitle('')
f.savefig(savePath, pad_inches=0, bbox_inches='tight', **kwargs)
def merge_normalized_images(imgList, isFilter=True, sigma=50, mergeMethod='mean', dtype=np.float32):
"""
merge images in a list in to one, for each image, local intensity variability will be removed by subtraction of
gaussian filtered image. Then all images will be collapsed by the mergeMethod in to single image
"""
imgList2 = []
for currImg in imgList:
imgList2.append(ia.array_nor(currImg.astype(dtype)))
if mergeMethod == 'mean':
mergedImg = np.mean(np.array(imgList2), axis=0)
elif mergeMethod == 'min':
mergedImg = np.min(np.array(imgList2), axis=0)
elif mergeMethod == 'max':
mergedImg = np.max(np.array(imgList2), axis=0)
elif mergeMethod == 'median':
mergedImg = np.median(np.array(imgList2), axis=0)
if isFilter:
mergedImgf = ni.filters.gaussian_filter(mergedImg.astype(np.float), sigma=sigma)
return ia.array_nor(mergedImg - mergedImgf).astype(dtype)
else:
return ia.array_nor(mergedImg).astype(dtype)
# def hue2RGB(hue):
# """
# get the RGB value as format as hex string from the decimal ratio of hue (from 0 to 1)
# color model as described in:
# https://en.wikipedia.org/wiki/Hue
# """
# if hue < 0: hue = 0
# if hue > 1: hue = 1
# color = colorsys.hsv_to_rgb(hue,1,1)
# color = [int(x*255) for x in color]
# return get_color_str(*color)
#
#
def hot_2_rgb(hot):
"""
get the RGB value as format as hex string from the decimal ratio of hot colormap (from 0 to 1)
"""
if hot < 0: hot = 0
if hot > 1: hot = 1
cmap_hot = plt.get_cmap('hot')
    color = cmap_hot(hot)[0:3]
color = [int(x * 255) for x in color]
return get_color_str(*color)
def value_2_rgb(value, cmap):
"""
get the RGB value as format as hex string from the decimal ratio of a given colormap (from 0 to 1)
"""
if value < 0: value = 0
if value > 1: value = 1
cmap = plt.get_cmap(cmap)
    color = cmap(value)[0:3]
color = [int(x * 255) for x in color]
return get_color_str(*color)
if __name__ == '__main__':
plt.ioff()
print 'for debug'
| gpl-3.0 |
yuzie007/ph_analysis | ph_analysis/structure/displacements.py | 1 | 3767 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
import pandas as pd
__author__ = 'Yuji Ikeda'
__version__ = '0.1.0'
def create_statistical_functions():
return [
('sum', np.sum),
('avg.', np.average),
('s.d.', lambda x: np.std(x, ddof=0)),
('abs._sum', lambda x: np.sum(np.abs(x))),
('abs._avg.', lambda x: np.average(np.abs(x))),
('abs._s.d.', lambda x: np.std(np.abs(x), ddof=0)),
]
def create_data_stat(data, keys, properties):
"""
:param data: pandas.DataFrame
:param keys: List of strings
:param properties: List of strings
:return:
"""
functions = create_statistical_functions()
return data.groupby(keys, sort=False).agg(functions)[properties]
class Displacements(object):
def __init__(self, atoms, atoms_ideal):
self._atoms = atoms
self._atoms_ideal = atoms_ideal
self._initialize_data()
def _initialize_data(self):
data = pd.DataFrame()
data['symbol'] = self._atoms.get_chemical_symbols()
data['atom'] = ''
data = data.reset_index() # data['index'] is created
self._data = data
def run(self):
self.calculate_displacements()
self.write()
def _calculate_origin(self):
positions = self._atoms.get_scaled_positions()
positions_ideal = self._atoms_ideal.get_scaled_positions()
diff = positions - positions_ideal
diff -= np.rint(diff)
return np.average(diff, axis=0)
def calculate_displacements(self):
atoms = self._atoms
        atoms_ideal = self._atoms_ideal
origin = self._calculate_origin()
positions = atoms.get_scaled_positions()
positions_ideal = atoms_ideal.get_scaled_positions()
diff = positions - (positions_ideal + origin)
diff -= np.rint(diff)
diff = np.dot(diff, atoms.get_cell()) # frac -> A
displacements = np.sqrt(np.sum(diff ** 2, axis=1))
self._data['displacement'] = displacements
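        # The "diff -= np.rint(diff)" step above applies the minimum-image
        # convention, so each atom is compared with the nearest periodic image
        # of its ideal site; the dot product with the cell then converts the
        # fractional offsets into Cartesian displacements in Angstrom.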
def write(self):
filename = self._create_filename()
data = self._data
properties = ['displacement']
with open(filename, 'w') as f:
f.write(self._create_header())
f.write('{:<22s}{:<18s}'.format('#', 'displacements_(A)'))
f.write('\n')
for i, x in data.iterrows():
f.write('atom ')
f.write('{:11d}'.format(x['index']))
f.write(' {:5s}'.format(x['symbol']))
f.write('{:18.12f}'.format(x['displacement']))
f.write('\n')
f.write('\n')
# Write statistics for all atoms
data_stat = create_data_stat(data, 'atom', properties)
for k0, x in data_stat.iterrows():
for k1, v in x.iteritems():
f.write('{:16}'.format(k1[1]))
f.write(' {:5s}'.format(k0))
f.write('{:18.12f}'.format(v))
f.write('\n')
f.write('\n')
# Write statistics for each symbol
data_stat = create_data_stat(data, 'symbol', properties)
for k0, x in data_stat.iterrows():
for k1, v in x.iteritems():
f.write('{:16s}'.format(k1[1]))
f.write(' {:5s}'.format(k0))
f.write('{:18.12f}'.format(v))
f.write('\n')
f.write('\n')
def _create_header(self):
return ''
def _create_filename(self):
return 'displacements.dat'
def get_data(self):
return self._data
| mit |
hugobowne/scikit-learn | examples/svm/plot_oneclass.py | 80 | 2338 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s)
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s)
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s)
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
PredictiveScienceLab/py-mcmc | demos/demo4.py | 2 | 4183 | """
This demo demonstrates how to use a mean function in a GP and allow the model
to discover the most important basis functions.
This model is equivalent to a Relevance Vector Machine.
Author:
Ilias Bilionis
Date:
3/20/2014
"""
import numpy as np
import GPy
import pymcmc as pm
import matplotlib.pyplot as plt
# Write a class that represents the mean you wish to use:
class PolynomialBasis(object):
"""
A simple set of polynomials.
:param degree: The degree of the polynomials.
:type degree: int
"""
def __init__(self, degree):
"""
The constructor can do anything you want. The object should be
constructed before doing anything with pymcmc in any case.
Just make sure that inside the constructor you define the ``num_output``
attribute whose value should be equal to the number of basis functions.
"""
self.degree = degree
self.num_output = degree + 1 # YOU HAVE TO DEFINE THIS ATTRIBUTE!
def __call__(self, X):
"""
Evaluate the basis functions at ``X``.
Now, you should assume that ``X`` is a 2D numpy array of size
``num_points x input_dim``. If ``input_dim`` is 1, then you still need
to consider it as a 2D array because this is the kind of data that GPy
        requires. If you want the function to also work with 1D arrays when
        ``input_dim`` is one, use the trick below.
The output of this function should be the design matrix. That is,
it should be the matrix ``phi`` of dimensions
        ``num_points x num_output``. In other words, ``phi[i, j]`` should be
the value of basis function ``phi_j`` at ``X[i, :]``.
"""
if X.ndim == 1:
X = X[:, None] # Trick for 1D arrays
return np.hstack([X ** i for i in range(self.degree + 1)])
# Pick your degree
degree = 5
# Construct your basis
poly_basis = PolynomialBasis(degree)
# Let us generate some random data to play with
# The number of input dimensions
input_dim = 1
# The number of observations
num_points = 50
# The noise level we are going to add to the observations
noise = 0.1
# Observed inputs
X = 20. * np.random.rand(num_points, 1) - 10.
# The observations we make
Y = np.sin(X) / X + noise * np.random.randn(num_points, 1) - 0.1 * X + 0.1 * X ** 3
# Let's construct a GP model with just a mean and a diagonal covariance
# This is the mean (and at the same time the kernel)
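# ARD=True gives each polynomial basis function its own variance parameter, so
# unimportant terms can be suppressed automatically (the RVM-like behaviour
# described in the module docstring).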
mean = pm.MeanFunction(input_dim, poly_basis, ARD=True)
# Add an RBF kernel
kernel = GPy.kern.RBF(input_dim)
# Now, let's construct the model
model = GPy.models.GPRegression(X, Y, kernel=mean + kernel)
print 'Model before training:'
print str(model)
# You may just train the model by maximizing the likelihood:
model.optimize_restarts(messages=True)
print 'Trained model:'
print str(model)
print model.add.mean.variance
# And just plot the predictions
model.plot(plot_limits=(-10, 15))
# Let us also plot the full function
x = np.linspace(-10, 15, 100)[:, None]
y = np.sin(x) / x - 0.1 * x + 0.1 * x ** 3
plt.plot(x, y, 'r', linewidth=2)
plt.legend(['Mean of GP', '5% percentile of GP', '95% percentile of GP',
'Observations', 'Real Underlying Function'], loc='best')
plt.title('Model trained by maximizing the likelihood')
plt.show()
a = raw_input('press enter to continue...')
# Or you might want to do it using MCMC:
new_mean = pm.MeanFunction(input_dim, poly_basis, ARD=True)
new_kernel = GPy.kern.RBF(input_dim)
new_model = GPy.models.GPRegression(X, Y, kernel=new_mean + new_kernel)
proposal = pm.MALAProposal(dt=0.1)
mcmc = pm.MetropolisHastings(new_model, proposal=proposal)
mcmc.sample(50000, num_thin=100, num_burn=1000, verbose=True)
print 'Model trained with MCMC:'
print str(new_model)
print new_model.add.mean.variance
# Plot everything for this too:
new_model.plot(plot_limits=(-10., 15.))
# Let us also plot the full function
plt.plot(x, y, 'r', linewidth=2)
plt.legend(['Mean of GP', '5% percentile of GP', '95% percentile of GP',
'Observations', 'Real Underlying Function'], loc='best')
plt.title('Model trained by MCMC')
plt.show()
a = raw_input('press enter to continue...')
| lgpl-3.0 |
matthiasplappert/motion_classification | src/toolkit/util.py | 1 | 2470 | import numpy as np
from sklearn.utils.validation import check_array
class NotFittedError(ValueError, AttributeError):
pass
def check_feature_array(array, n_features=None):
array = check_array(array, ensure_2d=True, allow_nd=False)
if n_features is not None and array.shape[1] != n_features:
raise ValueError('feature array must have exactly %d features' % n_features)
return array
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
    # Based on sklearn.utils.validation.check_is_fitted but also ensures
# that the attribute is not None
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % estimator)
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
if not all_or_any([getattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_multilabel_array(array, n_labels=None, force_binary=True):
array = check_array(array, ensure_2d=True, allow_nd=False, dtype=int)
if n_labels is not None and array.shape[1] != n_labels:
raise ValueError('multilabel array must have exactly %d labels' % n_labels)
if force_binary:
count_ones = np.count_nonzero(array == 1)
count_zeros = np.count_nonzero(array == 0)
if np.size(array) != count_ones + count_zeros:
raise ValueError('multilabel array must be binary')
return array
def pad_sequences(X):
# Find longest sequence
n_samples_max = 0
for X_curr in X:
n_samples_curr = X_curr.shape[0]
if n_samples_curr > n_samples_max:
n_samples_max = n_samples_curr
# Adjust length of all sequences to be equal
for idx, X_curr in enumerate(X):
n_samples_curr = X_curr.shape[0]
delta_samples = n_samples_max - n_samples_curr
assert delta_samples >= 0
if delta_samples > 0:
fill_array = np.zeros((delta_samples, X_curr.shape[1]))
X[idx] = np.append(X_curr, fill_array, axis=0)
assert X[idx].shape[0] == n_samples_max
X = np.asarray(X)
return X
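# Minimal usage sketch for pad_sequences (illustrative shapes only):
#     X = [np.zeros((3, 2)), np.zeros((5, 2))]
#     X = pad_sequences(X)  # -> ndarray of shape (2, 5, 2); the shorter
#                           #    sequence is zero-padded at the end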
| mit |
pangwong11/jumpball | bd_analyze/nba_season_stats_analyzer.py | 1 | 5069 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as pyplot
from datetime import datetime
import os
import glob
import sys
import re
import argparse
import cv2
import random
import ast
# Argument parsing
#parser = argparse.ArgumentParser(description='Jumpball analyze')
#parser.add_argument('-s', '--season', action='store', help='Season in year', dest="year_season", required=True)
#parser.add_argument('-n', '--next-season', action='store', help='Season in year', dest="next_year_season", required=True)
#args = parser.parse_args()
#
#season = args.year_season
#next_season = args.next_year_season
#data_directory = "./nba_data"
team_stat_path = './nba_data/*.csv'
team_stat_files = glob.glob(team_stat_path)
data_types = ['Height', 'Weight', 'WL_PERC']
num_data_types = len(data_types)
def readTeamStats(file_name):
dtypes = np.dtype({ 'names' : ('team', 'Height', 'Weight', 'WL_PERC'),
'formats' : ['S10', np.float, np.float, np.float] })
data = np.loadtxt(file_name, delimiter=',', skiprows=1,
usecols=(0,2,3,4), dtype=dtypes)
#data_list = list(data)
return data
def readTeamRecord(file_name):
dtypes = np.dtype({ 'names' : ('team', 'WL_PERC'),
'formats' : ['S10', np.float] })
data = np.loadtxt(file_name, delimiter=',', skiprows=1,
usecols=(0,3), dtype=dtypes)
return data
# Iterate through each NBA team stats file and find the mean of each team's weight and height
def analyzeTeamStats(season):
data_set=[]
team_labels_set = []
data_directory = ("/Users/aidan.wong/Documents/mystuff/cs454/jumpball/bd_collect/nba_data/%s/" % season)
for root, dirs, files in os.walk(data_directory):
for f in files:
if f.endswith("agg_data.csv"):
teamStats = readTeamStats(data_directory + f)
teamStats_list = zip(*teamStats)
team = teamStats_list[0][0]
ht_mean = np.array(teamStats_list[1], dtype=float).mean()
wt_mean = np.array(teamStats_list[2], dtype=float).mean()
wl_perc = teamStats_list[3][0]
data = [ht_mean, wt_mean,wl_perc]
data_set.append(data)
team_labels_set.append(team)
#print data_set
#print "--------------"
return data_set,team_labels_set
season = "2011"
next_year_season = "2012"
trainData_1, teamData_1 = analyzeTeamStats(season)
trainData_2, teamData_2 = analyzeTeamStats(next_year_season)
#print trainData_2
#print trainData_2[3]
print "------------------------"
print teamData_1
print teamData_2
#print teamData_2[3]
# This variable is to defined the new sample team to run K-NN with and the number should range from 0 to 30
new_team_index = 3
trainData_array = np.array(trainData_1).astype(np.float32)
newData_array = np.array(trainData_2)
#print trainData_array
#print newData_array
#print newData_array[0]
#print trainData_array
labels = np.array(np.arange(30))
#print labels
#labels = array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
for label, x, y in zip(teamData_1,trainData_array[:,0],trainData_array[:,1]):
pyplot.annotate(label,xy =(x,y), xytext=(-20,20),textcoords = 'offset points',
ha ='right', va = 'bottom',bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow',
alpha = 0.5),arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
for i in range(0,30):
r = lambda: random.randint(0,255)
color = '#%02X%02X%02X' % (r(),r(),r())
team_data = trainData_array[labels.ravel()==i]
pyplot.scatter(team_data[:,0],team_data[:,1],marker = 'o',s = team_data[:,2]*4500,c = color,cmap = pyplot.get_cmap('Spectral'))
#print newData_array[labels.ravel()==1]
#for i in range(1,2):
new_team_data = newData_array[labels.ravel()==new_team_index]
print "new_team_data =", new_team_data
pyplot.scatter(new_team_data[:,0],new_team_data[:,1],marker = '^',s = new_team_data[:,2]*4500,c = color,cmap = pyplot.get_cmap('Spectral'))
i = 0
for label, x, y in zip(teamData_2[:new_team_index+1],newData_array[:,0],newData_array[:,1]):
if i < new_team_index:
print i
i += 1
continue
print zip(teamData_2[:new_team_index+1],newData_array[:,0],newData_array[:,1])
print (label,x,y)
# print type(label)
pyplot.annotate(label,xy =(x,y), xytext=(-20,20),textcoords = 'offset points',
ha ='right', va = 'bottom',bbox = dict(boxstyle = 'round,pad=0.5', fc = 'blue',
alpha = 0.5),arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
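# Classify the new season's team with k-NN (k=1): each 2011 team is its own
# class label (0..29), so the nearest neighbour found below indicates which
# 2011 team the selected 2012 team most resembles in mean height, mean weight
# and win/loss percentage.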
knn = cv2.KNearest()
#print trainData_array
#
#
#knn.train(trainData_array,labels)
knn.train(trainData_array,np.array(labels).astype(np.float32))
ret, results, neighbours ,dist = knn.find_nearest((new_team_data).astype(np.float32), 1)
print (ret, results, neighbours, dist)
#
print "result: ", results,"\n"
print "neighbours: ", neighbours,"\n"
print "distance: ", dist
pyplot.show()
| apache-2.0 |
Jim61C/VTT_Show_Atten_And_Tell | prepro.py | 4 | 8670 | from scipy import ndimage
from collections import Counter
from core.vggnet import Vgg19
from core.utils import *
import tensorflow as tf
import numpy as np
import pandas as pd
import hickle
import os
import json
def _process_caption_data(caption_file, image_dir, max_length):
with open(caption_file) as f:
caption_data = json.load(f)
    # id_to_filename is a dictionary such as {image_id: filename}
id_to_filename = {image['id']: image['file_name'] for image in caption_data['images']}
# data is a list of dictionary which contains 'captions', 'file_name' and 'image_id' as key.
data = []
for annotation in caption_data['annotations']:
image_id = annotation['image_id']
annotation['file_name'] = os.path.join(image_dir, id_to_filename[image_id])
data += [annotation]
# convert to pandas dataframe (for later visualization or debugging)
caption_data = pd.DataFrame.from_dict(data)
del caption_data['id']
caption_data.sort_values(by='image_id', inplace=True)
caption_data = caption_data.reset_index(drop=True)
del_idx = []
for i, caption in enumerate(caption_data['caption']):
caption = caption.replace('.','').replace(',','').replace("'","").replace('"','')
caption = caption.replace('&','and').replace('(','').replace(")","").replace('-',' ')
caption = " ".join(caption.split()) # replace multiple spaces
caption_data.set_value(i, 'caption', caption.lower())
if len(caption.split(" ")) > max_length:
del_idx.append(i)
# delete captions if size is larger than max_length
print "The number of captions before deletion: %d" %len(caption_data)
caption_data = caption_data.drop(caption_data.index[del_idx])
caption_data = caption_data.reset_index(drop=True)
print "The number of captions after deletion: %d" %len(caption_data)
return caption_data
def _build_vocab(annotations, threshold=1):
counter = Counter()
max_len = 0
for i, caption in enumerate(annotations['caption']):
        words = caption.split(' ') # caption contains only lower-case words
for w in words:
counter[w] +=1
if len(caption.split(" ")) > max_len:
max_len = len(caption.split(" "))
vocab = [word for word in counter if counter[word] >= threshold]
print ('Filtered %d words to %d words with word count threshold %d.' % (len(counter), len(vocab), threshold))
word_to_idx = {u'<NULL>': 0, u'<START>': 1, u'<END>': 2}
idx = 3
for word in vocab:
word_to_idx[word] = idx
idx += 1
print "Max length of caption: ", max_len
return word_to_idx
def _build_caption_vector(annotations, word_to_idx, max_length=15):
n_examples = len(annotations)
captions = np.ndarray((n_examples,max_length+2)).astype(np.int32)
for i, caption in enumerate(annotations['caption']):
        words = caption.split(" ") # caption contains only lower-case words
cap_vec = []
cap_vec.append(word_to_idx['<START>'])
for word in words:
if word in word_to_idx:
cap_vec.append(word_to_idx[word])
cap_vec.append(word_to_idx['<END>'])
        # pad short captions with the special null token '<NULL>' to make a fixed-size vector
if len(cap_vec) < (max_length + 2):
for j in range(max_length + 2 - len(cap_vec)):
cap_vec.append(word_to_idx['<NULL>'])
captions[i, :] = np.asarray(cap_vec)
print "Finished building caption vectors"
return captions
def _build_file_names(annotations):
image_file_names = []
id_to_idx = {}
idx = 0
image_ids = annotations['image_id']
file_names = annotations['file_name']
for image_id, file_name in zip(image_ids, file_names):
if not image_id in id_to_idx:
id_to_idx[image_id] = idx
image_file_names.append(file_name)
idx += 1
file_names = np.asarray(image_file_names)
return file_names, id_to_idx
def _build_image_idxs(annotations, id_to_idx):
image_idxs = np.ndarray(len(annotations), dtype=np.int32)
image_ids = annotations['image_id']
for i, image_id in enumerate(image_ids):
image_idxs[i] = id_to_idx[image_id]
return image_idxs
def main():
# batch size for extracting feature vectors from vggnet.
batch_size = 100
# maximum length of caption(number of word). if caption is longer than max_length, deleted.
max_length = 15
# if word occurs less than word_count_threshold in training dataset, the word index is special unknown token.
word_count_threshold = 1
# vgg model path
vgg_model_path = './data/imagenet-vgg-verydeep-19.mat'
caption_file = 'data/annotations/captions_train2014.json'
image_dir = 'image/%2014_resized/'
# about 80000 images and 400000 captions for train dataset
train_dataset = _process_caption_data(caption_file='data/annotations/captions_train2014.json',
image_dir='image/train2014_resized/',
max_length=max_length)
# about 40000 images and 200000 captions
val_dataset = _process_caption_data(caption_file='data/annotations/captions_val2014.json',
image_dir='image/val2014_resized/',
max_length=max_length)
# about 4000 images and 20000 captions for val / test dataset
val_cutoff = int(0.1 * len(val_dataset))
test_cutoff = int(0.2 * len(val_dataset))
print 'Finished processing caption data'
save_pickle(train_dataset, 'data/train/train.annotations.pkl')
save_pickle(val_dataset[:val_cutoff], 'data/val/val.annotations.pkl')
save_pickle(val_dataset[val_cutoff:test_cutoff].reset_index(drop=True), 'data/test/test.annotations.pkl')
for split in ['train', 'val', 'test']:
annotations = load_pickle('./data/%s/%s.annotations.pkl' % (split, split))
if split == 'train':
word_to_idx = _build_vocab(annotations=annotations, threshold=word_count_threshold)
save_pickle(word_to_idx, './data/%s/word_to_idx.pkl' % split)
captions = _build_caption_vector(annotations=annotations, word_to_idx=word_to_idx, max_length=max_length)
save_pickle(captions, './data/%s/%s.captions.pkl' % (split, split))
file_names, id_to_idx = _build_file_names(annotations)
save_pickle(file_names, './data/%s/%s.file.names.pkl' % (split, split))
image_idxs = _build_image_idxs(annotations, id_to_idx)
save_pickle(image_idxs, './data/%s/%s.image.idxs.pkl' % (split, split))
# prepare reference captions to compute bleu scores later
image_ids = {}
feature_to_captions = {}
i = -1
for caption, image_id in zip(annotations['caption'], annotations['image_id']):
if not image_id in image_ids:
image_ids[image_id] = 0
i += 1
feature_to_captions[i] = []
feature_to_captions[i].append(caption.lower() + ' .')
save_pickle(feature_to_captions, './data/%s/%s.references.pkl' % (split, split))
print "Finished building %s caption dataset" %split
# extract conv5_3 feature vectors
vggnet = Vgg19(vgg_model_path)
vggnet.build()
with tf.Session() as sess:
tf.initialize_all_variables().run()
for split in ['train', 'val', 'test']:
anno_path = './data/%s/%s.annotations.pkl' % (split, split)
save_path = './data/%s/%s.features.hkl' % (split, split)
annotations = load_pickle(anno_path)
image_path = list(annotations['file_name'].unique())
n_examples = len(image_path)
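            # conv5_3 of VGG-19 (for 224x224 inputs) yields a 14x14x512 feature
            # map, stored here as 196 spatial locations with 512 channels each.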
all_feats = np.ndarray([n_examples, 196, 512], dtype=np.float32)
for start, end in zip(range(0, n_examples, batch_size),
range(batch_size, n_examples + batch_size, batch_size)):
image_batch_file = image_path[start:end]
image_batch = np.array(map(lambda x: ndimage.imread(x, mode='RGB'), image_batch_file)).astype(
np.float32)
feats = sess.run(vggnet.features, feed_dict={vggnet.images: image_batch})
all_feats[start:end, :] = feats
print ("Processed %d %s features.." % (end, split))
# use hickle to save huge feature vectors
hickle.dump(all_feats, save_path)
print ("Saved %s.." % (save_path))
if __name__ == "__main__":
main() | mit |
aetilley/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
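    Examples
    --------
    An assumed toy example: a single bicluster over four rows and four
    columns, compared with itself, yields a perfect score of 1.0.
    >>> import numpy as np
    >>> from sklearn.metrics import consensus_score
    >>> rows = np.array([[True, True, False, False]])
    >>> cols = np.array([[False, False, True, True]])
    >>> consensus_score((rows, cols), (rows, cols))
    1.0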
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
| bsd-3-clause |
mozman/ezdxf | examples/text_layout_engine_usage.py | 1 | 12087 | # Copyright (c) 2021, Manfred Moitzi
# License: MIT License
import sys
from typing import Iterable
import pathlib
import random
import ezdxf
from ezdxf import zoom, print_config
from ezdxf.math import Matrix44
from ezdxf.tools import fonts
from ezdxf.tools import text_layout as tl
"""
This example shows the usage of the internal text_layout module to render
complex text layouts. The module is designed to render MText like entities,
but could be used for other tasks too. The layout engine supports a multi
column setup, each column contains paragraphs, and these paragraphs can
automatically flow across the columns. All locations are relative to each other,
absolute locations are not supported - tabulators are not supported.
The layout engine knows nothing about the content itself, it just manages
content boxes of a fixed given width and height and "glue" spaces in between.
The engine does not alter the size of the content boxes, but resizes the glue
if necessary. The actual rendering is done by a rendering object associated to
each content box.
The only text styling managed by the layout engine is underline, overline and
strike through multiple content boxes.
Features:
- layout alignment like MText: top-middle-bottom combined with left-center-right
- paragraph alignments: left, right, center, justified
- paragraph indentation: left, right, special first line
- cell alignments: top, center, bottom
- fraction cells: over, slanted, tolerance style
- columns have a fixed height or grow automatically; paragraphs which do not
fit "flow" into the following column.
- pass through of transformation matrix to the rendering object
TODO:
- bullet- and numbered lists
- refinements to replicate MText features as well as possible
Used for:
- drawing add-on to render MTEXT with columns
- explode MTEXT into DXF primitives (TEXT, LINE)
"""
if not ezdxf.options.use_matplotlib:
print("The Matplotlib package is required.")
sys.exit(1)
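# The overall flow of this example, in brief (all names are defined below):
#   1. wrap each word into a tl.Text content box with its own TextRenderer,
#   2. collect the content boxes into a tl.Paragraph,
#   3. append the paragraph to a tl.Layout that owns at least one column,
#   4. call layout.place() and then layout.render() with a translation matrix
#      to put the result into the model space.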
# Type alias:
Content = Iterable[tl.Cell]
DIR = pathlib.Path("~/Desktop/Outbox").expanduser()
STYLE = "Style0"
FONT = "OpenSans-Regular.ttf"
COLUMN_HEIGHT: float = 12
print_config()
doc = ezdxf.new()
msp = doc.modelspace()
style = doc.styles.new(STYLE, dxfattribs={"font": FONT})
def measure_space(font):
return font.text_width(" X") - font.text_width("X")
class SizedFont:
def __init__(self, height: float):
self.height = float(height)
self.font = fonts.make_font(FONT, self.height)
self.space = measure_space(self.font)
def text_width(self, text: str):
return self.font.text_width(text)
fix_sized_fonts = [
SizedFont(0.18),
SizedFont(0.35),
SizedFont(0.50),
SizedFont(0.70),
SizedFont(1.00),
]
class FrameRenderer(tl.ContentRenderer):
"""Render object to render a frame around a content collection.
    This renderer can be used by collections which just manage content
    but do not represent content by themselves (Layout, Column, Paragraph).
"""
def __init__(self, color):
self.color = color
def render(
self,
left: float,
bottom: float,
right: float,
top: float,
m: Matrix44 = None,
) -> None:
"""Render a frame as LWPOLYLINE."""
pline = msp.add_lwpolyline(
[(left, top), (right, top), (right, bottom), (left, bottom)],
close=True,
dxfattribs={"color": self.color},
)
if m:
pline.transform(m)
def line(
self, x1: float, y1: float, x2: float, y2: float, m: Matrix44 = None
) -> None:
"""Line renderer used to create underline, overline, strike through
and fraction dividers.
"""
line = msp.add_line(
(x1, y1), (x2, y2), dxfattribs={"color": self.color}
)
if m:
line.transform(m)
class TextRenderer(tl.ContentRenderer):
"""Text content renderer."""
def __init__(self, text, attribs):
self.text = text
self.attribs = attribs
self.line_attribs = {"color": attribs["color"]}
def render(
self,
left: float,
bottom: float,
right: float,
top: float,
m: Matrix44 = None,
):
"""Create/render the text content"""
text = msp.add_text(self.text, dxfattribs=self.attribs)
text.set_pos((left, bottom), align="LEFT")
if m:
text.transform(m)
def line(
self, x1: float, y1: float, x2: float, y2: float, m: Matrix44 = None
) -> None:
"""Line renderer used to create underline, overline, strike through
and fraction dividers.
"""
line = msp.add_line((x1, y1), (x2, y2), dxfattribs=self.line_attribs)
if m:
line.transform(m)
class Word(tl.Text):
"""Represent a word as content box for the layout engine."""
def __init__(self, text: str, font: SizedFont, stroke: int = 0):
# Each content box can have individual properties:
attribs = {
"color": random.choice((1, 2, 3, 4, 6, 7, 7)),
"height": font.height,
"style": STYLE,
}
super().__init__(
# Width and height of the content are fixed given values and will
# not be changed by the layout engine:
width=font.text_width(text),
height=font.height,
stroke=stroke,
            # Each content box can have its own rendering object:
renderer=TextRenderer(text, attribs),
)
def uniform_content(count: int, size: int = 1) -> Content:
"""Create content with one text size."""
font = fix_sized_fonts[size]
for word in tl.lorem_ipsum(count):
yield Word(word, font)
yield tl.Space(font.space)
def random_sized_content(count: int) -> Content:
"""Create content with randomized text size."""
def size():
return random.choice([0, 1, 1, 1, 1, 1, 2, 3])
for word in tl.lorem_ipsum(count):
font = fix_sized_fonts[size()]
yield Word(word, font)
yield tl.Space(font.space)
def stroke_groups(words: Iterable[str]):
group = []
count = 0
stroke = 0
for word in words:
if count == 0:
if group:
yield group, stroke
count = random.randint(1, 4)
group = [word]
stroke = random.choice([0, 0, 0, 0, 1, 1, 1, 2, 2, 4])
else:
count -= 1
group.append(word)
if group:
yield group, stroke
def stroked_content(count: int, size: int = 1) -> Content:
"""Create content with one text size and groups of words with or without
strokes.
"""
font = fix_sized_fonts[size]
groups = stroke_groups(tl.lorem_ipsum(count))
for group, stroke in groups:
# strokes should span across spaces in between words:
# Spaces between words are bound to the preceding content box renderer,
# MText is more flexible, but this implementation is easy and good
# enough, otherwise spaces would need a distinct height and a rendering
# object, both are not implemented for glue objects.
continue_stroke = stroke + 8 if stroke else 0
for word in group[:-1]:
yield Word(word, font=font, stroke=continue_stroke)
yield tl.Space(font.space)
# strokes end at the last word, without continue stroke:
yield Word(group[-1], font=font, stroke=stroke)
yield tl.Space(font.space)
class Fraction(tl.Fraction):
"""Represents a fraction for the layout engine, which consist of a top-
and bottom content box, divided by horizontal or slanted line.
The "tolerance style" has no line between the stacked content boxes.
This implementation is more flexible than MText, the content boxes can be
words but also fractions or cell groups.
"""
def __init__(
self, t1: str, t2: str, stacking: tl.Stacking, font: SizedFont
):
top = Word(t1, font)
bottom = Word(t2, font)
super().__init__(
top=top,
bottom=bottom,
stacking=stacking,
# Uses only the generic line renderer to render the divider line,
# the top- and bottom content boxes use their own render objects.
renderer=FrameRenderer(color=7),
)
def fraction_content() -> Content:
"""Create content with one text size and place random fractions between
words.
"""
words = list(uniform_content(120))
for word in words:
word.valign = tl.CellAlignment.BOTTOM
stacking_options = list(tl.Stacking)
font = SizedFont(0.25) # fraction font
for _ in range(10):
stacking = random.choice(stacking_options)
top = str(random.randint(1, 1000))
bottom = str(random.randint(1, 1000))
pos = random.randint(0, len(words) - 1)
if isinstance(words[pos], tl.Space):
pos += 1
words.insert(pos, Fraction(top, bottom, stacking, font))
words.insert(pos + 1, tl.Space(font.space))
return words
def create_layout(align: tl.ParagraphAlignment, content: Content) -> tl.Layout:
# Create a flow text paragraph for the content:
paragraph = tl.Paragraph(align=align)
paragraph.append_content(content)
# Start the layout engine and set default column width:
layout = tl.Layout(
width=8, # default column width for columns without define width
margins=(0.5,), # space around the layout
# The render object of collections like Layout, Column or Paragraph is
# called before the render objects of the content managed by the
# collection.
# This could be used to render a frame or a background:
renderer=FrameRenderer(color=2),
)
# Append the first column with default width and a content height of 12 drawing
# units. At least the first column has to be created by the client.
layout.append_column(height=COLUMN_HEIGHT, gutter=1)
# Append the content. The content will be distributed across the available
# columns and automatically overflow into adjacent columns if necessary.
# The layout engine creates new columns automatically if required by
# cloning the last column.
layout.append_paragraphs([paragraph])
# Content- and total size is always up to date, only the final location
# has to be updated by calling Layout.place().
print()
print(f"Layout has {len(layout)} columns.")
print(f"Layout total width: {layout.total_width}")
print(f"Layout total height: {layout.total_height}")
for n, column in enumerate(layout, start=1):
print()
print(f" {n}. column has {len(column)} paragraph(s)")
print(f" Column total width: {column.total_width}")
print(f" Column total height: {column.total_height}")
# It is recommended to place the layout at origin (0, 0) and use a
# transformation matrix to move the layout to the final location in
# the DXF target layout - the model space in this example.
# Set final layout location in the xy-plane with alignment:
layout.place(align=tl.LayoutAlignment.BOTTOM_LEFT)
# It is possible to add content after calling place(), but place has to be
# called again before calling the render() method of the layout.
return layout
def create(content: Content, y: float) -> None:
x: float = 0
for align in list(tl.ParagraphAlignment):
# Build and place the layout at (0, 0):
layout = create_layout(align, content)
# Render and move the layout to the final location:
m = Matrix44.translate(x, y, 0)
layout.render(m)
x += layout.total_width + 2
dy: float = COLUMN_HEIGHT + 3
create(list(uniform_content(200)), 0)
create(list(random_sized_content(200)), dy)
create(list(stroked_content(200)), 2 * dy)
create(fraction_content(), 3 * dy)
# zooming needs the longest time:
zoom.extents(msp, factor=1.1)
doc.saveas(str(DIR / "text_layout.dxf"))
| mit |
raymondxyang/tensorflow | tensorflow/examples/get_started/regression/linear_regression.py | 8 | 3291 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear regression using the LinearRegressor Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import imports85 # pylint: disable=g-bad-import-order
STEPS = 1000
def main(argv):
"""Builds, trains, and evaluates the model."""
assert len(argv) == 1
(x_train, y_train), (x_test, y_test) = imports85.load_data()
# Build the training input_fn.
input_train = tf.estimator.inputs.pandas_input_fn(
x=x_train,
y=y_train,
      # Setting `num_epochs` to `None` lets the `input_fn` generate data
# indefinitely, leaving the call to `Estimator.train` in control.
num_epochs=None,
shuffle=True)
# Build the validation input_fn.
input_test = tf.estimator.inputs.pandas_input_fn(
x=x_test, y=y_test, shuffle=True)
feature_columns = [
# "curb-weight" and "highway-mpg" are numeric columns.
tf.feature_column.numeric_column(key="curb-weight"),
tf.feature_column.numeric_column(key="highway-mpg"),
]
# Build the Estimator.
model = tf.estimator.LinearRegressor(feature_columns=feature_columns)
# Train the model.
# By default, the Estimators log output every 100 steps.
model.train(input_fn=input_train, steps=STEPS)
# Evaluate how the model performs on data it has not yet seen.
eval_result = model.evaluate(input_fn=input_test)
# The evaluation returns a Python dictionary. The "average_loss" key holds the
# Mean Squared Error (MSE).
average_loss = eval_result["average_loss"]
# Convert MSE to Root Mean Square Error (RMSE).
print("\n" + 80 * "*")
print("\nRMS error for the test set: ${:.0f}".format(average_loss**0.5))
# Run the model in prediction mode.
input_dict = {
"curb-weight": np.array([2000, 3000]),
"highway-mpg": np.array([30, 40])
}
predict_input_fn = tf.estimator.inputs.numpy_input_fn(
input_dict, shuffle=False)
predict_results = model.predict(input_fn=predict_input_fn)
# Print the prediction results.
print("\nPrediction results:")
for i, prediction in enumerate(predict_results):
msg = ("Curb weight: {: 4d}lbs, "
"Highway: {: 0d}mpg, "
"Prediction: ${: 9.2f}")
msg = msg.format(input_dict["curb-weight"][i], input_dict["highway-mpg"][i],
prediction["predictions"][0])
print(" " + msg)
print()
if __name__ == "__main__":
# The Estimator periodically generates "INFO" logs; make these logs visible.
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main=main)
| apache-2.0 |
vybstat/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
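# compute_score=True records the marginal log-likelihood at each iteration in
# clf.scores_, which is plotted at the end of this example.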
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
wangmiao1981/spark | python/pyspark/sql/tests/test_pandas_cogrouped_map.py | 20 | 9306 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.functions import array, explode, col, lit, udf, pandas_udf
from pyspark.sql.types import DoubleType, StructType, StructField, Row
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa # noqa: F401
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class CogroupedMapInPandasTests(ReusedSQLTestCase):
@property
def data1(self):
return self.spark.range(10).toDF('id') \
.withColumn("ks", array([lit(i) for i in range(20, 30)])) \
.withColumn("k", explode(col('ks')))\
.withColumn("v", col('k') * 10)\
.drop('ks')
@property
def data2(self):
return self.spark.range(10).toDF('id') \
.withColumn("ks", array([lit(i) for i in range(20, 30)])) \
.withColumn("k", explode(col('ks'))) \
.withColumn("v2", col('k') * 100) \
.drop('ks')
def test_simple(self):
self._test_merge(self.data1, self.data2)
def test_left_group_empty(self):
left = self.data1.where(col("id") % 2 == 0)
self._test_merge(left, self.data2)
def test_right_group_empty(self):
right = self.data2.where(col("id") % 2 == 0)
self._test_merge(self.data1, right)
def test_different_schemas(self):
right = self.data2.withColumn('v3', lit('a'))
self._test_merge(self.data1, right, 'id long, k int, v int, v2 int, v3 string')
def test_complex_group_by(self):
left = pd.DataFrame.from_dict({
'id': [1, 2, 3],
'k': [5, 6, 7],
'v': [9, 10, 11]
})
right = pd.DataFrame.from_dict({
'id': [11, 12, 13],
'k': [5, 6, 7],
'v2': [90, 100, 110]
})
left_gdf = self.spark\
.createDataFrame(left)\
.groupby(col('id') % 2 == 0)
right_gdf = self.spark \
.createDataFrame(right) \
.groupby(col('id') % 2 == 0)
def merge_pandas(l, r):
return pd.merge(l[['k', 'v']], r[['k', 'v2']], on=['k'])
result = left_gdf \
.cogroup(right_gdf) \
.applyInPandas(merge_pandas, 'k long, v long, v2 long') \
.sort(['k']) \
.toPandas()
expected = pd.DataFrame.from_dict({
'k': [5, 6, 7],
'v': [9, 10, 11],
'v2': [90, 100, 110]
})
assert_frame_equal(expected, result)
def test_empty_group_by(self):
left = self.data1
right = self.data2
def merge_pandas(l, r):
return pd.merge(l, r, on=['id', 'k'])
result = left.groupby().cogroup(right.groupby())\
.applyInPandas(merge_pandas, 'id long, k int, v int, v2 int') \
.sort(['id', 'k']) \
.toPandas()
left = left.toPandas()
right = right.toPandas()
expected = pd \
.merge(left, right, on=['id', 'k']) \
.sort_values(by=['id', 'k'])
assert_frame_equal(expected, result)
def test_mixed_scalar_udfs_followed_by_cogrouby_apply(self):
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
result = df.groupby().cogroup(df.groupby()) \
.applyInPandas(lambda x, y: pd.DataFrame([(x.sum().sum(), y.sum().sum())]),
'sum1 int, sum2 int').collect()
self.assertEqual(result[0]['sum1'], 165)
self.assertEqual(result[0]['sum2'], 165)
def test_with_key_left(self):
self._test_with_key(self.data1, self.data1, isLeft=True)
def test_with_key_right(self):
self._test_with_key(self.data1, self.data1, isLeft=False)
def test_with_key_left_group_empty(self):
left = self.data1.where(col("id") % 2 == 0)
self._test_with_key(left, self.data1, isLeft=True)
def test_with_key_right_group_empty(self):
right = self.data1.where(col("id") % 2 == 0)
self._test_with_key(self.data1, right, isLeft=False)
def test_with_key_complex(self):
def left_assign_key(key, l, _):
return l.assign(key=key[0])
result = self.data1 \
.groupby(col('id') % 2 == 0)\
.cogroup(self.data2.groupby(col('id') % 2 == 0)) \
.applyInPandas(left_assign_key, 'id long, k int, v int, key boolean') \
.sort(['id', 'k']) \
.toPandas()
expected = self.data1.toPandas()
expected = expected.assign(key=expected.id % 2 == 0)
assert_frame_equal(expected, result)
def test_wrong_return_type(self):
# Test that we get a sensible exception invalid values passed to apply
left = self.data1
right = self.data2
with QuietTest(self.sc):
with self.assertRaisesRegex(
NotImplementedError,
'Invalid return type.*ArrayType.*TimestampType'):
left.groupby('id').cogroup(right.groupby('id')).applyInPandas(
lambda l, r: l, 'id long, v array<timestamp>')
def test_wrong_args(self):
left = self.data1
right = self.data2
with self.assertRaisesRegex(ValueError, 'Invalid function'):
left.groupby('id').cogroup(right.groupby('id')) \
.applyInPandas(lambda: 1, StructType([StructField("d", DoubleType())]))
def test_case_insensitive_grouping_column(self):
# SPARK-31915: case-insensitive grouping column should work.
df1 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df1.groupby("ColUmn").cogroup(
df1.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long").first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
df2 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df1.groupby("ColUmn").cogroup(
df2.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long").first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
def test_self_join(self):
# SPARK-34319: self-join with FlatMapCoGroupsInPandas
df = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df.groupby("ColUmn").cogroup(
df.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long")
row = row.join(row).first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
@staticmethod
def _test_with_key(left, right, isLeft):
def right_assign_key(key, l, r):
return l.assign(key=key[0]) if isLeft else r.assign(key=key[0])
result = left \
.groupby('id') \
.cogroup(right.groupby('id')) \
.applyInPandas(right_assign_key, 'id long, k int, v int, key long') \
.toPandas()
expected = left.toPandas() if isLeft else right.toPandas()
expected = expected.assign(key=expected.id)
assert_frame_equal(expected, result)
@staticmethod
def _test_merge(left, right, output_schema='id long, k int, v int, v2 int'):
def merge_pandas(l, r):
return pd.merge(l, r, on=['id', 'k'])
result = left \
.groupby('id') \
.cogroup(right.groupby('id')) \
.applyInPandas(merge_pandas, output_schema)\
.sort(['id', 'k']) \
.toPandas()
left = left.toPandas()
right = right.toPandas()
expected = pd \
.merge(left, right, on=['id', 'k']) \
.sort_values(by=['id', 'k'])
assert_frame_equal(expected, result)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_cogrouped_map import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
ishanic/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
akhilaananthram/nupic.fluent | fluent/utils/text_preprocess.py | 1 | 10244 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file contains text pre-processing functions for NLP experiments.
"""
import os
import pandas
import re
import string
from collections import Counter
from functools import partial
class TextPreprocess(object):
"""Class for text pre-processing"""
alphabet = string.ascii_lowercase
def __init__(self,
corpusTxt="compilation.txt",
abbrCSV="abbreviations.csv",
contrCSV="contractions.csv"):
"""
@param corpusTxt (str) A compilation of most frequent words. The
      default file 'compilation.txt' contains the most frequent words from the
      British National Corpus, Wiktionary, and books from Project Gutenberg.
@param abbrCSV (str) A compilation of domain specific
abbreviations. The file is a csv with the header "Abbr,Expansion". The
default file 'abbreviations.csv' contains basic abbreviations.
@param contrCSV (str) A compilation of common contractions. The
file is a csv with the header "Contr,Expansion". The default file
'contractions.csv' contains a short list of common contractions.
"""
self.abbrCSV = abbrCSV
self.contrCSV = contrCSV
self.corpusTxt = corpusTxt
self.abbrs = None
self.bagOfWords = None
self.contrs = None
self.corpus = None
def _setupCorpus(self, corpusSource):
"""Create member vars for English language corpus and bag of words."""
corpusPath = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../..', 'data/etc', corpusSource))
try:
self.corpus = file(corpusPath).read()
except IOError:
raise
self.bagOfWords = Counter(self.tokenize(self.corpus))
def _setupAbbrs(self, abbrsSource):
"""
Read in abbreviations, and combine all into one regex that will only match
exact matches and not words containing them.
E.g. if "WFH" is an abbreviation, it will match "WFH", but not "XWFH".
"""
self.abbrs = self.readExpansionFile(abbrsSource, ["", "s", "'s"])
self.abbrRegex = re.compile(r"\b%s\b" % r"\b|\b".join(self.abbrs.keys()))
def _setupContr(self, contrSource):
"""
Read in contractions, and combine all into one regex that will match any
word ending in the contraction.
E.g. if "'ll" is a contraction, it will match "he'll".
"""
self.contrs = self.readExpansionFile(contrSource)
self.contrRegex = re.compile(r"%s\b" % r"\b|".join(self.contrs.keys()))
@staticmethod
def readExpansionFile(filename, suffixes=None):
"""
Read the csv file to get the original/expansion pairs and add suffixes if
necessary.
@param filename (str) Name of csv file to read. Expected format
is original text in col 0, and expansion
in col 1.
@param suffixes (list) Strings that are added to the end of
the original and expanded form if
provided
@return expansionPairs (dict) The keys are the original form with
the suffixes added and the values are
the expanded form with the suffixes
added
"""
if suffixes is None:
suffixes = [""]
expansionPairs = {}
try:
# Allow absolute paths
if os.path.exists(filename):
path = filename
# Allow relative paths
else:
path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../..', 'data/etc', filename))
dataFrame = pandas.read_csv(path)
for i in xrange(dataFrame.shape[0]):
original = dataFrame.iloc[i][0].lower()
expansion = dataFrame.iloc[i][1].lower()
for suffix in suffixes:
originalSuffix = "{}{}".format(original, suffix)
expSuffix = "{}{}".format(expansion, suffix)
expansionPairs[originalSuffix] = expSuffix
except IOError:
raise
# Add an empty string if empty so the regex compiles
if not expansionPairs:
expansionPairs[""] = ""
return expansionPairs
def tokenize(self,
text,
ignoreCommon=None,
removeStrings=None,
correctSpell=False,
expandAbbr=False,
expandContr=False):
"""
Tokenize, returning only lower-case letters and "$".
@param text (str) Single string to tokenize.
@param ignoreCommon (int) This many most frequent words
will be filtered out from the
returned tokens.
@param removeStrings (list) List of strings to delete from
the text.
@param correctSpell (bool) Run tokens through spelling
correction.
@param expandAbbr (bool) Run text through abbreviation
expander
@param expandContr (bool) Run text through contraction
expander
"""
if not isinstance(text, str):
raise ValueError("Must input a single string object to tokenize.")
text = text.lower()
if expandAbbr:
if not self.abbrs:
self._setupAbbrs(self.abbrCSV)
getAbbrExpansion = partial(self.getExpansion, table=self.abbrs)
text = self.abbrRegex.sub(getAbbrExpansion, text)
if expandContr:
if not self.contrs:
self._setupContr(self.contrCSV)
getContrExpansion = partial(self.getExpansion, table=self.contrs)
text = self.contrRegex.sub(getContrExpansion, text)
if removeStrings:
for removal in removeStrings:
text = text.replace(removal, "")
tokens = re.findall('[a-z$]+', text)
if correctSpell:
tokens = [self.correct(t) for t in tokens]
if ignoreCommon:
tokens = self.removeMostCommon(tokens, n=ignoreCommon)
return tokens
def removeMostCommon(self, tokenList, n=100):
"""
Remove the n most common tokens as counted in the bag-of-words corpus.
@param tokenList (list) List of token strings.
@param n (int) Will filter out the n-most
frequent terms.
"""
if not self.bagOfWords:
self._setupCorpus(self.corpusTxt)
ignoreList = [word[0] for word in self.bagOfWords.most_common(n)]
return [token for token in tokenList if token not in ignoreList]
@staticmethod
def getExpansion(match, table):
"""
Gets the expanded version of the regular expression
@param match (_sre.SRE_Match) Regex match to expand
@param table (dict) Maps the string version of the
regular expression to the
string expansion
@return (string) Expansion
"""
return table[match.string[match.start(): match.end()]]
def correct(self, word):
"""
Find the best spelling correction for this word. Prefer edit distance of 0,
then one, then two; otherwise default to the word itself.
"""
if not self.bagOfWords:
self._setupCorpus(self.corpusTxt)
candidates = (self._known({word}) or
self._known(self._editDistance1(word)) or
self._known(self._editDistance2(word)) or
[word])
return max(candidates, key=self.bagOfWords.get)
def _known(self, words):
"""Return the subset of words that are in the corpus."""
return {w for w in words if w in self.bagOfWords}
@staticmethod
def _editDistance1(word):
"""
Return all strings that are edit distance =1 from the input word.
Damerau-Levenshtein edit distance:
- deletion(x,y) is the count(xy typed as x)
- insertion(x,y) is the count(x typed as xy)
- substitution(x,y) is the count(x typed as y)
- transposition(x,y) is the count(xy typed as yx)
"""
# First split the word into tuples of all possible pairs.
# Note: need +1 so we can perform edits at front and back end of the word.
splits = [(word[:i], word[i:]) for i in range(len(word)+1)]
# Now perform the edits at every possible split location.
# Substitution is essentially a deletion and insertion.
delete = [a+b[1:] for a,b in splits if b]
insert = [a+b+c for a,b in splits for c in TextPreprocess.alphabet]
subs = [a+c+b[1:] for a,b in splits for c in TextPreprocess.alphabet if b]
trans = [a+b[1]+b[0]+b[2:] for a,b in splits if len(b)>1]
return set(delete + insert + subs + trans)
def _editDistance2(self, word):
"""
Return all strings that are edit distance =2 from the input word; i.e. call
the _editDistance1() method twice for edits with distances of two.
"""
return {edits2 for edits1 in self._editDistance1(word)
for edits2 in self._editDistance1(edits1)}
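# Minimal usage sketch (illustrative only): exercises the edit-distance helper
# defined above. It assumes the module-level imports and the class attribute
# `alphabet` referenced by _editDistance1() are defined earlier in this file.
if __name__ == "__main__":
  # For a word of length n, _editDistance1 generates on the order of
  # 54*n + 25 candidate strings (deletes, inserts, substitutions, transposes),
  # deduplicated by the returned set.
  candidates = TextPreprocess._editDistance1("teh")
  print("Generated %d edit-distance-1 candidates for 'teh'" % len(candidates))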
| agpl-3.0 |
craigcitro/pydatalab | google/datalab/stackdriver/monitoring/_query.py | 5 | 2818 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Provides access to metric data as pandas dataframes."""
from __future__ import absolute_import
import google.cloud.monitoring
from . import _query_metadata
from . import _utils
class Query(google.cloud.monitoring.Query):
"""Query object for retrieving metric data."""
def __init__(self,
metric_type=google.cloud.monitoring.Query.DEFAULT_METRIC_TYPE,
end_time=None, days=0, hours=0, minutes=0, context=None):
"""Initializes the core query parameters.
The start time (exclusive) is determined by combining the
values of ``days``, ``hours``, and ``minutes``, and subtracting
the resulting duration from the end time.
It is also allowed to omit the end time and duration here,
in which case :meth:`~google.cloud.monitoring.query.Query.select_interval`
must be called before the query is executed.
Args:
metric_type: The metric type name. The default value is
:data:`Query.DEFAULT_METRIC_TYPE
<google.cloud.monitoring.query.Query.DEFAULT_METRIC_TYPE>`, but
please note that this default value is provided only for
demonstration purposes and is subject to change.
end_time: The end time (inclusive) of the time interval for which
results should be returned, as a datetime object. The default
is the start of the current minute.
days: The number of days in the time interval.
hours: The number of hours in the time interval.
minutes: The number of minutes in the time interval.
context: An optional Context object to use instead of the global default.
Raises:
ValueError: ``end_time`` was specified but ``days``, ``hours``, and
``minutes`` are all zero. If you really want to specify a point in
time, use
:meth:`~google.cloud.monitoring.query.Query.select_interval`.
"""
client = _utils.make_client(context)
super(Query, self).__init__(client, metric_type,
end_time=end_time,
days=days, hours=hours, minutes=minutes)
def metadata(self):
"""Retrieves the metadata for the query."""
return _query_metadata.QueryMetadata(self)
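# Illustrative usage sketch (comment only; assumes a default Context is set up
# and that the parent google.cloud.monitoring Query class provides
# as_dataframe(), as in the library version this module targets):
#
#   query = Query('compute.googleapis.com/instance/cpu/utilization', hours=1)
#   metadata = query.metadata()   # QueryMetadata describing the matched series
#   df = query.as_dataframe()     # metric data as a pandas DataFrame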
| apache-2.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/lines_bars_and_markers/simple_plot.py | 1 | 1292 | """
===========
Simple Plot
===========
Create a simple plot.
"""
import matplotlib.pyplot as plt
import numpy as np
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# Data for plotting
t = np.arange(0.0, 2.0, 0.01)
s = 1 + np.sin(2 * np.pi * t)
# Note that using plt.subplots below is equivalent to using
# fig = plt.figure and then ax = fig.add_subplot(111)
fig, ax = plt.subplots()
ax.plot(t, s)
ax.set(xlabel='time (s)', ylabel='voltage (mV)',
title='About as simple as it gets, folks')
ax.grid()
fig.savefig("test.png")
pltshow(plt)
| mit |
adelomana/cassandra | conditionedFitness/figurePatterns/script.sustained.py | 2 | 1771 | import pickle
import statsmodels,statsmodels.api
import matplotlib,matplotlib.pyplot
matplotlib.rcParams.update({'font.size':36,'font.family':'Arial','xtick.labelsize':28,'ytick.labelsize':28})
thePointSize=12
# 0. user defined variables
jarDir='/Users/adriandelomana/scratch/'
# sustained trajectories
selected=['clonal.2.1','engineered.1.1','engineered.2.2','mutagenized.2.2'] # should be n = 6
# 1. iterate over selected trajectories
allx=[]; ally=[]
for replicate in selected:
print(replicate)
# read jar file
jarFile=jarDir+replicate+'.pickle'
f=open(jarFile,'rb')
trajectory=pickle.load(f)
f.close()
# recover data into a format amenable to plotting
x=trajectory[0]
y=trajectory[1]
z=trajectory[2]
for a,b in zip(x,y):
allx.append(a)
ally.append(b)
# run lowess over each replicate; if lowess does not work, use correlation, and if not, pchip
# plot
matplotlib.pyplot.errorbar(x,y,yerr=z,fmt='o',color='black',ecolor='black',markeredgecolor='black',capsize=0,ms=thePointSize,mew=0,alpha=0.33)
# run lowess over all data
lowess = statsmodels.api.nonparametric.lowess(ally,allx,it=10)
matplotlib.pyplot.plot(lowess[:, 0], lowess[:, 1],color='red',lw=4,zorder=100)
# close figure
matplotlib.pyplot.plot([0,300],[0,0],'--',color='black')
matplotlib.pyplot.xlim([-25,325])
matplotlib.pyplot.ylim([-0.44,0.44])
matplotlib.pyplot.xticks([0,100,200,300])
matplotlib.pyplot.yticks([-0.4,-0.2,0,0.2,0.4])
matplotlib.pyplot.xlabel('Generation')
matplotlib.pyplot.ylabel('Conditioned\nFitness')
#matplotlib.pyplot.text(-20,0.3,'Sustained')
matplotlib.pyplot.text(120,-0.38,'Sustained',color='red')
matplotlib.pyplot.tight_layout(pad=0.5)
matplotlib.pyplot.savefig('figure.sustained.pdf')
| gpl-3.0 |
gnychis/grforwarder | gnuradio-examples/python/pfb/resampler.py | 7 | 4207 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys
try:
import scipy
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class mytb(gr.top_block):
def __init__(self, fs_in, fs_out, fc, N=10000):
gr.top_block.__init__(self)
rerate = float(fs_out) / float(fs_in)
print "Resampling from %f to %f by %f " %(fs_in, fs_out, rerate)
# Creating our own taps
taps = gr.firdes.low_pass_2(32, 32, 0.25, 0.1, 80)
self.src = gr.sig_source_c(fs_in, gr.GR_SIN_WAVE, fc, 1)
#self.src = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
self.head = gr.head(gr.sizeof_gr_complex, N)
# A resampler with our taps
self.resamp_0 = blks2.pfb_arb_resampler_ccf(rerate, taps,
flt_size=32)
# A resampler that just needs a resampling rate.
# Filter is created for us and designed to cover
# entire bandwidth of the input signal.
# An optional atten=XX rate can be used here to
# specify the out-of-band rejection (default=80).
self.resamp_1 = blks2.pfb_arb_resampler_ccf(rerate)
self.snk_in = gr.vector_sink_c()
self.snk_0 = gr.vector_sink_c()
self.snk_1 = gr.vector_sink_c()
self.connect(self.src, self.head, self.snk_in)
self.connect(self.head, self.resamp_0, self.snk_0)
self.connect(self.head, self.resamp_1, self.snk_1)
def main():
fs_in = 8000
fs_out = 20000
fc = 1000
N = 10000
tb = mytb(fs_in, fs_out, fc, N)
tb.run()
# Plot PSD of signals
nfftsize = 2048
fig1 = pylab.figure(1, figsize=(10,10), facecolor="w")
sp1 = fig1.add_subplot(2,1,1)
sp1.psd(tb.snk_in.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_in)
sp1.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp1.set_xlim([-fs_in/2, fs_in/2])
sp2 = fig1.add_subplot(2,1,2)
sp2.psd(tb.snk_0.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With our filter")
sp2.psd(tb.snk_1.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With auto-generated filter")
sp2.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
sp2.set_xlim([-fs_out/2, fs_out/2])
sp2.legend()
# Plot signals in time
Ts_in = 1.0/fs_in
Ts_out = 1.0/fs_out
t_in = scipy.arange(0, len(tb.snk_in.data())*Ts_in, Ts_in)
t_out = scipy.arange(0, len(tb.snk_0.data())*Ts_out, Ts_out)
fig2 = pylab.figure(2, figsize=(10,10), facecolor="w")
sp21 = fig2.add_subplot(2,1,1)
sp21.plot(t_in, tb.snk_in.data())
sp21.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp21.set_xlim([t_in[100], t_in[200]])
sp22 = fig2.add_subplot(2,1,2)
sp22.plot(t_out, tb.snk_0.data(),
label="With our filter")
sp22.plot(t_out, tb.snk_1.data(),
label="With auto-generated filter")
sp22.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
r = float(fs_out)/float(fs_in)
sp22.set_xlim([t_out[int(r * 100)], t_out[int(r * 200)]])
sp22.legend()
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
xubenben/data-science-from-scratch | code/clustering.py | 60 | 6438 | from __future__ import division
from linear_algebra import squared_distance, vector_mean, distance
import math, random
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
class KMeans:
"""performs k-means clustering"""
def __init__(self, k):
self.k = k # number of clusters
self.means = None # means of clusters
def classify(self, input):
"""return the index of the cluster closest to the input"""
return min(range(self.k),
key=lambda i: squared_distance(input, self.means[i]))
def train(self, inputs):
self.means = random.sample(inputs, self.k)
assignments = None
while True:
# Find new assignments
new_assignments = map(self.classify, inputs)
# If no assignments have changed, we're done.
if assignments == new_assignments:
return
# Otherwise keep the new assignments,
assignments = new_assignments
for i in range(self.k):
i_points = [p for p, a in zip(inputs, assignments) if a == i]
# avoid divide-by-zero if i_points is empty
if i_points:
self.means[i] = vector_mean(i_points)
def squared_clustering_errors(inputs, k):
"""finds the total squared error from k-means clustering the inputs"""
clusterer = KMeans(k)
clusterer.train(inputs)
means = clusterer.means
assignments = map(clusterer.classify, inputs)
return sum(squared_distance(input,means[cluster])
for input, cluster in zip(inputs, assignments))
def plot_squared_clustering_errors(plt):
ks = range(1, len(inputs) + 1)
errors = [squared_clustering_errors(inputs, k) for k in ks]
plt.plot(ks, errors)
plt.xticks(ks)
plt.xlabel("k")
plt.ylabel("total squared error")
plt.show()
#
# using clustering to recolor an image
#
def recolor_image(input_file, k=5):
img = mpimg.imread(input_file)
pixels = [pixel for row in img for pixel in row]
clusterer = KMeans(k)
clusterer.train(pixels) # this might take a while
def recolor(pixel):
cluster = clusterer.classify(pixel) # index of the closest cluster
return clusterer.means[cluster] # mean of the closest cluster
new_img = [[recolor(pixel) for pixel in row]
for row in img]
plt.imshow(new_img)
plt.axis('off')
plt.show()
#
# hierarchical clustering
#
def is_leaf(cluster):
"""a cluster is a leaf if it has length 1"""
return len(cluster) == 1
def get_children(cluster):
"""returns the two children of this cluster if it's a merged cluster;
raises an exception if this is a leaf cluster"""
if is_leaf(cluster):
raise TypeError("a leaf cluster has no children")
else:
return cluster[1]
def get_values(cluster):
"""returns the value in this cluster (if it's a leaf cluster)
or all the values in the leaf clusters below it (if it's not)"""
if is_leaf(cluster):
return cluster # is already a 1-tuple containing value
else:
return [value
for child in get_children(cluster)
for value in get_values(child)]
def cluster_distance(cluster1, cluster2, distance_agg=min):
"""finds the aggregate distance between elements of cluster1
and elements of cluster2"""
return distance_agg([distance(input1, input2)
for input1 in get_values(cluster1)
for input2 in get_values(cluster2)])
def get_merge_order(cluster):
if is_leaf(cluster):
return float('inf')
else:
return cluster[0] # merge_order is first element of 2-tuple
def bottom_up_cluster(inputs, distance_agg=min):
# start with every input a leaf cluster / 1-tuple
clusters = [(input,) for input in inputs]
# as long as we have more than one cluster left...
while len(clusters) > 1:
# find the two closest clusters
c1, c2 = min([(cluster1, cluster2)
for i, cluster1 in enumerate(clusters)
for cluster2 in clusters[:i]],
key=lambda (x, y): cluster_distance(x, y, distance_agg))
# remove them from the list of clusters
clusters = [c for c in clusters if c != c1 and c != c2]
# merge them, using merge_order = # of clusters left
merged_cluster = (len(clusters), [c1, c2])
# and add their merge
clusters.append(merged_cluster)
# when there's only one cluster left, return it
return clusters[0]
def generate_clusters(base_cluster, num_clusters):
# start with a list with just the base cluster
clusters = [base_cluster]
# as long as we don't have enough clusters yet...
while len(clusters) < num_clusters:
# choose the last-merged of our clusters
next_cluster = min(clusters, key=get_merge_order)
# remove it from the list
clusters = [c for c in clusters if c != next_cluster]
# and add its children to the list (i.e., unmerge it)
clusters.extend(get_children(next_cluster))
# once we have enough clusters...
return clusters
if __name__ == "__main__":
inputs = [[-14,-5],[13,13],[20,23],[-19,-11],[-9,-16],[21,27],[-49,15],[26,13],[-46,5],[-34,-1],[11,15],[-49,0],[-22,-16],[19,28],[-12,-8],[-13,-19],[-41,8],[-11,-6],[-25,-9],[-18,-3]]
random.seed(0) # so you get the same results as me
clusterer = KMeans(3)
clusterer.train(inputs)
print "3-means:"
print clusterer.means
print
random.seed(0)
clusterer = KMeans(2)
clusterer.train(inputs)
print "2-means:"
print clusterer.means
print
print "errors as a function of k"
for k in range(1, len(inputs) + 1):
print k, squared_clustering_errors(inputs, k)
print
print "bottom up hierarchical clustering"
base_cluster = bottom_up_cluster(inputs)
print base_cluster
print
print "three clusters, min:"
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
print
print "three clusters, max:"
base_cluster = bottom_up_cluster(inputs, max)
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
| unlicense |
deepesch/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
Starkiller4011/astroSF | m2_convert.py | 1 | 1131 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
#####################################
# ╔╗ ┬ ┬ ┬┌─┐ ╔╦╗┌─┐┌┬┐ #
# ╠╩╗│ │ │├┤ ║║│ │ │ #
# ╚═╝┴─┘└─┘└─┘ ═╩╝└─┘ ┴ #
# ╔═╗┌─┐┌─┐┌┬┐┬ ┬┌─┐┬─┐┌─┐ #
# ╚═╗│ │├┤ │ │││├─┤├┬┘├┤ #
# ╚═╝└─┘└ ┴ └┴┘┴ ┴┴└─└─┘ #
#####################################
Author: Derek Blue
"""
# Imports
from __future__ import division
import pandas as pd
from dfgui import show as pdui
import sf_methods as sfm
from tqdm import tqdm as pbar
VEGA_MAG = 16.85
ctf = float('8.489e-16')
M335LC = sfm.load_data('./RAW/mkn335_xrt_uvot_lc.dat', headers=True)
x_col = M335LC['MJD'].tolist()
y_col = M335LC['M2_MAG'].tolist()
e_col = M335LC['M2_MG_ERR'].tolist()
M2_DATA = pd.DataFrame({'Time':x_col,
'Flux':y_col,
'Error':e_col})
M2_DATA = M2_DATA[M2_DATA['Flux'] > 0]
pdui(M2_DATA)
| mit |
wei-Z/Python-Machine-Learning | self_practice/CH2A.py | 1 | 4506 | import numpy as np
class Perceptron(object):
"""Perceptron classifier.
Parameters
------------
eta : float
Learning rate (between 0.0 and 1.0)
n_iter : int
Passes over the training dataset.
Attributes
-----------
w_ : 1d-array
Weights after fitting.
errors_ : list
Number of misclassifications in every epoch.
"""
def __init__(self, eta=0.01, n_iter=10):
self.eta = eta
self.n_iter = n_iter
def fit(self, X, y):
"""Fit training data.
Parameters
----------
X : {array-like}, shape = [n_samples, n_features]
Training vectors, where n_samples
is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-----------
self : object
"""
self.w_ = np.zeros(1 + X.shape[1]) # initialize weights to a zero array of length 1 + n_features, e.g. array([0., 0., 0.]) for 2 features
self.errors_ = []
for _ in range(self.n_iter):
errors = 0
for xi, target in zip(X, y):
update = self.eta * (target - self.predict(xi))
self.w_[1:] += update * xi
self.w_[0] += update
errors += int(update != 0.0) # count a misclassification whenever update != 0.0
self.errors_.append(errors)
return self
def net_input(self, X):
#"""Calculate net input"""
return np.dot(X, self.w_[1:]) + self.w_[0] # matrix multiplication
def predict(self, X):
#"""Return class label after unit step"""
return np.where(self.net_input(X) >= 0.0, 1, -1) # numpy.where(condition[, x, y])
# Return elements, either from x or y, depending on condition.
# Training a perceptron model on the Iris dataset
import pandas as pd
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
df.tail()
import matplotlib.pyplot as plt
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)# if y == 'Iris-setosa', y = -1, otherwise if y == 'Iris-versicolor', y =1.
X = df.iloc[0:100, [0,2]].values
plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')
plt.xlabel('petal length')
plt.ylabel('sepal length')
plt.legend(loc='upper left')
plt.show()
ppn = Perceptron()
ppn.fit(X,y)
plt.plot(range(1, len(ppn.errors_)+1), ppn.errors_, marker='o', color="green")
plt.xlabel('Epochs')
plt.ylabel('Number of misclassifications')
plt.show()
# Implement a small convenience function to visualize the decision boundaries for 2D datasets:
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
print "xx1: ", xx1
print "xx2: ", xx2
print "Z: ", Z
print "xx1.ravel(): ", xx1.ravel()
print "xx2.ravel(): ", xx2.ravel()
Z = Z.reshape(xx1.shape)
print "Z: ", Z
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
plot_decision_regions(X, y, classifier=ppn)
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc = 'upper left')
plt.show()
# meshgrid, ravel, reshape, contourf, xlim, ylim
# how to use contourf and meshgrid:
x = np.arange(-5, 5, 0.1)
y = np.arange(-5, 5, 0.1)
xx, yy = np.meshgrid(x, y, sparse=True)
z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
h = plt.contourf(x,y,z)
| mit |
CVML/scikit-learn | sklearn/tests/test_multiclass.py | 72 | 24581 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
# Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
@ignore_warnings
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
# y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = assert_warns(DeprecationWarning,
OneVsRestClassifier(base_clf).fit,
X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
# For each sample and each class, there only 3 possible vote levels
# because they are only 3 distinct class pairs thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
mrshu/scikit-learn | sklearn/preprocessing.py | 1 | 40726 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD
from collections import Sequence
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from .utils import check_arrays, array2d, atleast2d_or_csr, safe_asarray
from .utils import warn_if_not_float
from .utils.fixes import unique
from .base import BaseEstimator, TransformerMixin
from .utils.sparsefuncs import inplace_csr_row_normalize_l1
from .utils.sparsefuncs import inplace_csr_row_normalize_l2
from .utils.sparsefuncs import inplace_csr_column_scale
from .utils.sparsefuncs import mean_variance_axis0
__all__ = ['Binarizer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'Normalizer',
'StandardScaler',
'binarize',
'normalize',
'scale']
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
if isinstance(std_, np.ndarray):
std_[std_ == 0.0] = 1.0
elif std_ == 0.:
std_ = 1.
else:
std_ = None
return mean_, std_
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if sp.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
warn_if_not_float(X, estimator='The scale function')
if not sp.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis0(X)
var[var == 0.0] = 1.0
inplace_csr_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
warn_if_not_float(X, estimator='The scale function')
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
if with_std:
Xr /= std_
return X
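# Illustrative example (comment-only sketch, not part of the library API):
# scaling a small dense array yields columns with zero mean and unit variance.
#
#   >>> X = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]])
#   >>> X_scaled = scale(X)
#   >>> X_scaled.mean(axis=0)   # approximately [0., 0., 0.]
#   >>> X_scaled.std(axis=0)    # approximately [1., 1., 1.]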
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Standardizes features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The standardization is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This standardization is often used as an alternative to zero mean,
unit variance scaling.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default is True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
warn_if_not_float(X, estimator=self)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
min_ = np.min(X, axis=0)
scale_ = np.max(X, axis=0) - min_
# Do not scale constant features
scale_[scale_ == 0.0] = 1.0
self.scale_ = (feature_range[1] - feature_range[0]) / scale_
self.min_ = feature_range[0] - min_ / scale_
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
X *= self.scale_
X += self.min_
return X
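# Illustrative example (comment-only sketch): with the default feature_range
# of (0, 1), each column is mapped so its minimum becomes 0 and its maximum 1.
#
#   >>> X = np.array([[1., 2.], [2., 6.], [3., 10.]])
#   >>> MinMaxScaler().fit_transform(X)
#   array([[ 0. ,  0. ],
#          [ 0.5,  0.5],
#          [ 1. ,  1. ]])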
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Attributes
----------
`mean_` : array of floats with shape [n_features]
The mean value for each feature in the training set.
`std_` : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_arrays(X, copy=self.copy, sparse_format="csr")[0]
if sp.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
warn_if_not_float(X, estimator=self)
self.mean_ = None
var = mean_variance_axis0(X)[1]
self.std_ = np.sqrt(var)
self.std_[var == 0.0] = 1.0
return self
else:
warn_if_not_float(X, estimator=self)
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
X = check_arrays(X, copy=copy, sparse_format="csr")[0]
if sp.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
warn_if_not_float(X, estimator=self)
inplace_csr_column_scale(X, 1 / self.std_)
else:
warn_if_not_float(X, estimator=self)
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
if sp.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sp.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
inplace_csr_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
class Scaler(StandardScaler):
def __init__(self, copy=True, with_mean=True, with_std=True):
warnings.warn("Scaler was renamed to StandardScaler. The old name "
" will be removed in 0.15.", DeprecationWarning)
super(Scaler, self).__init__(copy, with_mean, with_std)
def normalize(X, norm='l2', axis=1, copy=True):
"""Normalize a dataset along any axis
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_arrays(X, sparse_format=sparse_format, copy=copy)[0]
warn_if_not_float(X, 'The normalize function')
if axis == 0:
X = X.T
if sp.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)[:, np.newaxis]
norms[norms == 0.0] = 1.0
elif norm == 'l2':
norms = np.sqrt(np.sum(X ** 2, axis=1))[:, np.newaxis]
norms[norms == 0.0] = 1.0
X /= norms
if axis == 0:
X = X.T
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Parameters
----------
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
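    Examples
    --------
    A minimal sketch of the fit/transform round trip on toy data (``fit``
    is a no-op, it is only kept for pipeline compatibility):
    >>> from sklearn.preprocessing import Normalizer
    >>> X = [[1., 3.], [3., 1.]]
    >>> normalizer = Normalizer(norm='l1')
    >>> normalizer.fit(X)
    Normalizer(copy=True, norm='l1')
    >>> normalizer.transform(X)
    array([[ 0.25, 0.75],
           [ 0.75, 0.25]])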
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
atleast2d_or_csr(X)
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
atleast2d_or_csr(X)
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
The lower bound that triggers feature values to be replaced by 1.0.
copy : boolean, optional, default is True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
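    Examples
    --------
    An illustrative call on a small dense array with a non-default threshold:
    >>> from sklearn.preprocessing import binarize
    >>> binarize([[1.5, 0., -2.], [0., 3., 0.5]], threshold=1.0)
    array([[ 1., 0., 0.],
           [ 0., 1., 0.]])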
"""
X = check_arrays(X, sparse_format='csr', copy=copy)[0]
if sp.issparse(X):
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
# FIXME: if enough values became 0, it may be worth changing
# the sparsity structure
X.data[not_cond] = 0
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
The default threshold is 0.0 so that any non-zero values are set to 1.0
and zeros are left untouched.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
    feature rather than a quantified number of occurrences, for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modeled using the Bernoulli
distribution in a Bayesian setting).
Parameters
----------
threshold : float, optional (0.0 by default)
The lower bound that triggers feature values to be replaced by 1.0.
copy : boolean, optional, default is True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
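    Examples
    --------
    A small sketch using the default threshold of 0.0 (``fit`` is a no-op):
    >>> from sklearn.preprocessing import Binarizer
    >>> X = [[0., -1., 2.], [3., 0., -0.5]]
    >>> binarizer = Binarizer().fit(X)
    >>> binarizer.transform(X)
    array([[ 0., 0., 1.],
           [ 1., 0., 0.]])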
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
atleast2d_or_csr(X)
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
def _is_label_indicator_matrix(y):
return hasattr(y, "shape") and len(y.shape) == 2
def _is_multilabel(y):
# the explicit check for ndarray is for forward compatibility; future
# versions of Numpy might want to register ndarray as a Sequence
return (not isinstance(y[0], np.ndarray) and isinstance(y[0], Sequence) and
not isinstance(y[0], basestring) or _is_label_indicator_matrix(y))
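# Illustrative behaviour of the two helpers above (toy inputs, not exhaustive):
# _is_multilabel([1, 2, 3]) -> False (plain multiclass target vector)
# _is_multilabel([(1, 2), (3,)]) -> True (sequence of label tuples)
# _is_multilabel(np.zeros((5, 3))) -> True (2-d label indicator matrix)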
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
    a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to scikit-learn
estimators.
Parameters
----------
n_values : 'auto', int or array of int
Number of values per feature.
'auto' : determine value range from training data.
int : maximum value for all features.
array : maximum value per feature.
dtype : number type, default=np.float
Desired dtype of output.
Attributes
----------
`active_features_` : array
Indices for active features, meaning values that actually occur in the
training set. Only available when n_values is ``'auto'``.
`feature_indices_` : array of shape (n_features,)
Indices to feature ranges. Feature ``i`` in the original data is mapped
to features ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and potentially masked by `active_features_` afterwards)
`n_values_` : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
OneHotEncoder(dtype=<type 'float'>, n_values='auto')
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
    LabelEncoder : encodes arbitrary class labels with integer values
        between 0 and n_classes - 1.
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
"""
def __init__(self, n_values="auto", dtype=np.float):
self.n_values = n_values
self.dtype = dtype
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
X = check_arrays(X, sparse_format='dense', dtype=np.int)[0]
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sp.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, feature_indices_[-1])
Input array of type int.
Returns
-------
X_out : sparse matrix, dtype=int
Transformed input.
"""
X = check_arrays(X, sparse_format='dense', dtype=np.int)[0]
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
n_values_check = np.max(X, axis=0) + 1
if (n_values_check > self.n_values_).any():
raise ValueError("Feature out of bounds. Try setting n_values.")
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sp.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Attributes
----------
`classes_`: array of shape [n_class]
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelNormalizer was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
self : returns an instance of self.
"""
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self.classes_, y = unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Parameters
----------
neg_label: int (default: 0)
Value with which negative labels must be encoded.
pos_label: int (default: 1)
Value with which positive labels must be encoded.
Attributes
----------
`classes_`: array of shape [n_class]
Holds the label for each class.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
>>> lb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> lb.classes_
array([1, 2, 3])
"""
def __init__(self, neg_label=0, pos_label=1):
if neg_label >= pos_label:
raise ValueError("neg_label must be strictly less than pos_label.")
self.neg_label = neg_label
self.pos_label = pos_label
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
self : returns an instance of self.
"""
self.multilabel = _is_multilabel(y)
if self.multilabel:
self.indicator_matrix_ = _is_label_indicator_matrix(y)
if self.indicator_matrix_:
self.classes_ = np.arange(y.shape[1])
else:
self.classes_ = np.array(sorted(set.union(*map(set, y))))
else:
self.classes_ = np.unique(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
Y : numpy array of shape [n_samples, n_classes]
"""
self._check_fitted()
if self.multilabel or len(self.classes_) > 2:
if _is_label_indicator_matrix(y):
# nothing to do as y is already a label indicator matrix
return y
Y = np.zeros((len(y), len(self.classes_)), dtype=np.int)
else:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += self.neg_label
y_is_multilabel = _is_multilabel(y)
if y_is_multilabel and not self.multilabel:
raise ValueError("The object was not fitted with multilabel"
" input!")
elif self.multilabel:
if not _is_multilabel(y):
raise ValueError("y should be a list of label lists/tuples,"
"got %r" % (y,))
# inverse map: label => column index
imap = dict((v, k) for k, v in enumerate(self.classes_))
for i, label_tuple in enumerate(y):
for label in label_tuple:
Y[i, imap[label]] = self.pos_label
return Y
else:
y = np.asarray(y)
if len(self.classes_) == 2:
Y[y == self.classes_[1], 0] = self.pos_label
return Y
elif len(self.classes_) >= 2:
for i, k in enumerate(self.classes_):
Y[y == k, i] = self.pos_label
return Y
else:
# Only one class, returns a matrix with all negative labels.
return Y
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array of shape [n_samples, n_classes]
Target values.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
half = (self.pos_label - self.neg_label) / 2.0
threshold = self.neg_label + half
if self.multilabel:
Y = np.array(Y > threshold, dtype=int)
# Return the predictions in the same format as in fit
if self.indicator_matrix_:
# Label indicator matrix format
return Y
else:
# Lists of tuples format
return [tuple(self.classes_[np.flatnonzero(Y[i])])
for i in range(Y.shape[0])]
if len(Y.shape) == 1 or Y.shape[1] == 1:
y = np.array(Y.ravel() > threshold, dtype=int)
else:
y = Y.argmax(axis=1)
return self.classes_[y]
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x_i, x_j) be a kernel defined by K(x_i, x_j) = phi(x_i)^T phi(x_j),
    where phi(x) is a function mapping x to a Hilbert space. KernelCenterer is
    a class to center (i.e., normalize to have zero mean) the data without
    explicitly computing phi(x). It is equivalent to centering
phi(x) with sklearn.preprocessing.StandardScaler(with_std=False).
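    Examples
    --------
    A small illustrative check on a linear kernel: when fitted and
    transformed on the same kernel matrix, the centered kernel has row and
    column sums that are (numerically) zero.
    >>> import numpy as np
    >>> from sklearn.preprocessing import KernelCenterer
    >>> X = np.array([[1., 2.], [2., 3.], [3., 5.]])
    >>> K = np.dot(X, X.T)
    >>> K_centered = KernelCenterer().fit(K).transform(K)
    >>> bool(np.allclose(K_centered.sum(axis=0), 0))
    True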
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = array2d(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
K = array2d(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
    Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = safe_asarray(X)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sp.issparse(X):
if sp.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sp.coo_matrix((data, (row, col)), shape)
elif sp.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sp.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
X = klass(add_dummy_feature(X.tocoo(), value))
return klass(X)
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
| bsd-3-clause |
mganeva/mantid | qt/python/mantidqt/project/projectsaver.py | 1 | 5274 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
#
from __future__ import (absolute_import, division, print_function, unicode_literals)
from json import dump
import os
from mantid.api import AnalysisDataService as ADS
from mantidqt.project.workspacesaver import WorkspaceSaver
from mantidqt.project.plotssaver import PlotsSaver
from mantid import logger
class ProjectSaver(object):
def __init__(self, project_file_ext):
self.project_file_ext = project_file_ext
def save_project(self, file_name, workspace_to_save=None, plots_to_save=None, interfaces_to_save=None,
project_recovery=True):
"""
The method that will actually save the project and call relevant savers for workspaces, plots, interfaces etc.
        :param file_name: String; The file name of the project file to write to
:param workspace_to_save: List; of Strings that will have workspace names in it, if None will save all
:param plots_to_save: List; of matplotlib.figure objects to save to the project file.
:param interfaces_to_save: List of Lists of Window and Encoder; the interfaces to save and the encoders to use
:param project_recovery: Bool; If the behaviour of Project Save should be altered to function correctly inside
of project recovery
:return: None; If the method cannot be completed.
"""
# Check if the file_name doesn't exist
if file_name is None:
logger.warning("Please select a valid file name")
return
# Check this isn't saving a blank project file
if (workspace_to_save is None and plots_to_save is None and interfaces_to_save is None) and project_recovery:
logger.warning("Can not save an empty project")
return
directory = os.path.dirname(file_name)
# Save workspaces to that location
if project_recovery:
workspace_saver = WorkspaceSaver(directory=directory)
workspace_saver.save_workspaces(workspaces_to_save=workspace_to_save)
saved_workspaces = workspace_saver.get_output_list()
else:
# Assume that this is project recovery so pass a list of workspace names
saved_workspaces = ADS.getObjectNames()
# Generate plots
plots_to_save_list = PlotsSaver().save_plots(plots_to_save)
# Save interfaces
if interfaces_to_save is None:
interfaces_to_save = []
interfaces = self._return_interfaces_dicts(directory=directory, interfaces_to_save=interfaces_to_save)
# Pass dicts to Project Writer
writer = ProjectWriter(workspace_names=saved_workspaces,
plots_to_save=plots_to_save_list,
interfaces_to_save=interfaces,
save_location=file_name,
project_file_ext=self.project_file_ext)
writer.write_out()
@staticmethod
def _return_interfaces_dicts(directory, interfaces_to_save):
interfaces = []
for interface, encoder in interfaces_to_save:
            # Add the encoded data to the dictionary, keyed by the first tag in the encoder's tags attribute
try:
tag = encoder.tags[0]
encoded_dict = encoder.encode(interface, directory)
encoded_dict["tag"] = tag
interfaces.append(encoded_dict)
except Exception as e:
# Catch any exception and log it
if isinstance(e, KeyboardInterrupt):
raise
logger.warning("Project Saver: An interface could not be saver error: " + str(e))
return interfaces
class ProjectWriter(object):
def __init__(self, save_location, workspace_names, project_file_ext, plots_to_save, interfaces_to_save):
self.workspace_names = workspace_names
self.file_name = save_location
self.project_file_ext = project_file_ext
self.plots_to_save = plots_to_save
self.interfaces_to_save = interfaces_to_save
def write_out(self):
"""
Write out the project file that contains workspace names, interfaces information, plot preferences etc.
"""
# Get the JSON string versions
to_save_dict = {"workspaces": self.workspace_names, "plots": self.plots_to_save,
"interfaces": self.interfaces_to_save}
# Open file and save the string to it alongside the workspace_names
if self.project_file_ext not in os.path.basename(self.file_name):
self.file_name = self.file_name + self.project_file_ext
try:
with open(self.file_name, "w+") as f:
dump(obj=to_save_dict, fp=f)
except Exception as e:
# Catch any exception and log it
if isinstance(e, KeyboardInterrupt):
raise
logger.warning("JSON project file unable to be opened/written to")
| gpl-3.0 |
DaveBackus/Data_Bootcamp | Code/Lab/googlefinance.py | 1 | 3562 | """
This file demonstrates how to read minute level data from the
Google finance api
"""
import datetime as dt
import numpy as np
import pandas as pd
import pandas_datareader as pdr
import requests as r
import sys
from io import StringIO
def retrieve_single_timeseries(ticker, secs=60, ndays=5):
"""
Grabs data from Google finance. It retrieves the data for `ticker` at
`secs` intervals for the most recent `ndays`. The fields it retrieves
    for each interval are (time, open price, close price, volume of trade)
Parameters
----------
ticker : String
Single ticker name
secs : scalar(Int)
Number of seconds to sample at
ndays : scalar(Int)
Number of days of data to retrieve (max is 5)
Returns
-------
data : DataFrame
Pandas DataFrame with the stock open, close, volume, and date
information.
"""
# Get the Base url
baseurl = "http://www.google.com/finance/getprices"
# Dictionary for parameters
pdict = {}
pdict["q"] = ticker
pdict["i"] = secs
pdict["p"] = str(ndays) + "d"
pdict["f"] = "d,c,v,o"
# Retrieve data
raw_html = r.get(baseurl, params=pdict)
raw_text = raw_html.text
# Clean data
data = clean_data(raw_text, ticker)
return data
def clean_data(raw_text, ticker):
"""
Takes the raw text output of the html request and cleans it into a
pandas dataframe
Parameters
----------
raw_text : String
The text generated by the html request
Returns
-------
data : Pandas.DataFrame
DataFrame with relevant data
"""
# Split by line separators
all_lines = raw_text.split("\n")
metadata = all_lines[1:7]
data_csv = StringIO("\n".join(all_lines[7:]))
# Deal with metadata that we care about
for line in metadata:
if "COLUMNS=" in line:
# Get columns
columns = line.split("COLUMNS=")[1].split(",")
elif "INTERVAL" in line:
timeincrement = int(line.split("INTERVAL=")[1])
elif "TIMEZONE_OFFSET" in line:
# Get timezone offset in seconds
tzoffset = int(line.split("TIMEZONE_OFFSET=")[1])*60
elif "MARKET_OPEN_MINUTE" in line:
            # Market open minute (parsed for completeness; unused below)
            opentime = int(line.split("MARKET_OPEN_MINUTE=")[1])
# Load data into pandas
data = pd.read_csv(data_csv, names=columns)
# Fix the date rows
data.insert(0, "DateTime", np.NaN)
data.insert(1, "TICKER", ticker)
for i, row in data.iterrows():
if row["DATE"][0] is "a":
secsfromepoch = int(row["DATE"][1:])
basedate = dt.datetime.utcfromtimestamp(secsfromepoch + tzoffset)
data.set_value(i, "DateTime", basedate)
else:
secsfrombase = int(row["DATE"])
data.set_value(i, "DateTime", basedate + dt.timedelta(seconds=secsfrombase*timeincrement))
# Fix the data (no price changes when market is closed)
    # Drop irrelevant info, rename, compute incremental returns, and rescale volume
data = data.drop(labels="DATE", axis=1)
data = data.rename(columns={"DateTime": "DATE"})
data.insert(1, "RETURNS", 100*(data["CLOSE"]/data["OPEN"] - 1))
data.set_value(data.index, "VOLUME", data["VOLUME"]/1e6)
return data
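# Example of the raw text that clean_data expects (illustrative values only;
# the real Google response carries a few more header lines):
# COLUMNS=DATE,CLOSE,VOLUME,OPEN
# INTERVAL=60
# TIMEZONE_OFFSET=-240
# a1175942400,100.5,1000,100.0   <- "a"-prefixed DATE rows hold epoch seconds
# 1,101.0,1200,100.5             <- bare DATE values are offsets in INTERVAL units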
stock_tickers = ["AAPL", "F", "GM" ,"GOOG", "MSFT",
"MDLZ", "FOXA", "VRSK", "PCLN",
"SBUX", "ROST", "WFM", "CHKP",
"MAT", "LVNTA", "AMAT", "ADI"]
dfs = []
for tick in stock_tickers:
dfs.append(retrieve_single_timeseries(tick))
fulldata = pd.concat(dfs)
| mit |
dingocuster/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
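    Examples
    --------
    A minimal illustration for a single-output target (this is a private
    helper; the call is shown only to document the return values):
    >>> y_type, y_true, y_pred, multioutput = _check_reg_targets(
    ...     [3, -0.5, 2], [2.5, 0.0, 2], 'uniform_average')
    >>> y_type
    'continuous'
    >>> y_true.shape
    (3, 1)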
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted', but
will be changed to 'uniform_average' in next versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
PetaVision/projects | momentLearn/scripts/recon_simple.py | 2 | 1138 | import os, sys
lib_path = os.path.abspath("/home/slundquist/workspace/PetaVision/plab/")
sys.path.append(lib_path)
from plotRecon import plotRecon
from plotReconError import plotReconError
#For plotting
#import matplotlib.pyplot as plt
outputDir = "/nh/compneuro/Data/momentLearn/output/simple_momentum_out/"
skipFrames = 10 #Only print every 20th frame
startFrames = 0
doPlotRecon = True
doPlotErr = False
errShowPlots = False
layers = [
"a1_ImageRescale",
"a4_Recon",
]
#Layers for constructing recon error
preErrLayers = [
"a1_LeftDownsample",
"a5_RightDownsample",
]
postErrLayers = [
"a3_LeftRecon",
"a7_RightRecon",
]
gtLayers = None
#gtLayers = [
# #"a25_DepthRescale",
# #"a25_DepthRescale",
# #"a25_DepthRescale",
# "a25_DepthRescale",
# "a25_DepthRescale",
# "a25_DepthRescale",
#]
preToPostScale = [
.007,
.007,
]
if(doPlotRecon):
print("Plotting reconstructions")
plotRecon(layers, outputDir, skipFrames)
if(doPlotErr):
print("Plotting reconstruction error")
plotReconError(preErrLayers, postErrLayers, preToPostScale, outputDir, errShowPlots, skipFrames, gtLayers)
| epl-1.0 |
kaichogami/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when imposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
Obus/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have installed.
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
MBARIMike/stoqs | stoqs/contrib/parquet/extract_columns.py | 2 | 11789 | #!/usr/bin/env python
"""
Pull all the temperature and salinity data out of a STOQS database no
matter what platform and write it out in Parquet file format.
This is a companion to select_data_in_columns_for_data_science.ipynb
where we operationalize the explorations demonstrated in this Notebook:
https://nbviewer.jupyter.org/github/stoqs/stoqs/blob/master/stoqs/contrib/notebooks/select_data_in_columns_for_data_science.ipynb
Sample command line executions:
(base) ➜ docker git:(master) ✗ docker-compose exec stoqs stoqs/contrib/parquet/extract_columns.py --db stoqs_canon_october2020 --platforms dorado -o dorado.parquet -v
INFO 2021-02-24 21:35:53,588 extract_columns.py _estimate_memory():161 Estimated required_memory = 146584085.76
INFO 2021-02-24 21:35:53,588 extract_columns.py _sql_to_df():65 Reading from SQL query into DataFrame...
INFO 2021-02-24 21:36:11,430 extract_columns.py _sql_to_df():77 df.shape: (2245467, 8) - read_sql_query() in 17.8 sec
INFO 2021-02-24 21:36:11,433 extract_columns.py _sql_to_df():78 df.memory_usage().sum(): 143710016
INFO 2021-02-24 21:36:13,876 extract_columns.py pivot_table_to_parquet():172 Writing data to file dorado.parquet...
INFO 2021-02-24 21:36:14,378 extract_columns.py pivot_table_to_parquet():176 dfp.shape: (169159, 16) - to_parquet() in 0.5 sec
INFO 2021-02-24 21:36:14,378 extract_columns.py pivot_table_to_parquet():177 Done
stoqs container peak memory usage: 2.1 GB
(base) ➜ docker git:(master) ✗ docker-compose exec stoqs stoqs/contrib/parquet/extract_columns.py --db stoqs_canon_october2020 --platforms pontus makai -o lrauv.parquet -v
INFO 2021-02-24 21:38:42,623 extract_columns.py _estimate_memory():161 Estimated required_memory = 645795521.28
INFO 2021-02-24 21:38:42,624 extract_columns.py _sql_to_df():65 Reading from SQL query into DataFrame...
INFO 2021-02-24 21:40:13,936 extract_columns.py _sql_to_df():77 df.shape: (9892701, 8) - read_sql_query() in 91.3 sec
INFO 2021-02-24 21:40:13,938 extract_columns.py _sql_to_df():78 df.memory_usage().sum(): 633132992
INFO 2021-02-24 21:40:30,517 extract_columns.py pivot_table_to_parquet():172 Writing data to file lrauv.parquet...
INFO 2021-02-24 21:40:31,798 extract_columns.py pivot_table_to_parquet():176 dfp.shape: (744662, 25) - to_parquet() in 1.3 sec
INFO 2021-02-24 21:40:31,798 extract_columns.py pivot_table_to_parquet():177 Done
stoqs container peak memory usage: 7.9 GB
(base) ➜ docker git:(master) ✗ docker-compose exec stoqs stoqs/contrib/parquet/extract_columns.py --db stoqs_canon_october2020 -o all_plats.parquet -v
INFO 2021-02-24 21:41:53,151 extract_columns.py _estimate_memory():161 Estimated required_memory = 896823624.96
INFO 2021-02-24 21:41:53,153 extract_columns.py _sql_to_df():65 Reading from SQL query into DataFrame...
INFO 2021-02-24 21:43:56,723 extract_columns.py _sql_to_df():77 df.shape: (13738107, 8) - read_sql_query() in 123.6 sec
INFO 2021-02-24 21:43:56,725 extract_columns.py _sql_to_df():78 df.memory_usage().sum(): 879238976
INFO 2021-02-24 21:44:20,578 extract_columns.py pivot_table_to_parquet():172 Writing data to file all_plats.parquet...
INFO 2021-02-24 21:44:23,349 extract_columns.py pivot_table_to_parquet():176 dfp.shape: (1123909, 61) - to_parquet() in 2.8 sec
INFO 2021-02-24 21:44:23,349 extract_columns.py pivot_table_to_parquet():177 Done
stoqs container peak memory usage: 11 GB
A regression of estimated df size to container memory usage gives a factor of 12.3
Mike McCann
MBARI 29 January 2021
"""
import os
import sys
# Insert Django App directory (parent of config) into python path
sys.path.insert(0, os.path.abspath(os.path.join(
os.path.dirname(__file__), "../../")))
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'config.settings.local'
# django >=1.7
try:
import django
django.setup()
except AttributeError:
pass
import argparse
import logging
import pandas as pd
from django.db import connections
from stoqs.models import Platform
from time import time
class Columnar():
logger = logging.getLogger(__name__)
_handler = logging.StreamHandler()
_formatter = logging.Formatter('%(levelname)s %(asctime)s %(filename)s '
'%(funcName)s():%(lineno)d %(message)s')
_handler.setFormatter(_formatter)
_log_levels = (logging.WARN, logging.INFO, logging.DEBUG)
logger.addHandler(_handler)
# Set to GB of RAM that have been resourced to the Docker engine
MAX_CONTAINER_MEMORY = 16
DF_TO_RAM_FACTOR = 12.3
def _set_platforms(self):
'''Set plats and plat_list member variables
'''
platforms = (self.args.platforms or
Platform.objects.using(self.args.db).all()
.values_list('name', flat=True).order_by('name'))
self.logger.debug(platforms)
self.plats = ''
self.plat_list = []
for platform in platforms:
if platform in self.args.platforms_omit:
# Omit some platforms for shorter execution times
continue
self.plats += f"'{platform}',"
self.plat_list.append(platform)
self.plats = self.plats[:-2] + "'"
def _sql_to_df(self, sql, extract=False):
if extract:
self.logger.info('Reading from SQL query into DataFrame...')
# More than 10 GB of RAM is needed in Docker Desktop for reading data
# from stoqs_canon_october2020. The chunksize option in read_sql_query()
# does not help reduce the server side memory usage.
# See: https://stackoverflow.com/a/31843091/1281657
# https://github.com/pandas-dev/pandas/issues/12265#issuecomment-181809005
# https://github.com/pandas-dev/pandas/issues/35689
stime = time()
df = pd.read_sql_query(sql, connections[self.args.db])
etime = time() - stime
if extract:
self.logger.info(f"df.shape: {df.shape} <- read_sql_query() in {etime:.1f} sec")
self.logger.info(f"Actual df.memory_usage().sum():"
f" {(df.memory_usage().sum()/1.e9):.3f} GB")
self.logger.debug(f"Head of original df:\n{df.head()}")
return df
def _build_sql(self, limit=None, order=True, count=False):
self._set_platforms()
# Base query that's similar to the one behind the api/measuredparameter.csv request
sql = f'''\nFROM public.stoqs_measuredparameter
INNER JOIN stoqs_measurement ON (stoqs_measuredparameter.measurement_id = stoqs_measurement.id)
INNER JOIN stoqs_instantpoint ON (stoqs_measurement.instantpoint_id = stoqs_instantpoint.id)
INNER JOIN stoqs_activity ON (stoqs_instantpoint.activity_id = stoqs_activity.id)
INNER JOIN stoqs_platform ON (stoqs_activity.platform_id = stoqs_platform.id)
INNER JOIN stoqs_parameter ON (stoqs_measuredparameter.parameter_id = stoqs_parameter.id)
WHERE stoqs_platform.name IN ({self.plats})
AND stoqs_parameter.{self.args.collect} is not null'''
if count:
sql = 'SELECT count(*) ' + sql
else:
sql = f'''SELECT stoqs_platform.name as platform,
stoqs_instantpoint.timevalue, stoqs_measurement.depth,
ST_X(stoqs_measurement.geom) as longitude,
ST_Y(stoqs_measurement.geom) as latitude,
stoqs_parameter.{self.args.collect},
stoqs_measuredparameter.datavalue {sql}'''
if order:
sql += ('\nORDER BY stoqs_platform.name, stoqs_instantpoint.timevalue,'
' stoqs_measurement.depth, stoqs_parameter.name')
if limit:
sql += f"\nLIMIT {limit}"
self.logger.debug(f'sql = {sql}')
return sql
def _estimate_memory(self):
'''Perform a small query on the selection and extrapolate
to estimate the server-side memory required for the full extraction.
'''
SAMPLE_SIZE = 100
sql = self._build_sql(limit=SAMPLE_SIZE, order=False)
df = self._sql_to_df(sql)
sample_memory = df.memory_usage().sum()
self.logger.debug(f"{sample_memory} B for {SAMPLE_SIZE} records")
total_recs = self._sql_to_df(self._build_sql(count=True))['count'][0]
self.logger.debug(f"total_recs = {total_recs}")
required_memory = total_recs * sample_memory / SAMPLE_SIZE / 1.e9
container_memory = self.DF_TO_RAM_FACTOR * required_memory
self.logger.info(f"Estimated required_memory:"
f" {required_memory:.3f} GB for DataFrame,"
f" {container_memory:.3f} GB for container RAM,")
if container_memory > self.MAX_CONTAINER_MEMORY:
self.logger.exception(f"Request of {container_memory:.3f} GB would"
f" exceed {self.MAX_CONTAINER_MEMORY} GB"
f" of RAM available")
sys.exit(-1)
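    # Illustrative arithmetic for the extrapolation above (hypothetical round numbers
    # modeled on the log excerpt in the module docstring): a 100-record sample using
    # ~6.4 kB of DataFrame memory with ~13.7M matching records gives
    #   required_memory  ~= 13.7e6 * 6400 / 100 / 1e9 ~= 0.88 GB for the DataFrame
    #   container_memory ~= 12.3 * 0.88               ~= 10.8 GB of container RAM
    # which fits under the 16 GB MAX_CONTAINER_MEMORY default.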
def pivot_table_to_parquet(self):
        '''Approach 4. Use Pandas to do a pivot on data read into a DataFrame
'''
self._estimate_memory()
sql = self._build_sql()
df = self._sql_to_df(sql, extract=True)
context = ['platform', 'timevalue', 'depth', 'latitude', 'longitude']
dfp = df.pivot_table(index=context, columns=self.args.collect, values='datavalue')
self.logger.debug(dfp.shape)
self.logger.info(f'Writing data to file {self.args.output}...')
stime = time()
dfp.to_parquet(self.args.output)
etime = time() - stime
self.logger.info(f"dfp.shape: {dfp.shape} -> to_parquet() in {etime:.1f} sec")
self.logger.debug(f"Head of pivoted df:\n{dfp.head()}")
self.logger.info('Done')
def process_command_line(self):
parser = argparse.ArgumentParser(description='Transform STOQS data into columnar Parquet file format')
parser.add_argument('--platforms', action='store', nargs='*',
help='Restrict to just these platforms')
parser.add_argument('--platforms_omit', action='store', nargs='*', default=[],
help='Restrict to all but these platforms')
parser.add_argument('--collect', action='store', default='name',
choices=['name', 'standard_name'],
help='The column to collect: name or standard_name')
parser.add_argument('--db', action='store', required=True,
help='Database alias, e.g. stoqs_canon_october2020')
parser.add_argument('-o', '--output', action='store', required=True,
help='Output file name')
parser.add_argument('--start', action='store', help='Start time in YYYYMMDDTHHMMSS format',
default='19000101T000000')
parser.add_argument('--end', action='store', help='End time in YYYYMMDDTHHMMSS format',
default='22000101T000000')
parser.add_argument('-v', '--verbose', type=int, choices=range(3),
action='store', default=0, const=1, nargs='?',
help="verbosity level: " + ', '.join(
[f"{i}: {v}" for i, v, in enumerate(('WARN', 'INFO', 'DEBUG'))]))
self.args = parser.parse_args()
self.commandline = ' '.join(sys.argv)
self.logger.setLevel(self._log_levels[self.args.verbose])
self.logger.debug(f"Using databases at DATABASE_URL ="
f" {os.environ['DATABASE_URL']}")
if __name__ == '__main__':
c = Columnar()
c.process_command_line()
c.pivot_table_to_parquet()
| gpl-3.0 |
gwpy/seismon | RfPrediction/StackedEnsemble_Rfamplitude_prediction.py | 2 | 11411 | # Stacked Ensemble RfAmp Prediction Model
# Multiple ML regressors are individually trained and then combined via meta-regressor.
# Hyperparameters are tuned via GridSearchCV
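# Conceptually the pipeline below reduces to (illustrative sketch only; the actual
# estimators, parameter grids and preprocessing are defined further down):
#   base  = [SVR(kernel='linear'), SVR(kernel='rbf'), LinearRegression(), Ridge(), Lasso()]
#   stack = StackingCVRegressor(regressors=base, meta_regressor=RandomForestRegressor(),
#                               use_features_in_secondary=True)
#   model = GridSearchCV(estimator=stack, param_grid=..., cv=5).fit(x_train, y_train)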
# coding: utf-8
from __future__ import division
import optparse
import numpy as np
import pandas as pd
import os
import sys
if not os.getenv("DISPLAY", None):
import matplotlib
matplotlib.use("agg", warn=False)
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.cm as cm
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.externals import joblib
from mlxtend.regressor import StackingCVRegressor
from sklearn.datasets import load_boston
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD, Adagrad, Adadelta, RMSprop, Adam
from keras import losses
from keras import callbacks
from keras.utils import plot_model
import pickle
__author__ = "Nikhil Mukund <[email protected]>, Michael Coughlin <michael.coughlin.ligo.org>"
__version__ = 1.0
__date__ = "11/26/2017"
def parse_commandline():
"""@parse the options given on the command-line.
"""
parser = optparse.OptionParser(usage=__doc__,version=__version__)
parser.add_option("-f", "--earthquakesFile", help="Seismon earthquakes file.",default ="/home/mcoughlin/Seismon/Predictions/L1O1O2_CMT_GPR/earthquakes.txt")
parser.add_option("-o", "--outputDirectory", help="output folder.",default ="/home/mcoughlin/Seismon/MLA/L1O1O2/")
parser.add_option("-r", "--runType", help="run type (original, lowlatency, cmt)", default ="lowlatency")
parser.add_option("-m", "--minMagnitude", help="Minimum earthquake magnitude.", default=5.0,type=float)
parser.add_option("-N", "--Nepoch", help="number of epochs", default =10, type=int)
parser.add_option("--doPlots", action="store_true", default=False)
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Run verbosely. (Default: False)")
opts, args = parser.parse_args()
# show parameters
if opts.verbose:
print >> sys.stderr, ""
print >> sys.stderr, "running network_eqmon..."
print >> sys.stderr, "version: %s"%__version__
print >> sys.stderr, ""
print >> sys.stderr, "***************** PARAMETERS ********************"
for o in opts.__dict__.items():
print >> sys.stderr, o[0]+":"
print >> sys.stderr, o[1]
print >> sys.stderr, ""
return opts
'''
0: earthquake gps time
1: earthquake mag
2: p gps time
3: s gps time
4: r (2 km/s)
5: r (3.5 km/s)
6: r (5 km/s)
7: predicted ground motion (m/s)
8: lower bounding time
9: upper bounding time
10: latitude
11: longitude
12: distance
13: depth (m)
14: azimuth (deg)
15: nodalPlane1_strike
16: nodalPlane1_rake
17: nodalPlane1_dip
18: momentTensor_Mrt
19: momentTensor_Mtp
20: momentTensor_Mrp
21: momentTensor_Mtt
22: momentTensor_Mrr
23: momentTensor_Mpp
24: peak ground velocity gps time
25: peak ground velocity (m/s)
26: peak ground acceleration gps time
27: peak ground acceleration (m/s^2)
28: peak ground displacement gps time
29: peak ground displacement (m)
30: Lockloss time
31: Detector Status
'''
# Parse command line
opts = parse_commandline()
outputDirectory = os.path.join(opts.outputDirectory,opts.runType)
if not os.path.isdir(outputDirectory):
os.makedirs(outputDirectory)
data = pd.read_csv(opts.earthquakesFile,delimiter=' ',header=None)
neqs, ncols = data.shape
if ncols == 32:
fileType = "seismon"
elif ncols == 27:
fileType = "usarray"
data = data.drop(data.columns[[24]], 1)
data = data.rename(index=int, columns={25: 24, 26: 25})
else:
print("I do not understand the file type...")
exit(0)
# find magnitudes greater than minimum magnitude
index = data[1] > opts.minMagnitude
data = data[:][index]
# find depth = 0
index = np.where(data[[13]] == 0)[0]
data.iloc[index,13] = 1.0
# shuffle data
data = data.reindex(np.random.permutation(data.index))
Mag_idx = 1
Dist_idx = 12
Depth_idx = 13
Rf_Amp_idx = 25
# Mag threshold
Rf_Amp_thresh = 1e-8;
index = data[Rf_Amp_idx] > Rf_Amp_thresh
data = data[:][index]
if opts.runType == "cmt":
# Select features
FeatSet_index = [1,10,11,12,13,14,15,16,17,18,19,20,21,22,23]
elif opts.runType == "lowlatency":
    #FeatSet_index = [1,7,10,11,12,13,14,15,16,17] # this smaller parameter set makes sense
    FeatSet_index = [1,10,11,12,13,14] # this smaller parameter set makes sense
elif opts.runType == "original":
FeatSet_index = [1,12,13] # Just Mag, Dist, Depth
else:
print("--runType must be original, lowlatency, and cmt")
exit(0)
Target_index = [Rf_Amp_idx]
# Artificially increase samples
data_temp = data
copy_num = 6
noise_level = 1e-2 # 1e-2
Rfamp_orig = data_temp[Target_index];
data_orig = data_temp
def boost_samples(x_samples,y_samples,copy_num=3,noise_level=1e-2):
# Artificially increase samples
data_x_temp = x_samples
data_y_temp = y_samples
for i in range(copy_num):
data_x_temp = np.vstack((data_x_temp,data_x_temp))
data_y_temp = np.vstack((data_y_temp,data_y_temp))
data_x_orig = data_x_temp
data_y_orig = data_y_temp
x1 = data_x_temp
x2 = np.random.randn(*data_x_temp.shape)*noise_level
x_samples_boosted = x1 + np.multiply(x1,x2)
y1 = data_y_temp
y2 = np.random.randn(*data_y_temp.shape)*noise_level
y_samples_boosted = y1 + np.multiply(y1,y2)
# Shuffle samples
#IDX = np.random.permutation(y_samples_boosted.index)
IDX = np.random.permutation(np.arange(0,len(y_samples_boosted)))
x_samples_boosted = x_samples_boosted[IDX,:]
y_samples_boosted = y_samples_boosted[IDX,:]
return x_samples_boosted, y_samples_boosted
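# Note on boost_samples(): each pass of the loop stacks the arrays on themselves, so the
# boosted set has 2**copy_num times the original rows (copy_num=6 -> 64x), each entry
# jittered by multiplicative noise of relative size ~noise_level. A minimal sanity check
# (illustrative only):
#   xb, yb = boost_samples(x_train, y_train, copy_num=6, noise_level=1e-2)
#   assert xb.shape[0] == 2**6 * x_train.shape[0]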
data = data_temp
# Take Log10 of certain features (Mag, Dist, Depth)
data[[Dist_idx, Depth_idx]] = np.log10(data[[Dist_idx, Depth_idx]])
data[Target_index] = np.log10(data[Target_index])
X = np.asarray(data[FeatSet_index])
Y = np.asarray(data[Target_index])
# Normalize samples
x_scaler = preprocessing.MinMaxScaler()
#x_scaler = preprocessing.data.QuantileTransformer()
X = x_scaler.fit_transform(X)
y_scaler = preprocessing.MinMaxScaler()
#y_scaler = preprocessing.data.QuantileTransformer()
Y = y_scaler.fit_transform(Y)
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3,random_state=42)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.3,random_state=42)
# boost_samples + normalize + shuffle them
TUPLE1 = boost_samples(x_train,y_train,copy_num,noise_level)
TUPLE2 = boost_samples(x_val,y_val,copy_num,noise_level)
TUPLE3 = boost_samples(x_test,y_test,copy_num,noise_level)
x_train = TUPLE1[0]
y_train = TUPLE1[1]
x_val = TUPLE2[0]
y_val = TUPLE2[1]
x_test = TUPLE3[0]
y_test = TUPLE3[1]
#############################################
# Construct Stacked Ensemble Model #
#############################################
RANDOM_SEED = 42
ridge = Ridge()
lasso = Lasso()
svr_lin = SVR(kernel='linear')
svr_rbf = SVR(kernel='rbf')
lr = LinearRegression()
rf = RandomForestRegressor(random_state=RANDOM_SEED)
np.random.seed(RANDOM_SEED)
regressors = [svr_lin, svr_rbf, lr, ridge, lasso]
stack = StackingCVRegressor(regressors=regressors,
meta_regressor=rf,
use_features_in_secondary=True)
'''params = {'lasso__alpha': [0.1, 1.0, 10.0],
'ridge__alpha': [0.1, 1.0, 10.0],
'svr__C': [0.1, 1.0, 10.0],
'meta-svr__C': [0.1, 1.0, 10.0, 100.0],
'meta-svr__gamma': [0.1, 1.0, 10.0]}
params = {'lasso__alpha': [0.1, 1.0, 10.0],
'ridge__alpha': [0.1, 1.0, 10.0]}'''
model = GridSearchCV(
estimator=stack,
param_grid={
'lasso__alpha': [x/5.0 for x in range(1, 10)],
'ridge__alpha': [x/20.0 for x in range(1, 10)],
'meta-randomforestregressor__n_estimators': [10, 100]
},
cv=5,
refit=True,
verbose=10,
n_jobs=8,
)
###################################################
model.fit(x_train, y_train.ravel())
print("Best: %f using %s" % (grid.best_score_, grid.best_params_))
###################################################
y_pred = model.predict(x_test)
y_pred = np.expand_dims(y_pred,axis=1)
# Rescale Back
y_pred = 10**y_scaler.inverse_transform(y_pred)
y_test = 10**y_scaler.inverse_transform(y_test)
# Reject test samples below certain threshold
Rf_thresh = 0.5*1e-7 # 0.5*1e-6
ijk = y_test > Rf_thresh
y_test = y_test[ijk]
y_pred = y_pred[ijk]
x_test = x_test[ijk.flatten(),:]
# Add bias
#y_pred = y_pred + 0.1*y_pred
# sort results in Ascending order
y_test_sort = np.sort(y_test,axis=0)
y_pred_sort = y_pred[np.argsort(y_test,axis=0)]
## Percentage within the specified factor
Fac = 2
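# The block below counts predictions that fall within a factor of Fac of the measured
# value: over-estimates (y_pred/y_test >= 1) must satisfy y_pred <= Fac*y_test, and
# under-estimates must satisfy y_pred >= y_test/Fac.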
IDX = y_pred_sort/(y_test_sort+np.finfo(float).eps) >= 1
K = y_pred_sort[IDX]
Q = y_test_sort[IDX]
L = y_pred_sort[~IDX]
M = y_test_sort[~IDX]
Upper_indices = [i for i, x in enumerate(K <= Fac*Q) if x == True]
Lower_indices = [i for i, x in enumerate(L >= M/Fac) if x == True]
Percent_within_Fac = (len(Upper_indices) + len(Lower_indices))/len(y_pred)*100
print("Percentage captured within a factor of {} = {:.2f}".format(Fac,Percent_within_Fac))
Diff = abs(y_pred_sort - y_test_sort)
# Errorbar values
yerr_lower = y_test_sort - y_test_sort/Fac
yerr_upper = Fac*y_test_sort - y_test_sort
idx = np.arange(0,len(y_test_sort))
if opts.doPlots:
font = {'weight' : 'bold',
'size' : 15}
plt.rc('font', **font)
plt.rc('legend',**{'fontsize':15})
plt.figure(figsize=(10,8))
plt.style.use('dark_background')
#plt.style.use('ggplot')
diff_plt = plt.scatter(idx,Diff,color='lightgreen',alpha=0.1)
errorbar_plt = plt.errorbar(idx,y_test_sort,yerr=[yerr_lower,yerr_upper], alpha=0.05 ,color='lightgrey')
actual_plt = plt.scatter(idx,y_test_sort,color='#1f77b4',alpha=0.9)
idx2 = np.arange(0,len(y_pred_sort))
pred_plt = plt.scatter(idx2,y_pred_sort,color='#d62728',alpha=0.2)
plt.yscale('log')
plt.grid()
plt.ylim([1e-7, 1e-3])
#plt.ylim([0, 1])
#plt.ylabel('Rf Amplitude (m/s) \n (Normalized to 1)',fontsize=25)
plt.ylabel('Rf Amplitude (m/s) ',fontsize=25)
plt.xlabel('Samples',fontsize=25)
plt.title("Percentage captured within a factor of {} = {:.2f}".format(Fac,Percent_within_Fac))
legend_plt = plt.legend([pred_plt,actual_plt, diff_plt],['Prediction', 'Actual', 'Difference'],loc=2,markerscale=2., scatterpoints=100)
plt.autoscale(enable=True, axis='x', tight=True)
plt.grid(linestyle=':')
plt.savefig(os.path.join(outputDirectory,'performance.pdf'),bbox_inches='tight')
plt.close()
# Save Model
# serialize model & pickle
pickle.dump(model, open("%s/model.p"%outputDirectory, "wb"))
print("Saved model to disk")
'''
# Load Saved Model
# load pickle
pickle_file = open('%s/model.p'%outputDirectory, 'rb')
loaded_model_pickle = pickle.load(pickle_file)
print("Loaded model from disk")
'''
| gpl-3.0 |
hiuwo/acq4 | acq4/analysis/tools/Fitting.py | 1 | 36006 | #!/usr/bin/env python
"""
Python class wrapper for data fitting.
Includes the following external methods:
getFunctions returns the list of function names (dictionary keys)
FitRegion performs the fitting
Note that FitRegion will plot on top of the current data using MPlots routines
if the current curve and the current plot instance are passed.
"""
# January, 2009
# Paul B. Manis, Ph.D.
# UNC Chapel Hill
# Department of Otolaryngology/Head and Neck Surgery
# Supported by NIH Grants DC000425-22 and DC004551-07 to PBM.
# Copyright Paul Manis, 2009
#
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Additional Terms:
The author(s) would appreciate that any modifications to this program, or
corrections of errors, be reported to the principal author, Paul Manis, at
[email protected], with the subject line "PySounds Modifications".
Note: This program also relies on the TrollTech Qt libraries for the GUI.
You must obtain these libraries from TrollTech directly, under their license
to use the program.
"""
import sys
import numpy
import scipy
try:
import openopt
HAVE_OPENOPT = True
except ImportError:
HAVE_OPENOPT = False
print "There was an error importing openopt. Continuing...."
import ctypes
import numpy.random
#from numba import autojit
usingMPlot = False
if usingMPlot:
import MPlot # we include plotting as part of the fitting
def debug_trace():
'''Set a tracepoint in the Python debugger that works with Qt'''
    pyqt = 'PyQt4' in sys.modules  # only use the Qt input-hook workaround when PyQt4 is loaded
    if pyqt:
        from PyQt4.QtCore import pyqtRemoveInputHook
    from pdb import set_trace
    if pyqt:
        pyqtRemoveInputHook()
set_trace()
class Fitting():
# dictionary contains:
# name of function: function call, initial parameters, iterations, plot color, then x and y for testing
    # target values, names of parameters, constant values, and derivative function if needed.
#
def __init__(self):
self.fitfuncmap = {
'exp0' : (self.exp0eval, [0.0, 20.0], 2000, 'k', [0, 100, 1.],
[1.0, 5.0], ['A0', 'tau'], None, None),
'exp1' : (self.expeval, [0.0, 0.0, 20.0], 2000, 'k', [0, 100, 1.],
[0.5, 1.0, 5.0], ['DC', 'A0', 'tau'], None, self.expevalprime),
'expsum' : (self.expsumeval, [0.0, -0.5, 200.0, -0.25, 450.0], 500000, 'k', [0, 1000, 1.],
[0.0, -1.0, 150.0, -0.25, 350.0], ['DC', 'A0', 'tau0', 'A1', 'tau1'], None, None),
'expsum2' : (self.expsumeval2, [0., -0.5, -0.250], 50000, 'k', [0, 1000, 1.],
[0., -0.5, -0.25], ['A0', 'A1'], [5., 20.], None),
'exp2' : (self.exp2eval, [0.0, -0.5, 200.0, -0.25, 450.0], 500000, 'k', [0, 1000, 1.],
[0.0, -1.0, 150.0, -0.25, 350.0], ['DC', 'A0', 'tau0', 'A1', 'tau1'], None, None),
'exppow' : (self.exppoweval, [0.0, 1.0, 100, ], 2000, 'k', [0, 100, 0.1],
[0.0, 1.0, 100.0], ['DC', 'A0', 'tau'], None, None),
'exppulse' : (self.expPulse, [3.0, 2.5, 0.2, 2.5, 2.0, 0.5], 2000, 'k', [0, 10, 0.3],
[0.0, 0., 0.75, 4., 1.5, 1.], ['DC', 't0', 'tau1', 'tau2', 'amp', 'width'], None, None),
'boltz' : (self.boltzeval, [0.0, 1.0, -50.0, -5.0], 5000, 'r', [-130., -30., 1.],
[0.00, 0.010, -100.0, 7.0], ['DC', 'A0', 'x0', 'k'], None, None),
'gauss' : (self.gausseval, [1.0, 0.0, 0.5], 2000, 'y', [-10., 10., 0.2],
[1.0, 1.0, 2.0], ['A', 'mu', 'sigma'], None, None),
'line' : (self.lineeval, [1.0, 0.0], 500, 'r', [-10., 10., 0.5],
[0.0, 2.0], ['m', 'b'], None, None),
'poly2' : (self.poly2eval, [1.0, 1.0, 0.0], 500, 'r', [0, 100, 1.],
[0.5, 1.0, 5.0], ['a', 'b', 'c'], None, None),
'poly3' : (self.poly3eval, [1.0, 1.0, 1.0, 0.0], 1000, 'r', [0., 100., 1.],
[0.5, 1.0, 5.0, 2.0], ['a', 'b', 'c', 'd'], None, None),
'poly4' : (self.poly4eval, [1.0, 1.0, 1.0, 1.0, 0.0], 1000, 'r', [0., 100., 1.],
[0.1, 0.5, 1.0, 5.0, 2.0], ['a', 'b', 'c', 'd', 'e'], None, None),
'sin' : (self.sineeval, [-1., 1.0, 4.0, 0.0], 1000, 'r', [0., 100., 0.2],
[0.0, 1.0, 9.0, 0.0], ['DC', 'A', 'f', 'phi'], None, None),
'boltz2' : (self.boltzeval2, [0.0, 0.5, -50.0, 5.0, 0.5, -20.0, 3.0], 1200, 'r',
[-100., 50., 1.], [0.0, 0.3, -45.0, 4.0, 0.7, 10.0, 12.0],
['DC', 'A1', 'x1', 'k1', 'A2', 'x2', 'k2'], None, None),
'taucurve' : (self.taucurve, [50., 300.0, 60.0, 10.0, 8.0, 65.0, 10.0], 50000, 'r',
[-150., 50., 1.], [0.0, 237.0, 60.0, 12.0, 17.0, 60.0, 14.0],
['DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'], None, self.taucurveder),
}
self.fitSum2Err = 0
def getFunctions(self):
return(self.fitfuncmap.keys())
def exp0eval(self, p, x, y=None, C = None, sumsq = False):
"""
Exponential function with an amplitude and 0 offset
"""
yd = p[0] * numpy.exp(-x/p[1])
if y is None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def expsumeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Sum of two exponentials with independent time constants and amplitudes,
and a DC offset
"""
yd = p[0] + (p[1]* numpy.exp(-x/p[2])) + (p[3]*numpy.exp(-x/p[4]))
if y is None:
return yd
else:
yerr = y - yd
if weights is not None:
yerr = yerr * weights
if sumsq is True:
return numpy.sum(yerr**2)
else:
return yerr
def expsumeval2(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Sum of two exponentials, with predefined time constants , allowing
only the amplitudes and DC offset to vary
"""
yd = p[0] + (p[1]* numpy.exp(-x/C[0])) + (p[2]*numpy.exp(-x/C[1]))
if y is None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def expeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Exponential with offset
"""
yd = p[0] + p[1] * numpy.exp(-x/p[2])
# print yd.shape
# print y.shape
if y is None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def expevalprime(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Derivative for exponential with offset
"""
ydp = p[1] * numpy.exp(-x/p[2])/(p[2]*p[2])
yd = p[0] + p[1] * numpy.exp(-x/p[2])
        # print y
if y is None:
return (yd, ydp)
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def exppoweval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
        Single exponential function, rising to a power
"""
if C is None:
cx = 1.0
else:
cx = C[0]
yd = p[0] + p[1] * (1.0-numpy.exp(-x/p[2]))**cx
if y is None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def exp2eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
For fit to activation currents...
"""
yd = p[0] + (p[1] * (1.0 - numpy.exp(-x/p[2]))**2.0 ) + (p[3] * (1.0 - numpy.exp(-x/p[4])))
if y == None:
return yd
else:
if sumsq is True:
ss = numpy.sqrt(numpy.sum((y - yd)**2.0))
# if p[4] < 3.0*p[2]:
# ss = ss*1e6 # penalize them being too close
return ss
else:
return y - yd
# @autojit
def expPulse(self, p, x, y=None, C=None, sumsq = False, weights = None):
"""Exponential pulse function (rising exponential with optional variable-length
plateau followed by falling exponential)
Parameter p is [yOffset, t0, tau1, tau2, amp, width]
"""
yOffset, t0, tau1, tau2, amp, width = p
yd = numpy.empty(x.shape)
yd[x<t0] = yOffset
m1 = (x>=t0)&(x<(t0+width))
m2 = (x>=(t0+width))
x1 = x[m1]
x2 = x[m2]
yd[m1] = amp*(1-numpy.exp(-(x1-t0)/tau1))+yOffset
amp2 = amp*(1-numpy.exp(-width/tau1)) ## y-value at start of decay
yd[m2] = ((amp2)*numpy.exp(-(x2-(width+t0))/tau2))+yOffset
if y == None:
return yd
else:
if sumsq is True:
ss = numpy.sqrt(numpy.sum((y-yd)**2.0))
return ss
else:
return y-yd
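    # Piecewise form implemented by expPulse above (for reference):
    #   y(x) = yOffset                                                 for x <  t0
    #   y(x) = yOffset + amp*(1 - exp(-(x - t0)/tau1))                 for t0 <= x < t0 + width
    #   y(x) = yOffset + amp*(1 - exp(-width/tau1))*exp(-(x - (t0 + width))/tau2)
    #                                                                  for x >= t0 + width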
def boltzeval(self,p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0] + (p[1]-p[0])/(1.0 + numpy.exp((x-p[2])/p[3]))
if y == None:
return yd
else:
if sumsq is True:
return numpy.sqrt(numpy.sum((y - yd)**2.0))
else:
return y - yd
def boltzeval2(self,p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0] + p[1]/(1 + numpy.exp((x-p[2])/p[3])) + p[4]/(1 + numpy.exp((x-p[5])/p[6]))
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def gausseval(self,p, x, y=None, C = None, sumsq = False, weights=None):
yd = (p[0]/(p[2]*numpy.sqrt(2.0*numpy.pi)))*numpy.exp(-((x - p[1])**2.0)/(2.0*(p[2]**2.0)))
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def lineeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x + p[1]
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def poly2eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x**2.0 + p[1]*x + p[2]
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def poly3eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x**3.0 + p[1]*x**2.0 + p[2]*x +p[3]
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def poly4eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x**4.0 + p[1]*x**3.0 + p[2]*x**2.0 + p[3]*x +p[4]
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def sineeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0] + p[1]*numpy.sin((x*2.0*numpy.pi/p[2])+p[3])
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def taucurve(self, p, x, y=None, C = None, sumsq=True, weights=None):
"""
HH-like description of activation/inactivation function
'DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'
"""
yd = p[0] + 1.0/(p[1]*numpy.exp((x+p[2])/p[3]) +p[4]*numpy.exp(-(x+p[5])/p[6]))
if y == None:
return yd
else:
if sumsq is True:
return numpy.sqrt(numpy.sum((y - yd)**2))
else:
return y - yd
def taucurveder(self, p, x):
"""
Derivative for taucurve
'DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'
"""
y = -(p[1]*numpy.exp((p[2] + x)/p[3])/p[3] - p[4]*numpy.exp(-(p[5] + x)/p[6])/p[6])/(p[1]*numpy.exp((p[2] + x)/p[3]) +
p[4]*numpy.exp(-(p[5] + x)/p[6]))**2.0
# print 'dy: ', y
return y
def getClipData(self, x, y, t0, t1):
"""
Return the values in y that match the x range in tx from
t0 to t1. x must be monotonic increasing or decreasing.
Allow for reverse ordering. """
it0 = (numpy.abs(x-t0)).argmin()
it1 = (numpy.abs(x-t1)).argmin()
if it0 > it1:
t = it1
it1 = it0
it0 = t
return(x[it0:it1], y[it0:it1])
def FitRegion(self, whichdata, thisaxis, tdat, ydat, t0 = None, t1 = None,
fitFunc = 'exp1', fitFuncDer = None, fitPars = None, fixedPars = None,
fitPlot = None, plotInstance = None, dataType= 'xy', method = None,
bounds=None, weights=None, constraints=()):
"""
**Arguments**
============= ===================================================
whichdata
thisaxis
tdat
ydat
t0 (optional) Minimum of time data - determined from tdat if left unspecified
t1 (optional) Maximum of time data - determined from tdat if left unspecified
fitFunc (optional) The function to fit the data to (as defined in __init__). Default is 'exp1'.
fitFuncDer (optional) default=None
fitPars (optional) Initial fit parameters. Use the values defined in self.fitfuncmap if unspecified.
fixedPars (optional) Fixed parameters to pass to the function. Default=None
fitPlot (optional) default=None
plotInstance (optional) default=None
dataType (optional) Options are ['xy', 'blocks']. Default='xy'
method (optional) Options are ['curve_fit', 'fmin', 'simplex', 'Nelder-Mead', 'bfgs', 'TNC', 'SLSQP', 'COBYLA', 'L-BFGS-B', 'openopt']. Default='leastsq'
bounds (optional) default=None
weights (optional) default=None
constraints (optional) default=()
============= ===================================================
To call with tdat and ydat as simple arrays:
        FitRegion(1, 0, tdat, ydat, fitFunc = 'exp1')
e.g., the first argument should be 1, but this axis is ignored if datatype is 'xy'
"""
self.fitSum2Err = 0.0
if t0 == t1:
if plotInstance is not None and usingMPlot:
(x, y) = plotInstance.getCoordinates()
t0 = x[0]
t1 = x[1]
if t1 is None:
t1 = numpy.max(tdat)
if t0 is None:
t0 = numpy.min(tdat)
func = self.fitfuncmap[fitFunc]
if func is None:
print "FitRegion: unknown function %s" % (fitFunc)
return
xp = []
xf = []
yf = []
yn = []
tx = []
names = func[6]
if fitPars is None:
fpars = func[1]
else:
fpars = fitPars
if method == 'simplex': # remap calls if needed for newer versions of scipy (>= 0.11)
method = 'Nelder-Mead'
if ydat.ndim == 1 or dataType == 'xy' or dataType == '2d': # check if 1-d, then "pretend" its only a 1-element block
nblock = 1
else:
nblock = ydat.shape[0] # otherwise, this is the number of traces in the block
# print 'datatype: ', dataType
# print 'nblock: ', nblock
# print 'whichdata: ', whichdata
# for block in range(nblock):
for record in whichdata:
if dataType == 'blocks':
(tx, dy) = self.getClipData(tdat[block], ydat[block][record, thisaxis, :], t0, t1)
else:
(tx, dy) = self.getClipData(tdat, ydat[record,:], t0, t1)
# print 'Fitting.py: block, type, Fit data: ', block, dataType
# print tx.shape
# print dy.shape
yn.append(names)
if not any(tx):
continue # no data in the window...
ier = 0
#
# Different optimization methods are included here. Not all have been tested fully with
# this wrapper.
#
if method is None or method == 'leastsq': # use standard leastsq, no bounds
plsq, cov, infodict, mesg, ier = scipy.optimize.leastsq(func[0], fpars,
args=(tx.astype('float64'), dy.astype('float64'), fixedPars),
full_output = 1, maxfev = func[2])
if ier > 4:
print "optimize.leastsq error flag is: %d" % (ier)
print mesg
elif method == 'curve_fit':
print fpars
print fixedPars
plsq, cov = scipy.optimize.curve_fit(func[0], tx.astype('float64'), dy.astype('float64'), p0=fpars)
ier = 0
elif method in ['fmin', 'simplex', 'Nelder-Mead', 'bfgs', 'TNC', 'SLSQP', 'COBYLA', 'L-BFGS-B']: # use standard wrapper from scipy for those routintes
res = scipy.optimize.minimize(func[0], fpars, args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True),
method=method, jac=None, hess=None, hessp=None, bounds=bounds, constraints=constraints, tol=None, callback=None,
options={'maxiter': func[2], 'disp': False })
plsq = res.x
#print " method:", method
#print " bounds:", bounds
#print " result:", plsq
# next section is replaced by the code above - kept here for reference if needed...
# elif method == 'fmin' or method == 'simplex':
# plsq = scipy.optimize.fmin(func[0], fpars, args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True),
# maxfun = func[2]) # , iprint=0)
# ier = 0
# elif method == 'bfgs':
# plsq, cov, infodict = scipy.optimize.fmin_l_bfgs_b(func[0], fpars, fprime=func[8],
# args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True, weights),
# maxfun = func[2], bounds = bounds,
# approx_grad = True) # , disp=0, iprint=-1)
elif method == 'openopt': # use OpenOpt's routines - usually slower, but sometimes they converge better
if not HAVE_OPENOPT:
raise Exception("Requested openopt fitting method but openopt is not installed.")
if bounds is not None:
# unpack bounds
lb = [y[0] for y in bounds]
ub = [y[1] for y in bounds]
fopt = openopt.DFP(func[0], fpars, tx, dy, df = fitFuncDer, lb=lb, ub=ub)
# fopt.df = func[8]
r = fopt.solve('nlp:ralg', plot=0, iprint = 10)
plsq = r.xf
ier = 0
else:
fopt = openopt.DFP(func[0], fpars, tx, dy, df = fitFuncDer)
print func[8]
# fopt.df = func[7]
fopt.checkdf()
r = fopt.solve('nlp:ralg', plot=0, iprint = 10)
plsq = r.xf
ier = 0
else:
print 'method %s not recognized, please check Fitting.py' % (method)
return
xfit = numpy.arange(min(tx), max(tx), (max(tx)-min(tx))/100.0)
yfit = func[0](plsq, xfit, C=fixedPars)
yy = func[0](plsq, tx, C=fixedPars) # calculate function
self.fitSum2Err = numpy.sum((dy - yy)**2)
            if usingMPlot and fitPlot != None and plotInstance != None:
                self.FitPlot(xFit = xfit, yFit = yfit, fitFunc = fitFunc,
                             fitPars = plsq, fitPlot = fitPlot, plotInstance = plotInstance)
xp.append(plsq) # parameter list
xf.append(xfit) # x plot point list
yf.append(yfit) # y fit point list
# print xp
# print len(xp)
return(xp, xf, yf, yn) # includes names with yn and range of tx
def FitPlot(self, xFit = None, yFit = None, fitFunc = 'exp1',
fitPars = None, fixedPars = None, fitPlot=None, plotInstance = None,
color=None):
""" Plot the fit data onto the fitPlot with the specified "plot Instance".
if there is no xFit, or some parameters are missing, we just return.
if there is xFit, but no yFit, then we try to compute the fit with
what we have. The plot is superimposed on the specified "fitPlot" and
the color is specified by the function color in the fitPars list.
"""
if xFit is None or fitPars is None:
return
func = self.fitfuncmap[fitFunc]
if color is None:
fcolor = func[3]
else:
fcolor = color
        if yFit is None:
            yFit = []
            for k in range(0, len(fitPars)):
                yFit.append(func[0](fitPars[k], xFit[k], C=fixedPars))
        if plotInstance is None or fitPlot is None:
            return(yFit)
        for k in range(0, len(fitPars)):
            plotInstance.PlotLine(fitPlot, xFit[k], yFit[k], color = fcolor)
        return(yFit)
def getFitErr(self):
""" Return the fit error for the most recent fit
"""
return(self.fitSum2Err)
def expfit(self, x, y):
""" find best fit of a single exponential function to x and y
using the chebyshev polynomial approximation.
returns (DC, A, tau) for fit.
Perform a single exponential fit to data using Chebyshev polynomial method.
Equation fit: y = a1 * exp(-x/tau) + a0
Call: [a0 a1 tau] = expfit(x,y);
Calling parameter x is the time base, y is the data to be fit.
Returned values: a0 is the offset, a1 is the amplitude, tau is the time
constant (scaled in units of x).
Relies on routines chebftd to generate polynomial coeffs, and chebint to compute the
coefficients for the integral of the data. These are now included in this
.py file source.
This version is based on the one in the pClamp manual: HOWEVER, since
I use the bounded [-1 1] form for the Chebyshev polynomials, the coefficients are different,
and the resulting equation for tau is different. I manually optimized the tau
estimate based on fits to some simulated noisy data. (Its ok to use the whole range of d1 and d0
when the data is clean, but only the first few coeffs really hold the info when
the data is noisy.)
NOTE: The user is responsible for making sure that the passed data is appropriate,
e.g., no large noise or electronic transients, and that the time constants in the
data are adequately sampled.
To do a double exp fit with this method is possible, but more complex.
It would be computationally simpler to try breaking the data into two regions where
the fast and slow components are dominant, and fit each separately; then use that to
seed a non-linear fit (e.g., L-M) algorithm.
Final working version 4/13/99 Paul B. Manis
converted to Python 7/9/2009 Paul B. Manis. Seems functional.
"""
n = 30; # default number of polynomials coeffs to use in fit
a = numpy.amin(x)
b = numpy.amax(x)
d0 = self.chebftd(a, b, n, x, y) # coeffs for data trace...
d1 = self.chebint(a, b, d0, n) # coeffs of integral...
tau = -numpy.mean(d1[2:3]/d0[2:3])
try:
g = numpy.exp(-x/tau)
except:
g = 0.0
dg = self.chebftd(a, b, n, x, g) # generate chebyshev polynomial for unit exponential function
# now estimate the amplitude from the ratios of the coeffs.
a1 = self.estimate(d0, dg, 1)
a0 = (d0[0]-a1*dg[0])/2.0 # get the offset here
return(a0, a1, tau)#
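    # Minimal usage sketch for expfit() (synthetic data, illustrative only):
    #   x = numpy.arange(0, 100.0, 0.1)
    #   y = 5.0 - 2.5*numpy.exp(-x/5.0)
    #   a0, a1, tau = Fitting().expfit(x, y)   # expect roughly a0 = 5, a1 = -2.5, tau = 5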
def estimate(self, c, d, m):
""" compute optimal estimate of parameter from arrays of data """
n = len(c)
a = sum(c[m:n]*d[m:n])/sum(d[m:n]**2.0)
return(a)
# note : the following routine is a bottleneck. It should be coded in C.
def chebftd(self, a, b, n, t, d):
""" Chebyshev fit; from Press et al, p 192.
matlab code P. Manis 21 Mar 1999
"Given a function func, lower and upper limits of the interval [a,b], and
a maximum degree, n, this routine computes the n coefficients c[1..n] such that
        func(x) ~= [sum over k=1..n of c_k*T_(k-1)(y)] - c_0/2, where y = (x - 0.5*(b+a))/(0.5*(b-a))
This routine is to be used with moderately large n (30-50) the array of c's is
subsequently truncated at the smaller value m such that cm and subsequent
terms are negligible."
This routine is modified so that we find close points in x (data array) - i.e., we find
the best Chebyshev terms to describe the data as if it is an arbitrary function.
t is the x data, d is the y data...
"""
bma = 0.5*(b-a)
bpa = 0.5*(b+a)
inc = t[1]-t[0]
f = numpy.zeros(n)
for k in range(0, n):
y = numpy.cos(numpy.pi*(k+0.5)/n)
pos = int(0.5+(y*bma+bpa)/inc)
if pos < 0:
pos = 0
if pos >= len(d)-2:
pos = len(d)-2
try:
f[k]= d[pos+1]
except:
print "error in chebftd: k = %d (len f = %d) pos = %d, len(d) = %d\n" % (k, len(f), pos, len(d))
print "you should probably make sure this doesn't happen"
fac = 2.0/n
c=numpy.zeros(n)
for j in range(0, n):
sum=0.0
for k in range(0, n):
sum = sum + f[k]*numpy.cos(numpy.pi*j*(k+0.5)/n)
c[j]=fac*sum
return(c)
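    # The double loop above is O(n^2) in pure Python; an equivalent vectorized form of the
    # coefficient computation (untested sketch) would be:
    #   j = numpy.arange(n)
    #   c = (2.0/n) * numpy.cos(numpy.pi*numpy.outer(j, j + 0.5)/n).dot(f)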
def chebint(self, a, b, c, n):
""" Given a, b, and c[1..n] as output from chebft or chebftd, and given n,
the desired degree of approximation (length of c to be used),
this routine computes cint, the Chebyshev coefficients of the
integral of the function whose coeffs are in c. The constant of
integration is set so that the integral vanishes at a.
Coded from Press et al, 3/21/99 P. Manis (Matlab)
Python translation 7/8/2009 P. Manis
"""
sum = 0.0
fac = 1.0
con = 0.25*(b-a) # factor that normalizes the interval
cint = numpy.zeros(n)
for j in range(1,n-2):
cint[j]=con*(c[j-1]-c[j+1])/j
sum = sum + fac * cint[j]
fac = - fac
cint[n-1] = con*c[n-2]/(n-1)
sum = sum + fac*cint[n-1]
cint[0] = 2.0*sum # set constant of integration.
return(cint)
# routine to flatten an array/list.
#
def flatten(self, l, ltypes=(list, tuple)):
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
if not len(l):
break
else:
l[i:i+1] = list(l[i])
i += 1
return l
# flatten()
# run tests if we are "main"
if __name__ == "__main__":
# import matplotlib.pyplot as pyplot
import timeit
import Fitting
import matplotlib as MP
MP.use('Qt4Agg')
################## Do not modify the following code
# sets up matplotlib with sans-serif plotting...
import matplotlib.gridspec as GS
# import mpl_toolkits.axes_grid1.inset_locator as INSETS
# #import inset_axes, zoomed_inset_axes
# import mpl_toolkits.axes_grid1.anchored_artists as ANCHOR
# # import AnchoredSizeBar
stdFont = 'Arial'
import matplotlib.pyplot as pylab
pylab.rcParams['text.usetex'] = True
pylab.rcParams['interactive'] = False
pylab.rcParams['font.family'] = 'sans-serif'
pylab.rcParams['font.sans-serif'] = 'Arial'
pylab.rcParams['mathtext.default'] = 'sf'
pylab.rcParams['figure.facecolor'] = 'white'
# next setting allows pdf font to be readable in Adobe Illustrator
pylab.rcParams['pdf.fonttype'] = 42
pylab.rcParams['text.dvipnghack'] = True
##################### to here (matplotlib stuff - touchy!
Fits = Fitting.Fitting()
# x = numpy.arange(0, 100.0, 0.1)
# y = 5.0-2.5*numpy.exp(-x/5.0)+0.5*numpy.random.randn(len(x))
# (dc, aFit,tauFit) = Fits.expfit(x,y)
# yf = dc + aFit*numpy.exp(-x/tauFit)
# pyplot.figure(1)
# pyplot.plot(x,y,'k')
# pyplot.hold(True)
# pyplot.plot(x, yf, 'r')
# pyplot.show()
exploreError = False
if exploreError is True:
# explore the error surface for a function:
func = 'exppulse'
f = Fits.fitfuncmap[func]
p1range = numpy.arange(0.1, 5.0, 0.1)
p2range = numpy.arange(0.1, 5.0, 0.1)
err = numpy.zeros((len(p1range), len(p2range)))
x = numpy.array(numpy.arange(f[4][0], f[4][1], f[4][2]))
C = None
if func == 'expsum2':
C = f[7]
# check exchange of tau1 ([1]) and width[4]
C = None
yOffset, t0, tau1, tau2, amp, width = f[1] # get inital parameters
y0 = f[0](f[1], x, C=C)
noise = numpy.random.random(y0.shape) - 0.5
y0 += 0.0* noise
sh = err.shape
yp = numpy.zeros((sh[0], sh[1], len(y0)))
for i, p1 in enumerate(p1range):
tau1t = tau1*p1
for j, p2 in enumerate(p2range):
ampt = amp*p2
pars = (yOffset, t0, tau1t, tau2, ampt, width) # repackage
err[i,j] = f[0](pars, x, y0, C=C, sumsq = True)
yp[i,j] = f[0](pars, x, C=C, sumsq = False)
pylab.figure()
CS=pylab.contour(p1range*tau1, p2range*width, err, 25)
CB = pylab.colorbar(CS, shrink=0.8, extend='both')
pylab.figure()
for i, p1 in enumerate(p1range):
for j, p2 in enumerate(p2range):
pylab.plot(x, yp[i,j])
pylab.plot(x, y0, 'r-', linewidth=2.0)
# run tests for each type of fit, return results to compare parameters
cons = None
bnds = None
signal_to_noise = 100000.
for func in Fits.fitfuncmap:
if func != 'exppulse':
continue
print "\nFunction: %s\nTarget: " % (func),
f = Fits.fitfuncmap[func]
for k in range(0,len(f[1])):
print "%f " % (f[1][k]),
print "\nStarting: ",
for k in range(0,len(f[5])):
print "%f " % (f[5][k]),
# nstep = 500.0
# if func == 'sin':
# nstep = 100.0
x = numpy.array(numpy.arange(f[4][0], f[4][1], f[4][2]))
C = None
if func == 'expsum2':
C = f[7]
if func == 'exppulse':
C = f[7]
y = f[0](f[1], x, C=C)
yd = numpy.array(y)
noise = numpy.random.normal(0, 0.1, yd.shape)
my = numpy.amax(yd)
#yd = yd + sigmax*0.05*my*(numpy.random.random_sample(shape(yd))-0.5)
yd += noise*my/signal_to_noise
testMethod = 'SLSQP'
if func == 'taucurve':
continue
bounds=[(0., 100.), (0., 1000.), (0.0, 500.0), (0.1, 50.0),
(0., 1000), (0.0, 500.0), (0.1, 50.0)]
(fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
elif func == 'boltz':
continue
bounds = [(-0.5,0.5), (0.0, 20.0), (-120., 0.), (-20., 0.)]
(fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
elif func == 'exp2':
bounds=[(-0.001, 0.001), (-5.0, 0.), (1.0, 500.0), (-5.0, 0.0),
(1., 10000.)]
(fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
elif func == 'exppulse':
# set some constraints to the fitting
# yOffset, tau1, tau2, amp, width = f[1] # order of constraings
dt = numpy.mean(numpy.diff(x))
bounds = [(-5, 5), (-15., 15.), (-2, 2.0), (2-10, 10.), (-5, 5.), (0., 5.)]
            # example for constraints:
# cons = ({'type': 'ineq', 'fun': lambda x: x[4] - 3.0*x[2]},
# {'type': 'ineq', 'fun': lambda x: - x[4] + 12*x[2]},
# {'type': 'ineq', 'fun': lambda x: x[2]},
# {'type': 'ineq', 'fun': lambda x: - x[4] + 2000},
# )
cons = ({'type': 'ineq', 'fun': lambda x: x[3] - x[2] }, # tau1 < tau2
)
C = None
tv = f[5]
initialgr = f[0](f[5], x, None )
(fpar, xf, yf, names) = Fits.FitRegion(
numpy.array([1]), 0, x, yd, fitFunc = func, fixedPars = C, constraints = cons, bounds = bounds, method=testMethod)
# print xf
# print yf
# print fpar
# print names
else:
(fpar, xf, yf, names) = Fits.FitRegion(
numpy.array([1]), 0, x, yd, fitFunc = func, fixedPars = C, constraints = cons, bounds = bnds, method=testMethod)
#print fpar
s = numpy.shape(fpar)
j = 0
outstr = ""
initstr = ""
truestr = ""
for i in range(0, len(names[j])):
# print "%f " % fpar[j][i],
outstr = outstr + ('%s = %f, ' % (names[j][i], fpar[j][i]))
initstr = initstr + '%s = %f, ' % (names[j][i], tv[i])
truestr = truestr + '%s = %f, ' % (names[j][i], f[1][i])
print( "\nTrue(%d) : %s" % (j, truestr) )
print( "FIT(%d) : %s" % (j, outstr) )
print( "init(%d) : %s" % (j, initstr) )
print( "Error: : %f" % (Fits.fitSum2Err))
        if func == 'exppulse':
pylab.figure()
pylab.plot(numpy.array(x), yd, 'ro-')
pylab.hold(True)
pylab.plot(numpy.array(x), initialgr, 'k--')
pylab.plot(xf[0], yf[0], 'b-') # fit
pylab.show()
| mit |
nilearn/nilearn_sandbox | examples/rpbi/plot_localizer_rpbi.py | 1 | 4435 | """
Massively univariate analysis of a computation task from the Localizer dataset
==============================================================================
A permuted Ordinary Least Squares algorithm is run at each voxel in
order to determine which voxels are specifically active when a healthy subject
performs a computation task as opposed to a sentence reading task.
Randomized Parcellation Based Inference [1] is also used so as to illustrate
that it conveys more sensitivity.
"""
# Author: Virgile Fritsch, <[email protected]>, Mar. 2014
import numpy as np
from nilearn import datasets
from scipy import linalg
from nilearn.input_data import NiftiMasker
from nilearn.mass_univariate import permuted_ols
from nilearn_sandbox.mass_univariate.rpbi import randomized_parcellation_based_inference
### Load Localizer motor contrast #############################################
n_samples = 20
# localizer_dataset = datasets.fetch_localizer_calculation_task(
# n_subjects=n_samples)
localizer_dataset = datasets.fetch_localizer_contrasts(
["calculation vs sentences"],
n_subjects=n_samples)
### Mask data #################################################################
nifti_masker = NiftiMasker(
memory='nilearn_cache', memory_level=1) # cache options
fmri_masked = nifti_masker.fit_transform(localizer_dataset.cmaps)
### Perform massively univariate analysis with permuted OLS ###################
tested_var = np.ones((n_samples, 1), dtype=float) # intercept
neg_log_pvals, all_scores, h0 = permuted_ols(
tested_var, fmri_masked, model_intercept=False,
n_perm=5000, # 5,000 for the sake of time. 10,000 is recommended
two_sided_test=False, # RPBI does not perform a two-sided test
n_jobs=1) # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
np.ravel(neg_log_pvals))
### Randomized Parcellation Based Inference ###################################
neg_log_pvals_rpbi, _, _ = randomized_parcellation_based_inference(
tested_var, fmri_masked,
np.asarray(nifti_masker.mask_img_.get_data()).astype(bool),
n_parcellations=30, # 30 for the sake of time, 100 is recommended
n_parcels=1000,
threshold='auto',
n_perm=5000, # 5,000 for the sake of time. 10,000 is recommended
random_state=0, memory='nilearn_cache', n_jobs=1, verbose=True)
neg_log_pvals_rpbi_unmasked = nifti_masker.inverse_transform(
neg_log_pvals_rpbi)
### Visualization #############################################################
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
# Here, we should use a structural image as a background, when available.
# Various plotting parameters
z_slice = 39 # plotted slice
from nilearn.image.resampling import coord_transform
affine = neg_log_pvals_unmasked.get_affine()
_, _, k_slice = coord_transform(0, 0, z_slice,
linalg.inv(affine))
k_slice = round(k_slice)
threshold = - np.log10(0.1) # 10% corrected
vmax = min(np.amax(neg_log_pvals),
np.amax(neg_log_pvals_rpbi))
# Plot permutation p-values map
fig = plt.figure(figsize=(5, 7), facecolor='k')
display = plot_stat_map(neg_log_pvals_unmasked,
threshold=threshold, cmap=plt.cm.autumn,
display_mode='z', cut_coords=[z_slice],
figure=fig, vmax=vmax, black_bg=True)
neg_log_pvals_data = neg_log_pvals_unmasked.get_data()
neg_log_pvals_slice_data = \
neg_log_pvals_data[..., k_slice]
n_detections = (neg_log_pvals_slice_data > threshold).sum()
title = ('Negative $\log_{10}$ p-values'
'\n(Non-parametric + '
'\nmax-type correction)'
'\n%d detections') % n_detections
display.title(title, y=1.2)
# Plot RPBI p-values map
fig = plt.figure(figsize=(5, 7), facecolor='k')
display = plot_stat_map(neg_log_pvals_rpbi_unmasked,
threshold=threshold, cmap=plt.cm.autumn,
display_mode='z', cut_coords=[z_slice],
figure=fig, vmax=vmax, black_bg=True)
neg_log_pvals_rpbi_data = \
neg_log_pvals_rpbi_unmasked.get_data()
neg_log_pvals_rpbi_slice_data = \
neg_log_pvals_rpbi_data[..., k_slice]
n_detections = (neg_log_pvals_rpbi_slice_data > threshold).sum()
title = ('Negative $\log_{10}$ p-values' + '\n(RPBI)'
'\n%d detections') % n_detections
display.title(title, y=1.2)
plt.show()
| bsd-3-clause |
zutshi/S3CAMR | examples/spi/spi_plant.py | 1 | 2022 |
# Must satisfy the signature
# [t,X,D,P] = sim_function(T,X0,D0,P0,I0);
import numpy as np
from scipy.integrate import ode
import matplotlib.pyplot as PLT
PLOT = True
class SIM(object):
def __init__(self, plt, pvt_init_data):
#print I
# atol = 1e-10
rtol = 1e-5
# tt,YY,dummy_D,dummy_P
self.solver = ode(dyn).set_integrator('dopri5', rtol=rtol)
return
def sim(self, TT, X0, D, P, U, W, property_checker):
if PLOT:
num_dim_x = len(X0)
plot_data = [np.empty(0, dtype=float), np.empty((0, num_dim_x), dtype=float)]
else:
plot_data = None
Ti = TT[0]
Tf = TT[1]
T = Tf - Ti
self.solver.set_solout(solout_fun(property_checker, plot_data)) # (2)
self.solver.set_initial_value(X0, t=0.0)
self.solver.set_f_params(W)
X_ = self.solver.integrate(T)
pvf = property_checker.check(Tf, X_)
dummy_D = np.zeros(D.shape)
dummy_P = np.zeros(P.shape)
ret_t = Tf
ret_X = X_
ret_D = dummy_D
ret_P = dummy_P
if PLOT:
PLT.figure(5)
PLT.plot(plot_data[0], plot_data[1][:, 0])
return (ret_t, ret_X, ret_D, ret_P), pvf
# State Space Modeling Template
# dx/dt = Ax + Bu
# y = Cx + Du
def dyn(t, X, w):
if w > 0:
u = 1.0
elif w < 0:
u = -1.0
else:
u = 0.0
x2 = u
X_ = np.array([x2])
return X_
def solout_fun(property_checker, plot_data):
def solout(t, Y):
if PLOT:
plot_data[0] = np.concatenate((plot_data[0], np.array([t])))
plot_data[1] = np.concatenate((plot_data[1], np.array([Y])))
if property_checker.check(t, Y):
#violating_state[0] = (np.copy(t), np.copy(Y))
# print 'violation found:', violating_state[0]
# return -1 to stop integration
return -1
else:
return 0
return 0
return solout
| bsd-2-clause |
pvcrossi/OnlineCS | online_CS.py | 1 | 4043 | '''
Bayesian Online Compressed Sensing (2016)
Paulo V. Rossi & Yoshiyuki Kabashima
'''
from collections import namedtuple
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import norm
from numpy.random import normal
from utils import DlnH, DDlnH, G, H, moments
def simulation(method='standard'):
signal_length = 2000
alpha_max = 20
sigma_n_2 = 1e-1
phi = prior()
P = posterior(signal_length, phi)
x0 = generate_signal(signal_length, phi)
print('Simulation parameters:')
print('N='+str(signal_length)+', sparsity='+str(phi.rho)+
', noise='+str(sigma_n_2)+', alpha_max='+str(alpha_max))
print('Measurement model: '+method+'\n')
number_of_measurements = alpha_max*signal_length
mean_square_error = np.zeros(number_of_measurements)
for measurement in range(number_of_measurements):
P = update_posterior(P, phi, x0, signal_length, sigma_n_2, method)
mean_square_error[measurement] = reconstruction_error(P, x0)
plot_results(P, x0, mean_square_error, phi)
def prior():
phi = namedtuple('prior_distribution', ['rho', 'sigma_x_2', 'bar_x'])
phi.rho = 0.1
phi.sigma_x_2 = 1.
phi.bar_x = 0.
return phi
def posterior(signal_length, phi):
P = namedtuple('posterior_distribution', ['m', 'v', 'a', 'h'])
P.m = np.zeros(signal_length)
P.v = phi.rho * phi.sigma_x_2 * np.ones(signal_length)
P.a = np.zeros(signal_length)
P.h = np.zeros(signal_length)
return P
def generate_signal (signal_length, phi):
x0 = np.zeros(signal_length)
number_of_non_zero_components = int(np.ceil(signal_length*phi.rho))
x0[:number_of_non_zero_components] = normal(loc=phi.bar_x,
scale=np.sqrt(phi.sigma_x_2),
size=number_of_non_zero_components)
return x0
def update_posterior(P, phi, x0, signal_length, sigma_n_2, method):
A_t = measurement_vector(signal_length)
P.a, P.h = update_and_project(method, A_t, x0, sigma_n_2, P)
P.m, P.v = moments(P, phi)
return P
def measurement_vector(signal_length):
A_t = normal(size=signal_length)
return A_t/norm(A_t)
def update_and_project(method, A_t, x0, sigma_n_2, P):
m, v, a, h = P.m, P.v, P.a, P.h
u0 = np.dot(A_t, x0)
if sigma_n_2 > 0:
noise = normal(scale=np.sqrt(sigma_n_2))
else:
noise = 0
y = u0 + noise
Delta = np.dot(A_t, m)
chi = np.dot(A_t**2, v)
if method == 'standard':
da, dh = update_and_project_std(y, Delta, chi, sigma_n_2, A_t, m)
elif method == '1bit':
da, dh = update_and_project_1bit(y, Delta, chi, sigma_n_2, A_t, m)
else:
raise ValueError('Measurement model not recognized. Please use "standard" or "1bit".')
return a+da, h+dh
def update_and_project_std(y, Delta, chi, sigma_n_2, A_t, m):
da = A_t**2 / (sigma_n_2 + chi)
dh = (y-Delta)*A_t / (sigma_n_2 + chi) + da*m
return da, dh
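# update_and_project_std() above is the linear-Gaussian measurement update: the
# precision-like natural parameter a grows by A_t**2/(sigma_n_2 + chi) and the linear
# term h grows by (y - Delta)*A_t/(sigma_n_2 + chi), plus da*m so the previous mean is
# carried over.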
def update_and_project_1bit(y, Delta, chi, sigma_n_2, A_t, m):
y = np.sign(y)
u = y * np.dot(A_t, m)
chi_prime = chi + sigma_n_2
z = -u/np.sqrt(chi_prime)
da = -A_t**2/chi_prime * DDlnH(z)
dh = -y*A_t/np.sqrt(chi_prime) * DlnH(z) + da*m
return da, dh
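# In the 1-bit branch only the sign of the measurement is kept: with u = y*dot(A_t, m)
# and chi' = chi + sigma_n_2, the effective likelihood factor is H(-u/sqrt(chi')), so the
# natural-parameter increments are built from the first and second derivatives of ln H
# evaluated at z = -u/sqrt(chi') (DlnH, DDlnH imported from utils).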
def reconstruction_error(P, x0):
return norm(x0 - P.m)**2 / norm(x0)**2
def plot_results(P, x0, mse_t, phi):
plt.subplots(figsize=(10,20))
plt.subplot(211)
plt.plot(np.arange(len(mse_t))/float(len(P.m)), 10*np.log10(mse_t), color='k')
plt.xlabel(r'$\alpha$')
plt.ylabel(r'mse (dB)')
plt.subplot(212)
plt.plot(P.m, color='k', lw = 0.7, label=r'$m$')
plt.scatter(range(int(len(x0)*phi.rho)), x0[:int(len(x0)*phi.rho)], \
marker='o', facecolors='none', edgecolors='r', lw=1.5, label=r'$x^0$')
plt.xlim([0,len(P.m)])
plt.xlabel(r'Vector Component')
plt.legend()
plt.show()
if __name__ == '__main__':
simulation(method='1bit')
#simulation(method='standard')
| mit |
nelango/ViralityAnalysis | model/lib/pandas/tests/test_internals.py | 9 | 45145 | # -*- coding: utf-8 -*-
# pylint: disable=W0102
from datetime import datetime, date
import nose
import numpy as np
import re
import itertools
from pandas import Index, MultiIndex, DataFrame, DatetimeIndex, Series, Categorical
from pandas.compat import OrderedDict, lrange
from pandas.sparse.array import SparseArray
from pandas.core.internals import (BlockPlacement, SingleBlockManager, make_block,
BlockManager)
import pandas.core.common as com
import pandas.core.internals as internals
import pandas.util.testing as tm
import pandas as pd
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, randn, assert_series_equal)
from pandas.compat import zip, u
def assert_block_equal(left, right):
assert_almost_equal(left.values, right.values)
assert(left.dtype == right.dtype)
assert_almost_equal(left.mgr_locs, right.mgr_locs)
def get_numeric_mat(shape):
arr = np.arange(shape[0])
return np.lib.stride_tricks.as_strided(
x=arr, shape=shape,
strides=(arr.itemsize,) + (0,) * (len(shape) - 1)).copy()
N = 10
def create_block(typestr, placement, item_shape=None, num_offset=0):
"""
Supported typestr:
* float, f8, f4, f2
* int, i8, i4, i2, i1
* uint, u8, u4, u2, u1
* complex, c16, c8
* bool
* object, string, O
* datetime, dt, M8[ns], M8[ns, tz]
* timedelta, td, m8[ns]
* sparse (SparseArray with fill_value=0.0)
* sparse_na (SparseArray with fill_value=np.nan)
* category, category2
"""
placement = BlockPlacement(placement)
num_items = len(placement)
if item_shape is None:
item_shape = (N,)
shape = (num_items,) + item_shape
mat = get_numeric_mat(shape)
if typestr in ('float', 'f8', 'f4', 'f2',
'int', 'i8', 'i4', 'i2', 'i1',
'uint', 'u8', 'u4', 'u2', 'u1'):
values = mat.astype(typestr) + num_offset
elif typestr in ('complex', 'c16', 'c8'):
values = 1.j * (mat.astype(typestr) + num_offset)
elif typestr in ('object', 'string', 'O'):
values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],
shape)
elif typestr in ('b','bool',):
values = np.ones(shape, dtype=np.bool_)
elif typestr in ('datetime', 'dt', 'M8[ns]'):
values = (mat * 1e9).astype('M8[ns]')
elif typestr.startswith('M8[ns'):
# datetime with tz
m = re.search('M8\[ns,\s*(\w+\/?\w*)\]', typestr)
assert m is not None, "incompatible typestr -> {0}".format(typestr)
tz = m.groups()[0]
assert num_items == 1, "must have only 1 num items for a tz-aware"
values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)
elif typestr in ('timedelta', 'td', 'm8[ns]'):
values = (mat * 1).astype('m8[ns]')
elif typestr in ('category',):
values = Categorical([1,1,2,2,3,3,3,3,4,4])
elif typestr in ('category2',):
values = Categorical(['a','a','a','a','b','b','c','c','c','d'])
elif typestr in ('sparse', 'sparse_na'):
# FIXME: doesn't support num_rows != 10
assert shape[-1] == 10
assert all(s == 1 for s in shape[:-1])
if typestr.endswith('_na'):
fill_value = np.nan
else:
fill_value = 0.0
values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,
4, 5, fill_value, 6], fill_value=fill_value)
arr = values.sp_values.view()
arr += (num_offset - 1)
else:
raise ValueError('Unsupported typestr: "%s"' % typestr)
return make_block(values, placement=placement, ndim=len(shape))
def create_single_mgr(typestr, num_rows=None):
if num_rows is None:
num_rows = N
return SingleBlockManager(
create_block(typestr, placement=slice(0, num_rows), item_shape=()),
np.arange(num_rows))
def create_mgr(descr, item_shape=None):
"""
Construct BlockManager from string description.
String description syntax looks similar to np.matrix initializer. It looks
like this::
a,b,c: f8; d,e,f: i8
Rules are rather simple:
* see list of supported datatypes in `create_block` method
* components are semicolon-separated
* each component is `NAME,NAME,NAME: DTYPE_ID`
* whitespace around colons & semicolons are removed
* components with same DTYPE_ID are combined into single block
* to force multiple blocks with same dtype, use '-SUFFIX'::
'a:f8-1; b:f8-2; c:f8-foobar'
"""
if item_shape is None:
item_shape = (N,)
offset = 0
mgr_items = []
block_placements = OrderedDict()
for d in descr.split(';'):
d = d.strip()
names, blockstr = d.partition(':')[::2]
blockstr = blockstr.strip()
names = names.strip().split(',')
mgr_items.extend(names)
placement = list(np.arange(len(names)) + offset)
try:
block_placements[blockstr].extend(placement)
except KeyError:
block_placements[blockstr] = placement
offset += len(names)
mgr_items = Index(mgr_items)
blocks = []
num_offset = 0
for blockstr, placement in block_placements.items():
typestr = blockstr.split('-')[0]
blocks.append(create_block(typestr, placement, item_shape=item_shape,
num_offset=num_offset,))
num_offset += len(placement)
return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]),
[mgr_items] + [np.arange(n) for n in item_shape])
class TestBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
# self.fblock = get_float_ex() # a,c,e
# self.cblock = get_complex_ex() #
# self.oblock = get_obj_ex()
# self.bool_block = get_bool_ex()
# self.int_block = get_int_ex()
self.fblock = create_block('float', [0, 2, 4])
self.cblock = create_block('complex', [7])
self.oblock = create_block('object', [1, 3])
self.bool_block = create_block('bool', [5])
self.int_block = create_block('int', [6])
def test_constructor(self):
int32block = create_block('i4', [0])
self.assertEqual(int32block.dtype, np.int32)
def test_pickle(self):
def _check(blk):
assert_block_equal(self.round_trip_pickle(blk), blk)
_check(self.fblock)
_check(self.cblock)
_check(self.oblock)
_check(self.bool_block)
def test_mgr_locs(self):
assert_almost_equal(self.fblock.mgr_locs, [0, 2, 4])
def test_attrs(self):
self.assertEqual(self.fblock.shape, self.fblock.values.shape)
self.assertEqual(self.fblock.dtype, self.fblock.values.dtype)
self.assertEqual(len(self.fblock), len(self.fblock.values))
def test_merge(self):
avals = randn(2, 10)
bvals = randn(2, 10)
ref_cols = Index(['e', 'a', 'b', 'd', 'f'])
ablock = make_block(avals,
ref_cols.get_indexer(['e', 'b']))
bblock = make_block(bvals,
ref_cols.get_indexer(['a', 'd']))
merged = ablock.merge(bblock)
assert_almost_equal(merged.mgr_locs, [0, 1, 2, 3])
assert_almost_equal(merged.values[[0, 2]], avals)
assert_almost_equal(merged.values[[1, 3]], bvals)
# TODO: merge with mixed type?
def test_copy(self):
cop = self.fblock.copy()
self.assertIsNot(cop, self.fblock)
assert_block_equal(self.fblock, cop)
def test_reindex_index(self):
pass
def test_reindex_cast(self):
pass
def test_insert(self):
pass
def test_delete(self):
newb = self.fblock.copy()
newb.delete(0)
assert_almost_equal(newb.mgr_locs, [2, 4])
self.assertTrue((newb.values[0] == 1).all())
newb = self.fblock.copy()
newb.delete(1)
assert_almost_equal(newb.mgr_locs, [0, 4])
self.assertTrue((newb.values[1] == 2).all())
newb = self.fblock.copy()
newb.delete(2)
assert_almost_equal(newb.mgr_locs, [0, 2])
self.assertTrue((newb.values[1] == 1).all())
newb = self.fblock.copy()
self.assertRaises(Exception, newb.delete, 3)
def test_split_block_at(self):
# with dup column support this method was taken out
# GH3679
raise nose.SkipTest("skipping for now")
bs = list(self.fblock.split_block_at('a'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['c', 'e']))
bs = list(self.fblock.split_block_at('c'))
self.assertEqual(len(bs), 2)
self.assertTrue(np.array_equal(bs[0].items, ['a']))
self.assertTrue(np.array_equal(bs[1].items, ['e']))
bs = list(self.fblock.split_block_at('e'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['a', 'c']))
bblock = get_bool_ex(['f'])
bs = list(bblock.split_block_at('f'))
self.assertEqual(len(bs), 0)
def test_get(self):
pass
def test_set(self):
pass
def test_fillna(self):
pass
def test_repr(self):
pass
class TestDatetimeBlock(tm.TestCase):
_multiprocess_can_split_ = True
def test_try_coerce_arg(self):
block = create_block('datetime', [0])
# coerce None
none_coerced = block._try_coerce_args(block.values, None)[2]
self.assertTrue(pd.Timestamp(none_coerced) is pd.NaT)
        # coerce different types of date objects
vals = (np.datetime64('2010-10-10'),
datetime(2010, 10, 10),
date(2010, 10, 10))
for val in vals:
coerced = block._try_coerce_args(block.values, val)[2]
self.assertEqual(np.int64, type(coerced))
self.assertEqual(pd.Timestamp('2010-10-10'), pd.Timestamp(coerced))
class TestBlockManager(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.mgr = create_mgr('a: f8; b: object; c: f8; d: object; e: f8;'
'f: bool; g: i8; h: complex')
def test_constructor_corner(self):
pass
def test_attrs(self):
mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2')
self.assertEqual(mgr.nblocks, 2)
self.assertEqual(len(mgr), 6)
def test_is_mixed_dtype(self):
self.assertFalse(create_mgr('a,b:f8').is_mixed_type)
self.assertFalse(create_mgr('a:f8-1; b:f8-2').is_mixed_type)
self.assertTrue(create_mgr('a,b:f8; c,d: f4').is_mixed_type)
self.assertTrue(create_mgr('a,b:f8; c,d: object').is_mixed_type)
def test_is_indexed_like(self):
mgr1 = create_mgr('a,b: f8')
mgr2 = create_mgr('a:i8; b:bool')
mgr3 = create_mgr('a,b,c: f8')
self.assertTrue(mgr1._is_indexed_like(mgr1))
self.assertTrue(mgr1._is_indexed_like(mgr2))
self.assertTrue(mgr1._is_indexed_like(mgr3))
self.assertFalse(mgr1._is_indexed_like(
mgr1.get_slice(slice(-1), axis=1)))
def test_duplicate_ref_loc_failure(self):
tmp_mgr = create_mgr('a:bool; a: f8')
axes, blocks = tmp_mgr.axes, tmp_mgr.blocks
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([0])
# test trying to create block manager with overlapping ref locs
self.assertRaises(AssertionError, BlockManager, blocks, axes)
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([1])
mgr = BlockManager(blocks, axes)
mgr.iget(1)
def test_contains(self):
self.assertIn('a', self.mgr)
self.assertNotIn('baz', self.mgr)
def test_pickle(self):
mgr2 = self.round_trip_pickle(self.mgr)
assert_frame_equal(DataFrame(self.mgr), DataFrame(mgr2))
# share ref_items
# self.assertIs(mgr2.blocks[0].ref_items, mgr2.blocks[1].ref_items)
# GH2431
self.assertTrue(hasattr(mgr2, "_is_consolidated"))
self.assertTrue(hasattr(mgr2, "_known_consolidated"))
# reset to False on load
self.assertFalse(mgr2._is_consolidated)
self.assertFalse(mgr2._known_consolidated)
def test_non_unique_pickle(self):
mgr = create_mgr('a,a,a:f8')
mgr2 = self.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
mgr = create_mgr('a: f8; a: i8')
mgr2 = self.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
def test_categorical_block_pickle(self):
mgr = create_mgr('a: category')
mgr2 = self.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
smgr = create_single_mgr('category')
smgr2 = self.round_trip_pickle(smgr)
assert_series_equal(Series(smgr), Series(smgr2))
def test_get_scalar(self):
for item in self.mgr.items:
for i, index in enumerate(self.mgr.axes[1]):
res = self.mgr.get_scalar((item, index))
exp = self.mgr.get(item, fastpath=False)[i]
assert_almost_equal(res, exp)
exp = self.mgr.get(item).internal_values()[i]
assert_almost_equal(res, exp)
def test_get(self):
cols = Index(list('abc'))
values = np.random.rand(3, 3)
block = make_block(values=values.copy(),
placement=np.arange(3))
mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])
assert_almost_equal(mgr.get('a', fastpath=False), values[0])
assert_almost_equal(mgr.get('b', fastpath=False), values[1])
assert_almost_equal(mgr.get('c', fastpath=False), values[2])
assert_almost_equal(mgr.get('a').internal_values(), values[0])
assert_almost_equal(mgr.get('b').internal_values(), values[1])
assert_almost_equal(mgr.get('c').internal_values(), values[2])
def test_set(self):
mgr = create_mgr('a,b,c: int', item_shape=(3,))
mgr.set('d', np.array(['foo'] * 3))
mgr.set('b', np.array(['bar'] * 3))
assert_almost_equal(mgr.get('a').internal_values(), [0] * 3)
assert_almost_equal(mgr.get('b').internal_values(), ['bar'] * 3)
assert_almost_equal(mgr.get('c').internal_values(), [2] * 3)
assert_almost_equal(mgr.get('d').internal_values(), ['foo'] * 3)
def test_insert(self):
self.mgr.insert(0, 'inserted', np.arange(N))
self.assertEqual(self.mgr.items[0], 'inserted')
assert_almost_equal(self.mgr.get('inserted'), np.arange(N))
for blk in self.mgr.blocks:
yield self.assertIs, self.mgr.items, blk.ref_items
def test_set_change_dtype(self):
self.mgr.set('baz', np.zeros(N, dtype=bool))
self.mgr.set('baz', np.repeat('foo', N))
self.assertEqual(self.mgr.get('baz').dtype, np.object_)
mgr2 = self.mgr.consolidate()
mgr2.set('baz', np.repeat('foo', N))
self.assertEqual(mgr2.get('baz').dtype, np.object_)
mgr2.set('quux', randn(N).astype(int))
self.assertEqual(mgr2.get('quux').dtype, np.int_)
mgr2.set('quux', randn(N))
self.assertEqual(mgr2.get('quux').dtype, np.float_)
def test_set_change_dtype_slice(self): # GH8850
cols = MultiIndex.from_tuples([('1st','a'), ('2nd','b'), ('3rd','c')])
df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)
df['2nd'] = df['2nd'] * 2.0
self.assertEqual(sorted(df.blocks.keys()), ['float64', 'int64'])
assert_frame_equal(df.blocks['float64'],
DataFrame([[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]))
assert_frame_equal(df.blocks['int64'],
DataFrame([[3], [6]], columns=cols[2:]))
def test_copy(self):
shallow = self.mgr.copy(deep=False)
        # we don't guarantee block ordering
for blk in self.mgr.blocks:
found = False
for cp_blk in shallow.blocks:
if cp_blk.values is blk.values:
found = True
break
self.assertTrue(found)
def test_sparse(self):
mgr = create_mgr('a: sparse-1; b: sparse-2')
# what to test here?
self.assertEqual(mgr.as_matrix().dtype, np.float64)
def test_sparse_mixed(self):
mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8')
self.assertEqual(len(mgr.blocks), 3)
self.assertIsInstance(mgr, BlockManager)
# what to test here?
def test_as_matrix_float(self):
mgr = create_mgr('c: f4; d: f2; e: f8')
self.assertEqual(mgr.as_matrix().dtype, np.float64)
mgr = create_mgr('c: f4; d: f2')
self.assertEqual(mgr.as_matrix().dtype, np.float32)
def test_as_matrix_int_bool(self):
mgr = create_mgr('a: bool-1; b: bool-2')
self.assertEqual(mgr.as_matrix().dtype, np.bool_)
mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1')
self.assertEqual(mgr.as_matrix().dtype, np.int64)
mgr = create_mgr('c: i4; d: i2; e: u1')
self.assertEqual(mgr.as_matrix().dtype, np.int32)
def test_as_matrix_datetime(self):
mgr = create_mgr('h: datetime-1; g: datetime-2')
self.assertEqual(mgr.as_matrix().dtype, 'M8[ns]')
def test_as_matrix_datetime_tz(self):
mgr = create_mgr('h: M8[ns, US/Eastern]; g: M8[ns, CET]')
self.assertEqual(mgr.get('h').dtype, 'datetime64[ns, US/Eastern]')
self.assertEqual(mgr.get('g').dtype, 'datetime64[ns, CET]')
self.assertEqual(mgr.as_matrix().dtype, 'object')
def test_astype(self):
# coerce all
mgr = create_mgr('c: f4; d: f2; e: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t)
self.assertEqual(tmgr.get('c').dtype.type, t)
self.assertEqual(tmgr.get('d').dtype.type, t)
self.assertEqual(tmgr.get('e').dtype.type, t)
# mixed
mgr = create_mgr('a,b: object; c: bool; d: datetime;'
'e: f4; f: f2; g: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t, raise_on_error=False)
self.assertEqual(tmgr.get('c').dtype.type, t)
self.assertEqual(tmgr.get('e').dtype.type, t)
self.assertEqual(tmgr.get('f').dtype.type, t)
self.assertEqual(tmgr.get('g').dtype.type, t)
self.assertEqual(tmgr.get('a').dtype.type, np.object_)
self.assertEqual(tmgr.get('b').dtype.type, np.object_)
if t != np.int64:
self.assertEqual(tmgr.get('d').dtype.type, np.datetime64)
else:
self.assertEqual(tmgr.get('d').dtype.type, t)
def test_convert(self):
def _compare(old_mgr, new_mgr):
""" compare the blocks, numeric compare ==, object don't """
old_blocks = set(old_mgr.blocks)
new_blocks = set(new_mgr.blocks)
self.assertEqual(len(old_blocks), len(new_blocks))
# compare non-numeric
for b in old_blocks:
found = False
for nb in new_blocks:
if (b.values == nb.values).all():
found = True
break
self.assertTrue(found)
for b in new_blocks:
found = False
for ob in old_blocks:
if (b.values == ob.values).all():
found = True
break
self.assertTrue(found)
# noops
mgr = create_mgr('f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr,new_mgr)
mgr = create_mgr('a, b: object; f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr,new_mgr)
# convert
mgr = create_mgr('a,b,foo: object; f: i8; g: f8')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
self.assertEqual(new_mgr.get('a').dtype, np.int64)
self.assertEqual(new_mgr.get('b').dtype, np.float64)
self.assertEqual(new_mgr.get('foo').dtype, np.object_)
self.assertEqual(new_mgr.get('f').dtype, np.int64)
self.assertEqual(new_mgr.get('g').dtype, np.float64)
mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;'
'i: i8; g: f8; h: f2')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
self.assertEqual(new_mgr.get('a').dtype, np.int64)
self.assertEqual(new_mgr.get('b').dtype, np.float64)
self.assertEqual(new_mgr.get('foo').dtype, np.object_)
self.assertEqual(new_mgr.get('f').dtype, np.int32)
self.assertEqual(new_mgr.get('bool').dtype, np.bool_)
self.assertEqual(new_mgr.get('dt').dtype.type, np.datetime64)
self.assertEqual(new_mgr.get('i').dtype, np.int64)
self.assertEqual(new_mgr.get('g').dtype, np.float64)
self.assertEqual(new_mgr.get('h').dtype, np.float16)
def test_interleave(self):
# self
for dtype in ['f8','i8','object','bool','complex','M8[ns]','m8[ns]']:
mgr = create_mgr('a: {0}'.format(dtype))
self.assertEqual(mgr.as_matrix().dtype,dtype)
mgr = create_mgr('a: {0}; b: {0}'.format(dtype))
self.assertEqual(mgr.as_matrix().dtype,dtype)
        # will be converted according to the actual dtype of the underlying values
mgr = create_mgr('a: category')
self.assertEqual(mgr.as_matrix().dtype,'i8')
mgr = create_mgr('a: category; b: category')
        self.assertEqual(mgr.as_matrix().dtype,'i8')
mgr = create_mgr('a: category; b: category2')
self.assertEqual(mgr.as_matrix().dtype,'object')
mgr = create_mgr('a: category2')
self.assertEqual(mgr.as_matrix().dtype,'object')
mgr = create_mgr('a: category2; b: category2')
self.assertEqual(mgr.as_matrix().dtype,'object')
# combinations
mgr = create_mgr('a: f8')
self.assertEqual(mgr.as_matrix().dtype,'f8')
mgr = create_mgr('a: f8; b: i8')
self.assertEqual(mgr.as_matrix().dtype,'f8')
mgr = create_mgr('a: f4; b: i8')
self.assertEqual(mgr.as_matrix().dtype,'f4')
mgr = create_mgr('a: f4; b: i8; d: object')
self.assertEqual(mgr.as_matrix().dtype,'object')
mgr = create_mgr('a: bool; b: i8')
self.assertEqual(mgr.as_matrix().dtype,'object')
mgr = create_mgr('a: complex')
self.assertEqual(mgr.as_matrix().dtype,'complex')
mgr = create_mgr('a: f8; b: category')
self.assertEqual(mgr.as_matrix().dtype,'object')
mgr = create_mgr('a: M8[ns]; b: category')
self.assertEqual(mgr.as_matrix().dtype,'object')
mgr = create_mgr('a: M8[ns]; b: bool')
self.assertEqual(mgr.as_matrix().dtype,'object')
mgr = create_mgr('a: M8[ns]; b: i8')
self.assertEqual(mgr.as_matrix().dtype,'object')
mgr = create_mgr('a: m8[ns]; b: bool')
self.assertEqual(mgr.as_matrix().dtype,'object')
mgr = create_mgr('a: m8[ns]; b: i8')
self.assertEqual(mgr.as_matrix().dtype,'object')
mgr = create_mgr('a: M8[ns]; b: m8[ns]')
self.assertEqual(mgr.as_matrix().dtype,'object')
def test_interleave_non_unique_cols(self):
df = DataFrame([
[pd.Timestamp('20130101'), 3.5],
[pd.Timestamp('20130102'), 4.5]],
columns=['x', 'x'],
index=[1, 2])
df_unique = df.copy()
df_unique.columns = ['x', 'y']
self.assertEqual(df_unique.values.shape, df.values.shape)
tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])
tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])
def test_consolidate(self):
pass
def test_consolidate_ordering_issues(self):
self.mgr.set('f', randn(N))
self.mgr.set('d', randn(N))
self.mgr.set('b', randn(N))
self.mgr.set('g', randn(N))
self.mgr.set('h', randn(N))
cons = self.mgr.consolidate()
self.assertEqual(cons.nblocks, 1)
assert_almost_equal(cons.blocks[0].mgr_locs,
np.arange(len(cons.items)))
def test_reindex_index(self):
pass
def test_reindex_items(self):
# mgr is not consolidated, f8 & f8-2 blocks
mgr = create_mgr('a: f8; b: i8; c: f8; d: i8; e: f8;'
'f: bool; g: f8-2')
reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0)
self.assertEqual(reindexed.nblocks, 2)
assert_almost_equal(reindexed.items, ['g', 'c', 'a', 'd'])
assert_almost_equal(mgr.get('g',fastpath=False), reindexed.get('g',fastpath=False))
assert_almost_equal(mgr.get('c',fastpath=False), reindexed.get('c',fastpath=False))
assert_almost_equal(mgr.get('a',fastpath=False), reindexed.get('a',fastpath=False))
assert_almost_equal(mgr.get('d',fastpath=False), reindexed.get('d',fastpath=False))
assert_almost_equal(mgr.get('g').internal_values(), reindexed.get('g').internal_values())
assert_almost_equal(mgr.get('c').internal_values(), reindexed.get('c').internal_values())
assert_almost_equal(mgr.get('a').internal_values(), reindexed.get('a').internal_values())
assert_almost_equal(mgr.get('d').internal_values(), reindexed.get('d').internal_values())
def test_multiindex_xs(self):
mgr = create_mgr('a,b,c: f8; d,e,f: i8')
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
mgr.set_axis(1, index)
result = mgr.xs('bar', axis=1)
self.assertEqual(result.shape, (6, 2))
self.assertEqual(result.axes[1][0], ('bar', 'one'))
self.assertEqual(result.axes[1][1], ('bar', 'two'))
def test_get_numeric_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
'str: object; bool: bool; obj: object; dt: datetime',
item_shape=(3,))
mgr.set('obj', np.array([1, 2, 3], dtype=np.object_))
numeric = mgr.get_numeric_data()
assert_almost_equal(numeric.items, ['int', 'float', 'complex', 'bool'])
assert_almost_equal(mgr.get('float',fastpath=False), numeric.get('float',fastpath=False))
assert_almost_equal(mgr.get('float').internal_values(), numeric.get('float').internal_values())
# Check sharing
numeric.set('float', np.array([100., 200., 300.]))
assert_almost_equal(mgr.get('float',fastpath=False), np.array([100., 200., 300.]))
assert_almost_equal(mgr.get('float').internal_values(), np.array([100., 200., 300.]))
numeric2 = mgr.get_numeric_data(copy=True)
assert_almost_equal(numeric.items, ['int', 'float', 'complex', 'bool'])
numeric2.set('float', np.array([1000., 2000., 3000.]))
assert_almost_equal(mgr.get('float',fastpath=False), np.array([100., 200., 300.]))
assert_almost_equal(mgr.get('float').internal_values(), np.array([100., 200., 300.]))
def test_get_bool_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
'str: object; bool: bool; obj: object; dt: datetime',
item_shape=(3,))
mgr.set('obj', np.array([True, False, True], dtype=np.object_))
bools = mgr.get_bool_data()
assert_almost_equal(bools.items, ['bool'])
assert_almost_equal(mgr.get('bool',fastpath=False), bools.get('bool',fastpath=False))
assert_almost_equal(mgr.get('bool').internal_values(), bools.get('bool').internal_values())
bools.set('bool', np.array([True, False, True]))
assert_almost_equal(mgr.get('bool',fastpath=False), [True, False, True])
assert_almost_equal(mgr.get('bool').internal_values(), [True, False, True])
# Check sharing
bools2 = mgr.get_bool_data(copy=True)
bools2.set('bool', np.array([False, True, False]))
assert_almost_equal(mgr.get('bool',fastpath=False), [True, False, True])
assert_almost_equal(mgr.get('bool').internal_values(), [True, False, True])
def test_unicode_repr_doesnt_raise(self):
str_repr = repr(create_mgr(u('b,\u05d0: object')))
def test_missing_unicode_key(self):
df = DataFrame({"a": [1]})
try:
df.ix[:, u("\u05d0")] # should not raise UnicodeEncodeError
except KeyError:
pass # this is the expected exception
def test_equals(self):
# unique items
bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
self.assertTrue(bm1.equals(bm2))
bm1 = create_mgr('a,a,a: i8-1; b,b,b: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
self.assertTrue(bm1.equals(bm2))
def test_equals_block_order_different_dtypes(self):
# GH 9330
mgr_strings = [
"a:i8;b:f8", # basic case
"a:i8;b:f8;c:c8;d:b", # many types
"a:i8;e:dt;f:td;g:string", # more types
"a:i8;b:category;c:category2;d:category2", # categories
"c:sparse;d:sparse_na;b:f8", # sparse
]
for mgr_string in mgr_strings:
bm = create_mgr(mgr_string)
block_perms = itertools.permutations(bm.blocks)
for bm_perm in block_perms:
bm_this = BlockManager(bm_perm, bm.axes)
self.assertTrue(bm.equals(bm_this))
self.assertTrue(bm_this.equals(bm))
def test_single_mgr_ctor(self):
mgr = create_single_mgr('f8', num_rows=5)
self.assertEqual(mgr.as_matrix().tolist(), [0., 1., 2., 3., 4.])
class TestIndexing(object):
# Nosetests-style data-driven tests.
#
# This test applies different indexing routines to block managers and
# compares the outcome to the result of same operations on np.ndarray.
#
    # NOTE: sparse blocks (SparseBlock with fill_value != np.nan) fail a lot of
    # tests and are disabled.
MANAGERS = [
create_single_mgr('f8', N),
create_single_mgr('i8', N),
#create_single_mgr('sparse', N),
create_single_mgr('sparse_na', N),
# 2-dim
create_mgr('a,b,c,d,e,f: f8', item_shape=(N,)),
create_mgr('a,b,c,d,e,f: i8', item_shape=(N,)),
create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N,)),
create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N,)),
#create_mgr('a: sparse', item_shape=(N,)),
create_mgr('a: sparse_na', item_shape=(N,)),
# 3-dim
create_mgr('a,b,c,d,e,f: f8', item_shape=(N, N)),
create_mgr('a,b,c,d,e,f: i8', item_shape=(N, N)),
create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N, N)),
create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N, N)),
# create_mgr('a: sparse', item_shape=(1, N)),
]
# MANAGERS = [MANAGERS[6]]
def test_get_slice(self):
def assert_slice_ok(mgr, axis, slobj):
# import pudb; pudb.set_trace()
mat = mgr.as_matrix()
            # we may be using an ndarray to test slicing that
            # might not be the full length of the axis
if isinstance(slobj, np.ndarray):
ax = mgr.axes[axis]
if len(ax) and len(slobj) and len(slobj) != len(ax):
slobj = np.concatenate([slobj, np.zeros(len(ax)-len(slobj),dtype=bool)])
sliced = mgr.get_slice(slobj, axis=axis)
mat_slobj = (slice(None),) * axis + (slobj,)
assert_almost_equal(mat[mat_slobj], sliced.as_matrix())
assert_almost_equal(mgr.axes[axis][slobj], sliced.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# slice
yield assert_slice_ok, mgr, ax, slice(None)
yield assert_slice_ok, mgr, ax, slice(3)
yield assert_slice_ok, mgr, ax, slice(100)
yield assert_slice_ok, mgr, ax, slice(1, 4)
yield assert_slice_ok, mgr, ax, slice(3, 0, -2)
# boolean mask
yield assert_slice_ok, mgr, ax, np.array([], dtype=np.bool_)
yield (assert_slice_ok, mgr, ax,
np.ones(mgr.shape[ax], dtype=np.bool_))
yield (assert_slice_ok, mgr, ax,
np.zeros(mgr.shape[ax], dtype=np.bool_))
if mgr.shape[ax] >= 3:
yield (assert_slice_ok, mgr, ax,
np.arange(mgr.shape[ax]) % 3 == 0)
yield (assert_slice_ok, mgr, ax,
np.array([True, True, False], dtype=np.bool_))
# fancy indexer
yield assert_slice_ok, mgr, ax, []
yield assert_slice_ok, mgr, ax, lrange(mgr.shape[ax])
if mgr.shape[ax] >= 3:
yield assert_slice_ok, mgr, ax, [0, 1, 2]
yield assert_slice_ok, mgr, ax, [-1, -2, -3]
def test_take(self):
def assert_take_ok(mgr, axis, indexer):
mat = mgr.as_matrix()
taken = mgr.take(indexer, axis)
assert_almost_equal(np.take(mat, indexer, axis),
taken.as_matrix())
assert_almost_equal(mgr.axes[axis].take(indexer),
taken.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# take/fancy indexer
yield assert_take_ok, mgr, ax, []
yield assert_take_ok, mgr, ax, [0, 0, 0]
yield assert_take_ok, mgr, ax, lrange(mgr.shape[ax])
if mgr.shape[ax] >= 3:
yield assert_take_ok, mgr, ax, [0, 1, 2]
yield assert_take_ok, mgr, ax, [-1, -2, -3]
def test_reindex_axis(self):
def assert_reindex_axis_is_ok(mgr, axis, new_labels,
fill_value):
mat = mgr.as_matrix()
indexer = mgr.axes[axis].get_indexer_for(new_labels)
reindexed = mgr.reindex_axis(new_labels, axis,
fill_value=fill_value)
assert_almost_equal(com.take_nd(mat, indexer, axis,
fill_value=fill_value),
reindexed.as_matrix())
assert_almost_equal(reindexed.axes[axis], new_labels)
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.):
yield assert_reindex_axis_is_ok, mgr, ax, [], fill_value
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax], fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax][[0, 0, 0]], fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
['foo', 'bar', 'baz'], fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
['foo', mgr.axes[ax][0], 'baz'], fill_value)
if mgr.shape[ax] >= 3:
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax][:-3], fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax][-3::-1], fill_value)
yield (assert_reindex_axis_is_ok, mgr, ax,
mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value)
def test_reindex_indexer(self):
def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,
fill_value):
mat = mgr.as_matrix()
reindexed_mat = com.take_nd(mat, indexer, axis,
fill_value=fill_value)
reindexed = mgr.reindex_indexer(new_labels, indexer, axis,
fill_value=fill_value)
assert_almost_equal(reindexed_mat, reindexed.as_matrix())
assert_almost_equal(reindexed.axes[axis], new_labels)
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.):
yield (assert_reindex_indexer_is_ok, mgr, ax,
[], [], fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
['foo'] * mgr.shape[ax], np.arange(mgr.shape[ax]),
fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
mgr.axes[ax][::-1], np.arange(mgr.shape[ax]),
fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
mgr.axes[ax], np.arange(mgr.shape[ax])[::-1],
fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
['foo', 'bar', 'baz'], [0, 0, 0], fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
['foo', 'bar', 'baz'], [-1, 0, -1], fill_value)
yield (assert_reindex_indexer_is_ok, mgr, ax,
['foo', mgr.axes[ax][0], 'baz'], [-1, -1, -1],
fill_value)
if mgr.shape[ax] >= 3:
yield (assert_reindex_indexer_is_ok, mgr, ax,
['foo', 'bar', 'baz'], [0, 1, 2], fill_value)
# test_get_slice(slice_like, axis)
# take(indexer, axis)
# reindex_axis(new_labels, axis)
# reindex_indexer(new_labels, indexer, axis)
class TestBlockPlacement(tm.TestCase):
_multiprocess_can_split_ = True
def test_slice_len(self):
self.assertEqual(len(BlockPlacement(slice(0, 4))), 4)
self.assertEqual(len(BlockPlacement(slice(0, 4, 2))), 2)
self.assertEqual(len(BlockPlacement(slice(0, 3, 2))), 2)
self.assertEqual(len(BlockPlacement(slice(0, 1, 2))), 1)
self.assertEqual(len(BlockPlacement(slice(1, 0, -1))), 1)
def test_zero_step_raises(self):
self.assertRaises(ValueError, BlockPlacement, slice(1, 1, 0))
self.assertRaises(ValueError, BlockPlacement, slice(1, 2, 0))
def test_unbounded_slice_raises(self):
def assert_unbounded_slice_error(slc):
# assertRaisesRegexp is not available in py2.6
# self.assertRaisesRegexp(ValueError, "unbounded slice",
# lambda: BlockPlacement(slc))
self.assertRaises(ValueError, BlockPlacement, slc)
assert_unbounded_slice_error(slice(None, None))
assert_unbounded_slice_error(slice(10, None))
assert_unbounded_slice_error(slice(None, None, -1))
assert_unbounded_slice_error(slice(None, 10, -1))
# These are "unbounded" because negative index will change depending on
# container shape.
assert_unbounded_slice_error(slice(-1, None))
assert_unbounded_slice_error(slice(None, -1))
assert_unbounded_slice_error(slice(-1, -1))
assert_unbounded_slice_error(slice(-1, None, -1))
assert_unbounded_slice_error(slice(None, -1, -1))
assert_unbounded_slice_error(slice(-1, -1, -1))
def test_not_slice_like_slices(self):
def assert_not_slice_like(slc):
self.assertTrue(not BlockPlacement(slc).is_slice_like)
assert_not_slice_like(slice(0, 0))
assert_not_slice_like(slice(100, 0))
assert_not_slice_like(slice(100, 100, -1))
assert_not_slice_like(slice(0, 100, -1))
self.assertTrue(not BlockPlacement(slice(0, 0)).is_slice_like)
self.assertTrue(not BlockPlacement(slice(100, 100)).is_slice_like)
def test_array_to_slice_conversion(self):
def assert_as_slice_equals(arr, slc):
self.assertEqual(BlockPlacement(arr).as_slice, slc)
assert_as_slice_equals([0], slice(0, 1, 1))
assert_as_slice_equals([100], slice(100, 101, 1))
assert_as_slice_equals([0, 1, 2], slice(0, 3, 1))
assert_as_slice_equals([0, 5, 10], slice(0, 15, 5))
assert_as_slice_equals([0, 100], slice(0, 200, 100))
assert_as_slice_equals([2, 1], slice(2, 0, -1))
assert_as_slice_equals([2, 1, 0], slice(2, None, -1))
assert_as_slice_equals([100, 0], slice(100, None, -100))
def test_not_slice_like_arrays(self):
def assert_not_slice_like(arr):
self.assertTrue(not BlockPlacement(arr).is_slice_like)
assert_not_slice_like([])
assert_not_slice_like([-1])
assert_not_slice_like([-1, -2, -3])
assert_not_slice_like([-10])
assert_not_slice_like([-1])
assert_not_slice_like([-1, 0, 1, 2])
assert_not_slice_like([-2, 0, 2, 4])
assert_not_slice_like([1, 0, -1])
assert_not_slice_like([1, 1, 1])
def test_slice_iter(self):
self.assertEqual(list(BlockPlacement(slice(0, 3))), [0, 1, 2])
self.assertEqual(list(BlockPlacement(slice(0, 0))), [])
self.assertEqual(list(BlockPlacement(slice(3, 0))), [])
self.assertEqual(list(BlockPlacement(slice(3, 0, -1))), [3, 2, 1])
self.assertEqual(list(BlockPlacement(slice(3, None, -1))),
[3, 2, 1, 0])
def test_slice_to_array_conversion(self):
def assert_as_array_equals(slc, asarray):
tm.assert_numpy_array_equal(
BlockPlacement(slc).as_array,
np.asarray(asarray))
assert_as_array_equals(slice(0, 3), [0, 1, 2])
assert_as_array_equals(slice(0, 0), [])
assert_as_array_equals(slice(3, 0), [])
assert_as_array_equals(slice(3, 0, -1), [3, 2, 1])
assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])
assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])
def test_blockplacement_add(self):
bpl = BlockPlacement(slice(0, 5))
self.assertEqual(bpl.add(1).as_slice, slice(1, 6, 1))
self.assertEqual(bpl.add(np.arange(5)).as_slice,
slice(0, 10, 2))
self.assertEqual(list(bpl.add(np.arange(5, 0, -1))),
[5, 5, 5, 5, 5])
def test_blockplacement_add_int(self):
def assert_add_equals(val, inc, result):
self.assertEqual(list(BlockPlacement(val).add(inc)),
result)
assert_add_equals(slice(0, 0), 0, [])
assert_add_equals(slice(1, 4), 0, [1, 2, 3])
assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1])
assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])
assert_add_equals([1, 2, 4], 0, [1, 2, 4])
assert_add_equals(slice(0, 0), 10, [])
assert_add_equals(slice(1, 4), 10, [11, 12, 13])
assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11])
assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])
assert_add_equals([1, 2, 4], 10, [11, 12, 14])
assert_add_equals(slice(0, 0), -1, [])
assert_add_equals(slice(1, 4), -1, [0, 1, 2])
assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])
assert_add_equals([1, 2, 4], -1, [0, 1, 3])
self.assertRaises(ValueError,
lambda: BlockPlacement(slice(1, 4)).add(-10))
self.assertRaises(ValueError,
lambda: BlockPlacement([1, 2, 4]).add(-10))
self.assertRaises(ValueError,
lambda: BlockPlacement(slice(2, None, -1)).add(-1))
# def test_blockplacement_array_add(self):
# assert_add_equals(slice(0, 2), [0, 1, 1], [0, 2, 3])
# assert_add_equals(slice(2, None, -1), [1, 1, 0], [3, 2, 0])
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
pySTEPS/pysteps | examples/plot_optical_flow.py | 1 | 5240 | """
Optical flow
============
This tutorial offers a short overview of the optical flow routines available in
pysteps and it will cover how to compute and plot the motion field from a
sequence of radar images.
"""
from datetime import datetime
from pprint import pprint
import matplotlib.pyplot as plt
import numpy as np
from pysteps import io, motion, rcparams
from pysteps.utils import conversion, transformation
from pysteps.visualization import plot_precip_field, quiver
################################################################################
# Read the radar input images
# ---------------------------
#
# First, we will import the sequence of radar composites.
# You need the pysteps-data archive downloaded and the pystepsrc file
# configured with the data_source paths pointing to data folders.
# Selected case
date = datetime.strptime("201505151630", "%Y%m%d%H%M")
data_source = rcparams.data_sources["mch"]
###############################################################################
# Load the data from the archive
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
timestep = data_source["timestep"]
# Find the input files from the archive
fns = io.archive.find_by_date(
date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_prev_files=9
)
# Read the radar composites
importer = io.get_method(importer_name, "importer")
R, quality, metadata = io.read_timeseries(fns, importer, **importer_kwargs)
del quality # Not used
###############################################################################
# Preprocess the data
# ~~~~~~~~~~~~~~~~~~~
# Convert to mm/h
R, metadata = conversion.to_rainrate(R, metadata)
# Store the reference frame
R_ = R[-1, :, :].copy()
# Log-transform the data [dBR]
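# (rain rates below the 0.1 mm/h threshold are mapped to the zerovalue of -15 dBR)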
R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0)
# Nicely print the metadata
pprint(metadata)
################################################################################
# Lucas-Kanade (LK)
# -----------------
#
# The Lucas-Kanade optical flow method implemented in pysteps is a local
# tracking approach that relies on the OpenCV package.
# Local features are tracked in a sequence of two or more radar images. The
# scheme includes a final interpolation step in order to produce a smooth
# field of motion vectors.
oflow_method = motion.get_method("LK")
V1 = oflow_method(R[-3:, :, :])
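# V1 is expected to be a (2, m, n) array holding the x- and y-components of the
# motion vectors, expressed in grid points per time step.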
# Plot the motion field on top of the reference frame
plot_precip_field(R_, geodata=metadata, title="LK")
quiver(V1, geodata=metadata, step=25)
plt.show()
################################################################################
# Variational echo tracking (VET)
# -------------------------------
#
# This module implements the VET algorithm presented
# by Laroche and Zawadzki (1995) and used in the McGill Algorithm for
# Prediction by Lagrangian Extrapolation (MAPLE) described in
# Germann and Zawadzki (2002).
# The approach essentially consists of a global optimization routine that seeks
# to minimize a cost function between the displaced and the reference image.
oflow_method = motion.get_method("VET")
V2 = oflow_method(R[-3:, :, :])
# Plot the motion field
plot_precip_field(R_, geodata=metadata, title="VET")
quiver(V2, geodata=metadata, step=25)
plt.show()
################################################################################
# Dynamic and adaptive radar tracking of storms (DARTS)
# -----------------------------------------------------
#
# DARTS uses a spectral approach to optical flow that is based on the discrete
# Fourier transform (DFT) of a temporal sequence of radar fields.
# The level of truncation of the DFT coefficients controls the degree of
# smoothness of the estimated motion field, allowing for an efficient
# motion estimation. DARTS requires a longer sequence of radar fields to
# estimate the motion; here we use all 10 available fields.
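# (The truncation level can typically be tuned through keyword arguments of the
# DARTS method; the exact parameter names are not spelled out here -- see the
# pysteps documentation.)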
oflow_method = motion.get_method("DARTS")
R[~np.isfinite(R)] = metadata["zerovalue"]
V3 = oflow_method(R) # needs longer training sequence
# Plot the motion field
plot_precip_field(R_, geodata=metadata, title="DARTS")
quiver(V3, geodata=metadata, step=25)
plt.show()
################################################################################
# Anisotropic diffusion method (Proesmans et al 1994)
# ---------------------------------------------------
#
# This module implements the anisotropic diffusion method presented in Proesmans
# et al. (1994), a robust optical flow technique which employs the notion of
# inconsistency during the solution of the optical flow equations.
oflow_method = motion.get_method("proesmans")
R[~np.isfinite(R)] = metadata["zerovalue"]
V4 = oflow_method(R[-2:, :, :])
# Plot the motion field
plot_precip_field(R_, geodata=metadata, title="Proesmans")
quiver(V4, geodata=metadata, step=25)
plt.show()
# sphinx_gallery_thumbnail_number = 1
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/units/evans_test.py | 3 | 2335 | """
A mockup "Foo" units class which supports
conversion and different tick formatting depending on the "unit".
Here the "unit" is just a scalar conversion factor, but this example shows mpl is
entirely agnostic to what kind of units client packages use
"""
import matplotlib
from matplotlib.cbook import iterable
import matplotlib.units as units
import matplotlib.ticker as ticker
from pylab import figure, show
class Foo:
def __init__( self, val, unit=1.0 ):
self.unit = unit
self._val = val * unit
def value( self, unit ):
if unit is None: unit = self.unit
return self._val / unit
class FooConverter:
@staticmethod
def axisinfo(unit, axis):
'return the Foo AxisInfo'
if unit==1.0 or unit==2.0:
return units.AxisInfo(
majloc = ticker.IndexLocator( 8, 0 ),
majfmt = ticker.FormatStrFormatter("VAL: %s"),
label='foo',
)
else:
return None
@staticmethod
def convert(obj, unit, axis):
"""
convert obj using unit. If obj is a sequence, return the
converted sequence
"""
if units.ConversionInterface.is_numlike(obj):
return obj
if iterable(obj):
return [o.value(unit) for o in obj]
else:
return obj.value(unit)
@staticmethod
def default_units(x, axis):
'return the default unit for x or None'
if iterable(x):
for thisx in x:
return thisx.unit
else:
return x.unit
units.registry[Foo] = FooConverter()
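# With the converter registered, plotting Foo instances makes matplotlib consult
# units.registry: default_units() picks the unit when none is passed explicitly,
# convert() turns Foo objects into plain numbers, and axisinfo() supplies the tick
# locator, formatter and axis label for that unit.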
# create some Foos
x = []
for val in range( 0, 50, 2 ):
x.append( Foo( val, 1.0 ) )
# and some arbitrary y data
y = [i for i in range( len(x) ) ]
# plot specifying units
fig = figure()
fig.suptitle("Custom units")
fig.subplots_adjust(bottom=0.2)
ax = fig.add_subplot(1,2,2)
ax.plot( x, y, 'o', xunits=2.0 )
for label in ax.get_xticklabels():
label.set_rotation(30)
label.set_ha('right')
ax.set_title("xunits = 2.0")
# plot without specifying units; will use the None branch for axisinfo
ax = fig.add_subplot(1,2,1)
ax.plot( x, y ) # uses default units
ax.set_title('default units')
for label in ax.get_xticklabels():
label.set_rotation(30)
label.set_ha('right')
show()
| gpl-2.0 |
sjperkins/tensorflow | tensorflow/python/estimator/canned/dnn_linear_combined_test.py | 5 | 26973 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn_linear_combined.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn_linear_combined
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import linear_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer as optimizer_lib
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class DNNOnlyModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, self._dnn_only_model_fn)
def _dnn_only_model_fn(self,
features,
labels,
mode,
head,
hidden_units,
feature_columns,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
return dnn_linear_combined._dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
linear_feature_columns=[],
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
# A function to mimic linear-regressor init reuse same tests.
def _linear_regressor_fn(feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyRegressorPartitionerTest(
linear_testing_utils.BaseLinearRegressorPartitionerTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPartitionerTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorEvaluationTest(
linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorPredictTest(
linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPredictTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorIntegrationTest(
linear_testing_utils.BaseLinearRegressorIntegrationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorTrainingTest(
linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorTrainingTest.__init__(
self, _linear_regressor_fn)
def _linear_classifier_fn(feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyClassifierTrainingTest(
linear_testing_utils.BaseLinearClassifierTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierTrainingTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierClassesEvaluationTest(
linear_testing_utils.BaseLinearClassifierEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierPredictTest(
linear_testing_utils.BaseLinearClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierPredictTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierIntegrationTest(
linear_testing_utils.BaseLinearClassifierIntegrationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class DNNLinearCombinedRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
# A function to mimic dnn-classifier init reuse same tests.
def _dnn_classifier_fn(hidden_units,
feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Adagrad',
config=None,
input_layer_partitioner=None):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNOnlyClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNOnlyClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
# A function to mimic dnn-regressor init reuse same tests.
def _dnn_regressor_fn(hidden_units,
feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Adagrad',
config=None,
input_layer_partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNOnlyRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNOnlyRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
class DNNLinearCombinedClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = self._as_label(np.reshape(data[:batch_size], (batch_size, 1)))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 2
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes-1., batch_size * input_dimension,
dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
class DNNLinearCombinedTests(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, real_optimizer, var_name_prefix):
"""Verifies global_step is None and var_names start with given prefix."""
def _minimize(loss, global_step=None, var_list=None):
self.assertIsNone(global_step)
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
var_names = [var.name for var in trainable_vars]
self.assertTrue(
all([name.startswith(var_name_prefix) for name in var_names]))
      # var is used to check that this op is called during training.
var = variables_lib.Variable(0., name=(var_name_prefix + '_called'))
with ops.control_dependencies([var.assign(100.)]):
return real_optimizer.minimize(loss, global_step, var_list)
optimizer_mock = test.mock.NonCallableMagicMock(
spec=optimizer_lib.Optimizer, wraps=real_optimizer)
optimizer_mock.minimize = test.mock.MagicMock(wraps=_minimize)
return optimizer_mock
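  # How the mock above is used: it wraps a real optimizer, asserts that every
  # trainable variable handed to minimize() carries the expected scope prefix,
  # and chains the real minimize() behind an assignment of 100. to a sentinel
  # variable named '<prefix>_called'.  The tests below read that sentinel back
  # from the checkpoint to verify that each optimizer actually ran.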
def test_train_op_calls_both_dnn_and_linear(self):
opt = gradient_descent.GradientDescentOptimizer(1.)
x_column = feature_column.numeric_column('x')
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[0.], [1.]])},
y=np.array([[0.], [1.]]),
batch_size=1,
shuffle=False)
est = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[x_column],
# verifies linear_optimizer is used only for linear part.
linear_optimizer=self._mock_optimizer(opt, 'linear'),
dnn_hidden_units=(2, 2),
dnn_feature_columns=[x_column],
        # verifies dnn_optimizer is used only for the dnn part.
dnn_optimizer=self._mock_optimizer(opt, 'dnn'),
model_dir=self._model_dir)
est.train(input_fn, steps=1)
# verifies train_op fires linear minimize op
self.assertEqual(100.,
checkpoint_utils.load_variable(
self._model_dir, 'binary_logistic_head/linear_called'))
# verifies train_op fires dnn minimize op
self.assertEqual(100.,
checkpoint_utils.load_variable(
self._model_dir, 'binary_logistic_head/dnn_called'))
def test_dnn_and_linear_logits_are_added(self):
with ops.Graph().as_default():
variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights')
variables_lib.Variable([2.0], name='linear/linear_model/bias_weights')
variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel')
variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias')
variables_lib.Variable([[5.0]], name='dnn/logits/kernel')
variables_lib.Variable([6.0], name='dnn/logits/bias')
variables_lib.Variable(1, name='global_step', dtype=dtypes.int64)
linear_testing_utils.save_variables_to_ckpt(self._model_dir)
x_column = feature_column.numeric_column('x')
est = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[x_column],
dnn_hidden_units=[1],
dnn_feature_columns=[x_column],
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
# linear logits = 10*1 + 2 = 12
# dnn logits = (10*3 + 4)*5 + 6 = 176
# logits = dnn + linear = 176 + 12 = 188
self.assertAllClose(
{
prediction_keys.PredictionKeys.PREDICTIONS: [188.],
},
next(est.predict(input_fn=input_fn)))
if __name__ == '__main__':
test.main()
| apache-2.0 |
aweimann/traitar | traitar/heatmap.py | 1 | 19822 | #!/usr/bin/env python
#adapted from Nathan Salomonis: http://code.activestate.com/recipes/578175-hierarchical-clustering-heatmap-python/
import matplotlib as mpl
#pick non-x display
mpl.use('Agg')
import matplotlib.pyplot as pylab
import scipy
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
import numpy
import string
import time
import sys, os
import getopt
import numpy as np
import pandas as ps
from .PhenotypeCollection import PhenotypeCollection
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
#ignore these warnings
#/usr/lib/pymodules/python2.7/matplotlib/collections.py:548: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
# if self._edgecolors == 'face':
#/usr/lib/pymodules/python2.7/matplotlib/backends/backend_pdf.py:2184: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
# different = bool(ours != theirs)
################# Perform the hierarchical clustering #################
def heatmap(x, row_header, column_header, primary_pt_models, color_f, row_method,
column_method, row_metric, column_metric,
filename, sample_f, secondary_pt_models):
print "\nrunning hiearchical clustering using %s for columns and %s for rows" % (column_metric,row_metric)
"""
    The code below is based in large part on the prototype methods:
http://old.nabble.com/How-to-plot-heatmap-with-matplotlib--td32534593.html
http://stackoverflow.com/questions/7664826/how-to-get-flat-clustering-corresponding-to-color-clusters-in-the-dendrogram-cre
x is an m by n ndarray, m observations, n genes
"""
### Define the color gradient to use based on the provided name
#if color_gradient == 'red_white_blue':
# cmap=pylab.cm.bwr
#if color_gradient == 'red_black_sky':
# cmap=RedBlackSkyBlue()
#if color_gradient == 'red_black_blue':
# cmap=RedBlackBlue()
#if color_gradient == 'red_black_green':
# cmap=RedBlackGreen()
#if color_gradient == 'yellow_black_blue':
# cmap=YellowBlackBlue()
#if color_gradient == 'seismic':
# cmap=pylab.cm.seismic
#if color_gradient == 'green_white_purple':
# cmap=pylab.cm.PiYG_r
#if color_gradient == 'coolwarm':
# cmap=pylab.cm.coolwarm
### Scale the max and min colors so that 0 is white/black
#vmin=x.min()
#vmax=x.max()
#vmax = max([vmax,abs(vmin)])
#vmin = vmax*-1
#norm = mpl.colors.Normalize(vmin/2, vmax/2) ### adjust the max and min to scale these colors
### Scale the Matplotlib window size
    default_window_height = 10.5
    default_window_width = 10
    fig = pylab.figure(figsize=(default_window_width,default_window_height)) ### could use m,n to scale here
    color_bar_w = 0.015 ### Sufficient size to show
## calculate positions for all elements
# ax1, placement of dendrogram 1, on the left of the heatmap
#if row_method != None: w1 =
[ax1_x, ax1_y, ax1_w, ax1_h] = [0.05,0.42,0.2,0.4] ### The second value controls the position of the matrix relative to the bottom of the view
width_between_ax1_axr = 0.004
height_between_ax1_axc = 0.004 ### distance between the top color bar axis and the matrix
# axr, placement of row side colorbar
[axr_x, axr_y, axr_w, axr_h] = [0.31,0.1,color_bar_w,0.6] ### second to last controls the width of the side color bar - 0.015 when showing
axr_x = ax1_x + ax1_w + width_between_ax1_axr
axr_y = ax1_y; axr_h = ax1_h
width_between_axr_axm = 0.004
# axc, placement of column side colorbar
    [axc_x, axc_y, axc_w, axc_h] = [0.4,0.63,0.5,color_bar_w] ### last one controls the height of the top color bar - 0.015 when showing
axc_x = axr_x + axr_w + width_between_axr_axm
axc_y = ax1_y + ax1_h + height_between_ax1_axc
height_between_axc_ax2 = 0.004
# axm, placement of heatmap for the data matrix
[axm_x, axm_y, axm_w, axm_h] = [0.4,0.9,2.5,0.5]
axm_x = axr_x + axr_w + width_between_axr_axm
axm_y = ax1_y; axm_h = ax1_h
axm_w = axc_w
# ax2, placement of dendrogram 2, on the top of the heatmap
    [ax2_x, ax2_y, ax2_w, ax2_h] = [0.3,0.72,0.6,0.15] ### last one controls the height of the dendrogram
ax2_x = axr_x + axr_w + width_between_axr_axm
ax2_y = ax1_y + ax1_h + height_between_ax1_axc + axc_h + height_between_axc_ax2
ax2_w = axc_w
# placement of the phenotype legend
[axpl_x, axpl_y, axpl_w, axpl_h] = [0.78,0.84,0.05,0.13]
    # axsl - placement of the sample legend
[axsl_x, axsl_y, axsl_w, axsl_h] = [0.05,0.29,0.05,0.09]
# axcb - placement of the color legend
[axcb_x, axcb_y, axcb_w, axcb_h] = [0.05, 0.88,0.05,0.09]
# Compute and plot top dendrogram
if not column_method is None and x.shape[1] > 1:
start_time = time.time()
d2 = dist.pdist(x.T)
D2 = dist.squareform(d2)
ax2 = fig.add_axes([ax2_x, ax2_y, ax2_w, ax2_h], frame_on=True)
Y2 = sch.linkage(D2, method=column_method, metric=column_metric) ### array-clustering metric - 'average', 'single', 'centroid', 'complete'
Z2 = sch.dendrogram(Y2)
ind2 = sch.fcluster(Y2,0.7*max(Y2[:,2]),'distance') ### This is the default behavior of dendrogram
time_diff = str(round(time.time()-start_time,1))
ax2.set_xticks([]) ### Hides ticks
ax2.set_yticks([])
#print 'Column clustering completed in %s seconds' % time_diff
else:
ind2 = ['NA']*len(column_header) ### Used for exporting the flat cluster data
# Compute and plot left dendrogram.
if not row_method is None and x.shape[0] > 1:
start_time = time.time()
x_bin = x.copy()
x_bin[x_bin > 0] = 1
d1 = dist.pdist(x_bin)
D1 = dist.squareform(d1) # full matrix
ax1 = fig.add_axes([ax1_x, ax1_y, ax1_w, ax1_h], frame_on=True) # frame_on may be False
ax1.set_xticks([]) ### Hides ticks
ax1.set_yticks([])
Y1 = sch.linkage(D1, method=row_method, metric=row_metric) ### gene-clustering metric - 'average', 'single', 'centroid', 'complete'
Z1 = sch.dendrogram(Y1, orientation='right')
ind1 = sch.fcluster(Y1,0.7*max(Y1[:,2]),'distance') ### This is the default behavior of dendrogram
time_diff = str(round(time.time()-start_time,1))
#print 'Row clustering completed in %s seconds' % time_diff
else:
ind1 = ['NA']*len(row_header) ### Used for exporting the flat cluster data
# Plot heatmap color legend
n = len(x[0]); m = len(x)
if secondary_pt_models is not None:
cmaplist = np.array([[247,247,247],[166,206,227],[178,223,138],[31,120,180]])/256.0
else:
cmaplist = np.array([[247,247,247],[31,120,180]])/256.0
cmap = mpl.colors.ListedColormap(cmaplist)
axcb = fig.add_axes([axcb_x, axcb_y, axcb_w, axcb_h], frame_on=False) # axes for colorbar
#cb = mpl.colorbar.ColorbarBase(axcb, cmap=cmap, orientation='horizontal')
bounds = numpy.linspace(0, len(cmaplist), len(cmaplist) + 1)
norm = mpl.colors.BoundaryNorm(bounds, len(cmaplist))
cb = mpl.colorbar.ColorbarBase(axcb, cmap=cmap, norm=norm, spacing='proportional', ticks=bounds, boundaries=bounds)
if secondary_pt_models is not None:
axcb.set_yticklabels(["negative", "%s positive" % primary_pt_models.get_name(), "%s positive" % secondary_pt_models.get_name(), "both predictors positive"], fontsize = 8)
axcb.yaxis.set_ticks([0.125, 0.375, 0.625, 0.875])
else:
axcb.set_yticklabels(["%s negative" % primary_pt_models.get_name(), "%s positive" % primary_pt_models.get_name()], fontsize = 8)
axcb.yaxis.set_ticks([0.25, 0.75])
axcb.set_title("Heatmap colorkey", fontsize = 10, loc = "left")
# Plot distance matrix.
axm = fig.add_axes([axm_x, axm_y, axm_w, axm_h]) # axes for the data matrix
xt = x
if not column_method is None and x.shape[1] > 1:
idx2 = Z2['leaves'] ### apply the clustering for the array-dendrograms to the actual matrix data
xt = xt[:,idx2]
        ind2 = ind2[idx2] ### reorder the flat cluster to match the order of the leaves of the dendrogram
pass
if not row_method is None and x.shape[0] > 1 :
idx1 = Z1['leaves'] ### apply the clustering for the gene-dendrograms to the actual matrix data
xt = xt[idx1,:] # xt is transformed x
        ind1 = ind1[idx1] ### reorder the flat cluster to match the order of the leaves of the dendrogram
### taken from http://stackoverflow.com/questions/2982929/plotting-results-of-hierarchical-clustering-ontop-of-a-matrix-of-data-in-python/3011894#3011894
im = axm.matshow(xt, aspect='auto', origin='lower', cmap=cmap, norm=norm) ### norm=norm added to scale coloring of expression with zero = white or black
axm.set_xticks([]) ### Hides x-ticks
axm.set_yticks([])
# Add text
new_row_header=[]
new_column_header=[]
for i in range(x.shape[0]):
margin = 0
if len(row_header) > 0 :
fontdict = {'fontsize': 7}
if len(row_header) > 30 :
fontdict = {'fontsize': 7}
margin = 0.5
if len(row_header) > 50 :
fontdict = {'fontsize': 4}
if len(row_header) > 100 :
fontdict = {'fontsize': 2}
if len(row_header) > 200:
fontdict = {'fontsize': 1}
#if len(row_header)<100: ### Don't visualize gene associations when more than 100 rows
axm.plot([-0.5, len(column_header)], [i - 0.5, i - 0.5], color = 'black', ls = '-')
if x.shape[0] > 1 and row_method is not None:
label = row_header[idx1[i]]
else:
label = row_header[i]
fontdict.items
axm.text(x.shape[1] + 0.2, i - margin , ' ' + label, fontdict = fontdict)
new_row_header.append(label)
for i in range(x.shape[1]):
if not column_method is None and x.shape[1] > 1:
axm.plot([i-0.5, i-0.5], [-0.5, len(row_header) - 0.5], color = 'black', ls = '-')
axm.text(i-0.5, -0.5, ' '+ column_header[idx2[i]], fontdict = {'fontsize': 7}, rotation=270, verticalalignment="top") # rotation could also be degrees
new_column_header.append(column_header[idx2[i]])
else: ### When not clustering columns
axm.plot([i-0.5, i-0.5], [-0.5, len(row_header) - 0.5], color = 'black', ls = '-')
axm.text(i-0.5, -0.8, ' '+column_header[i], fontdict = {'fontsize': 7}, rotation=270, verticalalignment="top")
new_column_header.append(column_header[i])
pt2acc = primary_pt_models.get_pt2acc()
pt2acc.index = pt2acc.loc[:, "accession"]
#colors
colors = ps.read_csv(color_f, index_col = None, sep = "\t")
if "category" in pt2acc.columns:
#assign categories to colors
import sets
#get unique categories in the order they appear in the pt mapping table
cats = sorted(set(pt2acc.loc[:, "category"].tolist()), key=lambda x: pt2acc.loc[:, "category"].tolist().index(x))
if not colors.shape[0] < len(cats):
# Plot phenotype legend
axpl = fig.add_axes([axpl_x, axpl_y, axpl_w, axpl_h], frame_on=False) # axes for colorbar
#for i in pt2cat2col.index:
# if pt2cat2col.loc[i,"Category"] not in cat2col:
# cat2col[pt2cat2col.loc[i,"Category"]] = pt2cat2col.loc[i, ["r", "g", "b"]]
# col2id[pt2cat2col.loc[i,"Category"]] = j
# j += 1
pt2cat = dict([(pt2acc.loc[i, "accession"], pt2acc.loc[i, "category"]) for i in pt2acc.index])
cat2id = dict([(cats[i - 1], i) for i in range(1, len(cats) + 1)])
cmaplist = ps.DataFrame(colors.iloc[:len(cats),])
cmaplist.index = cats
cmaplist = cmaplist / 256.0
cmap_p = mpl.colors.ListedColormap(cmaplist.values)
bounds = numpy.linspace(0, cmaplist.shape[0], cmaplist.shape[0] + 1)
norm = mpl.colors.BoundaryNorm(bounds, cmaplist.shape[0])
cb = mpl.colorbar.ColorbarBase(axpl, cmap=cmap_p, norm=norm, spacing='proportional', ticks=bounds, boundaries=bounds)
axpl.set_yticklabels([i for i in cats], fontsize = 6)
axpl.yaxis.set_ticks(np.arange(1.0 / len(cats) / 2, 1, 1.0 / len(cats)))
axpl.set_title("Phenotype colorkey", fontsize = 10, loc = "left")
# Plot colside colors
# axc --> axes for column side colorbar
axc = fig.add_axes([axc_x, axc_y, axc_w, axc_h]) # axes for column side colorbar
dc = numpy.array([cat2id[pt2cat[i]] for i in column_header]).T
if x.shape[1] > 1 and column_method is not None:
dc = dc[idx2]
dc.shape = (1, x.shape[1])
im_c = axc.matshow(dc, aspect='auto', origin='lower', cmap=cmap_p)
axc.set_xticks([]) ### Hides ticks
axc.set_yticks([])
# Plot rowside colors
if sample_f is not None and x.shape[0] > 1:
samples = ps.read_csv(sample_f, sep = "\t", index_col = "sample_name")
if "category" in samples.columns:
#get unique sample categories and sort according to the order they appear in the sampling file
sample_cats = sorted(set(samples.loc[:, "category"].tolist()), key = lambda x: samples.loc[:, "category"].tolist().index(x))
cat2col = dict([(sample_cats[i - 1], i) for i in range(1, len(sample_cats) + 1)])
cmaplist = ps.DataFrame(colors.iloc[:len(sample_cats),]) / 256.0
cmap_p = mpl.colors.ListedColormap(cmaplist.values)
axr = fig.add_axes([axr_x, axr_y, axr_w, axr_h]) # axes for row side colorbar
dr = numpy.array([cat2col[samples.loc[i, "category"]] for i in row_header]).T
if row_method is not None:
dr = dr[idx1]
dr.shape = (samples.shape[0], 1)
#cmap_r = mpl.colors.ListedColormap(['r', 'g', 'b', 'y', 'w', 'k', 'm'])
im_r = axr.matshow(dr, aspect='auto', origin='lower', cmap=cmap_p)
axr.set_xticks([]) ### Hides ticks
axr.set_yticks([])
# Plot sample legend
axsl = fig.add_axes([axsl_x, axsl_y, axsl_w, axsl_h], frame_on=False) # axes for colorbar
bounds = numpy.linspace(0, len(sample_cats), len(sample_cats) + 1)
norm = mpl.colors.BoundaryNorm(bounds, len(sample_cats))
cb = mpl.colorbar.ColorbarBase(axsl, cmap=cmap_p, norm=norm, spacing='proportional', ticks=bounds, boundaries=bounds)
axsl.yaxis.set_ticks(np.arange(1.0 / len(sample_cats) / 2, 1, 1.0 / len(sample_cats)))
axsl.set_yticklabels([i for i in sample_cats], fontsize = 6)
axsl.set_title("Sample colorkey", loc = "left", fontsize = 10)
#exportFlatClusterData(filename, new_row_header,new_column_header,xt,ind1,ind2)
### Render the graphic
if len(row_header)>50 or len(column_header)>50:
pylab.rcParams['font.size'] = 6
else:
pylab.rcParams['font.size'] = 8
pylab.savefig(filename)
pylab.savefig(filename, dpi=300) #,dpi=200
#pylab.show()
def getColorRange(x):
""" Determines the range of colors, centered at zero, for normalizing cmap """
vmax=x.max()
vmin=x.min()
if vmax<0 and vmin<0: direction = 'negative'
elif vmax>0 and vmin>0: direction = 'positive'
else: direction = 'both'
if direction == 'both':
vmax = max([vmax,abs(vmin)])
vmin = -1*vmax
return vmax,vmin
else:
return vmax,vmin
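# Worked example (illustrative): for data spanning both signs, e.g.
# x = np.array([-0.3, 0.5, 2.0]), getColorRange returns (2.0, -2.0) so that a
# diverging colormap is centred on zero; for all-positive or all-negative data
# the raw (max, min) pair is returned unchanged.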
################# Export the flat cluster data #################
def exportFlatClusterData(filename, new_row_header,new_column_header,xt,ind1,ind2):
""" Export the clustered results as a text file, only indicating the flat-clusters rather than the tree """
filename = string.replace(filename,'.pdf','.txt')
export_text = open(filename,'w')
column_header = string.join(['UID','row_clusters-flat']+new_column_header,'\t')+'\n' ### format column-names for export
export_text.write(column_header)
column_clusters = string.join(['column_clusters-flat','']+ map(str, ind2),'\t')+'\n' ### format column-flat-clusters for export
export_text.write(column_clusters)
### The clusters, dendrogram and flat clusters are drawn bottom-up, so we need to reverse the order to match
new_row_header = new_row_header[::-1]
xt = xt[::-1]
### Export each row in the clustered data matrix xt
i=0
for row in xt:
export_text.write(string.join([new_row_header[i],str(ind1[i])]+map(str, row),'\t')+'\n')
i+=1
export_text.close()
### Export as CDT file
filename = string.replace(filename,'.txt','.cdt')
export_cdt = open(filename,'w')
column_header = string.join(['UNIQID','NAME','GWEIGHT']+new_column_header,'\t')+'\n' ### format column-names for export
export_cdt.write(column_header)
eweight = string.join(['EWEIGHT','','']+ ['1']*len(new_column_header),'\t')+'\n' ### format column-flat-clusters for export
export_cdt.write(eweight)
### Export each row in the clustered data matrix xt
i=0
for row in xt:
export_cdt.write(string.join([new_row_header[i]]*2+['1']+map(str, row),'\t')+'\n')
i+=1
export_cdt.close()
################# Create Custom Color Gradients #################
#http://matplotlib.sourceforge.net/examples/pylab_examples/custom_cmap.html
def RedBlackSkyBlue():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.9),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def RedBlackBlue():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def RedBlackGreen():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def YellowBlackBlue():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.8),
(0.5, 0.1, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
### yellow is created by adding y = 1 to RedBlackSkyBlue green last tuple
### modulate between blue and cyan using the last y var in the first green tuple
my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
| gpl-3.0 |
balazssimon/ml-playground | udemy/lazyprogrammer/reinforcement-learning-python/comparing_explore_exploit_methods.py | 1 | 2913 | import numpy as np
import matplotlib.pyplot as plt
from comparing_epsilons import Bandit
from optimistic_initial_values import run_experiment as run_experiment_oiv
from ucb1 import run_experiment as run_experiment_ucb
class BayesianBandit:
def __init__(self, true_mean):
self.true_mean = true_mean
# parameters for mu - prior is N(0,1)
self.predicted_mean = 0
self.lambda_ = 1
self.sum_x = 0 # for convenience
self.tau = 1
def pull(self):
return np.random.randn() + self.true_mean
def sample(self):
return np.random.randn() / np.sqrt(self.lambda_) + self.predicted_mean
def update(self, x):
self.lambda_ += self.tau
self.sum_x += x
self.predicted_mean = self.tau*self.sum_x / self.lambda_
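# The update above is the conjugate-Gaussian posterior for a unit-precision
# (tau = 1) likelihood with a N(0, 1) prior on the mean: after n pulls with
# sample sum S the posterior is N(S / (1 + n), 1 / (1 + n)), and sample()
# draws from it, so taking the argmax over samples (as run_experiment below
# does) is Thompson sampling.
# Quick illustrative self-check (hypothetical helper, not called anywhere):
def _check_bayesian_update():
  b = BayesianBandit(true_mean=1.0)
  for x in (2.0, 4.0):
    b.update(x)
  # lambda_ = 1 + 2 = 3 and predicted_mean = (2.0 + 4.0) / 3 = 2.0
  return b.lambda_ == 3 and abs(b.predicted_mean - 2.0) < 1e-12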
def run_experiment_decaying_epsilon(m1, m2, m3, N):
bandits = [Bandit(m1), Bandit(m2), Bandit(m3)]
data = np.empty(N)
for i in range(N):
# epsilon greedy
p = np.random.random()
if p < 1.0/(i+1):
j = np.random.choice(3)
else:
j = np.argmax([b.mean for b in bandits])
x = bandits[j].pull()
bandits[j].update(x)
# for the plot
data[i] = x
cumulative_average = np.cumsum(data) / (np.arange(N) + 1)
  # plot the moving average reward
plt.plot(cumulative_average)
plt.plot(np.ones(N)*m1)
plt.plot(np.ones(N)*m2)
plt.plot(np.ones(N)*m3)
plt.xscale('log')
plt.show()
for b in bandits:
print(b.mean)
return cumulative_average
def run_experiment(m1, m2, m3, N):
bandits = [BayesianBandit(m1), BayesianBandit(m2), BayesianBandit(m3)]
data = np.empty(N)
for i in range(N):
    # Thompson sampling: pick the arm with the largest posterior sample
j = np.argmax([b.sample() for b in bandits])
x = bandits[j].pull()
bandits[j].update(x)
# for the plot
data[i] = x
cumulative_average = np.cumsum(data) / (np.arange(N) + 1)
  # plot the moving average reward
plt.plot(cumulative_average)
plt.plot(np.ones(N)*m1)
plt.plot(np.ones(N)*m2)
plt.plot(np.ones(N)*m3)
plt.xscale('log')
plt.show()
return cumulative_average
if __name__ == '__main__':
m1 = 1.0
m2 = 2.0
m3 = 3.0
eps = run_experiment_decaying_epsilon(m1, m2, m3, 100000)
oiv = run_experiment_oiv(m1, m2, m3, 100000)
ucb = run_experiment_ucb(m1, m2, m3, 100000)
bayes = run_experiment(m1, m2, m3, 100000)
# log scale plot
plt.plot(eps, label='decaying-epsilon-greedy')
plt.plot(oiv, label='optimistic')
plt.plot(ucb, label='ucb1')
plt.plot(bayes, label='bayesian')
plt.legend()
plt.xscale('log')
plt.show()
# linear plot
plt.plot(eps, label='decaying-epsilon-greedy')
plt.plot(oiv, label='optimistic')
plt.plot(ucb, label='ucb1')
plt.plot(bayes, label='bayesian')
plt.legend()
plt.show()
| apache-2.0 |
diego0020/PySurfer | examples/plot_label.py | 4 | 1526 | """
Display ROI Labels
==================
Using PySurfer you can plot Freesurfer cortical labels on the surface
with a large amount of control over the visual representation.
"""
import os
from surfer import Brain
print(__doc__)
subject_id = "fsaverage"
hemi = "lh"
surf = "smoothwm"
brain = Brain(subject_id, hemi, surf)
# If the label lives in the normal place in the subjects directory,
# you can plot it by just using the name
brain.add_label("BA1")
# Some labels have an associated scalar value at each ID in the label.
# For example, they may be probabilistically defined. You can threshold
# what vertices show up in the label using this scalar data
brain.add_label("BA1", color="blue", scalar_thresh=.5)
# Or you can give a path to a label in an arbitrary location
subj_dir = os.environ["SUBJECTS_DIR"]
label_file = os.path.join(subj_dir, subject_id,
"label", "%s.MT.label" % hemi)
brain.add_label(label_file)
# By default the label is 'filled-in', but you can
# plot just the label boundaries
brain.add_label("BA44", borders=True)
# You can also control the opacity of the label color
brain.add_label("BA6", alpha=.7)
# Finally, you can plot the label in any color you want.
brain.show_view(dict(azimuth=-42, elevation=105, distance=225,
focalpoint=[-30, -20, 15]))
# Use any valid matplotlib color.
brain.add_label("V1", color="steelblue", alpha=.6)
brain.add_label("V2", color="#FF6347", alpha=.6)
brain.add_label("entorhinal", color=(.2, 1, .5), alpha=.6)
| bsd-3-clause |
akpetty/ibtopo2016 | calc_multi_atm.py | 1 | 9475 | ##############################################################
# Date: 20/01/16
# Name: calc_multi_atm.py
# Author: Alek Petty
# Description: Main script to calculate sea ice topography from IB ATM data
# Input requirements: ATM data, PosAV data (for geolocation)
# Output: topography datasets
import matplotlib
matplotlib.use("AGG")
import IB_functions as ro
import mpl_toolkits.basemap.pyproj as pyproj
from osgeo import osr, gdal
from pyproj import Proj
from glob import glob
from pylab import *
from scipy import ndimage
from matplotlib import rc
#from scipy.interpolate import griddata
from matplotlib.mlab import griddata
import time
import scipy.interpolate
import h5py
from scipy.spatial import cKDTree as KDTree
import os
def calc_bulk_stats():
ice_area=-999
ridge_area_all=-999
mean_ridge_height_all=-999
mean_ridge_heightL=-999
ridge_areaL=-999
num_ridges_out=-999
levpercent_out=-999
num_pts_section=-999
# IF SECTION GOOD THEN GET ICE SWATH AREA
if (points_good==1):
ice_area = ma.count(elevation2d)*(xy_res**2)
levpercent_out=levpercent
# IF SECTION GOOD AND HAVE SOME RIDGING THEN ASSIGN TOTAL RIDGE AREA AND ELEVATION
if ((points_good==1)&(found_ridges==1)):
ridge_area_all = ma.count(elevation2d_ridge_ma)*(xy_res**2)
mean_ridge_height_all = np.mean(elevation2d_ridge_ma) - level_elev
# IF SECTION GOOD AND WE HAVE NO RIDGING (AREA OF RIDGING = 0) THEN ASSIGN ZERO RIDGE AREA HEIGHT
if ((points_good==1)&(found_ridges==0)):
ridge_area_all = 0.
mean_ridge_height_all = 0.
#IF GOOD SECTION BUT NO BIG RIDGES THEN SET THESE VALUES TO ZERO
if ((points_good==1)&(found_big_ridge==0)):
mean_ridge_heightL=0.
ridge_areaL=0.
num_ridges_out=0
    # IF WE FOUND SOME BIG RIDGES THEN ASSIGN BIG RIDGE AREA HEIGHT AND NUMBER
if ((points_good==1)&(found_big_ridge==1)):
mean_ridge_heightL = np.mean(ridge_height_mesh)
ridge_areaL = ma.count(ridge_height_mesh)*(xy_res**2)
num_ridges_out = num_ridges
return [mean_x, mean_y, ice_area, num_ridges_out, ridge_area_all, ridge_areaL, mean_ridge_height_all, mean_ridge_heightL, mean_alt, mean_pitch, mean_roll, mean_vel, num_pts_section,levpercent_out, section_num, found_ridges, points_good, plane_good]
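# Each row returned by calc_bulk_stats therefore has 18 columns, in this order:
# mean_x, mean_y, ice_area, num_ridges, ridge_area_all, ridge_areaL,
# mean_ridge_height_all, mean_ridge_heightL, mean_alt, mean_pitch, mean_roll,
# mean_vel, num_pts_section, levpercent, section_num, found_ridges,
# points_good, plane_good (the same layout as the rows stacked into
# bulk_statsALL below).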
#-------------- ATM AND DMS PATHS------------------
datapath='./Data_output/'
rawdatapath = '../../../DATA/ICEBRIDGE/'
ATM_path = rawdatapath+'/ATM/ARCTIC/'
posAV_path =rawdatapath+'/POSAV/SEA_ICE/GR/'
#posAV_path ='/Volumes/TBOLT_HD_PETTY/POSAV/'
m=pyproj.Proj("+init=EPSG:3413")
#FREE PARAMETERS
min_ridge_height = 0.2
along_track_res=1000
pwidth=20
pint=5
xy_res=2
start_year=2009
end_year=2009
min_ridge_size=100
sh=0
if (sh==1):
print 'Ridge threshold:', sys.argv[1]
print 'Along track res:',sys.argv[2]
print 'xy res:',sys.argv[3]
print 'Start year:',sys.argv[4]
print 'End year:',sys.argv[5]
min_ridge_height = float(sys.argv[1])
along_track_res = int(sys.argv[2])
xy_res = int(sys.argv[3])
start_year=int(sys.argv[4])
end_year=int(sys.argv[5])
pts_threshold=15000
num_points_req = min_ridge_size/(xy_res**2)
section_num=0
print 'Num points req', num_points_req
ftype = str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm'
outpath = datapath+ftype+'/'
for year in xrange(start_year, end_year+1):
ATM_year = ATM_path+str(year)+'/'
atm_files_year = glob(ATM_year+'/*/')
#for days in xrange():
for days in xrange(size(atm_files_year)):
atm_path_date = atm_files_year[days]
print 'ATM day:', atm_path_date
atm_files_in_day = ro.get_atm_files(atm_path_date, year)
#load POS file
posAV = loadtxt(posAV_path+str(year)+'_GR_NASA/sbet_'+str(atm_path_date[-9:-1])+'.out.txt', skiprows=1)
#GET POSITION OF PLANE AND 1km MARKERS FROM POSAV
xp, yp, dist, km_idxs, km_utc_times = ro.get_pos_sections(posAV, m, along_track_res)
for atm_file in xrange(size(atm_files_in_day)):
atm_statsALL=np.array([]).reshape(0,3)
ridge_statsALL=np.array([]).reshape(0,9)
covarALL=np.array([]).reshape(0,5)
bulk_statsALL=np.array([]).reshape(0,18)
print 'ATM file:', atm_files_in_day[atm_file], str(atm_file)+'/'+str(size(atm_files_in_day))
lonT, latT, elevationT, utc_timeT= ro.get_atmqih5(atm_files_in_day[atm_file], year, 1)
#IF SIZE OF DATA IS LESS THAN SOME THRESHOLD THEN DONT BOTHER ANALYZING
if (size(utc_timeT)<100):
break
xT, yT = m(lonT, latT)
#GET POSAV INDICES COINCIDING WITH START AND END OF ATM FILE. ADD PLUS/MINUS 1 FOR SOME LEEWAY.
start_i = np.abs(km_utc_times - utc_timeT[0]).argmin()
end_i = np.abs(km_utc_times - utc_timeT[-1]).argmin()
print 'START/END:', start_i, end_i
for i in xrange(start_i -1, end_i + 1):
section_num+=1
found_ridges=0
found_big_ridge=0
plane_good=0
points_good=0
ridge_statsT = np.array([]).reshape(0,9)
cov_matrix = np.array([]).reshape(0,5)
#label_numsL=np.array(0)
mean_x, mean_y, mean_alt, mean_pitch, mean_roll, mean_vel = ro.posav_section_info(m, posAV[km_idxs[i]:km_idxs[i+1]] )
print ' '
print str(i)+'/'+str(end_i + 1)
print 'Mean altitude:', mean_alt
print 'Mean pitch:', mean_pitch
print 'Mean roll:', mean_roll
print 'Mean vel:', mean_vel
if (abs(mean_alt-500)<200) & (abs(mean_pitch)<5) & (abs(mean_roll)<5):
plane_good=1
poly_path, vertices, sides = ro.get_pos_poly(xp, yp, km_idxs[i], km_idxs[i+1])
xatm_km, yatm_km, elevation_km = ro.get_atm_poly(xT, yT, elevationT, km_utc_times, utc_timeT, poly_path, i)
num_pts_section = size(xatm_km)
print 'Num pts in section:', size(xatm_km)
#if there are more than 15000 pts in the 1km grid (average of around 20000) then proceed
if (num_pts_section>pts_threshold):
points_good=1
#ro.plot_atm_poly(m, xatm_km, yatm_km, elevation_km, poly_path, i, out_path, year)
#GET ATM GRID
xx2d, yy2d = ro.grid_atm(xatm_km, yatm_km, xy_res)
print 'Grid:', size(xx2d[0]), size(xx2d[1])
# CALCULATE THE LEVEL ICE SURFACE USING THE CUMULATIVE DISTRIBUTION
#THRESH IS THE LEVEL ICE PLUS RIDGED ICE ELEVATION
level_elev, thresh, levpercent = ro.calc_level_ice(elevation_km, pint, pwidth, min_ridge_height)
#level_elev, thresh, levpercent = ro.calc_level_ice(elevation_km, pwidth, min_ridge_height)
elevation2d, elevation2d_ridge_ma, ridge_area = ro.grid_elevation(xatm_km, yatm_km,elevation_km, xx2d, yy2d, thresh, kdtree=1)
elevation2d_ridge_maL =elevation2d_ridge_ma-level_elev
#IF THERE IS EVEN A LITTLE BIT OF RIDGING (might not actually be enough for a big areal ridge from the labelling) then proceed to clean up data.
if (ridge_area>0):
found_ridges=1
#CLEAN UP DATA WITH KDTREE AROUND RIDGE POINTS
#REMOVE FOR PRELIMINARY STUDIES AS COMPUTATIONALLY EXPENSIVE!
#elevation2d_ridge_ma = kdtree_clean()
#GET RIDGE LABELS - MAKE SURE RIDGES ARE ABOVE CERTAIN SIZE, DECIDED BY NUM_PTS_REQ
label_im = ro.get_labels(elevation2d_ridge_maL, xy_res, min_ridge_size, min_ridge_height)
# DECIDE IF WE WANT TO CALCULATE RIDGE ORIENTATION OR NOT.
if (np.amax(label_im)>=1):
found_big_ridge=1
print 'Found Ridge!'
print 'Number of labels:', np.amax(label_im)
num_ridges = np.amax(label_im)
#GET RIDGE STATS IF WE DID FIND A RIDGE
ridge_statsT, ridge_height_mesh, cov_matrix, indexT = ro.calc_ridge_stats(elevation2d_ridge_ma, num_ridges, label_im, xx2d, yy2d, level_elev, section_num, calc_orientation=1)
#CALCULATE BULK STATISTICS AS WE HAVE VALID NUMBER OF POINTS WITHIN THE SECTION
else:
print 'No data - WHY?! --------------'
print 'Num pts in section:', size(xatm_km)
#ASSIGN BULK STATISTICS AS WE HAVE NOT CARRIED OUT RIDGE CALCULATION AS PLANE IS DOING FUNNY THINGS
bulk_statsT = calc_bulk_stats()
ridge_statsALL = vstack([ridge_statsALL, ridge_statsT])
covarALL = vstack([covarALL, cov_matrix])
bulk_statsALL = vstack([bulk_statsALL, bulk_statsT])
if not os.path.exists(outpath+str(year)):
os.makedirs(outpath+str(year))
ridge_statsALL.dump(outpath+str(year)+'/ridge_stats_'+str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm_poly'+str(atm_path_date[-9:-1])+'_f'+str(atm_file).zfill(3)+'.txt')
covarALL.dump(outpath+str(year)+'/cov_matrix_'+str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm_poly'+str(atm_path_date[-9:-1])+'_f'+str(atm_file).zfill(3)+'.txt')
bulk_statsALL.dump(outpath+str(year)+'/bulk_ridge_stats_'+str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm_poly'+str(atm_path_date[-9:-1])+'_f'+str(atm_file).zfill(3)+'.txt')
#CAN OUTPUT AS TEXT FILES INSTEAD - BIGGER BUT CAN OPEN RAW
#savetxt(outpath+str(year)+'/ridge_stats_'+str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm_poly'+str(atm_path_date[-9:-1])+'_f'+str(atm_file)+'.txt', ridge_statsALL)
#savetxt(outpath+str(year)+'/cov_matrix_'+str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm_poly'+str(atm_path_date[-9:-1])+'_f'+str(atm_file)+'.txt', covarALL)
#savetxt(outpath+str(year)+'/bulk_ridge_stats_'+str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm_poly'+str(atm_path_date[-9:-1])+'_f'+str(atm_file)+'.txt', bulk_statsALL)
| gpl-3.0 |
jseabold/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
ishank08/scikit-learn | examples/applications/plot_stock_market.py | 76 | 8522 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4
from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
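# Note: the two rescalings above turn the precision matrix into (unsigned)
# partial correlations, rho_ij = theta_ij / sqrt(theta_ii * theta_jj); the
# conventional sign flip is omitted because only absolute values are used to
# select and weight the edges below.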
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
nvoron23/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
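# Worked example (illustrative): for a flat 2 x 2 grid, _make_edges_3d(2, 2)
# numbers the vertices 0..3 row-wise and returns a (2, 4) array of index
# pairs: the "right" edges (0, 1), (2, 3) followed by the "down" edges
# (0, 2), (1, 3); the "deep" edges are empty because n_z defaults to 1.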
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
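# Worked example (illustrative): a 4 x 4 image with 2 x 2 patches gives
# n_h = n_w = 4 - 2 + 1 = 3, i.e. 9 overlapping patches in total;
# max_patches=4 caps the count at 4, and max_patches=0.5 (a proportion)
# yields int(0.5 * 9) = 4 as well.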
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
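# Minimal usage sketch for extract_patches (illustrative helper; the name
# `_example_extract_patches` is not part of the public API):
def _example_extract_patches():
    arr = np.arange(16).reshape(4, 4)
    # Non-overlapping 2 x 2 patches: the strided view has shape (2, 2, 2, 2)
    # and patches[0, 0] is arr[:2, :2], i.e. [[0, 1], [4, 5]].
    patches = extract_patches(arr, patch_shape=2, extraction_step=2)
    # Reshaping the first n dimensions copies the data into a flat list of
    # patches, here of shape (4, 2, 2).
    return patches.reshape(-1, 2, 2)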
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
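# Illustrative sketch, not part of the original module: the call pattern of
# extract_patches_2d with random subsampling. The image and parameter values
# below are arbitrary.
def _demo_extract_patches_2d():
    import numpy as np
    rng = np.random.RandomState(0)
    image = rng.rand(32, 32, 3)  # a fake RGB image
    patches = extract_patches_2d(image, (8, 8), max_patches=10, random_state=0)
    # 10 randomly sampled 8x8 colour patches
    assert patches.shape == (10, 8, 8, 3)
    return patches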
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
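# Illustrative sketch, not part of the original module: extracting every
# overlapping patch and averaging them back should reproduce the original
# image exactly. Sizes are arbitrary.
def _demo_reconstruct_round_trip():
    import numpy as np
    rng = np.random.RandomState(0)
    image = rng.rand(16, 16)
    patches = extract_patches_2d(image, (4, 4))
    reconstructed = reconstruct_from_patches_2d(patches, (16, 16))
    assert np.allclose(image, reconstructed)
    return reconstructed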
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: an RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
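# Illustrative sketch, not part of the original module: PatchExtractor applied
# to a small batch of fake grayscale images; all values are arbitrary and only
# show the fit/transform call pattern.
def _demo_patch_extractor():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(5, 20, 20)  # 5 grayscale images of 20x20 pixels
    extractor = PatchExtractor(patch_size=(4, 4), max_patches=8, random_state=0)
    patches = extractor.fit(X).transform(X)
    # 8 patches per image -> 40 patches in total
    assert patches.shape == (40, 4, 4)
    return patches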
| bsd-3-clause |
zihua/scikit-learn | sklearn/model_selection/tests/test_validation.py | 6 | 30876 | """Test the validation module"""
from __future__ import division
import sys
import warnings
import tempfile
import os
from time import sleep
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection.tests.test_split import MockClassifier
try:
WindowsError
except NameError:
WindowsError = None
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score becomes better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
# The number of samples per class needs to be > n_splits,
# for StratifiedKFold(n_splits=3)
y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3])
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y2)
assert_array_equal(scores, clf.score(X, y2))
# test with multioutput y
multioutput_y = np.column_stack([y2, y2[::-1]])
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
scores = cross_val_score(clf, X_sparse, y2)
assert_array_equal(scores, clf.score(X_sparse, y2))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y2.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y2.tolist())
assert_raises(ValueError, cross_val_score, clf, X, y2, scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y2)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cross_val_score, clf, X_3d, y2)
def test_cross_val_score_predict_groups():
# Check if ValueError (when groups is None) propagates to cross_val_score
# and cross_val_predict
# And also check if groups is correctly passed to the cv object
X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
clf = SVC(kernel="linear")
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
assert_raise_message(ValueError,
"The groups parameter should not be None",
cross_val_score, estimator=clf, X=X, y=y, cv=cv)
assert_raise_message(ValueError,
"The groups parameter should not be None",
cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
        # 3-fold cross-validation is used, so we need at least 3 samples per class
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
kfold = KFold(5)
scores_indices = cross_val_score(svm, X, y, cv=kfold)
kfold = KFold(5)
cv_masks = []
for train, test in kfold.split(X, y):
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced, so f1_score should be equal to the
    # zero/one score)
f1_scores = cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
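# Illustrative sketch, not part of the original test suite: the
# "neg_mean_squared_error" scorer returns the negated MSE so that larger is
# always better. The data below is arbitrary.
def _demo_neg_mse_sign_convention():
    from sklearn.datasets import make_regression
    from sklearn.linear_model import Ridge
    from sklearn.model_selection import cross_val_score
    X, y = make_regression(n_samples=30, n_features=5, random_state=0)
    neg_scores = cross_val_score(Ridge(), X, y, cv=3,
                                 scoring="neg_mean_squared_error")
    # every cross-validated "score" is a negated error, hence <= 0
    assert (neg_scores <= 0).all()
    return neg_scores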
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_group, _, pvalue_group = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = StratifiedKFold(2)
score_group, _, pvalue_group = permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) /
y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
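# Illustrative sketch, not part of the original test suite: permutation_test_score
# returns the unpermuted score, the permuted scores and an empirical p-value of
# (n_permutations_at_least_as_good + 1) / (n_permutations + 1). Data is arbitrary.
def _demo_permutation_test_score():
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.model_selection import permutation_test_score
    from sklearn.svm import SVC
    iris = load_iris()
    score, perm_scores, pvalue = permutation_test_score(
        SVC(kernel="linear"), iris.data, iris.target,
        n_permutations=20, cv=3, random_state=0)
    assert np.isclose(pvalue, (np.sum(perm_scores >= score) + 1.) / (20 + 1))
    return score, perm_scores, pvalue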
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = KFold()
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = LeaveOneOut()
preds = cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
class BadCV():
def split(self, X, y=None, groups=None):
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
iris = load_iris()
X, y = iris.data, iris.target
X_sparse = coo_matrix(X)
multioutput_y = np.column_stack([y, y[::-1]])
clf = Ridge(fit_intercept=False, random_state=0)
    # 3-fold cv is used --> at least 3 samples per class
# Smoke test
predictions = cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_equal(predictions.shape, (150, 2))
predictions = cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_array_equal(predictions.shape, (150, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (150,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
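# Illustrative sketch, not part of the original test suite: typical
# learning_curve usage with a real estimator; the dataset and train_sizes are
# arbitrary.
def _demo_learning_curve_usage():
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.model_selection import learning_curve
    from sklearn.svm import SVC
    X, y = make_classification(n_samples=120, n_features=5, random_state=0)
    sizes, train_scores, test_scores = learning_curve(
        SVC(kernel="linear"), X, y, cv=3, train_sizes=np.linspace(0.2, 1.0, 4))
    # one row per training-set size, one column per CV fold
    assert train_scores.shape == (len(sizes), 3)
    assert test_scores.shape == (len(sizes), 3)
    return sizes, train_scores, test_scores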
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n_splits=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
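# Illustrative sketch, not part of the original test suite: typical
# validation_curve usage sweeping a real hyper-parameter; the dataset and the
# parameter range are arbitrary.
def _demo_validation_curve_usage():
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.model_selection import validation_curve
    from sklearn.svm import SVC
    X, y = make_classification(n_samples=60, n_features=5, random_state=0)
    param_range = np.logspace(-3, 2, 6)
    train_scores, test_scores = validation_curve(
        SVC(kernel="linear"), X, y, param_name="C",
        param_range=param_range, cv=3)
    assert train_scores.shape == (len(param_range), 3)
    assert test_scores.shape == (len(param_range), 3)
    return train_scores, test_scores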
def test_check_is_permutation():
rng = np.random.RandomState(0)
p = np.arange(100)
rng.shuffle(p)
assert_true(_check_is_permutation(p, 100))
assert_false(_check_is_permutation(np.delete(p, 23), 100))
p[0] = 23
assert_false(_check_is_permutation(p, 100))
# Check if the additional duplicate indices are caught
assert_false(_check_is_permutation(np.hstack((p, 0)), 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cross_val_predict(classif, X, y, cv=10)
preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
def test_cross_val_predict_with_method():
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=0)
classes = len(set(y))
kfold = KFold(len(iris.target))
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
est = LogisticRegression()
predictions = cross_val_predict(est, X, y, method=method)
assert_equal(len(predictions), len(y))
expected_predictions = np.zeros([len(y), classes])
func = getattr(est, method)
# Naive loop (should be same as cross_val_predict):
for train, test in kfold.split(X, y):
est.fit(X[train], y[train])
expected_predictions[test] = func(X[test])
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold)
assert_array_almost_equal(expected_predictions, predictions)
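# Illustrative sketch, not part of the original test suite: obtaining
# out-of-fold class probabilities with cross_val_predict(method=...); the
# dataset is arbitrary.
def _demo_cross_val_predict_proba():
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import cross_val_predict
    iris = load_iris()
    proba = cross_val_predict(LogisticRegression(), iris.data, iris.target,
                              cv=3, method='predict_proba')
    # one probability row per sample, one column per class
    assert proba.shape == (len(iris.target), 3)
    assert np.allclose(proba.sum(axis=1), 1.0)
    return proba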
def test_score_memmap():
# Ensure a scalar score of memmap type is accepted
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
tf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
tf.write(b'Hello world!!!!!')
tf.close()
scores = np.memmap(tf.name, dtype=np.float64)
score = np.memmap(tf.name, shape=(), mode='r', dtype=np.float64)
try:
cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
# non-scalar should still fail
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=lambda est, X, y: scores)
finally:
# Best effort to release the mmap file handles before deleting the
# backing file under Windows
scores, score = None, None
for _ in range(3):
try:
os.unlink(tf.name)
break
except WindowsError:
sleep(1.)
| bsd-3-clause |
quheng/scikit-learn | sklearn/preprocessing/tests/test_label.py | 156 | 17626 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
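# Illustrative sketch, not part of the original test suite: LabelEncoder maps
# labels to integers in sorted order; the labels below are arbitrary.
def _demo_label_encoder_usage():
    from numpy.testing import assert_array_equal
    from sklearn.preprocessing import LabelEncoder
    le = LabelEncoder()
    codes = le.fit_transform(["paris", "tokyo", "paris", "amsterdam"])
    assert_array_equal(le.classes_, ["amsterdam", "paris", "tokyo"])
    assert_array_equal(codes, [1, 2, 1, 0])
    assert_array_equal(le.inverse_transform([0, 2]), ["amsterdam", "tokyo"])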
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
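# Illustrative sketch, not part of the original test suite: MultiLabelBinarizer
# turns label sets into an indicator matrix; the labels below are arbitrary.
def _demo_multilabel_binarizer_usage():
    import numpy as np
    from sklearn.preprocessing import MultiLabelBinarizer
    mlb = MultiLabelBinarizer()
    Y = mlb.fit_transform([{"sci-fi", "comedy"}, {"drama"}, {"comedy"}])
    assert list(mlb.classes_) == ["comedy", "drama", "sci-fi"]
    assert np.array_equal(Y, np.array([[1, 0, 1],
                                       [0, 1, 0],
                                       [1, 0, 0]]))
    assert mlb.inverse_transform(Y) == [("comedy", "sci-fi"), ("drama",),
                                        ("comedy",)]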
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
    # ensure it works with an extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
alpenwasser/laborjournal | versuche/skineffect/python/stuetzpunkte_new_lowfreq.py | 1 | 6373 | #!/usr/bin/env python3
from sympy import *
from mpmath import *
from matplotlib.pyplot import *
#init_printing() # make things prettier when we print stuff for debugging.
# ************************************************************************** #
# Magnetic field inside copper coil with hollow copper cylinder #
# ************************************************************************** #
# All values are in standard SI units unless otherwise noted.
# ---------------------------------------------------------#
# Define Variables and Constants #
# ---------------------------------------------------------#
npts = 19 # careful: np.linspace(0, npts, npts) below yields npts points (starts at 0)
#fmin = 5e-4
#fmax = 5e-2
fmin = 0.1
fmax = 0.2
highest_frequency = fmin * exp(log(fmax-fmin))
freq_max_ratio = highest_frequency / fmax
font = {
'family' : 'serif',
'color' : 'black',
'weight' : 'normal',
'size' : 9,
}
titlefont = {
'family' : 'serif',
'color' : 'black',
'weight' : 'normal',
'size' : 10,
}
plot_legend_fontsize = 9
plot_color_old = 'magenta'
plot_color_new = 'blue'
plot_color_common = 'black'
plot_label_points_old = r"St\"utzpunktformel A: $\displaystyle f_{kA} = f_{min} \cdot exp\Biggr(\frac{k}{NPTS} \cdot ln(f_{max}-f_{min})\Biggr)$"
plot_label_points_new = r"St\"utzpunktformel B: $\displaystyle f_{kB} = exp\Biggr((1-\frac{k}{NPTS}) \cdot ln(f_{min})\Biggr) \cdot exp\Biggr(\frac{k}{NPTS} \cdot ln(f_{max})\Biggr)$"
plot_label_vertical_common = r"minimale Frequenz St\"utzpunkt: "
plot_label_vertical_old = r"maximale Frequenz St\"utzpunkt, Methode A: "
plot_label_vertical_new = r"maximale Frequenz St\"utzpunkt, Methode B: "
plot_added_text = r"Verh\"altnis der maximalen Frequenzen Methode A -- Methode B: $\displaystyle \frac{f_{kA}}{f_{kB}}\Bigg|_{k=NPTS} \approx " + str(freq_max_ratio) + "$"
plot_freq_range_label = r"Eingestellter Frequenzbereich: $f_{min} = " + str(fmin) + r"$Hz bis $f_{max} = " + str(fmax) + r"$Hz"
plot_size_measurements = 24
plot_scale_x = 'log'
plot_label_x = r"Frequenz des St\"utzpunkts (Hz)"
plot_1_label_y = 'k (siehe Formel)'
plot_1_title = r"Vergleich St\"utzpunktformeln, effektiv abgedeckter Frequenzbereich:" + str(fmin) + " Hz bis " + str(highest_frequency) + " Hz, " + str(npts+1) + " Punkte"
y_lim_low = -2
y_lim_high = npts + 2
x_lim_low = 0.67 * fmin
x_lim_high = 1.33 * fmin * fmax
# ---------------------------------------------------------#
# Generate points for frequency axis #
# ---------------------------------------------------------#
n = np.linspace(0,npts,npts)
expufunc = np.frompyfunc(exp,1,1)
frequency_vector_old = fmin*expufunc(n*log(fmax-fmin)/npts)
frequency_vector_new = expufunc((1-n/npts)*log(fmin)) * expufunc(n*log(fmax)/npts)
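# ---------------------------------------------------------#
# Illustrative check (added, not in the original script):  #
# algebraically the two formulas reduce to                 #
#   f_kA = fmin * (fmax - fmin)**(k/npts)                  #
#   f_kB = fmin**(1 - k/npts) * fmax**(k/npts)             #
#        = fmin * (fmax/fmin)**(k/npts)                    #
# so only formula B is a true logarithmic spacing from     #
# fmin to fmax. The assertions re-derive the same numbers. #
# ---------------------------------------------------------#
_k = np.asarray(n, dtype=float)
assert np.allclose(frequency_vector_old.astype(float),
                   fmin * (fmax - fmin) ** (_k / npts))
assert np.allclose(frequency_vector_new.astype(float),
                   fmin * (fmax / fmin) ** (_k / npts))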
plot_label_vertical_common += str(frequency_vector_old[0]) + " Hz"
plot_label_vertical_old += str(frequency_vector_old[npts-1]) + " Hz"
plot_label_vertical_new += str(frequency_vector_new[npts-1]) + " Hz"
# ---------------------------------------------------------#
# Plot the Things #
# ---------------------------------------------------------#
matplotlib.pyplot.rc('text', usetex=True)
matplotlib.pyplot.rc('font', family='serif')
#fig1 = figure(1)
#fig1 = figure(1,figsize=(9,9))
#fig1 = figure(1,figsize=(8.26,11.7)) # A4 size in inches
fig1 = figure(1,figsize=(8.26,10.0))
axes1 = fig1.add_subplot(111)
axes1.set_position([0.1,0.1,0.5,0.8])
axes1.scatter(frequency_vector_old,
n,
color=plot_color_old,
s=plot_size_measurements,
label=plot_label_points_old
)
axes1.scatter(frequency_vector_new,
n,
color=plot_color_new,
s=plot_size_measurements,
label=plot_label_points_new
)
# Draw the common starting point black and a bit bigger
axes1.scatter(frequency_vector_old[0],
n[0],
color=plot_color_common,
s=plot_size_measurements*1.5,
)
axes1.plot([frequency_vector_old[0],frequency_vector_old[0]],
[y_lim_low,y_lim_high],
color=plot_color_common,
label=plot_label_vertical_common
)
axes1.plot([frequency_vector_old[npts-1],frequency_vector_old[npts-1]],
[y_lim_low,y_lim_high],
color=plot_color_old,
label=plot_label_vertical_old
)
axes1.plot([frequency_vector_new[npts-1],frequency_vector_new[npts-1]],
[y_lim_low,y_lim_high],
color=plot_color_new,
label=plot_label_vertical_new
)
axes1.set_xscale(plot_scale_x)
axes1.set_ylim([y_lim_low,y_lim_high])
#axes1.set_xlim([x_lim_low,x_lim_high])
axes1.set_xlabel(plot_label_x,fontdict=font)
axes1.set_ylabel(plot_1_label_y,fontdict=font)
axes1.set_title(plot_1_title,fontdict=titlefont)
axes1.tick_params(labelsize=9)
# ---------------------------------------------------- #
# Work some magic to append the fraction of the two #
# methods to the legend instead of it being some #
# random piece of text on the plot. #
# ---------------------------------------------------- #
rect = matplotlib.patches.Rectangle([0,0],0,0,color='white',label=plot_added_text)
rect2= matplotlib.patches.Rectangle([0,0],0,0,color='white',label=plot_freq_range_label)
handles,legends = axes1.get_legend_handles_labels()
handles.append(rect)
handles.append(rect2)
axes1.legend(handles=handles,fontsize=plot_legend_fontsize,loc='upper left',bbox_to_anchor=(0.0,-0.075))
# ---------------------------------------------------- #
# This would be necessary if we wanted to actually #
# draw the patch, leaving this here for future #
# reference. #
# ---------------------------------------------------- #
#axes1.add_patch(rect)
fig1.subplots_adjust(bottom=0.29,left=0.05,right=0.99,top=.98,hspace=0.3)
fig1.savefig('plots-pgf/stuetzpunkte-lowfreq.pgf')
fig1.savefig('plots-pdf/stuetzpunkte-lowfreq.pdf')
#show()
| mit |
RomainBrault/scikit-learn | sklearn/datasets/__init__.py | 61 | 3734 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_breast_cancer
from .base import load_boston
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_sample_images
from .base import load_sample_image
from .base import load_wine
from .base import get_data_home
from .base import clear_data_home
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'load_wine',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
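# Illustrative sketch, not part of the original module: typical use of the
# loaders and generators re-exported above.
def _demo_datasets_usage():
    iris = load_iris()  # small bundled reference dataset
    X, y = make_classification(n_samples=50, n_features=4, random_state=0)
    return iris.data.shape, X.shape, y.shape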
| bsd-3-clause |
stscieisenhamer/glue | glue/core/data_factories/excel.py | 5 | 1367 | from __future__ import absolute_import, division, print_function
import os
from glue.core.data_factories.helpers import has_extension
from glue.core.data_factories.pandas import panda_process
from glue.config import data_factory
__all__ = []
@data_factory(label="Excel", identifier=has_extension('xls xlsx'))
def panda_read_excel(path, sheet=None, **kwargs):
""" A factory for reading excel data using pandas.
:param path: path/to/file
:param sheet: The sheet to read. If `None`, all sheets are read.
:param kwargs: All other kwargs are passed to pandas.read_excel
:return: core.data.Data object.
"""
try:
import pandas as pd
except ImportError:
raise ImportError('Pandas is required for Excel input.')
try:
import xlrd
except ImportError:
raise ImportError('xlrd is required for Excel input.')
name = os.path.basename(path)
if '.xls' in name:
name = name.rsplit('.xls', 1)[0]
xl_workbook = xlrd.open_workbook(path)
if sheet is None:
sheet_names = xl_workbook.sheet_names()
else:
sheet_names = [sheet]
all_data = []
for sheet in sheet_names:
indf = pd.read_excel(path, sheet, **kwargs)
data = panda_process(indf)
data.label = "{0}:{1}".format(name, sheet)
all_data.append(data)
return all_data
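# Illustrative sketch, not part of the original module: the call pattern for
# this factory. The workbook and sheet names below are hypothetical.
if __name__ == "__main__":  # pragma: no cover
    demo_path = "example.xlsx"  # hypothetical workbook
    if os.path.exists(demo_path):
        all_sheets = panda_read_excel(demo_path)  # one Data object per sheet
        one_sheet = panda_read_excel(demo_path, sheet="Sheet1")  # hypothetical sheet name
        print([d.label for d in all_sheets])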
| bsd-3-clause |
jefffohl/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backend_bases.py | 69 | 69740 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
    handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
"""
from __future__ import division
import os, warnings, time
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
from matplotlib import rcParams
class RendererBase:
"""An abstract base class to handle drawing/rendering operations.
The following methods *must* be implemented in the backend:
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_text`
* :meth:`get_text_width_height_descent`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
def open_group(self, s):
"""
        Open a grouping element with label *s*. It is currently only used by
        :mod:`~matplotlib.backends.backend_svg`.
"""
pass
def close_group(self, s):
"""
        Close a grouping element with label *s*.
        It is currently only used by :mod:`~matplotlib.backends.backend_svg`.
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
tpath = trans.transform_path(path)
for vertices, codes in tpath.iter_segments():
if len(vertices):
x,y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans + transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
"""
Draws a collection of paths, selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before
being applied.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
draw_path. Some backends may want to override this in order
to render each set of path data only once, and then reference
that path multiple times with the different offsets, colors,
styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
path, transform = path_id
transform = transforms.Affine2D(transform.get_matrix()).translate(xo, yo)
self.draw_path(gc, path, transform, rgbFace)
def draw_quad_mesh(self, master_transform, cliprect, clippath,
clippath_trans, meshWidth, meshHeight, coordinates,
offsets, offsetTrans, facecolors, antialiased,
showedges):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if showedges:
edgecolors = np.array([[0.0, 0.0, 0.0, 1.0]], np.float_)
linewidths = np.array([1.0], np.float_)
else:
edgecolors = facecolors
linewidths = np.array([0.0], np.float_)
return self.draw_path_collection(
master_transform, cliprect, clippath, clippath_trans,
paths, [], offsets, offsetTrans, facecolors, edgecolors,
linewidths, [], [antialiased], [None])
def _iter_collection_raw_paths(self, master_transform, paths, all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
        it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = all_transforms[i % Ntransforms]
yield path, transform + master_transform
def _iter_collection(self, path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
        a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc = self.new_gc()
gc.set_clip_rectangle(cliprect)
if clippath is not None:
clippath = transforms.TransformedPath(clippath, clippath_trans)
gc.set_clip_path(clippath)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
gc.set_foreground(edgecolors[i % Nedgecolors])
if Nlinewidths:
gc.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc.set_dashes(*linestyles[i % Nlinestyles])
if rgbFace is not None and len(rgbFace)==4:
gc.set_alpha(rgbFace[-1])
rgbFace = rgbFace[:3]
gc.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc, rgbFace
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the image instance into the current axes;
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if origin is
upper, y is the distance from top. If origin is lower, y
is the distance from bottom
*im*
the :class:`matplotlib._image.Image` instance
*bbox*
a :class:`matplotlib.transforms.Bbox` instance for clipping, or
None
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
        override this method for renderers that do not necessarily
want to rescale and composite raster images. (like SVG)
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
raise NotImplementedError
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text in display coords
*s*
a :class:`matplotlib.text.Text` instance
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be blotted along with
your text.
"""
raise NotImplementedError
def flipy(self):
"""
        Return true if small y values are at the top of the canvas for this renderer. Is used
for drawing text (:mod:`matplotlib.text`) and images
(:mod:`matplotlib.image`) only
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
raise NotImplementedError
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, eg, postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
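    # Editor's illustration of the formula above for a dpi-aware backend
    # (``self.dpi`` is a hypothetical attribute such a backend would keep):
    #     return points * self.dpi / 72.0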
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
pass
def stop_rasterizing(self):
pass
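# Editor's sketch: the smallest renderer a backend must supply, per the
# RendererBase docstring above.  The class name is hypothetical and the
# bodies are placeholders, not part of matplotlib's API.
class _RendererSketch(RendererBase):
    def draw_path(self, gc, path, transform, rgbFace=None):
        pass  # a real backend emits its drawing commands here
    def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
        pass
    def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
        pass
    def get_text_width_height_descent(self, s, prop, ismath):
        # crude fixed metrics; a real backend queries its font machinery
        return 6.0 * len(s), 10.0, 2.0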
class GraphicsContextBase:
"""
An abstract base class that provides color, line styles, etc...
"""
# a mapping from dash styles to suggested offset, dash pairs
dashd = {
'solid' : (None, None),
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
}
def __init__(self):
self._alpha = 1.0
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'miter'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0)
self._hatch = None
self._url = None
self._snap = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._hatch = gc._hatch
self._url = gc._url
self._snap = gc._snap
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox` instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
        The dash list is an even-length list that gives the ink on, ink
off in pixels.
        See p. 107 of the PostScript `BLUEBOOK
<http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
        Returns a tuple of three floats from 0-1. The color can be a
        matlab format string, an html hex color string, or an rgb tuple
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
"""
self._alpha = alpha
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b: self._antialiased = 1
else: self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
assert path is None or isinstance(path, transforms.TransformedPath)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points. ``(None, None)`` specifies a solid line
"""
self._dashes = dash_offset, dash_list
def set_foreground(self, fg, isRGB=False):
"""
        Set the foreground color. fg can be a matlab format string, an
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
The :class:`GraphicsContextBase` converts colors to rgb
internally. If you know the color is rgb already, you can set
        ``isRGB=True`` to avoid the performance hit of the conversion
"""
if isRGB:
self._rgb = fg
else:
self._rgb = colors.colorConverter.to_rgba(fg)
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
self._rgb = (frac, frac, frac)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = w
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
try:
offset, dashes = self.dashd[style]
        except KeyError:
raise ValueError('Unrecognized linestyle: %s' % style)
self._linestyle = style
self.set_dashes(offset, dashes)
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
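# Editor's sketch: how drawing code typically prepares a graphics context
# before handing it to a renderer.  The values chosen are illustrative only.
def _example_gc(renderer):
    gc = renderer.new_gc()
    gc.set_foreground('0.25')   # grayscale string; converted to rgb internally
    gc.set_linewidth(2.0)
    gc.set_linestyle('dashed')  # also installs the offset/dash pair from dashd
    gc.set_alpha(0.5)
    gc.set_antialiased(True)
    return gc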
class Event:
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas,guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class LocationEvent(Event):
"""
    An event that has a screen location
The following additional attributes are defined and shown with
their default values
In addition to the :class:`Event` attributes, the following event attributes are defined:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
    y = None       # y position - pixels from bottom of canvas
    inaxes = None  # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y,guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas,guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
axes_list = [a for a in self.canvas.figure.get_axes() if a.in_axes(self)]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
axCmp = lambda _x,_y: cmp(_x.zorder, _y.zorder)
axes_list.sort(axCmp)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
xdata, ydata = self.inaxes.transData.inverted().transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes!=self.inaxes:
# process axes enter/leave events
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event', 'button_release_event', 'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used for scroll events)
*key*
        the key pressed: None, chr(range(255)), 'shift', 'win', or 'control'
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
    y = None       # y position - pixels from bottom of canvas
button = None # button pressed None, 1, 2, 3
    inaxes = None  # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- eg a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
line, = ax.plot(rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print 'on pick line:', zip(xdata[ind], ydata[ind])
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist, guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
        the key pressed: None, chr(range(255)), shift, win, or control
This interface may change slightly when better support for
modifier keys is included.
Example usage::
def on_key(event):
print 'you pressed', event.key, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase:
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event'
]
def __init__(self, figure):
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry(self.events)
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event',self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event',self.pick)
if False:
## highlight the artists that are hit
self.mpl_connect('motion_notify_event',self.onHilite)
## delete the artists that are clicked on
#self.mpl_disconnect(self.button_pick_id)
#self.mpl_connect('button_press_event',self.onRemove)
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
under the cursor. Connect this to the 'mouse_press_event'
using::
canvas.mpl_connect('mouse_press_event',canvas.onRemove)
"""
def sort_artists(artists):
# This depends on stable sort and artists returned
# from get_children in z order.
L = [ (h.zorder, h) for h in artists ]
L.sort()
return [ h for zorder, h in L ]
# Find the top artist under the cursor
under = sort_artists(self.figure.hitlist(ev))
h = None
if under: h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
print "Removing",h
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
if not hasattr(self,'_active'): self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
print "within:"," ".join([str(x) for x in under])
#print "entering:",[str(a) for a in enter]
#print "leaving:",[str(a) for a in leave]
# On leave restore the captured colour
for a in leave:
if hasattr(a,'get_color'):
a.set_color(self._active[a])
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a,'get_color'):
self._active[a] = a.get_color()
elif hasattr(a,'get_edgecolor'):
self._active[a] = (a.get_edgecolor(),a.get_facecolor())
else: self._active[a] = None
for a in enter:
if hasattr(a,'get_color'):
a.set_color('red')
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else: self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
        This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
        This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def key_press_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
        fire off :class:`PickEvent` callbacks to registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
        This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
        This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
def enter_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
'call when GUI is idle'
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
        :meth:`draw` only if idle; defaults to draw but backends can override
"""
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
filetypes = {
'emf': 'Enhanced Metafile',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'png': 'Portable Network Graphics',
'ps' : 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
# All of these print_* functions do a lazy import because
# a) otherwise we'd have cyclical imports, since all of these
# classes inherit from FigureCanvasBase
# b) so we don't import a bunch of stuff the user may never use
def print_emf(self, *args, **kwargs):
from backends.backend_emf import FigureCanvasEMF # lazy import
emf = self.switch_backends(FigureCanvasEMF)
return emf.print_emf(*args, **kwargs)
def print_eps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_eps(*args, **kwargs)
def print_pdf(self, *args, **kwargs):
from backends.backend_pdf import FigureCanvasPdf # lazy import
pdf = self.switch_backends(FigureCanvasPdf)
return pdf.print_pdf(*args, **kwargs)
def print_png(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(*args, **kwargs)
def print_ps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_ps(*args, **kwargs)
def print_raw(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_raw(*args, **kwargs)
print_bmp = print_rgb = print_raw
def print_svg(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svg(*args, **kwargs)
def print_svgz(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svgz(*args, **kwargs)
def get_supported_filetypes(self):
return self.filetypes
def get_supported_filetypes_grouped(self):
groupings = {}
for ext, name in self.filetypes.items():
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
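    # Editor's note: the grouped mapping returned above looks like, e.g.,
    #   {'Scalable Vector Graphics': ['svg', 'svgz'], 'Postscript': ['ps'], ...}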
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*orientation*
only currently applies to PostScript printing.
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure
*edgecolor*
the edgecolor of the figure
        *orientation*
          'landscape' | 'portrait' (not supported on all backends)
*format*
when set, forcibly set the file format to save to
"""
if format is None:
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
method_name = 'print_%s' % format
if (format not in self.filetypes or
not hasattr(self, method_name)):
formats = self.filetypes.keys()
formats.sort()
raise ValueError(
'Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
if dpi is None:
dpi = rcParams['savefig.dpi']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
try:
result = getattr(self, method_name)(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
**kwargs)
finally:
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
#self.figure.canvas.draw() ## seems superfluous
return result
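    # Editor's illustration of a typical call (the filename is hypothetical):
    #   canvas.print_figure('output.png', dpi=300, facecolor='w', edgecolor='w')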
def get_default_filetype(self):
raise NotImplementedError
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def switch_backends(self, FigureCanvasClass):
"""
instantiate an instance of FigureCanvasClass
This is used for backend switching, eg, to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (eg, setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
        set to the :class:`~matplotlib.axes.Axes` the event occurred
over, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = canvas.mpl_connect('button_press_event', on_press)
"""
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self,timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self,timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
such, it throws a deprecated warning.
Call signature::
start_event_loop_default(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str,DeprecationWarning)
if timeout <= 0: timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter*timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
self._looping = False
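# Editor's sketch: the rough shape of a backend canvas.  The class name is
# hypothetical; a real backend pairs its canvas with its own renderer rather
# than the placeholder sketched after RendererBase above.
class _FigureCanvasSketch(FigureCanvasBase):
    def draw(self):
        # hand the figure a renderer and let it draw itself
        renderer = _RendererSketch()
        self.figure.draw(renderer)
    def get_default_filetype(self):
        return 'png'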
class FigureManagerBase:
"""
Helper class for matlab mode, wraps everything up into a neat bundle
    Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
        The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.canvas.mpl_connect('key_press_event', self.key_press)
def destroy(self):
pass
def full_screen_toggle (self):
pass
def resize(self, w, h):
'For gui backends: resize window in pixels'
pass
def key_press(self, event):
# these bindings happen whether you are over an axes or not
#if event.key == 'q':
# self.destroy() # how cruel to have to destroy oneself!
# return
if event.key == 'f':
self.full_screen_toggle()
# *h*ome or *r*eset mnemonic
elif event.key == 'h' or event.key == 'r' or event.key == "home":
self.canvas.toolbar.home()
# c and v to enable left handed quick navigation
elif event.key == 'left' or event.key == 'c' or event.key == 'backspace':
self.canvas.toolbar.back()
elif event.key == 'right' or event.key == 'v':
self.canvas.toolbar.forward()
# *p*an mnemonic
elif event.key == 'p':
self.canvas.toolbar.pan()
# z*o*om mnemonic
elif event.key == 'o':
self.canvas.toolbar.zoom()
elif event.key == 's':
self.canvas.toolbar.save_figure(self.canvas.toolbar)
if event.inaxes is None:
return
# the mouse has to be over an axes to trigger these
if event.key == 'g':
event.inaxes.grid()
self.canvas.draw()
elif event.key == 'l':
ax = event.inaxes
scale = ax.get_yscale()
if scale=='log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale=='linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
elif event.key is not None and (event.key.isdigit() and event.key!='0') or event.key=='a':
# 'a' enables all axes
if event.key!='a':
n=int(event.key)-1
for i, a in enumerate(self.canvas.figure.get_axes()):
if event.x is not None and event.y is not None and a.in_axes(event):
if event.key=='a':
a.set_navigate(True)
else:
a.set_navigate(i==n)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
pass
# cursors
class Cursors: #namespace
HAND, POINTER, SELECT_REGION, MOVE = range(4)
cursors = Cursors()
class NavigationToolbar2:
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
self._button_pressed = None # determined by the button pressed at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
'display a message on toolbar or in status bar'
pass
def back(self, *args):
'move back up the view lim stack'
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
'draw a rectangle rubberband to indicate zoom limits'
pass
def forward(self, *args):
'move forward in the view lim stack'
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
'restore the original view'
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def mouse_move(self, event):
#print 'mouse_move', event.button
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active=='ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
self.draw_rubberband(event, x, y, lastx, lasty)
elif (self._active=='PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
if event.inaxes and event.inaxes.get_navigate():
try: s = event.inaxes.format_coord(event.xdata, event.ydata)
except ValueError: pass
except OverflowError: pass
else:
if len(self.mode):
self.set_message('%s : %s' % (self.mode, s))
else:
self.set_message(s)
else: self.set_message(self.mode)
def pan(self,*args):
'Activate the pan/zoom tool. pan with left button, zoom with right'
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
        'this will be called whenever a mouse button is pressed'
pass
def press_pan(self, event):
'the press mouse button in pan/zoom mode callback'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) and a.get_navigate():
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.drag_pan)
self.press(event)
def press_zoom(self, event):
'the press mouse button in zoom to rect mode callback'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) \
and a.get_navigate() and a.can_zoom():
self._xypress.append(( x, y, a, i, a.viewLim.frozen(), a.transData.frozen()))
self.press(event)
def push_current(self):
'push the current view limits and position onto the stack'
lims = []; pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append( (xmin, xmax, ymin, ymax) )
# Store both the original and modified positions
pos.append( (
a.get_position(True).frozen(),
a.get_position().frozen() ) )
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
'this will be called whenever mouse button is released'
pass
def release_pan(self, event):
'the release mouse button callback in pan/zoom mode'
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress: return
self._xypress = []
self._button_pressed=None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
'the drag callback in pan/zoom mode'
for a, ind in self._xypress:
#safer to use the recorded button at the press than current button:
            # multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def release_zoom(self, event):
'the release mouse button callback in zoom to rect mode'
if not self._xypress: return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x-lastx)<5 or abs(y-lasty)<5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point( (lastx, lasty) )
x, y = inverse.transform_point( (x, y) )
Xmin,Xmax=a.get_xlim()
Ymin,Ymax=a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a,la): twinx=True
if a.get_shared_y_axes().joined(a,la): twiny=True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x<lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 < Xmin: x0=Xmin
if x1 > Xmax: x1=Xmax
else:
if x>lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 > Xmin: x0=Xmin
if x1 < Xmax: x1=Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y<lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 < Ymin: y0=Ymin
if y1 > Ymax: y1=Ymax
else:
if y>lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 > Ymin: y0=Ymin
if y1 < Ymax: y1=Ymax
if self._button_pressed == 1:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
if a.get_xscale()=='log':
alpha=np.log(Xmax/Xmin)/np.log(x1/x0)
rx1=pow(Xmin/x0,alpha)*Xmin
rx2=pow(Xmax/x0,alpha)*Xmin
else:
alpha=(Xmax-Xmin)/(x1-x0)
rx1=alpha*(Xmin-x0)+Xmin
rx2=alpha*(Xmax-x0)+Xmin
if a.get_yscale()=='log':
alpha=np.log(Ymax/Ymin)/np.log(y1/y0)
ry1=pow(Ymin/y0,alpha)*Ymin
ry2=pow(Ymax/y0,alpha)*Ymin
else:
alpha=(Ymax-Ymin)/(y1-y0)
ry1=alpha*(Ymin-y0)+Ymin
ry2=alpha*(Ymax-y0)+Ymin
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
self.draw()
self._xypress = None
self._button_pressed = None
self.push_current()
self.release(event)
def draw(self):
'redraw the canvases, update the locators'
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw()
def _update_view(self):
'''update the viewlim and position from the view and
position stack for each axes
'''
lims = self._views()
if lims is None: return
pos = self._positions()
if pos is None: return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position( pos[i][0], 'original' )
a.set_position( pos[i][1], 'active' )
self.draw()
def save_figure(self, *args):
'save the current figure'
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
'reset the axes stack'
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
'activate zoom to rect mode'
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress=self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease=self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event', self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event', self.release_zoom)
self.mode = 'Zoom to rect mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
'enable or disable back/forward button'
pass
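# Editor's sketch: the three methods the NavigationToolbar2 docstring requires
# a backend to implement.  All names and the saved filename are illustrative.
class _NavigationToolbar2Sketch(NavigationToolbar2):
    def _init_toolbar(self):
        pass  # build toolbar buttons bound to home/back/forward/pan/zoom/save_figure
    def set_cursor(self, cursor):
        pass  # translate the Cursors enum value into a toolkit cursor
    def save_figure(self, *args):
        # a real backend opens a file dialog; a fixed name is assumed here
        self.canvas.print_figure('figure.png')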
| gpl-3.0 |
southpaw94/MachineLearning | Perceptron/Iris.py | 1 | 1993 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from Perceptron import Perceptron
def plotRawData():
plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')
plt.xlabel('petal length')
plt.ylabel('sepal length')
plt.legend(loc='upper left')
plt.show()
plt.cla()
def plotErrors():
plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of misclassifications')
plt.show()
plt.cla()
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors=('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y ==cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
# print(df.tail())
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:100, [0, 2]].values
ppn = Perceptron(eta=0.1, n_iter=10)
ppn.fit(X, y)
plot_decision_regions(X, y, ppn)
plt.show()
plt.cla()
| gpl-2.0 |