VanillaBaseControl):
raise VanillaError("invalid object")
cell = button._nsObject.cell()
self._window.setDefaultButtonCell_(cell)
def bind(self, event, callback):
"""
Bind a callback to an event.
**event** A string representing the desired event. The options are:
+-------------------+----------------------------------------------------------------------+
| *"should close"* | Called when the user attempts to close the window. This must return |
| | a bool indicating if the window should be closed or not. |
+-------------------+----------------------------------------------------------------------+
| *"close"* | Called immediately before the window closes. |
+-------------------+----------------------------------------------------------------------+
| *"move"* | Called immediately after the window is moved. |
+-------------------+----------------------------------------------------------------------+
| *"resize"* | Called immediately after the window is resized. |
+-------------------+----------------------------------------------------------------------+
| *"became main"* | Called immediately after the window has become the main window. |
+-------------------+----------------------------------------------------------------------+
| *"resigned main"* | Called immediately after the window has lost its main window status. |
+-------------------+----------------------------------------------------------------------+
| *"became key"* | Called immediately after the window has become the key window. |
+-------------------+----------------------------------------------------------------------+
| *"resigned key"* | Called immediately after the window has lost its key window status. |
+-------------------+----------------------------------------------------------------------+
*For more information about main and key windows, refer to the Cocoa
`documentation <http://developer.apple.com/documentation/Cocoa/Conceptual/WinPanel/Concepts/ChangingMainKeyWindow.html>`_
on the subject.*
**callback** The callback that will be called when the event occurs. It should accept a *sender* argument which will
be the Window that called the callback.::
class WindowBindDemo(object):
def __init__(self):
self.w = Window((200, 200))
self.w.bind("move", self.windowMoved)
self.w.open()
def windowMoved(self, sender):
print("window moved!", sender)
WindowBindDemo()
"""
if event not in self._bindings:
self._bindings[event] = []
self._bindings[event].append(callback)
def unbind(self, event, callback):
"""
Unbind a callback from an event.
**event** A string representing the desired event.
Refer to *bind* for the options.
**callback** The callback that has been bound to the event.
"""
self._bindings[event].remove(callback)
def _alertBindings(self, key):
# test to see if the attr exists.
# this is necessary because NSWindow
# can move the window (and therefore
# call the delegate method which calls
# this method) before the super
# call in __init__ is complete.
if hasattr(self, "_bindings"):
if key in self._bindings:
for callback in self._bindings[key]:
# XXX this return causes only the first binding to be called XXX
# see http://code.typesupply.com/ticket/2
return callback(self)
def windowWillClose_(self, notification):
self.hide()
self._alertBindings("close")
# remove all bindings to prevent circular refs
if hasattr(self, "_bindings"):
del self._bindings
self._breakCycles()
# We must make sure that the window does _not_ get deallocated during
# windowWillClose_, or weird things happen, such as that the window
# below this window doesn't always get activated properly. (For reference:
# this happens when closing with cmd-W, but not when clicking the close
# control.)
# Yet we want to get rid of the NSWindow object here, mostly as a flag
# so we can disallow re-opening windows. So we retain/autorelease the
# NSWindow, then get rid of our own reference.
self._window.retain()
self._window.autorelease()
self._window = None # make sure we can't re-open the window
self.autorelease() # see self.open()
def windowDidBecomeKey_(self, notification):
self._alertBindings("became key")
def windowDidResignKey_(self, notification):
self._alertBindings("resigned key")
def windowDidBecomeMain_(self, notification):
self._alertBindings("became main")
def windowDidResignMain_(self, notification):
self._alertBindings("resigned main")
def windowDidMove_(self, notification):
self._alertBindings("move")
def windowDidResize_(self, notification):
self._alertBindings("resize")
def windowDidEnterFullScreen_(self, notification):
self._alertBindings("enter full screen")
def windowWillEnterFullScreen_(self, notification):
self._alertBindings("will enter full screen")
def windowDidExitFullScreen_(self, notification):
self._alertBindings("exit full screen")
def windowWillExitFullScreen_(self, notification):
self._alertBindings("will exit full screen")
def windowShouldClose_(self, notification):
shouldClose = self._alertBindings("should close")
if shouldClose is None:
shouldClose = True
return shouldClose
# -------
# Toolbar
# -------
# credit where credit is due: much of this was learned
# from the PyObjC demo: WSTConnectionWindowControllerClass
def addToolbar(self, toolbarIdentifier, toolbarItems, addStandardItems=True, displayMode="default", sizeStyle="default"):
"""
Add a toolbar to the window.
**toolbarIdentifier** A string representing a unique name for the toolbar.
**toolbarItems** An ordered list of dictionaries containing the following items:
+-------------------------------+---------------------------------------------------------------------------+
| *itemIdentifier* | A unique string identifier for the item. This is only used internally. |
+-------------------------------+---------------------------------------------------------------------------+
| *label* (optional) | The text label for the item. Defaults to *None*. |
+-------------------------------+---------------------------------------------------------------------------+
| *paletteLabel* (optional) | The text label shown in the customization palette. Defaults to *label*. |
+-------------------------------+---------------------------------------------------------------------------+
| *toolTip* (optional) | The tool tip for the item. Defaults to *label*. |
+-------------------------------+---------------------------------------------------------------------------+
| *imagePath* (optional) | A file path to an image. Defaults to *None*. |
+-------------------------------+---------------------------------------------------------------------------+
| *imageNamed* (optional) | The name of an image already loaded as a *NSImage* by the application. |
| | Defaults to *None*. |
+-------------------------------+---------------------------------------------------------------------------+
| *imageObject* (optional) | A *NSImage* object. Defaults to *None*. |
+-------------------------------+---------------------------------------------------------------------------+
| *selectable* (optional) | A boolean representing if the item is selectable or not. The default |
| | value is *False*. For more information on selectable toolbar items, refer |
| | to Apple's `documentation <http://tinyurl.com/SelectableItems>`_ |
+-------------------------------+---------------------------------------------------------------------------+
| *view* (optional) | A *NSView* object to be used instead of an image. Defaults to *None*. |
+-------------------------------+---------------------------------------------------------------------------+
| *visibleByDefault* (optional) | A boolean indicating if the item should be visible by default. If the item |
| | should only be added to the toolbar through the customization |
| | palette, use a value of *False*. Defaults to *True*. |
+-------------------------------+---------------------------------------------------------------------------+
**addStandardItems** A boolean, specifying whether the standard Cocoa toolbar items
should be added. Defaults to *True*. If you set it to *False*, you must specify any
standard items manually in *toolbarItems*, by using the constants from the AppKit module:
+-------------------------------------------+----------------------------------------------------------------+
| *NSToolbarSeparatorItemIdentifier* | The Separator item. |
+-------------------------------------------+----------------------------------------------------------------+
| *NSToolbarSpaceItemIdentifier* | The Space item. |
+-------------------------------------------+----------------------------------------------------------------+
| *NSToolbarFlexibleSpaceItemIdentifier* | The Flexible Space item. |
+-------------------------------------------+----------------------------------------------------------------+
| *NSToolbarShowColorsItemIdentifier* | The Colors item. Shows the color panel. |
+-------------------------------------------+----------------------------------------------------------------+
| *NSToolbarShowFontsItemIdentifier* | The Fonts item. Shows the font panel. |
+-------------------------------------------+----------------------------------------------------------------+
| *NSToolbarCustomizeToolbarItemIdentifier* | The Customize item. Shows the customization palette. |
+-------------------------------------------+----------------------------------------------------------------+
| *NSToolbarPrintItemIdentifier* | The Print item. Refer to Apple's *NSToolbarItem* documentation |
| | for more information. |
+-------------------------------------------+----------------------------------------------------------------+
**displayMode** A string representing the desired display mode for the toolbar.
+-------------+
| "default" |
+-------------+
| "iconLabel" |
+-------------+
| "icon" |
+-------------+
| "label" |
+-------------+
**sizeStyle** A string representing the desired size for the toolbar
+-----------+
| "default" |
+-----------+
| "regular" |
+-----------+
| "small" |
+-----------+
Returns a dictionary containing the created toolbar items, mapped by itemIdentifier.
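As a quick illustration (the identifiers, labels and image names below are
hypothetical, not part of the API), a minimal toolbar could be added with::

    items = [
        dict(itemIdentifier="open", label="Open", imageNamed="toolbarOpen"),
        dict(itemIdentifier="save", label="Save", imageNamed="toolbarSave"),
    ]
    self.w.addToolbar("DocumentToolbar", toolbarItems=items)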
"""
STANDARD_TOOLBAR_ITEMS = [
NSToolbarFlexibleSpaceItemIdentifier,
NSToolbarSpaceItemIdentifier,
NSToolbarSeparatorItemIdentifier,
NSToolbarCustomizeToolbarItemIdentifier,
NSToolbarPrintItemIdentifier,
NSToolbarShowFontsItemIdentifier,
NSToolbarShowColorsItemIdentifier,
]
# create the reference structures
self._toolbarItems = {}
self._toolbarDefaultItemIdentifiers = []
self._toolbarAllowedItemIdentifiers = []
self._toolbarCallbackWrappers = {}
self._toolbarSelectableItemIdentifiers = []
# create the toolbar items
for itemData in toolbarItems:
self._createToolbarItem(itemData)
if addStandardItems:
for standardItem in STANDARD_TOOLBAR_ITEMS:
if standardItem not in self._toolbarAllowedItemIdentifiers:
self._toolbarAllowedItemIdentifiers.append(standardItem)
# create the toolbar
toolbar = NSToolbar.alloc().initWithIdentifier_(toolbarIdentifier)
toolbar.setDelegate_(self)
toolbar.setAllowsUserCustomization_(True)
toolbar.setAutosavesConfiguration_(True)
displayModeMap = dict(
default=NSToolbarDisplayModeDefault,
iconLabel=NSToolbarDisplayModeIconAndLabel,
icon=NSToolbarDisplayModeIconOnly,
label=NSToolbarDisplayModeLabelOnly,
)
toolbar.setDisplayMode_(displayModeMap[displayMode])
sizeStyleMap = dict(
default=NSToolbarSizeModeDefault,
regular=NSToolbarSizeModeRegular,
small=NSToolbarSizeModeSmall)
toolbar.setSizeMode_(sizeStyleMap[sizeStyle])
self._window.setToolbar_(toolbar)
# Return the dict of toolbar items, so our caller can choose to
# keep references to them if needed.
return self._toolbarItems
def getToolbarItems(self):
if hasattr(self, "_toolbarItems"):
return self._toolbarItems
return {}
def addToolbarItem(self, itemData, index=None):
"""
Add a toolbar item to the window's toolbar.
**itemData** An item description with the same format as a toolbarItem description in `addToolbar`.
**index** An integer specifying the position at which to insert the toolbar item.
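A minimal sketch (the identifier and label are hypothetical)::

    self.w.addToolbarItem(dict(itemIdentifier="print", label="Print"), index=0)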
"""
if not hasattr(self, "_toolbarItems"):
raise VanillaError("window has no toolbar")
itemIdentifier = itemData.get("itemIdentifier")
self._createToolbarItem(itemData)
if itemData.get("visibleByDefault", True):
if index is not None:
self._toolbarDefaultItemIdentifiers.remove(itemIdentifier)
self._toolbarDefaultItemIdentifiers.insert(index, itemIdentifier)
index = self._toolbarDefaultItemIdentifiers.index(itemIdentifier)
self._window.toolbar().insertItemWithItemIdentifier_atIndex_(itemIdentifier, index)
def removeToolbarItem(self, itemIdentifier):
"""
Remove a toolbar item by its identifier.
**itemIdentifier** The unique string identifier of the item to remove.
"""
if not hasattr(self, "_toolbarItems"):
raise VanillaError("window has no toolbar")
if itemIdentifier not in self._toolbarItems:
raise VanillaError("itemIdentifier %r not in toolbar" % itemIdentifier)
item = self._toolbarItems[itemIdentifier]
toolbarItems = self._window.toolbar().items()
if item in toolbarItems:
# the item may be absent here if the user customized the toolbar manually
index = toolbarItems.indexOfObject_(item)
self._window.toolbar().removeItemAtIndex_(index)
self._toolbarAllowedItemIdentifiers.remove(itemIdentifier)
self._toolbarDefaultItemIdentifiers.remove(itemIdentifier)
del self._toolbarItems[itemIdentifier]
def _createToolbarItem(self, itemData):
itemIdentifier = itemData.get("itemIdentifier")
if itemIdentifier is None:
raise VanillaError("toolbar item data must contain a unique itemIdentifier string")
if itemIdentifier in self._toolbarItems:
raise VanillaError("toolbar itemIdentifier is not unique: %r" % itemIdentifier)
"""
Contains the API endpoints.
On error, every endpoint returns a JSON object with the key 'err_msg'.
"""
import flask
import logging
from flask import (Blueprint, request, send_file, send_from_directory)
from werkzeug.utils import secure_filename
from os import path
from datetime import datetime
from visuanalytics.server.db import db, queries
from visuanalytics.analytics.processing.image.matplotlib.diagram import generate_test_diagram
from visuanalytics.util.resources import TEMP_LOCATION, get_resource_path, get_temp_path
from visuanalytics.util.config_manager import get_private, set_private
from ast2json import str2json
from base64 import b64encode
from visuanalytics.analytics.apis.checkapi import check_api
logger = logging.getLogger()
api = Blueprint('api', __name__)
@api.teardown_app_request
def close_db_con(exception):
db.close_con_f()
@api.route("/testdiagram", methods=["POST"])
def test_diagram():
"""
Endpoint `/testdiagram`.
Generates a test image for a diagram using random values.
The submitted JSON should have the same structure as when creating an Infoprovider.
The response contains the generated image as a BLOB file.
"""
diagram_info = request.json
try:
file_path = generate_test_diagram(diagram_info)
return send_file(file_path, "application/json", True)
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while generating a test-diagram"})
return err, 400
@api.route("/checkapi", methods=["POST"])
def checkapi():
"""
Endpoint `/checkapi`.
The submitted JSON contains the API data with the keys 'url', 'api_key' and 'has_key'.
The response contains all keys, and their types, that can be queried from the given API.
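Based on the checks below, a request body has at least this shape (all values are placeholders):
{
    "api_info": {
        "api_key_name": "demo_key",
        "url_pattern": "https://api.example.com/v1/data",
        "method": "get"
    },
    "method": "noAuth",
    "response_type": "json"
}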
"""
api_info = request.json
try:
if "api_info" not in api_info:
err = flask.jsonify({"err_msg": "Missing field 'api'"})
return err, 400
if "api_key_name" not in api_info["api_info"]:
err = flask.jsonify({"err_msg": "Missing API-Key"})
return err, 400
if "url_pattern" not in api_info["api_info"]:
err = flask.jsonify({"err_msg": "Missing URL"})
return err, 400
if "method" not in api_info:
err = flask.jsonify({"err_msg": "Missing field 'method'"})
return err, 400
if "response_type" not in api_info:
err = flask.jsonify({"err_msg": "Missing field 'response_type'"})
return err, 400
header, parameter = queries.generate_request_dicts(api_info["api_info"], api_info["method"])
url, params = queries.update_url_pattern(api_info["api_info"]["url_pattern"])
parameter.update(params)
req_data = {
"method": api_info["api_info"].get("method", "get"),
"url": url,
"headers": header,
"params": parameter,
"response_type": api_info["response_type"]
}
keys, success = check_api(req_data)
return flask.jsonify({"status": 0, "api_keys": keys}) if success else flask.jsonify({"status": 1, "api_keys": keys})
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while checking a new api"})
return err, 400
@api.route("/infoprovider", methods=["POST"])
def add_infoprovider():
"""
Endpoint `/infoprovider`.
Route for adding an Infoprovider.
The transmitted Infoprovider must contain the keys 'infoprovider_name', 'diagrams', 'diagrams_original' and a key
'datasources' which contains all of its datasources.
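Based on the validation below, the request body has at least this skeleton (values are placeholders only):
{
    "infoprovider_name": "example",
    "diagrams": {},
    "diagrams_original": {},
    "datasources": [
        {
            "datasource_name": "example_source",
            "api": {}, "transform": [], "calculates": [], "replacements": [],
            "storing": [], "historized_data": [], "formulas": [],
            "schedule": {}, "listItems": []
        }
    ]
}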
"""
infoprovider = request.json
try:
if "infoprovider_name" not in infoprovider:
err = flask.jsonify({"err_msg": "Missing Infoprovider-Name"})
return err, 400
if "datasources" not in infoprovider:
err = flask.jsonify({"err_msg": "Missing Datasources"})
return err, 400
if "diagrams" not in infoprovider:
err = flask.jsonify({"err_msg": "Missing field 'diagrams'"})
return err, 400
if "diagrams_original" not in infoprovider:
err = flask.jsonify({"err_msg": "Missing field 'diagrams_original'"})
return err, 400
for datasource in infoprovider["datasources"]:
if "datasource_name" not in datasource:
err = flask.jsonify({"err_msg": "Missing field 'datasource_name' in a datasource"})
return err, 400
if "api" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'api' in datasource {datasource['datasource_name']}"})
return err, 400
if "transform" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'transform' in datasource {datasource['datasource_name']}"})
return err, 400
if "calculates" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'calculates' for datasource {datasource['datasource_name']}"})
return err, 400
if "replacements" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'replacements' for datasource {datasource['datasource_name']}"})
return err, 400
if "storing" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'storing' in datasource {datasource['datasource_name']}"})
return err, 400
if "historized_data" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'historized_data' for datasource {datasource['datasource_name']}"})
return err, 400
if "formulas" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'formulas' in datasource {datasource['datasource_name']}"})
return err, 400
if "schedule" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'schedule' for datasource {datasource['datasource_name']}"})
return err, 400
if "listItems" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'listItems' for datasource {datasource['datasource_name']}"})
return err, 400
if not queries.insert_infoprovider(infoprovider):
err = flask.jsonify({"err_msg": f"There already exists an infoprovider with the name "
f"{infoprovider['infoprovider_name']}"})
return err, 400
return "", 204
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while adding an infoprovider"})
return err, 400
@api.route("/videojob", methods=["POST"])
def add_videojob():
"""
Endpoint `/videojob`.
Route for adding a video job.
The transmitted video-job JSON must contain the keys 'videojob_name', 'images', 'audio', 'sequence', 'schedule',
'sceneList' and 'selectedInfoprovider'.
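Based on the validation below, the request body has at least this skeleton (values are placeholders only):
{
    "videojob_name": "example_video",
    "images": {},
    "audio": {},
    "sequence": {},
    "schedule": {},
    "sceneList": [],
    "selectedInfoprovider": []
}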
"""
video = request.json
try:
if "videojob_name" not in video:
err = flask.jsonify({"err_msg": "Missing Videojob-name"})
return err, 400
if "images" not in video:
err = flask.jsonify({"err_msg": "Missing Images"})
return err, 400
if "audio" not in video:
err = flask.jsonify({"err_msg": "Missing Audio"})
return err, 400
if "sequence" not in video:
err = flask.jsonify({"err_msg": "Missing Sequence"})
return err, 400
if "schedule" not in video:
err = flask.jsonify({"err_msg": "Missing Schedule"})
return err, 400
if "sceneList" not in video:
err = flask.jsonify({"err_msg": "Missing field 'sceneList'"})
return err, 400
if "selectedInfoprovider" not in video:
err = flask.jsonify({"err_msg": "Missing field 'selectedInfoprovider'"})
return err, 400
if not queries.insert_video_job(video):
err = flask.jsonify({"err_msg": f"There already exists a video with the name "
f"{video['videojob_name']}"})
return err, 400
return "", 204
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while adding a video"})
return err, 400
@api.route("/infoprovider/schedules", methods=["GET"])
def show_schedule():
"""
Endpoint '/infoprovider/schedules'.
The response contains a list of entries from the table "schedule_historisation".
Each entry contains the key schedule_historisation_id and the type of the schedule.
"""
try:
return flask.jsonify(queries.show_schedule())
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while loading all schedules"})
return err, 400
@api.route("/infoprovider/showweekly", methods=["GET"])
def show_weekly():
"""
Endpoint '/infoprovider/showweekly'.
The response contains a list of entries from the table "schedule_historisation_weekday".
Each entry contains the keys schedule_historisation_id, schedule_weekday_historisation_id and weekday.
"""
try:
return flask.jsonify(queries.show_weekly())
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while loading all weekly schedules"})
return err, 400
@api.route("/infoprovider/all", methods=["GET"])
def get_all_infoproviders():
"""
Endpoint `/infoprovider/all`.
The response contains information about all Infoproviders stored in the database.
"""
try:
return flask.jsonify(queries.get_infoprovider_list())
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while loading all infoproviders"})
return err, 400
@api.route("/infoprovider/<infoprovider_id>", methods=["PUT"])
def update_infoprovider(infoprovider_id):
"""
Endpoint `/infoprovider/<infoprovider_id>`.
Route for updating an Infoprovider.
:param infoprovider_id: ID of the Infoprovider.
:type infoprovider_id: int
"""
updated_data = request.json
try:
if "infoprovider_name" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Infoprovider-Name"})
return err, 400
if "datasources" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Datasources"})
return err, 400
if "diagrams" not in updated_data:
err = flask.jsonify({"err_msg": "Missing field 'diagrams'"})
return err, 400
if "diagrams_original" not in updated_data:
err = flask.jsonify({"err_msg": "Missing field 'diagrams_original'"})
return err, 400
for datasource in updated_data["datasources"]:
if "datasource_name" not in datasource:
err = flask.jsonify({"err_msg": "Missing field 'datasource_name' in a datasource"})
return err, 400
if "api" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'api' in datasource {datasource['datasource_name']}"})
return err, 400
if "transform" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'transform' in datasource {datasource['datasource_name']}"})
return err, 400
if "calculates" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'calculates' for datasource {datasource['datasource_name']}"})
return err, 400
if "replacements" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'replacements' for datasource {datasource['datasource_name']}"})
return err, 400
if "storing" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'storing' in datasource {datasource['datasource_name']}"})
return err, 400
if "historized_data" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'historized_data' for datasource {datasource['datasource_name']}"})
return err, 400
if "formulas" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'formulas' in datasource {datasource['datasource_name']}"})
return err, 400
if "schedule" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'schedule' for datasource {datasource['datasource_name']}"})
return err, 400
if "listItems" not in datasource:
err = flask.jsonify({"err_msg": f"Missing field 'listItems' for datasource {datasource['datasource_name']}"})
return err, 400
update_info = queries.update_infoprovider(infoprovider_id, updated_data)
if update_info is not None:
err = flask.jsonify(update_info)
return err, 400
return flask.jsonify({"status": "successful"})
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while updating an infoprovider"})
return err, 400
@api.route("/videojob/<videojob_id>", methods=["PUT"])
def update_videojob(videojob_id):
"""
Endpoint `/videojob/<videojob_id>`.
Route for updating a video job.
:param videojob_id: ID of the video job.
:type videojob_id: int
"""
updated_data = request.json
try:
if "videojob_name" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Videojob-name"})
return err, 400
if "images" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Images"})
return err, 400
if "audio" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Audio"})
return err, 400
if "sequence" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Sequence"})
return err, 400
if "schedule" not in updated_data:
err = flask.jsonify({"err_msg": "Missing Schedule"})
return err, 400
if "sceneList" not in updated_data:
err = flask.jsonify({"err_msg": "Missing field 'sceneList'"})
return err, 400
"diag_affine"):
assert(len(termLbl) == 2), "Stochastic term labels should have form ('S',<basis element label>)"
if termLbl[1] not in otherBasisLabels:
otherBasisLabels.append(termLbl[1])
else:
assert(len(termLbl) == 3), "Stochastic term labels should have form ('S',<bel1>, <bel2>)"
if termLbl[1] not in otherBasisLabels:
otherBasisLabels.append(termLbl[1])
if termLbl[2] not in otherBasisLabels:
otherBasisLabels.append(termLbl[2])
elif termType == "A": # Affine
assert(other_mode == "diag_affine"), "Affine labels are only allowed in an affine mode"
assert(len(termLbl) == 2), "Affine term labels should have form ('A',<basis element label>)"
if termLbl[1] not in otherBasisLabels:
otherBasisLabels.append(termLbl[1])
#Construct bases
# Note: the lists of basis matrices shouldn't contain the identity, since
# the terms above shouldn't contain identity terms - but `basis` should
# contain an identity element as its first element, so add this identity element
# to non-empty bases (empty bases stay empty!) to be consistent with the
# rest of the framework (bases *have* Ids)
sparse = basis.sparse
if set(hamBasisLabels) == set(basis.labels):
ham_basis = basis
else:
Id = basis[0]
ham_basis_mxs = [basis[bl] for bl in hamBasisLabels]
if len(ham_basis_mxs) > 0:
ham_basis = _ExplicitBasis([Id] + ham_basis_mxs, ['I'] + hamBasisLabels,
name=None, real=True, sparse=sparse)
else:
ham_basis = _ExplicitBasis(ham_basis_mxs, name=None, real=True, sparse=sparse)
if set(otherBasisLabels) == set(basis.labels):
other_basis = basis
else:
Id = basis[0]
other_basis_mxs = [basis[bl] for bl in otherBasisLabels]
if len(other_basis_mxs) > 0:
other_basis = _ExplicitBasis([Id] + other_basis_mxs, ['I'] + otherBasisLabels,
name=None, real=True, sparse=sparse)
else:
other_basis = _ExplicitBasis(other_basis_mxs, name=None, real=True, sparse=sparse)
bsH, bsO = len(ham_basis), len(other_basis)
#print("DB: constructed ham_basis = ",ham_basis)
#print("DB: other basis = ",other_basis)
#Create projection (term coefficient) arrays - or return None if
# the corresponding basis is empty (as per our convention)
hamProjs = _np.zeros(bsH - 1, 'complex') if bsH > 0 else None
if bsO > 0:
if other_mode == "diagonal": # OK if this runs for 'auto' too since then len(otherBasisIndices) == 0
otherProjs = _np.zeros(bsO - 1, 'complex')
elif other_mode == "diag_affine":
otherProjs = _np.zeros((2, bsO - 1), 'complex')
else:
otherProjs = _np.zeros((bsO - 1, bsO - 1), 'complex')
else: otherProjs = None
#Fill arrays
hamBasisIndices = {lbl: i - 1 for i, lbl in enumerate(ham_basis.labels)} # -1 to compensate for identity as
otherBasisIndices = {lbl: i - 1 for i, lbl in enumerate(other_basis.labels)} # first element (not in projections).
for termLbl, coeff in Ltermdict.items():
if isinstance(termLbl, str): termLbl = (termLbl[0], termLbl[1:]) # e.g. "HXX" => ('H','XX')
termType = termLbl[0]
if termType == "H": # Hamiltonian
k = hamBasisIndices[termLbl[1]] # index of coefficient in array
hamProjs[k] = coeff
elif termType == "S": # Stochastic
if other_mode == "diagonal":
k = otherBasisIndices[termLbl[1]] # index of coefficient in array
otherProjs[k] = coeff
elif other_mode == "diag_affine":
k = otherBasisIndices[termLbl[1]] # index of coefficient in array
otherProjs[0, k] = coeff
else: # other_mode == "all"
k = otherBasisIndices[termLbl[1]] # index of row in "other" coefficient matrix
j = otherBasisIndices[termLbl[2]] # index of col in "other" coefficient matrix
otherProjs[k, j] = coeff
elif termType == "A": # Affine
assert(other_mode == "diag_affine")
k = otherBasisIndices[termLbl[1]] # index of coefficient in array
otherProjs[1, k] = coeff
return hamProjs, otherProjs, ham_basis, other_basis
def lindblad_projections_to_paramvals(hamProjs, otherProjs, param_mode="cptp",
other_mode="all", truncate=True):
"""
Construct the array of Lindblad-gate parameter values from the separate
arrays of Hamiltonian and non-Hamiltonian Lindblad-term projections.
When `param_mode == "cptp"`, this function handles parameterizing the projections
so that the (real) parameter values correspond to projections for a valid
CPTP gate (e.g. by parameterizing the Cholesky decomposition of `otherProjs`
instead of `otherProjs` itself). This function is closely related to
implementation details of the LindbladOp class.
Parameters
----------
hamProjs : numpy.ndarray
An array of length d-1, where d is the gate dimension, giving the
projections onto a full set of the Hamiltonian-type Lindblad terms.
otherProjs : numpy.ndarray
An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d is the gate
dimension, for `other_mode` equal to `"all"`,`"diag_affine"`, or
`"diagonal"`, respectively. Values give the projections onto a full
set of non-Hamiltonian-type Lindblad terms.
param_mode : {"unconstrained", "cptp", "depol", "reldepol"}
Describes how values in `hamProjs` and `otherProjs` relate to the
returned parameter values. Allowed values are:
`"unconstrained"` (projs are independent unconstrained parameters),
`"cptp"` (independent parameters but constrained so map is CPTP),
`"reldepol"` (all non-Ham. diagonal projs take the *same* value),
`"depol"` (same as `"reldepol"` but projs must be *positive*)
other_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian Lindblad error projections `otherProjs` includes.
Allowed values are: `"diagonal"` (only the diagonal Stochastic),
`"diag_affine"` (diagonal + affine generators), and `"all"`.
truncate : bool, optional
Whether to truncate the projections onto the Lindblad terms in
order to meet constraints (e.g. to preserve CPTP) when necessary.
If False, then an error is thrown when the given projections
cannot be parameterized as specified.
Returns
-------
numpy.ndarray
A 1D array of real parameter values consisting of d-1 Hamiltonian
values followed by either (d-1)^2, 2*(d-1), or just d-1 non-Hamiltonian
values for `other_mode` equal to `"all"`, `"diag_affine"`, or
`"diagonal"`, respectively.
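A minimal usage sketch (shapes follow the descriptions above; the numbers are
illustrative only)::

    hamProjs = _np.zeros(3, 'complex')    # d-1 = 3 Hamiltonian projections
    otherProjs = 1e-3 * _np.identity(3)   # other_mode == "all": Hermitian (d-1, d-1)
    paramvals = lindblad_projections_to_paramvals(
        hamProjs, otherProjs, param_mode="cptp", other_mode="all")
    # paramvals is a 1D array of (d-1) + (d-1)**2 = 12 real values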
"""
if hamProjs is not None:
assert(_np.isclose(_np.linalg.norm(hamProjs.imag), 0)), \
"Hamiltonian projections (coefficients) are not all real!"
hamParams = hamProjs.real
else:
hamParams = _np.empty(0, 'd')
if otherProjs is not None:
if other_mode == "diagonal":
assert(_np.isclose(_np.linalg.norm(_np.imag(otherProjs)), 0)), \
"Diagonal stochastic projections (coefficients) are not all real!"
if param_mode == "depol": # otherParams is a *single-element* 1D vector of the sqrt of each diagonal el
assert(truncate or all([v >= -1e-12 for v in otherProjs])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
assert(truncate or all([_np.isclose(v, otherProjs[0]) for v in otherProjs])), \
"Diagonal lindblad coefficients are not equal (truncate == False)!"
otherProj = _np.mean(otherProjs.clip(1e-16, 1e100))
otherParams = _np.array(_np.sqrt(_np.real(otherProj)), 'd') # shape (1,)
elif param_mode == "cptp": # otherParams is a 1D vector of the sqrts of diagonal els
assert(truncate or all([v >= -1e-12 for v in otherProjs])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
otherProjs = otherProjs.clip(1e-16, 1e100)
otherParams = _np.sqrt(otherProjs.real) # shape (bsO-1,)
else: # "unconstrained": otherParams is a 1D vector of the real diagonal els of otherProjs
otherParams = otherProjs.real # shape (bsO-1,)
elif other_mode == "diag_affine":
assert(_np.isclose(_np.linalg.norm(_np.imag(otherProjs)), 0)), \
"Diagonal stochastic and affine projections (coefficients) are not all real!"
if param_mode == "depol": # otherParams is a single depol value + unconstrained affine coeffs
assert(truncate or all([v >= -1e-12 for v in otherProjs[0]])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
assert(truncate or all([_np.isclose(v, otherProjs[0, 0]) for v in otherProjs[0]])), \
"Diagonal lindblad coefficients are not equal (truncate == False)!"
depolProj = _np.mean(otherProjs[0, :].clip(1e-16, 1e100))
otherParams = _np.concatenate(([_np.sqrt(_np.real(depolProj))],
otherProjs[1].real)) # shape (1+(bsO-1),)
elif param_mode == "cptp": # Note: does not constrain affine coeffs to be CPTP
assert(truncate or all([v >= -1e-12 for v in otherProjs[0]])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
diagParams = _np.sqrt(_np.real(otherProjs[0, :]).clip(1e-16, 1e100)) # shape (bsO-1,)
otherParams = _np.concatenate((diagParams, otherProjs[1].real)) # diag + affine params
else: # param_mode == "unconstrained": otherParams holds the real diagonal and affine els of otherProjs
otherParams = otherProjs.real # shape (2,bsO-1)
else: # other_mode == "all"
assert(_np.isclose(_np.linalg.norm(otherProjs - otherProjs.T.conjugate()), 0)
), "Other projection/coefficient mx is not Hermitian!"
assert(param_mode != "depol"), "`depol` is not supported when `other_mode == 'all'`"
bsO = otherProjs.shape[0] + 1 # +1 to keep convention that this is the basis (w/Identity) size
otherParams = _np.empty((bsO - 1, bsO - 1), 'd')
if param_mode == "cptp": # otherParams mx stores Cholesky decomp
#push any slightly negative evals of otherProjs positive so that
# the Cholesky decomp will work.
evals, U = _np.linalg.eig(otherProjs)
Ui = _np.linalg.inv(U)
assert(truncate or all([ev >= -1e-12 for ev in evals])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
pos_evals = evals.clip(1e-16, 1e100)
otherProjs = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))
try:
Lmx = _np.linalg.cholesky(otherProjs)
# if Lmx not positive definite, try again with 1e-12 (same lines as above)
except _np.linalg.LinAlgError: # pragma: no cover
pos_evals = evals.clip(1e-12, 1e100) # pragma: no cover
otherProjs = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui)) # pragma: no cover
Lmx = _np.linalg.cholesky(otherProjs) # pragma: no cover
for i in range(bsO - 1):
assert(_np.linalg.norm(_np.imag(Lmx[i, i])) <
"""
File: pylinex/fitter/MetaFitter.py
Author: <NAME>
Date: 3 Sep 2017
Description: File containing class which employs many Fitter objects to form
grids of fit statistics with which to perform parameter number
optimization.
"""
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.colors import LogNorm, SymLogNorm
from distpy import GaussianDistribution
from ..util import Savable, VariableGrid, create_hdf5_dataset, HDF5Link,\
int_types, sequence_types
from ..quantity import QuantityFinder
from .Fitter import Fitter
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
class MetaFitter(Fitter, VariableGrid, QuantityFinder, Savable):
"""
Class which performs fits using the BasisSum it is given as well as subsets
of the BasisSum given. By doing so for grids of different subsets, it
chooses the optimal number of parameters.
"""
def __init__(self, basis_sum, data, error, compiled_quantity,\
quantity_to_minimize, *dimensions, **priors):
"""
Initializes a new MetaFitter object using the given inputs.
basis_sum: a BasisSum object (or a Basis object, which is converted
internally to a BasisSum of one Basis with the name 'sole')
data: 1D vector of same length as vectors in basis_sum
error: 1D vector of same length as vectors in basis_sum containing only
positive numbers
compiled_quantity: CompiledQuantity object representing quantities to
retrieve
quantity_to_minimize: the name of the Quantity object in the
CompiledQuantity to minimize to perform model
selection
*dimensions: list of lists of dictionaries indicating slices to take
for each subbasis.
**priors: keyword arguments where the keys are exactly the names of the
basis sets with '_prior' appended to them
"""
Fitter.__init__(self, basis_sum, data, error, **priors)
self.dimensions = dimensions
self.compiled_quantity = compiled_quantity
self.quantity_to_minimize = quantity_to_minimize
@property
def quantity_to_minimize(self):
"""
Property storing string name of quantity to minimize.
"""
if not hasattr(self, '_quantity_to_minimize'):
raise AttributeError("quantity_to_minimize was referenced " +\
"before it was set.")
return self._quantity_to_minimize
@quantity_to_minimize.setter
def quantity_to_minimize(self, value):
"""
Allows user to supply string name of the quantity to minimize.
"""
if isinstance(value, basestring):
if value in self.compiled_quantity:
self._quantity_to_minimize = value
else:
raise ValueError("quantity_to_minimize was not in " +\
"compiled_quantity.")
else:
raise TypeError("quantity_to_minimize was not a string.")
@property
def grids(self):
"""
Property storing the grids calculated by the full grid calculations. It
is a list of numpy.ndarray objects.
"""
if not hasattr(self, '_grids'):
self._grids = [np.zeros(self.shape + self.data.shape[:-1])\
for index in range(self.num_quantities)]
for indices in np.ndindex(*self.shape):
fitter = self.fitter_from_indices(indices)
quantity_values = self.compiled_quantity(fitter)
for (iquantity, quantity) in enumerate(quantity_values):
self._grids[iquantity][indices] = quantity
return self._grids
def fitter_from_subsets(self, **subsets):
"""
Finds the Fitter object associated with the given subbasis
subsets.
subsets: dict where the keys are basis names and the values are index
slices corresponding to the subsets to take
returns: Fitter corresponding to the given subbasis subsets
"""
sub_basis_sum = self.basis_sum.basis_subsets(**subsets)
sub_prior_sets = self.prior_subsets(**subsets)
return Fitter(sub_basis_sum, self.data, self.error, **sub_prior_sets)
def fitter_from_indices(self, indices):
"""
Finds the Fitter object corresponding to the given tuple of indices.
indices: tuple of ints with length given by the number of dimensions
returns: Fitter corresponding to the given grid position
"""
return self.fitter_from_subsets(**self.point_from_indices(indices))
def prior_subsets(self, **subsets):
"""
Applies given subbasis subsets to the priors for each subbasis.
subsets: dict where the keys are basis names and the values are index
slices corresponding to the subsets to take
returns: dict of priors with subsets taken from each subbasis
"""
result = {}
if self.has_priors:
for name in self.basis_sum.names:
key = name + '_prior'
if key in self.priors:
old_prior = self.priors[key]
if name in subsets:
result[key] = old_prior[:subsets[name]]
else:
result[key] = old_prior
return result
def __getitem__(self, index):
"""
Gets the grid associated with the given index.
index: if int, it is taken to be the internal index of the quantity to
retrieve
if str, it is taken to be the name of the quantity to retrieve
(in this case,
self.compiled_quantity.can_index_by_string must be True)
returns: numpy.ndarray of shape given by shape property containing
values of the given quantity
"""
if type(index) in int_types:
return self.grids[index]
elif isinstance(index, basestring):
return self.grids[self.compiled_quantity.index_dict[index]]
else:
raise AttributeError("index of MetaFitter must be an index " +\
"or a string. If it is a string, the " +\
"CompiledQuantity at the center of the " +\
"MetaFitter must have can_index_by_string " +\
"be True.")
def minimize_quantity(self, index=0, which_data=None, verbose=True):
"""
Minimizes the quantity associated with the given index and returns the
Fitter from that given set of subbasis subsets.
index: if int, it is taken to be the internal index of the quantity to
retrieve
if str, it is taken to be the name of the quantity to retrieve
(in this case,
self.compiled_quantity.can_index_by_string must be True)
which_data: if None, data must be 1D
if int, data must be 2D, it is used as index
if length-N sequence, data must be (N+1)D
verbose: if True, prints the name of the quantity being minimized
returns: Fitter corresponding to set of subbasis subsets which
minimizes the Quantity under concern
"""
grid_slice = ((slice(None),) * self.ndim)
if self.data.ndim > 1:
if type(which_data) is type(None):
raise ValueError("which_data must be given if data is not 1D.")
elif type(which_data) in int_types:
grid_slice = grid_slice + (which_data,)
else:
grid_slice = grid_slice + tuple(which_data)
grid = self[index][grid_slice]
if verbose:
print("Minimizing {!s} over grid.".format(\
self.compiled_quantity[index].name))
return np.unravel_index(np.argmin(grid), grid.shape)
def fill_hdf5_group(self, group, save_all_fitters=False,\
data_link=None, error_link=None, expander_links=None,\
save_channel_estimates=False):
"""
Saves all fitters to an hdf5 group. This should be used cautiously, as
it would take an unreasonably long time for large grids.
group: hdf5 file group to fill with Fitter information
"""
data_link =\
create_hdf5_dataset(group, 'data', data=self.data, link=data_link)
error_link = create_hdf5_dataset(group, 'error', data=self.error,\
link=error_link)
group.attrs['quantity_to_minimize'] = self.quantity_to_minimize
self.compiled_quantity.fill_hdf5_group(group.create_group(\
'compiled_quantity'), exclude=['bias_score'])
self.basis_sum.fill_hdf5_group(group.create_group('basis_sum'),\
expander_links=expander_links)
if self.has_priors:
subgroup = group.create_group('prior')
for name in self.names:
key = '{!s}_prior'.format(name)
if key in self.priors:
self.priors[key].fill_hdf5_group(\
subgroup.create_group(name))
if type(expander_links) is type(None):
expander_links = []
for ibasis in range(len(self.names)):
expander_links.append(\
group['basis_sum/basis_{}/expander'.format(ibasis)])
def prior_links_from_indices(subset_indices):
"""
Finds the prior mean links and prior covariance links from the
given indices.
"""
if self.has_priors:
(prior_mean_links, prior_covariance_links) = ({}, {})
subsets = self.point_from_indices(subset_indices)
for name in self.names:
prior_path = 'prior/{!s}'.format(name)
if '{!s}_prior'.format(name) in self.priors:
prior_mean_links[name] =\
group['{!s}/mean'.format(prior_path)]
prior_covariance_links[name] =\
group['{!s}/covariance'.format(prior_path)]
else:
prior_mean_links[name] = None
prior_covariance_links[name] = None
return (prior_mean_links, prior_covariance_links)
else:
return (None, None)
def basis_links_from_indices(subset_indices):
"""
Finds the basis links from the given indices.
"""
answer = []
subsets = self.point_from_indices(subset_indices)
for (iname, name) in enumerate(self.names):
relative_path = 'basis_sum/basis_{}/basis'.format(iname)
if name in subsets:
answer.append(HDF5Link(group[relative_path],\
slice(subsets[name])))
else:
answer.append(HDF5Link(group[relative_path]))
return answer
grids_already_defined = hasattr(self, '_grids')
if save_all_fitters or (not grids_already_defined):
if save_all_fitters:
subgroup = group.create_group('fitters')
if not grids_already_defined:
self._grids = [np.zeros(self.shape + self.data.shape[:-1])\
for index in range(self.num_quantities)]
for indices in np.ndindex(*self.shape):
fitter = self.fitter_from_indices(indices)
if not grids_already_defined:
quantity_values = self.compiled_quantity(fitter)
for (iquantity, quantity) in enumerate(quantity_values):
self._grids[iquantity][indices] = quantity
if save_all_fitters:
format_string = (('{}_' * (self.ndim - 1)) + '{}')
subsubgroup =\
subgroup.create_group(format_string.format(*indices))
basis_links = basis_links_from_indices(indices)
(prior_mean_links, prior_covariance_links) =\
prior_links_from_indices(indices)
fitter.fill_hdf5_group(subsubgroup, data_link=data_link,\
error_link=error_link, basis_links=basis_links,\
expander_links=expander_links,\
prior_mean_links=prior_mean_links,\
prior_covariance_links=prior_covariance_links,\
save_channel_estimates=save_channel_estimates)
subgroup = group.create_group('dimensions')
for (idimension, dimension) in enumerate(self.dimensions):
subsubgroup =\
subgroup.create_group('dimension_{}'.format(idimension))
for name in dimension:
create_hdf5_dataset(subsubgroup, name, data=dimension[name])
subgroup = group.create_group('grids')
for name in self.compiled_quantity.names:
create_hdf5_dataset(subgroup, name, data=self[name])
if self.data.ndim == 1:
if save_all_fitters:
for quantity in self.compiled_quantity:
indices = self.minimize_quantity(quantity.name)
format_string = 'fitters/{}' + ('_{}' * (self.ndim - 1))
group_name = format_string.format(*indices)
group['{!s}_optimal_fitter'.format(quantity.name)] =\
group[group_name]
if quantity.name == self.quantity_to_minimize:
group['optimal_fitter'] = group[group_name]
else:
indices = self.minimize_quantity(self.quantity_to_minimize)
subgroup = group.create_group('optimal_fitter')
basis_links = basis_links_from_indices(indices)
(prior_mean_links, prior_covariance_links) =\
prior_links_from_indices(indices)
fitter = self.fitter_from_indices(indices)
fitter.fill_hdf5_group(subgroup, data_link=data_link,\
error_link=error_link, basis_links=basis_links,\
expander_links=expander_links,\
prior_mean_links=prior_mean_links,\
prior_covariance_links=prior_covariance_links,\
save_channel_estimates=save_channel_estimates)
else:
subgroup = group.create_group('optimal_fitters')
left_format_string = 'optimal_fitters/data_curve' +\
('_{}' * (self.data.ndim - 1))
minimize_verbose = True
for data_indices in np.ndindex(*self.data.shape[:-1]):
left_group_name = left_format_string.format(*data_indices)
if save_all_fitters:
for quantity in self.compiled_quantity:
indices = self.minimize_quantity(quantity.name,\
data_indices, verbose=minimize_verbose)
right_format_string =\
'fitters/{}' + ('_{}' * (self.ndim - 1))
right_group_name = right_format_string.format(*indices)
group['{0!s}_{1!s}'.format(quantity.name,\
left_group_name)] = group[right_group_name]
if quantity.name == self.quantity_to_minimize:
group[left_group_name] = group[right_group_name]
else:
indices = self.minimize_quantity(\
self.quantity_to_minimize, data_indices,\
verbose=minimize_verbose)
subsubgroup = group.create_group(left_group_name)
basis_links = basis_links_from_indices(indices)
(prior_mean_links, prior_covariance_links) =\
prior_links_from_indices(indices)
fitter | |
conditional(SX ind, [SX] x, SX x_default, bool short_circuit) -> SX
conditional(MX ind, [MX] x, MX x_default, bool short_circuit) -> MX
If the condition `ind` evaluates to the integer k, where 0<=k<f.size(), then x[k]
will be returned, otherwise x_default will be returned.
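A small sketch of how this reads in Python (symbol names are arbitrary)::

    ind = MX.sym("ind")
    x0 = MX.sym("x0")
    x1 = MX.sym("x1")
    r = conditional(ind, [x0, x1], -1, False)  # x0 if ind==0, x1 if ind==1, else -1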
"""
return _casadi.conditional(*args)
def depends_on(*args):
"""
Check if expression depends on the argument The argument must be symbolic.
depends_on(IM f, IM arg) -> bool
depends_on(DM f, DM arg) -> bool
depends_on(SX f, SX arg) -> bool
depends_on(MX f, MX arg) -> bool
"""
return _casadi.depends_on(*args)
def solve(*args):
"""
Crunch the numbers; solve the problem.
solve(IM A, IM b) -> IM
solve(DM A, DM b) -> DM
solve(SX A, SX b) -> SX
solve(MX A, MX b) -> MX
solve(IM A, IM b, str lsolver, dict opts) -> IM
solve(DM A, DM b, str lsolver, dict opts) -> DM
solve(SX A, SX b, str lsolver, dict opts) -> SX
solve(MX A, MX b, str lsolver, dict opts) -> MX
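For example (the numeric values are arbitrary)::

    A = DM([[3, 7], [1, 2]])
    b = DM([1, 2])
    x = solve(A, b)   # x satisfies mtimes(A, x) == b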
"""
return _casadi.solve(*args)
def pinv(*args):
"""
Computes the Moore-Penrose pseudo-inverse.
pinv(IM A) -> IM
pinv(DM A) -> DM
pinv(SX A) -> SX
pinv(MX A) -> MX
pinv(IM A, str lsolver, dict opts) -> IM
pinv(DM A, str lsolver, dict opts) -> DM
pinv(SX A, str lsolver, dict opts) -> SX
pinv(MX A, str lsolver, dict opts) -> MX
If the matrix A is fat (size1<size2), mul(A, pinv(A)) is unity. If the
matrix A is slender (size2<size1), mul(pinv(A), A) is unity.
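For example (arbitrary numbers, full row rank)::

    A = DM([[1, 2, 3], [4, 5, 6]])  # fat: 2x3
    Ap = pinv(A)                    # 3x2; mtimes(A, Ap) is the 2x2 identity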
"""
return _casadi.pinv(*args)
def expm_const(*args):
"""
expm_const(IM A, IM t) -> IM
expm_const(DM A, DM t) -> DM
expm_const(SX A, SX t) -> SX
expm_const(MX A, MX t) -> MX
"""
return _casadi.expm_const(*args)
def expm(*args):
"""
expm(IM A) -> IM
expm(DM A) -> DM
expm(SX A) -> SX
expm(MX A) -> MX
"""
return _casadi.expm(*args)
def jacobian(*args):
"""
Calculate Jacobian.
jacobian(IM ex, IM arg, dict opts) -> IM
jacobian(DM ex, DM arg, dict opts) -> DM
jacobian(SX ex, SX arg, dict opts) -> SX
jacobian(MX ex, MX arg, dict opts) -> MX
"""
return _casadi.jacobian(*args)
def jtimes(*args):
"""
Calculate the Jacobian and multiply by a vector from the right This is
jtimes(IM ex, IM arg, IM v, bool tr) -> IM
jtimes(DM ex, DM arg, DM v, bool tr) -> DM
jtimes(SX ex, SX arg, SX v, bool tr) -> SX
jtimes(MX ex, MX arg, MX v, bool tr) -> MX
equivalent to mul(jacobian(ex, arg), v) or mul(jacobian(ex, arg).T, v) for
tr set to false and true respectively. In contrast to these expressions, it
will use directional derivatives which is typically (but not necessarily)
more efficient if the complete Jacobian is not needed and v has few rows.
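A short usage sketch (assumes the usual `from casadi import *`)::

    x = SX.sym("x", 3)
    ex = sin(x)
    v = SX.sym("v", 3)
    Jv = jtimes(ex, x, v, False)  # same value as mtimes(jacobian(ex, x), v)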
"""
return _casadi.jtimes(*args)
def linearize(*args):
"""
Linearize an expression.
linearize(IM f, IM x, IM x0) -> IM
linearize(DM f, DM x, DM x0) -> DM
linearize(SX f, SX x, SX x0) -> SX
linearize(MX f, MX x, MX x0) -> MX
"""
return _casadi.linearize(*args)
def which_depends(*args):
"""
Find out which variables enter with some order.
which_depends(IM expr, IM var, int order, bool tr) -> [bool]
which_depends(DM expr, DM var, int order, bool tr) -> [bool]
which_depends(SX expr, SX var, int order, bool tr) -> [bool]
which_depends(MX expr, MX var, int order, bool tr) -> [bool]
"""
return _casadi.which_depends(*args)
def is_linear(*args):
"""
Is expr linear in var?
is_linear(IM expr, IM var) -> bool
is_linear(DM expr, DM var) -> bool
is_linear(SX expr, SX var) -> bool
is_linear(MX expr, MX var) -> bool
False negatives are possible (an expression may not be recognised as linear
while it really is), false positives not.
"""
return _casadi.is_linear(*args)
def is_quadratic(*args):
"""
Is expr quadratic in var?
is_quadratic(IM expr, IM var) -> bool
is_quadratic(DM expr, DM var) -> bool
is_quadratic(SX expr, SX var) -> bool
is_quadratic(MX expr, MX var) -> bool
False negatives are possible (an expression may not be recognised as
quadratic while it really is), false positives not.
"""
return _casadi.is_quadratic(*args)
def gradient(*args):
"""
Calculate Jacobian.
gradient(IM ex, IM arg) -> IM
gradient(DM ex, DM arg) -> DM
gradient(SX ex, SX arg) -> SX
gradient(MX ex, MX arg) -> MX
"""
return _casadi.gradient(*args)
def tangent(*args):
"""
Calculate Jacobian.
tangent(IM ex, IM arg) -> IM
tangent(DM ex, DM arg) -> DM
tangent(SX ex, SX arg) -> SX
tangent(MX ex, MX arg) -> MX
"""
return _casadi.tangent(*args)
def hessian(*args):
"""
hessian(IM ex, IM arg) -> (IM , IM OUTPUT1)
hessian(DM ex, DM arg) -> (DM , DM OUTPUT1)
hessian(SX ex, SX arg) -> (SX , SX OUTPUT1)
hessian(MX ex, MX arg) -> (MX , MX OUTPUT1)
"""
return _casadi.hessian(*args)
def quadratic_coeff(*args):
"""
Recognizes quadratic form in scalar expression.
quadratic_coeff(IM ex, IM arg) -> (IM OUTPUT1, IM OUTPUT2, IM OUTPUT3)
quadratic_coeff(DM ex, DM arg) -> (DM OUTPUT1, DM OUTPUT2, DM OUTPUT3)
quadratic_coeff(SX ex, SX arg) -> (SX OUTPUT1, SX OUTPUT2, SX OUTPUT3)
quadratic_coeff(MX ex, MX arg) -> (MX OUTPUT1, MX OUTPUT2, MX OUTPUT3)
1/2*x' A x + b' x + c
e = 0.5*bilin(A,x,x)+dot(b,x)+c
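For example (the coefficients in the comment are what the decomposition works
out to symbolically)::

    x = SX.sym("x", 2)
    e = x[0]**2 + 2*x[0]*x[1] + 3*x[1] + 1
    A, b, c = quadratic_coeff(e, x)  # A == [[2, 2], [2, 0]], b == [0, 3], c == 1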
"""
return _casadi.quadratic_coeff(*args)
def linear_coeff(*args):
"""
Recognizes linear form in vector expression.
linear_coeff(IM ex, IM arg) -> (IM OUTPUT1, IM OUTPUT2)
linear_coeff(DM ex, DM arg) -> (DM OUTPUT1, DM OUTPUT2)
linear_coeff(SX ex, SX arg) -> (SX OUTPUT1, SX OUTPUT2)
linear_coeff(MX ex, MX arg) -> (MX OUTPUT1, MX OUTPUT2)
A x + b
"""
return _casadi.linear_coeff(*args)
def n_nodes(*args):
"""
n_nodes(IM A) -> int
n_nodes(DM A) -> int
n_nodes(SX A) -> int
n_nodes(MX A) -> int
"""
return _casadi.n_nodes(*args)
def print_operator(*args):
"""
Get a string representation for a binary MatType, using custom arguments.
print_operator(IM xb, [str] args) -> str
print_operator(DM xb, [str] args) -> str
print_operator(SX xb, [str] args) -> str
print_operator(MX xb, [str] args) -> str
"""
return _casadi.print_operator(*args)
def repsum(*args):
"""
Given a repeated matrix, computes the sum of repeated parts.
repsum(IM A, int n, int m) -> IM
repsum(DM A, int n, int m) -> DM
repsum(SX A, int n, int m) -> SX
repsum(MX A, int n, int m) -> MX
"""
return _casadi.repsum(*args)
def diff(*args):
"""
Returns difference (n-th order) along given axis (MATLAB convention)
diff(IM A, int n, int axis) -> IM
diff(DM A, int n, int axis) -> DM
diff(SX A, int n, int axis) -> SX
diff(MX A, int n, int axis) -> MX
"""
return _casadi.diff(*args)
def cumsum(*args):
"""
Returns cumulative sum along given axis (MATLAB convention)
cumsum(IM A, int axis) -> IM
cumsum(DM A, int axis) -> DM
cumsum(SX A, int axis) -> SX
cumsum(MX A, int axis) -> MX
"""
return _casadi.cumsum(*args)
def einstein(*args):
"""
Computes an einstein dense tensor contraction.
einstein(IM A, IM B, [int] dim_a, [int] dim_b, [int] dim_c, [int] a, [int] b, [int] c) -> IM
einstein(DM A, DM B, [int] dim_a, [int] dim_b, [int] dim_c, [int] a, [int] b, [int] c) -> DM
einstein(SX A, SX B, [int] dim_a, [int] dim_b, [int] dim_c, [int] a, [int] b, [int] c) -> SX
einstein(MX A, MX B, [int] dim_a, [int] dim_b, [int] dim_c, [int] a, [int] b, [int] c) -> MX
einstein(IM A, IM B, IM C, [int] dim_a, [int] dim_b, [int] dim_c, [int] a, [int] b, [int] c) -> IM
einstein(DM A, DM B, DM C, [int] dim_a, [int] dim_b, [int] dim_c, [int] a, [int] b, [int] c) -> DM
einstein(SX A, SX B, SX C, [int] dim_a, [int] dim_b, [int] dim_c, [int] a, [int] b, [int] c) -> SX
einstein(MX A, MX B, MX C, [int] dim_a, [int] dim_b, [int] dim_c, [int] a, [int] b, [int] c) -> MX
Computes the product: C_c = A_a + B_b where a b c are index/einstein
notation in an encoded form
For example, an matrix-matrix product may be written as: C_ij = A_ik B_kj
The encoded form uses strictly negative numbers to indicate labels. For the
above example, we would have: a {-1, -3} b {-3, -2} c {-1 -2}
"""
return _casadi.einstein(*args)
def mmin(*args):
"""
Smallest element in a matrix.
mmin(IM x) -> IM
mmin(DM x) -> DM
# coding: utf-8
# In[1]:
import os
import math
import random
import numpy as np
import tensorflow as tf
import cv2
import glob
import os
import argparse
import lxml.etree
from datetime import datetime
slim = tf.contrib.slim
# In[2]:
#get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#from skimage import io
import time
import subprocess
import pafy
from common import *
def parseXML(xmlfile):
# create element tree object
tree = lxml.etree.parse(xmlfile)
# get root element
entry = tree.getroot()
filename = entry.xpath('/annotation/filename/text()')[0]
name = entry.xpath('/annotation/object/name/text()')[0]
xmin = entry.xpath('/annotation/object/bndbox/xmin/text()')[0]
#print("xmin",xmin)
ymin = entry.xpath('/annotation/object/bndbox/ymin/text()')[0]
xmax = entry.xpath('/annotation/object/bndbox/xmax/text()')[0]
ymax = entry.xpath('/annotation/object/bndbox/ymax/text()')[0]
# assemble the bounding box entry: [filename, class name, xmin, ymin, xmax, ymax]
box = [filename,name,int(xmin),int(ymin),int(xmax),int(ymax)]
return box
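# Example of the return value for a hypothetical Pascal-VOC style annotation file:
# parseXML("annotations/frame_000123.xml")
# -> ["frame_000123.jpg", "car", 100, 150, 300, 400] (filename, class, xmin, ymin, xmax, ymax)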
# In[3]:
import sys
sys.path.append('../')
precision = 10
def getCurrentClock():
#return time.clock()
return datetime.now()
def click_and_crop(event, x, y, flags, param):
# grab references to the global variables
global refPt, cropping, tracks
# if the left mouse button was clicked, record the starting
# (x, y) coordinates and indicate that cropping is being
# performed
if event == cv2.EVENT_LBUTTONDOWN:
refPt = [(x, y)]
# check to see if the left mouse button was released
elif event == cv2.EVENT_LBUTTONUP:
# record the ending (x, y) coordinates and indicate that
# the cropping operation is finished
refPt.append((x, y))
if abs(refPt[0][0]-refPt[1][0]) > 10:
cropping = True
tracks = [] #reset tracking
else:
cropping = False
#cropping = False
# draw a rectangle around the region of interest
#cv2.rectangle(img, refPt[0], refPt[1], (0, 255, 0), 2)
#cv2.imshow("ssd", img)
# In[4]:
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-s", "--skipNr", help="skip frames nr")
ap.add_argument("-m", "--modelType", help="model type")
ap.add_argument("-t", "--trackCnt", help="track max corners")
ap.add_argument("-w", "--webcam", help="webcam mode")
ap.add_argument("-r", "--resolution", help="resolution default (640,480)")
ap.add_argument("-f", "--framerate", help="frames per second, default 30")
#ap.add_argument("-i", "--images", help="path to the images")
args = vars(ap.parse_args())
#url = args.get("images", None)
modelType=args.get("modelType", None)
if modelType is None :
modelType = "ssd"
webcam=args.get("webcam", None)
if webcam is None or int(webcam)==0:
webcam = False
else:
webcam = True
tracking=args.get("trackCnt", None)
if tracking is None:
tracking = 0
else:
tracking = int(tracking)
framerate=args.get("framerate", None)
if framerate is None:
framerate = 30
else:
framerate = int(framerate)
#procWidth = 1920 #640 # processing width (x resolution) of frame
#procHeight = 1080 # processing width (x resolution) of frame
procWidth = 1280 # processing width (x resolution) of frame
procHeight = int(procWidth*(1080/1920)) # processing height (y resolution) of frame
resolution=args.get("resolution", None)
if resolution is None:
(procWidth,procHeight) = (640,480)
else:
(procWidth,procHeight) = resolution.split(",")
procWidth = int(procWidth)
procHeight = int(procHeight)
shapeWidth=512
shapeHeight=512
shapeWidth=300
shapeHeight=300
if modelType=="ssd":
if shapeWidth==300:
from nets import ssd_vgg_300, ssd_common, np_methods
else:
from nets import ssd_vgg_512, ssd_common, np_methods
from preprocessing import ssd_vgg_preprocessing
import visualization
elif modelType=="tensorflow":
from utils import label_map_util
from utils import visualization_utils as vis_util
from collections import defaultdict
from PIL import Image
print("procWidth",procWidth,"procHeight", procHeight)
#print("Test")
# In[5]:
# In[6]:
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
url = args.get("video", None)
if url is None:
url = "https://www.youtube.com/watch?v=uuQlMCMT71I"
skipNr=args.get("skipNr", None)
if skipNr is not None :
skipNr = int(skipNr)
else:
skipNr=0
print("skipNr", skipNr)
#A smooth drive in The Crew on PS4 - OSSDC Simulator ACC Train 30fps
#videoUrl = subprocess.Popen("youtube-dl.exe -f22 -g https://www.youtube.com/watch?v=uuQlMCMT71I", shell=True, stdout=subprocess.PIPE).stdout.read()
#videoUrl = videoUrl.decode("utf-8").rstrip()
def getVideoURL(url):
videoUrl = url
video = pafy.new(url)
streams = video.streams
videoUrlList={}
for s in streams:
videoUrlList[s.resolution] = s.url
#print(s.resolution, s.extension, s.get_filesize(), s.url)
if videoUrlList.get("1280x720",None) is not None:
videoUrl = videoUrlList.get("1280x720",None)
print("1280x720")
if videoUrlList.get("1920x1080",None) is not None:
videoUrl = videoUrlList.get("1920x1080",None)
print("1920x1080")
return videoUrl
origVideoUrl = url
if "youtube." in url:
videoUrl = getVideoURL(url)
else:
videoUrl = url
# if the video argument is None, then we are reading from webcam
#videoUrl = args.get("video", None)
print("videoUrl=",videoUrl)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# ## Post-processing pipeline
#
# The SSD outputs need to be post-processed to provide proper detections. Namely, we follow these common steps:
#
# * Select boxes above a classification threshold;
# * Clip boxes to the image shape;
# * Apply the Non-Maximum-Selection algorithm: fuse together boxes whose Jaccard score > threshold;
# * If necessary, resize bounding boxes to original image shape.
# In[7]:
# Main image processing routine.
def process_image(img, select_threshold=0.5, nms_threshold=.45, net_shape=(shapeWidth, shapeHeight)):
# Run SSD network.
rimg, rpredictions, rlocalisations, rbbox_img = isess.run([image_4d, predictions, localisations, bbox_img],
feed_dict={img_input: img})
# Get classes and bboxes from the net outputs.
rclasses, rscores, rbboxes = np_methods.ssd_bboxes_select(
rpredictions, rlocalisations, ssd_anchors,
select_threshold=select_threshold, img_shape=net_shape, num_classes=21, decode=True)
rbboxes = np_methods.bboxes_clip(rbbox_img, rbboxes)
rclasses, rscores, rbboxes = np_methods.bboxes_sort(rclasses, rscores, rbboxes, top_k=400)
rclasses, rscores, rbboxes = np_methods.bboxes_nms(rclasses, rscores, rbboxes, nms_threshold=nms_threshold)
# Resize bboxes to original image shape. Note: useless for Resize.WARP!
rbboxes = np_methods.bboxes_resize(rbbox_img, rbboxes)
return rclasses, rscores, rbboxes
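# Hypothetical usage sketch (not in the original script): running the SSD
# post-processing pipeline above on a single frame. It assumes the SSD graph
# built below (isess, image_4d, predictions, ...) already exists; the file name
# is made up. Boxes come back normalised as (ymin, xmin, ymax, xmax).
#
#   frame = cv2.imread('frame.jpg')
#   rclasses, rscores, rbboxes = process_image(frame)
#   for cls, score, (ymin, xmin, ymax, xmax) in zip(rclasses, rscores, rbboxes):
#       if score > 0.5:
#           print('class %d score %.2f box (%.2f, %.2f, %.2f, %.2f)'
#             % (cls, score, ymin, xmin, ymax, xmax))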
if modelType=="ssd":
with tf.device('/gpu:0'):
# TensorFlow session: grow memory when needed. TF, DO NOT USE ALL MY GPU MEMORY!!!
#gpu_options = tf.GPUOptions(allow_growth=True)
#config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
#isess = tf.InteractiveSession(config=config)
isess = tf.InteractiveSession() #config=tf.ConfigProto(log_device_placement=True))
# ## SSD 300 Model
#
# The SSD 300 network takes 300x300 image inputs. In order to feed any image, the latter is resized to this input shape (i.e. `Resize.WARP_RESIZE`). Note that even though it may change the ratio width / height, the SSD model performs well on resized images (and it is the default behaviour in the original Caffe implementation).
#
# SSD anchors correspond to the default bounding boxes encoded in the network. The SSD net output provides offset on the coordinates and dimensions of these anchors.
# Input placeholder.
#net_shape = (300, 300)
net_shape = (shapeWidth, shapeHeight)
data_format = 'NHWC' #'NHWC' #'NCHW'
img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
# Evaluation pre-processing: resize to SSD net shape.
image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(
img_input, None, None, net_shape, data_format, resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)
image_4d = tf.expand_dims(image_pre, 0)
# Define the SSD model.
reuse = True if 'ssd_net' in locals() else None
if shapeWidth==300:
ssd_net = ssd_vgg_300.SSDNet()
else:
ssd_net = ssd_vgg_512.SSDNet()
with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):
predictions, localisations, _, _ = ssd_net.net(image_4d, is_training=False, reuse=reuse)
# Restore SSD model.
if shapeWidth==300:
ckpt_filename = 'checkpoints/ssd_300_vgg.ckpt'
else:
ckpt_filename = 'checkpoints/VGG_VOC0712_SSD_512x512_ft_iter_120000.ckpt'
# ckpt_filename = '../checkpoints/VGG_VOC0712_SSD_300x300_ft_iter_120000.ckpt'
isess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(isess, ckpt_filename)
# SSD default anchor boxes.
ssd_anchors = ssd_net.anchors(net_shape)
# In[10]:
if modelType=="tensorflow":
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
#with tf.device('/gpu:0'):
sess = tf.InteractiveSession(graph=detection_graph) #,config=tf.ConfigProto(log_device_placement=True)) #tf.InteractiveSession()
start_time = getCurrentClock()
from webcamvideostream import *
import mss
import numpy
'''
fpsValue=0
frameCnt=0
prevFrameCnt=0
prevTime=getCurrentClock()
for imgFile in sorted(glob.glob(url+"/*.jpg")):
bi = parseXML(url+os.path.splitext(os.path.basename(imgFile))[0]+".xml")
print(bi)
key = cv2.waitKey(1)
if key == 27:
break
img = cv2.imread(imgFile)
cv2.rectangle(img, (bi[2],bi[3]), (bi[4],bi[5]), (0, 255, 0), 2)
frameCnt=frameCnt+1
nowMicro = getCurrentClock()
delta = (nowMicro-prevTime).total_seconds()
#print("%f " % (delta))
if delta>=1.0:
fpsValue = ((frameCnt-prevFrameCnt)/delta)
prevTime = getCurrentClock()
prevFrameCnt=frameCnt
draw_str(img, (20, 20), "FPS = %03.2f, Frame = %05d, Object = %8s, File = %10s" % (fpsValue,frameCnt,bi[1],bi[0]))
cv2.imshow("tracking", img)
'''
# 800x600 windowed mode
#mon = {'top': 100, 'left': 2020, 'width': 1280, 'height': 720}
#mon = {'top': 0, 'left': 1920, 'width': 1280, 'height': 720}
mon = {'top': 0, 'left': 0, 'width': 1280, 'height': 720}
sct = None
def getCap(videoUrl):
global sct
if "screen" in url:
sct = mss.mss()
cap = None
if sct is None:
if webcam:
#cap = WebcamVideoStream(src=""+str(videoUrl)+"").start()
cap = WebcamVideoStream(videoUrl,(procWidth,procHeight),framerate)
cap.start()
else:
cap = cv2.VideoCapture(videoUrl)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, procWidth)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, procHeight)
cap.set(cv2.CAP_PROP_FPS, framerate)
return cap
cap = getCap(videoUrl)
count=50
#skip=2000
skip=skipNr
SKIP_EVERY=150 #pick a frame every 5 seconds
count=1000000
#skip=0 #int(7622-5)
SKIP_EVERY=0
every=SKIP_EVERY
initial_time = getCurrentClock()
flag=True
frameCnt=0
prevFrameCnt=0
prevTime=getCurrentClock()
showImage=False
showImage=True
processImage=False
processImage=True
zoomImage=0
#zoomImage=True
rclasses = []
rscores = []
rbboxes = []
record = False
#record = True
out = None
if record:
fourcc = cv2.VideoWriter_fourcc(*'MPEG')
timestr = time.strftime("%Y%m%d-%H%M%S")
out = cv2.VideoWriter('output-'+timestr+'.mp4',fourcc, 30.0, (int(procWidth),int(procHeight)))
#output_side_length = int(1920/zoomImage)
#height_offset = int((height - output_side_length) / 2)
#width_offset = int((width - output_side_length) / 2)
flag = True
# initialize the list of reference points and boolean indicating
# whether cropping is being performed or not
refPt = [(0, 0),([procWidth],procHeight)]
cropping = False
cv2.namedWindow("ossdc.org source: " + origVideoUrl)
cv2.setMouseCallback("ossdc.org source: " + origVideoUrl, click_and_crop)
fpsValue=0
tracks = []
if tracking>0:
lk_params = dict( winSize = (15, 15),#(15, 15),
maxLevel = 3,#2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 3, 0.01))
feature_params = dict( maxCorners = tracking, #5000 #500,
qualityLevel = 0.1, #0.3,
minDistance = 3, #7,
blockSize = 3 ) #7 )
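# Hypothetical sketch (not in the original script) of how the parameter
# dictionaries above are typically consumed by OpenCV's pyramidal
# Lucas-Kanade tracker; prev_frame and next_frame are made-up variable names.
#
#   gray_prev = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
#   gray_next = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)
#   p0 = cv2.goodFeaturesToTrack(gray_prev, mask=None, **feature_params)
#   p1, st, err = cv2.calcOpticalFlowPyrLK(gray_prev, gray_next, p0, None, **lk_params)
#   good_new = p1[st == 1]  # points successfully tracked into the next frame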
#procWidth = 1280 #640 # processing width (x resolution) of frame
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class CloudError(Model):
"""CloudError.
"""
_attribute_map = {
}
class CreatedBy(Model):
"""Provides details of the entity that created/updated the workspace.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar oid: The Object ID that created the workspace.
:vartype oid: str
:ivar puid: The Personal Object ID corresponding to the object ID above
:vartype puid: str
:ivar application_id: The application ID of the application that initiated
the creation of the workspace. For example, Azure Portal.
:vartype application_id: str
"""
_validation = {
'oid': {'readonly': True},
'puid': {'readonly': True},
'application_id': {'readonly': True},
}
_attribute_map = {
'oid': {'key': 'oid', 'type': 'str'},
'puid': {'key': 'puid', 'type': 'str'},
'application_id': {'key': 'applicationId', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(CreatedBy, self).__init__(**kwargs)
self.oid = None
self.puid = None
self.application_id = None
class Encryption(Model):
"""The object that contains details of encryption used on the workspace.
:param key_source: The encryption keySource (provider). Possible values
(case-insensitive): Default, Microsoft.Keyvault. Possible values include:
'Default', 'Microsoft.Keyvault'. Default value: "Default" .
:type key_source: str or ~azure.mgmt.databricks.models.KeySource
:param key_name: The name of KeyVault key.
:type key_name: str
:param key_version: The version of KeyVault key.
:type key_version: str
:param key_vault_uri: The Uri of KeyVault.
:type key_vault_uri: str
"""
_attribute_map = {
'key_source': {'key': 'keySource', 'type': 'str'},
'key_name': {'key': 'KeyName', 'type': 'str'},
'key_version': {'key': 'keyversion', 'type': 'str'},
'key_vault_uri': {'key': 'keyvaulturi', 'type': 'str'},
}
def __init__(self, *, key_source="Default", key_name: str=None, key_version: str=None, key_vault_uri: str=None, **kwargs) -> None:
super(Encryption, self).__init__(**kwargs)
self.key_source = key_source
self.key_name = key_name
self.key_version = key_version
self.key_vault_uri = key_vault_uri
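# Illustrative sketch (added comment, not part of the generated code): constructing
# an Encryption object for a customer-managed key; all key values below are made up.
#
#   encryption = Encryption(
#       key_source='Microsoft.Keyvault',
#       key_name='my-cmk-key',
#       key_version='0123456789abcdef',
#       key_vault_uri='https://mykeyvault.vault.azure.net/',
#   )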
class ErrorDetail(Model):
"""Error details.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error's code.
:type code: str
:param message: Required. A human readable error message.
:type message: str
:param target: Indicates which property in the request is responsible for
the error.
:type target: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
}
def __init__(self, *, code: str, message: str, target: str=None, **kwargs) -> None:
super(ErrorDetail, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = target
class ErrorInfo(Model):
"""The code and message for an error.
All required parameters must be populated in order to send to Azure.
:param code: Required. A machine readable error code.
:type code: str
:param message: Required. A human readable error message.
:type message: str
:param details: error details.
:type details: list[~azure.mgmt.databricks.models.ErrorDetail]
:param innererror: Inner error details if they exist.
:type innererror: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'innererror': {'key': 'innererror', 'type': 'str'},
}
def __init__(self, *, code: str, message: str, details=None, innererror: str=None, **kwargs) -> None:
super(ErrorInfo, self).__init__(**kwargs)
self.code = code
self.message = message
self.details = details
self.innererror = innererror
class ErrorResponse(Model):
"""Error response.
Contains details when the response code indicates an error.
All required parameters must be populated in order to send to Azure.
:param error: Required. The error details.
:type error: ~azure.mgmt.databricks.models.ErrorInfo
"""
_validation = {
'error': {'required': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorInfo'},
}
def __init__(self, *, error, **kwargs) -> None:
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class ErrorResponseException(HttpOperationError):
"""Server responsed with exception of type: 'ErrorResponse'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
class ManagedIdentityConfiguration(Model):
"""The Managed Identity details for storage account.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar principal_id: The objectId of the Managed Identity that is linked to
the Managed Storage account.
:vartype principal_id: str
:ivar tenant_id: The tenant Id where the Managed Identity is created.
:vartype tenant_id: str
:ivar type: The type of Identity created. It can be either SystemAssigned
or UserAssigned.
:vartype type: str
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(ManagedIdentityConfiguration, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = None
class Operation(Model):
"""REST API operation.
:param name: Operation name: {provider}/{resource}/{operation}
:type name: str
:param display: The object that represents the operation.
:type display: ~azure.mgmt.databricks.models.OperationDisplay
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(self, *, name: str=None, display=None, **kwargs) -> None:
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
class OperationDisplay(Model):
"""The object that represents the operation.
:param provider: Service provider: Microsoft.ResourceProvider
:type provider: str
:param resource: Resource on which the operation is performed.
:type resource: str
:param operation: Operation type: Read, write, delete, etc.
:type operation: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
}
def __init__(self, *, provider: str=None, resource: str=None, operation: str=None, **kwargs) -> None:
super(OperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
class Resource(Model):
"""The core properties of ARM resources.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class Sku(Model):
"""SKU for the resource.
All required parameters must be populated in order to send to Azure.
:param name: Required. The SKU name.
:type name: str
:param tier: The SKU tier.
:type tier: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(self, *, name: str, tier: str=None, **kwargs) -> None:
super(Sku, self).__init__(**kwargs)
self.name = name
self.tier = tier
class TrackedResource(Resource):
"""The resource model definition for a ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(self, *, location: str, tags=None, **kwargs) -> None:
super(TrackedResource, self).__init__(**kwargs)
self.tags = tags
self.location = location
class Workspace(TrackedResource):
"""Information about workspace.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
:vartype type: str
basic slicing
if not isarray and not interp:
isbasic = True
else:
isbasic = False
ret = []
for axis in dimensions:
ret.append(xsel[axis])
ret = xSelect(ret)
ret.isbasic = isbasic
ret.interp = interp
ret.masked = masked
ret.order = order
return ret
#@-node:schmidli.20080322120238.15:idxsel2xsel
#@+node:schmidli.20080322120238.16:numpy2xsel
def numpy2xsel(isel):
""" convert a numpy selection object to an xselection object
extended numpy selection object:
if multidim: the dimensionality of idx is NOT changed
else: convert 1d-idx to ndim arrays
"""
if N.isscalar(isel): isel = tuple((isel,))
if isinstance(isel, slice): isel = tuple((isel,))
xsel = []
isarray = False
interp = False
multidim = False
do_convert = True
if not isinstance(isel, tuple):
raise TypeError("wrong argument type")
for idx in isel:
if isinstance(idx, slice):
xsel.append(idx)
elif N.isscalar(idx):
xsel.append(idx)
if idx.dtype in (N.float32, N.float64):
interp = True
else:
isarray = True
idx = N.atleast_1d(idx)
xsel.append(idx)
isarray = True
if idx.dtype in (N.float32, N.float64):
interp = True
if idx.ndim > 1:
multidim = True # conversion not supported for multidim
# convert selection objects to compatible _intp arrays if necessary
if isarray and not multidim:
# convert slices to 1d-arrays
for i in range(len(xsel)):
if isinstance(xsel[i], slice):
xsel[i] = N.arange(xsel[i].start, xsel[i].stop, xsel[i].step)
dim_ret = []
for i in range(len(xsel)):
if not N.isscalar(xsel[i]):
if xsel[i].ndim > 0:
dim_ret.append(len(xsel[i]))
ndim_ret = len(dim_ret)
j = 0
for i in range(len(xsel)):
if not N.isscalar(xsel[i]):
idx_shape = N.ones(ndim_ret, dtype=int)
idx_shape[j] = dim_ret[j]
xsel[i].shape = idx_shape
j += 1
# check if we only need basic slicing
if not isarray and not interp:
isbasic = True
else:
isbasic = False
ret = xSelect(xsel)
ret.isbasic = isbasic
ret.interp = interp
return ret
#@-node:schmidli.20080322120238.16:numpy2xsel
#@+node:schmidli.20080322120238.17:class axisSelect
class axisSelect(object):
""" axisSelect(inp)
Create an axis selection object.
Parameters:
inp a scalar, slice, or vector selection object
The syntax for inp is as follows:
[crdname|]<pre><selection><post>
<pre> is one of:
None <selection> is in native coordinate space
d <selection> is in ISO-8601 date format
i <selection> is in index space
<selection> is one of:
# for a scalar
#:#:# for a slice
#,#,... for a vector
where # is a number with an optional multiplier (e.g. 10k),
or a ISO-date. Valid multipliers include:
k (10**3), M (10**6), h (3600), m (60), H (100)
<post> is one of:
i interpolate data to exact location
n round to nearest index
Examples:
cidx = axisSelect('d20070321-09')
cidx = axisSelect('d20070321-09:18:3h')
cidx = axisSelect('i4')
cidx = axisSelect('i10,20,24i')
cidx = axisSelect('ZP|1500')
cidx = axisSelect('20k:100k:5k')
cidx = axisSelect('ZP|1500n')
"""
#@ @+others
#@+node:schmidli.20080322120238.18:__init__
def __init__(self, inp):
""" Overview of attributes:
.type one of 'scalar', 'slice', 'vector'
.fmt one of 'number', 'datetime'
.iscrd True/False
.interp False/True
.clip True/False
"""
if not isinstance(inp, str):
raise TypeError("Invalid argument type")
if len(inp) == 0:
raise ValueError("Empty string is not a valid input")
# default settings
self.type = 'slice'
self.fmt = 'number'
self.iscrd = True
self.clip = True
# check prefix
if inp[0] == 'd':
#raise NotImplementedError, "Date/time selection is not yet implemented"
self.fmt = 'datetime'
inp = inp[1:]
elif inp[0] == 'i':
self.iscrd = False
inp = inp[1:]
# check postfix
if inp[-1] in 'in':
postfix = inp[-1]
inp = inp[:-1]
else:
postfix = None
# check if to clip field
if inp[-1] == 'm':
self.clip = False
inp = inp[:-1]
if len(inp) == 0:
raise ValueError("Invalid input string")
# check for multi-dimensional coordinate name
inpv = inp.split('|')
if len(inpv) > 1:
self.mdcrd = inpv[0]
inp = inpv[1]
if postfix == 'n': # default is to interpolate
self.interp = False
else:
self.interp = True
else:
self.mdcrd = None
inp = inpv[0]
if postfix == 'i': # default is to not interpolate
self.interp = True
else:
self.interp = False
# determine selection type
inpv = inp.split(':')
if len(inpv) > 1 and len(inpv) <= 3:
self.type = 'slice'
elif len(inpv) > 3:
raise NotImplementedError("eslice is not yet implemented")
else:
inpv = inp.split(',')
if len(inpv) > 1:
self.type = 'vector'
if inpv[-1] == '': inpv = inpv[:-1]
else:
self.type = 'scalar'
# parse the selection string
data = []
if self.type == 'slice' and len(inpv) == 3:
step = inpv[2]
if len(step) == 0:
self.__step = None
else:
if step[0] == 'i':
self._index_step = 1
step = step[1:]
self.__step = str2float(step)
inpv = inpv[:2]
else:
self.__step = None
is_first = True
for item in inpv:
if self.fmt == 'datetime':
if is_first:
data.append(str2datetime(item))
is_first = False
else:
data.append(str2datetime(item, templ=data[-1]))
else:
data.append(str2float(item))
if self.type == 'slice':
if len(inpv) == 1:
data = [None, data[0]]
elif len(inpv) == 2:
data = [data[0], data[1]]
if self.mdcrd is not None:
if data[0] is None or data[1] is None or self.__step is None:
raise ValueError("must specify a complete slice for multidimensional coordinates")
if self.interp:
if data[0] is None or data[1] is None or self.__step is None:
raise ValueError("must specify a complete slice in interpolation mode")
self.__data = data
#@-node:schmidli.20080322120238.18:__init__
#@+node:schmidli.20080322120238.19:__str__
def __str__(self):
if self.type == 'scalar':
_str = 'scalar(' + str(self.__data[0]) + ')'
elif self.type == 'slice':
_str = 'slice(' + str(self.start) + ', ' + str(self.stop) + ', ' \
+ str(self.step) + ')'
elif self.type == 'vector':
_str = 'vector('
for item in self.__data:
_str += str(item) + ' '
_str = _str[:-1] + ')'
_str = 'axisSelect(' + _str + ', iscrd: ' + str(self.iscrd) \
+ ', interp: ' + str(self.interp) + ')'
return _str
#@-node:schmidli.20080322120238.19:__str__
#@+node:schmidli.20080322120238.20:__getitem__
def __getitem__(self, index):
if self.type == 'vector':
return self.__data[index]
else:
raise LookupError("Not a valid method for "+self.type+".")
#@-node:schmidli.20080322120238.20:__getitem__
#@+node:schmidli.20080322120238.21:__len__
def __len__(self):
return len(self.__data)
#@-node:schmidli.20080322120238.21:__len__
#@+node:schmidli.20080322163604:__iter__
def __iter__(self):
if self.type == 'slice':
data = N.arange(self.start, self.stop, self.step)
if data[-1]+self.step == self.stop:
data = N.concatenate((data, [self.stop]))
data = data.tolist()
elif self.type == 'scalar':
data = (self.v)
else:
data = tuple(self.v)
return iter(data)
#@-node:schmidli.20080322163604:__iter__
#@+node:schmidli.20080322172813:tolist
def tolist(self):
if self.type == 'slice':
data = N.arange(self.start, self.stop, self.step)
if data[-1]+self.step == self.stop:
data = N.concatenate((data, [self.stop]))
data = data.tolist()
elif self.type == 'scalar':
data = [self.v]
else:
data = list(self.v)
return data
#@-node:schmidli.20080322172813:tolist
#@+node:schmidli.20080322120238.22:toindex
def toindex(self, file, axis, mdcrd=None, isel=None, clip=True, ep=0.0):
""" Convert a axisSelect object from coordinate space to index space
"""
dimsize = None; refdate = None
dims = None; axis_no = 0
if self.iscrd:
if file.cf2dims is not None:
axfile = file.cf2dims[axis]
else:
axfile = axis
if not axfile in file.file.variables:
self.iscrd = False
if self.iscrd:
if mdcrd is None:
if file.cf2dims is not None:
axfile = file.cf2dims[axis]
else:
axfile = axis
crd = file.file.variables[axfile]
if self.fmt == 'datetime':
refdate = get_refdate(crd)
crd = crd[:]
else:
crd = get_variable(file, mdcrd, isel)
var = file.variables[mdcrd]
dims = list(var.cf_dimensions)
for axis2 in isel.keys():
if isel[axis2].type == 'scalar' and axis2 != axis:
try:
dims.remove(axis2)
except ValueError:
pass
axis_no = dims.index(axis)
if self.type == 'scalar': dims.remove(axis)
if var.rank < 2:
raise ValueError("Coordinate variable "+self.mdcrd+ " is not multidimensional")
else:
dimsize = file.cf_dimensions[axis]
crd = None
ret = self.toindex_crd(crd, axis=axis, axis_no=axis_no, dimsize=dimsize, refdate=refdate,
clip=clip, ep=0.0)
ret.dims = dims
ret.axis = axis
return ret
#@-node:schmidli.20080322120238.22:toindex
#@+node:schmidli.20080322120238.23:toindex_crd
def toindex_crd(self, crd, axis= None, axis_no=0, dimsize=None, refdate=None, clip=True, ep=0.0):
""" Convert a axisSelect object from coordinate space to index space
"""
interp = self.interp
round_ = not interp
clip = self.clip
ep = 0.5
cidx = self
data = copy.copy(self.__data)
idx = axisIdxSelect(self)
# convert datetime to seconds since a reference date
if cidx.fmt == 'datetime':
for i in range(len(data)):
if data[i] is not None:
data[i] = data[i] - refdate
data[i] = data[i].days*86400. + data[i].seconds
# if interp=True: convert slice to vector object
if cidx.type == 'slice' and interp:
if data[0] is None:
if cidx.iscrd:
start = crd.min()
else:
if cidx.step < 0:
start = dimsize
else:
start = 0.0
else:
start = data[0]
if start < 0: start += dimsize
if data[1] is None:
if cidx.iscrd:
stop = crd.max()
else:
if cidx.step < 0:
stop = 0
else:
stop = dimsize
else:
stop = data[1]
if stop < 0: stop += dimsize
import os
import csv
import threading
from .classes import Node, Link, Network, Column, ColumnVec, VDFPeriod, \
AgentType, DemandPeriod, Demand, Assignment, UI
from .colgen import update_links_using_columns
from .consts import SMALL_DIVISOR
__all__ = [
'read_network',
'load_columns',
'output_columns',
'output_link_performance',
'download_sample_data_sets',
'output_agent_paths'
]
# for precheck on connectivity of each OD pair
# 0: isolated, has neither outgoing links nor incoming links
# 1: has at least one outgoing link
# 2: has at least one incoming link
# 3: has both outgoing and incoming links
_zone_degrees = {}
def _update_orig_zone(oz_id):
if oz_id not in _zone_degrees:
_zone_degrees[oz_id] = 1
elif _zone_degrees[oz_id] == 2:
_zone_degrees[oz_id] = 3
def _update_dest_zone(dz_id):
if dz_id not in _zone_degrees:
_zone_degrees[dz_id] = 2
elif _zone_degrees[dz_id] == 1:
_zone_degrees[dz_id] = 3
def _are_od_connected(oz_id, dz_id):
connected = True
# at least one node in O must have outgoing links
if oz_id not in _zone_degrees or _zone_degrees[oz_id] == 2:
connected = False
print(f'WARNING! {oz_id} has no outgoing links to route volume '
f'between OD: {oz_id} --> {dz_id}')
# at least one node in D must have incoming links
if dz_id not in _zone_degrees or _zone_degrees[dz_id] == 1:
if connected:
connected = False
print(f'WARNING! {dz_id} has no incoming links to route volume '
f'between OD: {oz_id} --> {dz_id}')
return connected
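def _demo_zone_degrees():
    """Hypothetical helper, not in the original module: illustrates the degree
    encoding documented above on a toy network with a single link 1 -> 2.
    """
    _update_orig_zone(1)                     # zone 1 -> degree 1 (outgoing only)
    _update_dest_zone(2)                     # zone 2 -> degree 2 (incoming only)
    assert _are_od_connected(1, 2) is True   # O can send, D can receive
    assert _are_od_connected(2, 1) is False  # reversed pair prints the warnings above
    _zone_degrees.clear()                    # reset module-level state after the demo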
def _convert_str_to_int(str):
"""
TypeError takes care of the case where str is None
ValueError takes care of the case where str is empty
"""
if not str:
return None
try:
return int(str)
except ValueError:
return int(float(str))
except TypeError:
return None
def _convert_str_to_float(str):
"""
TypeError takes care of the case where str is None
ValueError takes care of the case where str is empty
"""
if not str:
return None
try:
return float(str)
except (TypeError, ValueError):
return None
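def _demo_str_conversion():
    """Hypothetical helper, not in the original module: records the behaviour of
    the two conversion helpers above on a few representative inputs.
    """
    assert _convert_str_to_int('12') == 12
    assert _convert_str_to_int('3.7') == 3        # int() raises ValueError, falls back to int(float())
    assert _convert_str_to_int('') is None        # empty string short-circuits to None
    assert _convert_str_to_float('2.5') == 2.5
    assert _convert_str_to_float(None) is None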
def _download_url(url, filename, loc_dir):
try:
import requests
except ImportError:
print('please install requests to proceed with downloading!!')
try:
r = requests.get(url)
r.raise_for_status()
with open(loc_dir+filename, 'wb') as f:
f.write(r.content)
except requests.HTTPError:
print('file not existing: '+url)
except requests.ConnectionError:
raise Exception('check your connectcion!!!')
except Exception as e:
raise e
def download_sample_data_sets():
url = 'https://raw.githubusercontent.com/jdlph/Path4GMNS/master/data/'
data_sets = [
"ASU",
"Braess_Paradox",
"Chicago_Sketch",
"Lima_Network",
"Sioux_Falls",
"Two_Corridor"
]
files = [
"node.csv",
"link.csv",
"demand.csv",
"settings.csv",
"settings.yml"
]
print('downloading starts')
# data folder under cdw
loc_data_dir = 'data'
if not os.path.isdir(loc_data_dir):
os.mkdir(loc_data_dir)
for ds in data_sets:
web_dir = url + ds + '/'
loc_sub_dir = os.path.join(loc_data_dir, ds) + '/'
if not os.path.isdir(loc_sub_dir):
os.mkdir(loc_sub_dir)
# multi-threading
threads = []
for x in files:
t = threading.Thread(
target=_download_url,
args=(web_dir+x, x, loc_sub_dir)
)
t.start()
threads.append(t)
for t in threads:
t.join()
print('downloading completes')
print('check '+os.path.join(os.getcwd(), loc_data_dir)+' for downloaded data sets')
def read_nodes(input_dir,
nodes,
id_to_no_dict,
no_to_id_dict,
zone_to_node_dict):
""" step 1: read input_node """
with open(input_dir+'/node.csv', 'r') as fp:
print('read node.csv')
reader = csv.DictReader(fp)
node_seq_no = 0
for line in reader:
# set up node_id, which should be an integer
node_id = _convert_str_to_int(line['node_id'])
if node_id is None:
continue
# set up zone_id, which should be an integer
zone_id = _convert_str_to_int(line['zone_id'])
if zone_id is None:
zone_id = -1
# treat them as string
coord_x = line['x_coord']
coord_y = line['y_coord']
# construct node object
node = Node(node_seq_no, node_id, zone_id, coord_x, coord_y)
nodes.append(node)
# set up mapping between node_seq_no and node_id
id_to_no_dict[node_id] = node_seq_no
no_to_id_dict[node_seq_no] = node_id
# associate node_id with corresponding zone
if zone_id not in zone_to_node_dict.keys():
zone_to_node_dict[zone_id] = []
zone_to_node_dict[zone_id].append(node_id)
node_seq_no += 1
print(f'the number of nodes is {node_seq_no}')
zone_size = len(zone_to_node_dict)
# do not count virtual zone with id as -1
if -1 in zone_to_node_dict.keys():
zone_size -= 1
print(f'the number of zones is {zone_size}')
def read_links(input_dir,
links,
nodes,
id_to_no_dict,
link_id_dict,
agent_type_size,
demand_period_size,
load_demand):
""" step 2: read input_link """
with open(input_dir+'/link.csv', 'r') as fp:
print('read link.csv')
reader = csv.DictReader(fp)
link_seq_no = 0
for line in reader:
# it can be an empty string
link_id = line['link_id']
# check the validity
from_node_id = _convert_str_to_int(line['from_node_id'])
if from_node_id is None:
continue
to_node_id =_convert_str_to_int(line['to_node_id'])
if to_node_id is None:
continue
length = _convert_str_to_float(line['length'])
if length is None:
continue
# pass validity check
try:
from_node_no = id_to_no_dict[from_node_id]
to_node_no = id_to_no_dict[to_node_id]
except KeyError:
print(f'EXCEPTION: Node ID {from_node_id} '
f'or/and Node ID {to_node_id} NOT IN THE NETWORK!!')
continue
# for the following attributes,
# if they are not None, convert them to the corresponding types
# if they are None's, set them using the default values
lanes = _convert_str_to_int(line['lanes'])
if lanes is None:
lanes = 1
link_type = _convert_str_to_int(line['link_type'])
if link_type is None:
link_type = 1
free_speed = _convert_str_to_int(line['free_speed'])
if free_speed is None:
free_speed = 60
# issue: int??
capacity = _convert_str_to_int(line['capacity'])
if capacity is None:
capacity = 49500
# if link.csv does not have a column 'allowed_uses',
# set allowed_uses to 'all'
# developer's note:
# we may need to change this implementation as we cannot deal with
# cases a link which is not open to any modes
try:
allowed_uses = line['allowed_uses']
if not allowed_uses:
allowed_uses = 'all'
except KeyError:
allowed_uses = 'all'
# if link.csv does not have a column 'geometry',
# set geometry to ''
try:
geometry = line['geometry']
except KeyError:
geometry = ''
link_id_dict[link_id] = link_seq_no
# construct link object
link = Link(link_id,
link_seq_no,
from_node_no,
to_node_no,
from_node_id,
to_node_id,
length,
lanes,
link_type,
free_speed,
capacity,
allowed_uses,
geometry,
agent_type_size,
demand_period_size)
# VDF Attributes
for i in range(demand_period_size):
dp_id_str = str(i+1)
header_vdf_alpha = 'VDF_alpha' + dp_id_str
header_vdf_beta = 'VDF_beta' + dp_id_str
header_vdf_mu = 'VDF_mu' + dp_id_str
header_vdf_fftt = 'VDF_fftt' + dp_id_str
header_vdf_cap = 'VDF_cap' + dp_id_str
header_vdf_phf = 'VDF_phf' + dp_id_str
# case i: link.csv does not have VDF attributes at all
# case ii: link.csv only has partial VDF attributes
# under case i, we will set up only one VDFPeriod object using
# default values
# under case ii, we will set up some VDFPeriod objects up to
# the number of complete set of VDF_alpha, VDF_beta, and VDF_mu
try:
VDF_alpha = line[header_vdf_alpha]
if VDF_alpha:
VDF_alpha = float(VDF_alpha)
except (KeyError, TypeError):
if i == 0:
# default value will be applied in the constructor
VDF_alpha = 0.15
else:
break
try:
VDF_beta = line[header_vdf_beta]
if VDF_beta:
VDF_beta = float(VDF_beta)
except (KeyError, TypeError):
if i == 0:
# default value will be applied in the constructor
VDF_beta = 4
else:
break
try:
VDF_mu = line[header_vdf_mu]
if VDF_mu:
VDF_mu = float(VDF_mu)
except (KeyError, TypeError):
if i == 0:
# default value will be applied in the constructor
VDF_mu = 1000
else:
break
try:
VDF_fftt = line[header_vdf_fftt]
if VDF_fftt:
VDF_fftt = float(VDF_fftt)
except (KeyError, TypeError):
# set it up using length and free_speed from link
VDF_fftt = length / max(SMALL_DIVISOR, free_speed) * 60
try:
VDF_cap = line[header_vdf_cap]
if VDF_cap:
VDF_cap = float(VDF_cap)
except (KeyError, TypeError):
# set it up using capacity from link
VDF_cap = capacity
# not a mandatory column
try:
VDF_phf = line[header_vdf_phf]
if VDF_phf:
VDF_phf = float(VDF_phf)
except (KeyError, TypeError):
# default value will be applied in the constructor
VDF_phf = -1
# construct VDFPeriod object
vdf = VDFPeriod(i, VDF_alpha, VDF_beta, VDF_mu,
VDF_fftt, VDF_cap, VDF_phf)
link.vdfperiods.append(vdf)
# set up outgoing links and incoming links
from_node = nodes[from_node_no]
to_node = nodes[to_node_no]
from_node.add_outgoing_link(link)
to_node.add_incoming_link(link)
links.append(link)
# set up zone degrees
if load_demand:
oz_id = from_node.get_zone_id()
dz_id = to_node.get_zone_id()
_update_orig_zone(oz_id)
_update_dest_zone(dz_id)
link_seq_no += 1
print(f'the number of links is {link_seq_no}')
def read_demand(input_dir,
file,
agent_type_id,
demand_period_id,
zone_to_node_dict,
column_pool):
""" step 3:read input_agent """
with open(input_dir+'/'+file, 'r') as fp:
print('read '+file)
at = agent_type_id
dp = demand_period_id
reader = csv.DictReader(fp)
total_agents = 0
for line in reader:
# invalid origin zone id, discard it
oz_id = _convert_str_to_int(line['o_zone_id'])
if oz_id is None:
continue
# invalid destination zone id, discard it
dz_id = _convert_str_to_int(line['d_zone_id'])
if dz_id is None:
continue
# o_zone_id does not exist in node.csv, discard it
if oz_id not in zone_to_node_dict.keys():
continue
# d_zone_id does not exist in node.csv, discard it
if dz_id not in zone_to_node_dict.keys():
continue
volume = _convert_str_to_float(line['volume'])
if volume is None:
continue
if volume == 0:
continue
# precheck on connectivity of each OD pair
if not _are_od_connected(oz_id, dz_id):
continue
# set up volume for ColumnVec
if (at, dp, oz_id, dz_id) not in column_pool.keys():
column_pool[(at, dp, oz_id, dz_id)] = ColumnVec()
# usaspending_api/etl/tests/integration/test_load_multiple_submissions.py
import pytest
from datetime import datetime, timezone, date
from decimal import Decimal
from django.core.management import call_command, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
from django.test import TransactionTestCase
from model_mommy import mommy
from usaspending_api.accounts.models import AppropriationAccountBalances
from usaspending_api.awards.models import FinancialAccountsByAwards
from usaspending_api.common.helpers.sql_helpers import ordered_dictionary_fetcher
from usaspending_api.etl.submission_loader_helpers.object_class import reset_object_class_cache
from usaspending_api.financial_activities.models import FinancialAccountsByProgramActivityObjectClass
from usaspending_api.submissions.models import SubmissionAttributes
@pytest.mark.usefixtures("broker_db_setup", "broker_server_dblink_setup")
class TestWithMultipleDatabases(TransactionTestCase):
"""
Super unfortunate, but because we're using a dblink these data will need to actually be committed to
the database so we use TransactionTestCase instead of TestCase. This slows down tests so use sparingly.
"""
databases = "__all__"
def setUp(self):
"""
Because we are adding fields and tables to the database and we want to keep track of that, we're
using setUp instead of setUpClass so that we can retain some state on the object. Another
unfortunate side effect of using dblink and having to modify the database. We can refactor this
set of tests once either of those situations is alleviated.
"""
reset_object_class_cache()
mommy.make(
"accounts.TreasuryAppropriationAccount",
treasury_account_identifier=1,
agency_id="111",
availability_type_code="X",
main_account_code="1111",
sub_account_code="111",
tas_rendering_label="111-X-1111-111",
)
mommy.make(
"accounts.TreasuryAppropriationAccount",
treasury_account_identifier=2,
agency_id="222",
availability_type_code="X",
main_account_code="2222",
sub_account_code="222",
tas_rendering_label="222-X-2222-222",
)
mommy.make("references.ObjectClass", major_object_class="10", object_class="10.1", direct_reimbursable="D")
mommy.make("references.DisasterEmergencyFundCode", code="B", title="BB")
mommy.make("references.DisasterEmergencyFundCode", code="L", title="LL")
mommy.make("references.DisasterEmergencyFundCode", code="N", title="NN")
mommy.make(
"submissions.DABSSubmissionWindowSchedule",
id="2000041",
submission_fiscal_year=2000,
submission_fiscal_month=4,
is_quarter=True,
)
mommy.make(
"submissions.DABSSubmissionWindowSchedule",
id="2000040",
submission_fiscal_year=2000,
submission_fiscal_month=4,
is_quarter=False,
)
mommy.make(
"submissions.DABSSubmissionWindowSchedule",
id="2000050",
submission_fiscal_year=2000,
submission_fiscal_month=5,
is_quarter=False,
)
mommy.make(
"submissions.DABSSubmissionWindowSchedule",
id="2000060",
submission_fiscal_year=2000,
submission_fiscal_month=6,
is_quarter=False,
)
mommy.make(
"submissions.DABSSubmissionWindowSchedule",
id="2000091",
submission_fiscal_year=2000,
submission_fiscal_month=9,
is_quarter=True,
)
connection = connections["data_broker"]
with connection.cursor() as cursor:
self._nuke_broker_data()
cursor.execute(
"""
insert into tas_lookup (
tas_id,
account_num,
agency_identifier,
availability_type_code,
main_account_code,
sub_account_code,
internal_start_date
) (values
(1, 1, '111', 'X', '1111', '111', '1900-01-01'),
(2, 2, '222', 'X', '2222', '222', '1900-01-01')
)
"""
)
cursor.execute(
"""
insert into submission (
submission_id,
cgac_code,
frec_code,
reporting_start_date,
reporting_end_date,
reporting_fiscal_year,
reporting_fiscal_period,
is_quarter_format,
d2_submission,
publish_status_id,
updated_at
) (values
-- bunch of good records with a mix of all kinds of settings
(1, '001', null, '2000-01-01', '2000-03-31', 2000, 4, true, false, 2, now()),
(2, null, '0002', '2000-01-01', '2000-01-31', 2000, 4, false, false, 3, now()),
(3, '003', '0003', '2000-02-01', '2000-02-29', 2000, 5, false, false, 2, now()),
(4, '004', null, '2000-03-01', '2000-03-31', 2000, 6, false, false, 3, now()),
(5, null, '005', '2000-04-01', '2000-06-30', 2000, 9, true, false, 2, now()),
-- submissions that should never return for various reasons
(6, '006', null, '2000-01-01', '2000-03-31', 2000, 4, true, false, 1, now()), -- not publish type 2 or 3
(7, '007', null, '2000-01-01', '2000-03-31', 2000, 4, true, true, 2, now()) -- D2
)
"""
)
cursor.execute(
"""
insert into publish_history (
publish_history_id,
submission_id,
updated_at
) (values
(1, 1, '1999-01-01'), (2, 2, '2000-01-02'), (3, 3, '2000-01-03'), (4, 4, '2000-01-04'),
(5, 5, '2000-01-05'), (6, 6, '2000-01-06'), (7, 7, '2000-01-07'), (8, 1, '2000-01-01')
)
"""
)
cursor.execute(
"""
insert into certify_history (
certify_history_id,
submission_id,
updated_at
) (values
(1, 1, '2000-02-01'), (3, 3, '2000-02-03'), (5, 5, '2000-02-05'), (7, 7, '2000-02-07')
)
"""
)
cursor.execute(
"""
insert into published_files_history (
published_files_history_id,
submission_id,
publish_history_id,
certify_history_id,
updated_at
) (values
(1, 1, 1, NULL, '1999-01-01'), (2, 2, 2, NULL, '2000-01-02'), (3, 3, 3, 3, '2000-01-03'),
(4, 4, 4, NULL, '2000-01-04'), (5, 5, 5, 5, '2000-01-05'), (6, 6, 6, NULL, '2000-01-06'),
(7, 7, 7, 7, '2000-01-07'), (8, 1, 8, 1, '2000-01-01')
)
"""
)
cursor.execute(
"""
insert into certified_appropriation (
certified_appropriation_id,
submission_id,
tas_id,
total_budgetary_resources_cpe
) (values
(1, 1, 1, 11),
(2, 2, 1, 22),
(3, 3, 2, 33),
(4, 4, 2, 44),
(5, 5, 2, 55),
(6, 6, 2, 66),
(7, 7, 2, 77)
)
"""
)
cursor.execute(
"""
insert into certified_object_class_program_activity (
certified_object_class_program_activity_id,
submission_id,
tas_id,
object_class,
gross_outlay_amount_by_pro_cpe,
disaster_emergency_fund_code
) (values
(1, 1, 1, '1101', 1111, null),
(2, 1, 1, '1101', 2222, 'B'),
(3, 1, 1, '1101', 3333, 'L'),
(4, 2, 1, '1101', 4444, null),
(5, 2, 1, '1101', 5555, null),
(6, 2, 1, '1101', 6666, null),
(7, 3, 2, '1101', 7777, 'L'),
(8, 3, 2, '1101', 8888, 'L'),
(9, 3, 2, '1101', 9999, 'L'),
(10, 4, 2, '1101', 1010, null),
(11, 5, 2, '1101', 1111, 'B'),
(12, 6, 2, '1101', 1212, 'L'),
(13, 7, 2, '1101', 1313, 'N')
)
"""
)
cursor.execute(
"""
insert into certified_award_financial (
certified_award_financial_id,
submission_id,
tas_id,
object_class,
gross_outlay_amount_by_awa_cpe,
transaction_obligated_amou,
disaster_emergency_fund_code
) (values
(1, 1, 1, '1101', 11111, 111110, null),
(2, 1, 1, '1101', 22222, 222220, 'B'),
(3, 1, 1, '1101', 33333, 333330, 'L'),
(4, 2, 1, '1101', 44444, 444440, null),
(5, 2, 1, '1101', 55555, 555550, null),
(6, 2, 1, '1101', 66666, 666660, null),
(7, 3, 2, '1101', 77777, 777770, 'L'),
(8, 3, 2, '1101', 88888, 888880, 'L'),
(9, 3, 2, '1101', 99999, 999990, 'L'),
(10, 4, 2, '1101', 10101, 101010, null),
(11, 5, 2, '1101', 11111, 111110, 'B'),
(12, 5, 2, '1101', null, null, 'B'), -- this should not load because of 0/null values
(13, 5, 2, '1101', 0, 0, 'B'), -- this should not load because of 0/null values
(14, 5, 2, '1101', null, 0, 'B'), -- this should not load because of 0/null values
(15, 5, 2, '1101', 0, null, 'B'), -- this should not load because of 0/null values
(16, 6, 2, '1101', 12121, 121210, 'L'),
(17, 7, 2, '1101', 13131, 131310, 'N')
)
"""
)
# This is an extremely brute force tactic, but there are many non-nullable fields in USAspending
# that are nullable in Broker. To keep from throwing not-null errors, we are going to provide
# zero values for a whole mess of fields known to be numeric. This will also prevent me having
# to mock a whole mess of additional data.
cursor.execute(
"""
select table_name, column_name
from information_schema.columns
where table_schema = 'public' and
table_name in (
'certified_appropriation',
'certified_object_class_program_activity',
'certified_award_financial'
) and
(column_name like '%cpe' or column_name like '%fyb')
"""
)
sqls = " ".join([f"update {r[0]} set {r[1]} = 0 where {r[1]} is null;" for r in cursor.fetchall()])
cursor.execute(sqls)
@staticmethod
def _nuke_broker_data():
"""
For reasons unbeknownst to me, I am having a very hard time getting TransactionTestCase to roll
back Broker changes. I spent entirely too much time trying to figure out a more graceful
way, sooooo, brute force it is.
"""
connection = connections["data_broker"]
with connection.cursor() as cursor:
cursor.execute(
"""
truncate table certify_history restart identity cascade;
truncate table publish_history restart identity cascade;
truncate table certified_appropriation restart identity cascade;
truncate table certified_object_class_program_activity restart identity cascade;
truncate table certified_award_financial restart identity cascade;
truncate table tas_lookup restart identity cascade;
truncate table submission restart identity cascade;
"""
)
def tearDown(self):
self._nuke_broker_data()
def test_all_the_things(self):
"""
Because we are using TransactionTestCase we're going to run all of our tests in one method to
prevent repeated set ups and tear downs which are expensive. This is less than ideal, but we'll
probably be fine.
"""
# Cue firey explosions.
with self.assertRaises(CommandError):
call_command("load_multiple_submissions")
with self.assertRaises(CommandError):
call_command("load_multiple_submissions", "--report-queue-status-only", "--submission_ids")
with self.assertRaises(CommandError):
call_command("load_multiple_submissions", "--submission_ids", "--incremental")
# Load specific submissions.
call_command("load_multiple_submissions", "--submission-ids", 1, 2, 3)
assert SubmissionAttributes.objects.count() == 3
assert AppropriationAccountBalances.objects.count() == 3
assert FinancialAccountsByProgramActivityObjectClass.objects.count() == 5
assert FinancialAccountsByAwards.objects.count() == 9
# We'll need these later.
update_date_sub_2 = SubmissionAttributes.objects.get(submission_id=2).update_date
create_date_sub_3 = SubmissionAttributes.objects.get(submission_id=3).create_date
# Load remaining submissions.
call_command("load_multiple_submissions", "--incremental")
assert SubmissionAttributes.objects.count() == 5
assert AppropriationAccountBalances.objects.count() == 5
assert FinancialAccountsByProgramActivityObjectClass.objects.count() == 7
assert FinancialAccountsByAwards.objects.count() == 11
# Now that we have everything loaded, let's make sure our data make sense.
with connections[DEFAULT_DB_ALIAS].cursor() as cursor:
cursor.execute("select * from submission_attributes where submission_id = 1")
d = dict(ordered_dictionary_fetcher(cursor)[0])
del d["create_date"]
del d["update_date"]
assert d == {
"submission_id": 1,
"certified_date": datetime(2000, 2, 1, 0, 0, tzinfo=timezone.utc),
"toptier_code": "001",
"reporting_period_start": date(2000, 1, 1),
"reporting_period_end": date(2000, 3, 31),
"reporting_fiscal_year": 2000,
"reporting_fiscal_quarter": 2,
"reporting_fiscal_period": 4,
"quarter_format_flag": True,
"reporting_agency_name": None,
"is_final_balances_for_fy": False,
"published_date": datetime(2000, 1, 1, 0, 0, tzinfo=timezone.utc),
"submission_window_id": 2000041,
"history": [
{"certified_date": None, "published_date": "1999-01-01T00:00:00+00:00"},
{"certified_date": "2000-02-01T00:00:00+00:00", "published_date": "2000-01-01T00:00:00+00:00"},
],
}
cursor.execute(
"""
select sum(total_budgetary_resources_amount_cpe)
from appropriation_account_balances
"""
)
assert cursor.fetchone()[0] == Decimal("165.00")
cursor.execute(
"""
select sum(gross_outlay_amount_by_program_object_class_cpe),
string_agg(disaster_emergency_fund_code, ',' order by disaster_emergency_fund_code)
from financial_accounts_by_program_activity_object_class
"""
)
assert cursor.fetchone() == (Decimal("-52116.00"), "B,B,L,L")
cursor.execute(
"""
select sum(gross_outlay_amount_by_award_cpe),
sum(transaction_obligated_amount),
string_agg(disaster_emergency_fund_code, ',' order by disaster_emergency_fund_code)
from financial_accounts_by_awards
"""
)
assert cursor.fetchone()
# src/data_processing.py
import math
from collections import Counter
from functools import reduce
from itertools import combinations, permutations, combinations_with_replacement
from statistics import mean
from time import sleep
import json
import csv
import logging
from geopy import geocoders
from geopy import distance
from pycountry import countries
from pycountry_convert import country_alpha2_to_continent_code
from utilities import *
from datastructure import *
# The data-structure storing in memory all the entries we have, as well as the locations of the conferences.
# Requests are built upon this data-structure and are parameterized by a function mapping carbon costs to a pair of locations.
class DB:
# A Database contains a list of RawData 'data', a mapping (conf -> year -> Location) 'confs'
def __init__(self, data, confs):
self.data = data
self.confs = confs
def get_participants_conf(self, conf):
return [d for d in self.data if d.conference == conf]
def get_participants_conf_year(self, conf, year):
return [d for d in self.data if d.conference == conf and d.year == year]
# This routine checks for each location of each conference that the data for this location are already cached.
# If not, it tries to compute them and cache them.
# If it fails, it reports the likely erroneous data entry and removes it from the conferences under consideration
def preprocess_confs(self, GLOB, cache):
logging.info("Starting the preprocessing of the locations of the events")
buggy_inputs = []
for name, conf in self.confs.items():
for year, loc in conf.items():
# print('Processing {} {}'.format(name,year))
try:
cache.check_cache_loc(GLOB, loc.place)
cache.set_loc(GLOB, loc)
except:
buggy_inputs.append((name, year))
print(
"WARNING: in the list of conference, entry {} {} at {} cannot be processed and has been ignored\n".format(
name, year, loc
)
)
for name, year in buggy_inputs:
self.confs[name].pop(year)
def preprocess_users(self, GLOB, cache):
logging.info("Starting the preprocessing of the participation database")
confs = self.confs
buggy_inputs = []
for name, conf in confs.items():
for year, conf_loc in conf.items():
# print('Processing conference {} {}\n'.format(name,year))
participants = self.get_participants_conf_year(name, year)
for d in participants:
loc = d.location
try:
cache.check_cache_loc(GLOB, loc.place)
cache.set_loc(GLOB, loc)
footprint = d.get_and_set_footprint(GLOB, cache, conf_loc)
if footprint is None:
print(conf_loc)
raise KeyError
except Exception as e:
print(e)
print(
"WARNING: in the list of participants, entry {} cannot be processed and has been ignored\n".format(
d
)
)
buggy_inputs.append(d)
for d in buggy_inputs:
self.data.remove(d)
def preprocess(self, GLOB, cache):
self.preprocess_confs(GLOB, cache)
self.preprocess_users(GLOB, cache)
self.print_user_db(GLOB)
def print_user_db(self, GLOB):
logging.info("Writing the raw emission data at {}".format(GLOB.output_raw))
with open(GLOB.output_raw, "w", newline="") as csvfile:
writer = csv.writer(csvfile, delimiter=",", quoting=csv.QUOTE_MINIMAL)
writer.writerow(
[
"id",
"city",
"state",
"country",
"continent",
"conference",
"year",
"footprint",
]
)
for d in self.data:
d.write_csv_row(writer)
def footprint_per_conf(self, GLOB):
with open(GLOB.footprint_confs, "w", newline="") as csvfile:
writer = csv.writer(csvfile, delimiter=",", quoting=csv.QUOTE_MINIMAL)
writer.writerow(
[
"conf",
"year",
"location",
"nb participants",
"total footprint",
"average footprint",
]
)
for name, conf in self.confs.items():
for year, conf_loc in conf.items():
select_data = [
d for d in self.data if d.conference == name and d.year == year
]
for d in select_data:
if d.footprint is None:
print(d)
raise KeyError
nb = len(select_data)
if nb > 0:
total_footprint = round(
reduce(lambda x, y: x + y.footprint, select_data, 0) / 1000)
average_footprint = round(total_footprint / nb, 1)
writer.writerow(
[
name,
year,
conf_loc.place.city,
nb,
total_footprint,
average_footprint,
]
)
def analysis_demographic(self, GLOB):
output_file_main = fill_hole_string(GLOB.output_demographic, "")
output_file_conf = fill_hole_string(GLOB.output_demographic, "_per_conf")
output_file_delta = fill_hole_string(GLOB.output_demographic, "_delta")
output_ParticipantsOrigin = GLOB.ParticipantsOrigin
continents = GLOB.continents()
init_distrib = Counter({c: 0 for c in continents + ["SAME"]})
# Global distribution of origin
distrib_total = init_distrib.copy()
total_attendance = 0
# Distribution for each origin of the conf
distrib_per_loc = {c: init_distrib.copy() for c in continents}
total_attendance_per_loc = init_distrib.copy()
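# Sketch of the counters filled below; keys are whatever GLOB.continents() returns
# (e.g. continent codes such as "EU" or "NA") plus the special "SAME" bucket:
#   distrib_total          -> Counter({"EU": 0, "NA": 0, ..., "SAME": 0})
#   distrib_per_loc["EU"]  -> same shape, restricted to conferences held in Europe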
with open(output_file_main, "w", newline="") as csvfile_main:
with open(output_file_conf, "w", newline="") as csvfile_conf:
with open(output_ParticipantsOrigin, "w", newline="") as csvfile_PO:
writer_main = csv.writer(
csvfile_main, delimiter=",", quoting=csv.QUOTE_MINIMAL
)
writer_conf = csv.writer(
csvfile_conf, delimiter=",", quoting=csv.QUOTE_MINIMAL
)
writer_PO = csv.writer(
csvfile_PO, delimiter=",", quoting=csv.QUOTE_MINIMAL
)
writer_main.writerow(
["Conference", "Year", "Continent"] + continents + ["Local"]
)
writer_conf.writerow(["Conference"] + continents + ["Local"])
writer_PO.writerow(["Conference"] + continents)
# For each conference
for name, conf in self.confs.items():
# Distribution for the conference 'name'
distrib_conf = init_distrib.copy()
total_attendance_conf = 0
output_POC = fill_hole_string(GLOB.ParticipantsOriginC, name)
with open(output_POC, "w", newline="") as csvfile_POC:
writer_POC = csv.writer(
csvfile_POC, delimiter=",", quoting=csv.QUOTE_MINIMAL
)
writer_POC.writerow(["Year", "Location"] + continents)
# For each instance of the conference 'name'
for year, conf_loc in conf.items():
# List of participants to 'name[year]'
select_data = [
d
for d in self.data
if d.conference == name and d.year == year
]
attendance = len(select_data)
# If we actually have data for this instance
if attendance > 0:
# Distribution of this instance
nb_loc = {}
total_attendance += attendance
total_attendance_per_loc[conf_loc.continent] += attendance
total_attendance_conf += attendance
nb_loc = {
l: len(
[
d
for d in select_data
if d.location.continent == l
]
)
for l in continents
}
nb_loc["SAME"] = len(
[
d
for d in select_data
if d.location.continent == conf_loc.continent
]
)
distrib_total += nb_loc
distrib_per_loc[conf_loc.continent] += nb_loc
distrib_conf += nb_loc
main_row = [
norm_perc(nb_loc[x], attendance) for x in continents
]
writer_main.writerow(
[name, year, conf_loc.continent]
+ main_row
+ [norm_perc(nb_loc["SAME"], attendance)]
)
main_row = [nb_loc[x] for x in continents]
writer_POC.writerow([name+str(year), conf_loc.continent] + main_row)
conf_row = [
norm_perc(distrib_conf[x], total_attendance_conf)
for x in continents
]
writer_conf.writerow(
[name]
+ conf_row
+ [norm_perc(distrib_conf["SAME"], total_attendance_conf)]
)
conf_row = [distrib_conf[x] for x in continents]
writer_PO.writerow(
[name]
+ conf_row
)
# print("total_attendance : {}".format(total_attendance))
# for x in continents:
# print("{} has {}".format(x,norm_perc(distrib_total[x], total_attendance)))
writer_conf.writerow(
["Any"]
+ [
norm_perc(distrib_total[x], total_attendance)
for x in continents
]
+ [norm_perc(distrib_total["SAME"], total_attendance)]
)
writer_PO.writerow(
["All"]
+ [distrib_total[x] for x in continents]
)
with open(output_file_delta, "w", newline="") as csvfile_delta:
writer = csv.writer(csvfile_delta, delimiter=",", quoting=csv.QUOTE_MINIMAL)
writer.writerow(["Location"] + continents + ["Local"])
for c in continents:
if total_attendance_per_loc[c] != 0:
writer.writerow(
[c]
+ [
norm_perc(
distrib_per_loc[c][x], total_attendance_per_loc[c]
)
for x in continents
]
+ [
norm_perc(
distrib_per_loc[c]["SAME"], total_attendance_per_loc[c]
)
]
)
writer.writerow(
["Any"]
+ [norm_perc(distrib_total[x], total_attendance) for x in continents]
+ [norm_perc(distrib_total["SAME"], total_attendance)]
)
# Overlap of participation, in percentage, between two instances of two conferences
def participation_overlap_single(self, name1, year1, name2, year2):
participants1 = set(
[d.id for d in self.data if d.conference == name1 and d.year == year1]
)
participants2 = set(
[d.id for d in self.data if d.conference == name2 and d.year == year2]
)
if len(participants1) > 0 and len(participants2) > 0:
intersection = participants1 & participants2
return [len(intersection), len(participants1), len(participants2)]
# return norm(
# len(intersection) * 2 * 100 / (len(participants1) + len(participants2))
# )
else:
return None
# Overlap of participation, in percentage, between any two instances of a given conference
def participation_overlap_intra_conf(self, GLOB, name):
output_file = fill_hole_string(GLOB.output_overlap_intra_conf, "_" + name)
with open(output_file, "w", newline="") as csvfile:
writer = csv.writer(csvfile, delimiter=",", quoting=csv.QUOTE_MINIMAL)
writer.writerow(["Year1", "Year2", "Overlap"])
for pair in combinations(GLOB.years_processed, 2):
overlap = self.participation_overlap_single(
name, pair[0], name, pair[1]
)
if overlap is not None:
writer.writerow([pair[0], pair[1]] + overlap)
def participation_overlap_intra_conf_generate_all(self, GLOB):
for c in GLOB.confs_processed:
self.participation_overlap_intra_conf(GLOB, c)
# Overlap of participation, in percentage, between two given conferences for each year
def participation_overlap_cross_conf(self, GLOB, conf1, conf2):
output_file = fill_hole_string(
GLOB.output_overlap_cross_conf, "_" + conf1 + "_" + conf2
)
with open(output_file, "w", newline="") as csvfile:
writer = csv.writer(csvfile, delimiter=",", quoting=csv.QUOTE_MINIMAL)
writer.writerow(["Year", "Overlap", "Total1", "Total2"])
overlap_acc = 0
part1_acc = 0
part2_acc = 0
for year in GLOB.years_processed:
overlap = self.participation_overlap_single(conf1, year, conf2, year)
if overlap is not None:
overlap_acc += overlap[0]
part1_acc += overlap[1]
part2_acc += overlap[2]
writer.writerow([year] + overlap)
writer.writerow(["All", overlap_acc, part1_acc, part2_acc])
def participation_overlap_cross_conf_generate_all(self, GLOB):
for pair in combinations(GLOB.confs_processed, 2):
self.participation_overlap_cross_conf(GLOB, pair[0], pair[1])
def participation_overlap_general(self, GLOB):
with open(GLOB.OverlapAnalysiscrop, "w", newline="") as csvfile:
writer = csv.writer(csvfile, delimiter=",", quoting=csv.QUOTE_MINIMAL)
writer.writerow(["Conf1", "Year1", "Conf2", "Year2", "Overlap"])
for conf1,conf2 in combinations_with_replacement(GLOB.confs_processed, 2):
for year1,year2 in combinations_with_replacement(GLOB.years_processed, 2):
overlap = self.participation_overlap_single(conf1,year1,conf2,year2)
if overlap is not None:
writer.writerow([conf1,year1,conf2,year2,overlap[0]])
def get_number_of_participations(self, GLOB):
with open(GLOB.output_number_of_participations, "w", newline="") as csvfile:
writer = csv.writer(csvfile, delimiter=",", quoting=csv.QUOTE_MINIMAL)
writer.writerow(
[
"Conference",
"Avrg nb of participations",
"Avrg non one timer",
">= 2",
">= 2 per",
">= 3",
">= 3 per",
">= 4",
">= 4 per",
">= 5",
">= 5 per",
]
)
# res: conf |-> id_participant |-> number of participations to conf specifically
from opensimplex import OpenSimplex
import pandas as pd
import xarray as xr
import numpy as np
import constants
import netCDF4
import random
import pygmt
import math
import csv
import os
def create_temp_dir():
'''
PARAMETERS: none
RETURN VALUE: none
REQUIREMENTS: none
PURPOSE: creates new temp directory for data and image storage
'''
# creating temporary directory
if not os.path.exists(constants.TEMPDIR):
os.makedirs(constants.TEMPDIR)
def create_image():
'''
PARAMETERS: none
RETURN VALUE: none
REQUIREMENTS: Data.nc has to be created in the temp directory
PURPOSE: creates Image1.png in the temp directory
'''
# creating temporary directory
create_temp_dir()
# resetting figure
constants.FIG = pygmt.Figure()
# make color pallets
pygmt.makecpt(
cmap='topo',
series='-10000/10000/500',
continuous=True
)
# plotting topography data
constants.FIG.grdimage(
grid=str(constants.TEMPDIR/'Data.nc'),
shading=True,
frame=True,
projection='M4i',
region=get_bounds()
)
# plotting coastlines
constants.FIG.coast(
shorelines=True,
frame=True
)
# plotting topo contour lines
constants.FIG.grdcontour(
grid=str(constants.TEMPDIR/'Data.nc'),
interval=1000,
annotation="2000+f6p",
limit="-10000/10000",
pen="a0.12p"
)
# creating color bar
constants.FIG.colorbar(
frame='+l" "'
)
# saving figure as Image1.png
constants.FIG.savefig(constants.TEMPDIR/"Image1.png", crop=False, dpi=720)
def plot_points(points):
'''
PARAMETERS: points, [[x1,y1],[x2,y2],...] coordinates of points in the path
RETURN VALUE: none
REQUIREMENTS: Image2.png has to be created in the temp directory
PURPOSE: creates Image3.png with path plotted
'''
# creating temporary directory
create_temp_dir()
# separating x and y coordinates
x = [point[0] for point in points]
y = [point[1] for point in points]
# resetting image
plot_endpoints([x[0],y[0]],[x[-1],y[-1]])
# plot data points
constants.FIG.plot(
x=x,
y=y,
style='c0.05c',
color='black',
pen='black',
)
# saving figure as Image3.png
constants.FIG.savefig(constants.TEMPDIR/"Image3.png", crop=False, dpi=720)
def plot_endpoints(start, end):
'''
PARAMETERS: start, [x,y] coordinates of starting point
end, [x,y] coordinates of ending point
RETURN VALUE: none
REQUIREMENTS: Image1.png has to be created in the temp directory
PURPOSE: creates Image2.png with endpoints plotted
'''
# creating temporary directory
create_temp_dir()
# resetting image
create_image()
# plot data points
constants.FIG.plot(
x=[start[0],end[0]],
y=[start[1],end[1]],
style='c0.2c',
color='red',
pen='black',
)
# saving figure as Image2.png
constants.FIG.savefig(constants.TEMPDIR/"Image2.png", crop=False, dpi=720)
def get_scale():
'''
PARAMETERS: none
RETURN VALUE: [londist, latdist, diagdist], distance between two lon points,
two lat points, and two diagonal points respectively
REQUIREMENTS: Data.csv has to be created in the temp directory
PURPOSE: to get the distance between points, used in the algo
'''
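# Unit-conversion sketch (assumes the ~30 m per arc second factor used below):
#   a grid spacing of 0.1 degree -> 0.1 * 3600 arcsec * 30 m/arcsec = 10800 m between points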
# creating temporary directory
create_temp_dir()
# loading data from Data.csv
arr = np.loadtxt(constants.TEMPDIR/"Data.csv", delimiter=',')
lon = arr[:, 0]
lat = arr[:, 1]
alt = arr[:, 2]
# finding indices of the first repetition of data
temp = np.where(lon == lon[0])
index1 = temp[0][0]
index2 = temp[0][1]
# calculating londist, latdist, and diagdist in degrees
londist = (lon[1] - lon[0])
latdist = (lat[index2] - lat[index1])
diagdist = math.sqrt(londist ** 2 + latdist ** 2)
# converting degrees to meters and returning values
londist = londist * 60 * 60 * 30
latdist = latdist * 60 * 60 * 30
diagdist = diagdist * 60 * 60 * 30
return [londist, latdist, diagdist]
def convert_to_csv():
'''
PARAMETERS: none
RETURN VALUE: none
REQUIREMENTS: Data.nc has to be created in the temp directory
PURPOSE: creates Data.csv in the temp directory
'''
# creating temporary directory
create_temp_dir()
# reading in data from Data.nc
nc = xr.open_dataset(constants.TEMPDIR/'Data.nc')
lon = nc.variables['x'][:]
length = np.size(lon)
lat = nc.variables['y'][:]
width = np.size(lat)
alt = nc.variables['z'][:]
# reshaping and flattening data
lon = np.tile(lon,width)
lat = np.tile(lat,length)
lat = np.reshape(lat,(width,length))
lat = lat.flatten('F')
alt = np.array(alt)
alt = alt.flatten()
# concatenating data together
data = np.column_stack((lon,lat,alt))
# creating Data.csv
np.savetxt(constants.TEMPDIR/'Data.csv',data,delimiter=',')
def convert_to_nc():
'''
PARAMETERS: none
RETURN VALUE: none
REQUIREMENTS: Data.csv has to be created in the temp directory
PURPOSE: creates Data.nc in the temp directory
'''
# creating temporary directory
create_temp_dir()
# reading in data from Data.csv
arr = np.loadtxt(constants.TEMPDIR/"Data.csv", delimiter=',')
lon = arr[:, 0]
lat = arr[:, 1]
alt = arr[:, 2]
# extracting dimensions of lon, lat, and alt
temp = np.where(lon == lon[0])
index1 = temp[0][0]
index2 = temp[0][1]
londim = index2 - index1
latdim = int(np.shape(lat)[0] / londim)
altdim = (londim, latdim)
# reshaping lon, lat, and alt
lon = lon[0:londim]
lat = lat.reshape(londim, latdim)
lat = lat[:, 0]
alt = np.reshape(alt,(londim, latdim))
# creating Data.nc and setting dimensions
nc = netCDF4.Dataset(constants.TEMPDIR/'Data.nc', 'w', format='NETCDF4')
nc.createDimension('x',londim)
nc.createDimension('y',latdim)
# adding data to Data.nc and closing the file
lonvar = nc.createVariable('x','float32',('x'))
lonvar[:] = lon
latvar = nc.createVariable('y','float32',('y'))
latvar[:] = lat
altvar = nc.createVariable('z','float32',('x','y'))
altvar[:] = alt
nc.close()
def get_etopo_data(lon, lat, size):
'''
PARAMETERS: -180 <= lon <= 180(suggested -175 <= lon <= 175),
-89 <= lat <= 89(suggested -85 <= lat <= 85),
0.05 <= size <= 90(suggested 0.1 <= size <= 10)
RETURN VALUE: none
REQUIREMENTS: none
PURPOSE: creates Data.nc and Data.csv in the temp directory
'''
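# Hedged usage sketch (coordinates are illustrative only):
#   get_etopo_data(lon=-122.3, lat=47.6, size=0.5)
# cuts a roughly 1-degree-wide region and, since 0.4 < size <= 2,
# selects the '@earth_relief_15s' grid below.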
# creating temporary directory
create_temp_dir()
# creating region boundaries
minlon, maxlon = max(-180,lon-size), min(180, lon+size) # -180 < lon < 180
minlat, maxlat = max(-89,lat-size), min(89,lat+size) # -89 < lat < 89
# determining which etopo data file to use
if (size > 2):
topo_data = '@earth_relief_30s' # 30 arc seconds between points
elif (size > 0.4):
topo_data = '@earth_relief_15s' # 15 arc seconds between points
else:
topo_data = '@earth_relief_03s' # 03 arc seconds between points
# extracting subregion and creating Data.nc file
pygmt.grdcut(
grid=topo_data,
outgrid=constants.TEMPDIR/'Data.nc',
projection='M4i',
region=[minlon, maxlon, minlat, maxlat]
)
# converting Data.nc to Data.csv
convert_to_csv()
def get_bounds():
'''
PARAMETERS: none
RETURN VALUE: [minlon, maxlon, minlat, maxlat], min and max values of lon and lat
REQUIREMENTS: Data.csv has to be created in the temp directory
PURPOSE: finds the bounds of the region from a data file
'''
# creating temporary directory
create_temp_dir()
# loading data from Data.csv
arr = np.loadtxt(constants.TEMPDIR/"Data.csv", delimiter=',')
lon = arr[:, 0]
lat = arr[:, 1]
alt = arr[:, 2]
# finding values and returning them
return [np.min(lon),np.max(lon),np.min(lat),np.max(lat)]
def get_ncfile(path):
'''
PARAMETERS: path, the path to the file that is going to be read
RETURN VALUE: none
REQUIREMENTS: the .nc file has to have only 3 variables x, y, z
shape of x = n, shape of y = m, shape of z = (n,m)
PURPOSE: creates Data.nc and Data.csv in the temp directory
'''
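# Requirement sketch (illustrative): x has shape (n,), y has shape (m,) and
# z has shape (n, m); other layouts are copied verbatim but are not expected
# by convert_to_csv() further down.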
# creating temporary directory
create_temp_dir()
# opening and the nc file and Data.csv in temp directory
src = netCDF4.Dataset(path)
dst = netCDF4.Dataset(constants.TEMPDIR/'Data.nc', 'w', format='NETCDF4')
# copying attributes
for name in src.ncattrs():
dst.setncattr(name, src.getncattr(name))
# copying dimensions
for name in src.dimensions:
dst.createDimension(name, len(src.dimensions[name]))
# copying all file data and closing file
for name in src.variables:
x = dst.createVariable(name, src.variables[name].datatype, src.variables[name].dimensions)
dst.variables[name][:] = src.variables[name][:]
src.close()
dst.close()
# converting Data.nc to Data.csv
convert_to_csv()
# setting up subregion for image creation
pygmt.grdcut(
grid=str(constants.TEMPDIR/'Data.nc'),
outgrid=constants.TEMPDIR/'Data.nc',
projection='M4i',
region=get_bounds()
)
def get_csvfile(path):
'''
PARAMETERS: path, the path to the file that is going to be read
RETURN VALUE: none
REQUIREMENTS: the .csv file should be n rows and 3 columns with column 1 being
lon, column 2 being lat, column 3 being alt. lon should differ in
values before lat does
PURPOSE: creates Data.nc and Data.csv in the temp directory
'''
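# Expected layout sketch (values illustrative): three comma-separated columns,
# with lon cycling faster than lat, e.g.
#   -122.10,47.50,132.0
#   -122.05,47.50,128.5
#   -122.00,47.50,131.2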
# creating temporary directory
create_temp_dir()
# opening and creating Data.csv
arr = np.loadtxt(path,delimiter=',')
np.savetxt(constants.TEMPDIR/'Data.csv',arr,delimiter=',')
# converting Data.csv to Data.nc
convert_to_nc()
# setting up subregion for image creation
pygmt.grdcut(
grid=str(constants.TEMPDIR/'Data.nc'),
outgrid=constants.TEMPDIR/'Data.nc',
projection='M4i',
region=get_bounds()
)
def create_random_terrain(freq, height, water):
'''
PARAMETERS: freq, 1 <= freq <= 25, controls how mountainy the data will be
height, 100 <= height <= 8000, controls max altitude difference
water, 0 <= water <= 100, percentage of the map that will be under water
RETURN VALUE: none
REQUIREMENTS: none
PURPOSE: creates Data.nc and Data.csv in the temp directory
'''
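# Hedged usage sketch: create_random_terrain(freq=5, height=2000, water=40)
# should yield moderately hilly terrain with roughly 40% of cells below sea
# level (the exact share depends on the random noise seed).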
# creating temporary directory
create_temp_dir()
# initializing altitude data and creating noise generators
n = 500
rnd = random.randrange(0,1000000)
gens = [OpenSimplex(seed=i) for i in range(10)]
alt = np.zeros((n,n))
# creating noise in altitude data
for x in range(n):
for y in range(n):
for i,gen in enumerate(gens):
alt[x][y] += (0.5**i)*(gen.noise2d(freq*(x/n-0.5)-rnd, freq*(y/n-0.5)-rnd) / 2 + (0.5-water/100))
# reshaping and increasing height of altitude data
alt *= height
alt = alt.flatten()
# creating lon and lat values
lon = np.linspace(-2,2,n)
lon = np.tile(lon,n)
lat = np.linspace(-2,2,n)
lat = np.tile(lat,n)
lat = np.reshape(lat,(n,n))
lat = lat.flatten('F')
# concatenating data together
data = np.column_stack((lon,lat,alt))
# | |
mbkin**6 - (505200*mckin**4*q_cut**2*sB)/mbkin**8 -
(4070880*mckin**6*q_cut**2*sB)/mbkin**10 - (4525536*mckin**8*q_cut**2*sB)/
mbkin**12 + (142656*mckin**10*q_cut**2*sB)/mbkin**14 -
(2771904*mckin**12*q_cut**2*sB)/mbkin**16 - (4946496*mckin**14*q_cut**2*sB)/
mbkin**18 - (5925840*mckin**16*q_cut**2*sB)/mbkin**20 -
(2336640*mckin**18*q_cut**2*sB)/mbkin**22 + (87600*mckin**20*q_cut**2*sB)/
mbkin**24 + (84000*mckin**22*q_cut**2*sB)/mbkin**26 +
(5040*q_cut**3*sB)/mbkin**6 + (82320*mckin**2*q_cut**3*sB)/mbkin**8 -
(854496*mckin**4*q_cut**3*sB)/mbkin**10 - (7703856*mckin**6*q_cut**3*sB)/
mbkin**12 - (16016976*mckin**8*q_cut**3*sB)/mbkin**14 -
(21826608*mckin**10*q_cut**3*sB)/mbkin**16 - (21509136*mckin**12*q_cut**3*
sB)/mbkin**18 - (13371216*mckin**14*q_cut**3*sB)/mbkin**20 -
(3961056*mckin**16*q_cut**3*sB)/mbkin**22 + (110880*mckin**18*q_cut**3*sB)/
mbkin**24 + (82800*mckin**20*q_cut**3*sB)/mbkin**26 -
(12600*q_cut**4*sB)/mbkin**8 - (253680*mckin**2*q_cut**4*sB)/mbkin**10 +
(404088*mckin**4*q_cut**4*sB)/mbkin**12 + (8097600*mckin**6*q_cut**4*sB)/
mbkin**14 + (19759944*mckin**8*q_cut**4*sB)/mbkin**16 +
(21465504*mckin**10*q_cut**4*sB)/mbkin**18 + (14271960*mckin**12*q_cut**4*
sB)/mbkin**20 + (3432288*mckin**14*q_cut**4*sB)/mbkin**22 -
(769440*mckin**16*q_cut**4*sB)/mbkin**24 - (205200*mckin**18*q_cut**4*sB)/
mbkin**26 + (5040*q_cut**5*sB)/mbkin**10 + (102480*mckin**2*q_cut**5*sB)/
mbkin**12 - (16512*mckin**4*q_cut**5*sB)/mbkin**14 -
(1479984*mckin**6*q_cut**5*sB)/mbkin**16 - (2775456*mckin**8*q_cut**5*sB)/
mbkin**18 - (2796144*mckin**10*q_cut**5*sB)/mbkin**20 -
(875232*mckin**12*q_cut**5*sB)/mbkin**22 + (290640*mckin**14*q_cut**5*sB)/
mbkin**24 + (75600*mckin**16*q_cut**5*sB)/mbkin**26 +
(5040*q_cut**6*sB)/mbkin**12 + (127680*mckin**2*q_cut**6*sB)/mbkin**14 +
(597024*mckin**4*q_cut**6*sB)/mbkin**16 + (1262448*mckin**6*q_cut**6*sB)/
mbkin**18 + (1846128*mckin**8*q_cut**6*sB)/mbkin**20 +
(1700544*mckin**10*q_cut**6*sB)/mbkin**22 + (725760*mckin**12*q_cut**6*sB)/
mbkin**24 + (75600*mckin**14*q_cut**6*sB)/mbkin**26 -
(5040*q_cut**7*sB)/mbkin**14 - (122640*mckin**2*q_cut**7*sB)/mbkin**16 -
(494400*mckin**4*q_cut**7*sB)/mbkin**18 - (1044720*mckin**6*q_cut**7*sB)/
mbkin**20 - (1306080*mckin**8*q_cut**7*sB)/mbkin**22 -
(661440*mckin**10*q_cut**7*sB)/mbkin**24 - (54000*mckin**12*q_cut**7*sB)/
mbkin**26 + (1260*q_cut**8*sB)/mbkin**16 + (29400*mckin**2*q_cut**8*sB)/
mbkin**18 + (84036*mckin**4*q_cut**8*sB)/mbkin**20 +
(198456*mckin**6*q_cut**8*sB)/mbkin**22 + (166080*mckin**8*q_cut**8*sB)/
mbkin**24 + (7200*mckin**10*q_cut**8*sB)/mbkin**26 -
(8928*mckin**4*q_cut**9*sB)/mbkin**22 - (28800*mckin**6*q_cut**9*sB)/
mbkin**24 - (16800*mckin**8*q_cut**9*sB)/mbkin**26 +
(15120*mckin**4*q_cut**10*sB)/mbkin**24 + (18000*mckin**6*q_cut**10*sB)/
mbkin**26 - (4800*mckin**4*q_cut**11*sB)/mbkin**26 - 1360*sE -
(2000*mckin**2*sE)/mbkin**2 + (309680*mckin**4*sE)/mbkin**4 -
(670736*mckin**6*sE)/mbkin**6 - (7412592*mckin**8*sE)/mbkin**8 +
(6194640*mckin**10*sE)/mbkin**10 + (8632992*mckin**12*sE)/mbkin**12 -
(5378784*mckin**14*sE)/mbkin**14 - (700464*mckin**16*sE)/mbkin**16 -
(312368*mckin**18*sE)/mbkin**18 - (1548880*mckin**20*sE)/mbkin**20 +
(836848*mckin**22*sE)/mbkin**22 + (65264*mckin**24*sE)/mbkin**24 -
(12240*mckin**26*sE)/mbkin**26 + (5520*q_cut*sE)/mbkin**2 +
(32160*mckin**2*q_cut*sE)/mbkin**4 - (906080*mckin**4*q_cut*sE)/mbkin**6 -
(1643168*mckin**6*q_cut*sE)/mbkin**8 + (17160304*mckin**8*q_cut*sE)/
mbkin**10 + (36106432*mckin**10*q_cut*sE)/mbkin**12 +
(22402624*mckin**12*q_cut*sE)/mbkin**14 + (4453696*mckin**14*q_cut*sE)/
mbkin**16 + (2931184*mckin**16*q_cut*sE)/mbkin**18 -
(2327648*mckin**18*q_cut*sE)/mbkin**20 - (2691296*mckin**20*q_cut*sE)/
mbkin**22 - (94368*mckin**22*q_cut*sE)/mbkin**24 +
(49680*mckin**24*q_cut*sE)/mbkin**26 - (5600*q_cut**2*sE)/mbkin**4 -
(57120*mckin**2*q_cut**2*sE)/mbkin**6 + (427680*mckin**4*q_cut**2*sE)/
mbkin**8 + (2262240*mckin**6*q_cut**2*sE)/mbkin**10 +
(581760*mckin**8*q_cut**2*sE)/mbkin**12 - (2877312*mckin**10*q_cut**2*sE)/
mbkin**14 + (1913472*mckin**12*q_cut**2*sE)/mbkin**16 +
(6809472*mckin**14*q_cut**2*sE)/mbkin**18 + (5638368*mckin**16*q_cut**2*sE)/
mbkin**20 + (1425440*mckin**18*q_cut**2*sE)/mbkin**22 -
(101280*mckin**20*q_cut**2*sE)/mbkin**24 - (50400*mckin**22*q_cut**2*sE)/
mbkin**26 - (5520*q_cut**3*sE)/mbkin**6 - (54240*mckin**2*q_cut**3*sE)/
mbkin**8 + (691120*mckin**4*q_cut**3*sE)/mbkin**10 +
(4345728*mckin**6*q_cut**3*sE)/mbkin**12 + (5522528*mckin**8*q_cut**3*sE)/
mbkin**14 + (4002368*mckin**10*q_cut**3*sE)/mbkin**16 +
(2024544*mckin**12*q_cut**3*sE)/mbkin**18 + (3545728*mckin**14*q_cut**3*sE)/
mbkin**20 + (2277168*mckin**16*q_cut**3*sE)/mbkin**22 +
(21408*mckin**18*q_cut**3*sE)/mbkin**24 - (49680*mckin**20*q_cut**3*sE)/
mbkin**26 + (13680*q_cut**4*sE)/mbkin**8 + (186480*mckin**2*q_cut**4*sE)/
mbkin**10 - (511520*mckin**4*q_cut**4*sE)/mbkin**12 -
(4919264*mckin**6*q_cut**4*sE)/mbkin**14 - (7467680*mckin**8*q_cut**4*sE)/
mbkin**16 - (6060512*mckin**10*q_cut**4*sE)/mbkin**18 -
(5694752*mckin**12*q_cut**4*sE)/mbkin**20 - (1869920*mckin**14*q_cut**4*sE)/
mbkin**22 + (413616*mckin**16*q_cut**4*sE)/mbkin**24 +
(123120*mckin**18*q_cut**4*sE)/mbkin**26 - (5040*q_cut**5*sE)/mbkin**10 -
(77280*mckin**2*q_cut**5*sE)/mbkin**12 + (108480*mckin**4*q_cut**5*sE)/
mbkin**14 + (874016*mckin**6*q_cut**5*sE)/mbkin**16 +
(894432*mckin**8*q_cut**5*sE)/mbkin**18 + (642528*mckin**10*q_cut**5*sE)/
mbkin**20 + (23872*mckin**12*q_cut**5*sE)/mbkin**22 -
(213024*mckin**14*q_cut**5*sE)/mbkin**24 - (45360*mckin**16*q_cut**5*sE)/
mbkin**26 - (5040*q_cut**6*sE)/mbkin**12 - (105840*mckin**2*q_cut**6*sE)/
mbkin**14 - (365840*mckin**4*q_cut**6*sE)/mbkin**16 -
(419792*mckin**6*q_cut**6*sE)/mbkin**18 - (359504*mckin**8*q_cut**6*sE)/
mbkin**20 - (464784*mckin**10*q_cut**6*sE)/mbkin**22 -
(371952*mckin**12*q_cut**6*sE)/mbkin**24 - (45360*mckin**14*q_cut**6*sE)/
mbkin**26 + (3600*q_cut**7*sE)/mbkin**14 + (102240*mckin**2*q_cut**7*sE)/
mbkin**16 + (292720*mckin**4*q_cut**7*sE)/mbkin**18 +
(389440*mckin**6*q_cut**7*sE)/mbkin**20 + (544240*mckin**8*q_cut**7*sE)/
mbkin**22 + (363360*mckin**10*q_cut**7*sE)/mbkin**24 +
(32400*mckin**12*q_cut**7*sE)/mbkin**26 - (480*q_cut**8*sE)/mbkin**16 -
(25440*mckin**2*q_cut**8*sE)/mbkin**18 - (44400*mckin**4*q_cut**8*sE)/
mbkin**20 - (82928*mckin**6*q_cut**8*sE)/mbkin**22 -
(85728*mckin**8*q_cut**8*sE)/mbkin**24 - (4320*mckin**10*q_cut**8*sE)/
mbkin**26 + (1120*q_cut**9*sE)/mbkin**18 + (2240*mckin**2*q_cut**9*sE)/
mbkin**20 + (2240*mckin**4*q_cut**9*sE)/mbkin**22 +
(9664*mckin**6*q_cut**9*sE)/mbkin**24 + (10080*mckin**8*q_cut**9*sE)/
mbkin**26 - (1200*q_cut**10*sE)/mbkin**20 - (1200*mckin**2*q_cut**10*sE)/
mbkin**22 - (6960*mckin**4*q_cut**10*sE)/mbkin**24 -
(10800*mckin**6*q_cut**10*sE)/mbkin**26 + (320*q_cut**11*sE)/mbkin**22 +
(2880*mckin**4*q_cut**11*sE)/mbkin**26 - 145*sqB - (800*mckin**2*sqB)/
mbkin**2 + (54167*mckin**4*sqB)/mbkin**4 - (39482*mckin**6*sqB)/
mbkin**6 - (2079312*mckin**8*sqB)/mbkin**8 - (1917294*mckin**10*sqB)/
mbkin**10 + (4531890*mckin**12*sqB)/mbkin**12 +
(3302724*mckin**14*sqB)/mbkin**14 - (2155053*mckin**16*sqB)/
mbkin**16 - (1741748*mckin**18*sqB)/mbkin**18 -
(24841*mckin**20*sqB)/mbkin**20 + (69430*mckin**22*sqB)/mbkin**22 +
(974*mckin**24*sqB)/mbkin**24 - (510*mckin**26*sqB)/mbkin**26 +
(570*q_cut*sqB)/mbkin**2 + (6480*mckin**2*q_cut*sqB)/mbkin**4 -
(148544*mckin**4*q_cut*sqB)/mbkin**6 - (579896*mckin**6*q_cut*sqB)/
mbkin**8 + (3426370*mckin**8*q_cut*sqB)/mbkin**10 +
(16467184*mckin**10*q_cut*sqB)/mbkin**12 + (23592832*mckin**12*q_cut*sqB)/
mbkin**14 + (14304928*mckin**14*q_cut*sqB)/mbkin**16 +
(2600590*mckin**16*q_cut*sqB)/mbkin**18 - (688736*mckin**18*q_cut*sqB)/
mbkin**20 - (201440*mckin**20*q_cut*sqB)/mbkin**22 +
(4152*mckin**22*q_cut*sqB)/mbkin**24 + (2070*mckin**24*q_cut*sqB)/
mbkin**26 - (560*q_cut**2*sqB)/mbkin**4 - (9660*mckin**2*q_cut**2*sqB)/
mbkin**6 + (55200*mckin**4*q_cut**2*sqB)/mbkin**8 +
(549540*mckin**6*q_cut**2*sqB)/mbkin**10 + (928824*mckin**8*q_cut**2*sqB)/
mbkin**12 + (375504*mckin**10*q_cut**2*sqB)/mbkin**14 +
(785808*mckin**12*q_cut**2*sqB)/mbkin**16 + (1426416*mckin**14*q_cut**2*sqB)/
mbkin**18 + (718968*mckin**16*q_cut**2*sqB)/mbkin**20 +
(84620*mckin**18*q_cut**2*sqB)/mbkin**22 - (13680*mckin**20*q_cut**2*sqB)/
mbkin**24 - (2100*mckin**22*q_cut**2*sqB)/mbkin**26 -
(570*q_cut**3*sqB)/mbkin**6 - (8760*mckin**2*q_cut**3*sqB)/mbkin**8 +
(108574*mckin**4*q_cut**3*sqB)/mbkin**10 + (1034772*mckin**6*q_cut**3*sqB)/
mbkin**12 + (2407232*mckin**8*q_cut**3*sqB)/mbkin**14 +
(2729180*mckin**10*q_cut**3*sqB)/mbkin**16 + (2234328*mckin**12*q_cut**3*
sqB)/mbkin**18 + (1038412*mckin**14*q_cut**3*sqB)/mbkin**20 +
(159642*mckin**16*q_cut**3*sqB)/mbkin**22 - (7812*mckin**18*q_cut**3*sqB)/
mbkin**24 - (2070*mckin**20*q_cut**3*sqB)/mbkin**26 +
(1440*q_cut**4*sqB)/mbkin**8 + (27510*mckin**2*q_cut**4*sqB)/mbkin**10 -
(43082*mckin**4*q_cut**4*sqB)/mbkin**12 - (1078244*mckin**6*q_cut**4*sqB)/
mbkin**14 - (3002066*mckin**8*q_cut**4*sqB)/mbkin**16 -
(2994488*mckin**10*q_cut**4*sqB)/mbkin**18 - (1173962*mckin**12*q_cut**4*
sqB)/mbkin**20 - (79652*mckin**14*q_cut**4*sqB)/mbkin**22 +
(42966*mckin**16*q_cut**4*sqB)/mbkin**24 + (5130*mckin**18*q_cut**4*sqB)/
mbkin**26 - (630*q_cut**5*sqB)/mbkin**10 - (10920*mckin**2*q_cut**5*sqB)/
mbkin**12 - (5652*mckin**4*q_cut**5*sqB)/mbkin**14 +
(184832*mckin**6*q_cut**5*sqB)/mbkin**16 + (365376*mckin**8*q_cut**5*sqB)/
mbkin**18 + (177024*mckin**10*q_cut**5*sqB)/mbkin**20 -
(14900*mckin**12*q_cut**5*sqB)/mbkin**22 - (20664*mckin**14*q_cut**5*sqB)/
mbkin**24 - (1890*mckin**16*q_cut**5*sqB)/mbkin**26 -
(630*q_cut**6*sqB)/mbkin**12 - (13650*mckin**2*q_cut**6*sqB)/mbkin**14 -
(70346*mckin**4*q_cut**6*sqB)/mbkin**16 - (131414*mckin**6*q_cut**6*sqB)/
mbkin**18 - (124586*mckin**8*q_cut**6*sqB)/mbkin**20 -
(86790*mckin**10*q_cut**6*sqB)/mbkin**22 - (26502*mckin**12*q_cut**6*sqB)/
mbkin**24 - (1890*mckin**14*q_cut**6*sqB)/mbkin**26 +
(810*q_cut**7*sqB)/mbkin**14 + (12840*mckin**2*q_cut**7*sqB)/mbkin**16 +
(61750*mckin**4*q_cut**7*sqB)/mbkin**18 + (109420*mckin**6*q_cut**7*sqB)/
mbkin**20 + (85210*mckin**8*q_cut**7*sqB)/mbkin**22 +
(27180*mckin**10*q_cut**7*sqB)/mbkin**24 + (1350*mckin**12*q_cut**7*sqB)/
mbkin**26 - (255*q_cut**8*sqB)/mbkin**16 - (2910*mckin**2*q_cut**8*sqB)/
mbkin**18 - (11409*mckin**4*q_cut**8*sqB)/mbkin**20 -
(15662*mckin**6*q_cut**8*sqB)/mbkin**22 - (6588*mckin**8*q_cut**8*sqB)/
mbkin**24 - (180*mckin**10*q_cut**8*sqB)/mbkin**26 -
(140*q_cut**9*sqB)/mbkin**18 - (280*mckin**2*q_cut**9*sqB)/mbkin**20 -
(568*mckin**4*q_cut**9*sqB)/mbkin**22 + (184*mckin**6*q_cut**9*sqB)/
mbkin**24 + (420*mckin**8*q_cut**9*sqB)/mbkin**26 + (150*q_cut**10*sqB)/
mbkin**20 + (150*mckin**2*q_cut**10*sqB)/mbkin**22 -
(210*mckin**4*q_cut**10*sqB)/mbkin**24 - (450*mckin**6*q_cut**10*sqB)/
mbkin**26 - (40*q_cut**11*sqB)/mbkin**22 + (120*mckin**4*q_cut**11*sqB)/
mbkin**26))*np.log((mbkin**2 + mckin**2 - q_cut -
mbkin**2*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 -
2*mbkin**2*q_cut - 2*mckin**2*q_cut + q_cut**2)/mbkin**4))/
(mbkin**2 + mckin**2 - q_cut + mbkin**2*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*
mckin**2 + mckin**4 - 2*mbkin**2*q_cut - 2*mckin**2*q_cut + q_cut**2)/
mbkin**4))) - (144*mckin**4*
(-16*(-((-1 + mckin**2/mbkin**2)**4*(-590 - (170*mckin**2)/mbkin**2 +
(47529*mckin**4)/mbkin**4 + (10177*mckin**6)/mbkin**6 -
(120338*mckin**8)/mbkin**8 - (39978*mckin**10)/mbkin**10 -
(3053*mckin**12)/mbkin**12 + (12643*mckin**14)/mbkin**14 +
(3060*mckin**16)/mbkin**16)) + ((-1 + mckin**2/mbkin**2)**2*(
-2960 - (5850*mckin**2)/mbkin**2 + (190377*mckin**4)/mbkin**4 +
(219388*mckin**6)/mbkin**6 - (442203*mckin**8)/mbkin**8 -
(597078*mckin**10)/mbkin**10 - (182993*mckin**12)/mbkin**12 +
(13020*mckin**14)/mbkin**14 + (67059*mckin**16)/mbkin**16 +
(15480*mckin**18)/mbkin**18)*q_cut)/mbkin**2 -
(3*(-1780 - (4930*mckin**2)/mbkin**2 + (89849*mckin**4)/mbkin**4 +
(83367*mckin**6)/mbkin**6 - (134199*mckin**8)/mbkin**8 -
(202701*mckin**10)/mbkin**10 - (265421*mckin**12)/mbkin**12 -
(100835*mckin**14)/mbkin**14 + (4207*mckin**16)/mbkin**16 +
(39243*mckin**18)/mbkin**18 + (9360*mckin**20)/mbkin**20)*q_cut**2)/
mbkin**4 + ((-2970 - (11480*mckin**2)/mbkin**2 + (121169*mckin**4)/
mbkin**4 + (162838*mckin**6)/mbkin**6 - (396629*mckin**8)/
mbkin**8 - (498572*mckin**10)/mbkin**10 - (54133*mckin**12)/
mbkin**12 + (158794*mckin**14)/mbkin**14 + (97083*mckin**16)/
mbkin**16 + (15660*mckin**18)/mbkin**18)*q_cut**3)/mbkin**6 +
((-2940 - (26390*mckin**2)/mbkin**2 - (12875*mckin**4)/mbkin**4 +
(74925*mckin**6)/mbkin**6 + (184456*mckin**8)/mbkin**8 +
(125662*mckin**10)/mbkin**10 + (82895*mckin**12)/mbkin**12 +
(75915*mckin**14)/mbkin**14 + (15120*mckin**16)/mbkin**16)*q_cut**4)/
mbkin**8 - (3*(-1750 - (14980*mckin**2)/mbkin**2 - (1663*mckin**4)/
mbkin**4 + (43998*mckin**6)/mbkin**6 + (91212*mckin**8)/
mbkin**8 + (97050*mckin**10)/mbkin**10 + (54369*mckin**12)/
mbkin**12 + (8820*mckin**14)/mbkin**14)*q_cut**5)/mbkin**10 +
((-2940 - (23870*mckin**2)/mbkin**2 + (9833*mckin**4)/mbkin**4 +
(105581*mckin**6)/mbkin**6 + (151243*mckin**8)/mbkin**8 +
(84441*mckin**10)/mbkin**10 + (15120*mckin**12)/mbkin**12)*q_cut**6)/
mbkin**12 - ((-810 - (4360*mckin**2)/mbkin**2 + (9023*mckin**4)/
mbkin**4 + (21544*mckin**6)/mbkin**6 + (14031*mckin**8)/
mbkin**8 + (7020*mckin**10)/mbkin**10)*q_cut**7)/mbkin**14 +
(6*(-55 - (70*mckin**2)/mbkin**2 + (431*mckin**4)/mbkin**4 +
(672*mckin**6)/mbkin**6 + (990*mckin**8)/mbkin**8)*q_cut**8)/
mbkin**16 - (10*(-19 - (19*mckin**2)/mbkin**2 + (230*mckin**4)/
mbkin**4 + (342*mckin**6)/mbkin**6)*q_cut**9)/mbkin**18 -
(40*(mbkin**4 - 18*mckin**4)*q_cut**10)/mbkin**24)*rE +
((-1 + mckin**2/mbkin**2)**2 - (2*(mbkin**2 + mckin**2)*q_cut)/mbkin**4 +
q_cut**2/mbkin**4)*((-2520*mckin**2*muG**2)/mbkin**2 +
(7740*mckin**4*muG**2)/mbkin**4 - (25884*mckin**6*muG**2)/mbkin**6 +
(165564*mckin**8*muG**2)/mbkin**8 + (515340*mckin**10*muG**2)/
mbkin**10 - (1157940*mckin**12*muG**2)/mbkin**12 +
(117180*mckin**14*muG**2)/mbkin**14 + (465444*mckin**16*muG**2)/
mbkin**16 - (78804*mckin**18*muG**2)/mbkin**18 -
(6120*mckin**20*muG**2)/mbkin**20 - (2520*mckin**2*muG*mupi)/
mbkin**2 + (17460*mckin**4*muG*mupi)/mbkin**4 +
(51084*mckin**6*muG*mupi)/mbkin**6 - (442764*mckin**8*muG*mupi)/
mbkin**8 - (36540*mckin**10*muG*mupi)/mbkin**10 +
(910980*mckin**12*muG*mupi)/mbkin**12 - (243180*mckin**14*muG*mupi)/
mbkin**14 - (289044*mckin**16*muG*mupi)/mbkin**16 +
(28404*mckin**18*muG*mupi)/mbkin**18 + (6120*mckin**20*muG*mupi)/
mbkin**20 - (2520*mckin**2*muG**2*q_cut)/mbkin**4 +
(30060*mckin**4*muG**2*q_cut)/mbkin**6 + (108504*mckin**6*muG**2*q_cut)/
mbkin**8 - (486972*mckin**8*muG**2*q_cut)/mbkin**10 -
(1597032*mckin**10*muG**2*q_cut)/mbkin**12 - (2135052*mckin**12*muG**
2*q_cut)/mbkin**14 - (563832*mckin**14*muG**2*q_cut)/mbkin**16 +
(273564*mckin**16*muG**2*q_cut)/mbkin**18 + (18720*mckin**18*muG**2*q_cut)/
mbkin**20 + (7560*mckin**2*muG*mupi*q_cut)/mbkin**4 -
(30060*mckin**4*muG*mupi*q_cut)/mbkin**6 - (214344*mckin**6*muG*mupi*
q_cut)/mbkin**8 + (476892*mckin**8*muG*mupi*q_cut)/mbkin**10 +
(2035512*mckin**10*muG*mupi*q_cut)/mbkin**12 +
(1832652*mckin**12*muG*mupi*q_cut)/mbkin**14 +
(387432*mckin**14*muG*mupi*q_cut)/mbkin**16 -
(122364*mckin**16*muG*mupi*q_cut)/mbkin**18 -
(18720*mckin**18*muG*mupi*q_cut)/mbkin**20 + (5040*mckin**2*muG**2*q_cut**
2)/mbkin**6 - (131400*mckin**6*muG**2*q_cut**2)/mbkin**10 -
(42912*mckin**8*muG**2*q_cut**2)/mbkin**12 + (603144*mckin**10*muG**2*q_cut**
2)/mbkin**14 + (130968*mckin**12*muG**2*q_cut**2)/mbkin**16 -
(189360*mckin**14*muG**2*q_cut**2)/mbkin**18 - (12600*mckin**16*muG**2*q_cut**
2)/mbkin**20 - (5040*mckin**2*muG*mupi*q_cut**2)/mbkin**6 +
(10080*mckin**4*muG*mupi*q_cut**2)/mbkin**8 + (111240*mckin**6*muG*mupi*
q_cut**2)/mbkin**10 - (98208*mckin**8*muG*mupi*q_cut**2)/mbkin**12 -
(401544*mckin**10*muG*mupi*q_cut**2)/mbkin**14 -
(80568*mckin**12*muG*mupi*q_cut**2)/mbkin**16 +
(88560*mckin**14*muG*mupi*q_cut**2)/mbkin**18 +
(12600*mckin**16*muG*mupi*q_cut**2)/mbkin**20 +
(15120*mckin**2*muG**2*q_cut**3)/mbkin**8 + (12600*mckin**4*muG**2*q_cut**3)/
mbkin**10 - (302760*mckin**6*muG**2*q_cut**3)/mbkin**12 -
(719352*mckin**8*muG**2*q_cut**3)/mbkin**14 - (676512*mckin**10*muG**2*q_cut**
3)/mbkin**16 - (239760*mckin**12*muG**2*q_cut**3)/mbkin**18 -
(12600*mckin**14*muG**2*q_cut**3)/mbkin**20 - (5040*mckin**2*muG*mupi*q_cut**
3)/mbkin**8 + (7560*mckin**4*muG*mupi*q_cut**3)/mbkin**10 +
(161640*mckin**6*muG*mupi*q_cut**3)/mbkin**12 +
(316152*mckin**8*muG*mupi*q_cut**3)/mbkin**14 +
(323712*mckin**10*muG*mupi*q_cut**3)/mbkin**16 +
(138960*mckin**12*muG*mupi*q_cut**3)/mbkin**18 +
(12600*mckin**14*muG*mupi*q_cut**3)/mbkin**20 -
(22680*mckin**2*muG**2*q_cut**4)/mbkin**10 - (3780*mckin**4*muG**2*q_cut**4)/
mbkin**12 + (454500*mckin**6*muG**2*q_cut**4)/mbkin**14 +
(754668*mckin**8*muG**2*q_cut**4)/mbkin**16 + (343620*mckin**10*muG**2*q_cut**
4)/mbkin**18 + (17640*mckin**12*muG**2*q_cut**4)/mbkin**20 +
(7560*mckin**2*muG*mupi*q_cut**4)/mbkin**10 - (11340*mckin**4*muG*mupi*
q_cut**4)/mbkin**12 - (227700*mckin**6*muG*mupi*q_cut**4)/mbkin**14 -
(376668*mckin**8*muG*mupi*q_cut**4)/mbkin**16 -
(192420*mckin**10*muG*mupi*q_cut**4)/mbkin**18 -
(17640*mckin**12*muG*mupi*q_cut**4)/mbkin**20 +
(7560*mckin**2*muG**2*q_cut**5)/mbkin**12 - (6300*mckin**4*muG**2*q_cut**5)/
mbkin**14 - (143496*mckin**6*muG**2*q_cut**5)/mbkin**16 -
(109476*mckin**8*muG**2*q_cut**5)/mbkin**18 - (5040*mckin**10*muG**2*q_cut**
5)/mbkin**20 - (2520*mckin**2*muG*mupi*q_cut**5)/mbkin**12 +
(6300*mckin**4*muG*mupi*q_cut**5)/mbkin**14 + (67896*mckin**6*muG*mupi*
q_cut**5)/mbkin**16 + (59076*mckin**8*muG*mupi*q_cut**5)/mbkin**18 +
(5040*mckin**10*muG*mupi*q_cut**5)/mbkin**20 +
(2520*mckin**4*muG**2*q_cut**6)/mbkin**16 + (4176*mckin**6*muG**2*q_cut**6)/
mbkin**18 + (2520*mckin**8*muG**2*q_cut**6)/mbkin**20 -
(2520*mckin**4*muG*mupi*q_cut**6)/mbkin**16 - (4176*mckin**6*muG*mupi*
q_cut**6)/mbkin**18 - (2520*mckin**8*muG*mupi*q_cut**6)/mbkin**20 -
(3960*mckin**4*muG**2*q_cut**7)/mbkin**18 - (3960*mckin**6*muG**2*q_cut**7)/
mbkin**20 + (3960*mckin**4*muG*mupi*q_cut**7)/mbkin**18 +
(3960*mckin**6*muG*mupi*q_cut**7)/mbkin**20 + (1440*mckin**4*muG**2*q_cut**
8)/mbkin**20 - (1440*mckin**4*muG*mupi*q_cut**8)/mbkin**20 +
72*mckin**2*muG*((-1 + mckin**2/mbkin**2)**2*(-70 + (345*mckin**2)/
mbkin**2 + (2179*mckin**4)/mbkin**4 - (8286*mckin**6)/mbkin**6 -
(19766*mckin**8)/mbkin**8 - (5941*mckin**10)/mbkin**10 +
(1129*mckin**12)/mbkin**12 + (170*mckin**14)/mbkin**14) +
((210 - (835*mckin**2)/mbkin**2 - (5954*mckin**4)/mbkin**4 +
(13247*mckin**6)/mbkin**6 + (56542*mckin**8)/mbkin**8 +
(50907*mckin**10)/mbkin**10 + (10762*mckin**12)/mbkin**12 -
(3399*mckin**14)/mbkin**14 - (520*mckin**16)/mbkin**16)*q_cut)/
mbkin**2 +
'''
access_groups_v2
================
The following methods allow for interaction with the Tenable.io
:devportal:`access-groups-v2 <v2-access-groups>` API endpoints.
Methods available on ``tio.access_groups_v2``:
.. rst-class:: hide-signature
.. autoclass:: AccessGroupsV2API
.. automethod:: list
.. automethod:: create
.. automethod:: delete
.. automethod:: edit
.. automethod:: details
'''
from restfly.utils import dict_merge
from tenable.errors import UnexpectedValueError
from tenable.io.base import TIOEndpoint, TIOIterator
class AccessGroupsIteratorV2(TIOIterator):
'''
The access groups v2 iterator provides a scalable way to work through
access groups result sets of any size. The iterator will walk through each
page of data, returning one record at a time. If it reaches the end of a
page of records, then it will request the next page of information and then
continue to return records from the next page (and the next, and the next)
until the counter reaches the total number of records that the API has
reported.
Attributes:
count (int): The current number of records that have been returned
page (list):
The current page of data being walked through. pages will be
cycled through as the iterator requests more information from the
API.
page_count (int): The number of records returned from the current page.
total (int):
The total number of records that exist for the current request.
'''
pass
class AccessGroupsV2API(TIOEndpoint):
'''
This will contain all methods related to access groups
'''
def _list_clean(self, items):
'''
Removes duplicate values from list
Args:
items (list): list of items
Returns:
:obj:`list`:
Returns list of distinct values
'''
return list(set(self._check('items', items, list)))
def _principal_constructor(self, items):
'''
Simple principal tuple expander. Also supports validating principal
dictionaries for transparent passthrough.
'''
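# Illustrative expansion performed below (a sketch, not exhaustive; the group name is made up):
#   ('user', '32a0c314-442b-4aed-bbf5-ba9cf5cafbf4')
#     -> {'type': 'user', 'principal_id': '32a0c314-442b-4aed-bbf5-ba9cf5cafbf4', 'permissions': ['CAN_VIEW']}
#   ('group', 'Linux Admins', ['CAN_SCAN'])
#     -> {'type': 'group', 'principal_name': 'Linux Admins', 'permissions': ['CAN_SCAN']}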
resp = list()
for item in items:
self._check('principal', item, (tuple, dict))
if isinstance(item, tuple):
data = dict()
if len(item) == 2:
item = item + ([],)
data['type'] = self._check('principal:type', item[0], str,
choices=['user', 'group'])
try:
data['principal_id'] = self._check('principal:id', item[1], 'uuid')
except UnexpectedValueError:
data['principal_name'] = self._check('principal:name', item[1], str)
data['permissions'] = self._list_clean(
[self._check('permission', permission, str,
choices=['CAN_VIEW', 'CAN_SCAN'], case='upper')
for permission in self._check('permissions', item[2], list)])
# if permissions are empty, we will assign default value to it
if not data['permissions']:
data['permissions'] = ['CAN_VIEW']
resp.append(data)
else:
self._check('principal:type', item['type'], str,
choices=['user', 'group'])
if 'principal_id' in item:
self._check('principal_id', item['principal_id'], 'uuid')
if 'principal_name' in item:
self._check('principal_name', item['principal_name'], str)
item['permissions'] = self._list_clean([
self._check('permission', permission, str,
choices=['CAN_VIEW', 'CAN_SCAN'], case='upper')
for permission in self._check('permissions', item['permissions']
if 'permissions' in item and item['permissions']
else None, list, default=['CAN_VIEW'])]
)
resp.append(item)
return resp
def list(self, *filters, **kw):
'''
Get the listing of configured access groups from Tenable.io.
:devportal:`access-groups-v2: list <v2-access-groups-list>`
Args:
*filters (tuple, optional):
Filters are tuples in the form of ('NAME', 'OPERATOR', 'VALUE').
Multiple filters can be used and will filter down the data being
returned from the API.
Examples:
- ``('distro', 'match', 'win')``
- ``('name', 'nmatch', 'home')``
As the filters may change and sortable fields may change over
time, it's highly recommended that you look at the output of
the :py:meth:`tio.filters.access_groups_filters_v2()` endpoint to get more details.
filter_type (str, optional):
The filter_type operator determines how the filters are combined
together. ``and`` will inform the API that all of the filter
conditions must be met for an access group to be returned,
whereas ``or`` would mean that if any of the conditions are met,
the access group record will be returned.
limit (int, optional):
The number of records to retrieve. Default is 50
offset (int, optional):
The starting record to retrieve. Default is 0.
sort (tuple, optional):
A tuple of tuples identifying the field and sort order of
the field.
wildcard (str, optional):
A string to pattern match against all available fields returned.
wildcard_fields (list, optional):
A list of fields to optionally restrict the wild-card matching
to.
Returns:
:obj:`AccessGroupsIterator`:
An iterator that handles the page management of the requested
records.
Examples:
Getting the listing of all access groups:
>>> for group in tio.access_groups_v2.list():
... pprint(group)
Retrieving the access groups whose name matches ``win``:
>>> for group in tio.access_groups_v2.list(('name', 'eq', 'win')):
... pprint(group)
'''
limit = 50
offset = 0
pages = None
query = self._parse_filters(filters,
self._api.filters.access_group_filters_v2(), rtype='colon')
# If the offset was set to something other than the default starting
# point of 0, then we will update offset to reflect that.
if 'offset' in kw and self._check('offset', kw['offset'], int):
offset = kw['offset']
# The limit parameter affects how many records at a time we will pull
# from the API. The default in the API is set to 50, however we can
# pull any variable amount.
if 'limit' in kw and self._check('limit', kw['limit'], int):
limit = kw['limit']
# For the sorting fields, we are converting the tuple that has been
# provided to us and converting it into a comma-delimited string with
# each field being represented with its sorting order. e.g. If we are
# presented with the following:
#
# sort=(('field1', 'asc'), ('field2', 'desc'))
#
# we will generate the following string:
#
# sort=field1:asc,field2:desc
#
if 'sort' in kw and self._check('sort', kw['sort'], tuple):
query['sort'] = ','.join(['{}:{}'.format(
self._check('sort_field', i[0], str),
self._check('sort_direction', i[1], str, choices=['asc', 'desc'])
) for i in kw['sort']])
# The filter_type determines how the filters are combined together.
# The default is 'and', however you can always explicitly define 'and'
# or 'or'.
if 'filter_type' in kw and self._check(
'filter_type', kw['filter_type'], str, choices=['and', 'or']):
query['ft'] = kw['filter_type']
# The wild-card filter text refers to how the API will pattern match
# within all fields, or specific fields using the wildcard_fields param.
if 'wildcard' in kw and self._check('wildcard', kw['wildcard'], str):
query['w'] = kw['wildcard']
# The wildcard_fields parameter allows the user to restrict the fields
# that the wild-card pattern match pertains to.
if 'wildcard_fields' in kw and self._check(
'wildcard_fields', kw['wildcard_fields'], list):
query['wf'] = ','.join(kw['wildcard_fields'])
# Return the Iterator.
return AccessGroupsIteratorV2(self._api,
_limit=limit,
_offset=offset,
_pages_total=pages,
_query=query,
_path='v2/access-groups',
_resource='access_groups'
)
def create(self, name, rules, principals=None, all_users=False, access_group_type=None):
'''
Creates a new access group
:devportal:`access-groups: create <v2-access-groups-create>`
Args:
name (str):
The name of the access group to create.
rules (list):
a list of rule tuples. Tuples are defined in the standardized
method of name, operator, value. For example:
.. code-block:: python
('operating_system', 'eq', ['Windows NT'])
Rules will be validated against the filters before being sent
to the API. Note that the value field in this context is a list
of string values.
principals (list, optional):
A list of principal tuples. Each tuple must contain the type,
the identifier and the permissions for the principal.
The identifier can be either a UUID associated with a user/group or the name of the
user/group. The permissions can be ``CAN_VIEW``, ``CAN_SCAN``, or both, given as a list.
The default permission is ``CAN_VIEW``.
For example:
.. code-block:: python
('user', '32a0c314-442b-4aed-bbf5-ba9cf5cafbf4', ['CAN_VIEW'])
('user', '<EMAIL>', ['CAN_SCAN'])
('group', '32a0c314-442b-4aed-bbf5-ba9cf5cafbf4')
all_users (bool, optional):
If enabled, the access group will apply to all users and any
principals defined will be ignored.
access_group_type (str, optional):
The type of access group. It can be one of two possible types:
`MANAGE_ASSETS`, `SCAN_TARGETS`
The default is `MANAGE_ASSETS`
Returns:
:obj:`dict`:
The resource record for the new access list.
Examples:
Allow all users to see 192.168.0.0/24:
>>> tio.access_groups_v2.create('Example',
... [('ipv4', 'eq', ['192.168.0.0/24'])],
... all_users=True)
Allow everyone in a specific group id to see specific hosts:
>>> tio.access_groups_v2.create('Example',
... [('netbios_name', 'eq', ['dc1.company.tld']),
... ('netbios_name', 'eq', ['dc2.company.tld'])],
... principals=[
... ('group', '32a0c314-442b-4aed-bbf5-ba9cf5cafbf4', ['CAN_VIEW'])
... ])
'''
if not principals:
principals = list()
# construct the payload dictionary
payload = {
# run the rules through the filter parser...
'rules': self._parse_filters(rules,
self._api.filters.access_group_asset_rules_filters_v2(),
rtype='accessgroup')['rules'],
# run the principals through the principal parser...
'principals': self._principal_constructor(principals),
'name': self._check('name', name, str),
'all_users': self._check('all_users', all_users, bool),
'access_group_type': self._check('access_group_type', access_group_type, str,
choices=['MANAGE_ASSETS', 'SCAN_TARGETS'],
default='MANAGE_ASSETS',
case='upper')
}
# call the API endpoint and return the response to the caller.
return self._api.post('v2/access-groups', json=payload).json()
def delete(self, group_id):
'''
Deletes the specified access group.
:devportal:`access-groups: delete <v2-access-groups-delete>`
Args:
group_id (str): The UUID of the access group to remove.
'''
self._api.delete('v2/access-groups/{}'.format(
self._check('group_id', group_id, 'uuid')))
def edit(self, group_id, **kw):
'''
Edits an access group
:devportal:`access-groups: edit <v2-access-groups-edit>`
Args:
import pytest
from ..model import Model, SnapshotSource, CreateOptions
from ..snapshots import Snapshot, DummySnapshotSource
from ..exceptions import DeleteMutlipleSnapshotsError
from ..config import Config
from ..globalinfo import GlobalInfo
from ..settings import Setting
from .faketime import FakeTime
from datetime import datetime, timedelta, timezone
from dateutil.tz import gettz
from io import IOBase
from typing import Dict
test_tz = gettz('EST')
default_source = SnapshotSource()
def test_timeOfDay(estimator) -> None:
time: FakeTime = FakeTime()
info = GlobalInfo(time)
config: Config = Config()
model: Model = Model(config, time, default_source, default_source, info, estimator)
assert model.getTimeOfDay() is None
config = Config().override(Setting.SNAPSHOT_TIME_OF_DAY, '00:00')
model = Model(config, time, default_source, default_source, info, estimator)
assert model.getTimeOfDay() == (0, 0)
config.override(Setting.SNAPSHOT_TIME_OF_DAY, '23:59')
model = Model(config, time, default_source, default_source, info, estimator)
assert model.getTimeOfDay() == (23, 59)
config.override(Setting.SNAPSHOT_TIME_OF_DAY, '24:59')
model = Model(config, time, default_source, default_source, info, estimator)
assert model.getTimeOfDay() is None
config.override(Setting.SNAPSHOT_TIME_OF_DAY, '24:60')
model = Model(config, time, default_source, default_source, info, estimator)
assert model.getTimeOfDay() is None
config.override(Setting.SNAPSHOT_TIME_OF_DAY, '-1:60')
model = Model(config, time, default_source, default_source, info, estimator)
assert model.getTimeOfDay() is None
config.override(Setting.SNAPSHOT_TIME_OF_DAY, '24:-1')
model = Model(config, time, default_source, default_source, info, estimator)
assert model.getTimeOfDay() is None
config.override(Setting.SNAPSHOT_TIME_OF_DAY, 'boop:60')
model = Model(config, time, default_source, default_source, info, estimator)
assert model.getTimeOfDay() is None
config.override(Setting.SNAPSHOT_TIME_OF_DAY, '24:boop')
model = Model(config, time, default_source, default_source, info, estimator)
assert model.getTimeOfDay() is None
config.override(Setting.SNAPSHOT_TIME_OF_DAY, '24:10:22')
model = Model(config, time, default_source, default_source, info, estimator)
assert model.getTimeOfDay() is None
config.override(Setting.SNAPSHOT_TIME_OF_DAY, '10')
model = Model(config, time, default_source, default_source, info, estimator)
assert model.getTimeOfDay() is None
def test_next_time(estimator):
time: FakeTime = FakeTime()
info = GlobalInfo(time)
now: datetime = datetime(1985, 12, 6, 1, 0, 0).astimezone(timezone.utc)
config: Config = Config().override(Setting.DAYS_BETWEEN_SNAPSHOTS, 0)
model: Model = Model(config, time, default_source, default_source, info, estimator)
assert model._nextSnapshot(now=now, last_snapshot=None) is None
assert model._nextSnapshot(now=now, last_snapshot=now) is None
config: Config = Config().override(Setting.DAYS_BETWEEN_SNAPSHOTS, 1)
model: Model = Model(config, time, default_source, default_source, info, estimator)
assert model._nextSnapshot(now=now, last_snapshot=None) == now - timedelta(minutes=1)
assert model._nextSnapshot(now=now, last_snapshot=now) == now + timedelta(days=1)
assert model._nextSnapshot(now=now, last_snapshot=now - timedelta(days=1)) == now
assert model._nextSnapshot(now=now, last_snapshot=now + timedelta(days=1)) == now + timedelta(days=2)
def test_next_time_of_day(estimator):
time: FakeTime = FakeTime()
info = GlobalInfo(time)
now: datetime = datetime(1985, 12, 6, 1, 0, 0).astimezone(timezone.utc)
config: Config = Config().override(Setting.DAYS_BETWEEN_SNAPSHOTS, 1).override(Setting.SNAPSHOT_TIME_OF_DAY, '08:00')
model: Model = Model(config, time, default_source, default_source, info, estimator)
assert model._nextSnapshot(now=now, last_snapshot=None) == now - timedelta(minutes=1)
assert model._nextSnapshot(now=now, last_snapshot=now - timedelta(days=1)) == now
assert model._nextSnapshot(now=now, last_snapshot=now) == datetime(1985, 12, 6, 8, 0, tzinfo=test_tz)
assert model._nextSnapshot(now=now, last_snapshot=datetime(1985, 12, 6, 8, 0, tzinfo=test_tz)) == datetime(1985, 12, 7, 8, 0, tzinfo=test_tz)
assert model._nextSnapshot(now=datetime(1985, 12, 6, 8, 0, tzinfo=test_tz), last_snapshot=datetime(1985, 12, 6, 8, 0, tzinfo=test_tz)) == datetime(1985, 12, 7, 8, 0, tzinfo=test_tz)
def test_next_time_of_day_dest_disabled(model, time, source, dest):
dest.setEnabled(True)
assert model._nextSnapshot(now=time.now(), last_snapshot=None) == time.now() - timedelta(minutes=1)
dest.setEnabled(False)
assert model._nextSnapshot(now=time.now(), last_snapshot=None) is None
def test_sync_empty(model, time, source, dest):
source.setEnabled(False)
dest.setEnabled(False)
model.sync(time.now())
assert len(model.snapshots) == 0
def test_sync_single_source(model, source, dest, time):
snapshot = source.create(CreateOptions(time.now(), "name"))
dest.setEnabled(False)
model.sync(time.now())
assert len(model.snapshots) == 1
assert snapshot.slug() in model.snapshots
assert model.snapshots[snapshot.slug()].getSource(source.name()) is snapshot
assert model.snapshots[snapshot.slug()].getSource(dest.name()) is None
def test_sync_source_and_dest(model, time, source, dest):
snapshot_source = source.create(CreateOptions(time.now(), "name"))
model._syncSnapshots([source, dest])
assert len(model.snapshots) == 1
snapshot_dest = dest.save(model.snapshots[snapshot_source.slug()])
model._syncSnapshots([source, dest])
assert len(model.snapshots) == 1
assert model.snapshots[snapshot_source.slug()].getSource(source.name()) is snapshot_source
assert model.snapshots[snapshot_source.slug()].getSource(dest.name()) is snapshot_dest
def test_sync_different_sources(model, time, source, dest):
snapshot_source = source.create(CreateOptions(time.now(), "name"))
snapshot_dest = dest.create(CreateOptions(time.now(), "name"))
model._syncSnapshots([source, dest])
assert len(model.snapshots) == 2
assert model.snapshots[snapshot_source.slug()].getSource(source.name()) is snapshot_source
assert model.snapshots[snapshot_dest.slug()].getSource(dest.name()) is snapshot_dest
def test_removal(model, time, source, dest):
source.create(CreateOptions(time.now(), "name"))
model._syncSnapshots([source, dest])
assert len(model.snapshots) == 1
source.current = {}
model._syncSnapshots([source, dest])
assert len(model.snapshots) == 0
def test_new_snapshot(model, source, dest, time):
model.sync(time.now())
assert len(model.snapshots) == 1
assert len(source.created) == 1
assert source.created[0].date() == time.now()
assert len(source.current) == 1
assert len(dest.current) == 1
def test_upload_snapshot(time, model, dest, source):
dest.setEnabled(True)
model.sync(time.now())
assert len(model.snapshots) == 1
source.assertThat(created=1, current=1)
assert len(source.created) == 1
assert source.created[0].date() == time.now()
assert len(source.current) == 1
assert len(dest.current) == 1
assert len(dest.saved) == 1
def test_disabled(time, model, source, dest):
# disable both the source and the destination, each holding a snapshot
source.setEnabled(False)
source.insert("newer", time.now(), "slug1")
dest.setEnabled(False)
dest.insert("s2", time.now(), "slug2")
model.sync(time.now())
source.assertUnchanged()
dest.assertUnchanged()
assert len(model.snapshots) == 0
def test_delete_source(time, model, source, dest):
time = FakeTime()
now = time.now()
# create two source snapshots
source.setMax(1)
older = source.insert("older", now - timedelta(minutes=1), "older")
newer = source.insert("newer", now, "newer")
# configure only one to be kept
model.sync(now)
assert len(model.snapshots) == 1
assert len(source.saved) == 0
assert source.deleted == [older]
assert len(source.saved) == 0
assert newer.slug() in model.snapshots
assert model.snapshots[newer.slug()].getSource(source.name()) == newer
def test_delete_dest(time, model, source, dest):
now = time.now()
# create two destination snapshots
dest.setMax(1)
older = dest.insert("older", now - timedelta(minutes=1), "older")
newer = dest.insert("newer", now, "newer")
# configure only one to be kept
model.sync(now)
assert len(model.snapshots) == 1
assert len(dest.saved) == 0
assert dest.deleted == [older]
assert len(source.saved) == 0
assert newer.slug() in model.snapshots
assert model.snapshots[newer.slug()].getSource(dest.name()) == newer
source.assertUnchanged()
def test_new_upload_with_delete(time, model, source, dest, simple_config):
now = time.now()
# create a single old snapshot in both the source and the destination
source.setMax(1)
dest.setMax(1)
snapshot_dest = dest.insert("older", now - timedelta(days=1), "older")
snapshot_source = source.insert("older", now - timedelta(days=1), "older")
# configure only one to be kept in both places
simple_config.config.update({
"days_between_snapshots": 1
})
model.reinitialize()
model.sync(now)
# Old snapshot should be deleted, a new one should be created and uploaded.
source.assertThat(current=1, created=1, deleted=1)
dest.assertThat(current=1, saved=1, deleted=1)
assert dest.deleted == [snapshot_dest]
assert source.deleted == [snapshot_source]
assert len(model.snapshots) == 1
assertSnapshot(model, [source.created[0], dest.saved[0]])
def test_new_upload_no_delete(time, model, source, dest, simple_config):
now = time.now()
# create a single old snapshot in both the source and the destination
source.setMax(2)
dest.setMax(2)
snapshot_dest = dest.insert("older", now - timedelta(days=1), "older")
snapshot_source = source.insert("older", now - timedelta(days=1), "older")
# configure keeping two in both places
simple_config.config.update({
"days_between_snapshots": 1
})
model.reinitialize()
model.sync(now)
# Another snapshot should have been created and saved
source.assertThat(current=2, created=1)
dest.assertThat(current=2, saved=1)
assert len(model.snapshots) == 2
assertSnapshot(model, [source.created[0], dest.saved[0]])
assertSnapshot(model, [snapshot_dest, snapshot_source])
def test_multiple_deletes_allowed(time, model, source, dest, simple_config):
now = time.now()
simple_config.config.update({"confirm_multiple_deletes": False})
# create 4 snapshots in dest
dest.setMax(1)
current = dest.insert("current", now, "current")
old = dest.insert("old", now - timedelta(days=1), "old")
older = dest.insert("older", now - timedelta(days=2), "older")
oldest = dest.insert("oldest", now - timedelta(days=3), "oldest")
# configure keeping 1
simple_config.config.update({
"max_snapshots_in_google_drive": 1,
})
model.reinitialize()
model.sync(now)
source.assertUnchanged()
dest.assertThat(current=1, deleted=3)
assert dest.deleted == [oldest, older, old]
assert len(model.snapshots) == 1
assertSnapshot(model, [current])
def test_confirm_multiple_deletes(time, model, source, dest, simple_config):
now = time.now()
dest.setMax(1)
source.setMax(1)
dest.insert("current", now, "current")
dest.insert("old", now - timedelta(days=1), "old")
dest.insert("older", now - timedelta(days=2), "older")
dest.insert("oldest", now - timedelta(days=2), "olderest")
source.insert("current", now, "current")
source.insert("old", now - timedelta(days=1), "old")
source.insert("older", now - timedelta(days=2), "older")
with pytest.raises(DeleteMutlipleSnapshotsError) as thrown:
model.sync(now)
    assert thrown.value.data() == {
        source.name(): 2,
        dest.name(): 3
    }
source.assertUnchanged()
dest.assertUnchanged()
def test_dont_upload_deletable(time, model, source, dest):
now = time.now()
# a new snapshot in Drive and an old snapshot in HA
dest.setMax(1)
current = dest.insert("current", now, "current")
old = source.insert("old", now - timedelta(days=1), "old")
# configure keeping 1
model.sync(now)
# Nothing should happen, because the upload from hassio would have to be deleted right after it's uploaded.
source.assertUnchanged()
dest.assertUnchanged()
assert len(model.snapshots) == 2
assertSnapshot(model, [current])
assertSnapshot(model, [old])
def test_dont_upload_when_disabled(time, model, source, dest):
now = time.now()
    # Make an enabled destination but with upload disabled.
dest.setMax(1)
dest.setUpload(False)
model.sync(now)
# Verify the snapshot was created at the source but not uploaded.
source.assertThat(current=1, created=1)
dest.assertUnchanged()
assert len(model.snapshots) == 1
def test_dont_delete_purgable(time, model, source, dest, simple_config):
now = time.now()
# create a single old snapshot, retained
source.setMax(1)
dest.setMax(1)
snapshot_dest = dest.insert("older", now - timedelta(days=1), "older")
snapshot_dest.setRetained(True)
snapshot_source = source.insert("older", now - timedelta(days=1), "older")
snapshot_source.setRetained(True)
# configure only one to be kept in both places
simple_config.config.update({
"days_between_snapshots": 1
})
model.reinitialize()
model.sync(now)
    # The old snapshot should be kept; a new one should be created and uploaded.
source.assertThat(current=2, created=1)
dest.assertThat(current=2, saved=1)
assert len(model.snapshots) == 2
assertSnapshot(model, [snapshot_dest, snapshot_source])
assertSnapshot(model, [source.created[0], dest.saved[0]])
def test_generational_delete(time, model, dest, source, simple_config):
time.setNow(time.local(2019, 5, 10))
now = time.now()
# Create 4 snapshots, configured to keep 3
source.setMax(3)
source.insert("Fri", time.local(2019, 5, 10, 1))
source.insert("Thu", time.local(2019, 5, 9, 1))
wed = source.insert("Wed", time.local(2019, 5, 8, 1))
source.insert("Mon", time.local(2019, 5, 6, 1))
# configure only one to be kept in both places
simple_config.config.update({
"days_between_snapshots": 1,
"generational_weeks": 1,
"generational_days": 2
})
model.reinitialize()
model.sync(now)
    # Should only delete wed, since it isn't kept in the generational backup config
source.assertThat(current=3, deleted=1)
assert source.deleted == [wed]
assert len(model.snapshots) == 3
dest.assertThat(current=3, saved=3)
def assertSnapshot(model, sources):
matches = {}
for source in sources:
matches[source.source()] = source
slug = source.slug()
assert slug in model.snapshots
        assert model.snapshots[slug].getSource(source.source()) == source
[MASTER]
# Specify a configuration file.
#rcfile=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
ignore-patterns=
# Pickle collected data for later comparisons.
persistent=yes
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Use multiple processes to speed up Pylint.
jobs=2
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code
extension-pkg-whitelist=
# Allow optimization of some AST trees. This will activate a peephole AST
# optimizer, which will apply various small optimizations. For instance, it can
# be used to obtain the result of joining multiple strings with the addition
# operator. Joining a lot of strings can lead to a maximum recursion error in
# Pylint and this flag can prevent that. It has one side effect, the resulting
# AST will be different than the one from reality. This option is deprecated
# and it will be removed in Pylint 2.0.
optimize-ast=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
#enable=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
disable=xrange-builtin,dict-view-method,backtick,unichr-builtin,unpacking-in-except,input-builtin,old-ne-operator,nonzero-method,intern-builtin,parameter-unpacking,apply-builtin,filter-builtin-not-iterating,old-octal-literal,reload-builtin,dict-iter-method,zip-builtin-not-iterating,raw_input-builtin,next-method-called,standarderror-builtin,long-suffix,getslice-method,metaclass-assignment,using-cmp-argument,execfile-builtin,round-builtin,old-division,coerce-method,map-builtin-not-iterating,import-star-module-level,basestring-builtin,buffer-builtin,useless-suppression,unicode-builtin,cmp-method,file-builtin,no-absolute-import,indexing-exception,cmp-builtin,range-builtin-not-iterating,long-builtin,oct-method,coerce-builtin,suppressed-message,old-raise-syntax,raising-string,hex-method,print-statement,reduce-builtin,delslice-method,setslice-method,bad-whitespace,line-too-long
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html. You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=text
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]". This option is deprecated
# and it will be removed in Pylint 2.0.
files-output=no
# Tells whether to display a full report or only the messages
reports=yes
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no
# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,_cb
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,future.builtins
[SPELLING]
# Spelling dictionary name. Available dictionaries: none. To make it working
# install python-enchant package.
spelling-dict=
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no
[TYPECHECK]
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
[LOGGING]
# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO,KLUDGE
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=120
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator
# Maximum number of lines in a module
max-module-lines=1000
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=3
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
[BASIC]
# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Include a hint for the correct naming format with invalid-name
include-naming-hint=no
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
property-classes=abc.abstractproperty
# Regular expression matching correct constant names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Naming hint for constant names
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Regular expression matching correct method names
method-rgx=[a-z_][a-z0-9_]{2,}$
# Naming hint for method names
method-name-hint=[a-z_][a-z0-9_]{2,}$
# Regular expression matching correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,}$
# Naming hint for variable names
variable-name-hint=[a-z_][a-z0-9_]{2,}$
# Regular expression matching correct function names
function-rgx=[a-z_][a-z0-9_]{2,}$
# Naming hint for function names
function-name-hint=[a-z_][a-z0-9_]{2,}$
# Regular expression matching correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,}$
# Naming hint for argument names
argument-name-hint=[a-z_][a-z0-9_]{2,}$
# Regular expression matching correct inline iteration names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Naming hint for inline iteration names
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
# Regular expression matching correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Naming hint for module names
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression matching correct class names
class-rgx=_?[A-Z][a-zA-Z0-9]+$
# Naming hint for class names
class-name-hint=_?[A-Z][a-zA-Z0-9]+$
# Regular expression matching correct attribute names
attr-rgx=[a-z_][a-z0-9_]{2,}$
# Naming hint for attribute names
attr-name-hint=[a-z_][a-z0-9_]{2,}$
# Regular expression matching correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,}|(__.*__))$
# Naming hint for class attribute names
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,}|(__.*__))$
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
[ELIF]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=4
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=optparse
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
# src/populations.py
from src import individual
import random
import numpy as np
class Population:
def __init__(self, pop_size, n_var, n_of, n_lim):
self._size = pop_size
self._n_var = n_var
self._n_lim = n_lim
self._n_of = n_of
self.population = []
if self._n_of > 1:
self.populationQ = []
self.populationPQ = []
def checkFeasibility(self, population, lim_range):
"""Check the feasibility of the individuals of the population"""
for i in range(self._size):
population[i].checkFeasibility(lim_range)
def evaluate(self, population, of_functions, lim_functions):
"""Evaluate both the objective functions and the limiting variables of each individual"""
for i in range(self._size):
population[i].evaluate(of_functions, lim_functions)
def nulliffyFeasibility(self, population):
"""Sets all the individuals of a population to feasible"""
for i in range(len(population)):
population[i].setFeasibility(True)
def obtainPercentageOfFeasibles(self):
n_feasibles = 0
for ind in self.population:
if ind.getFeasibility():
n_feasibles += 1
return n_feasibles*100.0/self._size
class PopulationGA(Population):
def __init__(self, pop_size, n_var, n_gen_var, n_of, n_lim):
super().__init__(pop_size, n_var, n_of, n_lim)
self._n_gen_var = n_gen_var
self._n_genes = self._n_gen_var * self._n_var
def initialize(self):
"""Initialize genes of the population with random genes"""
for i in range(self._size):
self.population.append(individual.IndividualGA(self._n_var, self._n_gen_var, self._n_of, self._n_lim))
def decode(self, population, var_range):
"""Decodes the chromosomes of every individual in the population"""
for i in range(self._size):
population[i].decodeChromosome(var_range)
def mutate(self, population, mut_rate):
"""Mutates all the genes of all the individuals with a probability of mut_rate"""
for i in range(self._size):
population[i].mutate(mut_rate)
def crossover(self, chromo_1, chromo_2):
"""Perform one point crossover of two given chromosomes"""
cross_point = random.randint(1, self._n_genes)
new_chromo_1 = self.__zeros(self._n_genes)
new_chromo_2 = self.__zeros(self._n_genes)
new_chromo_1[:cross_point] = chromo_1[:cross_point]
new_chromo_1[cross_point:] = chromo_2[cross_point:]
new_chromo_2[:cross_point] = chromo_2[:cross_point]
new_chromo_2[cross_point:] = chromo_1[cross_point:]
return [new_chromo_1, new_chromo_2]
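        # Illustrative example: with self._n_genes == 4, chromo_1 = [0, 0, 0, 0],
        # chromo_2 = [1, 1, 1, 1] and a randomly drawn cross_point of 2, the
        # offspring are [0, 0, 1, 1] and [1, 1, 0, 0].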
def tournamentSelection(self, tour_sel_param, max_min):
"""Select one random individuals of the population using tournament selection of size two"""
i1 = random.randint(0, self._size - 1)
i2 = random.randint(0, self._size - 1)
while i2 == i1:
i2 = random.randint(0, self._size - 1)
bool1 = self.population[i1].getFeasibility()
bool2 = self.population[i2].getFeasibility()
fit1 = self.population[i1].getOf()[0]
fit2 = self.population[i2].getOf()[0]
if bool1 > bool2:
best = i1
elif bool2 > bool1:
best = i2
else:
# TODO check how non feasible individuals are compared
if max_min == 'max':
if fit1 > fit2:
best = i1
else:
best = i2
else:
if fit1 < fit2:
best = i1
else:
best = i2
if random.random() > tour_sel_param:
if best == i1:
best = i2
else:
best = i1
return best
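        # Note: the tournament winner (the feasible and/or fitter candidate) is
        # kept with probability tour_sel_param; otherwise the losing candidate
        # is returned, which softens the selection pressure.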
    def __zeros(self, n):
        """Create a list containing n zeros"""
        return [0] * n
class PopulationGAIntegerVar(PopulationGA):
"""Class that stores the information for the different populations and performs mutation, crossover... on them.
This class allows for variables that only accept int values. Population of GA."""
def __init__(self, pop_size, n_var, n_gen_var, n_of, n_lim, int_var_indexes):
super().__init__(pop_size, n_var, n_gen_var, n_of, n_lim)
self.__int_var_indexes = int_var_indexes
def initialize(self):
"""Initialize genes of the population with random genes and integer var individuals"""
for i in range(self._size):
self.population.append(individual.IndividualGAIntegerVar(self._n_var, self._n_gen_var, self._n_of, self._n_lim,
self.__int_var_indexes))
class PopulationMOEA:
def crowdedDistance(self, population, ranking_list_f, ranking_list_nf):
"""Assign the crowded distance to every individual to see how crowded it's area in the design space is."""
for i in range(len(population)):
population[i].setCrowdedDistance(0.)
for of in range(self._n_of):
for num in range(2):
if num == 0:
ranking_list = ranking_list_f
elif num == 1:
ranking_list = ranking_list_nf
for set in ranking_list:
l = len(set)
crowded_dist_list = []
for i in range(l):
crowded_dist_list.append(0)
tupple_set = []
for i in range(l):
tupple_set.append((i, population[set[i]].getOf()[of], set[i]))
tupple_sorted = sorted(tupple_set, key=lambda x: x[1])
for i in range(l):
crowded_dist_list[tupple_sorted[i][0]] = population[tupple_sorted[i][2]].getCrowdedDistance()
for i in range(l):
if i == 0 or i == l - 1:
population[tupple_sorted[i][2]].setCrowdedDistance(float('inf'))
else:
if not tupple_sorted[0][1] - tupple_sorted[l - 1][1]:
population[tupple_sorted[i][2]].setCrowdedDistance(float('inf'))
else:
population[tupple_sorted[i][2]].setCrowdedDistance(
crowded_dist_list[tupple_sorted[i][0]] + (tupple_sorted[i + 1][1] -
tupple_sorted[i - 1][1]) / abs(tupple_sorted[0][1] -tupple_sorted[l - 1][1]))
crowded_dist_list[tupple_sorted[i][0]] = population[tupple_sorted[i][2]].getCrowdedDistance()
def crowdedDistanceOneSet(self, set, n_of):
"""Compute the crowded distance for every individual in one set"""
l = len(set)
crowded_dist_list = []
for i in range(l):
crowded_dist_list.append(0)
for i in range(l):
set[i].setCrowdedDistance(0.)
for objFunct in range(n_of):
tupple_set = []
for i in range(l):
tupple_set.append((i, set[i].getOf()[objFunct]))
tupple_sorted = sorted(tupple_set, key=lambda x: x[1])
for i in range(l):
crowded_dist_list[tupple_sorted[i][0]] = set[tupple_sorted[i][0]].getCrowdedDistance()
for i in range(l):
if i == 0 or i == l - 1:
set[tupple_sorted[i][0]].setCrowdedDistance(float('inf'))
else:
if not tupple_sorted[0][1] - tupple_sorted[l - 1][1]:
set[tupple_sorted[i][0]].setCrowdedDistance(float('inf'))
else:
set[tupple_sorted[i][0]].setCrowdedDistance(crowded_dist_list[tupple_sorted[i][0]] +
(tupple_sorted[i + 1][1] - tupple_sorted[i - 1][1]) / abs(tupple_sorted[0][1] -
tupple_sorted[l - 1][1]))
crowded_dist_list[tupple_sorted[i][0]] = set[tupple_sorted[i][0]].getCrowdedDistance()
return set
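        # The two boundary individuals for each objective receive an infinite
        # crowded distance, so that, within a rank, they are kept in preference
        # to individuals from more crowded regions of objective space.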
def rankPopulation(self, population, max_min):
"""Fast-non-dominated-sort --> assigns a rank value to every individual"""
pop = population[:]
for i in range(len(pop)):
pop[i].setRank(0)
rank = 1
Fi = []
rank_list = []
for p in range(len(pop)):
pop[p].setCrowdedDistance([[], 0])
Sp = []
np = 0
for q in range(len(pop)):
if self._dominates(pop[p].getOf(), pop[q].getOf(), max_min):
Sp.append(q)
elif self._dominates(pop[q].getOf(), pop[p].getOf(), max_min):
np = np + 1
if not np:
population[p].setRank(rank)
Fi.append(p)
x = [Sp[:], np]
pop[p].setCrowdedDistance(x)
crowded_dist = []
for i in range(len(pop)):
crowded_dist.append(pop[i].getCrowdedDistance()[1])
while len(Fi) > 0:
rank_list.append(Fi)
Q = []
for p in Fi:
Sp = pop[p].getCrowdedDistance()[0][:]
for q in Sp:
x = pop[q].getCrowdedDistance()
x[1] = crowded_dist[q] - 1
pop[q].setCrowdedDistance(x)
crowded_dist[q] = pop[q].getCrowdedDistance()[1]
if not pop[q].getCrowdedDistance()[1]:
pop[q].setRank(rank + 1)
Q.append(q)
rank = rank + 1
Fi = Q[:]
for i in range(len(pop)):
population[i] = pop[i]
population[i].setCrowdedDistance(0.)
return [population, rank_list]
# TODO check how non feasible individuals are compared
def _dominates(self, of_i1, of_i2, max_min):
"""Given two individuals checks if the first one dominates the second"""
dom = True
for i in range(len(max_min)):
if max_min[i] == 'max' and of_i1[i] < of_i2[i]:
dom = False
break
elif max_min[i] == 'min' and of_i1[i] > of_i2[i]:
dom = False
break
if of_i1 == of_i2:
dom = False
return dom
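        # Illustrative example: with max_min = ['min', 'min'], of_i1 = [1.0, 2.0]
        # dominates of_i2 = [1.5, 2.0] (no worse in either objective and better
        # in one); identical objective vectors never dominate each other.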
'''
def selectIndividuals(self, rank_list, n_indiv_to_select, type_dist, var_data, of_data, compare_w_data):
"""Select the individual from the last population according to selected criteria"""
rank = rank_list[0]
count = 1
pop_w_index = []
selected_index = []
indiv_check_w = []
while len(rank) < n_indiv_to_select:
rank_below = rank
rank = rank + rank_list[count]
count = count + 1
if compare_w_data:
for i in range(len(var_data)):
indiv_check_w.append([var_data[i], of_data[i]])
# If count>1 it means that there are not enough rank 1 individuals in order to satisfy the number of
# selected required, hence all the necessary ranking except the last one checked are automatically selected
# and then rank_list[count - 1] is checked for distances
if count > 1:
for i in rank_below:
indiv_check_w.append([self.population[i].getVar(), self.population[i].getOf()])
selected_index.append(i)
for i in rank_list[count - 1]:
pop_w_index.append([i, self.population[i]])
while len(selected_index) < n_indiv_to_select:
dist_index = []
for j in range(len(pop_w_index)):
#TODO maybe try summing up all the distances instead of taking just the smallest one?
min_dist = float('inf')
for k in indiv_check_w:
if not type_dist:
dist_to_index = self.__norm(k[0], pop_w_index[j][1].getVar())
if type_dist == 1:
dist_to_index = self.__norm(k[1], pop_w_index[j][1].getOf())
if min_dist > dist_to_index:
min_dist = dist_to_index
dist_index.append([pop_w_index[j][0], min_dist])
sorted_by_distances = sorted(dist_index, key=lambda x: x[1], reverse=True)
for i in sorted_by_distances:
if (i[0] in selected_index) == False:
selected_index.append(i[0])
indiv_check_w.append([self.population[i[0]].getVar(), self.population[i[0]].getOf()])
break
return selected_index
'''
def selectIndividuals(self, rank_list, n_indiv_to_select, type_dist, var_data, of_data, compare_w_data):
"""Select the individual from the last population according to selected criteria"""
# TODO normalized the distances with the ranges found so that the weight doesnt affect the criteria
rank = rank_list[0]
count = 1
pop_w_index = []
selected_index = []
indiv_check_w = []
while len(rank) < n_indiv_to_select:
rank_below = rank
rank = rank + rank_list[count]
count = count + 1
if compare_w_data:
for i in range(len(var_data)):
indiv_check_w.append([var_data[i], of_data[i]])
# If count>1 it means that there are not enough rank 1 individuals in order to satisfy the number of
# selected required, hence all the necessary ranking except the last one checked are automatically selected
# and then rank_list[count - 1] is checked for distances
if count > 1:
for i in rank_below:
indiv_check_w.append([self.population[i].getVar(), self.population[i].getOf()])
selected_index.append(i)
for i in rank_list[count - 1]:
pop_w_index.append([i, self.population[i]])
while len(selected_index) < n_indiv_to_select:
dist_index = []
for j in range(len(pop_w_index)):
#TODO maybe try summing up all the distances instead of taking just the smallest one?
#dist_to_index = 0.
dist_to_index = float('inf')
for k in indiv_check_w:
if type_dist == 0:
#dist_to_index += self.__norm(k[0], pop_w_index[j][1].getVar())
d = self.__norm(k[0], pop_w_index[j][1].getVar())
if d<dist_to_index:
dist_to_index = d
if type_dist == 1:
#dist_to_index += self.__norm(k[1], pop_w_index[j][1].getOf())
                        d = self.__norm(k[1], pop_w_index[j][1].getOf())
                        if d < dist_to_index:
                            dist_to_index = d
                dist_index.append([pop_w_index[j][0], dist_to_index])
            sorted_by_distances = sorted(dist_index, key=lambda x: x[1], reverse=True)
            for i in sorted_by_distances:
                if i[0] not in selected_index:
                    selected_index.append(i[0])
                    indiv_check_w.append([self.population[i[0]].getVar(),
                                          self.population[i[0]].getOf()])
                    break
        return selected_index
import requests
from pprint import pprint
import time
BASE_URL = 'http://host.docker.internal:8080'
REALM = 'ma-graphql-shop'
CLIENT_ID = 'external'
CLIENT_SECRET = '49caadde-e89f-404b-815b-44ea4470e399'
USERNAME = 'admin'
PASSWORD = '<PASSWORD>'
def authenticate():
# grant_type=password&client_id=external&client_secret=161f9398-7e03-4492-a5d9-2b4f1b4b96c7&username=someone&password=password
payload = {
'grant_type': 'password',
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'username': USERNAME,
'password': PASSWORD,
}
r = requests.post(BASE_URL + "/auth/realms/{0}/protocol/openid-connect/token".format(REALM), data=payload)
if r.status_code != 200:
raise ValueError('Authentication failed with code: {0}'.format(r.status_code))
return r.json()['access_token']
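# The returned access token is an OIDC bearer token; the warehouse, item and
# stock helpers below send it as an "Authorization: Bearer <token>" header,
# whereas the order and payment helpers issue unauthenticated requests.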
def create_warehouse(name, token):
headers = {'authorization': 'Bearer {0}'.format(token)}
payload = {'query': 'mutation{{createWarehouse(name: "{0}"){{id}}}}'.format(name)}
r = requests.post(BASE_URL + "/gateway/graphql", headers=headers, json=payload)
if r.status_code != 200:
raise ValueError('Creating warehouse failed with code: {0}'.format(r.status_code))
return r.json()['data']['createWarehouse']['id']
def create_item(name, price, token):
headers = {'authorization': 'Bearer {0}'.format(token)}
payload = {'query': 'mutation{{createItem(title:"{0}", price:{1}){{id}}}}'.format(name, price)}
r = requests.post(BASE_URL + "/gateway/graphql", headers=headers, json=payload)
if r.status_code != 200:
raise ValueError('Creating item failed with code: {0}'.format(r.status_code))
return r.json()['data']['createItem']['id']
def create_stock_position(item, warehouse, amount, token):
headers = {'authorization': 'Bearer {0}'.format(token)}
payload = {'query': 'mutation{{createItemStock(itemId:{0},warehouseId:{1},inStock:{2},available:{2}){{id}}}}'.format(item, warehouse, amount)}
pprint(payload)
r = requests.post(BASE_URL + "/gateway/graphql", headers=headers, json=payload)
if r.status_code != 200:
raise ValueError('Creating stock position failed with code: {0}'.format(r.status_code))
return r.json()['data']['createItemStock']['id']
def create_order(items, houseNo):
payload = {'query': 'mutation{{createOrder(destinationAddress:{{city:"City",street:"Street {0}",zip:"12345"}},positions:[{1}]){{message order{{id}}}}}}'.format(houseNo, ",".join("{{itemId:{0},amount:{1}}}".format(item, amount) for item, amount in items.items()))}
r = requests.post(BASE_URL + "/gateway/graphql", json=payload)
if r.status_code != 200:
raise ValueError('Creating order failed with code: {0}'.format(r.status_code))
pprint(r.json())
return r.json()['data']['createOrder']['order']['id']
def get_payment_of_order(order):
payload = {'query': 'query{{order(id:{0}){{payment{{id}}}}}}'.format(order)}
r = requests.post(BASE_URL + "/gateway/graphql", json=payload)
return r.json()['data']['order']['payment']['id']
def get_payment_status_etag(payment):
return None
def update_payment_status(payment, status, etag, token):
payload = {'query': 'mutation{{updatePaymentStatus(paymentId:{0},status:PAYED){{success}}}}'.format(payment)}
pprint(payload)
r = requests.post(BASE_URL + "/gateway/graphql", json=payload)
pprint(r.json())
return r.json()['data']['updatePaymentStatus']['success']
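# Note: update_payment_status currently ignores its status, etag and token
# arguments; the mutation always sets the payment status to PAYED.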
if __name__ == '__main__':
token = authenticate()
warehouse1 = create_warehouse("Warehouse 1", token)
warehouse2 = create_warehouse("Warehouse 2", token)
warehouse3 = create_warehouse("Warehouse 3", token)
item1 = create_item("Item 1", 2.99, token)
item2 = create_item("Item 2", 9.99, token)
item3 = create_item("Item 3", 1.49, token)
item4 = create_item("Item 4", 7.59, token)
item5 = create_item("Item 5", 11.99, token)
create_stock_position(item1, warehouse1, 1000, token)
create_stock_position(item1, warehouse2, 1000, token)
create_stock_position(item1, warehouse3, 1000, token)
create_stock_position(item2, warehouse1, 1000, token)
create_stock_position(item2, warehouse3, 1000, token)
create_stock_position(item3, warehouse1, 1000, token)
create_stock_position(item3, warehouse2, 1000, token)
create_stock_position(item3, warehouse3, 1000, token)
create_stock_position(item4, warehouse1, 1000, token)
create_stock_position(item4, warehouse2, 1000, token)
create_stock_position(item4, warehouse3, 1000, token)
create_stock_position(item5, warehouse1, 1000, token)
create_stock_position(item5, warehouse2, 1000, token)
order1 = create_order({item4: 3}, 1)
time.sleep(0.061)
order2 = create_order({item4: 3}, 2)
time.sleep(0.093)
order3 = create_order({item1: 1, item5: 2, item3: 2}, 3)
time.sleep(0.183)
order4 = create_order({item1: 1, item4: 1, item3: 3, item5: 1}, 4)
time.sleep(0.128)
order5 = create_order({item4: 1, item1: 3}, 5)
time.sleep(0.179)
order6 = create_order({item5: 2, item2: 1, item4: 1, item1: 1}, 6)
time.sleep(0.11)
order7 = create_order({item2: 3}, 7)
time.sleep(0.151)
order8 = create_order({item3: 1, item2: 2}, 8)
time.sleep(0.203)
order9 = create_order({item3: 1, item2: 1, item1: 3}, 9)
time.sleep(0.15)
order10 = create_order({item3: 2}, 10)
time.sleep(0.192)
order11 = create_order({item2: 3, item3: 3}, 11)
time.sleep(0.219)
order12 = create_order({item1: 2}, 12)
time.sleep(0.064)
order13 = create_order({item4: 3, item1: 2}, 13)
time.sleep(0.172)
order14 = create_order({item4: 2, item5: 2}, 14)
time.sleep(0.229)
order15 = create_order({item3: 2}, 15)
time.sleep(0.062)
order16 = create_order({item2: 2}, 16)
time.sleep(0.226)
order17 = create_order({item4: 1, item3: 1, item2: 2, item1: 2}, 17)
time.sleep(0.216)
order18 = create_order({item3: 2}, 18)
time.sleep(0.168)
order19 = create_order({item5: 1, item1: 2, item4: 2, item3: 2}, 19)
time.sleep(0.009)
order20 = create_order({item1: 2, item2: 3, item5: 2, item3: 3}, 20)
time.sleep(0.13)
order21 = create_order({item5: 3}, 21)
time.sleep(0.241)
order22 = create_order({item4: 2}, 22)
time.sleep(0.203)
order23 = create_order({item2: 2, item4: 3, item3: 1, item1: 1}, 23)
time.sleep(0.076)
order24 = create_order({item3: 1, item1: 2}, 24)
time.sleep(0.13)
order25 = create_order({item2: 3, item4: 3, item1: 1, item3: 1}, 25)
time.sleep(0.173)
order26 = create_order({item1: 3, item2: 1}, 26)
time.sleep(0.089)
order27 = create_order({item4: 1, item1: 2, item3: 2}, 27)
time.sleep(0.134)
order28 = create_order({item4: 1, item5: 2}, 28)
time.sleep(0.017)
order29 = create_order({item3: 1, item5: 1}, 29)
time.sleep(0.152)
order30 = create_order({item4: 3, item2: 1, item3: 1}, 30)
time.sleep(0.205)
order31 = create_order({item5: 3}, 31)
time.sleep(0.068)
order32 = create_order({item5: 2, item3: 2}, 32)
time.sleep(0.12)
order33 = create_order({item3: 3}, 33)
time.sleep(0.089)
order34 = create_order({item4: 1}, 34)
time.sleep(0.122)
order35 = create_order({item2: 2, item3: 2, item5: 3}, 35)
time.sleep(0.083)
order36 = create_order({item2: 1, item5: 1, item4: 1}, 36)
time.sleep(0.023)
order37 = create_order({item2: 1, item5: 3}, 37)
time.sleep(0.023)
order38 = create_order({item3: 1, item2: 2, item4: 3, item5: 2}, 38)
time.sleep(0.151)
order39 = create_order({item3: 1, item1: 3, item5: 2, item2: 3}, 39)
time.sleep(0.213)
order40 = create_order({item2: 1, item4: 1, item3: 3, item5: 2}, 40)
time.sleep(0.07)
order41 = create_order({item2: 2, item1: 3, item5: 3, item3: 1}, 41)
time.sleep(0.098)
order42 = create_order({item4: 2, item5: 3, item3: 1}, 42)
time.sleep(0.146)
order43 = create_order({item1: 3, item4: 3, item2: 3}, 43)
time.sleep(0.185)
order44 = create_order({item3: 1}, 44)
time.sleep(0.154)
order45 = create_order({item2: 1, item5: 2}, 45)
time.sleep(0.103)
order46 = create_order({item1: 1, item2: 1, item4: 2}, 46)
time.sleep(0.075)
order47 = create_order({item1: 2, item5: 3, item4: 3, item3: 3}, 47)
time.sleep(0.082)
order48 = create_order({item4: 1, item3: 3, item1: 2}, 48)
time.sleep(0.114)
order49 = create_order({item4: 2, item5: 2, item3: 3}, 49)
time.sleep(0.225)
order50 = create_order({item3: 1}, 50)
time.sleep(0.04)
order51 = create_order({item1: 1}, 51)
time.sleep(0.225)
order52 = create_order({item1: 3, item4: 2, item5: 1, item2: 1}, 52)
time.sleep(0.045)
order53 = create_order({item3: 2, item2: 3, item1: 1}, 53)
time.sleep(0.147)
order54 = create_order({item5: 1, item4: 3, item2: 2, item1: 3}, 54)
time.sleep(0.056)
order55 = create_order({item5: 3, item1: 3}, 55)
time.sleep(0.007)
order56 = create_order({item2: 1, item1: 1, item3: 3, item5: 3}, 56)
time.sleep(0.233)
order57 = create_order({item4: 2, item1: 1, item3: 3}, 57)
time.sleep(0.031)
order58 = create_order({item4: 3, item5: 1, item3: 3}, 58)
time.sleep(0.042)
order59 = create_order({item2: 1, item1: 3, item4: 2}, 59)
time.sleep(0.245)
order60 = create_order({item3: 3, item1: 1, item4: 3, item2: 3}, 60)
time.sleep(0.219)
order61 = create_order({item5: 2, item1: 1, item4: 3}, 61)
time.sleep(0.067)
order62 = create_order({item1: 3}, 62)
time.sleep(0.16)
order63 = create_order({item4: 2, item5: 2}, 63)
time.sleep(0.079)
order64 = create_order({item3: 1, item1: 3}, 64)
time.sleep(0.127)
order65 = create_order({item3: 1, item5: 1, item4: 3}, 65)
time.sleep(0.044)
order66 = create_order({item3: 2, item4: 3, item2: 2}, 66)
time.sleep(0.223)
order67 = create_order({item1: 1}, 67)
time.sleep(0.217)
order68 = create_order({item1: 1, item5: 3, item2: 1, item4: 1}, 68)
time.sleep(0.179)
order69 = create_order({item5: 3}, 69)
time.sleep(0.082)
order70 = create_order({item5: 2, item1: 3, item3: 1, item2: 3}, 70)
time.sleep(0.089)
order71 = create_order({item5: 1, item4: 3, item3: 3}, 71)
time.sleep(0.102)
order72 = create_order({item3: 3, item4: 3}, 72)
time.sleep(0.017)
order73 = create_order({item3: 2, item2: 1, item5: 2, item4: 2}, 73)
time.sleep(0.203)
order74 = create_order({item2: 1, item4: 3}, 74)
time.sleep(0.112)
order75 = create_order({item3: 3, item5: 3}, 75)
time.sleep(0.019)
order76 = create_order({item1: 2, item2: 3}, 76)
time.sleep(0.043)
order77 = create_order({item4: 3, item3: 2}, 77)
time.sleep(0.234)
order78 = create_order({item4: 1}, 78)
time.sleep(0.114)
order79 = create_order({item3: 1}, 79)
time.sleep(0.182)
order80 = create_order({item5: 1, item4: 2}, 80)
time.sleep(0.061)
order81 = create_order({item2: 3, item1: 3}, 81)
time.sleep(0.124)
order82 = create_order({item4: 2, item3: 3, item5: 2}, 82)
time.sleep(0.088)
order83 = create_order({item4: 2, item5: 3, item1: 1}, 83)
time.sleep(0.233)
order84 = create_order({item5: 3, item3: 2, item4: 2, item1: 3}, 84)
time.sleep(0.218)
order85 = create_order({item2: 1}, 85)
time.sleep(0.036)
order86 = create_order({item5: 3, item1: 2, item3: 1}, 86)
time.sleep(0.065)
order87 = create_order({item2: 3}, 87)
time.sleep(0.209)
order88 = create_order({item5: 3, item2: 2, item4: 2, item1: 1}, 88)
time.sleep(0.247)
order89 = create_order({item2: 2, item5: 2}, 89)
time.sleep(0.243)
order90 = create_order({item1: 3}, 90)
time.sleep(0.153)
order91 = create_order({item4: 3, item1: 3, item5: 3}, 91)
time.sleep(0.18)
order92 = create_order({item3: 1, item1: 1, item5: 3}, 92)
time.sleep(0.223)
order93 = create_order({item1: 1, item2: 1, item4: 2}, 93)
time.sleep(0.123)
order94 = create_order({item3: 3, item1: 1, item5: 2}, 94)
time.sleep(0.167)
order95 = create_order({item3: 2, item2: 3}, 95)
time.sleep(0.238)
order96 = create_order({item5: 3, item3: 2, item2: 1}, 96)
time.sleep(0.206)
order97 = create_order({item1: 3}, 97)
time.sleep(0.187)
order98 = create_order({item5: 3, item2: 3}, 98)
time.sleep(0.233)
order99 = create_order({item4: 2, item2: 3, item5: 1, item3: 3}, 99)
time.sleep(0.246)
order100 = create_order({item1: 2, item2: 2, item5: 1}, 100)
time.sleep(0.167)
time.sleep(120)
payment63 = get_payment_of_order(order63)
etag63 = get_payment_status_etag(payment63)
update_payment_status(payment63, 'payed', etag63, token)
time.sleep(0.213)
payment3 = get_payment_of_order(order3)
etag3 = get_payment_status_etag(payment3)
update_payment_status(payment3, 'payed', etag3, token)
time.sleep(0.087)
payment35 = get_payment_of_order(order35)
etag35 = get_payment_status_etag(payment35)
update_payment_status(payment35, 'payed', etag35, token)
time.sleep(0.234)
payment15 = get_payment_of_order(order15)
etag15 = get_payment_status_etag(payment15)
update_payment_status(payment15, 'payed', etag15, token)
time.sleep(0.047)
payment64 = get_payment_of_order(order64)
etag64 = get_payment_status_etag(payment64)
update_payment_status(payment64, 'payed', etag64, token)
time.sleep(0.099)
    payment88 = get_payment_of_order(order88)
    etag88 = get_payment_status_etag(payment88)
    update_payment_status(payment88, 'payed', etag88, token)
# Return values
return train_gid_set, data_list, label_list
@register_ibs_method
def classifier_train_image_svm(
ibs, species_list, output_path=None, dryrun=False, C=1.0, kernel='rbf'
):
from sklearn import svm, preprocessing
# Load data
logger.info('Loading pre-trained features for images')
# Save model pickle
if output_path is None:
output_path = abspath(expanduser(join('~', 'code', 'wbia', 'models')))
ut.ensuredir(output_path)
species_list = [species.lower() for species in species_list]
species_list_str = '.'.join(species_list)
kernel = str(kernel.lower())
args = (
species_list_str,
kernel,
C,
)
output_filename = 'classifier.svm.image.%s.%s.%s.pkl' % args
output_filepath = join(output_path, output_filename)
if not dryrun:
vals = get_classifier_svm_data_labels(ibs, 'TRAIN_SET', species_list)
train_gid_set, data_list, label_list = vals
logger.info('Train SVM scaler using features')
# Train new scaler and model using data and labels
scaler = preprocessing.StandardScaler().fit(data_list)
data_list = scaler.transform(data_list)
logger.info('Train SVM model using features and target labels')
model = svm.SVC(C=C, kernel=kernel, probability=True)
model.fit(data_list, label_list)
model_tup = (
model,
scaler,
)
ut.save_cPkl(output_filepath, model_tup)
# Load model pickle
model_tup_ = ut.load_cPkl(output_filepath)
model_, scaler_ = model_tup_
# Test accuracy
vals = get_classifier_svm_data_labels(ibs, 'TEST_SET', species_list)
train_gid_set, data_list, label_list = vals
# Normalize data
data_list = scaler_.transform(data_list)
label_list_ = model_.predict(data_list)
# score_list_ = model_.decision_function(data_list) # NOQA
score_list_ = model_.predict_proba(data_list) # NOQA
tp, tn, fp, fn = 0, 0, 0, 0
for label_, label in zip(label_list_, label_list):
if label == 1 and label == label_:
tp += 1
elif label == 0 and label == label_:
tn += 1
elif label == 1 and label != label_:
fn += 1
elif label == 0 and label != label_:
fp += 1
else:
raise ValueError
pos, neg = tp + fn, tn + fp
correct = tp + tn
total = tp + tn + fp + fn
accuracy = correct / total
logger.info('Accuracy: %0.02f' % (accuracy,))
    logger.info('\t TP: % 4d (%0.02f %%)' % (tp, tp / pos))
    logger.info('\t FN: % 4d (%0.02f %%)' % (fn, fn / pos))
    logger.info('\t TN: % 4d (%0.02f %%)' % (tn, tn / neg))
    logger.info('\t FP: % 4d (%0.02f %%)' % (fp, fp / neg))
return output_filepath
@register_ibs_method
def classifier_train_image_svm_sweep(ibs, species_list, precompute=True, **kwargs):
depc = ibs.depc_image
test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', species_list)
config_list = [
(0.5, 'rbf'),
(1.0, 'rbf'),
(2.0, 'rbf'),
(0.5, 'linear'),
(1.0, 'linear'),
(2.0, 'linear'),
]
output_filepath_list = []
for C, kernel in config_list:
output_filepath = ibs.classifier_train_image_svm(
species_list, C=C, kernel=kernel, **kwargs
)
output_filepath_list.append(output_filepath)
if precompute:
config = {
'algo': '_COMBINED',
'features': True,
'feature2_algo': 'resnet',
'feature2_chip_masking': False,
'classify': True,
'classifier_algo': 'svm',
'classifier_masking': False,
'classifier_weight_filepath': output_filepath,
}
depc.get_rowids('localizations_features', test_gid_list, config=config)
depc.get_rowids('localizations_classifier', test_gid_list, config=config)
# config['feature2_chip_masking'] = True
# config['classifier_masking'] = True
# depc.get_rowids('localizations_features', test_gid_list, config=config)
# depc.get_rowids('localizations_classifier', test_gid_list, config=config)
return output_filepath_list
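# The sweep above trains one SVM per (C, kernel) configuration and, when
# precompute is True, populates the 'localizations_features' and
# 'localizations_classifier' depcache tables for the TEST_SET images using
# each freshly trained model.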
@register_ibs_method
def classifier2_train_image_rf(
ibs, species_list, output_path=None, dryrun=False, n_estimators=100
):
from sklearn import ensemble, preprocessing
# Load data
logger.info('Loading pre-trained features for images')
# Save model pickle
if output_path is None:
output_path = abspath(expanduser(join('~', 'code', 'wbia', 'models')))
ut.ensuredir(output_path)
species_list = [species.lower() for species in species_list]
species_list_str = '.'.join(species_list)
args = (
species_list_str,
n_estimators,
)
output_filename = 'classifier2.rf.image.%s.%s.pkl' % args
output_filepath = join(output_path, output_filename)
if not dryrun:
vals = get_classifier2_rf_data_labels(ibs, 'TRAIN_SET', species_list)
train_gid_set, data_list, label_list = vals
logger.info('Train data scaler using features')
# Train new scaler and model using data and labels
scaler = preprocessing.StandardScaler().fit(data_list)
data_list = scaler.transform(data_list)
logger.info('Train RF model using features and target labels')
model = ensemble.RandomForestClassifier(
n_estimators=n_estimators, max_features=None
)
model.fit(data_list, label_list)
model_tup = (
model,
scaler,
)
ut.save_cPkl(output_filepath, model_tup)
# Load model pickle
model_tup_ = ut.load_cPkl(output_filepath)
model_, scaler_ = model_tup_
# Test accuracy
vals = get_classifier2_rf_data_labels(ibs, 'TEST_SET', species_list)
train_gid_set, data_list, label_list = vals
# Normalize data
data_list = scaler_.transform(data_list)
label_list_ = model_.predict(data_list)
# score_list_ = model_.decision_function(data_list) # NOQA
score_list_ = model_.predict_proba(data_list) # NOQA
tp, tn, fp, fn = 0, 0, 0, 0
for label_, label in zip(label_list_, label_list):
if label == 1 and label == label_:
tp += 1
elif label == 0 and label == label_:
tn += 1
elif label == 1 and label != label_:
fn += 1
elif label == 0 and label != label_:
fp += 1
else:
raise ValueError
pos, neg = tp + fn, tn + fp
correct = tp + tn
total = tp + tn + fp + fn
accuracy = correct / total
logger.info('Accuracy: %0.02f' % (accuracy,))
    logger.info('\t TP: % 4d (%0.02f %%)' % (tp, tp / pos))
    logger.info('\t FN: % 4d (%0.02f %%)' % (fn, fn / pos))
    logger.info('\t TN: % 4d (%0.02f %%)' % (tn, tn / neg))
    logger.info('\t FP: % 4d (%0.02f %%)' % (fp, fp / neg))
return output_filepath
@register_ibs_method
def classifier2_train_image_rf_sweep(ibs, species_list, precompute=True, **kwargs):
depc = ibs.depc_image
test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', species_list)
config_list = [
10,
]
output_filepath_list = []
for n_estimators in config_list:
output_filepath = ibs.classifier2_train_image_rf(
species_list, n_estimators=n_estimators, **kwargs
)
output_filepath_list.append(output_filepath)
if precompute:
config = {
'classifier_two_algo': 'rf',
'classifier_two_weight_filepath': output_filepath,
}
depc.get_rowids('classifier_two', test_gid_list, config=config)
return output_filepath_list
config_list = [
# {'label': 'All Species', 'grid' : False, 'config_filepath' : 'candidacy', 'weight_filepath' : 'candidacy', 'species_set' : species_set},
# {'label': 'Masai Giraffe', 'grid' : False, 'config_filepath' : 'candidacy', 'weight_filepath' : 'candidacy', 'species_set' : [ species_set[0] ]},
# {'label': 'Reticulated Giraffe', 'grid' : False, 'config_filepath' : 'candidacy', 'weight_filepath' : 'candidacy', 'species_set' : [ species_set[1] ]},
# {'label': 'Sea Turtle', 'grid' : False, 'config_filepath' : 'candidacy', 'weight_filepath' : 'candidacy', 'species_set' : [ species_set[2] ]},
# {'label': 'Whale Fluke', 'grid' : False, 'config_filepath' : 'candidacy', 'weight_filepath' : 'candidacy', 'species_set' : [ species_set[3] ]},
# {'label': 'Grevy\'s Zebra', 'grid' : False, 'config_filepath' : 'candidacy', 'weight_filepath' : 'candidacy', 'species_set' : [ species_set[4] ]},
# {'label': 'Plains Zebra', 'grid' : False, 'config_filepath' : 'candidacy', 'weight_filepath' : 'candidacy', 'species_set' : [ species_set[5] ]},
# {'label': 'V1', 'grid' : False, 'config_filepath' : 'v1', 'weight_filepath' : 'v1'},
# {'label': 'V1 (GRID)', 'grid' : True, 'config_filepath' : 'v1', 'weight_filepath' : 'v1'},
# {'label': 'V2', 'grid' : False, 'config_filepath' : 'v2', 'weight_filepath' : 'v2'},
# {'label': 'V2 (GRID)', 'grid' : True, 'config_filepath' : 'v2', 'weight_filepath' : 'v2'},
# {'label': 'V3', 'grid' : False, 'config_filepath' : 'v3', 'weight_filepath' : 'v3'},
# {'label': 'V3 (GRID)', 'grid' : True, 'config_filepath' : 'v3', 'weight_filepath' : 'v3'},
# {'label': 'V3 Whale Shark', 'grid' : False, 'config_filepath' : 'v3', 'weight_filepath' : 'v3', 'species_set' : set(['whale_shark'])},
# {'label': 'V3 Whale Fluke', 'grid' : True, 'config_filepath' : 'v3', 'weight_filepath' : 'v3', 'species_set' : set(['whale_fluke'])},
# {'label': 'V3', 'grid' : False, 'config_filepath' : 'v3', 'weight_filepath' : 'v3', 'species_set' : set(['whale_fluke'])},
# {'label': 'Whale Fluke V1', 'grid' : False, 'config_filepath' : 'whalefluke', 'weight_filepath' : 'whalefluke', 'species_set' : set(['whale_fluke'])},
# {'label': 'Whale Fluke V2', 'grid' : False, 'config_filepath' : 'whalefluke_v2', 'weight_filepath' : 'whalefluke_v2', 'species_set' : set(['whale_fluke'])},
# {'label': 'Green', 'grid' : False, 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'include_parts': True, 'species_set' : set(['turtle_green']), 'check_species': False},
# {'label': 'Hawksbill', 'grid' : False, 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'include_parts': True, 'species_set' : set(['turtle_hawksbill']), 'check_species': False},
# {'label': 'Sea Turtle', 'grid' : False, 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'include_parts': True, 'species_set' : set(['turtle_green', 'turtle_hawksbill']), 'check_species': False},
# {'label': 'Green (Head)', 'grid' : False, 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'include_parts': True, 'species_set' : set(['turtle_green+head']), 'check_species': False},
# {'label': 'Hawksbill (Head)', 'grid' : False, 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'include_parts': True, 'species_set' : set(['turtle_hawksbill+head']), 'check_species': False},
# {'label': 'Sand Tiger', 'grid' : False, 'config_filepath' : 'sandtiger', 'weight_filepath' : 'sandtiger'},
# {'label': 'Sand Tiger (Grid)', 'grid' : True, 'config_filepath' : 'sandtiger', 'weight_filepath' : 'sandtiger'},
# {'label': 'Hammerhead', 'grid' : False, 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead'},
# {'label': 'Hammerhead (Grid)', 'grid' : True, 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead'},
# {'label': 'Sea Turtle', 'grid' : False, 'config_filepath' : 'sea', 'weight_filepath' : 'sea', 'species_set' : set(['turtle_general'])},
# {'label': 'Shark', 'grid' : False, 'config_filepath' : 'sea', 'weight_filepath' : 'sea', 'species_set' : set(['shark_general'])},
# {'label': 'Whaleshark', 'grid' : False, 'config_filepath' : 'sea', 'weight_filepath' : 'sea', 'species_set' : set(['whaleshark'])},
# {'label': 'Sea Turtle (Green)', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['turtle_green'])},
{
'label': 'Hawksbill 00',
'grid': False,
'algo': 'lightnet',
'config_filepath': 'seaturtle',
'weight_filepath': 'seaturtle',
'include_parts': True,
'sensitivity': 0.01,
'nms': True,
'nms_thresh': 0.00,
'species_set': set(['turtle_hawksbill']),
},
{
        'label':
"""
return self.reference_string()
def __unicode__(self):
"""
x.__unicode__() <==> unicode(x)
Return unicode version of passage string. Uses en-dash for ranges.
"""
return self.reference_string(dash=u"–")
def abbr(self):
"""
Return abbreviated passage string
"""
return self.reference_string(abbreviated=True)
def uabbr(self):
"""
Return unicode-type abbreviated passage string. Uses en-dash for ranges.
"""
return self.reference_string(abbreviated=True, dash=u"–")
def __repr__(self):
"""
x.__repr__() <==> x
"""
return "PassageCollection(" + ", ".join([repr(x) for x in self]) + ")"
class PassageDelta(object):
"""
Extension (or contraction) of passages, in chapter or verse increments.
"""
def __init__(self, chapters=0, verses=0, passage_start=False):
"""
PassageDelta initialisation.
To add (or remove) chapters and/or verses to the START of a passage, set
passage_start=True. Otherwise chapters/verses will be added to the END
of a passage.
"""
self.passage_start = passage_start
self.delta_chapter = chapters
self.delta_verse = verses
def __add__(self, other):
"""
x.__add__(y) <==> x + y
Addition of Passage and PassageDelta objects
"""
if isinstance(other, Passage):
if not self.passage_start:
# Add verses to END of passage
# Check whether passage currently finishes at the end of a
# chapter
if other.end_verse == other.bd.last_verses[other.start_book_n,
other.end_chapter]:
finishes_at_end_of_chapter = True
else:
finishes_at_end_of_chapter = False
# Compute chapter difference operation first
(end_book_n,
end_chapter,
end_verse) = delta_chapter(
self.delta_chapter,
other.end_book_n,
other.end_chapter,
other.end_verse,
other.bd,
finishes_at_end_of_chapter=finishes_at_end_of_chapter)
# Verse difference operation
(end_book_n,
end_chapter,
end_verse) = delta_verse(
self.delta_verse,
end_book_n,
end_chapter,
end_verse,
other.bd)
return Passage(
other.start_book_n,
other.start_chapter,
other.start_verse,
end_chapter,
end_verse,
end_book_n)
else:
# Add verses to START of passage
# Compute chapter difference operation first
(start_book_n,
start_chapter,
start_verse) = delta_chapter(-self.delta_chapter,
other.start_book_n,
other.start_chapter,
other.start_verse,
other.bd)
# Verse difference operation
(start_book_n,
start_chapter,
start_verse) = delta_verse(-self.delta_verse,
start_book_n,
start_chapter,
start_verse,
other.bd)
return Passage(start_book_n,
start_chapter,
start_verse,
other.end_chapter,
other.end_verse,
other.end_book_n)
else:
return NotImplemented
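    # Illustrative behaviour: for a passage that ends on the last verse of a
    # chapter, adding PassageDelta(chapters=1) extends it to the end of the
    # following chapter, while PassageDelta(verses=3, passage_start=True)
    # moves the passage start back by three verses; both operations are
    # clamped at the first and last verses of the Bible.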
def __radd__(self, other):
return self.__add__(other)
def __repr__(self):
"""
x.__repr__() <==> x
"""
return "PassageDelta(chapters="+repr(self.delta_chapter)+", verses=" +\
repr(self.delta_verse)+", passage_start=" +\
repr(self.passage_start)+")"
def get_passage_text(passage, **kwargs):
""" Get text of supplied Passage object """
warnings.warn("Deprecated function; use Passage.text or " +
"PassageCollection.text instead", DeprecationWarning)
translation = kwargs.get('translation', "ESV")
return bible_data(translation).get_passage_text(passage, **kwargs)
# === Internal functions ===
def book_name(bible_data, book_n, abbreviated=False, single_psalm=False):
""" Return full or abbreviated book name. """
if abbreviated:
return bible_data.book_names[book_n][2]
else:
if single_psalm:
return "Psalm"
else:
return bible_data.book_names[book_n][1]
def book_total_verses(bible_data, start_book_n, end_book_n=None):
"""
Return total number of verses in book or book range,
as a dictionary keyed book to book_n
"""
    if end_book_n is None:
end_book_n = start_book_n
total_verses = defaultdict(lambda: 0)
for book_n in range(start_book_n, end_book_n+1):
for chapter in range(1, bible_data.number_chapters[book_n]+1):
total_verses[book_n] += bible_data.last_verses[book_n, chapter] - \
len(bible_data.missing_verses.get((book_n, chapter), []))
return total_verses
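# For example (with the standard 1-66 book numbering), book_total_verses(bd, 1)
# counts the verses of Genesis under key 1, and book_total_verses(bd, 1, 2)
# also counts Exodus under key 2; verses listed as missing for the translation
# are excluded from the totals.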
def delta_chapter(chapter_difference, current_book_n, current_chapter,
current_verse, bible_data, finishes_at_end_of_chapter=False):
new_chapter = current_chapter + chapter_difference
if new_chapter > bible_data.number_chapters[current_book_n]:
# Got to end of book; need to go to next book
overflow_chapters = new_chapter - \
bible_data.number_chapters[current_book_n]
if current_book_n == 66:
# Got to the end of the bible; can't go any further
c = bible_data.number_chapters[current_book_n]
v = bible_data.last_verses[current_book_n, c]
return (current_book_n, c, v)
else:
return delta_chapter(overflow_chapters, current_book_n+1, 0,
current_verse, bible_data, finishes_at_end_of_chapter)
elif new_chapter < 1:
# Got to start of book; need to go to previous book
overflow_chapters = new_chapter - 1
if current_book_n == 1:
# Got to start of the bible; can't go any further
return (1, 1, 1)
else:
c = bible_data.number_chapters[current_book_n-1]
return delta_chapter(overflow_chapters, current_book_n-1, c+1,
current_verse, bible_data, finishes_at_end_of_chapter)
else:
if finishes_at_end_of_chapter or current_verse >\
bible_data.last_verses[current_book_n, new_chapter]:
current_verse = bible_data.last_verses[current_book_n, new_chapter]
return (current_book_n, new_chapter, current_verse)
def delta_verse(verse_difference, current_book_n, current_chapter,
current_verse, bible_data):
new_verse = current_verse + verse_difference
if new_verse > bible_data.last_verses[current_book_n, current_chapter]:
# Got to end of chapter; need to go to next chapter
overflow_verses = new_verse - \
bible_data.last_verses[current_book_n, current_chapter]
if current_chapter == bible_data.number_chapters[current_book_n]:
# Got to end of book; need to go to next book
if current_book_n == 66:
# Got to end of the bible; can't go any further
c = bible_data.number_chapters[current_book_n]
v = bible_data.last_verses[current_book_n, c]
return (current_book_n, c, v)
else:
return delta_verse(overflow_verses, current_book_n+1, 1, 0,
bible_data)
else:
# Next chapter within the same book
return delta_verse(overflow_verses, current_book_n,
current_chapter+1, 0, bible_data)
elif new_verse < 1:
# Got to start of chapter; need to go to previous chapter
overflow_verses = new_verse - 1
if current_chapter == 1:
# Got to start of book; need to go to previous book
if current_book_n == 1:
# Got to start of the bible; can't go any further
return (1, 1, 1)
else:
c = bible_data.number_chapters[current_book_n-1]
v = bible_data.last_verses[current_book_n-1, c]
return delta_verse(overflow_verses, current_book_n-1, c, v+1,
bible_data)
else:
c = current_chapter - 1
v = bible_data.last_verses[current_book_n, c]
return delta_verse(overflow_verses, current_book_n, c, v+1,
bible_data)
else:
return (current_book_n, current_chapter, new_verse)
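# Illustrative behaviour of delta_verse (assuming a standard versification where
# Genesis 1 has 31 verses): a difference of +2 applied to Genesis 1:30 rolls
# over to Genesis 2:1, while a negative difference at Genesis 1:1 is clamped to
# Genesis 1:1; delta_chapter clamps in the same way at the ends of the Bible.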
class MCBGroup(object):
"""
Internal-use class for creating reference strings for groups of passages
that are all from the same multi-chapter book
"""
def __init__(self):
# Dictionary of reference objects (each within a list), indexed by order
# that they were added
self.bunches = defaultdict(lambda: [])
# Boolean indicating whether corresponding self.bunches reference is for
# a full chapter
self.full_chapter_bunch = defaultdict(lambda: False)
self.order = 0
self.last_full_chapter_loc = -1 # Order of last full-chapter reference
# [chapter, order] of last reference that wasn't a full chapter
self.last_partial_chapter = [None, -1]
def add(self, reference):
# Set the book_n variable if this is the first passage added
if self.order == 0:
self.start_book_n = reference.start_book_n
else:
if reference.start_book_n != self.start_book_n:
raise Exception("All passages added to an MCBGroup must be from the same book")
if reference.complete_chapter(multiple=True):
# Reference is one or more full chapters in length
if self.last_full_chapter_loc >= 0:
# Last reference was a full chapter, so add it to previous
# 'bunch'
self.bunches[self.last_full_chapter_loc].append(reference)
else:
# Add new bunch
self.bunches[self.order].append(reference)
self.last_full_chapter_loc = self.order
self.full_chapter_bunch[self.order] = True
# Reset last_partial_chapter
self.last_partial_chapter = [None, -1]
else:
# Reference is not a full-chapter length passage
if reference.start_chapter == reference.end_chapter:
# Some verse range that is within the same chapter
if reference.start_chapter == self.last_partial_chapter[0]:
# Same chapter as for last passage, so add to previous bunch
self.bunches[self.last_partial_chapter[1]].append(
reference)
else:
# Different to last passage
self.bunches[self.order].append(reference)
self.last_partial_chapter = [
reference.start_chapter, self.order]
else:
# Verse range over two or more chapters, between arbitrary
# verses (e.g. 5:2-7:28)
self.last_partial_chapter = [None, -1]
self.bunches[self.order].append(reference)
self.last_full_chapter_loc = -1
self.order += 1
def reference_string(self, abbreviated, dash):
if self.order == 0:
# No passages have been added to bunch; return blank.
return ""
# Helper functions
def full_ch_ref(reference, verse_encountered):
# Chapter string for references that are one or many full chapters
if verse_encountered:
if reference.start_chapter == reference.end_chapter:
return str(reference.start_chapter) + ":" +\
str(reference.start_verse) + dash +\
str(reference.end_verse)
else:
return str(reference.start_chapter) + ":" +\
str(reference.start_verse) + dash +\
str(reference.end_chapter) + ":" +\
str(reference.end_verse)
else:
if reference.start_chapter == reference.end_chapter:
return str(reference.start_chapter)
else:
return str(reference.start_chapter) + dash +\
str(reference.end_chapter)
def verses_only(reference):
# Verse string
if reference.start_verse == reference.end_verse:
return str(reference.start_verse)
else:
return str(reference.start_verse) + dash +\
str(reference.end_verse)
# List of passage bunches, sorted by order-of-addition
ordered_bunches = sorted(list(self.bunches.items()), key=itemgetter(0))
# Iterate through bunches, creating their textual representations
textual_bunches = []
verse_encountered = False
for order, bunch in ordered_bunches:
if self.full_chapter_bunch[order]:
# All passages in this bunch are for full chapters
textual_bunches.append(
", ".join([full_ch_ref(x, verse_encountered) for x
in bunch]))
else:
# Not a full-chapter bunch.
verse_encountered = True
if len(bunch) == 1:
# NB: this bunch may be over two or more chapters
if bunch[0].start_chapter == bunch[0].end_chapter:
textual_bunches.append(
str(bunch[0].start_chapter) + ":" +
verses_only(bunch[0]))
else:
textual_bunches.append(
str(bunch[0].start_chapter) + ":" +
str(bunch[0].start_verse) + dash +
str(bunch[0].end_chapter) + ":" +
str(bunch[0].end_verse))
else:
# Guaranteed (via self.add() algorithm) to be within same
# chapter
textual_bunches.append(", ".join(
[str(bunch[0].start_chapter) + ":" + verses_only(x) for
x in bunch]))
if abbreviated:
book = bibledata.book_names[self.start_book_n][2]
else:
book = bibledata.book_names[self.start_book_n][1]
return book + " " + ", ".join(textual_bunches)
def check_reference(bd, start_book_n, start_chapter=None, start_verse=None,
end_book_n=None, end_chapter=None, end_verse=None):
"""
Check and normalise numeric reference inputs (start_chapter, start_verse, end_chapter and end_verse)
Where possible, missing inputs will be inferred. Thus for example, if start_chapter and end_chapter
are provided but start_verse and end_verse are not, it will be assumed that the whole chapter was intended.
"""
if end_book_n is None:
end_book_n = start_book_n
# Check which numbers have been provided.
sc = sv = ec = ev = True
if start_chapter is None:
| |
# -*- coding: utf-8 -*-
#
# (c) Copyright 2001-2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: <NAME>, <NAME> <NAME>
#
#from __future__ import generators
# Std Lib
import sys
import time
import os
import gzip
import select
import struct
import signal
from base.sixext.moves import configparser
# Local
from base.g import *
from base import device, utils, pml, maint, models, pkit, os_utils
from prnt import cups
from base.sixext import PY3
from base.codes import *
from .ui_utils import *
import hpmudext
from installer.core_install import *
# Qt
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import collections
# dbus
try:
import dbus
from dbus.mainloop.pyqt5 import DBusQtMainLoop
from dbus import lowlevel
except ImportError:
log.error("Unable to load DBus libraries. Please check your installation and try again.")
if PY3: # Workaround due to incomplete Python3 support in Linux distros.
log.error("Please upgrade your python installation to the latest available version.")
sys.exit(1)
import warnings
# Ignore: .../dbus/connection.py:242: DeprecationWarning: object.__init__() takes no parameters
# (occurring on Python 2.6/dBus 0.83/Ubuntu 9.04)
warnings.simplefilter("ignore", DeprecationWarning)
# Main form
from .devmgr5_base import Ui_MainWindow
from .devmgr_ext import Ui_MainWindow_Derived
# Aux. dialogs
from .faxsetupdialog import FaxSetupDialog
from .plugindialog import PluginDialog
from .firmwaredialog import FirmwareDialog
from .aligndialog import AlignDialog
from .printdialog import PrintDialog
from .makecopiesdialog import MakeCopiesDialog
from .sendfaxdialog import SendFaxDialog
from .fabwindow import FABWindow
from .devicesetupdialog import DeviceSetupDialog
from .printtestpagedialog import PrintTestPageDialog
from .infodialog import InfoDialog
from .cleandialog import CleanDialog
from .colorcaldialog import ColorCalDialog
from .linefeedcaldialog import LineFeedCalDialog
from .pqdiagdialog import PQDiagDialog
from .nodevicesdialog import NoDevicesDialog
from .aboutdialog import AboutDialog
# Other forms and controls
from .settingsdialog import SettingsDialog
from .printsettingstoolbox import PrintSettingsToolbox
from base import os_utils
# all in seconds
MIN_AUTO_REFRESH_RATE = 5
MAX_AUTO_REFRESH_RATE = 60
DEF_AUTO_REFRESH_RATE = 30
device_list = {} # { Device_URI : device.Device(), ... }
model_obj = models.ModelData() # Used to convert dbus xformed data back to plain Python types
# ***********************************************************************************
#
# ITEM/UTILITY UI CLASSES
#
# ***********************************************************************************
class FuncViewItem(QListWidgetItem):
def __init__(self, parent, text, pixmap, tooltip_text, cmd):
QListWidgetItem.__init__(self, QIcon(pixmap), text, parent)
self.tooltip_text = tooltip_text
self.cmd = cmd
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class DeviceViewItem(QListWidgetItem):
def __init__(self, parent, text, pixmap, device_uri, is_avail=True):
QListWidgetItem.__init__(self, QIcon(pixmap), text, parent)
self.device_uri = device_uri
self.is_avail = is_avail
self.setTextAlignment(Qt.AlignHCenter)
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class PluginInstall(QObject):
def __init__(self, parent, plugin_type, plugin_installed):
self.parent = parent
self.plugin_type = plugin_type
self.plugin_installed = plugin_installed
def exec_(self):
install_plugin = True
if self.plugin_installed:
install_plugin = QMessageBox.warning(self.parent,
self.parent.windowTitle(),
self.__tr("<b>The HPLIP plugin is already installed.</b><p>Do you want to continue and re-install it?"),
QMessageBox.Yes,
QMessageBox.No,
QMessageBox.NoButton) == QMessageBox.Yes
if install_plugin:
ok, sudo_ok = pkit.run_plugin_command(self.plugin_type == PLUGIN_REQUIRED, self.parent.cur_device.mq['plugin-reason'])
if not sudo_ok:
QMessageBox.critical(self.parent,
self.parent.windowTitle(),
self.__tr("<b>Unable to find an appropriate su/sudo utility to run hp-plugin.</b><p>Install kdesu, gnomesu, or gksu.</p>"),
QMessageBox.Ok,
QMessageBox.NoButton,
QMessageBox.NoButton)
def __tr(self,s,c = None):
return qApp.translate("DevMgr5",s,c)
# ***********************************************************************************
#
# MAINWINDOW
#
# ***********************************************************************************
'''
class Ui_MainWindow_Derived(Ui_MainWindow):
def setupUi(self, MainWindow, latest_available_version, Is_autoInstaller_distro):
super().setupUi(MainWindow)
self.DiagnoseQueueAction = QAction(MainWindow)
self.DiagnoseQueueAction.setObjectName("DiagnoseQueueAction")
self.DiagnoseHPLIPAction = QAction(MainWindow)
self.DiagnoseHPLIPAction.setObjectName("DiagnoseHPLIPAction")
self.latest_available_version = latest_available_version
self.Is_autoInstaller_distro = Is_autoInstaller_distro
if self.latest_available_version != "":
self.tab_3 = QWidget()
self.tab_3.setObjectName("tab_3")
self.label = QLabel(self.tab_3)
self.label.setGeometry(QRect(30, 45, 300, 17))
self.label.setObjectName("label")
if self.Is_autoInstaller_distro:
self.InstallLatestButton = QPushButton(self.tab_3)
self.InstallLatestButton.setGeometry(QRect(351, 40, 96, 27))
self.InstallLatestButton.setObjectName("pushButton")
else:
self.ManualInstalllabel = QLabel(self.tab_3)
self.ManualInstalllabel.setGeometry(QRect(30, 70,300, 45))
self.ManualInstalllabel.setObjectName("label")
self.InstallLatestButton = QPushButton(self.tab_3)
self.InstallLatestButton.setGeometry(QRect(295, 80, 110, 25))
self.InstallLatestButton.setObjectName("pushButton")
self.Tabs.addTab(self.tab_3, "")
# super().setupUi(MainWindow)
def retranslateUi(self, MainWindow):
super().retranslateUi(MainWindow)
if self.latest_available_version != "":
self.label.setText(QtGui.QApplication.translate("MainWindow", "New version of HPLIP-%s is available"%self.latest_available_version, None))
self.Tabs.setTabText(self.Tabs.indexOf(self.tab_3), QtGui.QApplication.translate("MainWindow", "Upgrade", None))
if self.Is_autoInstaller_distro:
self.InstallLatestButton.setText(QtGui.QApplication.translate("MainWindow", "Install now", None))
else:
msg="Please install manually as mentioned in "
self.ManualInstalllabel.setText(QtGui.QApplication.translate("MainWindow", msg, None))
self.InstallLatestButton.setText(QtGui.QApplication.translate("MainWindow", "HPLIP website", None))
'''
class DevMgr5(Ui_MainWindow_Derived, Ui_MainWindow, QMainWindow):
def __init__(self, toolbox_version, initial_device_uri=None,
dbus_loop=None, parent=None, name=None, fl=0):
# QMainWindow.__init__(self, parent)
super(DevMgr5, self).__init__(parent)
log.debug("Initializing toolbox UI (Qt5)...")
log.debug("HPLIP Version: %s" % prop.installed_version)
self.toolbox_version = toolbox_version
self.initial_device_uri = initial_device_uri
self.device_vars = {}
self.num_devices = 0
self.cur_device = None
self.cur_printer = None
self.updating = False
self.init_failed = False
self.service = None
self.Is_autoInstaller_distro = False # True-->tier1(supports auto installation). False--> tier2(manual installation)
# Distro information
core = CoreInstall(MODE_CHECK)
# core.init()
self.Is_autoInstaller_distro = core.is_auto_installer_support()
# User settings
self.user_settings = UserSettings()
self.user_settings.load()
self.user_settings.debug()
self.cur_device_uri = self.user_settings.last_used_device_uri
installed_version=sys_conf.get('hplip','version')
if not utils.Is_HPLIP_older_version( installed_version, self.user_settings.latest_available_version):
self.setupUi(self,"",self.Is_autoInstaller_distro)
else:
self.setupUi(self, self.user_settings.latest_available_version,self.Is_autoInstaller_distro)
# Other initialization
self.initDBus()
self.initPixmaps()
self.initMisc()
self.initUI()
cups.setPasswordCallback(showPasswordUI)
if not prop.doc_build:
self.ContentsAction.setEnabled(False)
self.allow_auto_refresh = True
QTimer.singleShot(0, self.initalUpdate)
# ***********************************************************************************
#
# INIT
#
# ***********************************************************************************
# TODO: Make dbus init success mandatory, else exit
def initDBus(self):
self.dbus_loop = DBusQtMainLoop(set_as_default=True)
self.dbus_avail, self.service, self.session_bus = device.init_dbus(self.dbus_loop)
if not self.dbus_avail:
log.error("dBus initialization error. Exiting.")
self.init_failed = True
return
# Receive events from the session bus
self.session_bus.add_signal_receiver(self.handleSessionSignal, sender_keyword='sender',
destination_keyword='dest', interface_keyword='interface',
member_keyword='member', path_keyword='path')
def initPixmaps(self):
self.func_icons_cached = False
self.func_icons = {}
self.device_icons = {}
# Application icon
self.setWindowIcon(QIcon(load_pixmap('hp_logo', '128x128')))
self.fax_icon = load_pixmap("fax2", "other")
def initUI(self):
# Setup device icon list
self.DeviceList.setSortingEnabled(True)
self.DeviceList.setContextMenuPolicy(Qt.CustomContextMenu)
self.setDeviceListViewMode(QListView.IconMode)
self.ViewAsIconsAction.triggered.connect(lambda: self.setDeviceListViewMode(QListView.IconMode))
self.ViewAsListAction.triggered.connect(lambda: self.setDeviceListViewMode(QListView.ListMode))
self.DeviceList.customContextMenuRequested["const QPoint &"].connect(self.DeviceList_customContextMenuRequested)
# Setup main menu
self.DeviceRefreshAction.setIcon(QIcon(load_pixmap("refresh1", "16x16")))
self.DeviceRefreshAction.triggered.connect(self.DeviceRefreshAction_activated)
self.RefreshAllAction.setIcon(QIcon(load_pixmap("refresh", "16x16")))
self.RefreshAllAction.triggered.connect(self.RefreshAllAction_activated)
self.SetupDeviceAction.setIcon(QIcon(load_pixmap('list_add', '16x16')))
self.SetupDeviceAction.triggered.connect(self.SetupDeviceAction_activated)
self.RemoveDeviceAction.setIcon(QIcon(load_pixmap('list_remove', '16x16')))
self.RemoveDeviceAction.triggered.connect(self.RemoveDeviceAction_activated)
self.PreferencesAction.setIcon(QIcon(load_pixmap('settings', '16x16')))
self.PreferencesAction.triggered.connect(self.PreferencesAction_activated)
self.DiagnoseQueueAction.setIcon(QIcon(load_pixmap('warning', '16x16')))
self.DiagnoseQueueAction.triggered.connect(self.DiagnoseQueueAction_activated)
self.DiagnoseHPLIPAction.setIcon(QIcon(load_pixmap('troubleshoot', '16x16')))
self.DiagnoseHPLIPAction.triggered.connect(self.DiagnoseHPLIP_activated)
self.ContentsAction.setIcon(QIcon(load_pixmap("help", "16x16")))
self.ContentsAction.triggered.connect(self.helpContents)
self.QuitAction.setIcon(QIcon(load_pixmap("quit", "16x16")))
self.QuitAction.triggered.connect(self.quit)
self.AboutAction.triggered.connect(self.helpAbout)
self.PrintControlPrinterNameCombo.activated["const QString &"].connect(self.PrintControlPrinterNameCombo_activated)
self.PrintSettingsPrinterNameCombo.activated["const QString &"].connect(self.PrintSettingsPrinterNameCombo_activated)
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Init tabs/controls
self.initActionsTab()
self.initStatusTab()
self.initSuppliesTab()
self.initPrintSettingsTab()
self.initPrintControlTab()
self.Tabs.currentChanged[int].connect(self.Tabs_currentChanged)
# Resize the splitter so that the device list starts as a single column
self.splitter.setSizes([80, 600])
# Setup the Device List
self.DeviceList.setIconSize(QSize(60, 60))
self.DeviceList.currentItemChanged["QListWidgetItem *", "QListWidgetItem *"].connect(self.DeviceList_currentChanged)
def initMisc(self):
self.TabIndex = { 0: self.updateActionsTab,
1: self.updateStatusTab,
2: self.updateSuppliesTab,
3: self.updatePrintSettingsTab,
4: self.updatePrintControlTab,
5:self.updateHPLIPupgrade,
}
# docs
self.docs = "http://hplip.sf.net"
if prop.doc_build:
g = os.path.join(sys_conf.get('dirs', 'doc'), 'index.html')
if os.path.exists(g):
self.docs = "file://%s" % g
# support
self.support = "https://launchpad.net/hplip"
def initalUpdate(self):
if self.init_failed:
self.close()
return
self.rescanDevices()
cont = True
if self.initial_device_uri is not None:
if not self.activateDevice(self.initial_device_uri):
log.error("Device %s not found" % self.initial_device_uri)
cont = False
if self.cur_printer:
self.getPrinterState()
if self.printer_state == cups.IPP_PRINTER_STATE_STOPPED:
self.cur_device.sendEvent(EVENT_PRINTER_QUEUE_STOPPED, self.cur_printer)
if not self.printer_accepting:
self.cur_device.sendEvent(EVENT_PRINTER_QUEUE_REJECTING_JOBS, self.cur_printer)
def activateDevice(self, device_uri):
log.debug(log.bold("Activate: %s %s %s" % ("*"*20, device_uri, "*"*20)))
index = 0
d = self.DeviceList.item(index) #firstItem()
found = False
while d is not None:
if d.device_uri == device_uri:
found = True
self.DeviceList.setSelected(d, True)
self.DeviceList.setCurrentItem(d)
break
index += 1
d = self.DeviceList.item(index)
return found
# ***********************************************************************************
#
# UPDATES/NOTIFICATIONS
#
# ***********************************************************************************
def handleSessionSignal(self, *args, **kwds):
if kwds['interface'] == 'com.hplip.Toolbox' and \
kwds['member'] == 'Event':
log.debug("Handling event...")
event = device.Event(*args[:6])
event.debug()
if event.event_code < EVENT_MIN_USER_EVENT:
pass
elif event.event_code == EVENT_DEVICE_UPDATE_REPLY:
log.debug("EVENT_DEVICE_UPDATE_REPLY (%s)" % event.device_uri)
dev = self.findDeviceByURI(event.device_uri)
if dev is not None:
try:
self.service.GetStatus(event.device_uri, reply_handler=self.handleStatusReply,
error_handler=self.handleStatusError)
except dbus.exceptions.DBusException as e:
log.error("dbus call to GetStatus() failed.")
elif event.event_code == EVENT_USER_CONFIGURATION_CHANGED:
log.debug("EVENT_USER_CONFIGURATION_CHANGED")
self.user_settings.load()
elif event.event_code == EVENT_HISTORY_UPDATE:
log.debug("EVENT_HISTORY_UPDATE (%s)" % event.device_uri)
dev = self.findDeviceByURI(event.device_uri)
if dev is not None:
self.updateHistory(dev)
elif event.event_code == EVENT_SYSTEMTRAY_EXIT:
log.debug("EVENT_SYSTEMTRAY_EXIT")
log.warn("HPLIP Status Service was closed. HPLIP Device Manager will now exit.")
cups.releaseCupsInstance()
self.close()
elif event.event_code == EVENT_RAISE_DEVICE_MANAGER:
log.debug("EVENT_RAISE_DEVICE_MANAGER")
self.showNormal()
self.setWindowState(self.windowState() & ~Qt.WindowMinimized | Qt.WindowActive)
self.raise_()
elif event.event_code in (EVENT_DEVICE_START_POLLING,
EVENT_DEVICE_STOP_POLLING,
EVENT_POLLING_REQUEST):
pass
else:
log.error("Unhandled event: %d" % event.event_code)
def handleStatusReply(self, device_uri, data):
dev = self.findDeviceByURI(device_uri)
if dev is not None:
t = {}
for key in data:
value = model_obj.convert_data(str(key), str(data[key]))
t.setdefault(key, value)
dev.dq = t.copy()
for d in dev.dq:
dev.__dict__[d.replace('-','_')] = dev.dq[d]
self.updateDevice(dev)
def handleStatusError(self, e):
log.error(str(e))
def updateHistory(self, dev=None):
if dev is None:
dev = self.cur_device
try:
self.service.GetHistory(dev.device_uri, reply_handler=self.handleHistoryReply,
error_handler=self.handleHistoryError)
except dbus.exceptions.DBusException as e:
log.error("dbus call to GetHistory() failed.")
def handleHistoryReply(self, device_uri, history):
dev = self.findDeviceByURI(device_uri)
if dev is not None:
result = []
history.reverse()
for h in history:
result.append(device.Event(*tuple(h)))
try:
self.error_code = result[0].event_code
except IndexError:
self.error_code = STATUS_UNKNOWN
dev.error_state = STATUS_TO_ERROR_STATE_MAP.get(self.error_code, ERROR_STATE_CLEAR)
dev.hist = result
self.updateDevice(dev)
def handleHistoryError(self, e):
log.error(str(e))
def sendMessage(self, device_uri, printer_name, event_code, username=prop.username,
job_id=0, title=''):
device.Event(device_uri, printer_name, event_code, username,
job_id, title).send_via_dbus(self.session_bus)
def timedRefresh(self):
if not self.updating and self.user_settings.auto_refresh and self.allow_auto_refresh:
log.debug("Refresh timer...")
self.cleanupChildren()
if self.user_settings.auto_refresh_type == 0:
self.requestDeviceUpdate()
else:
self.rescanDevices()
# ***********************************************************************************
#
# TAB/DEVICE CHANGE SLOTS
#
# ***********************************************************************************
def Tabs_currentChanged(self, tab=0):
""" Called when the active tab changes.
Update newly displayed | |
import numpy as np
import pandas as pd
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, precision_score, f1_score,roc_auc_score,roc_curve
from sklearn.metrics import mean_squared_error,mean_absolute_error,r2_score
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.pipeline import Pipeline
import os, sys, site
import itertools
from numpy.random import uniform
from random import sample
from math import isnan
from multiprocessing import Pool
from scipy.spatial import distance
from sklearn.metrics.pairwise import cosine_similarity
def printAlgorithm(algo):
"""
You need to change the path.
"""
p=os.getcwd()
os.chdir(r"E:\OneDrive\Dökümanlar\GitHub\PythonRocks")
df=pd.read_excel("Algorithms.xlsx",skiprows=1)
print(df[df.Algorithm==algo].T)
os.chdir(p)
def adjustedr2(R_sq,y,y_pred,x):
return 1 - (1-R_sq)*(len(y)-1)/(len(y_pred)-x.shape[1]-1)
def calculate_aic_bic(n, mse, num_params):
"""
n=number of instances in y
"""
aic = n *np.log(mse) + 2 * num_params
bic = n * np.log(mse) + num_params * np.log(n)
# ssr = fitted.ssr #residual sum of squares
# AIC = N + N*np.log(2.0*np.pi*ssr/N)+2.0*(p+1)
# print(AIC)
# BIC = N + N*np.log(2.0*np.pi*ssr/N) + p*np.log(N)
# print(BIC)
return aic, bic
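# Illustrative sketch (not in the original module): a quick numeric check of the helper above.
def _demo_calculate_aic_bic():
    # With n=100 samples, mse=2.0 and 3 parameters:
    # aic = 100*ln(2) + 2*3 ≈ 75.31, bic = 100*ln(2) + 3*ln(100) ≈ 83.13
    aic, bic = calculate_aic_bic(100, 2.0, 3)
    print(round(aic, 2), round(bic, 2))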
def printScores(y_test,y_pred,x=None,*, alg_type='c'):
"""
Args:
alg_type: c for classification, r for regression
"""
if alg_type=='c':
acc=accuracy_score(y_test,y_pred)
print("Accuracy:",acc)
recall=recall_score(y_test,y_pred)
print("Recall:",recall)
precision=precision_score(y_test,y_pred)
print("Precision:",precision)
f1=f1_score(y_test,y_pred)
print("F1:",f1)
return acc,recall,precision,f1
else:
mse=mean_squared_error(y_test,y_pred) # squared=False would give RMSE directly, but we also need the MSE itself
rmse=round(np.sqrt(mse),2)
print("RMSE:",rmse)
mae=round(mean_absolute_error(y_test,y_pred),2)
print("MAE:",mae)
r2=round(r2_score(y_test,y_pred),2)
print("r2:",r2)
adjr2=round(adjustedr2(r2_score(y_test,y_pred),y_test,y_pred,x),2)
print("Adjusted R2:",adjr2)
aic, bic=calculate_aic_bic(len(y_test),mse,len(x))
print("AIC:",round(aic,2))
print("BIC:",round(bic,2))
return (rmse,mae,r2,adjr2,round(aic,2),round(bic,2))
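# Illustrative sketch (not in the original module): in classification mode printScores
# prints each metric and returns (accuracy, recall, precision, f1).
def _demo_printScores():
    y_true = np.array([1, 0, 1, 1, 0, 1])
    y_hat = np.array([1, 0, 0, 1, 0, 1])
    # accuracy 5/6 ≈ 0.83, recall 0.75, precision 1.0, f1 ≈ 0.86
    return printScores(y_true, y_hat)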
def draw_siluet(range_n_clusters,data,isbasic=True,printScores=True):
"""
Used for K-means
"""
if isbasic==False:
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(12,4)
ax1.set_xlim([-1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(data) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(data)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(data, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(data, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(data[:, 0], data[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
else:
ss = []
for n in range_n_clusters:
kmeans = KMeans(n_clusters=n)
kmeans.fit_transform(data)
labels = kmeans.labels_
score = silhouette_score(data, labels)
ss.append(score)
if printScores==True:
print(n,score)
plt.plot(range_n_clusters,ss)
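# Illustrative sketch (not in the original module): sweep k = 2..6 on synthetic blobs.
# With isbasic=True (the default) only the average silhouette score per k is plotted.
def _demo_draw_siluet():
    from sklearn.datasets import make_blobs
    X_blobs, _ = make_blobs(n_samples=300, centers=4, random_state=0)
    draw_siluet(range(2, 7), X_blobs)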
def drawEpsilonDecider(data,n):
"""
for DBSCAN
n: # of neighbours
data:numpy array
"""
neigh = NearestNeighbors(n_neighbors=n)
nbrs = neigh.fit(data)
distances, indices = nbrs.kneighbors(data)
distances = np.sort(distances, axis=0)
distances = distances[:,1]
plt.ylabel("eps")
plt.plot(distances)
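# Illustrative sketch (not in the original module): choose DBSCAN's eps by eyeballing
# the "knee" in the sorted nearest-neighbour distance curve plotted above.
def _demo_drawEpsilonDecider():
    rng = np.random.RandomState(1)
    drawEpsilonDecider(rng.normal(size=(200, 2)), n=4)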
def draw_elbow(ks,data):
wcss = []
for i in ks:
kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)  # k-means++ avoids the random initialization trap
kmeans.fit(data)
wcss.append(kmeans.inertia_)
plt.plot(ks, wcss)
plt.title('Elbow Method')
plt.xlabel('# of clusters')
plt.ylabel('WCSS')
plt.show()
#PCA biplot
def biplot(score,coeff,y,variance,labels=None):
"""
found here: https://stackoverflow.com/questions/39216897/plot-pca-loadings-and-loading-in-biplot-in-sklearn-like-rs-autoplot
"""
xs = score[:,0]
ys = score[:,1]
n = coeff.shape[0]
scalex = 1.0/(xs.max() - xs.min())
scaley = 1.0/(ys.max() - ys.min())
plt.scatter(xs * scalex,ys * scaley, c = y)
for i in range(n):
plt.arrow(0, 0, coeff[i,0], coeff[i,1],color = 'r',alpha = 0.5)
if labels is None:
plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, "Var"+str(i+1), color = 'g', ha = 'center', va = 'center')
else:
plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, labels[i], color = 'g', ha = 'center', va = 'center')
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.xlabel("PC{},Variance:{}".format(1,variance[0]))
plt.ylabel("PC{},Variance:{}".format(2,variance[1]))
plt.grid()
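# Illustrative sketch (not in the original module): biplot expects the PCA scores, the
# transposed component loadings, the class labels and the explained variance ratios.
def _demo_biplot():
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(0)
    X_demo = rng.normal(size=(50, 4))
    y_demo = rng.randint(0, 2, 50)
    pca = PCA(n_components=2).fit(X_demo)
    biplot(pca.transform(X_demo), pca.components_.T, y_demo,
           np.round(pca.explained_variance_ratio_, 2),
           labels=["f1", "f2", "f3", "f4"])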
def PCAChart(X_pca,alpha=0.2):
n=X_pca.shape[1] #second dimension is the number of colums which is the number of components
if n==2:
plt.scatter(X_pca[:,0], X_pca[:,1],alpha=alpha);
elif n==3:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
Axes3D.scatter(ax,xs=X_pca[:,0], ys=X_pca[:,1],zs=X_pca[:,2],alpha=alpha)
else:
print("n should be either 2 or 3")
def getfullitemsforOHE(wholedf,featlist,sort=True):
"""
wholedf should be the dataframe including both train and test set.
"""
def sortornot(X):
if sort==False:
return X
else:
return sorted(X)
fulllist=[]
for feat in featlist:
fulllist.append(sortornot(wholedf[feat].unique()))
return fulllist
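# Illustrative sketch (not in the original module): build the categories= argument for
# OneHotEncoder from the full dataframe so train/test splits share the same category sets.
def _demo_getfullitemsforOHE():
    df_all = pd.DataFrame({"city": ["b", "a", "b"], "sex": ["m", "f", "m"]})
    cats = getfullitemsforOHE(df_all, ["city", "sex"])  # [['a', 'b'], ['f', 'm']]
    return OneHotEncoder(categories=cats)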
def getfeaturenames(ct,dataframe):
final_features=[]
for trs in ct.transformers_:
trName=trs[0]
trClass=trs[1]
features=trs[2]
if isinstance(trClass,Pipeline):
n,tr=zip(*trClass.steps)
for t in tr: #t is a transformator object, tr is the list of all transoformators in the pipeline
if isinstance(t,OneHotEncoder):
for f in t.get_feature_names(features):
final_features.append("OHE_"+f)
break
else: #if not found onehotencoder, add the features directly
for f in features:
final_features.append(f)
elif isinstance(trClass,OneHotEncoder): #?type(trClass)==OneHotEncoder:
for f in trClass.get_feature_names(features):
final_features.append("OHE_"+f)
else:
#remainders
if trName=="remainder":
for i in features:
final_features.append(list(dataframe.columns)[i])
#all the others
else:
for f in features:
final_features.append(f)
return final_features
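# Illustrative sketch (not in the original module): recover readable feature names from a
# fitted ColumnTransformer. Assumes an older scikit-learn where OneHotEncoder still has
# get_feature_names(), which the helper above relies on.
def _demo_getfeaturenames():
    from sklearn.compose import ColumnTransformer
    df_demo = pd.DataFrame({"color": ["red", "blue"], "size": [1, 2]})
    ct = ColumnTransformer(
        [("cat", Pipeline([("ohe", OneHotEncoder())]), ["color"])],
        remainder="passthrough")
    ct.fit(df_demo)
    # Expected: ['OHE_color_blue', 'OHE_color_red', 'size']
    return getfeaturenames(ct, df_demo)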
def featureImportanceEncoded(importance,feature_names,figsize=(8,6)):
plt.figure(figsize=figsize)
dfimp=pd.DataFrame(importance.reshape(-1,1).T,columns=feature_names).T
dfimp.index.name="Encoded"
dfimp.rename(columns={0: "Importance"},inplace=True)
dfimp.reset_index(inplace=True)
dfimp["Feature"]=dfimp["Encoded"].apply(lambda x:x[4:].split('_')[0] if "OHE" in x else x)
dfimp.groupby(by='Feature')["Importance"].sum().sort_values().plot(kind='barh');
def compareClassifiers(gs,tableorplot='plot',figsize=(10,5)):
cvres = gs.cv_results_
cv_results = pd.DataFrame(cvres)
cv_results['param_clf']=cv_results['param_clf'].apply(lambda x:str(x).split('(')[0])
cols={"mean_test_score":"MAX of mean_test_score","mean_fit_time":"MIN of mean_fit_time"}
summary=cv_results.groupby(by='param_clf').agg({"mean_test_score":"max", "mean_fit_time":"min"}).rename(columns=cols)
summary.sort_values(by='MAX of mean_test_score', ascending=False,inplace=True)
if tableorplot=='table':
return summary
else:
fig, ax1 = plt.subplots(figsize=figsize)
color = 'tab:red'
ax1.set_xlabel('Classifiers')
ax1.tick_params(axis='x', rotation=45)
ax1.set_ylabel('MAX of mean_test_score', color=color)
ax1.bar(summary.index, summary['MAX of mean_test_score'], color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('MIN of mean_fit_time', color=color)
ax2.plot(summary.index, summary['MIN of mean_fit_time'], color=color)
ax2.tick_params(axis='y', labelcolor=color)
plt.show()
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
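# Illustrative sketch (not in the original module): plot a confusion matrix computed with
# sklearn's confusion_matrix (already imported at the top of this module).
def _demo_plot_confusion_matrix():
    cm_demo = confusion_matrix([0, 1, 1, 0, 1], [0, 1, 0, 0, 1])
    plot_confusion_matrix(cm_demo, classes=["neg", "pos"], title="Demo confusion matrix")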
def CheckForClusterinTendencyWithHopkins(df):
"""
taken from https://matevzkunaver.wordpress.com/2017/06/20/hopkins-test-for-cluster-tendency/
the closer to 1, the higher probability of clustering tendency
"""
d = df.shape[1]
#d = len(vars) # columns
n = len(df) # rows
m = int(0.1 * n) # heuristic from article [1]
nbrs = NearestNeighbors(n_neighbors=1).fit(df.values)
rand_X = sample(range(0, n, 1), m)
ujd = []
wjd = []
for j in range(0, m):
u_dist, _ = nbrs.kneighbors(uniform(np.amin(df,axis=0),np.amax(df,axis=0),d).reshape(1, -1), 2, return_distance=True)
ujd.append(u_dist[0][1])
w_dist, _ = nbrs.kneighbors(df.iloc[rand_X[j]].values.reshape(1, -1), 2, return_distance=True)
wjd.append(w_dist[0][1])
H = sum(ujd) / (sum(ujd) + sum(wjd))
if isnan(H):
print(ujd, wjd)
H = 0
return H
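# Illustrative sketch (not in the original module): well-separated blobs should give a
# Hopkins statistic clearly above 0.5, while uniform noise stays close to it.
def _demo_hopkins():
    rng = np.random.RandomState(0)
    blobs = pd.DataFrame(np.r_[rng.normal(0, 0.1, (100, 2)),
                               rng.normal(5, 0.1, (100, 2))])
    return CheckForClusterinTendencyWithHopkins(blobs)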
def getNumberofCatsAndNumsFromDatasets(path,size=10_000_000):
"""
Returns the number of features by their main type (i.e. categorical, numeric or datetime) for each dataset file.
args:
path: path of the directory the files reside in.
size: maximum file size to read (default is ~10MB). If chosen larger, it will take longer to return.
"""
os.chdir(path)
files=os.listdir()
liste=[]
for d in files:
try:
if os.path.isfile(d) and os.path.getsize(d)<size:
if os.path.splitext(d)[1]==".csv":
df=pd.read_csv(d,encoding = "ISO-8859-1")
elif os.path.splitext(d)[1]==".xlsx":
df=pd.read_excel(d)
else:
continue
nums=len(df.select_dtypes("number").columns)
date=len(df.select_dtypes(include=[np.datetime64]).columns)
cats=len(df.select_dtypes("O").columns)-date
liste.append((d,nums,cats,date))
except:
pass
dffinal=pd.DataFrame(liste,columns=["filename","numeric","categorical","datettime"])
dffinal.set_index("filename")
return dffinal
#Functions to run before and during modelling
def checkIfNumberOfInstanceEnough(df):
"""
o If there are very few rows, more data should be collected.
o If there are far too many rows, partial sampling can be applied. (We will see the details.)
o If you are not sure about the amount of data, try with all of it first. If training takes too long, you can reduce it gradually.
"""
def checkIfNumberOFeatures(df):
"""
o | |
error = nmap_process.communicate()
# If there is output
if len(output):
parser = _XMLParser(output)
try:
parsed_nmap_output = parser.parse()
# If parsing error raise NmapScanError with STDERR info.
except ET.ParseError as e:
raise NmapScanError('Could not parse output from nmap: {}'.format(e)) from None
except AttributeError as e:
raise NmapScanError('Nmap application error: {}'.format(parser.extract_error_msg())) from None
# If there is no output, raise NmapScanError with STDERR info
else:
raise NmapScanError('No output from process was given. STDERR says:\n{}'.format(error.decode('utf8')))
# If there is error output but the method reaches this point, the errors are tolerable (non-fatal).
if len(error):
self._tolerant_errors = error
# Assign class attributes from the parsed information.
self._assign_class_attributes(parsed_nmap_output)
# Execute all the functions that were registered in the engine
if self.engine is not None:
self._execute_engine_scripts()
# Set finished variable to True
self._finished = True
def _execute_engine_scripts(self):
""" Get all host and ports scripts from the PyNSEEngine in case its not None, and execute all its functions.
"""
for i in self._result:
for j in self.engine.get_suitable_host_scripts(i):
self._result[i]['scripts'].append({
'name': j.name,
'output': j.execute()
})
for proto in self._result[i]['protocols']:
for port in self._result[i]['protocols'][proto]:
script_list = [x for x in self.engine.get_suitable_port_scripts(i,
proto, port, self._result[i]['protocols'][proto][str(port)]['state'])]
if len(script_list):
try:
service_instance = self._result[i]['protocols'][proto][str(port)]['service']
except KeyError:
service_instance = Service('', '', '', '', [])
for k in script_list:
service_instance[k.name] = k.execute()
def _assign_class_attributes(self, nmap_output):
""" Assign class attributes (properties) from the dictionary coming from the parsed XML.
:param nmap_output:
:type nmap_output: dict
"""
self._start_timestamp = nmap_output['running_info']['start_timestamp']
self._exit_status = nmap_output['running_info']['exit_status']
self._start_time = nmap_output['running_info']['start_time']
self._args = nmap_output['running_info']['args']
self._summary = nmap_output['running_info']['summary']
self._version = nmap_output['running_info']['version']
self._end_time = nmap_output['running_info']['end_time']
self._end_timestamp = nmap_output['running_info']['end_timestamp']
self._scanned_protocols_info = nmap_output['scan_info']
self._result = nmap_output['scan']
def _has_finished(func):
""" Raises NmapScanError if scanner has not finished or was not performed.
:raises: NmapScanError
"""
def check_finish_tag(self, *args, **kwargs):
if not self.finished:
raise NmapScanError('Scan was not completed or was not even launched.')
return func(self, *args, **kwargs)
return check_finish_tag
@_has_finished
def raw_data(self):
""" Returns the parsed dictionary itself containing all the scan information.
:return: Structured nested dictionary
:rtype: dict
"""
return self._result
@_has_finished
def scanned_hosts(self):
""" Returns a list containing all scanned hosts.
:return: List of scanned hosts
:rtype: list
"""
return [ip for ip in self._result]
@_has_finished
def non_scanned_hosts(self):
""" Return a list of hosts that did not respond to the scan.
:return: List of non scanned hosts
:rtype: list
"""
return [t for t in self._target_list if t not in self._result]
@_has_finished
def state(self, host):
""" Return the state of a host. It returns None if the host was not scanned.
:param host: Host where to get the state from.
:type host: str
:return: Host's state. None if the host does not exist
:rtype: str, None
:raises: NmapScanError if host does not exist.
"""
try:
return self._result[host]['state']
except KeyError:
raise NmapScanError('Host does not exist in the scan result.') from None
@_has_finished
def reason(self, host):
""" Returns the reason why a host was successfully scanned. It returns None if the host was not scanned
:param host: Host where to get the reason from.
:type host: str
:return: Reason for scan success. None if the host does not exist.
:rtype: str, None
:raises: NmapScanError if host does not exist.
"""
try:
return self._result[host]['reason']
except KeyError:
raise NmapScanError('Host does not exist in the scan result.') from None
@_has_finished
def all_protocols(self, host):
""" Yields all scanned protocols from a host.
:param host: Host where to get the protocols from.
:type host: str
:return: Iterable with all scanned protocol
:rtype: str
:raises: NmapScanError if host does not exist.
"""
try:
for proto in self._result[host]['protocols']:
yield proto
except KeyError:
raise NmapScanError('Host does not exist in the scan result.') from None
@_has_finished
def scanned_ports(self, host, protocol):
""" Return a list of scanned ports for a given host and protocol.
:param host: Host where to get the ports from.
:param protocol: Protocol specification
:type host: str
:type protocol: str
:return: List of scanned ports from a host and protocol
:rtype: list
:raises: NmapScanError if host or protocol do not exist.
"""
try:
return [int(p) for p in self._result[host]['protocols'][protocol]]
except KeyError:
raise NmapScanError('Host and/or protocol do not exist.') from None
@_has_finished
def non_scanned_ports(self, host, protocol):
""" Return a list of non scanned ports for a given host and protocol.
:param host: Host where to get the ports from.
:param protocol: Protocol specification
:type host: str
:type protocol: str
:return: List of non scanned ports from a host and protocol
:rtype: list
:raises: NmapScanError if host or protocol do not exist.
"""
try:
return [p for p in self._port_list if str(p)
not in self._result[host]['protocols'][protocol]]
except KeyError:
raise NmapScanError('Host and/or protocol do not exist.') from None
@_has_finished
def hostnames(self, host):
""" Returns a list containing all hostnames from a given host, eliminating duplicates.
:param host: Host where to get the hostnames from.
:type host: str
:return: List of hostnames.
:rtype: list
:raises: NmapScanError if host does not exist.
"""
try:
return list(set(self._result[host]['hostnames']))
except KeyError:
raise NmapScanError('Host does not exist in the scan result.') from None
@_has_finished
def os_matches(self, host):
""" Yield every OS name and accuracy for every OS match from a given host.
:param host: Host where to get the os info from.
:type host: str
:return: OS name and accuracy for every os match
:rtype: iter
:raises: NmapScanError if host does not exist.
"""
try:
for os_dict in self._result[host]['os']['matches']:
yield os_dict['name'], os_dict['accuracy']
except KeyError:
raise NmapScanError('Host does not exist in the scan result.') from None
@_has_finished
def os_fingerprint(self, host):
""" Returns the OS fingerprint from a given host. If there is no fingerprint match or the host was not scanned,
it will return None.
:param host: Host where to get the os fingerprint from.
:type host: str
:return: OS fingerprint. None if there is no fingerprint or there is no such host
:rtype: str, None
:raises: NmapScanError if the host does not exist.
"""
try:
return self._result[host]['os']['fingerprint']
except KeyError:
raise NmapScanError('Host does not exist in the scan result.') from None
@_has_finished
def most_accurate_os(self, host):
""" Returns a list of the most accurate OS matches for a given host. If there is no OS match or no OS match was
performed, it will return None.
:param host: Host where to get the most accurate OSes.
:type host: str
:return: List of most accurate OSes.
:rtype: list
:raises: NmapScanError if the host does not exist.
"""
try:
best_accuracy = self._result[host]['os']['matches'][0]['accuracy']
except KeyError:
raise NmapScanError('Host does not exist in the scan result.') from None
return [o['name'] for o in self._result[host]['os']['matches']
if o['accuracy'] == best_accuracy]
@_has_finished
def port_state(self, host, protocol, port):
""" Yields the state and reason from a port, given a host and a protocol.
:param host: Host where to get the port info from
:param protocol: Protocol specification of the port.
:param port: Target port
:type host: str
:type protocol: str
:type port: str, int
:return: state and reason
:rtype: tuple
WHERE
state str is the state of the port
reason str is the reason for that port to be classified as open.
:raises: NmapScanError if host, protocol or port do not exist.
"""
try:
port = self._result[host]['protocols'][protocol][str(port)]
return port['state'], port['reason']
except KeyError as e:
if host in str(e):
raise NmapScanError('Host does not exist in the scan result.') from None
elif protocol in str(e):
raise NmapScanError('Protocol does not exist for given host: {}'.format(host)) from None
else:
raise NmapScanError('Port does not exist in scan result for given host and '
'protocol: {} - {}'.format(host, protocol)) from None
@_has_finished
def service(self, host, protocol, port):
""" Returns a Service instance containing the information from a service for
a given host, protocol and port. None if no service information was found
:param host: Host where to get the port info from
:param protocol: Protocol specification of the port.
:param port: Target port
:type host: str
:type protocol: str
:type port: str, int
:return: Service instance from that port.
:rtype: Service
:raises: NmapScanError if host, port or protocol do not | |
-11, 5, -5, 4],
[ 26, -26, 10, -10, 30, -30, 23],
[ 20, -20, 3, -3, 33, -33, 23],
[ 5, -5, 25, -25, 24, -24, 4],
[ 17, -17, 4, -4, 22, -22, 0]
]
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing as AdditiveCyclic
G = AdditiveCyclic(35)
Mb=[]
for R in zip(*M):
for i in range(7):
Mb.append(cyclic_shift(R,i))
return G, Mb
def QDM_45_7_1_1_9():
r"""
Return a `(45,7;1,1;9)`-quasi-difference matrix.
Used to build an `OA(7,54)`
As explained in the Handbook III.3.71 [DesignHandbook]_.
EXAMPLES::
sage: from sage.combinat.designs.database import QDM_45_7_1_1_9
sage: from sage.combinat.designs.designs_pyx import is_quasi_difference_matrix
sage: G,M = QDM_45_7_1_1_9()
sage: is_quasi_difference_matrix(M,G,7,1,1,9)
True
"""
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing as AdditiveCyclic
G = AdditiveCyclic(45)
M = [
[None,None,None,None,None,None,None,None,None],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1, 27, 16, 7, -1, -27, -16, -7, 3],
[ 24, 40, 1, 35, -24, -40, -1, -35, 7],
[ 10, 30, 22, 44, -10, -30, -22, -44, 7],
[ 5, 18, 14, 33, -5, -18, -14, -33, 3],
[ 30, 16, 33, 27, -30, -16, -33, -27, 0],
]
Mb=[]
for R in zip(*M):
for c in range(7):
Mb.append(cyclic_shift(R,c))
return G, Mb
def QDM_54_7_1_1_8():
r"""
Return a `(54,7;1,1;8)`-quasi-difference matrix.
Used to build an `OA(7,62)`
As explained in the Handbook III.3.74 [DesignHandbook]_.
EXAMPLES::
sage: from sage.combinat.designs.database import QDM_54_7_1_1_8
sage: from sage.combinat.designs.designs_pyx import is_quasi_difference_matrix
sage: G,M = QDM_54_7_1_1_8()
sage: is_quasi_difference_matrix(M,G,7,1,1,8)
True
"""
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing as AdditiveCyclic
G = AdditiveCyclic(54)
M = [
[ 0 ,None,None,None, 0 ,None ,None ,None,None,None],
[17 , 0 , 0 , 0 , -17 , 0 , 0 , 0 , 1 , 11 ],
[29 , 28 , 35 , 23 , -29 , -28 , -35 , -23, 3 , 19 ],
[36 , 50 , 5 , 33 , -36 , -50 , -5 , -33, 7 , 33 ],
[31 , 2 , 43 , 30 , -31 , - 2 , -43 , -30, 34 , 33 ],
[16 , 47 , 44 , 51 , -16 , -47 , -44 , -51, 30 , 19 ],
[41 , 11 , 1 , 17 , -41 , -11 , - 1 , -17, 28 , 11 ]
]
Mb=[]
for R in zip(*M):
for c in range(7):
Mb.append(cyclic_shift(R,c))
return G, Mb
def QDM_57_9_1_1_8():
r"""
Return a `(57,9;1,1;8)`-quasi-difference matrix.
Used to build an `OA(9,65)`
Construction shared by <NAME>
EXAMPLES::
sage: from sage.combinat.designs.database import QDM_57_9_1_1_8
sage: from sage.combinat.designs.designs_pyx import is_quasi_difference_matrix
sage: G,M = QDM_57_9_1_1_8()
sage: is_quasi_difference_matrix(M,G,9,1,1,8)
True
"""
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing as G
B = [None,1, 6, 7, 9, 19, 38, 42, 49] # Base block of a (57,8,1)-BIBD
OA = orthogonal_array(9,9,2)
M = [R for R in OA if any(R[0] != x for x in R)]
M = [[B[x] for x in R] for R in M] # replacing [0,..,8] by the elements of B
M.append([0]*9)
return G(57), M
# Quasi-difference matrices
#
# The syntax of the dictionary is
#
# QDM = {
# (n+u,lmbda): { # QDM with mu<=lmbda=1 yields a OA(k,n+u)-OA(k,u)
# (n,lmbda,mu,u): (k,qdm_constructor),
# }
# }
QDM = {}
for ((n,k,lmbda,mu,u),f) in [((19,6,1,1,1), QDM_19_6_1_1_1),
((21,5,1,1,1), QDM_21_5_1_1_1),
((21,6,1,1,5), QDM_21_6_1_1_5),
((25,6,1,1,5), QDM_25_6_1_1_5),
((33,6,1,1,1), QDM_33_6_1_1_1),
((37,6,1,1,1), QDM_37_6_1_1_1),
((35,7,1,1,7), QDM_35_7_1_1_7),
((45,7,1,1,9), QDM_45_7_1_1_9),
((54,7,1,1,8), QDM_54_7_1_1_8),
((57,9,1,1,8), QDM_57_9_1_1_8)]:
if not (n+u,lmbda) in QDM:
QDM[n+u,lmbda] = {}
QDM[n+u,lmbda][n,lmbda,mu,u] = (k,f)
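# Illustrative sketch (not in the Sage source): how the QDM table above is meant to be
# queried. The (45,7;1,1;9)-QDM is stored under outer key (n+u, lmbda) = (54, 1) and
# inner key (n, lmbda, mu, u) = (45, 1, 1, 9); its value is the pair (k, constructor).
def _demo_qdm_lookup():
    k, constructor = QDM[54, 1][45, 1, 1, 9]   # k == 7
    G, M = constructor()                       # same as calling QDM_45_7_1_1_9()
    return k, G, M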
# Create the list of QDM matrices for the doc
LIST_OF_QDM = ", ".join("`({},{};{},{};{})`".format(n,k,lmbda,mu,u)
for n,k,lmbda,mu,u in
sorted((n,k,lmbda,mu,u) for entry in QDM.values()
for (n,lmbda,mu,u),(k,_) in sorted(entry.items())))
_ref_Handbook = """Handbook of Combinatorial Designs (2ed),
<NAME>, <NAME>, 2010 CRC Press"""
_ref_Brouwer_vanRees = """<NAME> and <NAME>, More mutually orthogonal Latin squares,
Discrete Mathematics 1982, vol 39, num 3, pp 263-281"""
_ref_Colbourn = """<NAME>, Some direct constructions for incomplete transversal designs,
Journal of Statistical Planning and Inference, vol 56, num 1, pp 93-104"""
_ref_Abel_v_12_t = """<NAME>, Some V(12,t) vectors and designs from difference and quasi-difference matrices,
Australasian Journal of Combinatorics 2008, vol 40 pp 69-85"""
_ref_Abel_v_11_t = """<NAME>, Some new matrix-minus-diagonal V(11,t) vectors,
Journal of Combinatorial Designs 2003, vol 11, num 4, pp 304-306"""
Vmt_vectors = {
(3 ,2 ) : ((0,1,3,6), _ref_Handbook),
(3 ,4 ) : ((0,1,3,9), _ref_Handbook),
(3 ,10) : ((0,1,4,13), _ref_Handbook),
(3 ,12) : ((0,1,3,10), _ref_Handbook),
(3 ,20) : ((0,1,3,13), _ref_Handbook),
(3 ,6 ) : ((0,1,3,7), _ref_Handbook),
(3 ,26) : ((0,1,3,8), _ref_Handbook),
(3 ,32) : ((0,1,3,9), _ref_Handbook),
(3 ,6 ) : ((0,1,3,7), _ref_Handbook),
(3 ,14) : ((0,1,4,13), _ref_Handbook),
(3 ,24) : ((0,1,3,15), _ref_Handbook),
(3 ,34) : ((0,1,3,7), _ref_Handbook),
(4 ,3 ) : ((0,1,3,7,2), _ref_Handbook),
(4 ,7 ) : ((0,1,3,7,19), _ref_Handbook),
(4 ,9 ) : ((0,1,3,2,8), _ref_Brouwer_vanRees),
(4 ,13) : ((0,1,3,7,19), _ref_Handbook),
(4 ,15) : ((0,1,3,7,5), _ref_Handbook),
(4 ,25) : ((0,1,3,2,31), _ref_Handbook),
(5 ,6 ) : ((0,1,3,7,30,17), _ref_Handbook),
(5 ,8 ) : ((0,1,3,22,14,18), _ref_Handbook),
(5 ,12) : ((0,1,3,7,23,50), _ref_Handbook),
(5 ,14) : ((0,1,3,9,25,54), _ref_Handbook),
(5 ,20) : ((0,1,3,10,43,91), _ref_Handbook),
(5 ,26) : ((0,1,3,6,48,15), _ref_Handbook),
(6 ,5 ) : ((0,1,7,30,12,21,15), _ref_Handbook),
(6 ,7 ) : ((0,1,3,16,35,26,36), _ref_Colbourn),
(6 ,11) : ((0,1,3,14,7,24,27), _ref_Handbook),
(6 ,13) : ((0,1,3,7,55,47,34), _ref_Handbook),
(6 ,17) : ((0,1,3,2,14,99,29), _ref_Handbook),
(6 ,21) : ((0,1,4,13,66,93,45), _ref_Handbook),
(7 ,6 ) : ((0,1,12,27,37,16,30,35), _ref_Handbook),
(7 ,10) : ((0,1,3,45,9,50,28,16), _ref_Handbook),
(7 ,16) : ((0,1,3,7,82,72,93,39), _ref_Handbook),
(7 ,18) : ((0,1,3,6,97,114,99,26), _ref_Handbook),
(8 ,9 ) : ((0,1,20,70,23,59,3,8,19), _ref_Colbourn),
(8 ,11) : ((0,1,6,56,22,35,47,23,60), _ref_Colbourn),
(8 ,17) : ((0,1,3,2,133,126,47,109,74), _ref_Colbourn),
(8 ,29) : ((0,1,4,11,94,60,85,16,198), _ref_Colbourn),
(8 ,57) : ((0,1,3,2,12,333,363,154,340), _ref_Brouwer_vanRees),
(9 ,12) : ((0,1,4,19,56,22,83,95,52,96), _ref_Handbook),
(9 ,14) : ((0,1,11,25,37,8,100,23,95,42), _ref_Handbook),
(9 ,18) : ((0,1,3,7,36,30,158,94,52,70), _ref_Handbook),
(9 ,20) : ((0,1,3,19,145,70,173,159,18,85), _ref_Handbook),
(9 ,22) : ((0,1,3,31,99,190,174,46,87,127), _ref_Handbook),
(9 ,30) : ((0,1,3,8,197,68,119,13,215,105), _ref_Handbook),
(9 ,34) : ((0,1,3,13,140,81,74,131,303,238), _ref_Handbook),
(9 ,42) : ((0,1,3,6,66,258,186,346,104,152), _ref_Handbook),
(9 ,44) : ((0,1,4,11,144,103,216,77,160,363), _ref_Handbook),
(10,13) : ((0,1,5,10,22,6,14,9,53,129,84), _ref_Colbourn),
(10,15) : ((0,1,45,146,51,97,70,137,85,133,18), _ref_Handbook),
(10,19) : ((0,1,3,96,143,156,182,142,4,189,25), _ref_Colbourn),
(10,21) : ((0,1,6,188,205,39,101,113,30,32,42), _ref_Handbook),
(10,25) : ((0,1,3,85,140,178,195,22,48,179,188), _ref_Colbourn),
(10,27) : ((0,1,3,82,109,241,36,112,141,263,126), _ref_Colbourn),
(10,31) : ((0,1,3,57,128,247,289,239,70,271,96), _ref_Colbourn),
(10,33) : ((0,1,3,67,319,44,249,146,302,282,90), _ref_Handbook),
(10,43) : ((0,1,6,29,170,207,385,290,375,32,336), _ref_Colbourn),
(10,49) : ((0,1,3,8,406,72,335,197,324,383,395), _ref_Handbook),
(10,81) : ((0,1,3,2,27,438,615,708,168,410,656), _ref_Colbourn),
(10,97) : ((0,1,3,6,11,274,772,340,707,157,556), _ref_Colbourn),
(10,103) : ((0,1,3,2,7,744,342,797,468,46,561), _ref_Colbourn),
(10,181) : ((0,1,3,8,5,68,514,16,1168,225,929), _ref_Colbourn),
(10,187) : ((0,1,3,7,2,325,1138,730,1013,534,366), _ref_Colbourn),
(10,259) : ((0,1,3,7,2,15,324,1956,1353,2041,1616), _ref_Colbourn),
(10,273) : ((0,1,3,6,11,28,2573,38,1215,1299,2468), _ref_Colbourn),
(10,319) : ((0,1,3,7,2,43,239,1335,1586,2724,63), _ref_Colbourn),
(10,391) : ((0,1,3,2,5,32,555,3450,1242,1823,3833), _ref_Colbourn),
(10,409) : ((0,1,3,2,5,11,505,3202,1502,2521,3023), _ref_Colbourn),
(11,30 ) : ((0,1,58,61,235,82,160,120,260,161,204,174), _ref_Abel_v_11_t),
(11,32 ) : ((0,1,90,6,158,125,293,76,250,123,341,79), _ref_Abel_v_11_t),
(11,36 ) : ((0,1,3,57,250,77,196,255,371,107,305,260), _ref_Abel_v_11_t),
(11,38 ) : ((0,1,43,27,179,37,345,70,17,255,238,147), _ref_Abel_v_11_t),
(11,42 ) : ((0,1,3,12,87,104,392,328,346,314,23,359), _ref_Abel_v_11_t),
(11,56 ) : ((0,1,26,50,76,246,255,146,513,271,123,555), _ref_Abel_v_11_t),
(11,60 ) : ((0,1,5,46,324,206,537,621,304,307,529,547), _ref_Abel_v_11_t),
(11,62 ) : ((0,1,11,31,395,251,605,55,336,321,6,213), _ref_Abel_v_11_t),
(11,66 ) : ((0,1,4,32,15,586,669,112,240,496,490,210), _ref_Abel_v_11_t),
(11,78 ) : ((0,1,4,31,97,264,277,746,816,808,298,741), _ref_Abel_v_11_t),
(11,80 ) : ((0,1,3,73,68,71,569,409,127,110,554,432), _ref_Abel_v_11_t),
(11,86 ) : ((0,1,13,32,17,236,380,340,849,855,189,774), _ref_Abel_v_11_t),
(11,90 ) : ((0,1,6,19,193,213,529,661,52,952,638,605), _ref_Abel_v_11_t),
(11,92 ) : ((0,1,4,80,177,182,508,581,511,664,25,425), _ref_Abel_v_11_t),
(11,102) : ((0,1,9,34,747,766,884,887,812,12,255,475), _ref_Abel_v_11_t),
(11,116) : ((0,1,3,16,692,7,36,183,201,846,661,759), _ref_Abel_v_11_t),
(11,120) : ((0,1,4,29,531,536,732,1167,65,1033,508,1255), _ref_Abel_v_11_t),
(11,128) : ((0,1,6,53,50,492,599,1230,430,131,1063,677), _ref_Abel_v_11_t),
(11,132) : ((0,1,4,81,626,632,694,1352,744,60,105,821), _ref_Abel_v_11_t),
(11,146) : ((0,1,7,18,92,176,193,1088,114,515,791,548), _ref_Abel_v_11_t),
(11,162) : ((0,1,8,28,314,323,401,1569,1197,1455,1269,382), _ref_Abel_v_11_t),
(11,170) : ((0,1,8,41,1573,1585,1686,1750,358,1732,271,340), _ref_Abel_v_11_t),
(11,182) : ((0,1,5,23,675,682,732,1800,1821,1485,763,1913), _ref_Abel_v_11_t),
(11,188) : ((0,1,5,29,1454,1463,1493,1838,903,98,1692,1846), _ref_Abel_v_11_t),
(11,192) : ((0,1,4,9,1842,1851,1876,2035,139,979,1027,350), _ref_Abel_v_11_t),
(11,198) : ((0,1,3,52,250,255,278,347,418,856,1298,780), _ref_Abel_v_11_t),
(11,206) : ((0,1,6,99,1465,1469,1501,1530,869,2074,1786,674), _ref_Abel_v_11_t),
(11,210) : ((0,1,8,39,2228,2244,2274,2293,188,2181,537,867), _ref_Abel_v_11_t),
(11,212) : ((0,1,9,32,2219,2241,2310,2319,1253,352,920,365), _ref_Abel_v_11_t),
(11,216) : ((0,1,5,15,1606,1611,1627,2101,211,1821,1564,1688), _ref_Abel_v_11_t),
(11,218) : ((0,1,8,23,1347,1352,1358,1846,1479,2157,1910,292), _ref_Abel_v_11_t),
(11,230) : ((0,1,6,33,2387,2394,2488,2518,1893,728,246,65), _ref_Abel_v_11_t),
(11,242) : ((0,1,8,57,378,392,404,637,1708,567,1356,1903), _ref_Abel_v_11_t),
(11,246) : ((0,1,7,97,389,400,413,1253,1625,1071,1756,1440), _ref_Abel_v_11_t),
(11,248) : ((0,1,6,67,2112,2118,2142,2181,365,1315,2336,1283), _ref_Abel_v_11_t),
(11,260) : ((0,1,5,20,1158,1165,1171,1609,449,1990,1546,1222), _ref_Abel_v_11_t),
(11,266) : ((0,1,4,45,2132,2136,2164,2354,2407,2194,1459,394), _ref_Abel_v_11_t),
(11,270) : ((0,1,9,31,2085,2089,2100,2348,57,748,1440,2254), _ref_Abel_v_11_t),
(11,276) : ((0,1,5,42,1905,1910,1925,2382,618,594,2820,322), _ref_Abel_v_11_t),
(11,288) : ((0,1,7,21,2651,2656,2694,2953,190,545,311,3063), _ref_Abel_v_11_t),
(11,290) : ((0,1,5,95,1487,1492,1512,1523,1599,939,2724,971), _ref_Abel_v_11_t),
(11,296) : ((0,1,7,68,856,860,868,2884,2872,2339,2965,1715), _ref_Abel_v_11_t),
(11,300) : ((0,1,9,24,2221,2232,2246,2349,2196,3173,2190,1661), _ref_Abel_v_11_t),
(11,302) : ((0,1,8,24,1273,1277,1290,1750,2662,733,511,1147), _ref_Abel_v_11_t),
(11,308) : ((0,1,4,29,1159,1168,1174,2322,2963,1778,3071,2317), _ref_Abel_v_11_t),
(11,312) : ((0,1,4,43,121,128,136,1266,2919,603,3199,2590), _ref_Abel_v_11_t),
(11,318) : ((0,1,8,36,2701,2712,2733,2995,3281,2830,1262,2203), _ref_Abel_v_11_t),
(11,330) : ((0,1,9,22,2312,2316,2326,2517,1311,488,1406,267), _ref_Abel_v_11_t),
(11,336) : ((0,1,3,69,117,126,133,456,1399,579,3469,1157), _ref_Abel_v_11_t),
(11,338) : ((0,1,9,52,1012,1017,1027,1511,3139,243,2560,139), _ref_Abel_v_11_t),
(11,350) : ((0,1,5,37,2650,2655,2666,3213,3709,86,3456,1383), _ref_Abel_v_11_t),
(11,356) : ((0,1,6,23,2647,2651,2657,2942,2733,1481,301,831), _ref_Abel_v_11_t),
(11,366) : ((0,1,6,28,1144,1151,1160,1349,392,1114,1006,1906), _ref_Abel_v_11_t),
(11,368) : ((0,1,9,47,1259,1263,1269,1319,1029,2121,2206,3959), _ref_Abel_v_11_t),
(11,372) : ((0,1,7,89,1015,1022,1035,1280,361,3425,1101,2744), _ref_Abel_v_11_t),
(11,378) : ((0,1,3,35,551,558,570,750,481,464,118,2491), _ref_Abel_v_11_t),
(11,396) : ((0,1,9,58,1938,1942,1956,2251,434,768,582,1489), _ref_Abel_v_11_t),
(11,402) : ((0,1,8,49,4331,4336,4350,4399,4169,1114,3877,3795), _ref_Abel_v_11_t),
(11,420) : ((0,1,9,23,207,214,220,359,1273,1500,1817,1048), _ref_Abel_v_11_t),
(11,422) : ((0,1,7,27,86,97,125,246,3796,3663,2211,2422), _ref_Abel_v_11_t),
(11,450) : ((0,1,7,31,4808,4812,4826,4931,1333,4783,1152,162), _ref_Abel_v_11_t),
(11,452) : ((0,1,5,58,4530,4536,4544,4568,3644,1121,561,1732), _ref_Abel_v_11_t),
(12,33 ) : ((0,1,117,331,131,309,321,386,204,276,278,40,118), _ref_Abel_v_12_t),
(12,35 ) : ((0,1,110,361,349,226,98,68,80,234,347,198,321), _ref_Abel_v_12_t),
(12,45 ) : ((0,1,128,372,85,361,484,394,242,41,412,388,480), _ref_Abel_v_12_t),
(12,51 ) : ((0,1,216,516,92,426,559,292,568,184,387,460,162), _ref_Abel_v_12_t),
(12,55 ) : ((0,1,354,581,101,391,639,534,523,252,338,379,77), _ref_Abel_v_12_t),
(12,59 ) : ((0,1,287,561,431,482,527,513,234,518,366,673,670), _ref_Abel_v_12_t),
(12,61 ) : ((0,1,289,562,361,385,125,613,219,637,686,732,185), _ref_Abel_v_12_t),
(12,63 ) : ((0,1,216,562,384,653,218,584,188,704,11,29,122), _ref_Abel_v_12_t),
(12,69 ) : ((0,1,527,449,471,497,677,20,778,88,366,721,753), | |
# You may use, modify and redistribute this module under the terms of the GNU GPL3.0.
"""
Translate a morph target from a mesh with topology 1 to a mesh with topology 2
The shape of old and new objs must be similar. We assume the new mesh is done using a retopology tool.
=========================== ==================================================================
Project Name: **MakeHuman**
Module File Location: utils/topology_translator/topologylib.py
Product Home Page: http://www.makehuman.org/
Authors:
Copyright(c): MakeHuman Team 2001-2011
Licensing: GPL3 (see also: http://sites.google.com/site/makehumandocs/licensing)
Coding Standards: See http://sites.google.com/site/makehumandocs/developers-guide#TOC-Coding-Style
=========================== ==================================================================
"""
import sys
sys.path.append("../../core/")
from math import sqrt
from aljabr import *
import simpleoctree
import os
import copy
def loadVertsCoo(path):
"""
This function serves as a small utility function to load just the vertex
data from a WaveFront object file.
It is used for example to build the original vertex data
or to reset mesh modifications to their pre-modified state.
Parameters
----------
path:
*string*. The file system path to the file to be read.
"""
try:
fileDescriptor = open(path)
except:
print 'Error opening %s file' % path
return
    verts = []
for data in fileDescriptor:
lineData = data.split()
if len(lineData) > 2:
if lineData[0] == 'v':
co = [float(lineData[1]), float(lineData[2]), float(lineData[3])]
verts.append(co)
fileDescriptor.close()
return verts
def loadFacesIndices(path):
"""
This function serves as a small utility function to load just the face indices
data from a WaveFront object file.
Parameters
----------
path:
*string*. The file system path to the file to be read.
"""
try:
fileDescriptor = open(path)
except:
print 'Error opening %s file' % path
return
faces = []
for data in fileDescriptor:
lineData = data.split()
if len(lineData) > 2:
if lineData[0] == 'f':
face = []
for faceData in lineData[1:]:
vInfo = faceData.split('/')
vIdx = int(vInfo[0]) - 1 # -1 because obj is 1 based list
face.append(vIdx)
faces.append(face)
fileDescriptor.close()
return faces
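# Hedged usage sketch (not part of the original module): the two loaders
# above are meant to be used together on the same WaveFront file. The file
# name "base.obj" below is purely illustrative.
#
#   verts = loadVertsCoo("base.obj")
#   faces = loadFacesIndices("base.obj")
#   print "Loaded %i verts and %i faces" % (len(verts), len(faces))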
def subdivideObj(faces, vertices, loops):
for n in xrange(loops):
faces,vertices = tessellate(faces, vertices)
return (faces,vertices)
def tessellate(faces, vertices):
"""
    This function makes a very simple tessellation, based on verts only.
Parameters
----------
faces:
*list*. Each "face" is a list with the index of face verts. Faces is a list of these lists.
vertices:
        *list*. The list of vert coords to be subdivided. They can't be loaded
        from the wavefront file directly, because the coordinates may already
        have been changed by previously applied morphs.
"""
subdividedVerts = []
subdividedFaces = []
idx = len(vertices)-1
vertsUsed = {}
for face in faces:
centroidVerts = []
if len(face) == 4:
i0 = face[0]
i1 = face[1]
i2 = face[2]
i3 = face[3]
newVert1 = centroid([vertices[i0],vertices[i1]])
newVert2 = centroid([vertices[i1],vertices[i2]])
newVert3 = centroid([vertices[i2],vertices[i3]])
newVert4 = centroid([vertices[i3],vertices[i0]])
newVert5 = centroid([newVert1,newVert2,newVert3,newVert4])
k1 = [i0,i1]
k2 = [i1,i2]
k3 = [i2,i3]
k4 = [i3,i0]
k5 = [i0,i1,i2,i3]
k1.sort()
k2.sort()
k3.sort()
k4.sort()
k5.sort()
key1 = str(k1)
key2 = str(k2)
key3 = str(k3)
key4 = str(k4)
key5 = str(k5)
if not vertsUsed.has_key(key1):
idx += 1
vertsUsed[key1] = idx
subdividedVerts.append(newVert1)
n1 = idx
else:
n1 = vertsUsed[key1]
if not vertsUsed.has_key(key2):
idx += 1
vertsUsed[key2] = idx
subdividedVerts.append(newVert2)
n2 = idx
else:
n2 = vertsUsed[key2]
if not vertsUsed.has_key(key3):
idx += 1
vertsUsed[key3] = idx
subdividedVerts.append(newVert3)
n3 = idx
else:
n3 = vertsUsed[key3]
if not vertsUsed.has_key(key4):
idx += 1
vertsUsed[key4] = idx
subdividedVerts.append(newVert4)
n4 = idx
else:
n4 = vertsUsed[key4]
if not vertsUsed.has_key(key5):
idx += 1
vertsUsed[key5] = idx
subdividedVerts.append(newVert5)
n5 = idx
else:
n5 = vertsUsed[key5]
newFace1 = [i0,n1,n5,n4]
newFace2 = [n1,i1,n2,n5]
newFace3 = [n5,n2,i2,n3]
newFace4 = [n5,n3,i3,n4]
subdividedFaces.extend([newFace1,newFace2,newFace3,newFace4])
elif len(face) == 3:
i0 = face[0]
i1 = face[1]
i2 = face[2]
newVert1 = centroid([vertices[i0],vertices[i1]])
newVert2 = centroid([vertices[i1],vertices[i2]])
newVert3 = centroid([vertices[i2],vertices[i0]])
newVert4 = centroid([newVert1,newVert2,newVert3])
            #Create a unique ID for each new vert, using a sorted list of
#vert indices used to calculate it.
k1 = [i0,i1]
k2 = [i1,i2]
k3 = [i2,i0]
k4 = [i0,i1,i2]
k1.sort()
k2.sort()
k3.sort()
k4.sort()
key1 = str(k1)
key2 = str(k2)
key3 = str(k3)
key4 = str(k4)
if not vertsUsed.has_key(key1):
idx += 1
vertsUsed[key1] = idx
subdividedVerts.append(newVert1)
n1 = idx
else:
n1 = vertsUsed[key1]
if not vertsUsed.has_key(key2):
idx += 1
vertsUsed[key2] = idx
subdividedVerts.append(newVert2)
n2 = idx
else:
n2 = vertsUsed[key2]
if not vertsUsed.has_key(key3):
idx += 1
vertsUsed[key3] = idx
subdividedVerts.append(newVert3)
n3 = idx
else:
n3 = vertsUsed[key3]
if not vertsUsed.has_key(key4):
idx += 1
vertsUsed[key4] = idx
subdividedVerts.append(newVert4)
n4 = idx
else:
n4 = vertsUsed[key4]
newFace1 = [i0,n1,n4]
newFace2 = [n1,i1,n4]
newFace3 = [i1,n2,n4]
newFace4 = [n2,i2,n4]
newFace5 = [i2,n3,n4]
newFace6 = [n3,i0,n4]
subdividedFaces.extend([newFace1,newFace2,newFace3,newFace4,newFace5,newFace6])
finalVertList = vertices + subdividedVerts
finalFacesList = subdividedFaces
print "ORIGINAL VERTS: %i"%(len(vertices))
print "VERTS ADDED BY SUBDIVISION: %i"%(len(subdividedVerts))
print "TOTAL VERTICES ADDED %i"%len(finalVertList)
return (finalFacesList, finalVertList)
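# Hedged worked example (not part of the original module) of the key-based
# vert sharing used in tessellate(): subdividing a single quad. The
# coordinates are illustrative only.
#
#   quadVerts = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0],
#                [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
#   quadFaces = [[0, 1, 2, 3]]
#   newFaces, newVerts = tessellate(quadFaces, quadVerts)
#   # The quad becomes 4 quads; 4 edge midpoints plus 1 centroid are added,
#   # so len(newVerts) == 9 and len(newFaces) == 4.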
def applyMorph(vertsList, targetPath):
"""
    This function loads and applies, with value 1, a morph target.
Parameters
----------
vertsList:
*list*. The list of verts coords to be modified by the morph.
    targetPath:
        *string*. The file system path of the morph target file to load and
        apply.
"""
newVertsList = copy.deepcopy(vertsList)
try:
fileDescriptor = open(targetPath)
except:
        print 'Unable to open %s' % targetPath
return
for line in fileDescriptor:
translationData = line.split()
if len(translationData) == 4:
vertIndex = int(translationData[0])
# Adding the translation vector
newVertsList[vertIndex][0] = vertsList[vertIndex][0] + float(translationData[1])
newVertsList[vertIndex][1] = vertsList[vertIndex][1] + float(translationData[2])
newVertsList[vertIndex][2] = vertsList[vertIndex][2] + float(translationData[3])
fileDescriptor.close()
return newVertsList
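# The morph target parsed above is a plain ascii file in which each line
# holds a vert index followed by a translation vector, for example (values
# are illustrative only):
#
#   312 0.0012 -0.0340 0.0107
#   313 0.0008 -0.0291 0.0095
#
# Hedged usage sketch, with hypothetical file names:
#
#   baseVerts = loadVertsCoo("base.obj")
#   morphedVerts = applyMorph(baseVerts, "example.target")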
def meshComparison(vertsList1, vertsList2, faces2, indexListPath = None):
"""
    This function measures the similarity of 2 meshes.
    Instead of using ray intersections to measure the surface differences,
    we subdivide mesh2 in order to increase its density, and then
    we use the vert to vert distance.
    Parameters
    ----------
    vertsList1:
        *list*. The vert coords of the new mesh
    vertsList2:
        *list*. The vert coords of the old mesh
    faces2:
        *list*. The face indices of the old mesh
    indexListPath:
        *string*. Optional path of a file listing the indices of the verts
        of the new mesh to be compared
"""
if indexListPath:
indexList = []
try:
fileDescriptor = open(indexListPath)
except:
            print 'Error opening %s file' % indexListPath
return
for data in fileDescriptor:
lineData = data.split()
i = int(lineData[0])
indexList.append(i)
fileDescriptor.close()
else:
indexList = xrange(len(vertsList1))
tess = subdivideObj(faces2, vertsList2, 2)
overwrite = 0 #Just for more elegant one-line print output progress
#Init of the octree
octree = simpleoctree.SimpleOctree(tess[1] , .25)
    #For each vert of the new mesh we find the nearest verts of the old one
vDistances = []
for i1 in indexList:
v1 = vertsList1[i1]
#We use octree to search only on a small part of the whole old mesh.
vertsList3 = octree.root.getSmallestChild(v1)
#... find nearest verts on old mesh
dMin = 100
for v2 in vertsList3.verts:
d = vdist(v1, v2)
if d < dMin:
dMin = d
vDistances.append(dMin)
word = "Linking verts: %.2f%c."%((float(i1)/len(vertsList1))*100, "%")
sys.stdout.write("%s%s\r" % (word, " "*overwrite ))
sys.stdout.flush()
overwrite = len(word)
dSum = 0
for d in vDistances:
dSum += d
averageDist = dSum/len(vDistances)
print "Average distance %s %s = %s"%(mesh1,mesh2,averageDist)
return averageDist
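# Hedged usage sketch (file names are hypothetical): measure how closely a
# retopologised mesh follows the original one.
#
#   newVerts = loadVertsCoo("new_topology.obj")
#   oldVerts = loadVertsCoo("old_topology.obj")
#   oldFaces = loadFacesIndices("old_topology.obj")
#   avgDist = meshComparison(newVerts, oldVerts, oldFaces)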
def saveData(vertsList1, vertsList2, faces2, dataPath, epsilon = 0.2):
"""
    This function links mesh1 (the new mesh) to mesh2 (the old mesh).
    For each vert of the new mesh it finds one or more verts (max 7) on the
    tessellated old mesh that are the nearest to it. Each linked vert is then
    saved in the ascii data file, together with a weight calculated as a
    function of its distance from the input vert.
    Parameters
    ----------
    vertsList1:
        *list*. The vert coords of the new mesh
    vertsList2:
        *list*. The vert coords of the old mesh
    faces2:
        *list*. The face indices of the old mesh
    dataPath:
        *string*. The path of the data file to save
    epsilon:
        *float*. Threshold
"""
print "building data..."
#We load the old mesh coords, and then tesselate it, in order
#to have a better result in linking new mesh.
tess = subdivideObj(faces2, vertsList2, 2)
vertsList2Tesselated = tess[1]
deltaVectors = []
notLinked = 0
overwrite = 0 #Just for more elegant one-line print output progress
#We need to add index information to each vert.
for i,v in enumerate(vertsList2Tesselated):
v.append(i)
#Init of the octree
octree = simpleoctree.SimpleOctree(vertsList2Tesselated, .25)
try:
fileDescriptor = open(dataPath, 'w')
except:
print 'Unable to open %s'%(dataPath)
return None
#For each vert of new mesh we found the nearest verts of old one
for i1,v1 in enumerate(vertsList1):
vIndices = []
vDistances = []
#We use octree to search only | |
self.memo is not None:
result['memo'] = self.memo
result['attachments'] = []
if self.attachments is not None:
for k in self.attachments:
result['attachments'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('memo') is not None:
self.memo = m.get('memo')
self.attachments = []
if m.get('attachments') is not None:
for k in m.get('attachments'):
temp_model = UrgeTicketRequestTicketMemoAttachments()
self.attachments.append(temp_model.from_map(k))
return self
class UrgeTicketRequest(TeaModel):
def __init__(
self,
ding_isv_org_id: int = None,
ding_org_id: int = None,
ding_suite_key: str = None,
ding_token_grant_type: int = None,
operator_union_id: str = None,
open_ticket_id: str = None,
ticket_memo: UrgeTicketRequestTicketMemo = None,
open_team_id: str = None,
):
self.ding_isv_org_id = ding_isv_org_id
self.ding_org_id = ding_org_id
self.ding_suite_key = ding_suite_key
self.ding_token_grant_type = ding_token_grant_type
        # UnionId of the operator who urges (reminds about) the ticket
        self.operator_union_id = operator_union_id
        # Open ticket id
        self.open_ticket_id = open_ticket_id
        # Memo
        self.ticket_memo = ticket_memo
        # Open team ID
self.open_team_id = open_team_id
def validate(self):
if self.ticket_memo:
self.ticket_memo.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.ding_isv_org_id is not None:
result['dingIsvOrgId'] = self.ding_isv_org_id
if self.ding_org_id is not None:
result['dingOrgId'] = self.ding_org_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
if self.operator_union_id is not None:
result['operatorUnionId'] = self.operator_union_id
if self.open_ticket_id is not None:
result['openTicketId'] = self.open_ticket_id
if self.ticket_memo is not None:
result['ticketMemo'] = self.ticket_memo.to_map()
if self.open_team_id is not None:
result['openTeamId'] = self.open_team_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('dingIsvOrgId') is not None:
self.ding_isv_org_id = m.get('dingIsvOrgId')
if m.get('dingOrgId') is not None:
self.ding_org_id = m.get('dingOrgId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
if m.get('operatorUnionId') is not None:
self.operator_union_id = m.get('operatorUnionId')
if m.get('openTicketId') is not None:
self.open_ticket_id = m.get('openTicketId')
if m.get('ticketMemo') is not None:
temp_model = UrgeTicketRequestTicketMemo()
self.ticket_memo = temp_model.from_map(m['ticketMemo'])
if m.get('openTeamId') is not None:
self.open_team_id = m.get('openTeamId')
return self
class UrgeTicketResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
):
self.headers = headers
def validate(self):
self.validate_required(self.headers, 'headers')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
return self
class GetTicketHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetTicketRequest(TeaModel):
def __init__(
self,
open_team_id: str = None,
open_ticket_id: str = None,
):
        # e.g. eKWh3GBwsKEiE
        self.open_team_id = open_team_id
        # e.g. hNiPO2OVktNMiE
self.open_ticket_id = open_ticket_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.open_team_id is not None:
result['openTeamId'] = self.open_team_id
if self.open_ticket_id is not None:
result['openTicketId'] = self.open_ticket_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('openTeamId') is not None:
self.open_team_id = m.get('openTeamId')
if m.get('openTicketId') is not None:
self.open_ticket_id = m.get('openTicketId')
return self
class GetTicketResponseBodyCreator(TeaModel):
def __init__(
self,
union_id: str = None,
nick_name: str = None,
):
self.union_id = union_id
self.nick_name = nick_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.union_id is not None:
result['unionId'] = self.union_id
if self.nick_name is not None:
result['nickName'] = self.nick_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('unionId') is not None:
self.union_id = m.get('unionId')
if m.get('nickName') is not None:
self.nick_name = m.get('nickName')
return self
class GetTicketResponseBodyProcessor(TeaModel):
def __init__(
self,
union_id: str = None,
nick_name: str = None,
):
self.union_id = union_id
self.nick_name = nick_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.union_id is not None:
result['unionId'] = self.union_id
if self.nick_name is not None:
result['nickName'] = self.nick_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('unionId') is not None:
self.union_id = m.get('unionId')
if m.get('nickName') is not None:
self.nick_name = m.get('nickName')
return self
class GetTicketResponseBodyTakers(TeaModel):
def __init__(
self,
union_id: str = None,
nick_name: str = None,
):
self.union_id = union_id
self.nick_name = nick_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.union_id is not None:
result['unionId'] = self.union_id
if self.nick_name is not None:
result['nickName'] = self.nick_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('unionId') is not None:
self.union_id = m.get('unionId')
if m.get('nickName') is not None:
self.nick_name = m.get('nickName')
return self
class GetTicketResponseBody(TeaModel):
def __init__(
self,
open_ticket_id: str = None,
create_time: str = None,
update_time: str = None,
open_conversation_id: str = None,
creator: GetTicketResponseBodyCreator = None,
processor: GetTicketResponseBodyProcessor = None,
takers: List[GetTicketResponseBodyTakers] = None,
stage: str = None,
title: str = None,
custom_fields: str = None,
scene: str = None,
scene_context: str = None,
):
        # Open ticket id
self.open_ticket_id = open_ticket_id
self.create_time = create_time
self.update_time = update_time
self.open_conversation_id = open_conversation_id
self.creator = creator
self.processor = processor
self.takers = takers
self.stage = stage
self.title = title
self.custom_fields = custom_fields
self.scene = scene
self.scene_context = scene_context
def validate(self):
if self.creator:
self.creator.validate()
if self.processor:
self.processor.validate()
if self.takers:
for k in self.takers:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.open_ticket_id is not None:
result['openTicketId'] = self.open_ticket_id
if self.create_time is not None:
result['createTime'] = self.create_time
if self.update_time is not None:
result['updateTime'] = self.update_time
if self.open_conversation_id is not None:
result['openConversationId'] = self.open_conversation_id
if self.creator is not None:
result['creator'] = self.creator.to_map()
if self.processor is not None:
result['processor'] = self.processor.to_map()
result['takers'] = []
if self.takers is not None:
for k in self.takers:
result['takers'].append(k.to_map() if k else None)
if self.stage is not None:
result['stage'] = self.stage
if self.title is not None:
result['title'] = self.title
if self.custom_fields is not None:
result['customFields'] = self.custom_fields
if self.scene is not None:
result['scene'] = self.scene
if self.scene_context is not None:
result['sceneContext'] = self.scene_context
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('openTicketId') is not None:
self.open_ticket_id = m.get('openTicketId')
if m.get('createTime') is not None:
self.create_time = m.get('createTime')
if m.get('updateTime') is not None:
self.update_time = m.get('updateTime')
if m.get('openConversationId') is not None:
self.open_conversation_id = m.get('openConversationId')
if m.get('creator') is not None:
temp_model = GetTicketResponseBodyCreator()
self.creator = temp_model.from_map(m['creator'])
if m.get('processor') is not None:
temp_model = GetTicketResponseBodyProcessor()
self.processor = temp_model.from_map(m['processor'])
self.takers = []
if m.get('takers') is not None:
for k in m.get('takers'):
temp_model = GetTicketResponseBodyTakers()
self.takers.append(temp_model.from_map(k))
if m.get('stage') is not None:
self.stage = m.get('stage')
if m.get('title') is not None:
self.title = m.get('title')
if m.get('customFields') is not None:
self.custom_fields = m.get('customFields')
if m.get('scene') is not None:
self.scene = m.get('scene')
if m.get('sceneContext') is not None:
self.scene_context = m.get('sceneContext')
return self
class GetTicketResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetTicketResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetTicketResponseBody()
self.body = temp_model.from_map(m['body'])
return self
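# Hedged usage sketch (not part of the generated SDK; field values are
# illustrative): the TeaModel classes above round-trip between plain dicts
# and typed model objects via from_map()/to_map().
#
#   req = GetTicketRequest().from_map({
#       "openTeamId": "exampleTeamId",
#       "openTicketId": "exampleTicketId",
#   })
#   # req.to_map() gives back the same two keys:
#   #   {"openTeamId": "exampleTeamId", "openTicketId": "exampleTicketId"}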
class GetOssTempUrlHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] | |
in range(elem978):
elem979 = iprot.readString()
self.ids.append(elem979)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('getContactsForChannel_args')
if self.ids is not None:
oprot.writeFieldBegin('ids', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.ids))
for elem980 in self.ids:
oprot.writeString(elem980)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(make_hashable(self.ids))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getContactsForChannel_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None):
self.success = success
self.e = e
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_, elem981) = iprot.readListBegin()
for _ in range(elem981):
elem982 = Contact()
elem982.read(iprot)
self.success.append(elem982)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = TalkException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('getContactsForChannel_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for elem983 in self.success:
elem983.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(make_hashable(self.success))
value = (value * 31) ^ hash(make_hashable(self.e))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCallCreditProducts_args(object):
"""
Attributes:
- appStoreCode
- pgCode
- country
- language
"""
def __init__(self, appStoreCode=None, pgCode=None, country=None, language=None):
self.appStoreCode = appStoreCode
self.pgCode = pgCode
self.country = country
self.language = language
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 2:
if ftype == TType.I32:
self.appStoreCode = PaymentType(iprot.readI32())
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.pgCode = PaymentPgType(iprot.readI32())
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.country = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.language = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('getCallCreditProducts_args')
if self.appStoreCode is not None:
oprot.writeFieldBegin('appStoreCode', TType.I32, 2)
oprot.writeI32(self.appStoreCode)
oprot.writeFieldEnd()
if self.pgCode is not None:
oprot.writeFieldBegin('pgCode', TType.I32, 3)
oprot.writeI32(self.pgCode)
oprot.writeFieldEnd()
if self.country is not None:
oprot.writeFieldBegin('country', TType.STRING, 4)
oprot.writeString(self.country)
oprot.writeFieldEnd()
if self.language is not None:
oprot.writeFieldBegin('language', TType.STRING, 5)
oprot.writeString(self.language)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(make_hashable(self.appStoreCode))
value = (value * 31) ^ hash(make_hashable(self.pgCode))
value = (value * 31) ^ hash(make_hashable(self.country))
value = (value * 31) ^ hash(make_hashable(self.language))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCallCreditProducts_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None):
self.success = success
self.e = e
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_, elem984) = iprot.readListBegin()
for _ in range(elem984):
elem985 = CoinProductItem()
elem985.read(iprot)
self.success.append(elem985)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = TalkException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('getCallCreditProducts_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for elem986 in self.success:
elem986.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(make_hashable(self.success))
value = (value * 31) ^ hash(make_hashable(self.e))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCompactContacts_args(object):
"""
Attributes:
- lastModifiedTimestamp
"""
def __init__(self, lastModifiedTimestamp=None):
self.lastModifiedTimestamp = lastModifiedTimestamp
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 2:
if ftype == TType.I64:
self.lastModifiedTimestamp = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('getCompactContacts_args')
if self.lastModifiedTimestamp is not None:
oprot.writeFieldBegin('lastModifiedTimestamp', TType.I64, 2)
oprot.writeI64(self.lastModifiedTimestamp)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(make_hashable(self.lastModifiedTimestamp))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getCompactContacts_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None):
self.success = success
self.e = e
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_, elem987) = iprot.readListBegin()
for _ in range(elem987):
elem988 = CompactContact()
elem988.read(iprot)
self.success.append(elem988)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = TalkException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('getCompactContacts_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for elem989 in self.success:
elem989.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(make_hashable(self.success))
value = (value * 31) ^ hash(make_hashable(self.e))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class notifyNotiCenterEvent_args(object):
"""
Attributes:
- event
"""
def __init__(self, event=None):
self.event = event
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.event = NotiCenterEventData()
self.event.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('notifyNotiCenterEvent_args')
if self.event is not None:
oprot.writeFieldBegin('event', TType.STRUCT, 1)
self.event.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(make_hashable(self.event))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class notifyNotiCenterEvent_result(object):
"""
Attributes:
- e
"""
def __init__(self, e=None):
self.e = e
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = TalkException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('notifyNotiCenterEvent_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(make_hashable(self.e))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class isInContact_args(object):
"""
Attributes:
- mid
"""
def __init__(self, mid=None):
self.mid = mid
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 2:
if ftype == TType.STRING:
self.mid = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('isInContact_args')
if self.mid is not None:
oprot.writeFieldBegin('mid', TType.STRING, 2)
oprot.writeString(self.mid)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(make_hashable(self.mid))
return value
def __repr__(self):
L = ['%s=%r' | |
for a ConvBPDN variant with projection onto the
:math:`\ell_1` ball instead of an :math:`\ell_1` penalty.
|
.. inheritance-diagram:: ConvBPDNProjL1
:parts: 2
|
Solve the optimisation problem
.. math::
\mathrm{argmin}_\mathbf{x} \;
(1/2) \left\| \sum_m \mathbf{d}_m * \mathbf{x}_m - \mathbf{s}
\right\|_2^2 \; \text{such that} \; \sum_m \| \mathbf{x}_m \|_1
\leq \gamma
via the ADMM problem
.. math::
\mathrm{argmin}_{\mathbf{x}, \mathbf{y}} \;
(1/2) \left\| \sum_m \mathbf{d}_m * \mathbf{x}_m -
\mathbf{s} \right\|_2^2 + \iota_{C(\gamma)}
(\{\mathbf{y}_m\}) \quad \text{such that} \quad \mathbf{x}_m =
\mathbf{y}_m \;\;,
where :math:`\iota_{C(\gamma)}(\cdot)` is the indicator function
of the :math:`\ell_1` ball of radius :math:`\gamma` about the origin.
The algorithm is very similar to that for the CBPDN problem (see
:class:`ConvBPDN`), the only difference being in the replacement in the
:math:`\mathbf{y}` step of the proximal operator of the :math:`\ell_1`
norm with the projection operator of the :math:`\ell_1` norm.
In particular, the :math:`\mathbf{x}` step uses the solver from
:cite:`wohlberg-2014-efficient` for single-channel dictionaries, and the
solver from :cite:`wohlberg-2016-convolutional` for multi-channel
dictionaries.
After termination of the :meth:`solve` method, attribute :attr:`itstat`
is a list of tuples representing statistics of each iteration. The
fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``ObjFun`` : Objective function value
``Cnstr`` : Constraint violation measure
``PrimalRsdl`` : Norm of primal residual
``DualRsdl`` : Norm of dual residual
``EpsPrimal`` : Primal residual stopping tolerance
:math:`\epsilon_{\mathrm{pri}}`
``EpsDual`` : Dual residual stopping tolerance
:math:`\epsilon_{\mathrm{dua}}`
``Rho`` : Penalty parameter
``XSlvRelRes`` : Relative residual of X step solver
``Time`` : Cumulative run time
"""
class Options(GenericConvBPDN.Options):
"""ConvBPDNProjL1 algorithm options
Options are the same as those defined in
:class:`.GenericConvBPDN.Options`.
"""
defaults = copy.deepcopy(GenericConvBPDN.Options.defaults)
defaults['AutoRho'].update({'RsdlTarget': 1.0})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
ConvBPDNProjL1 algorithm options
"""
if opt is None:
opt = {}
GenericConvBPDN.Options.__init__(self, opt)
itstat_fields_objfn = ('ObjFun', 'Cnstr')
hdrtxt_objfn = ('Fnc', 'Cnstr')
hdrval_objfun = {'Fnc': 'ObjFun', 'Cnstr': 'Cnstr'}
def __init__(self, D, S, gamma, opt=None, dimK=None, dimN=2):
"""
|
**Call graph**
.. image:: ../_static/jonga/cbpdnprjl1_init.svg
:width: 20%
:target: ../_static/jonga/cbpdnprjl1_init.svg
|
Parameters
----------
D : array_like
Dictionary matrix
S : array_like
Signal vector or matrix
gamma : float
Constraint parameter
opt : :class:`ConvBPDNProjL1.Options` object
Algorithm options
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial dimensions
"""
# Set default options if necessary
if opt is None:
opt = ConvBPDNProjL1.Options()
super(ConvBPDNProjL1, self).__init__(D, S, opt, dimK=dimK, dimN=dimN)
self.gamma = self.dtype.type(gamma)
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
# NB: still needs to be worked out.
return np.zeros(ushape, dtype=self.dtype)
def ystep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`.
"""
self.Y = sp.proj_l1(self.AX + self.U, self.gamma,
axis=self.cri.axisN + (self.cri.axisC,
self.cri.axisM))
super(ConvBPDNProjL1, self).ystep()
def eval_objfn(self):
"""Compute components of regularisation function as well as total
objective function.
"""
dfd = self.obfn_dfd()
prj = sp.proj_l1(self.obfn_gvar(), self.gamma,
axis=self.cri.axisN + (self.cri.axisC,
self.cri.axisM))
cns = np.linalg.norm(prj - self.obfn_gvar())
return (dfd, cns)
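# Illustrative sketch, not part of the original module: the y step of
# ConvBPDNProjL1 relies on projection onto the l1 ball, delegated above to
# sp.proj_l1. A minimal reference implementation of that projection for a
# flat vector (the sorting-based method of Duchi et al., 2008) looks like
# the following; the function name is hypothetical and it assumes gamma > 0
# and the module-level numpy import (np) used elsewhere in this file.
def _proj_l1_ball_sketch(v, gamma):
    """Project the 1-d array v onto the l1 ball of radius gamma."""
    v = np.asarray(v, dtype=float)
    if np.abs(v).sum() <= gamma:
        return v.copy()
    # Sort magnitudes in descending order and find the soft threshold theta
    # such that sum(max(|v| - theta, 0)) == gamma.
    u = np.sort(np.abs(v))[::-1]
    cssv = np.cumsum(u)
    k = np.arange(1, u.size + 1)
    rho = np.nonzero(u * k > cssv - gamma)[0][-1]
    theta = (cssv[rho] - gamma) / (rho + 1.0)
    return np.sign(v) * np.maximum(np.abs(v) - theta, 0.0)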
class ConvTwoBlockCnstrnt(admm.ADMMTwoBlockCnstrnt):
r"""
Base class for ADMM algorithms for problems of the form
.. math::
\mathrm{argmin}_\mathbf{x} \;
g_0(D \mathbf{x} - \mathbf{s}) + g_1(\mathbf{x}) \;\;,
where :math:`D \mathbf{x} = \sum_m \mathbf{d}_m * \mathbf{x}_m`.
|
.. inheritance-diagram:: ConvTwoBlockCnstrnt
:parts: 2
|
The problem is solved via an ADMM problem of the form
.. math::
\mathrm{argmin}_{\mathbf{x},\mathbf{y}_0,\mathbf{y}_1} \;
g_0(\mathbf{y}_0) + g_1(\mathbf{y}_1) \;\text{such that}\;
\left( \begin{array}{c} D \\ I \end{array} \right) \mathbf{x}
- \left( \begin{array}{c} \mathbf{y}_0 \\ \mathbf{y}_1 \end{array}
\right) = \left( \begin{array}{c} \mathbf{s} \\
\mathbf{0} \end{array} \right) \;\;.
In this case the ADMM constraint is :math:`A\mathbf{x} + B\mathbf{y}
= \mathbf{c}` where
.. math::
A = \left( \begin{array}{c} D \\ I \end{array} \right)
\qquad B = -I \qquad \mathbf{y} = \left( \begin{array}{c}
\mathbf{y}_0 \\ \mathbf{y}_1 \end{array} \right) \qquad
\mathbf{c} = \left( \begin{array}{c} \mathbf{s} \\
\mathbf{0} \end{array} \right) \;\;.
|
The implementation of this class is substantially complicated by the
support of multi-channel signals. In the following, the number of
channels in the signal and dictionary are denoted by ``C`` and ``Cd``
respectively, the number of signals and the number of filters are
denoted by ``K`` and ``M`` respectively, ``D``, ``X``, and ``S`` denote
the dictionary, coefficient map, and signal arrays respectively, and
``Y0`` and ``Y1`` denote blocks 0 and 1 of the auxiliary (split)
variable of the ADMM problem. We need to consider three different cases:
1. Single channel signal and dictionary (``C`` = ``Cd`` = 1)
2. Multi-channel signal, single channel dictionary (``C`` > 1,
``Cd`` = 1)
3. Multi-channel signal and dictionary (``C`` = ``Cd`` > 1)
The final three (non-spatial) dimensions of the main variables in each
of these cases are as in the following table:
====== ================== ===================== ==================
Var. ``C`` = ``Cd`` = 1 ``C`` > 1, ``Cd`` = 1 ``C`` = ``Cd`` > 1
====== ================== ===================== ==================
``D`` 1 x 1 x ``M`` 1 x 1 x ``M`` ``Cd`` x 1 x ``M``
``X`` 1 x ``K`` x ``M`` ``C`` x ``K`` x ``M`` 1 x ``K`` x ``M``
``S`` 1 x ``K`` x 1 ``C`` x ``K`` x 1 ``C`` x ``K`` x 1
``Y0`` 1 x ``K`` x 1 ``C`` x ``K`` x 1 ``C`` x ``K`` x 1
``Y1`` 1 x ``K`` x ``M`` ``C`` x ``K`` x ``M`` 1 x ``K`` x ``M``
====== ================== ===================== ==================
In order to combine the block components ``Y0`` and ``Y1`` of
variable ``Y`` into a single array, we need to be able to
concatenate the two component arrays on one of the axes. The final
``M`` axis is suitable in the first two cases, but it is not
possible to concatenate ``Y0`` and ``Y1`` on the final axis in
case 3. The solution is that, in case 3, the the ``C`` and ``M``
axes of ``Y0`` are swapped before concatenating, as well as after
extracting the ``Y0`` component from the concatenated ``Y``
variable (see :meth:`.block_sep0` and :meth:`block_cat`).
|
This class specialises class :class:`.ADMMTwoBlockCnstrnt`, but remains
a base class for other classes that specialise to specific optimisation
problems.
"""
class Options(admm.ADMMTwoBlockCnstrnt.Options):
"""ConvTwoBlockCnstrnt algorithm options
Options include all of those defined in
:class:`.ADMMTwoBlockCnstrnt.Options`, together with
additional options:
``LinSolveCheck`` : Flag indicating whether to compute
relative residual of X step solver.
``HighMemSolve`` : Flag indicating whether to use a slightly
faster algorithm at the expense of higher memory usage.
``NonNegCoef`` : Flag indicating whether to force solution
to be non-negative.
``NoBndryCross`` : Flag indicating whether all solution
coefficients corresponding to filters crossing the image
boundary should be forced to zero.
"""
defaults = copy.deepcopy(admm.ADMMEqual.Options.defaults)
defaults.update({'AuxVarObj': False, 'fEvalX': True,
'gEvalY': False, 'HighMemSolve': False,
'LinSolveCheck': False, 'NonNegCoef': False,
'NoBndryCross': False, 'RelaxParam': 1.8,
'rho': 1.0, 'ReturnVar': 'Y1'})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
ConvTwoBlockCnstrnt algorithm options
"""
if opt is None:
opt = {}
admm.ADMMTwoBlockCnstrnt.Options.__init__(self, opt)
itstat_fields_objfn = ('ObjFun', 'G0Val', 'G1Val')
itstat_fields_extra = ('XSlvRelRes',)
hdrtxt_objfn = ('Fnc', 'g0', 'g1')
hdrval_objfun = {'Fnc': 'ObjFun', 'g0': 'G0Val', 'g1': 'G1Val'}
def __init__(self, D, S, opt=None, dimK=None, dimN=2):
"""
Parameters
----------
D : array_like
Dictionary array
S : array_like
Signal array
opt : :class:`ConvTwoBlockCnstrnt.Options` object
Algorithm options
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial dimensions
"""
# Infer problem dimensions and set relevant attributes of self
self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)
# Determine whether axis swapping on Y block 0 is necessary
self.y0swapaxes = bool(self.cri.C > 1 and self.cri.Cd > 1)
# Call parent class __init__
Nx = self.cri.M * self.cri.N * self.cri.K
shpY = list(self.cri.shpX)
if self.y0swapaxes:
shpY[self.cri.axisC] = 1
shpY[self.cri.axisM] += self.cri.Cd
super(ConvTwoBlockCnstrnt, self).__init__(Nx, shpY, self.cri.axisM,
self.cri.Cd, S.dtype, opt)
| |
service_account_file
request.Project = project
request.Location = location
return stub.ListContainerCluster(request).items
def to_proto(self):
resource = cluster_pb2.ContainerCluster()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.initial_node_count):
resource.initial_node_count = Primitive.to_proto(self.initial_node_count)
if ClusterMasterAuth.to_proto(self.master_auth):
resource.master_auth.CopyFrom(ClusterMasterAuth.to_proto(self.master_auth))
else:
resource.ClearField("master_auth")
if Primitive.to_proto(self.logging_service):
resource.logging_service = Primitive.to_proto(self.logging_service)
if Primitive.to_proto(self.monitoring_service):
resource.monitoring_service = Primitive.to_proto(self.monitoring_service)
if Primitive.to_proto(self.network):
resource.network = Primitive.to_proto(self.network)
if Primitive.to_proto(self.cluster_ipv4_cidr):
resource.cluster_ipv4_cidr = Primitive.to_proto(self.cluster_ipv4_cidr)
if ClusterAddonsConfig.to_proto(self.addons_config):
resource.addons_config.CopyFrom(
ClusterAddonsConfig.to_proto(self.addons_config)
)
else:
resource.ClearField("addons_config")
if Primitive.to_proto(self.subnetwork):
resource.subnetwork = Primitive.to_proto(self.subnetwork)
if ClusterNodePoolsArray.to_proto(self.node_pools):
resource.node_pools.extend(ClusterNodePoolsArray.to_proto(self.node_pools))
if Primitive.to_proto(self.locations):
resource.locations.extend(Primitive.to_proto(self.locations))
if Primitive.to_proto(self.enable_kubernetes_alpha):
resource.enable_kubernetes_alpha = Primitive.to_proto(
self.enable_kubernetes_alpha
)
if Primitive.to_proto(self.resource_labels):
resource.resource_labels = Primitive.to_proto(self.resource_labels)
if Primitive.to_proto(self.label_fingerprint):
resource.label_fingerprint = Primitive.to_proto(self.label_fingerprint)
if ClusterLegacyAbac.to_proto(self.legacy_abac):
resource.legacy_abac.CopyFrom(ClusterLegacyAbac.to_proto(self.legacy_abac))
else:
resource.ClearField("legacy_abac")
if ClusterNetworkPolicy.to_proto(self.network_policy):
resource.network_policy.CopyFrom(
ClusterNetworkPolicy.to_proto(self.network_policy)
)
else:
resource.ClearField("network_policy")
if ClusterIPAllocationPolicy.to_proto(self.ip_allocation_policy):
resource.ip_allocation_policy.CopyFrom(
ClusterIPAllocationPolicy.to_proto(self.ip_allocation_policy)
)
else:
resource.ClearField("ip_allocation_policy")
if ClusterMasterAuthorizedNetworksConfig.to_proto(
self.master_authorized_networks_config
):
resource.master_authorized_networks_config.CopyFrom(
ClusterMasterAuthorizedNetworksConfig.to_proto(
self.master_authorized_networks_config
)
)
else:
resource.ClearField("master_authorized_networks_config")
if ClusterBinaryAuthorization.to_proto(self.binary_authorization):
resource.binary_authorization.CopyFrom(
ClusterBinaryAuthorization.to_proto(self.binary_authorization)
)
else:
resource.ClearField("binary_authorization")
if ClusterAutoscaling.to_proto(self.autoscaling):
resource.autoscaling.CopyFrom(ClusterAutoscaling.to_proto(self.autoscaling))
else:
resource.ClearField("autoscaling")
if ClusterNetworkConfig.to_proto(self.network_config):
resource.network_config.CopyFrom(
ClusterNetworkConfig.to_proto(self.network_config)
)
else:
resource.ClearField("network_config")
if ClusterMaintenancePolicy.to_proto(self.maintenance_policy):
resource.maintenance_policy.CopyFrom(
ClusterMaintenancePolicy.to_proto(self.maintenance_policy)
)
else:
resource.ClearField("maintenance_policy")
if ClusterDefaultMaxPodsConstraint.to_proto(self.default_max_pods_constraint):
resource.default_max_pods_constraint.CopyFrom(
ClusterDefaultMaxPodsConstraint.to_proto(
self.default_max_pods_constraint
)
)
else:
resource.ClearField("default_max_pods_constraint")
if ClusterResourceUsageExportConfig.to_proto(self.resource_usage_export_config):
resource.resource_usage_export_config.CopyFrom(
ClusterResourceUsageExportConfig.to_proto(
self.resource_usage_export_config
)
)
else:
resource.ClearField("resource_usage_export_config")
if ClusterAuthenticatorGroupsConfig.to_proto(self.authenticator_groups_config):
resource.authenticator_groups_config.CopyFrom(
ClusterAuthenticatorGroupsConfig.to_proto(
self.authenticator_groups_config
)
)
else:
resource.ClearField("authenticator_groups_config")
if ClusterPrivateClusterConfig.to_proto(self.private_cluster_config):
resource.private_cluster_config.CopyFrom(
ClusterPrivateClusterConfig.to_proto(self.private_cluster_config)
)
else:
resource.ClearField("private_cluster_config")
if ClusterDatabaseEncryption.to_proto(self.database_encryption):
resource.database_encryption.CopyFrom(
ClusterDatabaseEncryption.to_proto(self.database_encryption)
)
else:
resource.ClearField("database_encryption")
if ClusterVerticalPodAutoscaling.to_proto(self.vertical_pod_autoscaling):
resource.vertical_pod_autoscaling.CopyFrom(
ClusterVerticalPodAutoscaling.to_proto(self.vertical_pod_autoscaling)
)
else:
resource.ClearField("vertical_pod_autoscaling")
if ClusterShieldedNodes.to_proto(self.shielded_nodes):
resource.shielded_nodes.CopyFrom(
ClusterShieldedNodes.to_proto(self.shielded_nodes)
)
else:
resource.ClearField("shielded_nodes")
if Primitive.to_proto(self.master_version):
resource.master_version = Primitive.to_proto(self.master_version)
if Primitive.to_proto(self.location):
resource.location = Primitive.to_proto(self.location)
if Primitive.to_proto(self.enable_tpu):
resource.enable_tpu = Primitive.to_proto(self.enable_tpu)
if ClusterConditionsArray.to_proto(self.conditions):
resource.conditions.extend(ClusterConditionsArray.to_proto(self.conditions))
if ClusterAutopilot.to_proto(self.autopilot):
resource.autopilot.CopyFrom(ClusterAutopilot.to_proto(self.autopilot))
else:
resource.ClearField("autopilot")
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
if ClusterNodeConfig.to_proto(self.node_config):
resource.node_config.CopyFrom(ClusterNodeConfig.to_proto(self.node_config))
else:
resource.ClearField("node_config")
if ClusterReleaseChannel.to_proto(self.release_channel):
resource.release_channel.CopyFrom(
ClusterReleaseChannel.to_proto(self.release_channel)
)
else:
resource.ClearField("release_channel")
if ClusterWorkloadIdentityConfig.to_proto(self.workload_identity_config):
resource.workload_identity_config.CopyFrom(
ClusterWorkloadIdentityConfig.to_proto(self.workload_identity_config)
)
else:
resource.ClearField("workload_identity_config")
if ClusterNotificationConfig.to_proto(self.notification_config):
resource.notification_config.CopyFrom(
ClusterNotificationConfig.to_proto(self.notification_config)
)
else:
resource.ClearField("notification_config")
if ClusterConfidentialNodes.to_proto(self.confidential_nodes):
resource.confidential_nodes.CopyFrom(
ClusterConfidentialNodes.to_proto(self.confidential_nodes)
)
else:
resource.ClearField("confidential_nodes")
if Primitive.to_proto(self.initial_cluster_version):
resource.initial_cluster_version = Primitive.to_proto(
self.initial_cluster_version
)
if Primitive.to_proto(self.instance_group_urls):
resource.instance_group_urls.extend(
Primitive.to_proto(self.instance_group_urls)
)
return resource
class ClusterMasterAuth(object):
def __init__(
self,
username: str = None,
password: str = None,
client_certificate_config: dict = None,
cluster_ca_certificate: str = None,
client_certificate: str = None,
client_key: str = None,
):
self.username = username
self.password = password
self.client_certificate_config = client_certificate_config
self.cluster_ca_certificate = cluster_ca_certificate
self.client_certificate = client_certificate
self.client_key = client_key
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterMasterAuth()
if Primitive.to_proto(resource.username):
res.username = Primitive.to_proto(resource.username)
if Primitive.to_proto(resource.password):
res.password = Primitive.to_proto(resource.password)
if ClusterMasterAuthClientCertificateConfig.to_proto(
resource.client_certificate_config
):
res.client_certificate_config.CopyFrom(
ClusterMasterAuthClientCertificateConfig.to_proto(
resource.client_certificate_config
)
)
else:
res.ClearField("client_certificate_config")
if Primitive.to_proto(resource.cluster_ca_certificate):
res.cluster_ca_certificate = Primitive.to_proto(
resource.cluster_ca_certificate
)
if Primitive.to_proto(resource.client_certificate):
res.client_certificate = Primitive.to_proto(resource.client_certificate)
if Primitive.to_proto(resource.client_key):
res.client_key = Primitive.to_proto(resource.client_key)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterMasterAuth(
username=Primitive.from_proto(resource.username),
password=Primitive.from_proto(resource.password),
client_certificate_config=ClusterMasterAuthClientCertificateConfig.from_proto(
resource.client_certificate_config
),
cluster_ca_certificate=Primitive.from_proto(
resource.cluster_ca_certificate
),
client_certificate=Primitive.from_proto(resource.client_certificate),
client_key=Primitive.from_proto(resource.client_key),
)
class ClusterMasterAuthArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterMasterAuth.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterMasterAuth.from_proto(i) for i in resources]
class ClusterMasterAuthClientCertificateConfig(object):
def __init__(self, issue_client_certificate: bool = None):
self.issue_client_certificate = issue_client_certificate
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterMasterAuthClientCertificateConfig()
if Primitive.to_proto(resource.issue_client_certificate):
res.issue_client_certificate = Primitive.to_proto(
resource.issue_client_certificate
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterMasterAuthClientCertificateConfig(
issue_client_certificate=Primitive.from_proto(
resource.issue_client_certificate
),
)
class ClusterMasterAuthClientCertificateConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterMasterAuthClientCertificateConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [
ClusterMasterAuthClientCertificateConfig.from_proto(i) for i in resources
]
class ClusterAddonsConfig(object):
def __init__(
self,
http_load_balancing: dict = None,
horizontal_pod_autoscaling: dict = None,
kubernetes_dashboard: dict = None,
network_policy_config: dict = None,
cloud_run_config: dict = None,
dns_cache_config: dict = None,
config_connector_config: dict = None,
gce_persistent_disk_csi_driver_config: dict = None,
):
self.http_load_balancing = http_load_balancing
self.horizontal_pod_autoscaling = horizontal_pod_autoscaling
self.kubernetes_dashboard = kubernetes_dashboard
self.network_policy_config = network_policy_config
self.cloud_run_config = cloud_run_config
self.dns_cache_config = dns_cache_config
self.config_connector_config = config_connector_config
self.gce_persistent_disk_csi_driver_config = (
gce_persistent_disk_csi_driver_config
)
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterAddonsConfig()
if ClusterAddonsConfigHttpLoadBalancing.to_proto(resource.http_load_balancing):
res.http_load_balancing.CopyFrom(
ClusterAddonsConfigHttpLoadBalancing.to_proto(
resource.http_load_balancing
)
)
else:
res.ClearField("http_load_balancing")
if ClusterAddonsConfigHorizontalPodAutoscaling.to_proto(
resource.horizontal_pod_autoscaling
):
res.horizontal_pod_autoscaling.CopyFrom(
ClusterAddonsConfigHorizontalPodAutoscaling.to_proto(
resource.horizontal_pod_autoscaling
)
)
else:
res.ClearField("horizontal_pod_autoscaling")
if ClusterAddonsConfigKubernetesDashboard.to_proto(
resource.kubernetes_dashboard
):
res.kubernetes_dashboard.CopyFrom(
ClusterAddonsConfigKubernetesDashboard.to_proto(
resource.kubernetes_dashboard
)
)
else:
res.ClearField("kubernetes_dashboard")
if ClusterAddonsConfigNetworkPolicyConfig.to_proto(
resource.network_policy_config
):
res.network_policy_config.CopyFrom(
ClusterAddonsConfigNetworkPolicyConfig.to_proto(
resource.network_policy_config
)
)
else:
res.ClearField("network_policy_config")
if ClusterAddonsConfigCloudRunConfig.to_proto(resource.cloud_run_config):
res.cloud_run_config.CopyFrom(
ClusterAddonsConfigCloudRunConfig.to_proto(resource.cloud_run_config)
)
else:
res.ClearField("cloud_run_config")
if ClusterAddonsConfigDnsCacheConfig.to_proto(resource.dns_cache_config):
res.dns_cache_config.CopyFrom(
ClusterAddonsConfigDnsCacheConfig.to_proto(resource.dns_cache_config)
)
else:
res.ClearField("dns_cache_config")
if ClusterAddonsConfigConfigConnectorConfig.to_proto(
resource.config_connector_config
):
res.config_connector_config.CopyFrom(
ClusterAddonsConfigConfigConnectorConfig.to_proto(
resource.config_connector_config
)
)
else:
res.ClearField("config_connector_config")
if ClusterAddonsConfigGcePersistentDiskCsiDriverConfig.to_proto(
resource.gce_persistent_disk_csi_driver_config
):
res.gce_persistent_disk_csi_driver_config.CopyFrom(
ClusterAddonsConfigGcePersistentDiskCsiDriverConfig.to_proto(
resource.gce_persistent_disk_csi_driver_config
)
)
else:
res.ClearField("gce_persistent_disk_csi_driver_config")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterAddonsConfig(
http_load_balancing=ClusterAddonsConfigHttpLoadBalancing.from_proto(
resource.http_load_balancing
),
horizontal_pod_autoscaling=ClusterAddonsConfigHorizontalPodAutoscaling.from_proto(
resource.horizontal_pod_autoscaling
),
kubernetes_dashboard=ClusterAddonsConfigKubernetesDashboard.from_proto(
resource.kubernetes_dashboard
),
network_policy_config=ClusterAddonsConfigNetworkPolicyConfig.from_proto(
resource.network_policy_config
),
cloud_run_config=ClusterAddonsConfigCloudRunConfig.from_proto(
resource.cloud_run_config
),
dns_cache_config=ClusterAddonsConfigDnsCacheConfig.from_proto(
resource.dns_cache_config
),
config_connector_config=ClusterAddonsConfigConfigConnectorConfig.from_proto(
resource.config_connector_config
),
gce_persistent_disk_csi_driver_config=ClusterAddonsConfigGcePersistentDiskCsiDriverConfig.from_proto(
resource.gce_persistent_disk_csi_driver_config
),
)
class ClusterAddonsConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterAddonsConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterAddonsConfig.from_proto(i) for i in resources]
class ClusterAddonsConfigHttpLoadBalancing(object):
def __init__(self, disabled: bool = None):
self.disabled = disabled
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterAddonsConfigHttpLoadBalancing()
if Primitive.to_proto(resource.disabled):
res.disabled = Primitive.to_proto(resource.disabled)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterAddonsConfigHttpLoadBalancing(
disabled=Primitive.from_proto(resource.disabled),
)
class ClusterAddonsConfigHttpLoadBalancingArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterAddonsConfigHttpLoadBalancing.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterAddonsConfigHttpLoadBalancing.from_proto(i) for i in resources]
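# Illustrative sketch only, not part of the generated converters: every wrapper
# class above round-trips through its protobuf message via to_proto/from_proto.
# cluster_pb2 is assumed to be the generated module imported earlier in this file.
def _example_http_load_balancing_round_trip():
    cfg = ClusterAddonsConfigHttpLoadBalancing(disabled=True)
    msg = ClusterAddonsConfigHttpLoadBalancing.to_proto(cfg)
    restored = ClusterAddonsConfigHttpLoadBalancing.from_proto(msg)
    # restored.disabled mirrors cfg.disabled for any truthy flag
    return restored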
class ClusterAddonsConfigHorizontalPodAutoscaling(object):
def __init__(self, disabled: bool = None):
self.disabled = disabled
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterAddonsConfigHorizontalPodAutoscaling()
if Primitive.to_proto(resource.disabled):
res.disabled = Primitive.to_proto(resource.disabled)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterAddonsConfigHorizontalPodAutoscaling(
disabled=Primitive.from_proto(resource.disabled),
)
class ClusterAddonsConfigHorizontalPodAutoscalingArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
ClusterAddonsConfigHorizontalPodAutoscaling.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
ClusterAddonsConfigHorizontalPodAutoscaling.from_proto(i) for i in resources
]
class ClusterAddonsConfigKubernetesDashboard(object):
def __init__(self, disabled: bool = None):
self.disabled = disabled
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterAddonsConfigKubernetesDashboard()
if Primitive.to_proto(resource.disabled):
res.disabled = Primitive.to_proto(resource.disabled)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterAddonsConfigKubernetesDashboard(
disabled=Primitive.from_proto(resource.disabled),
)
class ClusterAddonsConfigKubernetesDashboardArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterAddonsConfigKubernetesDashboard.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterAddonsConfigKubernetesDashboard.from_proto(i) for i in resources]
class ClusterAddonsConfigNetworkPolicyConfig(object):
def __init__(self, disabled: bool = None):
self.disabled = disabled
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterAddonsConfigNetworkPolicyConfig()
if Primitive.to_proto(resource.disabled):
res.disabled = Primitive.to_proto(resource.disabled)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterAddonsConfigNetworkPolicyConfig(
disabled=Primitive.from_proto(resource.disabled),
)
class ClusterAddonsConfigNetworkPolicyConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterAddonsConfigNetworkPolicyConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterAddonsConfigNetworkPolicyConfig.from_proto(i) for i in resources]
class ClusterAddonsConfigCloudRunConfig(object):
def __init__(self, disabled: bool = None, load_balancer_type: str = None):
self.disabled = disabled
self.load_balancer_type = load_balancer_type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterAddonsConfigCloudRunConfig()
if Primitive.to_proto(resource.disabled):
res.disabled = Primitive.to_proto(resource.disabled)
if ClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum.to_proto(
resource.load_balancer_type
):
res.load_balancer_type = ClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum.to_proto(
resource.load_balancer_type
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterAddonsConfigCloudRunConfig(
disabled=Primitive.from_proto(resource.disabled),
load_balancer_type=ClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum.from_proto(
resource.load_balancer_type
),
)
class ClusterAddonsConfigCloudRunConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterAddonsConfigCloudRunConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterAddonsConfigCloudRunConfig.from_proto(i) for i in resources]
class ClusterAddonsConfigDnsCacheConfig(object):
def __init__(self, enabled: bool = None):
self.enabled = enabled
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterAddonsConfigDnsCacheConfig()
if Primitive.to_proto(resource.enabled):
res.enabled = Primitive.to_proto(resource.enabled)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterAddonsConfigDnsCacheConfig(
enabled=Primitive.from_proto(resource.enabled),
)
class ClusterAddonsConfigDnsCacheConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterAddonsConfigDnsCacheConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ClusterAddonsConfigDnsCacheConfig.from_proto(i) for i in resources]
class ClusterAddonsConfigConfigConnectorConfig(object):
def __init__(self, enabled: bool = None):
self.enabled = enabled
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterAddonsConfigConfigConnectorConfig()
if Primitive.to_proto(resource.enabled):
res.enabled = Primitive.to_proto(resource.enabled)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterAddonsConfigConfigConnectorConfig(
enabled=Primitive.from_proto(resource.enabled),
)
class ClusterAddonsConfigConfigConnectorConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ClusterAddonsConfigConfigConnectorConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [
ClusterAddonsConfigConfigConnectorConfig.from_proto(i) for i in resources
]
class ClusterAddonsConfigGcePersistentDiskCsiDriverConfig(object):
def __init__(self, enabled: bool = None):
self.enabled = enabled
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = cluster_pb2.ContainerClusterAddonsConfigGcePersistentDiskCsiDriverConfig()
if Primitive.to_proto(resource.enabled):
res.enabled = Primitive.to_proto(resource.enabled)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ClusterAddonsConfigGcePersistentDiskCsiDriverConfig(
enabled=Primitive.from_proto(resource.enabled),
)
class ClusterAddonsConfigGcePersistentDiskCsiDriverConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
ClusterAddonsConfigGcePersistentDiskCsiDriverConfig.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
        return [
            ClusterAddonsConfigGcePersistentDiskCsiDriverConfig.from_proto(i)
            for i in resources
        ]
"""
Binance Pybot v1.1 (21-9-5)
https://github.com/rulibar/binance-pybot
"""
import os
import time
from calendar import timegm as timegm
import numpy
import random
import logging
import talib
from binance.client import Client
# instance vars
api_key = ""
api_secret = ""
client = Client(api_key, api_secret, tld='us')
asset = "ETH"; base = "BTC"
interval_mins = 30
# strategy vars
storage = dict()
# set up logger
def set_log_file():
# Set up the log folders
gmt = time.gmtime()
yy = str(gmt.tm_year)[2:]; mm = str(gmt.tm_mon); dd = str(gmt.tm_mday)
if len(mm) == 1: mm = "0" + mm
if len(dd) == 1: dd = "0" + dd
path = "./logs/"
if not os.path.isdir(path): os.mkdir(path)
path += "{}/".format(yy + mm)
if not os.path.isdir(path): os.mkdir(path)
# Set the log destination and format
fileh = logging.FileHandler("./logs/{}/{}.log".format(yy + mm, yy + mm + dd), "a")
formatter = logging.Formatter("%(levelname)s %(asctime)s - %(message)s")
fileh.setFormatter(formatter)
logger.handlers = [fileh]
logging.basicConfig(level=logging.INFO)
logging.Formatter.converter = time.gmtime
logger = logging.getLogger()
set_log_file()
# set up trading bot
def fix_dec(float_in):
float_out = "{:.8f}".format(float_in)
while float_out[-1] == "0": float_out = float_out[:-1]
if float_out[-1] == ".": float_out = float_out[:-1]
return float_out
def shrink_list(list_in, size):
if len(list_in) > size: return list_in[-size:]
return list_in
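# Quick reference for the two helpers above (illustrative values, not executed):
#   fix_dec(0.05) -> "0.05"    trailing zeros and a dangling "." are trimmed
#   fix_dec(1.0)  -> "1"
#   shrink_list([1, 2, 3, 4], 2) -> [3, 4]    keeps only the newest `size` items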
class Portfolio:
def __init__(self, candle, positions, funds):
self.ts = candle['ts_end']
self.asset = positions['asset'][1]
self.base = positions['base'][1]
self.price = candle['close']
self.positionValue = self.price * self.asset
self.size = self.base + self.positionValue
self.funds = funds
if funds > self.size or funds == 0: self.funds = float(self.size)
self.sizeT = float(self.funds)
self.rin = self.price * self.asset / self.size
self.rinT = self.price * self.asset / self.sizeT
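# Minimal sketch of how Portfolio condenses a candle plus balances into ratios.
# Assumption: the positions dict follows the same [?, total] layout used elsewhere
# in this bot; Portfolio itself only reads index 1 of each list.
def _example_portfolio():
    candle = {"ts_end": 1630000000000, "close": 0.065}
    positions = {"asset": [2.0, 2.0], "base": [0.1, 0.1]}
    p = Portfolio(candle, positions, funds=0.0)
    # size = 0.1 + 2.0 * 0.065 = 0.23 and rin = 0.13 / 0.23, i.e. about 57% invested
    return p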
class Instance:
def __init__(self, asset, base, interval_mins):
self.next_log = 0
self.ticks = 0; self.days = 0; self.trades = 0
self.exchange = "binance"
self.base = str(base)
self.asset = str(asset)
self.pair = self.asset + self.base
self.interval = int(interval_mins)
logger.info("New trader instance started on {} {} {}m.".format(self.exchange.title(), self.pair, self.interval))
self.get_params()
self.candles_raw = self._get_candles_raw()
self.candles = self._get_candles()
self.candles_raw = shrink_list(self.candles_raw, 2 * self.interval)
self.candles_raw_unused = self._get_raw_unused()
self.deposits_pending = set()
self.withdrawals_pending = set()
self.earliest_pending = 0
self.candle_start = None
self.positions_start = None
self.positions_init_ts = 0
self.positions = self.get_positions()
self.positions_f = {'asset': list(self.positions['asset'])}
self.positions_f['base'] = list(self.positions['base'])
self.positions_t = {'asset': list(self.positions['asset'])}
self.positions_t['base'] = list(self.positions['base'])
p = Portfolio(self.candles[-1], self.positions, float(self.params['funds']))
self.last_order = {"type": "none", "amt": 0, "pt": self.candles[-1]['close']}
self.signal = {"rinTarget": p.rinT, "rinTargetLast": p.rinT, "position": "none", "status": 0, "apc": p.price, "target": p.price, "stop": p.price}
self.performance = {"bh": 0, "change": 0, "W": 0, "L": 0, "wSum": 0, "lSum": 0, "w": 0, "l": 0, "be": 0, "aProfits": 0, "bProfits": 0, "cProfits": 0}
self.init(p)
def _get_candles_raw(self):
# get enough 1m candles to create 600 historical candles
data = self.get_historical_candles(self.pair, "1m", 600 * self.interval)
data.pop()
for i in range(len(data)): data[i] = self.get_candle(data[i])
return data
def _get_candles(self):
# convert historical 1m candles into historical candles
candles = list(); candle_new = dict()
candles_raw_clone = list(self.candles_raw)
for i in range(self.interval - 2): candles_raw_clone.pop()
for i in range(len(candles_raw_clone)):
order = i % self.interval
candle_raw = candles_raw_clone[- 1 - i]
if order == 0:
candle_new = candle_raw
continue
if candle_raw["high"] > candle_new["high"]:
candle_new["high"] = candle_raw["high"]
if candle_raw["low"] < candle_new["low"]:
candle_new["low"] = candle_raw["low"]
candle_new["volume"] += candle_raw["volume"]
if order == self.interval - 1:
candle_new["open"] = candle_raw["open"]
candle_new["ts_start"] = candle_raw["ts_start"]
candles.append(candle_new)
return candles[::-1]
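    # Each run of `interval` raw 1m candles becomes one candle: the oldest raw candle
    # supplies open/ts_start, the newest supplies close/ts_end, high/low are the
    # extremes of the run, and volume is summed.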
def _get_raw_unused(self):
# get unused historical 1m candles
raw_unused = -1
str_out = str()
data = self.candles_raw[-2 * self.interval:]
for i in range(len(data)):
candle_raw = data[i]
if raw_unused > -1:
raw_unused += 1
if candle_raw["ts_end"] == self.candles[-1]["ts_end"]:
raw_unused += 1
continue
if raw_unused > 0: str_out += " {}\n".format(candle_raw)
return raw_unused
def get_historical_candles_method(self, symbol, interval, start_str):
data, err = list(), str()
try: data = client.get_historical_klines(symbol, interval, start_str)
except Exception as e: err = e
return data, err
def get_historical_candles(self, symbol, interval, n_candles):
tries = 0
while True:
data, err = self.get_historical_candles_method(symbol, interval, "{} minutes ago UTC".format(n_candles))
tries += 1
if len(data) == 0:
if tries <= 3:
err_msg = "Error getting historical candle data. Retrying in 5 seconds..."
if err != "": err_msg += "\n'{}'".format(err)
logger.error(err_msg)
if tries == 3: logger.error("(Future repeats of this error hidden to avoid spam.)")
time.sleep(5)
else: break
if tries > 3: logger.error("Failed to get historical candle data {} times.".format(tries - 1))
return data
def get_candle(self, data):
# data is a kline list from Binance
candle = {
"ts_start": int(data[0]),
"open": round(float(data[1]), 8),
"high": round(float(data[2]), 8),
"low": round(float(data[3]), 8),
"close": round(float(data[4]), 8),
"volume": round(float(data[5]), 8),
"ts_end": int(data[6])}
return candle
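    # Raw kline layout consumed above (indices 0-6 of the Binance klines payload):
    # [open_time_ms, open, high, low, close, volume, close_time_ms, ...]; the numeric
    # fields typically arrive as strings, hence the float() casts and 8 dp rounding.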
def limit_buy(self, amt, pt):
try:
logger.warning("Trying to buy {} {} for {} {}. (price: {})".format(fix_dec(amt), self.asset, fix_dec(round(amt * pt, self.pt_dec)), self.base, fix_dec(pt)))
self.last_order = {"type": "buy", "amt": amt, "pt": pt}
client.order_limit_buy(symbol = self.pair, quantity = "{:.8f}".format(amt), price = "{:.8f}".format(pt))
except Exception as e:
logger.error("Error buying.\n'{}'".format(e))
def limit_sell(self, amt, pt):
try:
logger.warning("Trying to sell {} {} for {} {}. (price: {})".format(fix_dec(amt), self.asset, fix_dec(round(amt * pt, self.pt_dec)), self.base, fix_dec(pt)))
self.last_order = {"type": "sell", "amt": amt, "pt": pt}
client.order_limit_sell(symbol = self.pair, quantity = "{:.8f}".format(amt), price = "{:.8f}".format(pt))
except Exception as e:
logger.error("Error selling.\n'{}'".format(e))
def bso(self, p):
# buy/sell/other
s = self.signal
rbuy = s['rinTarget'] - s['rinTargetLast']
order_size = 0
if rbuy * p.asset >= 0:
order_size = abs(rbuy * p.funds)
if order_size > p.base: order_size = p.base
if rbuy * p.asset < 0:
rbuy_asset = rbuy / s['rinTargetLast']
order_size = abs(rbuy_asset * p.asset * p.price)
if order_size < self.min_order: order_size = 0
if order_size > 0:
if rbuy > 0: pt = (1 + 0.0015) * p.price
else: pt = (1 - 0.0015) * p.price
pt = round(pt, self.pt_dec)
if rbuy > 0: amt = order_size / pt
else: amt = order_size / p.price
amt = round(0.995 * amt * 10**self.amt_dec - 2) / 10**self.amt_dec
if rbuy > 0: self.limit_buy(amt, pt)
if rbuy < 0: self.limit_sell(amt, pt)
if rbuy == 0: order_size = 0
if order_size == 0:
if self.ticks == 1: logger.info("Waiting for a signal to trade...")
self.last_order = {"type": "none", "amt": 0, "pt": p.price}
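    # Worked example (hypothetical numbers): if rinTarget moves from 0.0 to 0.5 with
    # funds of 0.2 BTC and no open position, order_size is 0.1 BTC and bso places a
    # limit buy at 0.15% above the last close (pt = 1.0015 * price), provided the
    # order size clears self.min_order.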
def close_orders(self):
# close open orders
try:
orders = client.get_open_orders(symbol = self.pair)
for order in orders:
client.cancel_order(symbol = self.pair, orderId = order['orderId'])
except Exception as e:
logger.error("Error closing open orders.\n'{}'".format(e))
def update_vars(self):
# Get preliminary vars
self.ticks += 1
self.days = (self.ticks - 1) * self.interval / (60 * 24)
try: data = client.get_symbol_info(self.pair)['filters']
except Exception as e:
logger.error("Error getting symbol info.\n'{}'".format(e))
return
min_order = float(data[2]['minQty']) * self.candles[-1]['close']
self.min_order = 3 * max(min_order, float(data[3]['minNotional']))
amt_dec = 8
for char in reversed(data[2]['stepSize']):
if char == "0": amt_dec -= 1
else: break
self.amt_dec = amt_dec
pt_dec = 8
for char in reversed(data[0]['tickSize']):
if char == "0": pt_dec -= 1
else: break
self.pt_dec = pt_dec
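    # Example: a stepSize of "0.00100000" has five trailing zeros, so amt_dec becomes
    # 8 - 5 = 3 (quantities rounded to three decimals); tickSize sets pt_dec the same way.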
def get_params(self):
# import and process params from config.txt
params = dict()
with open("config.txt") as cfg:
par = [l.split()[0] for l in cfg.read().split("\n")[2:-1]]
for p in par:
p = p.split("=")
if len(p) != 2: continue
params[str(p[0])] = str(p[1])
# check values
funds = float(params['funds'])
if funds < 0:
logger.warning("Warning! Maximum amount to invest should be zero or greater.")
params['funds'] = "0"
logs_per_day = float(params['logs_per_day'])
if logs_per_day < 0:
logger.warning("Warning! Logs per day should be zero or greater.")
params['logs_per_day'] = "1"
log_dws = str(params['log_dws'])
if log_dws not in {"yes", "no"}:
logger.warning("Warning! Log deposits and withdrawals set to 'yes'.")
params['log_dws'] = "yes"
# check for additions and removals
if self.ticks == 0: self.params = dict()
keys_old = {key for key in self.params}
keys_new = {key for key in params}
keys_added = {key for key in keys_new if key not in keys_old}
keys_removed = {key for key in keys_old if key not in keys_new}
if len(keys_added) > 0:
logger.info("{} parameter(s) added.".format(len(keys_added)))
for key in keys_added: logger.info(" \"{}\": {}".format(key, params[key]))
if len(keys_removed) > 0:
logger.info("{} parameter(s) removed.".format(len(keys_removed)))
for key in keys_removed: logger.info(" \"{}\"".format(key))
# check for changes
keys_remaining = {key for key in keys_old if key in keys_new}
keys_changed = set()
for key in keys_remaining:
if params[key] != self.params[key]: keys_changed.add(key)
if self.ticks == 0:
keys_changed.add('funds'); keys_changed.add('logs_per_day'); keys_changed.add('log_dws')
if "funds" in keys_changed:
if params['funds'] == "0": logger.info("No maximum investment amount specified.")
else: logger.info("Maximum investment amount set to {} {}.".format(params['funds'], self.base))
self.params['funds'] = params['funds']
keys_changed.remove('funds')
if "logs_per_day" in keys_changed:
            if params['logs_per_day'] == "0": logger.info("Log updates turned off.")
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import test_utils as tu
from tensorflow.compiler.plugin.poplar.ops import gen_sendrecv_ops
from tensorflow.compiler.tests import xla_test
from tensorflow.python import ipu
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def next_feed_id():
result = 'feed' + str(next_feed_id.feed_count)
next_feed_id.feed_count += 1
return result
next_feed_id.feed_count = 0
def _configure_replicated_ipu_system():
cfg = ipu.utils.create_ipu_config(profiling=True)
cfg = ipu.utils.set_optimization_options(
cfg,
max_cross_replica_sum_buffer_size=10000,
max_reduce_scatter_buffer_size=10000)
cfg = ipu.utils.set_ipu_model_options(cfg, compile_ipu_code=False)
cfg = ipu.utils.auto_select_ipus(cfg, 2)
ipu.utils.configure_ipu_system(cfg)
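# Note: auto_select_ipus(cfg, 2) attaches two IPUs, which is what allows the graphs
# below to run with two replicas; the cross_replica_sum assertions are therefore
# sums over two replicas.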
class ReplicatedGraphTest(xla_test.XLATestCase):
def testCreateSimpleReplicatedGraph(self):
with self.session() as sess:
def my_graph(inp):
with ops.device("/device:IPU:0"):
x = inp + inp
return [ipu.ops.cross_replica_ops.cross_replica_sum(x)]
with ops.device('cpu'):
inp = array_ops.placeholder(np.float32, [4], name="data")
out = ipu.ipu_compiler.compile(my_graph, [inp])
_configure_replicated_ipu_system()
sess.run(variables.global_variables_initializer())
data = np.ones([4])
fd = {inp: data}
result = sess.run(out, fd)
      # Each replica computes inp + inp = 2 * data; cross_replica_sum over the
      # 2 replicas therefore yields 4 * data.
self.assertAllClose(result[0], 4 * data)
def testCrossReplicaSumDifferentTypes(self):
with self.session() as sess:
def my_graph(x, y):
with ops.device("/device:IPU:0"):
x = x + x
y = y + y + 1
return [
ipu.ops.cross_replica_ops.cross_replica_sum(x),
ipu.ops.cross_replica_ops.cross_replica_sum(y)
]
with ops.device('cpu'):
x = array_ops.placeholder(np.float32, [4], name="data")
y = array_ops.placeholder(np.int32, [4], name="data")
out = ipu.ipu_compiler.compile(my_graph, [x, y])
_configure_replicated_ipu_system()
sess.run(variables.global_variables_initializer())
ones = np.ones([4])
fd = {x: ones, y: ones}
result = sess.run(out, fd)
      # Per replica: x + x = 2 and y + y + 1 = 3; summed over the 2 replicas the
      # outputs are 4 * ones and 6 * ones.
self.assertAllClose(result[0], 4 * ones)
self.assertAllClose(result[1], 6 * ones)
def testCreateSimpleReplicatedGraphVariable(self):
with self.session() as sess:
def my_graph():
with ops.device("/device:IPU:0"):
with variable_scope.variable_scope("", use_resource=True):
x = variable_scope.get_variable(
"x",
dtype=np.float32,
shape=[4],
initializer=init_ops.constant_initializer(10.0))
x = x + x
return [ipu.ops.cross_replica_ops.cross_replica_sum(x)]
out = ipu.ipu_compiler.compile(my_graph, [])
_configure_replicated_ipu_system()
sess.run(variables.global_variables_initializer())
result = sess.run(out, {})
      # Each replica computes x + x = 20; cross_replica_sum over the 2 replicas
      # yields 40, i.e. 4 * the initial value of 10.
self.assertAllClose(result[0], 4 * np.full([4], 10.0))
def testCreateSimpleReplicatedInfeedOutfeed(self):
with self.session() as sess:
shape = [2]
dataset = tu.create_single_increasing_dataset(3, shape)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(
dataset, feed_name=next_feed_id(), replication_factor=2)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
feed_name=next_feed_id(), replication_factor=2)
def body(v, x):
v = ipu.ops.cross_replica_ops.cross_replica_sum(v + x)
outfeed = outfeed_queue.enqueue(v)
return (v, outfeed)
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
_configure_replicated_ipu_system()
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(48, shape))
outfed_result = sess.run(outfed)
self.assertTrue(outfed_result.shape[0], 2)
self.assertAllClose(outfed_result[0][0], outfed_result[0][1])
self.assertAllClose(outfed_result[0][0], np.broadcast_to(1, shape))
self.assertAllClose(outfed_result[1][0], outfed_result[1][1])
self.assertAllClose(outfed_result[1][0], np.broadcast_to(4, shape))
self.assertAllClose(outfed_result[2][0], outfed_result[2][1])
self.assertAllClose(outfed_result[2][0], np.broadcast_to(11, shape))
self.assertAllClose(outfed_result[3][0], outfed_result[3][1])
self.assertAllClose(outfed_result[3][0], np.broadcast_to(23, shape))
self.assertAllClose(outfed_result[4][0], outfed_result[4][1])
self.assertAllClose(outfed_result[4][0], np.broadcast_to(48, shape))
def testCreateSimpleReplicatedInfeedOutfeedTuple(self):
with self.session() as sess:
shape = [2]
dataset = tu.create_single_increasing_dataset(3, shape)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(
dataset, feed_name=next_feed_id(), replication_factor=2)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
feed_name=next_feed_id(), replication_factor=2)
def body(v, x):
out = ipu.ops.cross_replica_ops.cross_replica_sum(v + x)
outfeed = outfeed_queue.enqueue((v, out))
return (out, outfeed)
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
_configure_replicated_ipu_system()
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(48, shape))
outfed_result = sess.run(outfed)
self.assertTrue(outfed_result[0].shape[0], 2)
self.assertTrue(outfed_result[1].shape[0], 2)
self.assertAllClose(outfed_result[0][0][0], outfed_result[0][0][1])
self.assertAllClose(outfed_result[0][0][0], np.broadcast_to(0, shape))
self.assertAllClose(outfed_result[1][0][0], outfed_result[1][0][1])
self.assertAllClose(outfed_result[1][0][0], np.broadcast_to(1, shape))
self.assertAllClose(outfed_result[0][1][0], outfed_result[0][1][1])
self.assertAllClose(outfed_result[0][1][0], np.broadcast_to(1, shape))
self.assertAllClose(outfed_result[1][1][0], outfed_result[1][1][1])
self.assertAllClose(outfed_result[1][1][0], np.broadcast_to(4, shape))
self.assertAllClose(outfed_result[0][2][0], outfed_result[0][2][1])
self.assertAllClose(outfed_result[0][2][0], np.broadcast_to(4, shape))
self.assertAllClose(outfed_result[1][2][0], outfed_result[1][2][1])
self.assertAllClose(outfed_result[1][2][0], np.broadcast_to(11, shape))
self.assertAllClose(outfed_result[0][3][0], outfed_result[0][3][1])
self.assertAllClose(outfed_result[0][3][0], np.broadcast_to(11, shape))
self.assertAllClose(outfed_result[1][3][0], outfed_result[1][3][1])
self.assertAllClose(outfed_result[1][3][0], np.broadcast_to(23, shape))
self.assertAllClose(outfed_result[0][4][0], outfed_result[0][4][1])
self.assertAllClose(outfed_result[0][4][0], np.broadcast_to(23, shape))
self.assertAllClose(outfed_result[1][4][0], outfed_result[1][4][1])
self.assertAllClose(outfed_result[1][4][0], np.broadcast_to(48, shape))
def testCreateSimpleReplicatedInfeedOutfeedDict(self):
with self.session() as sess:
shape = [2]
dataset = tu.create_single_increasing_dataset(3, shape)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(
dataset, feed_name=next_feed_id(), replication_factor=2)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
feed_name=next_feed_id(), replication_factor=2)
def body(v, x):
out = ipu.ops.cross_replica_ops.cross_replica_sum(v + x)
outfeed = outfeed_queue.enqueue({"last": v, "this": out})
return (out, outfeed)
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
_configure_replicated_ipu_system()
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(48, shape))
outfed_result = sess.run(outfed)
self.assertTrue(outfed_result["last"].shape[0], 2)
self.assertTrue(outfed_result["this"].shape[0], 2)
self.assertAllClose(outfed_result["last"][0][0],
outfed_result["last"][0][1])
self.assertAllClose(outfed_result["last"][0][0],
np.broadcast_to(0, shape))
self.assertAllClose(outfed_result["this"][0][0],
outfed_result["this"][0][1])
self.assertAllClose(outfed_result["this"][0][0],
np.broadcast_to(1, shape))
self.assertAllClose(outfed_result["last"][1][0],
outfed_result["last"][1][1])
self.assertAllClose(outfed_result["last"][1][0],
np.broadcast_to(1, shape))
self.assertAllClose(outfed_result["this"][1][0],
outfed_result["this"][1][1])
self.assertAllClose(outfed_result["this"][1][0],
np.broadcast_to(4, shape))
self.assertAllClose(outfed_result["last"][2][0],
outfed_result["last"][2][1])
self.assertAllClose(outfed_result["last"][2][0],
np.broadcast_to(4, shape))
self.assertAllClose(outfed_result["this"][2][0],
outfed_result["this"][2][1])
self.assertAllClose(outfed_result["this"][2][0],
np.broadcast_to(11, shape))
self.assertAllClose(outfed_result["last"][3][0],
outfed_result["last"][3][1])
self.assertAllClose(outfed_result["last"][3][0],
np.broadcast_to(11, shape))
self.assertAllClose(outfed_result["this"][3][0],
outfed_result["this"][3][1])
self.assertAllClose(outfed_result["this"][3][0],
np.broadcast_to(23, shape))
self.assertAllClose(outfed_result["last"][4][0],
outfed_result["last"][4][1])
self.assertAllClose(outfed_result["last"][4][0],
np.broadcast_to(23, shape))
self.assertAllClose(outfed_result["this"][4][0],
outfed_result["this"][4][1])
self.assertAllClose(outfed_result["this"][4][0],
np.broadcast_to(48, shape))
def testCreateCombinedReplicatedSumGraph(self):
with self.session() as sess:
def my_graph():
with ops.device("/device:IPU:0"):
with variable_scope.variable_scope("", use_resource=True):
x1 = variable_scope.get_variable(
"x1",
dtype=np.float32,
shape=[100],
initializer=init_ops.constant_initializer(10.0))
x2 = variable_scope.get_variable(
"x2",
dtype=np.int32,
shape=[100],
initializer=init_ops.constant_initializer(10))
y1 = ipu.ops.cross_replica_ops.cross_replica_sum(x1 + x1)
z1 = ipu.ops.cross_replica_ops.cross_replica_sum(x1 * x1)
y2 = ipu.ops.cross_replica_ops.cross_replica_sum(x2 + x2)
z2 = ipu.ops.cross_replica_ops.cross_replica_sum(x2 * x2)
return [
ipu.ops.cross_replica_ops.cross_replica_sum(z1 + y1),
ipu.ops.cross_replica_ops.cross_replica_sum(z2 + y2)
]
out = ipu.ipu_compiler.compile(my_graph, [])
_configure_replicated_ipu_system()
sess.run(variables.global_variables_initializer())
result = sess.run(out, {})
ref = np.empty([2, 100])
ref.fill(480.0)
# Check output equals the expected value
self.assertAllClose(result, ref)
def testReplicatedGraphWithoutAllReduce(self):
with self.session() as sess:
dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(
dataset, feed_name=next_feed_id(), replication_factor=2)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
feed_name=next_feed_id(), replication_factor=2)
def body(x):
outfeed = outfeed_queue.enqueue(x)
return outfeed
def my_net():
r = ipu.loops.repeat(2, body, infeed_queue=infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net)
outfed = outfeed_queue.dequeue()
_configure_replicated_ipu_system()
sess.run(infeed_queue.initializer)
sess.run(res)
outfed_result = sess.run(outfed)
self.assertAllClose([[1, 2], [3, 4]], outfed_result)
def testCreateSimpleReplicatedInfeedWrongReplicationFactor(self):
with self.session() as sess:
shape = [2]
dataset = tu.create_single_increasing_dataset(3, shape)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(
dataset, feed_name=next_feed_id(), replication_factor=4)
def body(v, x):
v = ipu.ops.cross_replica_ops.cross_replica_sum(v + x)
return v
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
_configure_replicated_ipu_system()
sess.run(infeed_queue.initializer)
with self.assertRaisesRegex(
errors.FailedPreconditionError,
'Current program has been created with replication_factor 2'):
sess.run(res)
def testCreateSimpleReplicatedOutfeedWrongReplicationFactor(self):
with self.session() as sess:
shape = [2]
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
feed_name=next_feed_id(), replication_factor=4)
def body(v):
v = ipu.ops.cross_replica_ops.cross_replica_sum(v)
outfeed = outfeed_queue.enqueue(v)
return (v, outfeed)
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v])
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
_configure_replicated_ipu_system()
with self.assertRaisesRegex(
errors.FailedPreconditionError,
'Current program has been created with replication_factor 2'):
sess.run(res)
def testReplicatedGraphWithOutsideCompilationScope(self):
with self.session() as sess:
def my_net():
with ipu.scopes.ipu_scope("/device:IPU:0"):
x = ipu.replication_ops.replication_index()
with ipu.scopes.outside_compilation_scope():
# This receives the data from the first replica,
# and then broadcasts the result to all replicas.
# So both replicas should receive 0 + 1 = 1 from
# the host computation.
x += 1
return ipu.ops.cross_replica_ops.cross_replica_sum(x)
[res] = ipu.ipu_compiler.compile(my_net, inputs=[])
_configure_replicated_ipu_system()
# Both replicas should receive 1.
self.assertEqual(2, sess.run(res))
def testReplicatedReduceScatter(self):
with self.session() as sess:
replication_factor = 2
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
feed_name=next_feed_id(), replication_factor=replication_factor)
def my_net(x):
with self.assertRaisesRegex(ValueError,
"Shape must be rank 1 but is rank 2"):
ipu.ops.reduce_scatter_op.reduce_scatter(
[x], replication_factor=replication_factor)
y = ipu.ops.reduce_scatter_op.reduce_scatter(
x, replication_factor=replication_factor)
self.assertEqual(1, len(y.shape))
expected_length = np.ceil(int(x.shape[0]) / replication_factor)
self.assertEqual(expected_length, y.shape[0])
return outfeed_queue.enqueue(y)
num_elements = 5 # To test padding
inputs = [np.arange(num_elements, dtype=np.float32)]
with ipu.scopes.ipu_scope("/device:IPU:0"):
compiled_net = ipu.ipu_compiler.compile(my_net, inputs=inputs)
with ops.device("/device:CPU:0"):
scattered_chunks = outfeed_queue.dequeue()
gathered_padded = array_ops.reshape(scattered_chunks, shape=[-1])
gathered = array_ops.slice(gathered_padded, [0], [num_elements])
_configure_replicated_ipu_system()
sess.run(compiled_net)
gathered_result = sess.run(gathered)
expected_result = replication_factor * np.arange(num_elements)
self.assertAllEqual(expected_result, gathered_result)
def testReplicatedReduceScatterCombining(self):
with self.session() as sess:
num_replicas = 2
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
feed_name=next_feed_id(), replication_factor=num_replicas)
def my_net(*xs):
y = [
ipu.ops.reduce_scatter_op.reduce_scatter(
x, replication_factor=num_replicas) for x in xs
]
return outfeed_queue.enqueue(y)
inputs = [i * np.arange(i, dtype=np.float32) for i in range(1, 6)]
with ipu.scopes.ipu_scope("/device:IPU:0"):
compiled_net = ipu.ipu_compiler.compile(my_net, inputs=inputs)
gathered = []
with ops.device("/device:CPU:0"):
dequeued = outfeed_queue.dequeue()
for scattered in dequeued:
gathered.append(array_ops.reshape(scattered, shape=[-1]))
_configure_replicated_ipu_system()
report = tu.ReportJSON(self, sess, configure_device=False)
report.reset()
sess.run(compiled_net)
out = sess.run(gathered)
# Check that the reduce scatters were combined into one.
report.parse_log()
report.assert_compute_sets_matches(
"IpuReduceScatter*/custom-call*/ReduceScatter", 1)
# Check padded lengths.
self.assertEqual(len(out[0]), np.ceil(1 / num_replicas) * num_replicas)
self.assertEqual(len(out[1]), np.ceil(2 / num_replicas) * num_replicas)
self.assertEqual(len(out[2]), np.ceil(3 / num_replicas) * num_replicas)
self.assertEqual(len(out[3]), np.ceil(4 / num_replicas) * num_replicas)
self.assertEqual(len(out[4]), np.ceil(5 / num_replicas) * num_replicas)
# Check payloads.
self.assertAllEqual(1.0 * num_replicas * np.arange(1), out[0][:1])
self.assertAllEqual(2.0 * num_replicas * np.arange(2), out[1][:2])
self.assertAllEqual(3.0 * num_replicas * np.arange(3), out[2][:3])
self.assertAllEqual(4.0 * num_replicas * np.arange(4), out[3][:4])
self.assertAllEqual(5.0 * num_replicas * np.arange(5), out[4][:5])
# -*- coding: utf-8 -*-
"""
repair for flexural properties
- Block
class for blocks of plies oriented at the same fibre orientation
- calc_delta_lampamD_swap & calc_delta_lampamD_swap_1
returns the out-of-plane lamination parameters variation due to the swap
of ply groups, taking into account the two symmetric parts for symmetric
laminates - only one panel accounted for
#- calc_delta_lampamD_swap_2
# returns the out-of-plane lamination parameters variation due to the swap
# of ply groups, taking into account the two symmetric parts for symmetric
# laminates - account for several panels
- find_list_blocks
divides a stacking sequence into blocks of plies at the same
fibre orientation
"""
__version__ = '1.0'
__author__ = '<NAME>'
import sys
import operator
import numpy as np
import numpy.matlib
sys.path.append(r'C:\BELLA')
from src.LAYLA_V02.constraints import Constraints
from src.guidelines.disorientation import is_diso
from src.divers.pretty_print import print_lampam, print_ss, print_list_ss
from src.CLA.lampam_functions import calc_lampam
from src.BELLA.format_pdl import pos_in_ss_ref_to_pos_in_sst
from src.BELLA.format_pdl import pos_in_sst_to_pos_in_panel
from src.BELLA.parameters import Parameters as ParametersBELLA
from src.BELLA.constraints import Constraints as ConstraintsBELLA
from src.BELLA.panels import Panel
from src.BELLA.multipanels import MultiPanel
def calc_delta_lampamD_swap(
angle_first, angle_second, pos_first, pos_second, n_plies,
constraints):
'''
returns the out-of-plane lamination parameters variation due to the
modifications of some ply fibre orientations, taking into account the two
symmetric parts for symmetric laminates - only one panel accounted for
OUTPUTS
- delta_lampam_D: out-of-plane partial lamination parameters
INPUTS
- angle_first: fibre orientation of the first group of plies
- angle_second: fibre orientation of the second group of plies
- pos_first: position of the plies in the first group
- pos_second: position of the plies in the second group
- n_plies: ply count of the laminate
- constraints: set of constraints
'''
cos_sin_first = constraints.cos_sin[
constraints.ind_angles_dict[angle_first]].reshape((4, 1))
cos_sin_second = constraints.cos_sin[
constraints.ind_angles_dict[angle_second]].reshape((4, 1))
n_plies_first = pos_first.size
n_plies_second = pos_second.size
# vector of moments of area of the order 0 for each ply
z_0_first = np.ones(n_plies_first)
z_0_second = np.ones(n_plies_second)
# vector of second moments of area for each ply
z_2_first = np.array((
(- n_plies / 2) * z_0_first + pos_first)**3 \
- ((-n_plies / 2) * z_0_first + (pos_first - 1))**3)
z_2_second = np.array((
(- n_plies / 2) * z_0_second + pos_second)**3 \
- ((-n_plies / 2) * z_0_second + (pos_second - 1))**3)
if constraints.sym:
## REMOVE CONTRIBUTION OF FIRST BLOCK
delta_lampam_D = -np.array([
(8/n_plies**3)*np.matmul(
np.matlib.repmat(cos_sin_first, 1, n_plies_first),
z_2_first
)]).reshape((4,))
## ADD CONTRIBUTION OF FIRST BLOCK MOVED TO POSITION OF SECOND BLOCK
delta_lampam_D += np.array([
(8/n_plies**3)*np.matmul(
np.matlib.repmat(cos_sin_first, 1, n_plies_second),
z_2_second
)]).reshape((4,))
## REMOVE CONTRIBUTION OF SECOND BLOCK
delta_lampam_D -= np.array([
(8/n_plies**3)*np.matmul(
np.matlib.repmat(cos_sin_second, 1, n_plies_second),
z_2_second
)]).reshape((4,))
## ADD CONTRIBUTION OF SECOND BLOCK MOVED TO POSITION OF FIRST BLOCK
delta_lampam_D += np.array([
(8/n_plies**3)*np.matmul(
np.matlib.repmat(cos_sin_second, 1, n_plies_first),
z_2_first
)]).reshape((4,))
else:
## REMOVE CONTRIBUTION OF FIRST BLOCK
delta_lampam_D = -np.array([
(4/n_plies**3)*np.matmul(
np.matlib.repmat(cos_sin_first, 1, n_plies_first),
z_2_first
)]).reshape((4,))
## ADD CONTRIBUTION OF FIRST BLOCK MOVED TO POSITION OF SECOND BLOCK
delta_lampam_D += np.array([
(4/n_plies**3)*np.matmul(
np.matlib.repmat(cos_sin_first, 1, n_plies_second),
z_2_second
)]).reshape((4,))
## REMOVE CONTRIBUTION OF SECOND BLOCK
delta_lampam_D -= np.array([
(4/n_plies**3)*np.matmul(
np.matlib.repmat(cos_sin_second, 1, n_plies_second),
z_2_second
)]).reshape((4,))
## ADD CONTRIBUTION OF SECOND BLOCK MOVED TO POSITION OF FIRST BLOCK
delta_lampam_D += np.array([
(4/n_plies**3)*np.matmul(
np.matlib.repmat(cos_sin_second, 1, n_plies_first),
z_2_first
)]).reshape((4,))
# # Filter for numerical approximations
# sett = set([0, 45, -45, 90, -90, 135, -135])
# if np.all([el in sett for el in constraints.set_of_angles]):
# delta_lampam_D[3] = 0
return delta_lampam_D
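# For reference, the quantities manipulated above follow the usual definition of the
# out-of-plane lamination parameters, with ply positions counted in plies from the
# laminate mid-plane (the z_2 terms are z_k**3 - z_(k-1)**3):
#   lampam_D[j] = (4 / n**3) * sum_k cos_sin_j(theta_k) * (z_k**3 - z_(k-1)**3)
# For symmetric laminates only half of the stacking sequence is stored, so the factor
# doubles to 8 / n**3. calc_delta_lampamD_swap returns only the change in these four
# parameters when the two ply groups exchange positions.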
def calc_delta_lampamD_swap_1(
angle_first, angle_second, n_first_ply, n_second_ply, n_plies_group,
n_plies, constraints):
'''
returns the out-of-plane lamination parameters variation due to the swap
of ply groups, taking into account the two symmetric parts for symmetric
laminates - only one panel accounted for
INPUTS
- angle_first: fibre orientation of the first block of plies
- angle_second: fibre orientation of the second block of plies
- n_first_ply is the position of the first ply of the first group
- n_second_ply is the position of the first ply of the second group
- n_plies_group: ply count of the block of plies
- n_plies: ply count of the laminate
- constraints: set of constraints
'''
return calc_delta_lampamD_swap(
angle_first,
angle_second,
pos_first=n_first_ply + np.arange(n_plies_group),
pos_second=n_second_ply + np.arange(n_plies_group),
n_plies=n_plies,
constraints=constraints)
#def calc_delta_lampamD_swap_2(
# multipanel,
# angle_first,
# angle_second,
# n_first_ply,
# n_second_ply,
# n_plies_group,
# constraints,
# reduced_pdl):
# '''
# not updated with the structure of blended panels using multipanel.reduced
#
# returns the out-of-plane lamination parameters variation due to the swap
# of ply groups, taking into account the two symmetric parts for symmetric
# laminates - account for several panels
#
# INPUTS
#
# - multipanel: multi-panel structure
# - reduced_pdl: reduced ply drop layout for guide-based blending
# - angle_first: fibre orientation of the first block of plies
# - angle_second: fibre orientation of the second block of plies
# - n_first_ply: the position of the first ply of the first group
# - n_second_ply: the position of the first ply of the second group
# - n_plies_group: ply count of the block of plies
# - multipanel.n_plies_in_panels: ply count of the laminates
# - constraints: set of constraints
# - parameters: oprimiser parameters
# '''
## print('n_first_ply,', n_first_ply)
## print('n_second_ply', n_second_ply)
## print('n_plies_group', n_plies_group)
## print('multipanel.n_plies_in_panels', multipanel.n_plies_in_panels)
# delta_lampam_D = np.zeros((reduced_multipanel.n_panels, 4), float)
#
# # positions of plies in reference stacking sequence
# pos_in_sst_first = pos_in_ss_ref_to_pos_in_sst(
# pos_ref=n_first_ply + np.arange(n_plies_group),
# pdl_ref=reduced_pdl[multipanel.ind_ref])
# pos_in_sst_second = pos_in_ss_ref_to_pos_in_sst(
# pos_ref=n_second_ply + np.arange(n_plies_group),
# pdl_ref=reduced_pdl[multipanel.ind_ref])
## print('pos_in_sst_first', pos_in_sst_first)
## print('pos_in_sst_second', pos_in_sst_second)
#
# for ind_panel in range(reduced_multipanel.n_panels):
## print('ind_panel', ind_panel)
#
# pos_in_panel_first = pos_in_sst_to_pos_in_panel(
# pos_sst=pos_in_sst_first,
# pdl_panel=reduced_pdl[ind_panel])
# pos_in_panel_second = pos_in_sst_to_pos_in_panel(
# pos_sst=pos_in_sst_second,
# pdl_panel=reduced_pdl[ind_panel])
## print('pos_in_panel_first', pos_in_panel_first)
## print('pos_in_panel_second', pos_in_panel_second)
#
# delta_lampam_D[ind_panel] = calc_delta_lampamD_swap(
# angle_first, angle_second, pos_in_panel_first, pos_in_panel_second,
# n_plies=multipanel.reduced_n_plies_in_panels[ind_panel],
# constraints=constraints)
# return delta_lampam_D
class Block():
" An object for a block of plies oriented at the same fibre orientation"
def __init__(self, ID, angle, n_block_of_plies, first_ply_pos,
n_plies, angle_before, angle_after, constraints):
self.ID = ID
self.angle = angle
self.n_block_of_plies = n_block_of_plies
self.first_ply_pos = first_ply_pos
if first_ply_pos < n_plies / 2:
position_1 = first_ply_pos
else:
position_1 = n_plies - first_ply_pos - 1
distance_1 = 2 * abs(position_1 - (n_plies / 2)) / n_plies
if first_ply_pos + n_block_of_plies - 1 < n_plies / 2:
position_2 = first_ply_pos + n_block_of_plies - 1
else:
position_2 = n_plies - first_ply_pos - n_block_of_plies
distance_2 = 2 * abs(position_2 - (n_plies / 2)) / n_plies
self.distance_middle = max(distance_1, distance_2)
self.neighbour_angles = []
if angle_before is not None:
self.neighbour_angles.append(angle_before)
if angle_after is not None and angle_before != angle_after:
self.neighbour_angles.append(angle_after)
self.neighbour_angles = self.neighbour_angles
self.calc_possible_angles(constraints)
def calc_possible_angles(self, constraints):
"""
finds the ply angles that the ply block can be changed to, whilst still
satisfying disorientation and contiguity
"""
possible_angles = []
for ang in constraints.set_of_angles:
if len(self.neighbour_angles) == 1 \
and ang != self.angle \
and ang not in self.neighbour_angles \
and (not constraints.diso \
or is_diso(ang, self.neighbour_angles[0],
constraints.delta_angle)):
possible_angles.append(ang)
if len(self.neighbour_angles) == 2 \
and ang != self.angle \
and ang not in self.neighbour_angles \
and (not constraints.diso \
or is_diso(ang, self.neighbour_angles[0],
constraints.delta_angle)) \
and (not constraints.diso \
or is_diso(ang, self.neighbour_angles[1],
constraints.delta_angle)):
possible_angles.append(ang)
self.possible_angles = possible_angles
def update_possible_angles(self, constraints, list_blocks, midply=None):
" update the block IDs"
self.neighbour_angles = []
for block in list_blocks:
if block.ID == self.ID + 1 or block.ID == self.ID - 1:
self.neighbour_angles.append(block.angle)
if self.ID == 0 and midply is not None:
self.neighbour_angles.append(midply)
self.calc_possible_angles(constraints)
for block1 in list_blocks:
# update the block / ID + 1
if block1.ID == self.ID + 1:
block1.neighbour_angles = []
for block2 in list_blocks:
if block2.ID == block1.ID + 1 \
or block2.ID == block1.ID - 1:
block1.neighbour_angles.append(block2.angle)
block1.calc_possible_angles(constraints)
# update the block / ID - 1
if block1.ID == self.ID - 1:
block1.neighbour_angles = []
for block2 in list_blocks:
if block2.ID == block1.ID + 1 \
or block2.ID == block1.ID - 1:
block1.neighbour_angles.append(block2.angle)
block1.calc_possible_angles(constraints)
def __repr__(self):
" Display object "
return f"""
Block of {self.n_block_of_plies} plies oriented at {self.angle} deg
ID: {self.ID}
First ply position: {self.first_ply_pos}
Neighbour ply orientations: {self.neighbour_angles}
Possible angles for a swap: {self.possible_angles}
Normalised distance from the middle surface: {self.distance_middle}
"""
def find_list_blocks(ss_ref, n_plies, constraints):
"""
divides a stacking sequence into blocks of plies at the same
fibre orientation
"""
if constraints.sym:
if n_plies % 2:
ind_start = n_plies // 2 - 1
while ss_ref[ind_start] == ss_ref[n_plies // 2]:
ind_start -= 1
else:
ind_start = n_plies // 2 - 1
else:
ind_start = n_plies - 1
list_blocks = []
ID = 0
n_block_of_plies = 1
while ind_start != 0:
if ss_ref[ind_start] == ss_ref[ind_start - 1]:
ind_start -= 1
n_block_of_plies += 1
| |
        # Take the locks we need up front, in order, because
# locking in a subquery doing an INSERT isn't guaranteed to use that
# order (deadlocks seen with commits on MySQL 5.7 without this,
# when using REPEATABLE READ.)
#
# We must do this on its own, because some drivers (notably
# mysql-connector-python) get very upset
# ("mysql.connector.errors.InternalError: Unread result
# found") if you issue a SELECT that you don't then consume.
#
# Since we switched MySQL back to READ COMMITTED (what PostgreSQL uses)
# I haven't been able to produce the error anymore. So don't explicitly lock.
stmt = """
INSERT INTO pack_object (zoid, keep, keep_tid)
SELECT zoid, """ + ('%(TRUE)s' if keep else '%(FALSE)s') + """, MAX(tid)
FROM ( """ + affected_objects + """ ) t
GROUP BY zoid;
-- Keep the root object.
UPDATE pack_object
SET keep = %(TRUE)s
WHERE zoid = 0;
"""
self.runner.run_script(cursor, stmt, {'pack_tid': pack_tid})
self.connmanager.commit(conn, cursor)
def _pre_pack_without_gc(self, conn, cursor, pack_tid):
"""
Determine what to pack, without garbage collection.
With garbage collection disabled, there is no need to follow
object references.
"""
# Fill the pack_object table with OIDs, but configure them
# all to be kept by setting keep to true.
log.debug("pre_pack: populating pack_object")
self.__initial_populate_pack_object(conn, cursor, pack_tid, keep=True)
def _pre_pack_with_gc(self, conn, cursor, pack_tid, get_references):
"""
Determine what to pack, with garbage collection.
"""
stmt = self._script_create_temp_pack_visit
if stmt:
self.runner.run_script(cursor, stmt)
self.fill_object_refs(conn, cursor, get_references)
log.info("pre_pack: filling the pack_object table")
# Fill the pack_object table with OIDs that either will be
# removed (if nothing references the OID) or whose history will
# be cut.
self.__initial_populate_pack_object(conn, cursor, pack_tid, keep=False)
stmt = """
-- Keep objects that have been revised since pack_tid.
-- Use temp_pack_visit for temporary state; otherwise MySQL 5 chokes.
INSERT INTO temp_pack_visit (zoid, keep_tid)
SELECT zoid, 0
FROM current_object
WHERE tid > %(pack_tid)s
ORDER BY zoid;
UPDATE pack_object
SET keep = %(TRUE)s
WHERE zoid IN (
SELECT zoid
FROM temp_pack_visit
);
%(TRUNCATE)s temp_pack_visit;
-- Keep objects that are still referenced by object states in
-- transactions that will not be packed.
-- Use temp_pack_visit for temporary state; otherwise MySQL 5 chokes.
INSERT INTO temp_pack_visit (zoid, keep_tid)
SELECT DISTINCT to_zoid, 0
FROM object_ref
WHERE tid > %(pack_tid)s;
UPDATE pack_object
SET keep = %(TRUE)s
WHERE zoid IN (
SELECT zoid
FROM temp_pack_visit
);
%(TRUNCATE)s temp_pack_visit;
"""
self.runner.run_script(cursor, stmt, {'pack_tid': pack_tid})
# Traverse the graph, setting the 'keep' flags in pack_object
self._traverse_graph(cursor)
self.connmanager.commit(conn, cursor)
def _find_pack_tid(self):
"""If pack was not completed, find our pack tid again"""
conn, cursor = self.connmanager.open_for_pre_pack()
try:
stmt = self._script_find_pack_tid
self.runner.run_script_stmt(cursor, stmt)
res = [tid for (tid,) in cursor]
finally:
self.connmanager.close(conn, cursor)
return res[0] if res else 0
@metricmethod
def pack(self, pack_tid, packed_func=None):
"""Pack. Requires the information provided by pre_pack."""
# pylint:disable=too-many-locals
# Read committed mode is sufficient.
conn, cursor = self.connmanager.open_for_store()
try: # pylint:disable=too-many-nested-blocks
try:
# If we have a transaction entry in ``pack_state_tid`` (that is,
# we found a transaction with an object in the range of transactions
# we can pack away) that matches an actual transaction entry (XXX:
# How could we be in the state where the transaction row is gone but we still
# have object_state with that transaction id?), then we need to pack that
# transaction. The presence of an entry in ``pack_state_tid`` means that all
# object states from that transaction should be removed.
stmt = """
SELECT transaction.tid,
CASE WHEN packed = %(TRUE)s THEN 1 ELSE 0 END,
CASE WHEN pack_state_tid.tid IS NOT NULL THEN 1 ELSE 0 END
FROM transaction
LEFT OUTER JOIN pack_state_tid ON (transaction.tid = pack_state_tid.tid)
WHERE transaction.tid > 0
AND transaction.tid <= %(pack_tid)s
AND (packed = %(FALSE)s OR pack_state_tid.tid IS NOT NULL)
ORDER BY transaction.tid
"""
self.runner.run_script_stmt(
cursor, stmt, {'pack_tid': pack_tid})
tid_rows = list(self._fetchmany(cursor)) # oldest first, sorted in SQL
total = len(tid_rows)
log.info("pack: will pack %d transaction(s)", total)
stmt = self._script_create_temp_pack_visit
if stmt:
self.runner.run_script(cursor, stmt)
# Lock and delete rows in the same order that
# new commits would in order to prevent deadlocks.
# Pack in small batches of transactions only after we are able
# to obtain a commit lock in order to minimize the
# interruption of concurrent write operations.
start = time.time()
packed_list = []
counter, lastreport, statecounter = 0, 0, 0
# We'll report on progress in at most .1% step increments
reportstep = max(total / 1000, 1)
for tid, packed, has_removable in tid_rows:
self._pack_transaction(
cursor, pack_tid, tid, packed, has_removable,
packed_list)
counter += 1
if time.time() >= start + self.options.pack_batch_timeout:
self.connmanager.commit(conn, cursor)
if packed_func is not None:
for poid, ptid in packed_list:
packed_func(poid, ptid)
statecounter += len(packed_list)
if counter >= lastreport + reportstep:
log.info("pack: packed %d (%.1f%%) transaction(s), "
"affecting %d states",
counter, counter / float(total) * 100,
statecounter)
lastreport = counter / reportstep * reportstep
del packed_list[:]
start = time.time()
if packed_func is not None:
for oid, tid in packed_list:
packed_func(oid, tid)
packed_list = None
self._pack_cleanup(conn, cursor)
except:
log.exception("pack: failed")
self.connmanager.rollback_quietly(conn, cursor)
raise
else:
log.info("pack: finished successfully")
self.connmanager.commit(conn, cursor)
finally:
self.connmanager.close(conn, cursor)
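    # Sketch of the intended call order (the surrounding adapter wiring is assumed):
    #   pack_tid = self._find_pack_tid()       # recover the tid chosen during pre-pack
    #   self.pack(pack_tid, packed_func=cb)    # cb(oid, tid) fires for each removed state
    # pre_pack must have populated pack_object/pack_state/pack_state_tid beforehand.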
def _pack_transaction(self, cursor, pack_tid, tid, packed,
has_removable, packed_list):
"""
Pack one transaction. Requires populated pack tables.
If *has_removable* is true, then we have object states and current
object pointers to remove.
"""
log.debug("pack: transaction %d: packing", tid)
removed_objects = 0
removed_states = 0
if has_removable:
stmt = self._script_pack_current_object
self.runner.run_script_stmt(cursor, stmt, {'tid': tid})
removed_objects = cursor.rowcount
stmt = self._script_pack_object_state
self.runner.run_script_stmt(cursor, stmt, {'tid': tid})
removed_states = cursor.rowcount
# Terminate prev_tid chains
stmt = """
UPDATE object_state SET prev_tid = 0
WHERE prev_tid = %(tid)s
AND tid <= %(pack_tid)s
"""
self.runner.run_script_stmt(cursor, stmt,
{'pack_tid': pack_tid, 'tid': tid})
stmt = """
SELECT pack_state.zoid
FROM pack_state
WHERE pack_state.tid = %(tid)s
"""
self.runner.run_script_stmt(cursor, stmt, {'tid': tid})
for (oid,) in self._fetchmany(cursor):
packed_list.append((oid, tid))
# Find out whether the transaction is empty
stmt = self._script_transaction_has_data
self.runner.run_script_stmt(cursor, stmt, {'tid': tid})
empty = not list(cursor)
# mark the transaction packed and possibly empty
if empty:
clause = 'is_empty = %(TRUE)s'
state = 'empty'
else:
clause = 'is_empty = %(FALSE)s'
state = 'not empty'
stmt = "UPDATE transaction SET packed = %(TRUE)s, " + clause
stmt += " WHERE tid = %(tid)s"
self.runner.run_script_stmt(cursor, stmt, {'tid': tid})
log.debug(
"pack: transaction %d (%s): removed %d object(s) and %d state(s)",
tid, state, removed_objects, removed_states)
def _pack_cleanup(self, conn, cursor):
"""Remove unneeded table rows after packing"""
# commit the work done so far, releasing row-level locks.
self.connmanager.commit(conn, cursor)
log.info("pack: cleaning up")
# This section does not need to hold the commit lock, as it only
# touches pack-specific tables. We already hold a pack lock for that.
log.debug("pack: removing unused object references")
stmt = self._script_pack_object_ref
self.runner.run_script(cursor, stmt)
# We need a commit lock when touching the transaction table though.
# We'll do it in batches of 1000 rows.
log.debug("pack: removing empty packed transactions")
while True:
stmt = self._script_delete_empty_transactions_batch
self.runner.run_script_stmt(cursor, stmt)
deleted = cursor.rowcount
self.connmanager.commit(conn, cursor)
self.locker.release_commit_lock(cursor)
if deleted < 1000:
# Last set of deletions complete
break
# perform cleanup that does not require the commit lock
log.debug("pack: clearing temporary pack state")
for _table in ('pack_object', 'pack_state', 'pack_state_tid'):
stmt = '%(TRUNCATE)s ' + _table
self.runner.run_script_stmt(cursor, stmt)
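# Illustrative sketch (not part of the original module): the batched-delete
# pattern used by _pack_cleanup above, generalized.  Deleting in fixed-size
# batches and committing between batches keeps each database transaction
# short and lets locks be released between rounds, so concurrent writers are
# not starved.  The cursor/statement interface and the batch size below are
# assumptions for illustration; the real cleanup uses the adapter's
# _script_delete_empty_transactions_batch script and also releases the
# commit lock after each commit.
def _delete_in_batches(connmanager, conn, cursor, delete_stmt, batch_size=1000):
    """Run a LIMITed DELETE repeatedly until a batch comes up short."""
    while True:
        # the statement is expected to delete at most batch_size rows
        cursor.execute(delete_stmt)
        deleted = cursor.rowcount
        # committing releases the row locks taken by this batch
        connmanager.commit(conn, cursor)
        if deleted < batch_size:
            # the final, partial batch: nothing more to delete
            break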
@implementer(IPackUndo)
class HistoryFreePackUndo(PackUndo):
"""
History-free pack/undo.
"""
keep_history = False
_script_choose_pack_transaction = """
SELECT tid
FROM object_state
WHERE tid > 0
AND tid <= %(tid)s
ORDER BY tid DESC
LIMIT 1
"""
_script_create_temp_pack_visit = """
CREATE TEMPORARY TABLE temp_pack_visit (
zoid BIGINT NOT NULL PRIMARY KEY,
keep_tid BIGINT NOT NULL
);
CREATE INDEX temp_pack_keep_tid ON temp_pack_visit (keep_tid)
"""
_script_delete_object = """
DELETE FROM object_state
WHERE zoid = %(oid)s
and tid = %(tid)s
"""
def verify_undoable(self, cursor, undo_tid):
"""Raise UndoError if it is not safe to undo the specified txn."""
raise UndoError("Undo is not supported by this storage")
def undo(self, cursor, undo_tid, self_tid):
"""Undo a transaction.
Parameters: "undo_tid", the integer tid of the transaction to undo,
        and "self_tid", the integer tid of
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import (
Header,
Menu,
Panel,
)
from bpy.app.translations import pgettext_iface as iface_
from bpy.app.translations import contexts as i18n_contexts
# -----------------------------------------------------------------------------
# Main Header
class USERPREF_HT_header(Header):
bl_space_type = 'PREFERENCES'
@staticmethod
def draw_buttons(layout, context):
prefs = context.preferences
layout.operator_context = 'EXEC_AREA'
if prefs.use_preferences_save and (not bpy.app.use_userpref_skip_save_on_exit):
pass
else:
# Show '*' to let users know the preferences have been modified.
layout.operator(
"wm.save_userpref",
text="Save Preferences{:s}".format(" *" if prefs.is_dirty else ""),
)
def draw(self, context):
layout = self.layout
layout.operator_context = 'EXEC_AREA'
layout.template_header()
USERPREF_MT_editor_menus.draw_collapsible(context, layout)
layout.separator_spacer()
self.draw_buttons(layout, context)
# -----------------------------------------------------------------------------
# Main Navigation Bar
class USERPREF_PT_navigation_bar(Panel):
bl_label = "Preferences Navigation"
bl_space_type = 'PREFERENCES'
bl_region_type = 'NAVIGATION_BAR'
bl_options = {'HIDE_HEADER'}
def draw(self, context):
layout = self.layout
prefs = context.preferences
col = layout.column()
col.scale_x = 1.3
col.scale_y = 1.3
col.prop(prefs, "active_section", expand=True)
class USERPREF_MT_editor_menus(Menu):
bl_idname = "USERPREF_MT_editor_menus"
bl_label = ""
def draw(self, _context):
layout = self.layout
layout.menu("USERPREF_MT_view")
layout.menu("USERPREF_MT_save_load", text="Preferences")
class USERPREF_MT_view(Menu):
bl_label = "View"
def draw(self, context):
layout = self.layout
layout.menu("INFO_MT_area")
class USERPREF_MT_save_load(Menu):
bl_label = "Save & Load"
def draw(self, context):
layout = self.layout
prefs = context.preferences
row = layout.row()
row.active = not bpy.app.use_userpref_skip_save_on_exit
row.prop(prefs, "use_preferences_save", text="Auto-Save Preferences")
layout.separator()
layout.operator_context = 'EXEC_AREA'
if prefs.use_preferences_save:
layout.operator("wm.save_userpref", text="Save Preferences")
sub_revert = layout.column(align=True)
sub_revert.active = prefs.is_dirty
sub_revert.operator("wm.read_userpref", text="Revert to Saved Preferences")
layout.operator_context = 'INVOKE_AREA'
layout.operator("wm.read_factory_userpref", text="Load Factory Preferences")
class USERPREF_PT_save_preferences(Panel):
bl_label = "Save Preferences"
bl_space_type = 'PREFERENCES'
bl_region_type = 'EXECUTE'
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
# Hide when header is visible
for region in context.area.regions:
if region.type == 'HEADER' and region.height <= 1:
return True
return False
def draw(self, context):
layout = self.layout.row()
layout.operator_context = 'EXEC_AREA'
layout.menu("USERPREF_MT_save_load", text="", icon='COLLAPSEMENU')
USERPREF_HT_header.draw_buttons(layout, context)
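# Illustrative sketch (not part of this excerpt): UI classes such as the
# header, menus and panels above only take effect once registered with
# Blender.  The bundled script collects them in a classes tuple and registers
# them at the end of the file (outside this excerpt); the helper below shows
# the general shape with a shortened, hypothetical class list.
def _example_register_factory():
    classes = (
        USERPREF_HT_header,
        USERPREF_PT_navigation_bar,
        USERPREF_MT_editor_menus,
        USERPREF_MT_view,
        USERPREF_MT_save_load,
        USERPREF_PT_save_preferences,
    )
    # register_classes_factory returns paired register()/unregister() callables
    return bpy.utils.register_classes_factory(classes)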
# -----------------------------------------------------------------------------
# Mix-In Helpers
# Panel mix-in.
class CenterAlignMixIn:
"""
Base class for panels to center align contents with some horizontal margin.
Deriving classes need to implement a ``draw_centered(context, layout)`` function.
"""
def draw(self, context):
layout = self.layout
width = context.region.width
ui_scale = context.preferences.system.ui_scale
# No horizontal margin if region is rather small.
is_wide = width > (350 * ui_scale)
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
row = layout.row()
if is_wide:
row.label() # Needed so col below is centered.
col = row.column()
col.ui_units_x = 50
# Implemented by sub-classes.
self.draw_centered(context, col)
if is_wide:
row.label() # Needed so col above is centered.
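# Minimal illustrative example (hypothetical panel, not part of Blender): a
# class deriving from CenterAlignMixIn only implements draw_centered() and
# inherits the centering draw() above.  The label and the property drawn are
# placeholders taken from properties used elsewhere in this file.
class _EXAMPLE_PT_centered_sketch(CenterAlignMixIn, Panel):
    bl_label = "Example"
    bl_space_type = 'PREFERENCES'
    bl_region_type = 'WINDOW'
    bl_context = "interface"
    def draw_centered(self, context, layout):
        # draw into the pre-centered column provided by CenterAlignMixIn.draw()
        layout.prop(context.preferences.view, "show_tooltips")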
# -----------------------------------------------------------------------------
# Interface Panels
class InterfacePanel:
bl_space_type = 'PREFERENCES'
bl_region_type = 'WINDOW'
bl_context = "interface"
class USERPREF_PT_interface_display(InterfacePanel, CenterAlignMixIn, Panel):
bl_label = "Display"
def draw_centered(self, context, layout):
prefs = context.preferences
view = prefs.view
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(view, "ui_scale", text="Resolution Scale")
flow.prop(view, "ui_line_width", text="Line Width")
layout.separator()
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(view, "show_splash", text="Splash Screen")
flow.prop(view, "show_tooltips")
flow.prop(view, "show_tooltips_python")
flow.prop(view, "show_developer_ui")
class USERPREF_PT_interface_text(InterfacePanel, CenterAlignMixIn, Panel):
bl_label = "Text Rendering"
bl_options = {'DEFAULT_CLOSED'}
def draw_centered(self, context, layout):
prefs = context.preferences
view = prefs.view
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(view, "use_text_antialiasing", text="Anti-aliasing")
sub = flow.column()
sub.active = view.use_text_antialiasing
sub.prop(view, "text_hinting", text="Hinting")
flow.prop(view, "font_path_ui")
flow.prop(view, "font_path_ui_mono")
class USERPREF_PT_interface_translation(InterfacePanel, CenterAlignMixIn, Panel):
bl_label = "Translation"
bl_translation_context = i18n_contexts.id_windowmanager
@classmethod
def poll(cls, context):
return bpy.app.build_options.international
def draw_centered(self, context, layout):
prefs = context.preferences
view = prefs.view
layout.prop(view, "language")
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.active = (bpy.app.translations.locale != 'en_US')
flow.prop(view, "use_translate_tooltips", text="Tooltips")
flow.prop(view, "use_translate_interface", text="Interface")
flow.prop(view, "use_translate_new_dataname", text="New Data")
class USERPREF_PT_interface_editors(InterfacePanel, CenterAlignMixIn, Panel):
bl_label = "Editors"
def draw_centered(self, context, layout):
prefs = context.preferences
view = prefs.view
system = prefs.system
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(system, "use_region_overlap")
flow.prop(view, "show_layout_ui", text="Corner Splitting")
flow.prop(view, "show_navigate_ui")
flow.prop(view, "color_picker_type")
flow.row().prop(view, "header_align")
flow.prop(view, "factor_display_type")
class USERPREF_PT_interface_temporary_windows(InterfacePanel, CenterAlignMixIn, Panel):
bl_label = "Temporary Windows"
bl_parent_id = "USERPREF_PT_interface_editors"
bl_options = {'DEFAULT_CLOSED'}
def draw_centered(self, context, layout):
prefs = context.preferences
view = prefs.view
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(view, "render_display_type", text="Render in")
flow.prop(view, "filebrowser_display_type", text="File Browser")
class USERPREF_PT_interface_menus(InterfacePanel, Panel):
bl_label = "Menus"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
pass
class USERPREF_PT_interface_menus_mouse_over(InterfacePanel, CenterAlignMixIn, Panel):
bl_label = "Open on Mouse Over"
bl_parent_id = "USERPREF_PT_interface_menus"
def draw_header(self, context):
prefs = context.preferences
view = prefs.view
self.layout.prop(view, "use_mouse_over_open", text="")
def draw_centered(self, context, layout):
prefs = context.preferences
view = prefs.view
layout.active = view.use_mouse_over_open
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(view, "open_toplevel_delay", text="Top Level")
flow.prop(view, "open_sublevel_delay", text="Sub Level")
class USERPREF_PT_interface_menus_pie(InterfacePanel, CenterAlignMixIn, Panel):
bl_label = "Pie Menus"
bl_parent_id = "USERPREF_PT_interface_menus"
def draw_centered(self, context, layout):
prefs = context.preferences
view = prefs.view
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(view, "pie_animation_timeout")
flow.prop(view, "pie_tap_timeout")
flow.prop(view, "pie_initial_timeout")
flow.prop(view, "pie_menu_radius")
flow.prop(view, "pie_menu_threshold")
flow.prop(view, "pie_menu_confirm")
# -----------------------------------------------------------------------------
# Editing Panels
class EditingPanel:
bl_space_type = 'PREFERENCES'
bl_region_type = 'WINDOW'
bl_context = "editing"
class USERPREF_PT_edit_objects(EditingPanel, Panel):
bl_label = "Objects"
def draw(self, context):
pass
class USERPREF_PT_edit_objects_new(EditingPanel, CenterAlignMixIn, Panel):
bl_label = "New Objects"
bl_parent_id = "USERPREF_PT_edit_objects"
def draw_centered(self, context, layout):
prefs = context.preferences
edit = prefs.edit
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(edit, "material_link", text="Link Materials to")
flow.prop(edit, "object_align", text="Align to")
flow.prop(edit, "use_enter_edit_mode", text="Enter Edit Mode")
class USERPREF_PT_edit_objects_duplicate_data(EditingPanel, CenterAlignMixIn, Panel):
bl_label = "Duplicate Data"
bl_parent_id = "USERPREF_PT_edit_objects"
def draw_centered(self, context, layout):
prefs = context.preferences
edit = prefs.edit
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(edit, "use_duplicate_action", text="Action")
col.prop(edit, "use_duplicate_armature", text="Armature")
col.prop(edit, "use_duplicate_curve", text="Curve")
# col.prop(edit, "use_duplicate_fcurve", text="F-Curve") # Not implemented.
col.prop(edit, "use_duplicate_grease_pencil", text="Grease Pencil")
if hasattr(edit, "use_duplicate_hair"):
col.prop(edit, "use_duplicate_hair", text="Hair")
col.prop(edit, "use_duplicate_light", text="Light")
col = flow.column()
col.prop(edit, "use_duplicate_lightprobe", text="Light Probe")
col.prop(edit, "use_duplicate_material", text="Material")
col.prop(edit, "use_duplicate_mesh", text="Mesh")
col.prop(edit, "use_duplicate_metaball", text="Metaball")
col.prop(edit, "use_duplicate_particle", text="Particle")
col = flow.column()
if hasattr(edit, "use_duplicate_pointcloud"):
col.prop(edit, "use_duplicate_pointcloud", text="Point Cloud")
col.prop(edit, "use_duplicate_surface", text="Surface")
col.prop(edit, "use_duplicate_text", text="Text")
# col.prop(edit, "use_duplicate_texture", text="Texture") # Not implemented.
col.prop(edit, "use_duplicate_volume", text="Volume")
class USERPREF_PT_edit_cursor(EditingPanel, CenterAlignMixIn, Panel):
bl_label = "3D Cursor"
def draw_centered(self, context, layout):
prefs = context.preferences
edit = prefs.edit
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(edit, "use_mouse_depth_cursor")
flow.prop(edit, "use_cursor_lock_adjust")
class USERPREF_PT_edit_gpencil(EditingPanel, CenterAlignMixIn, Panel):
bl_label = "Grease Pencil"
bl_options = {'DEFAULT_CLOSED'}
def draw_centered(self, context, layout):
prefs = context.preferences
edit = prefs.edit
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(edit, "grease_pencil_manhattan_distance", text="Manhattan Distance")
flow.prop(edit, "grease_pencil_euclidean_distance", text="Euclidean Distance")
class USERPREF_PT_edit_annotations(EditingPanel, CenterAlignMixIn, Panel):
bl_label = "Annotations"
def draw_centered(self, context, layout):
prefs = context.preferences
edit = prefs.edit
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(edit, "grease_pencil_default_color", text="Default Color")
flow.prop(edit, "grease_pencil_eraser_radius", text="Eraser Radius")
class USERPREF_PT_edit_weight_paint(EditingPanel, CenterAlignMixIn, Panel):
bl_label = "Weight Paint"
bl_options = {'DEFAULT_CLOSED'}
def draw_centered(self, context, layout):
prefs = context.preferences
view = prefs.view
layout.prop(view, "use_weight_color_range", text="Use Custom Colors")
col = layout.column()
col.active = view.use_weight_color_range
col.template_color_ramp(view, "weight_color_range", expand=True)
class USERPREF_PT_edit_misc(EditingPanel, CenterAlignMixIn, Panel):
bl_label = "Miscellaneous"
bl_options = {'DEFAULT_CLOSED'}
def draw_centered(self, context, layout):
prefs = context.preferences
edit = prefs.edit
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(edit, "sculpt_paint_overlay_color", text="Sculpt Overlay Color")
flow.prop(edit, "node_margin", text="Node Auto-offset Margin")
# -----------------------------------------------------------------------------
# Animation Panels
class AnimationPanel:
bl_space_type = 'PREFERENCES'
bl_region_type = 'WINDOW'
bl_context = "animation"
class USERPREF_PT_animation_timeline(AnimationPanel, CenterAlignMixIn, Panel):
bl_label = "Timeline"
def draw_centered(self, context, layout):
prefs = context.preferences
view = prefs.view
edit = prefs.edit
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(edit, "use_negative_frames")
layout.separator()
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(view, "view2d_grid_spacing_min", text="Minimum Grid Spacing")
flow.prop(view, "timecode_style")
flow.prop(view, "view_frame_type")
if view.view_frame_type == 'SECONDS':
flow.prop(view, "view_frame_seconds")
elif view.view_frame_type == 'KEYFRAMES':
flow.prop(view, "view_frame_keyframes")
class USERPREF_PT_animation_keyframes(AnimationPanel, CenterAlignMixIn, Panel):
bl_label = "Keyframes"
def draw_centered(self, context, layout):
prefs = context.preferences
edit = prefs.edit
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(edit, "use_visual_keying")
flow.prop(edit, "use_keyframe_insert_needed", text="Only Insert Needed")
class USERPREF_PT_animation_autokey(AnimationPanel, CenterAlignMixIn, Panel):
bl_label = "Auto-Keyframing"
bl_parent_id = "USERPREF_PT_animation_keyframes"
def draw_centered(self, context, layout):
prefs = context.preferences
edit = prefs.edit
flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(edit, "use_auto_keying_warning", text="Show Warning")
        flow.prop(edit, "use_keyframe_insert_available", text="Only Insert
service object
self.client_service = client_service_class(
self.service_plugin,
self,
service_configuration,
exceptions.ServiceUtilsException,
extra_parameters
)
def start(self):
self.__start_base()
if EPOLL_SUPPORT:
self.__start_epoll()
def stop(self):
if EPOLL_SUPPORT:
self.__stop_epoll()
self.__stop_base()
def __start_base(self):
# generates a new wake "file" port
self.wake_file_port = self.service.service_utils.generate_service_port({})
# creates the wake "file" object
self.wake_file = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# sets the socket to be able to reuse the socket
self.wake_file.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# sets the socket to non blocking mode
self.wake_file.setblocking(0)
# defines the bind parameters
bind_parameters = (
LOCAL_HOST,
self.wake_file_port
)
# binds to the current host
self.wake_file.bind(bind_parameters)
# retrieves the wake file descriptor
wake_file_descriptor = self.wake_file.fileno()
# adds the wake "file" to the service connection sockets list
self.service_connection_sockets_list.append(self.wake_file)
# sets the wake file in the connection socket file descriptor connection socket map
self.connection_socket_file_descriptor_connection_socket_map[wake_file_descriptor] = self.wake_file
def __start_epoll(self):
# retrieves the wake file descriptor
wake_file_descriptor = self.wake_file.fileno()
# creates a new epoll object
self.epoll = select.epoll() #@UndefinedVariable
# register the wake file in the epoll
self.epoll.register(wake_file_descriptor, REGISTER_MASK)
def __stop_base(self):
# retrieves the wake file descriptor
wake_file_descriptor = self.wake_file.fileno()
# closes the wake "file"
self.wake_file.close()
# removes the wake file from the service connection sockets list
self.service_connection_sockets_list.remove(self.wake_file)
# removes the wake file from the connection socket file descriptor connection socket map
        del self.connection_socket_file_descriptor_connection_socket_map[wake_file_descriptor]
def __stop_epoll(self):
# retrieves the wake file descriptor
wake_file_descriptor = self.wake_file.fileno()
# unregister the wake file from the epoll
self.epoll.unregister(wake_file_descriptor)
# stops the epoll object
self.epoll.close()
def process(self):
"""
Processes a work "tick".
The work tick consists in the polling of the connections
and the processing of the work.
"""
# polls the connections for canceling (in timeout)
self.poll_cancel_connections()
# polls the system to check for new connections
ready_sockets = self.poll_connections(POLL_TIMEOUT)
# sets the busy status
self.busy_status = True
try:
# handles the ready sockets
self.handle_ready_sockets(ready_sockets)
finally:
# unsets the busy status
self.busy_status = False
def wake(self):
"""
Wakes the current task releasing the current
process call.
"""
self.__wake_base()
def busy(self):
"""
Retrieves the current busy status.
:rtype: bool
:return: The current busy status.
"""
return self.busy_status
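    # Illustrative sketch (not part of the original class): how an owning
    # thread would typically drive this handler.  process() blocks for at most
    # the poll timeout, wake() interrupts a pending poll through the wake
    # "file" datagram, and busy() reports whether a tick is in progress.
    # The should_stop callable is an assumption for illustration only.
    def _example_drive(self, should_stop):
        # run work ticks until the owner asks us to stop
        while not should_stop():
            self.process()
        # release any poll that may still be pending
        self.wake()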
def work_added(self, work_reference):
"""
Called when a work is added.
:type work_reference: Object
:param work_reference: The reference to the work to be added.
"""
# unpacks the work reference retrieving the connection socket,
# address and port
connection_socket, connection_address, connection_port = work_reference
try:
# adds the connection to the current service connection handler
self.add_connection(connection_socket, connection_address, connection_port)
except Exception as exception:
# prints an error for not being able to add connection
self.service.service_utils_plugin.error("Problem while adding connection to service connection handler: %s" % colony.legacy.UNICODE(exception))
def work_removed(self, work_reference):
"""
Called when a work is removed.
:type work_reference: Object
:param work_reference: The reference to the work to be removed.
"""
# unpacks the work reference retrieving the connection socket,
# address and port
connection_socket, _connection_address, _connection_port = work_reference
try:
# removes the connection using the socket as reference
self.remove_connection_socket(connection_socket)
except Exception as exception:
# prints an error for not being able to remove connection
self.service.service_utils_plugin.error("Problem while removing connection from service connection handler: %s" % colony.legacy.UNICODE(exception))
def add_connection(self, connection_socket, connection_address, connection_port):
"""
Adds a new connection to the service connection handler.
:type connection_socket: Socket
:param connection_socket: The connection socket.
:type connection_address: Tuple
:param connection_address: The connection address.
:type connection_port: int
:param connection_port: The connection port.
:rtype: ServiceConnection
:return: The created service connection.
"""
# in case the connection socket already exists in
# the service connections map
if connection_socket in self.service_connections_map:
# raises the connection change failure exception
raise exceptions.ConnectionChangeFailure("trying to add duplicate socket: " + colony.legacy.UNICODE(connection_socket))
# creates the new service connection and sets the service execution thread
# on the service connection (for callable execution)
service_connection = ServiceConnection(self.service_plugin, self, connection_socket, connection_address, connection_port, self.request_timeout, self.response_timeout, self.chunk_size)
service_connection.service_execution_thread = self.service.service_execution_thread
# opens the service connection
service_connection.open()
# retrieves the connection socket file descriptor
connection_socket_file_descriptor = connection_socket.fileno()
# adds the service connection to the service connections list
self.service_connections_list.append(service_connection)
# adds the connection socket to the service connection sockets list
self.service_connection_sockets_list.append(connection_socket)
# sets the service connection in the service connections map
self.service_connections_map[connection_socket] = service_connection
# sets the connection socket in the connection socket file descriptor
# connection socket map
self.connection_socket_file_descriptor_connection_socket_map[connection_socket_file_descriptor] = connection_socket
# sets the connection socket file descriptor in the connection socket connection
# socket file descriptor map
self.connection_socket_connection_socket_file_descriptor_map[connection_socket] = connection_socket_file_descriptor
if EPOLL_SUPPORT:
self.__add_connection_epoll(connection_socket, connection_address, connection_port)
# sets the initial cancel timeout
service_connection.cancel(self.connection_timeout)
# handles the opened service connection
self.client_service.handle_opened(service_connection)
# returns the created service connection
return service_connection
def remove_connection(self, service_connection):
"""
Removes the given service connection.
:type service_connection: ServiceConnection
:param service_connection: The service connection to be removed.
"""
# retrieves the connection socket
connection_socket = service_connection.get_base_connection_socket()
# in case the connection socket does not exist in
# the service connections map
if not connection_socket in self.service_connections_map:
# raises the connection change failure exception
            raise exceptions.ConnectionChangeFailure("trying to remove nonexistent socket: " + colony.legacy.UNICODE(connection_socket))
# retrieves the connection socket file descriptor
connection_socket_file_descriptor = self.__get_connection_socket_file_descriptor(connection_socket)
# handles the closed service connection
self.client_service.handle_closed(service_connection)
if EPOLL_SUPPORT:
self.__remove_connection_epoll(service_connection)
# closes the service connection
service_connection.close()
# removes the connection from the service connections list
self.service_connections_list.remove(service_connection)
# removes the connection socket from the service connection sockets list
self.service_connection_sockets_list.remove(connection_socket)
# removes the service connection from the service connections map
del self.service_connections_map[connection_socket]
# removes the connection socket from the connection socket file descriptor
# connection socket map
del self.connection_socket_file_descriptor_connection_socket_map[connection_socket_file_descriptor]
# removes the connection socket file descriptor from the connection socket connection
# socket file descriptor map
del self.connection_socket_connection_socket_file_descriptor_map[connection_socket]
def remove_connection_socket(self, connection_socket):
"""
Removes the connection with the given socket.
:type connection_socket: Socket
:param connection_socket: The connection socket to be used
in the removal of the connection.
"""
# retrieves the service connection from the service connections map
service_connection = self.service_connections_map[connection_socket]
# removes the connection for the given service connection
self.remove_connection(service_connection)
def poll_cancel_connections(self):
"""
Polls the current connection scheduled
for canceling.
In case a connection is found to be timed out
        the associated work is canceled and the
connection is closed.
"""
# retrieves the current time value to determine of the
# connection is already meant to be canceled
current_clock = time.time()
# iterates over all the service connections
for service_connection in self.service_connections_list:
# in case there is a cancel time defined and there is a timeout
if service_connection.cancel_time and service_connection.cancel_time < current_clock:
# retrieves the connection tuple and then
# removes the ready service connection (via remove work)
connection_tuple = service_connection.get_connection_tuple()
self.remove_work(connection_tuple)
def poll_connections(self, poll_timeout = POLL_TIMEOUT):
"""
Polls the current connection to check
if any contains new information to be read.
:type poll_timeout: float
:param poll_timeout: The timeout to be used in the polling.
:rtype: List
:return: The selected values for read (ready sockets).
"""
if EPOLL_SUPPORT:
return self.__poll_connections_epoll(poll_timeout)
else:
return self.__poll_connections_base(poll_timeout)
def handle_ready_sockets(self, ready_sockets):
"""
Handles the sockets that are ready to be handled.
The handling is done via the service plugin.
:type ready_sockets: List
:param ready_sockets: The list of sockets ready
to be handled.
"""
# iterates over all the ready sockets
for ready_socket in ready_sockets:
# retrieves the service connection
# that is ready for reading
ready_service_connection = self.service_connections_map[ready_socket]
try:
# handles the current request, retrieving the return value
return_value = self.client_service.handle_request(ready_service_connection)
except Exception as exception:
# prints an error message about the problem handling the request
self.service_plugin.error("Problem while handling the request: " + colony.legacy.UNICODE(exception))
# sets the return value to false, to close the connection
return_value = False
# if the request handling returned true the connection
# is meant to remain open
if return_value:
# sets the new cancel timeout
ready_service_connection.cancel(self.connection_timeout)
# otherwise the connection is meant to be closed
else:
# retrieves the connection tuple
connection_tuple = ready_service_connection.get_connection_tuple()
# removes the ready service connection (via remove work)
self.remove_work(connection_tuple)
def __wake_base(self):
"""
The wake task base implementation.
"""
# sends a "dummy" message to the wake "file" (via communication channel)
self.wake_file.sendto(DUMMY_MESSAGE_VALUE, (LOCAL_HOST, self.wake_file_port))
def __add_connection_epoll(self, connection_socket, connection_address, connection_port):
        # retrieves the
import json
import uuid
from tests.testmodels import (
Event,
JSONFields,
Reporter,
SourceFields,
StraightFields,
Team,
Tournament,
UUIDFkRelatedModel,
UUIDFkRelatedNullModel,
UUIDM2MRelatedModel,
UUIDPkModel,
)
from tortoise import Tortoise, fields
from tortoise.contrib import test
from tortoise.fields.relational import BackwardFKField, ForeignKey, ManyToManyField, OneToOneField
class TestBasic(test.TortoiseTransactionedTestModelsTestCase):
maxDiff = None
async def test_describe_models_all_serializable(self):
val = Tortoise.describe_models()
json.dumps(val)
self.assertIn("models.SourceFields", val.keys())
self.assertIn("models.Event", val.keys())
async def test_describe_models_all_not_serializable(self):
val = Tortoise.describe_models(serializable=False)
with self.assertRaisesRegex(TypeError, "not JSON serializable"):
json.dumps(val)
self.assertIn("models.SourceFields", val.keys())
self.assertIn("models.Event", val.keys())
async def test_describe_models_some(self):
val = Tortoise.describe_models([Event, Tournament, Reporter, Team])
self.assertEqual(
{"models.Event", "models.Tournament", "models.Reporter", "models.Team"}, set(val.keys())
)
async def test_describe_model_straight(self):
val = StraightFields.describe()
self.assertEqual(
val,
{
"name": "models.StraightFields",
"app_label": "models",
"db_table": "models_straightfields",
"abstract": False,
"description": "Straight auto-mapped fields",
"unique_together": [["chars", "blip"]],
"pk_field": {
"name": "eyedee",
"field_type": "IntegerField",
"db_column": "eyedee",
"db_column_types": {"": "INT"},
"python_type": "int",
"generated": True,
"auto_created": False,
"nullable": False,
"unique": True,
"db_index": True,
"default": None,
"description": "Da PK",
},
"fields": [
{
"name": "chars",
"field_type": "CharField",
"db_column": "chars",
"db_column_types": {"": "VARCHAR(50)"},
"python_type": "str",
"generated": False,
"auto_created": False,
"nullable": False,
"unique": False,
"db_index": True,
"default": None,
"description": "Some chars",
},
{
"name": "blip",
"field_type": "CharField",
"db_column": "blip",
"db_column_types": {"": "VARCHAR(50)"},
"python_type": "str",
"generated": False,
"auto_created": False,
"nullable": False,
"unique": False,
"db_index": False,
"default": "BLIP",
"description": None,
},
{
"name": "fk",
"field_type": "ForeignKey",
"raw_field": "fk_id",
"python_type": "models.StraightFields",
"generated": False,
"auto_created": False,
"nullable": True,
"unique": False,
"db_index": False,
"default": None,
"description": "Tree!",
},
{
"name": "o2o",
"default": None,
"description": "Line",
"field_type": "OneToOneField",
"generated": False,
"auto_created": False,
"db_index": True,
"nullable": True,
"python_type": "models.StraightFields",
"raw_field": "o2o_id",
"unique": True,
},
{
"name": "rel_to",
"field_type": "ManyToManyField",
"python_type": "models.StraightFields",
"generated": False,
"auto_created": False,
"nullable": False,
"unique": False,
"db_index": False,
"default": None,
"description": "M2M to myself",
},
{
"name": "fk_id",
"field_type": "IntegerField",
"db_column": "fk_id",
"db_column_types": {"": "INT"},
"python_type": "int",
"generated": False,
"auto_created": True,
"nullable": True,
"unique": False,
"db_index": False,
"default": None,
"description": "Tree!",
},
{
"name": "fkrev",
"field_type": "BackwardFKField",
"python_type": "models.StraightFields",
"generated": False,
"auto_created": True,
"nullable": True,
"unique": False,
"db_index": False,
"default": None,
"description": "Tree!",
},
{
"db_column": "o2o_id",
"db_column_types": {"": "INT"},
"default": None,
"description": "Line",
"field_type": "IntegerField",
"generated": False,
"auto_created": True,
"db_index": True,
"name": "o2o_id",
"nullable": True,
"python_type": "int",
"unique": True,
},
{
"name": "o2o_rev",
"default": None,
"description": "Line",
"field_type": "BackwardOneToOneField",
"generated": False,
"auto_created": True,
"db_index": False,
"nullable": True,
"python_type": "models.StraightFields",
"unique": False,
},
{
"name": "rel_from",
"field_type": "ManyToManyField",
"python_type": "models.StraightFields",
"generated": False,
"auto_created": True,
"nullable": False,
"unique": False,
"db_index": False,
"default": None,
"description": "M2M to myself",
},
],
},
)
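    # Illustrative helper (not part of the original test suite): the structure
    # asserted above can be consumed generically, for example to list every
    # declared field name of a model from its describe() output.
    @staticmethod
    def _field_names(description):
        # the pk field is reported separately from the "fields" list
        names = [description["pk_field"]["name"]]
        names.extend(field["name"] for field in description["fields"])
        return names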
async def test_describe_model_straight_native(self):
val = StraightFields.describe(serializable=False)
self.assertEqual(
val,
{
"name": "models.StraightFields",
"app_label": "models",
"db_table": "models_straightfields",
"abstract": False,
"description": "Straight auto-mapped fields",
"unique_together": [["chars", "blip"]],
"pk_field": {
"name": "eyedee",
"field_type": fields.IntegerField,
"db_column": "eyedee",
"db_column_types": {"": "INT"},
"python_type": int,
"generated": True,
"auto_created": False,
"nullable": False,
"unique": True,
"db_index": True,
"default": None,
"description": "Da PK",
},
"fields": [
{
"name": "chars",
"field_type": fields.CharField,
"db_column": "chars",
"db_column_types": {"": "VARCHAR(50)"},
"python_type": str,
"generated": False,
"auto_created": False,
"nullable": False,
"unique": False,
"db_index": True,
"default": None,
"description": "Some chars",
},
{
"name": "blip",
"field_type": fields.CharField,
"db_column": "blip",
"db_column_types": {"": "VARCHAR(50)"},
"python_type": str,
"generated": False,
"auto_created": False,
"nullable": False,
"unique": False,
"db_index": False,
"default": "BLIP",
"description": None,
},
{
"name": "fk",
"field_type": ForeignKey,
"raw_field": "fk_id",
"python_type": StraightFields,
"generated": False,
"auto_created": False,
"nullable": True,
"unique": False,
"db_index": False,
"default": None,
"description": "Tree!",
},
{
"name": "o2o",
"default": None,
"description": "Line",
"field_type": OneToOneField,
"generated": False,
"auto_created": False,
"db_index": True,
"nullable": True,
"python_type": StraightFields,
"raw_field": "o2o_id",
"unique": True,
},
{
"name": "rel_to",
"field_type": ManyToManyField,
"python_type": StraightFields,
"generated": False,
"auto_created": False,
"nullable": False,
"unique": False,
"db_index": False,
"default": None,
"description": "M2M to myself",
},
{
"name": "fk_id",
"field_type": fields.IntegerField,
"db_column": "fk_id",
"db_column_types": {"": "INT"},
"python_type": int,
"generated": False,
"auto_created": True,
"nullable": True,
"unique": False,
"db_index": False,
"default": None,
"description": "Tree!",
},
{
"name": "fkrev",
"field_type": BackwardFKField,
"python_type": StraightFields,
"generated": False,
"auto_created": True,
"nullable": True,
"unique": False,
"db_index": False,
"default": None,
"description": "Tree!",
},
{
"name": "o2o_id",
"field_type": fields.IntegerField,
"db_column": "o2o_id",
"db_column_types": {"": "INT"},
"python_type": int,
"generated": False,
"auto_created": True,
"nullable": True,
"unique": True,
"db_index": True,
"default": None,
"description": "Line",
},
{
"name": "o2o_rev",
"default": None,
"description": "Line",
"field_type": fields.BackwardOneToOneField,
"generated": False,
"auto_created": True,
"db_index": False,
"nullable": True,
"python_type": StraightFields,
"unique": False,
},
{
"name": "rel_from",
"field_type": ManyToManyField,
"python_type": StraightFields,
"generated": False,
"auto_created": True,
"nullable": False,
"unique": False,
"db_index": False,
"default": None,
"description": "M2M to myself",
},
],
},
)
async def test_describe_model_source(self):
val = SourceFields.describe()
self.assertEqual(
val,
{
"name": "models.SourceFields",
"app_label": "models",
"db_table": "sometable",
"abstract": False,
"description": "Source mapped fields",
"unique_together": [["chars", "blip"]],
"pk_field": {
"name": "eyedee",
"field_type": "IntegerField",
"db_column": "sometable_id",
"db_column_types": {"": "INT"},
"python_type": "int",
"generated": True,
"auto_created": False,
"nullable": False,
"unique": True,
"db_index": True,
"default": None,
"description": "Da PK",
},
"fields": [
{
"name": "chars",
"field_type": "CharField",
"db_column": "some_chars_table",
"db_column_types": {"": "VARCHAR(50)"},
"python_type": "str",
"generated": False,
"auto_created": False,
"nullable": False,
"unique": False,
"db_index": True,
"default": None,
"description": "Some chars",
},
{
"name": "blip",
"field_type": "CharField",
"db_column": "da_blip",
"db_column_types": {"": "VARCHAR(50)"},
"python_type": "str",
"generated": False,
"auto_created": False,
"nullable": False,
"unique": False,
"db_index": False,
"default": "BLIP",
"description": None,
},
{
"name": "fk",
"field_type": "ForeignKey",
"raw_field": "fk_id",
"python_type": "models.SourceFields",
"generated": False,
"auto_created": False,
"nullable": True,
"unique": False,
"db_index": False,
"default": None,
"description": "Tree!",
},
{
"name": "o2o",
"default": None,
"description": "Line",
"field_type": "OneToOneField",
"generated": False,
"auto_created": False,
"db_index": True,
"nullable": True,
"python_type": "models.SourceFields",
"raw_field": "o2o_id",
"unique": True,
},
{
"name": "rel_to",
"field_type": "ManyToManyField",
"python_type": "models.SourceFields",
"generated": False,
"auto_created": False,
"nullable": False,
"unique": False,
"db_index": False,
"default": None,
"description": "M2M to myself",
},
{
"name": "fk_id",
"field_type": "IntegerField",
"db_column": "fk_sometable",
"db_column_types": {"": "INT"},
"python_type": "int",
"generated": False,
"auto_created": True,
"nullable": True,
"unique": False,
"db_index": False,
"default": None,
"description": "Tree!",
},
{
"name": "fkrev",
"field_type": "BackwardFKField",
"python_type": "models.SourceFields",
"generated": False,
"auto_created": True,
"nullable": True,
"unique": False,
"db_index": False,
"default": None,
"description": "Tree!",
},
{
"name": "o2o_id",
"field_type": "IntegerField",
"db_column": "o2o_sometable",
"db_column_types": {"": "INT"},
"python_type": "int",
"generated": False,
"auto_created": True,
"nullable": True,
"unique": True,
"db_index": True,
"default": None,
"description": "Line",
},
{
"name": "o2o_rev",
"default": None,
"description": "Line",
"field_type": "BackwardOneToOneField",
"generated": False,
"auto_created": True,
"db_index": False,
"nullable": True,
"python_type": "models.SourceFields",
"unique": False,
},
{
"name": "rel_from",
"field_type": "ManyToManyField",
"python_type": "models.SourceFields",
"generated": False,
"auto_created": True,
"nullable": False,
"unique": False,
"db_index": False,
"default": None,
"description": "M2M to myself",
},
],
},
)
async def test_describe_model_source_native(self):
val = SourceFields.describe(serializable=False)
self.assertEqual(
val,
{
"name": "models.SourceFields",
"app_label": "models",
"db_table": "sometable",
"abstract": False,
"description": "Source mapped fields",
"unique_together": [["chars", "blip"]],
"pk_field": {
"name": "eyedee",
"field_type": fields.IntegerField,
"db_column": "sometable_id",
"db_column_types": {"": "INT"},
"python_type": int,
"generated": True,
"auto_created": False,
"nullable": False,
"unique": True,
"db_index": True,
"default": None,
"description": "Da PK",
},
"fields": [
{
"name": "chars",
"field_type": fields.CharField,
"db_column": "some_chars_table",
"db_column_types": {"": "VARCHAR(50)"},
"python_type": str,
"generated": False,
"auto_created": False,
"nullable": False,
"unique": False,
"db_index": True,
"default": None,
"description": "Some chars",
},
{
"name": "blip",
"field_type": fields.CharField,
"db_column": "da_blip",
"db_column_types": {"": "VARCHAR(50)"},
"python_type": str,
"generated": False,
"auto_created": False,
"nullable": False,
"unique": False,
"db_index": False,
"default": "BLIP",
"description": None,
},
{
"name": "fk",
"field_type": ForeignKey,
"raw_field": "fk_id",
"python_type": SourceFields,
"generated": False,
"auto_created": False,
"nullable": True,
"unique": False,
"db_index": False,
"default": None,
"description": "Tree!",
},
{
"name": "o2o",
"default": None,
"description": "Line",
"field_type": OneToOneField,
"generated": False,
"auto_created": False,
"db_index": True,
"nullable": True,
"python_type": SourceFields,
"raw_field": "o2o_id",
"unique": True,
},
{
"name": "rel_to",
"field_type": ManyToManyField,
"python_type": SourceFields,
"generated": False,
"auto_created": False,
"nullable": False,
"unique": False,
"db_index": False,
"default": None,
"description": "M2M to myself",
},
{
"name": "fk_id",
"field_type": fields.IntegerField,
"db_column": "fk_sometable",
"db_column_types": {"": "INT"},
"python_type": int,
"generated": False,
"auto_created": True,
"nullable": True,
"unique": False,
"db_index": False,
"default": None,
"description": "Tree!",
},
{
"name": "fkrev",
"field_type": BackwardFKField,
"python_type": SourceFields,
"generated": False,
"auto_created": True,
"nullable": True,
"unique": False,
"db_index": False,
"default": None,
"description": "Tree!",
},
{
"name": "o2o_id",
"field_type": fields.IntegerField,
"db_column": "o2o_sometable",
"db_column_types": {"": "INT"},
"python_type": int,
"generated": False,
"auto_created": True,
"nullable": True,
"unique": True,
"db_index": True,
"default": None,
"description": "Line",
},
{
"name": "o2o_rev",
"default": None,
"description": "Line",
"field_type": fields.BackwardOneToOneField,
"generated": False,
"auto_created": True,
"db_index": False,
"nullable": True,
"python_type": SourceFields,
"unique": False,
},
{
"name": | |
# cobrab11/black1-bot: extensions/logger.py
# BS mark.1-55
# /* coding: utf-8 */
# BlackSmith mark.1
# logger.py
# © 2011-2013 simpleApps (http://simpleapps.ru)
# Thanks to: WitcherGeralt (<EMAIL>)
logConfigFile = "dynamic/logstate.txt"
logCacheFile = "logcache.txt"
logThemes = {}
Months = ("", u"Январь", u"Февраль", u"Март", u"Апрель", u"Май", u"Июнь",
          u"Июль", u"Август", u"Сентябрь", u"Октябрь", u"Ноябрь", u"Декабрь")
Days = (u"Понедельник", u"Вторник", u"Среда", u"Четверг", u"Пятница",
        u"Суббота", u"Воскресенье", u"Понедельник")
logAfl = {
"none": u"посетитель",
"member": u"зарегистрированный пользователь",
"admin": u"администратор",
"owner": u"владелец"
}
logRole = {
"visitor": u"гость",
"moderator": u"модератор",
"participant": u"участник"
}
logStatus = {
None: u"доступен",
"xa": u"недоступен",
"dnd": u"не беспокоить",
"away": u"отсутствую",
"chat": u"готов поболтать"
}
logCfg = {}
logNicks = {}
logSynchronize = {}
logger_compile_link = re.compile("((http[s]?|ftp)://[^\s'\"<>]+)")
DefaultLogHeader = u'''<!doctype html>
<html>
<head>
<title>%(date)s — %(chat)s</title>
<meta charset="utf-8">
<link rel="stylesheet" type="text/css" href="../../.theme/logger.css"/>
</head>
<body>
<div class="shadowed" align="right"><a href="http://simpleapps.ru/">BlackSmith Bot log file</a></div>
<div class="shadowed" align="center"><a href="xmpp:%(chat)s?join" title="Join to %(chat)s">%(chat)s</a><hr></div>
<h3><div class="shadowed">%(date)s<hr></div></h3>
<div>
<tt>'''
LoggerCfg = {"theme": "LunnaCat", "enabled": False, "timetype": "local", "dir": "logs"}
Subjs = {}
def logGetDate(Time):
if LoggerCfg["timetype"] == "local":
func = time.localtime
else:
func = time.gmtime
get_date = lambda date: tuple(func(date))[:3]
year, month, day = Time[:3]
try:
date = time.mktime(time.struct_time((year, month, day, 6, 0, 0, 0, 0, 0)))
except Exception:
year_p = month_p = day_p = year_n = month_n = day_n = 0
else:
try:
year_p, month_p, day_p = get_date(date - 86400)
year_n, month_n, day_n = get_date(date + 86400)
except ValueError: ## Meet 2038 year!
year_p, month_p, day_p = year_n, month_n, day_n =\
[time.strftime(x) for x in ("%Y", "%m", "%d")] ## Just for fun.
if year_p == "2038":
Print("#-# Impossible! Bot works in 2038 year! Hello from 2013!", xmpp.debug.color_cyan) ## fuuuuuuuuuuuun!
return "{0}/{1:02}/{2:02}||{3}/{4:02}/{5:02}".format(year_p, month_p, day_p, year_n, month_n, day_n)
def getLogFile(chat, Time):
mon = "{0:02}".format(Time.tm_mon)
logDir = chkFile("%s/%s/%d/%s" % (LoggerCfg["dir"], chat, Time.tm_year, mon))
if not os.path.isdir(logDir):
try:
os.makedirs(logDir)
except:
return False
prev, next = logGetDate(Time).split("||")
day = "{0:02}".format(Time.tm_mday)
logFileName = "%s/%s.html" % (logDir, day)
if os.path.isfile(logFileName):
logFile = open(logFileName, "a")
INFO["fw"] += 1
else:
date = time.strftime("{0}, {1} %d, %Y".format(Days[Time.tm_wday], Months[Time.tm_mon]), Time)
themeFile = chkFile("%s/%s/.theme/pattern.html" % (LoggerCfg["dir"], chat))
if os.path.isfile(themeFile):
pattern = read_file(themeFile)
else:
pattern = DefaultLogHeader
exfile = logCfg[chat]["file"]
if logFileName != exfile:
if exfile and os.path.isfile(exfile):
write_file(exfile, "\n</tt>\n</div>\n</body>\n</html>", "a")
logCfg[chat]["file"] = logFileName
logFile = open(logFileName, "w")
INFO["fcr"] += 1
logFile.write(pattern % vars())
if chat in GROUPCHATS:
if Subjs[chat]['time'] and Subjs[chat]['body']:
Time = time.time()
if (Time - Subjs[chat]['time']) > 20:
Subjs[chat]['time'] = Time
logFile.write('<span class="topic">%s</span><br>' % logFormat(Subjs[chat]['body']))
#logWrite(chat, Subjs[chat]['body'].replace("\n", "<br>"), "subject")
return logFile
def logFormat(body):
body = xmpp.XMLescape(body)
body = logger_compile_link.sub(lambda obj: "<a href=\"{0}\">{0}</a>".format(obj.group(0)), body) #'
body = body.replace(chr(10), "<br>")
body = body.replace(chr(9), " " * 4) # "	" requires tag <pre>, but " " just eats your brain
return body
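# Illustrative example (not part of the original plugin): logFormat() escapes
# markup, turns bare URLs into anchors and converts newlines/tabs to HTML.
# The sample text below is hypothetical.
def _logformat_example():
    # "<b>" is escaped, the URL becomes a link, the newline becomes <br>
    return logFormat(u"see <b> http://simpleapps.ru\nnext line")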
def logWrite(chat, state, body, nick = None):
if LoggerCfg["timetype"].lower() == "gmt":
Time = time.gmtime()
elif LoggerCfg["timetype"].lower() == "local":
Time = time.localtime()
with logSynchronize[chat]:
logFile = getLogFile(chat, Time)
if logFile:
timestamp = time.strftime("%H:%M:%S", Time)
if nick: nick = xmpp.XMLescape(nick)
body = logFormat(body)
logFile.write(chr(10))
if state == "subject":
logFile.write('<a id="t{0}" href="#t{0}">[{0}]</a> <span class="topic">{1}</span><br>'.format(timestamp, body))
elif state == "msg":
if nick:
nickColor = "nick%d" % coloredNick(chat, nick)
if body.startswith("/me"):
logFile.write('<span class="{0}"><a id="t{1}" href="#t{1}">[{1}]</a> *{2} {3}</span><br>'.format(nickColor, timestamp, nick, body[3:]))
else:
logFile.write('<span class="{0}"><a id="t{1}" href="#t{1}">[{1}]</a> <{2}></span> <span class="text">{3}</span><br>'.format(nickColor, timestamp, nick, body))
else:
logFile.write('<span class="status"><a id="t{0}" href="#t{0}">[{0}]</a></span> '.format(timestamp))
logFile.write('<span class="status">*** %s</span><br>' % (body))
else:
logFile.write('<span class="{0}"><a id="t{1}" href="#t{1}">[{1}]</a></span> '.format(state, timestamp))
logFile.write('<span class="%s">%s</span><br>' % (state, body))
logFile.close()
def coloredNick(chat, nick):
if logNicks[chat].has_key(nick):
return logNicks[chat][nick]
if len(logNicks[chat]) < 20:
ls = range(1, 21)
for x in logNicks[chat].values():
ls.remove(x)
logNicks[chat][nick] = x = random.choice(ls)
else:
logNicks[chat][nick] = x = random.randrange(1, 21)
return x
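# Illustrative example (not part of the original plugin): coloredNick() gives
# the first 20 distinct nicks in a chat unique color indexes 1..20 and reuses
# random indexes afterwards; repeated calls for the same nick are stable.
# The chat JID below is hypothetical.
def _colored_nick_example(chat=u"room@conference.example.org"):
    logNicks.setdefault(chat, {})
    first = coloredNick(chat, u"someone")
    # the same nick always maps to the same index
    return first == coloredNick(chat, u"someone")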
def logWriteMessage(stanza, mType, source, body):
if GROUPCHATS.has_key(source[1]) and logCfg[source[1]]["enabled"] and mType == "public" and source[2]:
logWrite(source[1], "msg", body, source[2])
def logWriteSubject(chat, nick, subject, body):
if chat in logCfg and logCfg[chat]["enabled"]:
Time = time.time()
if (Time - Subjs[chat]['time']) > 20:
Subjs[chat] = {'body': body, 'time': Time}
if nick:
body = "%s set subject:\n%s" % (nick.strip(), subject.strip())
logWrite(chat, "subject", body)
def logWriteJoined(chat, nick, afl, role, status, text):
if GROUPCHATS.has_key(chat) and logCfg[chat]["enabled"]:
some = ""
if logCfg[chat].get("jids"):
jid = GROUPCHATS[chat].get(nick, {}).get("full_jid", "?@?/?")
if not chat in jid:
some = " (%(jid)s)" % vars()
log = u"*** %(nick)s%(some)s заходит как %(role)s"
if afl != "none":
log += u" и %(afl)s"
log += u" и теперь %(status)s"
afl, role, status = logAfl.get(afl, afl), logRole.get(role, role), logStatus.get(status, status)
if text:
log += " (%(text)s)"
logWrite(chat, "join", log % vars())
def logWriteARole(chat, nick, aRole, reason):
if GROUPCHATS.has_key(chat) and logCfg[chat]["enabled"]:
role, afl = aRole
log = u"*** %(nick)s теперь %(role)s"
if afl != "none":
log += u" и %(afl)s"
if reason:
log += u" (%(reason)s)"
afl, role = logAfl.get(afl, ""), logRole.get(role, "")
logWrite(chat, "role", log % vars())
def logWriteNickChange(stanza, chat, oldNick, nick):
if GROUPCHATS.has_key(chat) and logCfg[chat]["enabled"]:
logWrite(chat, "nick", u'*** %s меняет ник на %s' % (oldNick, nick))
def logWriteStatusChange(chat, nick, status, priority, text):
if GROUPCHATS.has_key(chat) and logCfg[chat]["enabled"]:
log = u"*** %(nick)s теперь %(status)s"
if text:
log += " (%(text)s)"
if priority:
log += " [%s]" % priority
status = logStatus.get(status, "")
logWrite(chat, "status", log % vars())
def logWriteLeave(chat, nick, reason, code):
if GROUPCHATS.has_key(chat) and logCfg[chat]["enabled"]:
some = ""
if logCfg[chat].get("jids"):
jid = GROUPCHATS[chat].get(nick, {}).get("full_jid", "")
if not chat in jid:
some = " (%(jid)s)" % vars()
# status_code_change(["full_jid"], chat, nick) #?!
if code:
if code == "307":
if reason:
logWrite(chat, "kick", u"*** %s%s выгнали из конференции (%s)" % (nick, some, reason))
else:
logWrite(chat, "kick", u"*** %s%s выгнали из конференции" % (nick, some))
elif code == "301":
if reason:
logWrite(chat, "ban", u"*** %s%s запретили входить в данную конференцию (%s)" % (nick, some, reason))
else:
logWrite(chat, "ban", u"*** %s%s запретили входить в данную конференцию" % (nick, some))
elif reason:
logWrite(chat, "leave", u"*** %s%s выходит из конференции (%s)" % (nick, some, reason))
else:
logWrite(chat, "leave", u"*** %s%s выходит из конференции" % (nick, some))
def logFileInit(chat):
cfg = {"theme": LoggerCfg["theme"], "enabled": False, "file": "", "jids": 0}
Subjs[chat] = {'body': '', 'time': 0}
if check_file(chat, logCacheFile, str(cfg)):
cfg = eval(read_file("dynamic/%s/%s" % (chat, logCacheFile)))
else:
delivery(u"Внимание! Не удалось создать файл \"dynamic/%s/%s\"!" % (chat, logCacheFile))
logCfg[chat] = cfg
logNicks[chat] = {}
logSynchronize[chat] = threading.Semaphore()
if not os.path.isdir(chkFile("%s/%s/.theme" % (LoggerCfg["dir"], chat))) and logThemes.has_key(cfg["theme"]):
if logCfg[chat]["enabled"]:
logThemeCopier(chat, cfg["theme"])
def init_logger():
if initialize_file(logConfigFile, str(LoggerCfg)):
LoggerCfg.update(eval(read_file(logConfigFile)))
if LoggerCfg["enabled"]:
if not os.path.isdir(LoggerCfg["dir"]):
try:
os.makedirs(LoggerCfg["dir"])
except:
pass
Dir = "static/logger/themes"
for Theme in os.listdir(Dir):
path = "%s/%s" % (Dir, Theme)
if os.path.isdir(path):
if "logger.css" in os.listdir(path):
logThemes[Theme] = path
handler_register("01si", logFileInit)
handler_register("04eh", logWriteJoined)
handler_register("05eh", logWriteLeave)
handler_register("01eh", logWriteMessage)
handler_register("09eh", logWriteSubject)
handler_register("07eh", logWriteARole)
handler_register("06eh", logWriteNickChange)
handler_register("08eh", logWriteStatusChange)
command_handler(logSetState, 30, "logger")
return True
else:
Print("\nCan't init %s, logger wasn't enabled." % logConfigFile, color2)
def logThemeCopier(chat, theme):
import shutil
themeDir = chkFile("%s/%s/.theme" % (LoggerCfg["dir"], chat))
if os.path.exists(themeDir):
shutil.rmtree(themeDir)
shutil.copytree(logThemes[theme], themeDir)
del shutil
def logSetStateMain(mType, source, argv):
if argv:
argv = argv.split()
a0 = (argv.pop(0)).lower()
if a0 in ("1", u"вкл"):
if not LoggerCfg["enabled"]:
LoggerCfg["enabled"] = True
write_file(logConfigFile, str(LoggerCfg))
for chat in GROUPCHATS.keys():
execute_handler(logFileInit, (chat,))
if init_logger():
reply(mType, source, u"Включил логгер.")
else:
reply(mType, source, "Something wrong")
else:
reply(mType, source, u"Уже включено.")
elif a0 in ("0", u"выкл"):
if LoggerCfg["enabled"]:
LoggerCfg["enabled"] = False
write_file(logConfigFile, str(LoggerCfg))
name = logWriteMessage.func_name
for handler in Handlers["01eh"]:
if name == handler.func_name:
Handlers["01eh"].remove(handler)
name = logWriteSubject.func_name
for handler in Handlers["09eh"]:
if name == handler.func_name:
Handlers["09eh"].remove(handler)
name = logWriteNickChange.func_name
for handler in Handlers["06eh"]:
if name == handler.func_name:
Handlers["06eh"].remove(handler)
name = logWriteStatusChange.func_name
for handler in Handlers["08eh"]:
if name == handler.func_name:
Handlers["08eh"].remove(handler)
name = logWriteARole.func_name
for handler in Handlers["07eh"]:
if name == handler.func_name:
Handlers["07eh"].remove(handler)
name = logWriteJoined.func_name
for handler in Handlers["04eh"]:
if name == handler.func_name:
Handlers["04eh"].remove(handler)
name = logWriteLeave.func_name
for handler in Handlers["05eh"]:
if name == handler.func_name:
Handlers["05eh"].remove(handler)
name = logFileInit.func_name
try:
command = eval(read_file("help/logger").decode('utf-8'))[logSetState.func_name]["cmd"]
except:
delivery(u"Внимание! Не удалось загрузить файл помощи логгера.")
else:
del COMMAND_HANDLERS[command]
for handler in Handlers["01si"]:
if name == handler.func_name:
Handlers["01si"].remove(handler)
logCfg.clear()
logSynchronize.clear()
reply(mType, source, u"Выключил логгер.")
else:
reply(mType, source, u"Логгер вообще не включён.")
elif a0 in (u"тема", "темы"):
if argv:
if logThemes.has_key(argv[0]):
themeFile = "static/logger/themes/%s/name.txt" % LoggerCfg["theme"]
if os.path.isfile(themeFile) and argv[0] == read_file(themeFile):
reply(mType, source, u"Тема «%s» уже используется плагином." % argv[0])
else:
LoggerCfg["theme"] = argv[0]
write_file(logConfigFile, str(LoggerCfg))
reply(mType, source, u"Установил «%s» стандартной темой." % argv[0])
else:
reply(mType, source, u"Нет такой темы :(")
else:
ls = []
for Numb, Theme in enumerate(logThemes.keys(), 1):
ls.append("%d. %s." % (Numb , Theme))
reply(mType, source, str.join(chr(10), ls))
elif a0 == u"папка":
if argv:
LoggerCfg["dir"] = argv[0]
logThemeCopier(source[1], "LunnaCat")
write_file(logConfigFile, str(LoggerCfg))
repl = u"Теперь логи будут храниться в папке «%s»." % argv[0]
else:
                repl = u"Сейчас логи хранятся в «%s»." % LoggerCfg["dir"]
reply(mType, source, repl)
elif a0 == u"время":
if argv:
if argv[0] in ("gmt", "local"):
LoggerCfg["timetype"] = argv[0]
write_file(logConfigFile, str(LoggerCfg))
repl = u"Установил тип записи времени на «%s»." % argv[0]
logWrite(source[1], "status", u"*** Установлен тип записи времени: %s" % argv[0])
else:
repl = u"Недопустимый тип. Доступные: local, gmt."
else:
repl = u"Сейчас установлен тип записи времени «%s»." % LoggerCfg["timetype"]
reply(mType, source, repl)
else:
reply(mType, source, u"Что-то не то...")
elif LoggerCfg["enabled"]:
reply(mType, source, u"Сейчас логгер включён.")
else:
reply(mType, source, u"Сейчас логгер выключен.")
def logSetState(mType, source, argv):
if GROUPCHATS.has_key(source[1]):
chat = source[1]
if argv:
argv = argv.split()
a0 = (argv.pop(0)).lower()
if a0 in ("1", u"вкл"):
if not logCfg[chat]["enabled"]:
logCfg[chat]["enabled"] = True
write_file("dynamic/%s/%s" % (chat, logCacheFile), str(logCfg[chat]))
reply(mType, source, u"Включил логирование «%s»." % chat)
else:
reply(mType, source, u"Уже включено.")
elif a0 in ("0", u"выкл"):
if logCfg[chat]["enabled"]:
logCfg[chat]["enabled"] = False
write_file("dynamic/%s/%s" % (chat, logCacheFile), str(logCfg[chat]))
logWrite(chat, "status", u"*** Логирование конференции приостановлено")
reply(mType, source, u"Выключил логирование «%s»." % chat)
else:
reply(mType, source, u"«%s» не логируется." % chat)
elif a0 in (u"тема", "темы"):
if argv:
if logThemes.has_key(argv[0]):
themeFile = chkFile("%s/%s/.theme/name.txt" % (LoggerCfg["dir"], chat))
if os.path.isfile(themeFile) and argv[0] == read_file(themeFile):
reply(mType, source, u"Тема «%s» уже используется плагином." % argv[0])
else:
logCfg[chat]["theme"] = argv[0]
write_file("dynamic/%s/%s" % (chat, logCacheFile), str(logCfg[chat]))
logThemeCopier(chat, argv[0])
repl = u"Установил тему «%s». Она вступит в силу "
if os.path.exists(chkFile("%s/%s/.theme/pattern.html" % (LoggerCfg["dir"], chat))):
repl += u"с завтрашнего дня."
else:
repl += u"немедленно."
reply(mType, source, repl % argv[0])
else:
reply(mType, source, u"Нет такой темы :(.")
else:
repl = str()
for num, thm in enumerate(logThemes.keys()):
repl += "%d. %s.\n" % (num + 1, thm)
reply(mType, source, repl)
elif a0 in ("жиды", "жид"):
if argv:
if argv[0] == "1":
if not logCfg[chat].get("jids"):
logCfg[chat]["jids"] = 1
write_file("dynamic/%s/%s" % (chat, logCacheFile), str(logCfg[chat]))
reply(mType, source, "Теперь Jabber ID пользователей будут записываться в логи. Обратите внимание: уже записанные Jabber ID никуда не исчезнут даже после отключения данной опции.")
else:
reply(mType, source, "Эта опция уже включена.")
elif argv[0] == "0":
if logCfg[chat].get("jids"):
logCfg[chat]["jids"] = 0
write_file("dynamic/%s/%s" % (chat, logCacheFile), str(logCfg[chat]))
reply(mType, source, "Больше Jabber ID пользователей записываться не будут. Обратите внимание: уже записанные Jabber ID никуда не исчезнут.")
else:
reply(mType, source, "Эта опция не | |
# File: osm_nbi/admin_topics.py
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import logging
from uuid import uuid4
from hashlib import sha256
from http import HTTPStatus
from time import time
from osm_nbi.validation import user_new_schema, user_edit_schema, project_new_schema, project_edit_schema, \
vim_account_new_schema, vim_account_edit_schema, sdn_new_schema, sdn_edit_schema, \
wim_account_new_schema, wim_account_edit_schema, roles_new_schema, roles_edit_schema, \
k8scluster_new_schema, k8scluster_edit_schema, k8srepo_new_schema, k8srepo_edit_schema, \
osmrepo_new_schema, osmrepo_edit_schema, \
validate_input, ValidationError, is_valid_uuid # To check that User/Project Names don't look like UUIDs
from osm_nbi.base_topic import BaseTopic, EngineException
from osm_nbi.authconn import AuthconnNotFoundException, AuthconnConflictException
from osm_common.dbbase import deep_update_rfc7396
__author__ = "<NAME> <<EMAIL>>"
class UserTopic(BaseTopic):
topic = "users"
topic_msg = "users"
schema_new = user_new_schema
schema_edit = user_edit_schema
multiproject = False
def __init__(self, db, fs, msg, auth):
BaseTopic.__init__(self, db, fs, msg, auth)
@staticmethod
def _get_project_filter(session):
"""
        Generates a filter dictionary for querying database users.
        Current policy: an admin can see all users; a non-admin can only see its own user.
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:return:
"""
if session["admin"]: # allows all
return {}
else:
return {"username": session["username"]}
def check_conflict_on_new(self, session, indata):
# check username not exists
if self.db.get_one(self.topic, {"username": indata.get("username")}, fail_on_empty=False, fail_on_more=False):
raise EngineException("username '{}' exists".format(indata["username"]), HTTPStatus.CONFLICT)
# check projects
if not session["force"]:
for p in indata.get("projects") or []:
# To allow project addressing by Name as well as ID
if not self.db.get_one("projects", {BaseTopic.id_field("projects", p): p}, fail_on_empty=False,
fail_on_more=False):
raise EngineException("project '{}' does not exist".format(p), HTTPStatus.CONFLICT)
def check_conflict_on_del(self, session, _id, db_content):
"""
Check if deletion can be done because of dependencies if it is not force. To override
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param _id: internal _id
:param db_content: The database content of this item _id
:return: None if ok or raises EngineException with the conflict
"""
if _id == session["username"]:
raise EngineException("You cannot delete your own user", http_code=HTTPStatus.CONFLICT)
@staticmethod
def format_on_new(content, project_id=None, make_public=False):
BaseTopic.format_on_new(content, make_public=False)
# Removed so that the UUID is kept, to allow User Name modification
# content["_id"] = content["username"]
salt = uuid4().hex
content["_admin"]["salt"] = salt
if content.get("password"):
content["password"] = sha256(content["password"].encode('utf-8') + salt.encode('utf-8')).hexdigest()
if content.get("project_role_mappings"):
projects = [mapping["project"] for mapping in content["project_role_mappings"]]
if content.get("projects"):
content["projects"] += projects
else:
content["projects"] = projects
@staticmethod
def format_on_edit(final_content, edit_content):
BaseTopic.format_on_edit(final_content, edit_content)
if edit_content.get("password"):
salt = uuid4().hex
final_content["_admin"]["salt"] = salt
final_content["password"] = sha256(edit_content["password"].encode('utf-8') +
salt.encode('utf-8')).hexdigest()
return None
def edit(self, session, _id, indata=None, kwargs=None, content=None):
if not session["admin"]:
raise EngineException("needed admin privileges", http_code=HTTPStatus.UNAUTHORIZED)
# Names that look like UUIDs are not allowed
name = (indata if indata else kwargs).get("username")
if is_valid_uuid(name):
raise EngineException("Usernames that look like UUIDs are not allowed",
http_code=HTTPStatus.UNPROCESSABLE_ENTITY)
return BaseTopic.edit(self, session, _id, indata=indata, kwargs=kwargs, content=content)
def new(self, rollback, session, indata=None, kwargs=None, headers=None):
if not session["admin"]:
raise EngineException("needed admin privileges", http_code=HTTPStatus.UNAUTHORIZED)
# Names that look like UUIDs are not allowed
name = indata["username"] if indata else kwargs["username"]
if is_valid_uuid(name):
raise EngineException("Usernames that look like UUIDs are not allowed",
http_code=HTTPStatus.UNPROCESSABLE_ENTITY)
return BaseTopic.new(self, rollback, session, indata=indata, kwargs=kwargs, headers=headers)
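# Illustrative sketch only (not part of the original OSM module): how a password stored by
# UserTopic.format_on_new()/format_on_edit() could be verified later. The helper name
# _verify_user_password is hypothetical; it only relies on sha256, which is imported above.
def _verify_user_password(plain_password, stored_hash, salt):
    # Recompute sha256(password + salt) and compare against the stored hexdigest
    candidate = sha256(plain_password.encode('utf-8') + salt.encode('utf-8')).hexdigest()
    return candidate == stored_hash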
class ProjectTopic(BaseTopic):
topic = "projects"
topic_msg = "projects"
schema_new = project_new_schema
schema_edit = project_edit_schema
multiproject = False
def __init__(self, db, fs, msg, auth):
BaseTopic.__init__(self, db, fs, msg, auth)
@staticmethod
def _get_project_filter(session):
"""
        Generates a filter dictionary for querying database projects.
        Current policy: an admin can see all projects; a non-admin can only see the projects in its session.
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:return:
"""
if session["admin"]: # allows all
return {}
else:
return {"_id.cont": session["project_id"]}
def check_conflict_on_new(self, session, indata):
if not indata.get("name"):
raise EngineException("missing 'name'")
# check name not exists
if self.db.get_one(self.topic, {"name": indata.get("name")}, fail_on_empty=False, fail_on_more=False):
raise EngineException("name '{}' exists".format(indata["name"]), HTTPStatus.CONFLICT)
@staticmethod
def format_on_new(content, project_id=None, make_public=False):
BaseTopic.format_on_new(content, None)
# Removed so that the UUID is kept, to allow Project Name modification
# content["_id"] = content["name"]
def check_conflict_on_del(self, session, _id, db_content):
"""
Check if deletion can be done because of dependencies if it is not force. To override
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param _id: internal _id
:param db_content: The database content of this item _id
:return: None if ok or raises EngineException with the conflict
"""
if _id in session["project_id"]:
raise EngineException("You cannot delete your own project", http_code=HTTPStatus.CONFLICT)
if session["force"]:
return
_filter = {"projects": _id}
if self.db.get_list("users", _filter):
raise EngineException("There is some USER that contains this project", http_code=HTTPStatus.CONFLICT)
def edit(self, session, _id, indata=None, kwargs=None, content=None):
if not session["admin"]:
raise EngineException("needed admin privileges", http_code=HTTPStatus.UNAUTHORIZED)
# Names that look like UUIDs are not allowed
name = (indata if indata else kwargs).get("name")
if is_valid_uuid(name):
raise EngineException("Project names that look like UUIDs are not allowed",
http_code=HTTPStatus.UNPROCESSABLE_ENTITY)
return BaseTopic.edit(self, session, _id, indata=indata, kwargs=kwargs, content=content)
def new(self, rollback, session, indata=None, kwargs=None, headers=None):
if not session["admin"]:
raise EngineException("needed admin privileges", http_code=HTTPStatus.UNAUTHORIZED)
# Names that look like UUIDs are not allowed
name = indata["name"] if indata else kwargs["name"]
if is_valid_uuid(name):
raise EngineException("Project names that look like UUIDs are not allowed",
http_code=HTTPStatus.UNPROCESSABLE_ENTITY)
return BaseTopic.new(self, rollback, session, indata=indata, kwargs=kwargs, headers=headers)
class CommonVimWimSdn(BaseTopic):
"""Common class for VIM, WIM SDN just to unify methods that are equal to all of them"""
config_to_encrypt = {} # what keys at config must be encrypted because contains passwords
password_to_encrypt = "" # key that contains a password
@staticmethod
def _create_operation(op_type, params=None):
"""
        Creates a dictionary with the information of an operation, similar to ns-lcm-op
        :param op_type: can be create, edit, delete
        :param params: operation input parameters
        :return: new operation dictionary
"""
now = time()
return {
"lcmOperationType": op_type,
"operationState": "PROCESSING",
"startTime": now,
"statusEnteredTime": now,
"detailed-status": "",
"operationParams": params,
}
def check_conflict_on_new(self, session, indata):
"""
Check that the data to be inserted is valid. It is checked that name is unique
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param indata: data to be inserted
:return: None or raises EngineException
"""
self.check_unique_name(session, indata["name"], _id=None)
def check_conflict_on_edit(self, session, final_content, edit_content, _id):
"""
Check that the data to be edited/uploaded is valid. It is checked that name is unique
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param final_content: data once modified. This method may change it.
:param edit_content: incremental data that contains the modifications to apply
:param _id: internal _id
:return: None or raises EngineException
"""
if not session["force"] and edit_content.get("name"):
self.check_unique_name(session, edit_content["name"], _id=_id)
def format_on_edit(self, final_content, edit_content):
"""
Modifies final_content inserting admin information upon edition
:param final_content: final content to be stored at database
:param edit_content: user requested update content
:return: operation id
"""
super().format_on_edit(final_content, edit_content)
# encrypt passwords
schema_version = final_content.get("schema_version")
if schema_version:
if edit_content.get(self.password_to_encrypt):
final_content[self.password_to_encrypt] = self.db.encrypt(edit_content[self.password_to_encrypt],
schema_version=schema_version,
salt=final_content["_id"])
config_to_encrypt_keys = self.config_to_encrypt.get(schema_version) or self.config_to_encrypt.get("default")
if edit_content.get("config") and config_to_encrypt_keys:
for p in config_to_encrypt_keys:
if edit_content["config"].get(p):
final_content["config"][p] = self.db.encrypt(edit_content["config"][p],
schema_version=schema_version,
salt=final_content["_id"])
# create edit operation
final_content["_admin"]["operations"].append(self._create_operation("edit"))
return "{}:{}".format(final_content["_id"], len(final_content["_admin"]["operations"]) - 1)
def format_on_new(self, content, project_id=None, make_public=False):
"""
Modifies content descriptor to include _admin and insert create operation
:param content: descriptor to be modified
:param project_id: if included, it add project read/write permissions. Can be None or a list
:param make_public: if included it is generated as public for reading.
:return: op_id: operation id on asynchronous operation, None otherwise. In addition content is modified
"""
super().format_on_new(content, project_id=project_id, make_public=make_public)
content["schema_version"] = schema_version = "1.11"
# encrypt passwords
if content.get(self.password_to_encrypt):
content[self.password_to_encrypt] = self.db.encrypt(content[self.password_to_encrypt],
schema_version=schema_version,
salt=content["_id"])
config_to_encrypt_keys = self.config_to_encrypt.get(schema_version) or self.config_to_encrypt.get("default")
if content.get("config") and config_to_encrypt_keys:
for p in config_to_encrypt_keys:
if content["config"].get(p):
content["config"][p] = self.db.encrypt(content["config"][p],
schema_version=schema_version,
salt=content["_id"])
content["_admin"]["operationalState"] = "PROCESSING"
# create operation
content["_admin"]["operations"] = [self._create_operation("create")]
content["_admin"]["current_operation"] = None
return "{}:0".format(content["_id"])
def delete(self, session, _id, dry_run=False, not_send_msg=None):
"""
Delete item by its internal _id
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param _id: server internal id
:param dry_run: make checking but do not delete
        :param not_send_msg: To not send message (False)
# Repository: hayesgb/mlrun
import base64
import json
import os
import unittest.mock
import deepdiff
import kubernetes
import nuclio
import pytest
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
import mlrun.errors
from mlrun import code_to_function, mlconf
from mlrun.platforms.iguazio import split_path
from mlrun.runtimes.constants import NuclioIngressAddTemplatedIngressModes
from mlrun.runtimes.function import (
compile_function_config,
deploy_nuclio_function,
enrich_function_with_ingress,
min_nuclio_versions,
resolve_function_ingresses,
validate_nuclio_version_compatibility,
)
from mlrun.runtimes.pod import KubeResourceSpec
from tests.api.conftest import K8sSecretsMock
from tests.api.runtimes.base import TestRuntimeBase
class TestNuclioRuntime(TestRuntimeBase):
@property
def runtime_kind(self):
# enables extending classes to run the same tests with different runtime
return "nuclio"
@property
def class_name(self):
# enables extending classes to run the same tests with different class
return "remote"
def custom_setup_after_fixtures(self):
self._mock_nuclio_deploy_config()
def custom_setup(self):
self.image_name = "test/image:latest"
self.code_handler = "test_func"
os.environ["V3IO_ACCESS_KEY"] = self.v3io_access_key = "1111-2222-3333-4444"
os.environ["V3IO_USERNAME"] = self.v3io_user = "test-user"
def _serialize_and_deploy_nuclio_function(self, function):
# simulating sending to API - serialization through dict
function = function.from_dict(function.to_dict())
deploy_nuclio_function(function)
@staticmethod
def _mock_nuclio_deploy_config():
nuclio.deploy.deploy_config = unittest.mock.Mock(return_value="some-server")
@staticmethod
def _get_expected_struct_for_http_trigger(parameters):
expected_struct = {
"kind": "http",
"maxWorkers": parameters["workers"],
"attributes": {
"ingresses": {
"0": {
"host": parameters["host"],
"paths": parameters["paths"],
"secretName": parameters["secret"],
}
},
"port": parameters["port"],
},
}
if "canary" in parameters:
expected_struct["annotations"] = {
"nginx.ingress.kubernetes.io/canary": "true",
"nginx.ingress.kubernetes.io/canary-weight": parameters["canary"],
}
return expected_struct
def _get_expected_struct_for_v3io_trigger(self, parameters):
container, path = split_path(parameters["stream_path"])
# Remove leading / in the path
path = path[1:]
# TODO - Not sure what happens to the "shards" parameter. Seems to be dropped along the way?
return {
"kind": "v3ioStream",
"name": parameters["name"],
"password": <PASSWORD>,
"attributes": {
"containerName": container,
"streamPath": path,
"consumerGroup": parameters["group"],
"seekTo": parameters["seek_to"],
},
}
def _generate_runtime(self, kind="nuclio", labels=None):
runtime = code_to_function(
name=self.name,
project=self.project,
filename=self.code_filename,
handler=self.code_handler,
kind=kind,
image=self.image_name,
description="test function",
labels=labels,
)
return runtime
def _assert_deploy_called_basic_config(
self,
expected_class="remote",
call_count=1,
expected_params=[],
expected_labels=None,
expected_env_from_secrets=None,
):
if expected_labels is None:
expected_labels = {}
deploy_mock = nuclio.deploy.deploy_config
assert deploy_mock.call_count == call_count
call_args_list = deploy_mock.call_args_list
for single_call_args in call_args_list:
args, kwargs = single_call_args
parent_function = None
if expected_params:
current_parameters = expected_params.pop(0)
expected_function_name = current_parameters["function_name"]
source_filename = current_parameters["file_name"]
parent_function = current_parameters.get("parent_function")
else:
expected_function_name = f"{self.project}-{self.name}"
source_filename = self.code_filename
assert kwargs["name"] == expected_function_name
assert kwargs["project"] == self.project
deploy_config = args[0]
function_metadata = deploy_config["metadata"]
assert function_metadata["name"] == expected_function_name
labels_for_diff = expected_labels.copy()
labels_for_diff.update({"mlrun/class": expected_class})
if parent_function:
labels_for_diff.update({"mlrun/parent-function": parent_function})
assert deepdiff.DeepDiff(function_metadata["labels"], labels_for_diff) == {}
build_info = deploy_config["spec"]["build"]
# Nuclio source code in some cases adds a suffix to the code, initializing nuclio context.
# We just verify that the code provided starts with our code.
original_source_code = open(source_filename, "r").read()
spec_source_code = base64.b64decode(
build_info["functionSourceCode"]
).decode("utf-8")
assert spec_source_code.startswith(original_source_code)
assert build_info["baseImage"] == self.image_name
if expected_env_from_secrets:
env_vars = deploy_config["spec"]["env"]
self._assert_pod_env_from_secrets(env_vars, expected_env_from_secrets)
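    # For reference, an abbreviated (assumed) shape of the nuclio deploy config inspected above:
    # {"metadata": {"name": "<project>-<name>", "labels": {...}},
    #  "spec": {"build": {"functionSourceCode": "<base64 code>", "baseImage": "..."},
    #           "env": [...], "triggers": {...}}}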
def _assert_triggers(self, http_trigger=None, v3io_trigger=None):
args, _ = nuclio.deploy.deploy_config.call_args
triggers_config = args[0]["spec"]["triggers"]
if http_trigger:
expected_struct = self._get_expected_struct_for_http_trigger(http_trigger)
assert (
deepdiff.DeepDiff(
triggers_config["http"],
expected_struct,
ignore_order=True,
# TODO - (in Nuclio) There is a bug with canary configuration:
# the nginx.ingress.kubernetes.io/canary-weight annotation gets assigned the host name
# rather than the actual weight. Remove this once bug is fixed.
exclude_paths=[
"root['annotations']['nginx.ingress.kubernetes.io/canary-weight']"
],
)
== {}
)
if v3io_trigger:
expected_struct = self._get_expected_struct_for_v3io_trigger(v3io_trigger)
diff_result = deepdiff.DeepDiff(
triggers_config[v3io_trigger["name"]],
expected_struct,
ignore_order=True,
)
# It's ok if the Nuclio trigger has additional parameters, these are constants that we don't care
# about. We just care that the values we look for are fully there.
diff_result.pop("dictionary_item_removed", None)
assert diff_result == {}
def _assert_nuclio_v3io_mount(self, local_path="", remote_path="", cred_only=False):
args, _ = nuclio.deploy.deploy_config.call_args
deploy_spec = args[0]["spec"]
env_config = deploy_spec["env"]
expected_env = {
"V3IO_ACCESS_KEY": self.v3io_access_key,
"V3IO_USERNAME": self.v3io_user,
"V3IO_API": None,
"MLRUN_NAMESPACE": self.namespace,
}
self._assert_pod_env(env_config, expected_env)
if cred_only:
assert len(deploy_spec["volumes"]) == 0
return
container, path = split_path(remote_path)
expected_volume = {
"volume": {
"flexVolume": {
"driver": "v3io/fuse",
"options": {
"accessKey": self.v3io_access_key,
"container": container,
"subPath": path,
},
},
"name": "v3io",
},
"volumeMount": {"mountPath": local_path, "name": "v3io", "subPath": ""},
}
assert (
deepdiff.DeepDiff(
deploy_spec["volumes"], [expected_volume], ignore_order=True
)
== {}
)
def _assert_node_selections(
self,
kube_resource_spec: KubeResourceSpec,
expected_node_name=None,
expected_node_selector=None,
expected_affinity=None,
):
args, _ = nuclio.deploy.deploy_config.call_args
deploy_spec = args[0]["spec"]
if expected_node_name:
assert deploy_spec["nodeName"] == expected_node_name
if expected_node_selector:
assert (
deepdiff.DeepDiff(
deploy_spec["nodeSelector"],
expected_node_selector,
ignore_order=True,
)
== {}
)
if expected_affinity:
# deploy_spec returns affinity in CamelCase, V1Affinity is in snake_case
assert (
deepdiff.DeepDiff(
kube_resource_spec._transform_affinity_to_k8s_class_instance(
deploy_spec["affinity"]
),
expected_affinity,
ignore_order=True,
)
== {}
)
def test_enrich_with_ingress_no_overriding(self, db: Session, client: TestClient):
"""
        Expect no ingress template to be created, though its mode is "always",
        since the function already has a pre-configured ingress
"""
function = self._generate_runtime(self.runtime_kind)
# both ingress and node port
ingress_host = "something.com"
function.with_http(host=ingress_host, paths=["/"], port=30030)
function_name, project_name, config = compile_function_config(function)
service_type = "NodePort"
enrich_function_with_ingress(
config, NuclioIngressAddTemplatedIngressModes.always, service_type
)
ingresses = resolve_function_ingresses(config["spec"])
assert len(ingresses) > 0, "Expected one ingress to be created"
for ingress in ingresses:
assert "hostTemplate" not in ingress, "No host template should be added"
assert ingress["host"] == ingress_host
def test_enrich_with_ingress_always(self, db: Session, client: TestClient):
"""
Expect ingress template to be created as the configuration templated ingress mode is "always"
"""
function = self._generate_runtime(self.runtime_kind)
function_name, project_name, config = compile_function_config(function)
service_type = "NodePort"
enrich_function_with_ingress(
config, NuclioIngressAddTemplatedIngressModes.always, service_type
)
ingresses = resolve_function_ingresses(config["spec"])
assert ingresses[0]["hostTemplate"] != ""
def test_enrich_with_ingress_on_cluster_ip(self, db: Session, client: TestClient):
"""
Expect ingress template to be created as the configuration templated ingress mode is "onClusterIP" while the
function service type is ClusterIP
"""
function = self._generate_runtime(self.runtime_kind)
function_name, project_name, config = compile_function_config(function)
service_type = "ClusterIP"
enrich_function_with_ingress(
config, NuclioIngressAddTemplatedIngressModes.on_cluster_ip, service_type,
)
ingresses = resolve_function_ingresses(config["spec"])
assert ingresses[0]["hostTemplate"] != ""
def test_enrich_with_ingress_never(self, db: Session, client: TestClient):
"""
Expect no ingress to be created automatically as the configuration templated ingress mode is "never"
"""
function = self._generate_runtime(self.runtime_kind)
function_name, project_name, config = compile_function_config(function)
service_type = "DoesNotMatter"
enrich_function_with_ingress(
config, NuclioIngressAddTemplatedIngressModes.never, service_type
)
ingresses = resolve_function_ingresses(config["spec"])
assert ingresses == []
def test_nuclio_config_spec_env(self, db: Session, client: TestClient):
function = self._generate_runtime(self.runtime_kind)
name = "env1"
secret = "shh"
secret_key = "open sesame"
function.set_env_from_secret(name, secret=secret, secret_key=secret_key)
name2 = "env2"
value2 = "value2"
function.set_env(name2, value2)
expected_env_vars = [
{
"name": name,
"valueFrom": {"secretKeyRef": {"key": secret_key, "name": secret}},
},
{"name": name2, "value": value2},
]
function_name, project_name, config = compile_function_config(function)
for expected_env_var in expected_env_vars:
assert expected_env_var in config["spec"]["env"]
assert isinstance(function.spec.env[0], kubernetes.client.V1EnvVar)
assert isinstance(function.spec.env[1], kubernetes.client.V1EnvVar)
# simulating sending to API - serialization through dict
function = function.from_dict(function.to_dict())
function_name, project_name, config = compile_function_config(function)
for expected_env_var in expected_env_vars:
assert expected_env_var in config["spec"]["env"]
def test_deploy_with_project_secrets(
self, db: Session, k8s_secrets_mock: K8sSecretsMock
):
secret_keys = ["secret1", "secret2", "secret3"]
secrets = {key: "some-secret-value" for key in secret_keys}
k8s_secrets_mock.store_project_secrets(self.project, secrets)
function = self._generate_runtime(self.runtime_kind)
self._serialize_and_deploy_nuclio_function(function)
# This test runs in KubeJob as well, with different secret names encoding
expected_secrets = k8s_secrets_mock.get_expected_env_variables_from_secrets(
self.project, encode_key_names=(self.class_name != "remote")
)
self._assert_deploy_called_basic_config(
expected_class=self.class_name, expected_env_from_secrets=expected_secrets
)
def test_deploy_basic_function(self, db: Session, client: TestClient):
function = self._generate_runtime(self.runtime_kind)
self._serialize_and_deploy_nuclio_function(function)
self._assert_deploy_called_basic_config(expected_class=self.class_name)
def test_deploy_function_with_labels(self, db: Session, client: TestClient):
labels = {
"key": "value",
"key-2": "value-2",
}
function = self._generate_runtime(self.runtime_kind, labels)
self._serialize_and_deploy_nuclio_function(function)
self._assert_deploy_called_basic_config(
expected_labels=labels, expected_class=self.class_name
)
def test_deploy_with_triggers(self, db: Session, client: TestClient):
function = self._generate_runtime(self.runtime_kind)
http_trigger = {
"workers": 2,
"port": 12345,
"host": "http://my.host",
"paths": ["/path/1", "/path/2"],
"secret": "my little secret",
"canary": 50,
}
v3io_trigger = {
"stream_path": "/container/and/path",
"name": "test_stream",
"group": "beatles",
"seek_to": "latest",
"shards": 42,
}
function.with_http(**http_trigger)
function.add_v3io_stream_trigger(**v3io_trigger)
self._serialize_and_deploy_nuclio_function(function)
self._assert_deploy_called_basic_config(expected_class=self.class_name)
self._assert_triggers(http_trigger, v3io_trigger)
def test_deploy_with_v3io(self, db: Session, client: TestClient):
function = self._generate_runtime(self.runtime_kind)
local_path = "/local/path"
remote_path = "/container/and/path"
function.with_v3io(local_path, remote_path)
self._serialize_and_deploy_nuclio_function(function)
self._assert_deploy_called_basic_config(expected_class=self.class_name)
self._assert_nuclio_v3io_mount(local_path, remote_path)
def test_deploy_with_node_selection(self, db: Session, client: TestClient):
mlconf.nuclio_version = "1.6.10"
function = self._generate_runtime(self.runtime_kind)
node_name = "some-node-name"
function.with_node_selection(node_name=node_name)
self._serialize_and_deploy_nuclio_function(function)
self._assert_deploy_called_basic_config(expected_class=self.class_name)
self._assert_node_selections(function.spec, expected_node_name=node_name)
function = self._generate_runtime(self.runtime_kind)
node_selector = {
"label-1": "val1",
"label-2": "val2",
}
mlconf.default_function_node_selector = base64.b64encode(
json.dumps(node_selector).encode("utf-8")
)
function.with_node_selection(node_selector=node_selector)
self._serialize_and_deploy_nuclio_function(function)
self._assert_deploy_called_basic_config(
call_count=2, expected_class=self.class_name
)
self._assert_node_selections(
function.spec, expected_node_selector=node_selector
)
function = self._generate_runtime(self.runtime_kind)
node_selector = {
"label-3": "val3",
"label-4": "val4",
}
function.with_node_selection(node_selector=node_selector)
self._serialize_and_deploy_nuclio_function(function)
self._assert_deploy_called_basic_config(
call_count=3, expected_class=self.class_name
)
self._assert_node_selections(
function.spec, expected_node_selector=node_selector
)
function = self._generate_runtime(self.runtime_kind)
affinity = self._generate_affinity()
function.with_node_selection(affinity=affinity)
self._serialize_and_deploy_nuclio_function(function)
self._assert_deploy_called_basic_config(
call_count=4, expected_class=self.class_name
)
self._assert_node_selections(function.spec, expected_affinity=affinity)
function = self._generate_runtime(self.runtime_kind)
function.with_node_selection(node_name, node_selector, affinity)
self._serialize_and_deploy_nuclio_function(function)
self._assert_deploy_called_basic_config(
call_count=5, expected_class=self.class_name
)
self._assert_node_selections(
function.spec,
expected_node_name=node_name,
expected_node_selector=node_selector,
expected_affinity=affinity,
)
def test_deploy_with_priority_class_name(self, db: Session, client: TestClient):
mlconf.nuclio_version = "1.5.20"
default_priority_class_name = "default-priority"
mlrun.mlconf.default_function_priority_class_name = default_priority_class_name
mlrun.mlconf.valid_function_priority_class_names = default_priority_class_name
function = self._generate_runtime(self.runtime_kind)
self._serialize_and_deploy_nuclio_function(function)
self._assert_deploy_called_basic_config(expected_class=self.class_name)
args, _ = nuclio.deploy.deploy_config.call_args
deploy_spec = args[0]["spec"]
assert "priorityClassName" not in deploy_spec
mlconf.nuclio_version = "1.6.18"
mlrun.mlconf.valid_function_priority_class_names = ""
function = self._generate_runtime(self.runtime_kind)
self._serialize_and_deploy_nuclio_function(function)
self._assert_deploy_called_basic_config(
call_count=2, expected_class=self.class_name
)
args, _ = nuclio.deploy.deploy_config.call_args
deploy_spec = args[0]["spec"]
assert "priorityClassName" not in deploy_spec
mlrun.mlconf.valid_function_priority_class_names = default_priority_class_name
function = self._generate_runtime(self.runtime_kind)
self._serialize_and_deploy_nuclio_function(function)
self._assert_deploy_called_basic_config(
call_count=3, expected_class=self.class_name
)
        args, _ = nuclio.deploy.deploy_config.call_args
# File: energyPATHWAYS/supply.py
__author__ = '<NAME> & <NAME>'
import config as cfg
import util
import pandas as pd
import numpy as np
import copy
import logging
import time
from util import DfOper
from collections import defaultdict
from supply_measures import BlendMeasure, RioBlendMeasure, CO2PriceMeasure
from supply_technologies import SupplyTechnology, StorageTechnology
from supply_classes import SupplySpecifiedStock, SupplyStock
from shared_classes import SalesShare, SpecifiedStock, Stock, StockItem
from rollover import Rollover
from solve_io import solve_IO
from dispatch_classes import Dispatch, DispatchFeederAllocation
import dispatch_classes
import inspect
import operator
from shape import Shapes, Shape
from outputs import Output
from multiprocessing import Pool, cpu_count
import energyPATHWAYS.helper_multiprocess as helper_multiprocess
import pdb
import os
from datetime import datetime
import random
import dispatch_budget
import dispatch_generators
from unit_converter import UnitConverter
from geomapper import GeoMapper
from energyPATHWAYS.generated import schema
from data_object import DataObject
#def node_update_stock(node):
# if hasattr(node, 'stock'):
# node.update_stock(node.year,node.loop)
# return node
class Supply(object):
"""This module calculates all supply nodes in an IO loop to calculate energy,
emissions, and cost flows through the energy economy
"""
def __init__(self, scenario, demand_object=None, api_run=False,rio_scenario=None):
"""Initializes supply instance"""
self.all_nodes, self.blend_nodes, self.non_storage_nodes, self.storage_nodes = [], [], [], []
self.nodes = {}
self.demand_object = demand_object # used to retrieve results from demand-side
self.scenario = scenario # used in writing dispatch outputs
self.rio_scenario = rio_scenario
self.demand_sectors = demand_object.sectors.keys()
self.demand_sectors.sort()
self.thermal_dispatch_node_name = cfg.getParam('thermal_dispatch_node', 'opt')
self.distribution_node_name = cfg.getParam('distribution_node', 'opt')
self.distribution_grid_node_name = cfg.getParam('distribution_grid_node', 'opt')
self.transmission_node_name = cfg.getParam('transmission_node', 'opt')
self.dispatch_zones = [self.distribution_node_name, self.transmission_node_name]
self.electricity_nodes = defaultdict(list)
self.injection_nodes = defaultdict(list)
self.ghgs = util.csv_read_table('GreenhouseGases', 'name', return_iterable=True)
self.dispatch_feeder_allocation = demand_object.get_weighted_feeder_allocation_by_sector()
self.dispatch_feeders = sorted(self.dispatch_feeder_allocation.index.get_level_values('dispatch_feeder').unique())
self.dispatch = Dispatch(self.dispatch_feeders, GeoMapper.dispatch_geography, GeoMapper.dispatch_geographies, self.scenario)
self.outputs = Output()
self.outputs.hourly_dispatch_results = None
self.outputs.hourly_marginal_cost = None
self.outputs.hourly_production_cost = None
self.active_thermal_dispatch_df_list = []
self.map_dict = dict(util.csv_read_table('SupplyNodes', ['final_energy_link', 'name']))
self.api_run = api_run
if self.map_dict.has_key(None):
del self.map_dict[None]
self.add_co2_price_to_dispatch(scenario)
self.rio_distribution_losses = dict()
self.rio_transmission_losses = dict()
self.rio_distribution_load = dict()
self.rio_flex_load = dict()
self.rio_bulk_load = dict()
        self.rio_flex_pmin = dict()
self.rio_flex_pmax = dict()
self.add_rio_inputs()
def add_co2_price_to_dispatch(self, scenario):
self.CO2PriceMeasures = scenario.get_measures('CO2PriceMeasures', self.thermal_dispatch_node_name)
if len(self.CO2PriceMeasures)>1:
raise ValueError('multiple CO2 price measures are active')
elif len(self.CO2PriceMeasures)==1:
self.CO2PriceMeasure = CO2PriceMeasure(self.CO2PriceMeasures[0], scenario)
self.CO2PriceMeasure.calculate()
else:
self.CO2PriceMeasure = None
def calculate_technologies(self):
""" initiates calculation of all technology attributes - costs, efficiency, etc.
"""
for node in self.nodes.values():
if not hasattr(node, 'technologies'):
continue
for technology in node.technologies.values():
technology.calculate([node.vintages[0] - 1] + node.vintages, node.years)
# indentation is correct
if isinstance(technology, StorageTechnology):
node.remap_tech_attrs(cfg.storage_tech_classes)
else:
node.remap_tech_attrs(cfg.tech_classes)
def aggregate_results(self):
def remove_na_levels(df):
if df is None:
return None
levels_with_na_only = [name for level, name in zip(df.index.levels, df.index.names) if list(level)==[u'N/A']]
return util.remove_df_levels(df, levels_with_na_only).sort_index()
output_list = ['stock', 'annual_costs', 'levelized_costs', 'capacity_utilization']
for output_name in output_list:
df = self.group_output(output_name)
df = remove_na_levels(df) # if a level only as N/A values, we should remove it from the final outputs
setattr(self.outputs, "s_"+output_name, df)
setattr(self.outputs,'s_energy',self.format_output_io_supply())
def format_output_io_supply(self):
energy = self.io_supply_df.stack().to_frame()
util.replace_index_name(energy,'year')
energy_unit = cfg.calculation_energy_unit
energy.columns = [energy_unit.upper()]
return energy
def group_output(self, output_type, levels_to_keep=None):
levels_to_keep = cfg.output_supply_levels if levels_to_keep is None else levels_to_keep
dfs = [node.group_output(output_type, levels_to_keep) for node in self.nodes.values()]
if all([df is None for df in dfs]) or not len(dfs):
return None
keys = [node.name for node in self.nodes.values()]
dfs, keys = zip(*[(df, key) for df, key in zip(dfs, keys) if df is not None])
new_names = 'supply_node'
return util.df_list_concatenate(dfs, keys, new_names, levels_to_keep)
def calculate_years(self):
"""
Determines the period of stock rollover within a node based on the minimum year
of specified sales or stock.
"""
for node in self.nodes.values():
node.min_year = cfg.getParamAsInt('current_year')
if hasattr(node,'technologies'):
for technology in node.technologies.values():
for reference_sales in technology.reference_sales.values():
min_year = min(reference_sales.raw_values.index.levels[util.position_in_index(reference_sales.raw_values, 'vintage')])
if min_year < node.min_year:
node.min_year = min_year
for sales in technology.sales.values():
min_year = min(sales.raw_values.index.get_level_values('vintage'))
if min_year < node.min_year:
node.min_year = min_year
if hasattr(node,'stock') and node.stock.raw_values is not None:
min_year = min(node.stock.raw_values.index.levels[util.position_in_index(node.stock.raw_values, 'year')])
if min_year < node.min_year:
node.min_year = min_year
node.min_year = int(max(node.min_year, cfg.getParamAsInt('supply_start_year')))
node.years = range(node.min_year, cfg.getParamAsInt('end_year') + cfg.getParamAsInt('year_step'), cfg.getParamAsInt('year_step'))
node.vintages = copy.deepcopy(node.years)
self.years = cfg.supply_years
self.years_subset = cfg.combined_years_subset
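    # Worked example (illustrative numbers): if a node's earliest specified sales vintage is 2015,
    # current_year is 2020 and supply_start_year is 2010, node.min_year becomes 2015 and
    # node.years spans 2015..end_year in year_step increments.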
def initial_calculate(self):
"""Calculates all nodes in years before IO loop"""
logging.info("Calculating supply-side prior to current year")
self.calculate_years()
self.add_empty_output_df()
logging.info("Creating input-output table")
self.create_IO()
self.create_inverse_dict()
self.cost_dict = util.recursivedict()
self.emissions_dict = util.recursivedict()
self.create_embodied_cost_and_energy_demand_link()
self.create_embodied_emissions_demand_link()
logging.info("Initiating calculation of technology attributes")
self.calculate_technologies()
logging.info("Running stock rollover prior to current year")
self.calculate_nodes()
self.calculate_initial_demand()
def final_calculate(self):
self.concatenate_annual_costs()
self.concatenate_levelized_costs()
self.calculate_capacity_utilization()
def calculate_nodes(self):
"""Performs an initial calculation for all import, conversion, delivery, and storage nodes"""
if cfg.getParamAsBoolean('parallel_process'):
nodes = helper_multiprocess.safe_pool(helper_multiprocess.node_calculate, self.nodes.values())
self.nodes = dict(zip(self.nodes.keys(), nodes))
else:
for node in self.nodes.values():
if node.name ==self.bulk_electricity_node_name and cfg.rio_supply_run:
node.calculate(calculate_residual=False)
else:
node.calculate()
for node in self.nodes.values():
if node.name in self.blend_nodes and node.name in cfg.evolved_blend_nodes and cfg.evolved_run=='true':
node.values = node.values.groupby(level=[x for x in node.values.index.names if x !='supply_node']).transform(lambda x: 1/float(x.count()))
for x in node.nodes:
if x in self.storage_nodes:
indexer = util.level_specific_indexer(node.values,'supply_node',x)
node.values.loc[indexer,:] = 1e-7 * 4
node.values = node.values.groupby(level=[x for x in node.values.index.names if x !='supply_node']).transform(lambda x: x/x.sum())/4.0
def create_IO(self):
"""Creates a dictionary with year and demand sector keys to store IO table structure"""
self.io_dict = util.recursivedict()
index = pd.MultiIndex.from_product([GeoMapper.supply_geographies, self.all_nodes], names=[GeoMapper.supply_primary_geography, 'supply_node'])
for year in self.years:
for sector in util.ensure_iterable(self.demand_sectors):
self.io_dict[year][sector] = util.empty_df(index = index, columns = index, fill_value=0.0).sort_index(axis=0).sort_index(axis=1)
self.io_dict = util.freeze_recursivedict(self.io_dict)
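    # Illustrative shape (assumed small example): with geographies ["CA", "TX"] and nodes
    # ["coal", "electricity"], io_dict[year][sector] is a 4x4 all-zero DataFrame whose rows and
    # columns share the MultiIndex (geography, supply_node), to be filled during the IO loop.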
def add_rio_inputs(self):
if cfg.rio_supply_run:
self.rio_inputs = RioInputs(self.rio_scenario, self)
self.dispatch.transmission.constraints.values = self.rio_inputs.transmission_constraint
self.dispatch.transmission.constraints.clean_timeseries(attr='values', inplace=True, time_index=cfg.supply_years,
time_index_name='year', interpolation_method=self.dispatch.transmission.constraints.interpolation_method,
extrapolation_method=self.dispatch.transmission.constraints.extrapolation_method)
def add_nodes(self):
"""Adds node instances for all active supply nodes"""
logging.info('Adding supply nodes')
supply_nodes = util.csv_read_table('SupplyNodes', column_names=['name', 'supply_type', 'is_active'], return_iterable=True)
supply_nodes.sort()
for name, supply_type, is_active in supply_nodes:
if is_active:
self.all_nodes.append(name)
logging.info(' {} node {}'.format(supply_type, name))
self.add_node(name, supply_type, self.scenario)
# this ideally should be moved to the init statements for each of the nodes
for node in self.nodes.values():
node.demand_sectors = self.demand_sectors
node.ghgs = self.ghgs
node.distribution_grid_node_name = self.distribution_grid_node_name
# for some reason, when this next part gets moved to the init for node, the DR node ends up having a tradeable geography of none
if node.tradable_geography is None:
node.enforce_tradable_geography = False
node.tradable_geography = GeoMapper.supply_primary_geography
else:
node.enforce_tradable_geography = True
def add_node(self, name, supply_type, scenario):
"""Add node to Supply instance
Args:
            name (str): supply node name
supply_type (str): supply type i.e. 'blend'
"""
if supply_type == "Blend":
self.nodes[name] = BlendNode(name, scenario)
self.blend_nodes.append(name)
elif supply_type == "Storage":
if len(util.csv_read_table('SupplyTechs', 'supply_node', supply_node=name, return_iterable=True)):
self.nodes[name] = StorageNode(name, scenario)
else:
logging.debug(ValueError('insufficient data in storage node %s' % name))
elif supply_type == "Import":
self.nodes[name] = ImportNode(name, scenario)
elif supply_type == "Primary":
self.nodes[name] = PrimaryNode(name, scenario)
else:
if len(util.csv_read_table('SupplyEfficiency', 'name', name=name, return_iterable=True)):
self.nodes[name] = SupplyNode(name, scenario)
elif len(util.csv_read_table('SupplyTechs', 'supply_node', supply_node=name, return_iterable=True)):
self.nodes[name] = SupplyStockNode(name, scenario)
elif len(util.csv_read_table('SupplyStock', 'supply_node', supply_node=name, return_iterable=True)):
self.nodes[name] = SupplyNode(name, scenario)
else:
logging.debug(ValueError('insufficient data in supply node %s' % name))
if supply_type != "Storage":
self.non_storage_nodes.append(name)
else:
self.storage_nodes.append(name)
def reformat_gen_share_measures(self,df):
def find_supply_node(x):
for node in self.nodes.values():
if hasattr(node,'technologies') and x in node.technologies.keys():
return node.name
df['supply_node'] = [find_supply_node(x) for x in df.index.get_level_values('technology')]
df = df.set_index('supply_node',append=True)
df = df.groupby(level=['year',cfg.rio_geography,'supply_node']).sum()
return df
def reformat_fuel_share_measures(self,df):
df = copy.deepcopy(df)
def find_supply_node(y):
for node in self.nodes.values():
if hasattr(node,'technologies') and y.lower() in [x.lower() for x in node.technologies.keys()]:
return node.name
elif not hasattr(node,'technologies') and y.lower()==node.name.lower():
return node.name
return y
df['supply_node'] = [find_supply_node(x) for x in df.index.get_level_values('ep_fuel')]
df = df.set_index('supply_node',append=True)
df = df.groupby(level=['year', 'blend',cfg.rio_geography,'supply_node']).sum()
return df
def reformat_delivered_gen(self,df):
if df is not None:
def find_supply_node(x):
for node in self.nodes.values():
if hasattr(node,'technologies') and x in node.technologies.keys():
return node.name
df['supply_node'] = [find_supply_node(x) for x in df.index.get_level_values('technology')]
df = df.set_index('supply_node',append=True)
df = df.groupby(level=['year',cfg.rio_geography + "_from",'supply_node']).sum()
return df
else:
return None
def reformat_bulk_thermal_share_measures(self,df):
df['supply_node'] = self.thermal_dispatch_node_name
df = df.set_index('supply_node',append=True)
return df
def add_measures(self):
""" Adds measures to supply nodes based on scenario inputs"""
logging.info('Adding supply measures')
scenario = self.scenario
self.discover_bulk_name()
if cfg.rio_supply_run:
#reformats from technology/supply node to supply node for blend measures
self.rio_inputs.zonal_fuel_outputs = self.reformat_fuel_share_measures(self.rio_inputs.
zonal_fuel_outputs)
self.rio_inputs.fuel_outputs = self.reformat_fuel_share_measures(self.rio_inputs.
fuel_outputs)
for node in self.nodes.values():
#all nodes have export measures
if cfg.rio_supply_run and node.name in cfg.rio_export_blends:
df = self.rio_inputs.zonal_fuel_exports[
self.rio_inputs.zonal_fuel_exports.index.get_level_values('blend') == node.name]
node.export = RioExport(node.name, df)
elif cfg.rio_supply_run and node.name in cfg.rio_outflow_products:
df = self.rio_inputs.product_exports[
self.rio_inputs.product_exports.index.get_level_values('supply_node') == node.name]
node.export = RioExport(node.name, df)
else:
node.add_exports(scenario)
#once measures are loaded, export classes can be initiated
if node.supply_type == 'Blend':
if cfg.rio_supply_run and node.name!=self.bulk_electricity_node_name and node.name!=self.distribution_node_name and node.name!=self.thermal_dispatch_node_name and node.name not in cfg.rio_excluded_blends:
node.add_rio_fuel_blend_measures(self.rio_inputs)
elif cfg.rio_supply_run and node.name==self.bulk_electricity_node_name:
node.blend_measures = dict()
node.add_rio_bulk_blend_measures(self.reformat_gen_share_measures(self.rio_inputs.bulk_share))
node.add_rio_bulk_blend_measures(self.reformat_bulk_thermal_share_measures(self.rio_inputs.bulk_thermal_share))
node.rio_trades = self.rio_inputs.electricity_trades
node.delivered_gen = self.reformat_delivered_gen(self.rio_inputs.cleaned_delivered_gen)
elif cfg.rio_supply_run and node.name == self.thermal_dispatch_node_name:
node.add_rio_thermal_blend_measures(self.reformat_gen_share_measures(self.rio_inputs.thermal_share))
else:
node.add_blend_measures(scenario)
if cfg.rio_supply_run and node.name in self.rio_inputs.blend_levelized_costs.index.get_level_values('supply_node'):
df = util.df_slice(self.rio_inputs.blend_levelized_costs,node.name,'supply_node').unstack('year')
df.columns = df.columns.droplevel()
node.levelized_costs = df
            elif isinstance(node, SupplyStockNode) or isinstance(node,
# File: tools/explorer_utils.py
from tools.lang import SELECT_THE_DIRECTORY
from tools.lang import DO_YOU_WANT_EXPLORE_SUBDIRECTORIES
from tools.lang import DO_YOU_WANT_SPLIT_THE_DB_BY_DIRECTORY
from tools.lang import ENTER_A_NAME_FOR_YOUR_DB
from tools.lang import SELECT_THE_OLDER_BD
from tools.lang import SELECT_THE_NEWER_BD
from tools.lang import ENTER_A_NAME_FOR_YOUR_CDB
from tools.lang import SELECT_THE_CDB
from tools.lang import EXPLORING_DIR
from tools.lang import CANNOT_READ_CONTENTS
from tools.lang import SUCCEED
from tools.lang import CANNOT_WRITE_FILE
from tools.lang import FILE_CREATED_AT
from tools.lang import NOT_FILES_FOUND
from tools.lang import XLSX_DOESNT_MEET_COLUMNS
from tools.lang import CREATING
from tools.lang import M_OBJECTS_OF_N_OBJECTS
from tools.lang import BAD_DB_ORDER
from tools.lang import PROCESSED
from tools.lang import M_FILES_OF_N_FILES
from tools.lang import FILE_NOT_FOUND
from tools.FileHash import FileHash
from tools.FileHash import FILE_MOVED
from tools.FileHash import FILE_CREATED
from tools.FileHash import FILE_DELETED
from tools.FileHash import FILE_RENAMED
from tools.FileHash import FILE_MODIFIED
from tools.FileHash import FILE_RENAMED_AND_MOVED
from tools.FileHash import FILE_MODIFIED_AND_MOVED
from tools.util import choose
from tools.util import print_status
from tools.file_utils import file_browser
from tools.file_utils import create_directories
from tools.file_utils import copy_file
from tools.file_utils import create_file_hash
from pathlib import Path
from openpyxl import Workbook
from openpyxl import load_workbook
from zipfile import BadZipFile
# Utilities related to exploration of filesystem and creation of pseudo databases
CWD = Path.cwd()
# User_input: Path, bool, bool, str -> "/home", True, False, "db"
# Output: list -> [["/home"], PosixPath("/home/cdelaof/db.xlsx"), True, False]
#
def setup_pdb_creation() -> list:
print(SELECT_THE_DIRECTORY)
directory = file_browser(allow_files=False)
print(DO_YOU_WANT_EXPLORE_SUBDIRECTORIES)
explore_subdirectories = choose(["1", "2"], [True, False])
divide_by_directory = False
if explore_subdirectories:
print(DO_YOU_WANT_SPLIT_THE_DB_BY_DIRECTORY)
divide_by_directory = choose(["1", "2"], [True, False])
pdb_name = input(ENTER_A_NAME_FOR_YOUR_DB) + ".xlsx"
pdb_path = Path.cwd().joinpath(pdb_name)
return [[directory], pdb_path, explore_subdirectories, divide_by_directory]
# User_input: Path, Path, str -> "C:\\db.xlsx", "C:\\db1.xlsx", "cdb"
# Output: list -> [WindowsPath("C:\\db.xlsx"), WindowsPath("C:\\db1.xlsx"), WindowsPath("C:\\cdb.xlsx")]
#
def setup_pdb_comparison() -> list:
print(SELECT_THE_OLDER_BD)
older_pdb = file_browser(allow_directories=False, allowed_extensions=[".xlsx"])
print(SELECT_THE_NEWER_BD)
newer_pdb = file_browser(allow_directories=False, allowed_extensions=[".xlsx"])
comparison_pdb_name = input(ENTER_A_NAME_FOR_YOUR_CDB) + ".xlsx"
comparison_pdb_path = Path.cwd().joinpath(comparison_pdb_name)
return [older_pdb, newer_pdb, comparison_pdb_path]
# User_input: Path, Path, str -> "C:\\db.xlsx", "C:\\db1.xlsx", "cdb"
# Output: list -> [WindowsPath("C:\\db.xlsx"), WindowsPath("C:\\db1.xlsx"), WindowsPath("C:\\cdb.xlsx")]
#
def setup_ach_comparison() -> list:
print(SELECT_THE_OLDER_BD)
older_pdb = file_browser(allow_directories=False, allowed_extensions=[".xlsx"])
print(SELECT_THE_NEWER_BD)
newer_pdb = file_browser(allow_directories=False, allowed_extensions=[".xlsx"])
print(SELECT_THE_CDB)
comparison_pdb_path = file_browser(allow_directories=False, allowed_extensions=[".xlsx"])
return [older_pdb, newer_pdb, comparison_pdb_path]
# cp
def setup_cp() -> list:
print(SELECT_THE_DIRECTORY)
origin = file_browser(allow_files=False)
print(SELECT_THE_DIRECTORY)
destiny = file_browser(allow_files=False)
return [origin, destiny]
# Input: list, list, bool
#
# Notes: Uses reference
#
def explore_dir(directories, files, explore_subdirectories, hash_func, blacklist_extensions):
# I think it's a lot of processing just to display it nicely, maybe I will change it.
print_status(EXPLORING_DIR, directories[0])
try:
for item in directories[0].iterdir():
if item.name in blacklist_extensions or item.suffix in blacklist_extensions:
continue
# In order to prevent an infinite process, cwd is excluded to be scanned
# likely to happen with divide_by_directory option enabled
if item == CWD:
continue
if explore_subdirectories and item.is_dir():
directories.append(item)
elif item.is_file():
file_hash = None
if hash_func is not None:
file_hash = create_file_hash(item, hash_func)
files.append(FileHash(str(item), file_hash))
except (PermissionError, FileNotFoundError) as e:
print(CANNOT_READ_CONTENTS % str(directories[0]))
print(e)
print()
directories.pop(0)
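# Usage sketch (hypothetical paths; hash_func/blacklist semantics assumed from the calls above):
# directories, files = [Path("/data")], []
# while directories:
#     explore_dir(directories, files, explore_subdirectories=True,
#                 hash_func="sha256", blacklist_extensions=[".tmp"])
# explore_dir() consumes directories[0] on each call and appends FileHash objects to files.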
# Input: list -> ["Ingredient", "Price"], [FileHash(), FileHash(), FileHash()], Path("[...]")
#
def write_xlsx(columns, data, file_path):
if not data:
print(NOT_FILES_FOUND)
else:
workbook = Workbook()
sheet = workbook.active
for column, column_name in enumerate(columns):
sheet.cell(row=1, column=(column + 1), value=column_name)
for i, file in enumerate(data):
# i + 2 since first row is for column labels
sheet.cell(row=(i + 2), column=1, value=str(file.get_file_hash()))
sheet.cell(row=(i + 2), column=2, value=str(file.get_file_path()))
sheet.cell(row=(i + 2), column=3, value=str(file.get_file_stat()))
while True:
try:
# Just in case if file_path has " on its name
file_path = str(file_path).replace("\"", "\\\"")
workbook.save(file_path)
print(SUCCEED + FILE_CREATED_AT.format(str(file_path)))
workbook.close()
break
except FileNotFoundError as e:
print(CANNOT_WRITE_FILE % (file_path, e))
if choose(["1", "2"], [False, True]):
break
# Too way complicated explain it xd
#
def retrieve_workbook_origin_path(sheet, file_path=None) -> list:
# if there is not sheet, then is created
workbook = None
if file_path:
try:
workbook = load_workbook(file_path)
sheet = workbook.active
except BadZipFile:
print(CANNOT_READ_CONTENTS % str(file_path))
return list()
# Origin path is only available for p-databases
# cp-databases uses column 3 for file_stat
# Basically does this:
# "Origin_path: Path_name".split(": ")
# ['Origin_path', 'Path_name']
# If path origin_path_split[1] is found, then is returned
#
origin_path_split = sheet.cell(row=1, column=3).value
origin_path = None
matched_columns = True
if origin_path_split is not None:
origin_path_split = origin_path_split.split(": ")
if len(origin_path_split) == 2:
origin_path = origin_path_split[1]
else:
            # If data_origin_path is needed but not found, then the file
            # doesn't meet the required columns
matched_columns = False
if file_path:
workbook.close()
return [origin_path, matched_columns]
# Input: list, Path -> ["Ingredient", "Price"], "/root/c.xlsx"
# Output: list -> [<tools.FileHash.FileHash object at [...]>,
# <tools.FileHash.FileHash object at [...]>,
# ...]
#
def retrieve_workbook_objects(columns, file_path, retrieve_origin_path) -> list:
try:
workbook = load_workbook(file_path)
except BadZipFile:
print(CANNOT_READ_CONTENTS % str(file_path))
return list()
sheet = workbook.active
matched_columns = True
files = list()
origin_path = None
for column, column_name in enumerate(columns):
matched_columns = matched_columns and (sheet.cell(row=1, column=(column + 1)).value == column_name)
if retrieve_origin_path:
origin_path, matched_columns = retrieve_workbook_origin_path(sheet)
if not matched_columns:
print(XLSX_DOESNT_MEET_COLUMNS % (str(file_path), columns, origin_path is not None))
workbook.close()
if retrieve_origin_path:
return [files, origin_path]
return files
total_objects = sheet.max_row - 1
created_objects = 1
    # Starts at 2 because we don't want the column names treated as an object
    # (the other +1 is because openpyxl cells start at 1 :v)
for row in range(2, sheet.max_row + 1):
print_status(CREATING, M_OBJECTS_OF_N_OBJECTS.format(created_objects, total_objects))
file_hash = str(sheet.cell(row=row, column=1).value)
file_path = Path(sheet.cell(row=row, column=2).value)
file_stat = str(sheet.cell(row=row, column=3).value)
files.append(FileHash(file_path, file_hash, file_stat))
created_objects += 1
print()
if retrieve_origin_path:
return [files, origin_path]
return files
# Input: list, list
# Output: list
#
# Notes: example in README
#
def compare_data(older_db_data, newer_db_data, older_db_origin_path, newer_db_origin_path) -> list:
cdb_data = list()
files_processed = 1
total_files = len(older_db_data)
while older_db_data:
print_status(PROCESSED, M_FILES_OF_N_FILES.format(files_processed, total_files))
old_data = older_db_data[0]
remove_old_data_from_list = False
# This returns a part of the original path
# Let's say:
# older_db_origin_path = "/Users/cdelaof26/Desktop"
# old_data.get_file_path() = "/Users/cdelaof26/Desktop/pgitHash/settings/File.txt"
#
# To compare relative paths to catch file movement, we just compare after origin and excludes file's name
# old_data_partial_path = "/pgitHash/settings"
# new_data_partial_path = "/pgitHash/"
#
# -> same_path = False
#
# The full path cannot be used since mount point for newer and older databases is not the same.
#
old_data_path = old_data.get_file_path()
old_data_partial_path = str(old_data_path).replace(older_db_origin_path, "")
old_data_partial_path = old_data_partial_path.replace(old_data_path.name, "")
for i_new_data, new_data in enumerate(newer_db_data):
same_names = Path(old_data.get_file_path()).stem == Path(new_data.get_file_path()).stem
# Code explanation at line 285
new_data_path = new_data.get_file_path()
new_data_partial_path = str(new_data_path).replace(newer_db_origin_path, "")
new_data_partial_path = new_data_partial_path.replace(new_data_path.name, "")
same_path = old_data_partial_path == new_data_partial_path
# If hashes are the same
if old_data.get_file_hash() == new_data.get_file_hash():
if same_names and not same_path:
# If hashes and names are the same but paths not, is considered as moved
old_data.set_file_stat(FILE_MOVED)
cdb_data.append(old_data)
if not same_names and same_path:
# If hashes and paths are the same but the names are not, is considered as renamed
old_data.set_file_stat(FILE_RENAMED)
cdb_data.append(old_data)
if not same_names and not same_path:
# If hashes are the same but names and paths are not, is considered as RenamedAndMoved
old_data.set_file_stat(FILE_RENAMED_AND_MOVED)
cdb_data.append(old_data)
# If hashes, paths and names are the same is considered as unmodified
# so, not added to list of differences
newer_db_data.pop(i_new_data)
remove_old_data_from_list = True
break
elif same_names:
if same_path:
# If hashes are not the same, but paths and names are, then is considered as modified
old_data.set_file_stat(FILE_MODIFIED)
else:
# If hashes and paths are not the same, but names are, then is considered as ModifiedAndMoved
old_data.set_file_stat(FILE_MODIFIED_AND_MOVED)
cdb_data.append(old_data)
newer_db_data.pop(i_new_data)
remove_old_data_from_list = True
break
if not remove_old_data_from_list:
            # If a file was not found in the newer data, it is considered deleted
old_data.set_file_stat(FILE_DELETED)
cdb_data.append(old_data)
# Removes old files
older_db_data.pop(0)
files_processed += 1
    # Any file remaining in the newer data is considered created
while newer_db_data:
new_data = newer_db_data[0]
# Code explanation at line 285
        # To push changes, the path needs to be rewritten relative to the older origin
new_data_path = new_data.get_file_path()
# To join it, it can't start with / or \, that's why [1:]
new_data_partial_path = str(new_data_path).replace(newer_db_origin_path, "")[1:]
        # Here the file name is not removed since we need it
new_data_path = Path(older_db_origin_path).joinpath(new_data_partial_path)
new_data.set_file_stat(FILE_CREATED)
new_data.set_file_path(new_data_path)
cdb_data.append(newer_db_data[0])
newer_db_data.pop(0)
return cdb_data
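# Hedged sketch (not part of the original tool): a standalone helper mirroring the
# partial-path logic used in compare_data() above, so the path handling can be
# tested in isolation. The function name and sample paths are illustrative
# assumptions only.
def _partial_path_sketch(full_path, origin_path):
    from pathlib import Path
    path = Path(full_path)
    # Strip the origin (mount point) prefix and the file name, keeping only the
    # directories in between, e.g. "/pgitHash/settings/".
    return str(path).replace(str(origin_path), "").replace(path.name, "")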
# Input: str, list
#
def search_file_by_hash(file_hash, data) -> FileHash:
for dat in data:
if dat.get_file_hash() == file_hash:
return dat
print(FILE_NOT_FOUND % file_hash)
return FileHash(Path.cwd(), "", "")
# Input: str, list
#
def search_file_by_name(file_name, data) -> FileHash:
for dat in data:
if dat.get_file_path().name == file_name:
return dat
print(FILE_NOT_FOUND % file_name)
return FileHash(Path.cwd(), "", "")
# Input: Path, str, list, Path, Path
#
def move_file(old_file_path, old_file_hash, newer_db_data, newer_db_origin_path, older_db_origin_path,
search_by_hash=True):
if search_by_hash:
new_file = search_file_by_hash(old_file_hash, newer_db_data)
else:
new_file = search_file_by_name(old_file_hash, newer_db_data)
# Code explanation at line 285
#
# Those lines are needed to create the new partial path of the file
# Here we are just getting the directories between newer_db_origin_path and file's name
#
new_data_path | |
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common functions
"""
#Import Local Modules
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
import marvin.sshClient
from .utils import *
from .base import *
#Import System modules
import time
def get_domain(apiclient, services=None):
"Returns a default domain"
cmd = listDomains.listDomainsCmd()
if services:
if "domainid" in services:
cmd.id = services["domainid"]
domains = apiclient.listDomains(cmd)
if isinstance(domains, list):
return domains[0]
else:
raise Exception("Failed to find specified domain.")
def get_zone(apiclient, services=None):
"Returns a default zone"
cmd = listZones.listZonesCmd()
if services:
if "zoneid" in services:
cmd.id = services["zoneid"]
zones = apiclient.listZones(cmd)
if isinstance(zones, list):
return zones[0]
else:
raise Exception("Failed to find specified zone.")
def get_pod(apiclient, zoneid, services=None):
"Returns a default pod for specified zone"
cmd = listPods.listPodsCmd()
cmd.zoneid = zoneid
if services:
if "podid" in services:
cmd.id = services["podid"]
pods = apiclient.listPods(cmd)
if isinstance(pods, list):
return pods[0]
else:
raise Exception("Exception: Failed to find specified pod.")
def get_template(apiclient, zoneid, ostypeid=12, services=None):
"Returns a template"
cmd = listTemplates.listTemplatesCmd()
cmd.templatefilter = 'featured'
cmd.zoneid = zoneid
if services:
if "template" in services:
cmd.id = services["template"]
list_templates = apiclient.listTemplates(cmd)
for template in list_templates:
if template.ostypeid == ostypeid:
return template
raise Exception("Exception: Failed to find template with OSTypeID: %s" %
ostypeid)
return
def download_systemplates_sec_storage(server, services):
"""Download System templates on sec storage"""
try:
# Login to management server
ssh = marvin.sshClient.SshClient(
server["ipaddress"],
server["port"],
server["username"],
server["password"]
)
except Exception as e:
raise Exception("SSH access failted for server with IP address: %s" %
server["ipaddess"])
# Mount Secondary Storage on Management Server
cmds = [
"mkdir -p %s" % services["mnt_dir"],
"mount -t nfs %s:/%s %s" % (
services["sec_storage"],
services["path"],
services["mnt_dir"]
),
"%s -m %s -u %s -h %s -F" % (
services["command"],
services["mnt_dir"],
services["download_url"],
services["hypervisor"]
)
]
for c in cmds:
result = ssh.execute(c)
res = str(result)
# Unmount the Secondary storage
ssh.execute("umount %s" % (services["mnt_dir"]))
if res.count("Successfully installed system VM template") == 1:
return
else:
raise Exception("Failed to download System Templates on Sec Storage")
return
def wait_for_ssvms(apiclient, zoneid, podid, interval=60):
"""After setup wait for SSVMs to come Up"""
time.sleep(interval)
timeout = 40
    while True:
        list_ssvm_response = list_ssvms(
            apiclient,
            systemvmtype='secondarystoragevm',
            zoneid=zoneid,
            podid=podid
        )
        ssvm = list_ssvm_response[0]
        if ssvm.state == 'Running':
            break
        if timeout == 0:
            raise Exception("SSVM failed to come up")
        # Sleep to ensure SSVMs are Up and Running
        time.sleep(30)
        timeout = timeout - 1
timeout = 40
    while True:
        list_ssvm_response = list_ssvms(
            apiclient,
            systemvmtype='consoleproxy',
            zoneid=zoneid,
            podid=podid
        )
        cpvm = list_ssvm_response[0]
        if cpvm.state == 'Running':
            break
        if timeout == 0:
            raise Exception("CPVM failed to come up")
        # Sleep to ensure the CPVM is Up and Running
        time.sleep(interval)
        timeout = timeout - 1
return
def download_builtin_templates(apiclient, zoneid, hypervisor, host, linklocalip, interval=60):
"""After setup wait till builtin templates are downloaded"""
# Change IPTABLES Rules
result = get_process_status(
host["ipaddress"],
host["port"],
host["username"],
host["password"],
linklocalip,
"iptables -P INPUT ACCEPT"
)
time.sleep(interval)
# Find the BUILTIN Templates for given Zone, Hypervisor
list_template_response = list_templates(
apiclient,
hypervisor=hypervisor,
zoneid=zoneid,
templatefilter='self'
)
if not isinstance(list_template_response, list):
raise Exception("Failed to download BUILTIN templates")
# Ensure all BUILTIN templates are downloaded
templateid = None
for template in list_template_response:
if template.templatetype == "BUILTIN":
templateid = template.id
# Sleep to ensure that template is in downloading state after adding
# Sec storage
time.sleep(interval)
while True:
template_response = list_templates(
apiclient,
id=templateid,
zoneid=zoneid,
templatefilter='self'
)
template = template_response[0]
# If template is ready,
# template.status = Download Complete
# Downloading - x% Downloaded
# Error - Any other string
if template.status == 'Download Complete':
break
elif 'Downloaded' in template.status:
time.sleep(interval)
elif 'Installing' not in template.status:
raise Exception("ErrorInDownload")
return
def update_resource_limit(apiclient, resourcetype, account=None, domainid=None,
max=None):
"""Updates the resource limit to 'max' for given account"""
cmd = updateResourceLimit.updateResourceLimitCmd()
cmd.resourcetype = resourcetype
if account:
cmd.account = account
if domainid:
cmd.domainid = domainid
if max:
cmd.max = max
apiclient.updateResourceLimit(cmd)
return
def list_routers(apiclient, **kwargs):
"""List all Routers matching criteria"""
cmd = listRouters.listRoutersCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listRouters(cmd))
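# Hedged sketch (not part of the original module): the list_* wrappers in this
# module all follow the same pattern -- build the corresponding *Cmd object, copy
# every keyword argument onto it with setattr, and call the matching apiclient
# method. A generic version might look like this; the helper name and the example
# keyword are illustrative assumptions.
def _list_generic(cmd, api_call, **kwargs):
    # e.g. _list_generic(listZones.listZonesCmd(), apiclient.listZones, available=True)
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    return api_call(cmd)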
def list_zones(apiclient, **kwargs):
"""List all Zones matching criteria"""
cmd = listZones.listZonesCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listZones(cmd))
def list_networks(apiclient, **kwargs):
"""List all Networks matching criteria"""
cmd = listNetworks.listNetworksCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listNetworks(cmd))
def list_clusters(apiclient, **kwargs):
"""List all Clusters matching criteria"""
cmd = listClusters.listClustersCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listClusters(cmd))
def list_ssvms(apiclient, **kwargs):
"""List all SSVMs matching criteria"""
cmd = listSystemVms.listSystemVmsCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listSystemVms(cmd))
def list_storage_pools(apiclient, **kwargs):
"""List all storage pools matching criteria"""
cmd = listStoragePools.listStoragePoolsCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listStoragePools(cmd))
def list_virtual_machines(apiclient, **kwargs):
"""List all VMs matching criteria"""
cmd = listVirtualMachines.listVirtualMachinesCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listVirtualMachines(cmd))
def list_hosts(apiclient, **kwargs):
"""List all Hosts matching criteria"""
cmd = listHosts.listHostsCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listHosts(cmd))
def list_configurations(apiclient, **kwargs):
"""List configuration with specified name"""
cmd = listConfigurations.listConfigurationsCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listConfigurations(cmd))
def list_publicIP(apiclient, **kwargs):
"""List all Public IPs matching criteria"""
cmd = listPublicIpAddresses.listPublicIpAddressesCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listPublicIpAddresses(cmd))
def list_nat_rules(apiclient, **kwargs):
"""List all NAT rules matching criteria"""
cmd = listPortForwardingRules.listPortForwardingRulesCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listPortForwardingRules(cmd))
def list_lb_rules(apiclient, **kwargs):
"""List all Load balancing rules matching criteria"""
cmd = listLoadBalancerRules.listLoadBalancerRulesCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listLoadBalancerRules(cmd))
def list_lb_instances(apiclient, **kwargs):
"""List all Load balancing instances matching criteria"""
cmd = listLoadBalancerRuleInstances.listLoadBalancerRuleInstancesCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listLoadBalancerRuleInstances(cmd))
def list_firewall_rules(apiclient, **kwargs):
"""List all Firewall Rules matching criteria"""
cmd = listFirewallRules.listFirewallRulesCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listFirewallRules(cmd))
def list_volumes(apiclient, **kwargs):
"""List all volumes matching criteria"""
cmd = listVolumes.listVolumesCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listVolumes(cmd))
def list_isos(apiclient, **kwargs):
"""Lists all available ISO files."""
cmd = listIsos.listIsosCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listIsos(cmd))
def list_snapshots(apiclient, **kwargs):
"""List all snapshots matching criteria"""
cmd = listSnapshots.listSnapshotsCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listSnapshots(cmd))
def list_templates(apiclient, **kwargs):
"""List all templates matching criteria"""
cmd = listTemplates.listTemplatesCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listTemplates(cmd))
def list_domains(apiclient, **kwargs):
"""Lists domains"""
cmd = listDomains.listDomainsCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listDomains(cmd))
def list_accounts(apiclient, **kwargs):
"""Lists accounts and provides detailed account information for
listed accounts"""
cmd = listAccounts.listAccountsCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listAccounts(cmd))
def list_users(apiclient, **kwargs):
"""Lists users and provides detailed account information for
listed users"""
cmd = listUsers.listUsersCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listUsers(cmd))
def list_snapshot_policy(apiclient, **kwargs):
"""Lists snapshot policies."""
cmd = listSnapshotPolicies.listSnapshotPoliciesCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listSnapshotPolicies(cmd))
def list_events(apiclient, **kwargs):
"""Lists events"""
cmd = listEvents.listEventsCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listEvents(cmd))
def list_disk_offering(apiclient, **kwargs):
"""Lists all available disk offerings."""
cmd = listDiskOfferings.listDiskOfferingsCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listDiskOfferings(cmd))
def list_service_offering(apiclient, **kwargs):
"""Lists all available service offerings."""
cmd = listServiceOfferings.listServiceOfferingsCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listServiceOfferings(cmd))
def list_vlan_ipranges(apiclient, **kwargs):
"""Lists all VLAN IP ranges."""
cmd = listVlanIpRanges.listVlanIpRangesCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listVlanIpRanges(cmd))
def list_usage_records(apiclient, **kwargs):
"""Lists usage records for accounts"""
cmd = listUsageRecords.listUsageRecordsCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listUsageRecords(cmd))
def list_network_offerings(apiclient, **kwargs):
"""Lists network offerings"""
cmd = listNetworkOfferings.listNetworkOfferingsCmd()
[setattr(cmd, k, v) for k, v in list(kwargs.items())]
return(apiclient.listNetworkOfferings(cmd))
def list_resource_limits(apiclient, **kwargs):
"""Lists resource limits"""
cmd = | |
import asyncio
import time
from discord.ext import commands
import discord
import libneko
import json
import io
import dotenv
import os
import data.topics as topics
from textwrap import shorten, fill
from datetime import datetime
from random import randint, choice, random
from math import floor
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
from libneko import embeds
from owoify import Owoifator
from vaporwavely import vaporipsum, vaporize
from utils.masks import ellipse
from modules.http import HttpCogBase
from modules.dictobj import DictObject
import data.quotes
# To retrieve KSoft.Si API KEY
dotenv.load_dotenv()
ksoft_key = os.environ.get("KSOFT_API_KEY")
class Fun(HttpCogBase):
def __init__(self, bot):
self.bot = bot
self.jynxed = {}
self.loop = asyncio.get_event_loop()
@commands.command(aliases=["talk", "speak", "sy"])
@commands.bot_has_permissions(manage_messages=True)
async def say(self, ctx, *, text: commands.clean_content = None):
"""Say whatever you typed in"""
try:
if text is None:
await ctx.send("❓ What do you want me to say?", delete_after=5.0)
await ctx.message.add_reaction("❓")
else:
await ctx.message.delete()
await ctx.trigger_typing()
await ctx.send(text)
except:
pass
@commands.command(aliases=["sghost", "sayg", "sg"])
@commands.bot_has_permissions(manage_messages=True)
async def sayghost(self, ctx, *, text: commands.clean_content = None):
"""Say whatever you typed in and immediately deletes it"""
try:
if text is None:
await ctx.send("❓ What do you want me to say?", delete_after=5.0)
await ctx.message.add_reaction("❓")
else:
await ctx.message.delete()
await ctx.trigger_typing()
await ctx.send(text, delete_after=1)
except:
pass
# Say Command with TTS
@commands.command(aliases=["ttstalk", "speaktts"], hidden=True)
@commands.bot_has_permissions(manage_messages=True)
async def saytts(self, ctx, *, text=None):
"""Say whatever you typed in, this time with TTS!"""
if text == None:
await ctx.reply("❓ What do you want me to say?", delete_after=10.0)
await ctx.message.add_reaction("❓")
else:
try:
await ctx.message.delete()
await ctx.trigger_typing()
                await ctx.say(content=text, tts=True)
except discord.Forbidden:
await ctx.author.send(
":no_entry_sign: I'm not allowed to send message here!",
delete_after=10.0,
)
except discord.NotFound:
await ctx.say(
":grey_exclamation: ERROR: Original message not found! (404 UNKNOWN MESSAGE)"
)
except discord.ext.commands.BotMissingPermissions:
await ctx.say(
"I don't have permission to delete the original message!",
delete_after=5.0,
)
@commands.command(aliases=["embedsay", "syd", "emb"])
@commands.bot_has_permissions(embed_links=True)
@commands.guild_only()
async def sayembed(self, ctx, *, message: commands.clean_content = None):
'''A command to embed messages quickly.'''
if message is None:
            await ctx.reply(embed=discord.Embed(description="❓ What do you want me to say?"), delete_after=5)
await ctx.message.add_reaction("❓")
else:
await ctx.message.delete()
em = discord.Embed(color=randint(0, 0xFFFFFF), timestamp=datetime.utcnow())
em.description = message
em.set_footer(icon_url=ctx.message.author.avatar_url, text=f"Sent by: {ctx.message.author}")
await ctx.say(embed=em)
@commands.command(aliases=["sto"])
@commands.bot_has_permissions(manage_messages=True)
@commands.guild_only()
async def sayto(self, ctx, destination: discord.TextChannel, *, text=None):
"""Send whatever you want to specific channel"""
if text == None:
await ctx.say("What do you want me to say?", delete_after=10.0)
await ctx.message.add_reaction("❓")
else:
try:
await ctx.message.delete()
await destination.trigger_typing()
await destination.send(text)
except discord.Forbidden:
await ctx.say(
f"I'm not allowed to send a message on #{destination}!",
delete_after=10.0,
)
except discord.ext.commands.BotMissingPermissions:
await ctx.say(
"I don't have permission to delete the original message!",
delete_after=5.0,
)
@commands.command()
async def f(self, ctx, *, text: commands.clean_content = None):
""" Press F to pay respect """
hearts = ['❤', '💛', '💚', '💙', '💜', '♥']
reason = f"for **{text}** " if text else ""
await ctx.reply(f"**{ctx.author.name}** has paid their respect {reason}{choice(hearts)}")
@commands.command()
async def hack(self, ctx, user: libneko.converters.InsensitiveMemberConverter = None):
"""Hack someone's account! Try it!"""
if user is None:
user = ctx.message.author
gifs = [
"https://thumbs.gfycat.com/LightheartedObviousBlowfish-size_restricted.gif",
"https://media3.giphy.com/media/115BJle6N2Av0A/giphy.gif",
"https://giffiles.alphacoders.com/119/119969.gif",
"https://thumbs.gfycat.com/FlippantAdeptHatchetfish-size_restricted.gif",
"https://media1.tenor.com/images/3d190af70cfeea404f796f869f46a3c3/tenor.gif",
"https://media1.tenor.com/images/505ddb5e0b0e8c3e96b66e1469ef47c1/tenor.gif",
]
gifemb = discord.Embed()
gifemb.set_image(url=choice(gifs))
msg = await ctx.reply(embed=gifemb, content=f"Hacking! Target: {user}")
await asyncio.sleep(2)
await msg.edit(content="Accessing Discord Files... [▓▓ ]")
await asyncio.sleep(2)
await msg.edit(content="Accessing Discord Files... [▓▓▓ ]")
await asyncio.sleep(2)
await msg.edit(content="Accessing Discord Files... [▓▓▓▓▓ ]")
await asyncio.sleep(2)
await msg.edit(content="Accessing Discord Files COMPLETE! [▓▓▓▓▓▓]")
await asyncio.sleep(2)
await msg.edit(content="Retrieving Login Info... [▓▓▓ ]")
await asyncio.sleep(3)
await msg.edit(content="Retrieving Login Info... [▓▓▓▓▓ ]")
await asyncio.sleep(3)
await msg.edit(content="Retrieving Login Info... [▓▓▓▓▓▓ ]")
await asyncio.sleep(4)
await msg.edit(content=f"An error has occurred hacking {user}'s account. Please try again later. ❌")
# 8Ball Command
@commands.command(name="8ball", aliases=["ball", "8b"])
async def ball(self, ctx, *, question: str):
"""Ask a question to the 8Ball!"""
ps = {
"psgood": [
"Yes",
"It is certain",
"It is decidedly so",
"Without a doubt",
"Yes - definitely",
"You may rely on it",
"As I see it, yes",
"Most likely",
"Outlook good",
"Signs point to yes",
],
"psbad": [
"Don't count on it",
"My reply is no",
"My sources say no",
"Outlook not so good",
"Very doubtful",
"No",
],
}
choices = choice(choice(list(ps.values())))
if choices in ps["psbad"]:
color = discord.Color(0xFF0000)
elif choices in ps["psgood"]:
color = discord.Color(0x26D934)
eightball = discord.Embed(color=color)
eightball.add_field(
name="Question:", value=question.capitalize(), inline=False)
eightball.add_field(name="Answer:", value=f"{choices}.")
eightball.set_author(name="The mighty 8-Ball")
eightball.set_footer(
text=f"Requested by: {ctx.message.author}", icon_url=ctx.message.author.avatar_url)
eightball.set_thumbnail(url="https://i.imgur.com/Q9dxpTz.png")
await ctx.reply(embed=eightball, content=None)
@commands.command(hidden=True, aliases=["ily"])
async def iloveyou(self, ctx):
"""
❤❤❤
"""
await ctx.reply(f"{ctx.author.mention}, I love you too! :heart::heart::heart:")
@commands.command(aliases=["rr"], hidden=True)
async def rickroll(self, ctx):
"""
Never gonna give you up...
"""
rick = discord.Embed()
rick.set_image(
url="https://i.kym-cdn.com/photos/images/original/000/041/494/1241026091_youve_been_rickrolled.gif")
await ctx.reply(embed=rick)
@commands.command(aliases=["bg"])
async def bigtext(self, ctx, *, text: str):
"""
Make your text 🇧 🇮 🇬
Only 1024 characters will be printed, due to limit imposed by Discord.
"""
s = ""
        if len(text) >= 1024:
            text = shorten(text, width=1024)
for char in text:
if char.isalpha():
s += f":regional_indicator_{char.lower()}: "
elif char.isspace():
s += " "
await ctx.reply(s)
@commands.command(aliases=["kitty", "kitten", "kat", "catto"])
@commands.cooldown(rate=3, per=5, type=commands.BucketType.user)
async def cat(self, ctx):
"""
Send cute cat pics.
"""
await ctx.trigger_typing()
session = self.acquire_session()
async with session.get('https://api.thecatapi.com/v1/images/search') as resp:
resp.raise_for_status()
data = await resp.json()
url = data[0]["url"]
color = ctx.author.color
embed = discord.Embed(
description="Here's a cute kitty :D", color=color, timestamp=datetime.utcnow())
embed.set_footer(icon_url=ctx.message.author.avatar_url,
text=f"Requested by: {ctx.message.author}")
embed.set_image(url=url)
await ctx.reply(embed=embed)
@commands.command(aliases=["doggie", "doge", "doggo"])
@commands.cooldown(rate=3, per=5, type=commands.BucketType.user)
async def dog(self, ctx):
"""
Send cute dog pics.
"""
await ctx.trigger_typing()
session = self.acquire_session()
async with session.get('https://api.thedogapi.com/v1/images/search') as resp:
resp.raise_for_status()
data = await resp.json()
url = data[0]["url"]
color = ctx.author.color
embed = discord.Embed(description="Here's a cute doggo!! :D",color=color, timestamp=datetime.utcnow())
embed.set_footer(icon_url=ctx.message.author.avatar_url,text=f"Requested by: {ctx.message.author}")
embed.set_image(url=url)
await ctx.reply(embed=embed)
@commands.command(aliases=["foxes"])
@commands.cooldown(rate=3, per=5, type=commands.BucketType.user)
async def fox(self, ctx):
"""
Send cute fox pics.
"""
await ctx.trigger_typing()
session = self.acquire_session()
async with session.get('https://randomfox.ca/floof/') as resp:
resp.raise_for_status()
data = await resp.json()
image = data["image"]
emb = discord.Embed(description="Here's a cute fox!! :D",
color=ctx.author.color, timestamp=datetime.utcnow())
emb.set_footer(icon_url=ctx.message.author.avatar_url,
text=f"Requested by: {ctx.message.author}")
emb.set_image(url=image)
await ctx.reply(embed=emb)
@commands.command()
@commands.cooldown(rate=3, per=5, type=commands.BucketType.user)
async def shibe(self, ctx):
"""
Send cute shibe pics.
"""
await ctx.trigger_typing()
session = self.acquire_session()
async with session.get('https://shibe.online/api/shibes') as resp:
resp.raise_for_status()
data = await resp.json()
img = data[0]
emb = discord.Embed(description="Here's a cute shibe!! :D",
color=ctx.author.color, timestamp=datetime.utcnow())
emb.set_footer(icon_url=ctx.message.author.avatar_url,
text=f"Requested by: {ctx.message.author}")
emb.set_image(url=img)
await ctx.reply(embed=emb)
@commands.command()
async def triggered(self, ctx, member: libneko.converters.InsensitiveMemberConverter=None):
"""**TRIGGERED**"""
member = member or ctx.author
await ctx.trigger_typing()
parameters = {
"avatar" : str(member.avatar_url_as(format="png", size=1024))
}
session = self.acquire_session()
async with session.get(f'https://some-random-api.ml/canvas/triggered', params = parameters) as resp:
imageData = io.BytesIO(await resp.read()) # read the image/bytes
img = discord.File(imageData, "triggered.gif")
em = discord.Embed(
title=f"{member.name} have been triggered!",
color=0xf1f1f1,
)
em.set_image(url="attachment://triggered.gif")
await ctx.reply(embed=em,file=img) # sending the file
@commands.command(aliases=["mpass"])
async def missionpass(self, ctx, member: libneko.converters.InsensitiveMemberConverter=None):
"""Mission Passed!"""
member = member or ctx.author
await ctx.trigger_typing()
parameters = {
"avatar" : str(member.avatar_url_as(format="png", size=1024))
}
session = self.acquire_session()
async with session.get(f'https://some-random-api.ml/canvas/passed', params = parameters) as resp:
imageData = io.BytesIO(await resp.read()) # read the image/bytes
img = discord.File(imageData, "passed.png")
em = discord.Embed(
title=f"Mission passed",
description="Respect +100",
color=0xf1f1f1,
)
em.set_image(url="attachment://passed.png")
await ctx.reply(embed=em,file=img) # sending the file
@commands.command()
async def wasted(self, ctx, member: libneko.converters.InsensitiveMemberConverter=None):
"""You Died"""
member = member or ctx.author
await ctx.trigger_typing()
parameters = {
"avatar" : str(member.avatar_url_as(format="png", size=1024))
}
session = self.acquire_session()
async with session.get(f'https://some-random-api.ml/canvas/wasted', params=parameters) as resp:
imageData = io.BytesIO(await resp.read()) # read the image/bytes
img = discord.File(imageData, "Wasted.png")
em = discord.Embed(
title=f"Wasted",
color=0xf1f1f1,
)
em.set_image(url="attachment://Wasted.png")
await ctx.reply(embed=em,file=img) # sending the file
@commands.command(aliases=["prison"])
async def jail(self, ctx, member: libneko.converters.InsensitiveMemberConverter=None):
"""Welcome to the Jail"""
member = member or ctx.author
await ctx.trigger_typing()
parameters = {
"avatar" : str(member.avatar_url_as(format="png", size=1024))
}
session = self.acquire_session()
async with session.get(f'https://some-random-api.ml/canvas/jail', params=parameters) as resp:
imageData = io.BytesIO(await resp.read()) # read the image/bytes
img = discord.File(imageData, "jail.png")
em = discord.Embed(
title=f"{member.name} have been jailed.",
color=0xf1f1f1,
)
em.set_image(url="attachment://jail.png")
await ctx.reply(embed=em,file=img) # sending the file
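    # Hedged sketch (not part of the original cog): the canvas commands in this
    # class (triggered, missionpass, wasted, jail, simpcard, ...) all share the
    # same shape -- fetch a rendered image from some-random-api.ml using the
    # member's avatar, wrap it in a discord.File and attach it to an embed. A
    # hypothetical shared helper could look like this; its name is an assumption.
    async def _canvas_image_sketch(self, ctx, member, endpoint, filename, title):
        parameters = {
            "avatar": str(member.avatar_url_as(format="png", size=1024))
        }
        session = self.acquire_session()
        async with session.get(f"https://some-random-api.ml/canvas/{endpoint}", params=parameters) as resp:
            image_data = io.BytesIO(await resp.read())  # read the image bytes
            img = discord.File(image_data, filename)
            em = discord.Embed(title=title, color=0xf1f1f1)
            em.set_image(url=f"attachment://{filename}")
            await ctx.reply(embed=em, file=img)  # send the attached file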
@commands.command(aliases=["simp"])
async def simpcard(self, ctx, member: libneko.converters.InsensitiveMemberConverter=None):
"""Simp card for u"""
member = member or ctx.author
await ctx.trigger_typing()
parameters = {
"avatar" : str(member.avatar_url_as(format="png", size=1024))
}
session = self.acquire_session()
async with session.get(f'https://some-random-api.ml/canvas/simpcard', params=parameters) as resp:
imageData = io.BytesIO(await resp.read()) # read the image/bytes
img = discord.File(imageData, "simpcard.png")
em = discord.Embed(
title=f"what a simp, {member.name}.",
color=0xf1f1f1,
)
em.set_image(url="attachment://simp.png")
await ctx.reply(embed=em,file=img) # sending the file
@commands.command(aliases=["lolice"])
async def lolipolice(self, ctx, member: libneko.converters.InsensitiveMemberConverter=None):
"""the police | |
w
# location based attention
class AttLoc(chainer.Chain):
def __init__(self, eprojs, dunits, att_dim, aconv_chans, aconv_filts):
super(AttLoc, self).__init__()
with self.init_scope():
self.mlp_enc = L.Linear(eprojs, att_dim)
self.mlp_dec = L.Linear(dunits, att_dim, nobias=True)
self.mlp_att = L.Linear(aconv_chans, att_dim, nobias=True)
self.loc_conv = L.Convolution2D(1, aconv_chans, ksize=(
1, 2 * aconv_filts + 1), pad=(0, aconv_filts))
self.gvec = L.Linear(att_dim, 1)
self.dunits = dunits
self.eprojs = eprojs
self.att_dim = att_dim
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.aconv_chans = aconv_chans
def reset(self):
'''reset states
:return:
'''
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
def __call__(self, enc_hs, dec_z, att_prev, scaling=2.0):
'''AttLoc forward
:param enc_hs:
:param dec_z:
:param att_prev:
:param scaling:
:return:
'''
batch = len(enc_hs)
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None:
self.enc_h = F.pad_sequence(enc_hs) # utt x frame x hdim
self.h_length = self.enc_h.shape[1]
# utt x frame x att_dim
self.pre_compute_enc_h = linear_tensor(self.mlp_enc, self.enc_h)
if dec_z is None:
dec_z = chainer.Variable(self.xp.zeros(
(batch, self.dunits), dtype=np.float32))
else:
dec_z = F.reshape(dec_z, (batch, self.dunits))
# initialize attention weight with uniform dist.
if att_prev is None:
att_prev = [self.xp.full(
hh.shape[0], 1.0 / hh.shape[0], dtype=np.float32) for hh in enc_hs]
att_prev = [chainer.Variable(att) for att in att_prev]
att_prev = F.pad_sequence(att_prev)
        # TODO(watanabe) use <chainer variable>.reshape(), instead of F.reshape()
# att_prev: utt x frame -> utt x 1 x 1 x frame -> utt x att_conv_chans x 1 x frame
att_conv = self.loc_conv(
F.reshape(att_prev, (batch, 1, 1, self.h_length)))
# att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
att_conv = F.swapaxes(F.squeeze(att_conv, axis=2), 1, 2)
# att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
att_conv = linear_tensor(self.mlp_att, att_conv)
# dec_z_tiled: utt x frame x att_dim
dec_z_tiled = F.broadcast_to(
F.expand_dims(self.mlp_dec(dec_z), 1), self.pre_compute_enc_h.shape)
# dot with gvec
# utt x frame x att_dim -> utt x frame
# TODO(watanabe) use batch_matmul
e = F.squeeze(linear_tensor(self.gvec, F.tanh(
att_conv + self.pre_compute_enc_h + dec_z_tiled)), axis=2)
# Applying a minus-large-number filter to make a probability value zero for a padded area
# simply degrades the performance, and I gave up this implementation
# Apply a scaling to make an attention sharp
w = F.softmax(scaling * e)
        # weighted sum over frames
# utt x hdim
c = F.sum(self.enc_h * F.broadcast_to(F.expand_dims(w, 2), self.enc_h.shape), axis=1)
return c, w
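# Hedged sketch (not part of the original file): how AttLoc above is typically
# driven for a single decoder step, with tiny illustrative sizes that are
# assumptions rather than project defaults.
def _attloc_usage_sketch():
    import numpy as np
    att = AttLoc(eprojs=4, dunits=4, att_dim=4, aconv_chans=2, aconv_filts=3)
    enc_hs = [np.random.randn(5, 4).astype(np.float32),
              np.random.randn(3, 4).astype(np.float32)]
    att.reset()                     # clear any cached encoder projections
    c, w = att(enc_hs, None, None)  # first step: dec_z=None, uniform att_prev
    return c, w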
class NoAtt(chainer.Chain):
def __init__(self):
super(NoAtt, self).__init__()
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.c = None
def reset(self):
'''reset states
:return:
'''
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.c = None
def __call__(self, enc_hs, dec_z, att_prev):
'''NoAtt forward
:param enc_hs:
:param dec_z: dummy
:param att_prev:
:return:
'''
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None:
self.enc_h = F.pad_sequence(enc_hs) # utt x frame x hdim
self.h_length = self.enc_h.shape[1]
# initialize attention weight with uniform dist.
if att_prev is None:
att_prev = [self.xp.full(
hh.shape[0], 1.0 / hh.shape[0], dtype=np.float32) for hh in enc_hs]
att_prev = [chainer.Variable(att) for att in att_prev]
att_prev = F.pad_sequence(att_prev)
self.c = F.sum(self.enc_h * F.broadcast_to(F.expand_dims(att_prev, 2), self.enc_h.shape), axis=1)
return self.c, att_prev
# ------------- Decoder Network ----------------------------------------------------------------------------------------
class Decoder(chainer.Chain):
def __init__(self, eprojs, odim, dlayers, dunits, sos, eos, att, verbose=0,
char_list=None, labeldist=None, lsm_weight=0.):
super(Decoder, self).__init__()
with self.init_scope():
self.embed = DL.EmbedID(odim, dunits)
self.lstm0 = L.StatelessLSTM(dunits + eprojs, dunits)
for l in six.moves.range(1, dlayers):
setattr(self, 'lstm%d' % l, L.StatelessLSTM(dunits, dunits))
self.output = L.Linear(dunits, odim)
self.loss = None
self.att = att
self.dlayers = dlayers
self.dunits = dunits
self.sos = sos
self.eos = eos
self.verbose = verbose
self.char_list = char_list
# for label smoothing
self.labeldist = labeldist
self.vlabeldist = None
self.lsm_weight = lsm_weight
def __call__(self, hs, ys):
'''Decoder forward
:param Variable hs:
:param Variable ys:
:return:
'''
self.loss = None
# prepare input and output word sequences with sos/eos IDs
eos = self.xp.array([self.eos], 'i')
sos = self.xp.array([self.sos], 'i')
ys_in = [F.concat([sos, y], axis=0) for y in ys]
ys_out = [F.concat([y, eos], axis=0) for y in ys]
# padding for ys with -1
# pys: utt x olen
pad_ys_in = F.pad_sequence(ys_in, padding=self.eos)
pad_ys_out = F.pad_sequence(ys_out, padding=-1)
# get dim, length info
batch = pad_ys_out.shape[0]
olength = pad_ys_out.shape[1]
logging.info(self.__class__.__name__ + ' input lengths: ' + str(self.xp.array([h.shape[0] for h in hs])))
logging.info(self.__class__.__name__ + ' output lengths: ' + str(self.xp.array([y.shape[0] for y in ys_out])))
# initialization
c_list = [None] # list of cell state of each layer
z_list = [None] # list of hidden state of each layer
for l in six.moves.range(1, self.dlayers):
c_list.append(None)
z_list.append(None)
att_w = None
z_all = []
self.att.reset() # reset pre-computation of h
att_weight_all = [] # for debugging
# pre-computation of embedding
eys = self.embed(pad_ys_in) # utt x olen x zdim
eys = F.separate(eys, axis=1)
# loop for an output sequence
for i in six.moves.range(olength):
att_c, att_w = self.att(hs, z_list[0], att_w)
ey = F.hstack((eys[i], att_c)) # utt x (zdim + hdim)
c_list[0], z_list[0] = self.lstm0(c_list[0], z_list[0], ey)
for l in six.moves.range(1, self.dlayers):
c_list[l], z_list[l] = self['lstm%d' % l](c_list[l], z_list[l], z_list[l - 1])
z_all.append(z_list[-1])
att_weight_all.append(att_w.data) # for debugging
z_all = F.reshape(F.stack(z_all, axis=1),
(batch * olength, self.dunits))
# compute loss
y_all = self.output(z_all)
self.loss = F.softmax_cross_entropy(y_all, F.flatten(pad_ys_out))
# -1: eos, which is removed in the loss computation
self.loss *= (np.mean([len(x) for x in ys_in]) - 1)
acc = F.accuracy(y_all, F.flatten(pad_ys_out), ignore_label=-1)
logging.info('att loss:' + str(self.loss.data))
# show predicted character sequence for debug
if self.verbose > 0 and self.char_list is not None:
y_hat = F.reshape(y_all, (batch, olength, -1))
y_true = pad_ys_out
for (i, y_hat_), y_true_ in zip(enumerate(y_hat.data), y_true.data):
if i == MAX_DECODER_OUTPUT:
break
idx_hat = self.xp.argmax(y_hat_[y_true_ != -1], axis=1)
idx_true = y_true_[y_true_ != -1]
seq_hat = [self.char_list[int(idx)] for idx in idx_hat]
seq_true = [self.char_list[int(idx)] for idx in idx_true]
seq_hat = "".join(seq_hat).replace('<space>', ' ')
seq_true = "".join(seq_true).replace('<space>', ' ')
logging.info("groundtruth[%d]: " % i + seq_true)
logging.info("prediction [%d]: " % i + seq_hat)
if self.labeldist is not None:
if self.vlabeldist is None:
self.vlabeldist = chainer.Variable(self.xp.asarray(self.labeldist))
loss_reg = - F.sum(F.scale(F.log_softmax(y_all), self.vlabeldist, axis=1)) / len(ys_in)
self.loss = (1. - self.lsm_weight) * self.loss + self.lsm_weight * loss_reg
return self.loss, acc, att_weight_all
def recognize_beam(self, h, lpz, recog_args, char_list, rnnlm=None):
'''beam search implementation
:param h:
:param recog_args:
:param char_list:
:return:
'''
logging.info('input lengths: ' + str(h.shape[0]))
# initialization
c_list = [None] # list of cell state of each layer
z_list = [None] # list of hidden state of each layer
for l in six.moves.range(1, self.dlayers):
c_list.append(None)
z_list.append(None)
a = None
self.att.reset() # reset pre-computation of h
        # search params
beam = recog_args.beam_size
penalty = recog_args.penalty
ctc_weight = recog_args.ctc_weight
        # prepare sos
y = self.xp.full(1, self.sos, 'i')
if recog_args.maxlenratio == 0:
maxlen = h.shape[0]
else:
# maxlen >= 1
maxlen = max(1, int(recog_args.maxlenratio * h.shape[0]))
minlen = int(recog_args.minlenratio * h.shape[0])
logging.info('max output length: ' + str(maxlen))
logging.info('min output length: ' + str(minlen))
# initialize hypothesis
if rnnlm:
hyp = {'score': 0.0, 'yseq': [y], 'c_prev': c_list, 'z_prev': z_list, 'a_prev': a, 'rnnlm_prev': None}
else:
hyp = {'score': 0.0, 'yseq': [y], 'c_prev': c_list, 'z_prev': z_list, 'a_prev': a}
if lpz is not None:
ctc_prefix_score = CTCPrefixScore(lpz, 0, self.eos, self.xp)
hyp['ctc_state_prev'] = ctc_prefix_score.initial_state()
hyp['ctc_score_prev'] = 0.0
if ctc_weight != 1.0:
# pre-pruning based on attention scores
ctc_beam = min(lpz.shape[-1], int(beam * CTC_SCORING_RATIO))
else:
ctc_beam = lpz.shape[-1]
hyps = [hyp]
ended_hyps = []
for i in six.moves.range(maxlen):
logging.debug('position ' + str(i))
hyps_best_kept = []
for hyp in hyps:
ey = self.embed(hyp['yseq'][i]) # utt list (1) x zdim
att_c, att_w = self.att([h], hyp['z_prev'][0], hyp['a_prev'])
ey = F.hstack((ey, att_c)) # utt(1) x (zdim + hdim)
c_list[0], z_list[0] = self.lstm0(hyp['c_prev'][0], hyp['z_prev'][0], ey)
for l in six.moves.range(1, self.dlayers):
c_list[l], z_list[l] = self['lstm%d' % l](
hyp['c_prev'][l], hyp['z_prev'][l], z_list[l - 1])
# get nbest local scores and their ids
local_att_scores = F.log_softmax(self.output(z_list[-1])).data
if rnnlm:
rnnlm_state, z_rnnlm = rnnlm.predictor(hyp['rnnlm_prev'], hyp['yseq'][i])
local_lm_scores = F.log_softmax(z_rnnlm).data
local_scores = local_att_scores + recog_args.lm_weight * local_lm_scores
else:
local_scores = local_att_scores
if lpz is not None:
local_best_ids = self.xp.argsort(local_scores, axis=1)[0, ::-1][:ctc_beam]
ctc_scores, ctc_states = ctc_prefix_score(hyp['yseq'], local_best_ids, hyp['ctc_state_prev'])
local_scores = \
(1.0 - ctc_weight) * local_att_scores[:, local_best_ids] \
+ ctc_weight * (ctc_scores - hyp['ctc_score_prev'])
if rnnlm:
local_scores += | |
'''
*
* Copyright (C) 2020 Universitat Politècnica de Catalunya.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
'''
# -*- coding: utf-8 -*-
""" This module defines a generic way to communicate with the MySQL database.
This module takes advantage of the regular database structure to ease the
requests and data management.
It basically has two different classes: db and Connector.
- db: defines some standard methods to create the most basic query structures
for accessing database tables.
- Connector: uses db class to represent a database table inside the
application.
To search for an item in the database you have to create a Connector passing
as a parameter a string with the table you want to load data from, then call
the method 'load' passing as a parameter the id as an integer or the 'hash' as
a string to look for, and the Connector creates the request and searches the
item in the database.
Once you have the item you can get the related data by calling the 'get'
function with the table you want to take the data from. The 'get' function
looks for the related data using the previously obtained element id and it
returns a list of Connectors containing the data. You can then look for other
related data using the returned Connectors.
Inside each connector there is a parameter called 'values' that contains a list
with all the selected table column information (the selected element properties).
You can then modify those values and call the method 'save' to update the
values in the database table.
When you want to create a new row in a table you only need to create a new
Connector to that table, call the 'load' method with the new hash you want
to insert in the table, and if the element doesn't already exist the method
will return a new Connector element with all the columns of the table
initialized as None values. You only have to insert the values in the
list, and when you call the 'save' method it will insert the row into the
table as a new row.
There is an extra function called 'get_all' that returns all the elements
inside the table of the Connector. If for example you want to take all the
domains, you call a new Connector passing 'domain' as the table name,
and then call the 'get_all' function to get all the domains. The results are
returned as a list of Connectors representing the given data.
This way of managing data greatly simplifies the database requests needed inside
the code, but it clearly generates more requests than strictly necessary. For the
sake of speed and performance, the Connector therefore also includes some specific
requests that fetch extensive data which would otherwise be very slow to load
using only the simple methods.
Last, there is a function called 'custom' where you can generate a custom
request for specific reasons.
"""
# Basic modules
import MySQLdb
import re
import config
import logging
import logging.config
from utils import hash_string
logging.config.fileConfig('../logging.conf')
logger = logging.getLogger("DB_MANAGER")
CROSS_TABLES = ["domain_subdomain", "domain_category", "domain_third_party", "domain_url",
"pattern_url", "resource_fingerprint"]
class Db(object):
"""
This class manages the basic database operations. It defines the most
basic requests taking into account the database table definitions to
make easier the data management.
"""
def __init__(self):
self.host = config.MYSQL_HOST
self.user = config.MYSQL_USER
self.password = config.MYSQL_PASSWORD
self.db = config.MYSQL_DB
        self.conn = MySQLdb.connect(host=self.host, port=3306, user=self.user, passwd=self.password, db=self.db,
use_unicode=True, charset='utf8mb4')
def close(self):
""" Closes the connection to the database. """
self.conn.close()
def initialize(self, sites, start, timestamp):
""" initializes the database with the Alexa's list domain information. """
for i, domain in enumerate(sites, start + 1):
# domain = extract_domain(domain)
print(str(i) + ": " + domain)
hash_key = hash_string(domain)
element = {"hash": hash_key, "name": domain, "rank": i, "insert_date": timestamp}
element_id = self.custom(query="SELECT id FROM domain WHERE domain.hash = %s", values=[hash_key])
if not element_id:
self.insert("domain", element)
else:
element["id"] = element_id[0]["id"]
self.update("domain", element)
def __select(self, fields, tables, conditions, order, values, log=None):
""" Creates a standard SELECT request. """
request = "SELECT "
field_list = ", ".join(fields)
request += field_list
request += " FROM " + ", ".join(tables)
if conditions:
cond_list = " WHERE "
for index, cond in enumerate(conditions):
cond_list += "(" + cond
if values[index] == "NULL":
cond_list += " IS %s)"
values[index] = None
elif values[index] == "NOT NULL":
cond_list += " IS NOT %s)"
values[index] = None
else:
cond_list += " = %s)"
if index < len(conditions) - 1:
cond_list += " AND "
request += cond_list
if order:
# request += " ORDER BY '"+"', '".join(order)+"'"
request += " ORDER BY " + ", ".join(order)
self.conn.ping()
cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
results = []
try:
if values:
if log:
logger.debug(request % tuple(values))
cursor.execute(request, tuple(values))
else:
if log:
logger.debug(request)
cursor.execute(request)
except MySQLdb.Error as error:
if values:
logger.error(request % tuple(values))
else:
logger.error(request)
logger.error("SQL ERROR: " + str(error) + "\n-----------------")
else:
for row in cursor.fetchall():
result = {}
for key in row.keys():
result[key] = row[key]
if row[key] == "NULL":
result[key] = None
results.append(result)
if log:
logger.debug("REQUEST OK. Results: " + str(results) + "\n-----------------")
cursor.close()
return results
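    # Illustrative example (an assumption, not taken from the original code): for
    # fields=["id", "name"], tables=["domain"], conditions=["domain.hash"],
    # order=["rank"] and values=["abc123"], the request built above is
    #   SELECT id, name FROM domain WHERE (domain.hash = %s) ORDER BY rank
    # and MySQLdb substitutes "abc123" for the %s placeholder.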
def __insert(self, table, fields, values, log=None):
""" Creates a standard INSERT request. """
if fields and len(fields) != len(values):
logger.warning("Incorrect number of field/values")
return 0
request = "INSERT INTO " + table
if fields:
request += " (" + fields[0]
if len(fields) > 1:
for index in range(1, len(fields)):
if "rank" in fields[index]:
request += ", `" + fields[index] + "`"
else:
request += ", " + fields[index]
request += ")"
request += " VALUES (%s"
if len(values) > 1:
for index in range(1, len(values)):
request += ", %s"
request += ")"
request += " ON DUPLICATE KEY UPDATE "
if fields:
request += fields[0]+"=%s"
if len(fields) > 1:
for index in range(1, len(fields)):
if "rank" in fields[index]:
request += ", `" + fields[index] + "`=%s"
else:
request += ", " + fields[index] + "=%s"
new_values = values.copy()
for value in new_values:
values.append(value)
self.conn.ping()
cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
try:
if log:
logger.debug(request % tuple(values))
cursor.execute(request, tuple(values))
except MySQLdb.Error as error:
logger.error(request % tuple(values))
logger.error("SQL ERROR: " + str(error) + "\n-----------------")
return 0
else:
self.conn.commit()
if log:
logger.debug("REQUEST OK. Id: " + str(cursor.lastrowid) + "\n-----------------")
last_row_id = cursor.lastrowid
cursor.close()
return last_row_id
def __update(self, table, fields, conditions, values, log=None):
""" Creates a standard UPDATE request. """
if fields and len(fields) + len(conditions) != len(values):
logger.warning("Incorrect number of fields/conditions/values")
return 0
request = "UPDATE IGNORE " + table
request += " SET " + fields[0] + " = %s"
if len(fields) > 1:
for index in range(1, len(fields)):
request += ", " + fields[index] + " = %s"
request += " WHERE " + conditions[0] + " = %s"
if len(conditions) > 1:
for index in range(1, len(conditions)):
request += " AND " + conditions[index] + " = %s"
self.conn.ping()
cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
try:
if log:
logger.debug(request % tuple(values))
cursor.execute(request, tuple(values))
except MySQLdb.Error as error:
logger.error(request % tuple(values))
logger.error("SQL ERROR: " + str(error) + "\n-----------------")
cursor.close()
return 0
else:
self.conn.commit()
if log:
logger.debug("REQUEST OK.\n-----------------")
cursor.close()
return -1
def _delete(self, table, conditions, values, log=None):
""" Creates a standard DELETE request. """
request = "DELETE FROM " + table
request += " WHERE " + conditions[0] + " = %s"
if len(conditions) > 1:
for index in range(1, len(conditions)):
request += " AND " + conditions[index] + " = %s"
self.conn.ping()
cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
try:
if log:
logger.debug(request % tuple(values))
cursor.execute(request, tuple(values))
except MySQLdb.Error as error:
logger.error(request % tuple(values))
logger.error("SQL ERROR: " + str(error) + "\n-----------------")
cursor.close()
return 0
else:
self.conn.commit()
if log:
logger.debug("REQUEST OK.\n-----------------")
cursor.close()
return 1
| |
"""
ZiGate component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/zigate/
"""
import logging
import voluptuous as vol
import os
import datetime
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.components.group import \
ENTITY_ID_FORMAT as GROUP_ENTITY_ID_FORMAT
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.event import track_time_change
from homeassistant.const import (ATTR_BATTERY_LEVEL, CONF_PORT,
CONF_HOST,
ATTR_ENTITY_ID,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['zigate==0.31.0']
# REQUIREMENTS = ['https://github.com/doudz/zigate/archive/dev.zip#1.0.0']
DEPENDENCIES = ['persistent_notification']
DOMAIN = 'zigate'
SCAN_INTERVAL = datetime.timedelta(seconds=60)
DATA_ZIGATE_DEVICES = 'zigate_devices'
DATA_ZIGATE_ATTRS = 'zigate_attributes'
ADDR = 'addr'
IEEE = 'ieee'
GROUP_NAME_ALL_ZIGATE = 'all zigate'
ENTITY_ID_ALL_ZIGATE = GROUP_ENTITY_ID_FORMAT.format('all_zigate')
SUPPORTED_PLATFORMS = ('sensor',
'binary_sensor',
'switch',
'light',
'cover',
'climate')
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_PORT): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional('channel'): cv.positive_int,
vol.Optional('gpio'): cv.boolean,
vol.Optional('enable_led'): cv.boolean,
vol.Optional('polling'): cv.boolean,
})
}, extra=vol.ALLOW_EXTRA)
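# Hedged example (an assumption, not taken from the original repository): a
# configuration.yaml entry matching CONFIG_SCHEMA above could look like
#
#   zigate:
#     port: /dev/ttyUSB0
#     channel: 11
#     enable_led: true
#     polling: true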
REFRESH_DEVICE_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
})
DISCOVER_DEVICE_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
})
RAW_COMMAND_SCHEMA = vol.Schema({
vol.Required('cmd'): cv.string,
vol.Optional('data'): cv.string,
})
IDENTIFY_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
})
REMOVE_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
})
READ_ATTRIBUTE_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('endpoint'): cv.string,
vol.Required('cluster'): cv.string,
vol.Required('attribute_id'): cv.string,
vol.Optional('manufacturer_code'): cv.string,
})
WRITE_ATTRIBUTE_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('endpoint'): cv.string,
vol.Required('cluster'): cv.string,
vol.Required('attribute_id'): cv.string,
vol.Required('attribute_type'): cv.string,
vol.Required('value'): cv.string,
vol.Optional('manufacturer_code'): cv.string,
})
ADD_GROUP_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('endpoint'): cv.string,
vol.Optional('group_addr'): cv.string,
})
REMOVE_GROUP_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('endpoint'): cv.string,
vol.Optional('group_addr'): cv.string,
})
GET_GROUP_MEMBERSHIP_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('endpoint'): cv.string,
})
ACTION_ONOFF_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('onoff'): cv.string,
vol.Optional('endpoint'): cv.string,
vol.Optional('on_time'): cv.string,
vol.Optional('off_time'): cv.string,
vol.Optional('effect'): cv.string,
vol.Optional('gradient'): cv.string,
})
OTA_LOAD_IMAGE_SCHEMA = vol.Schema({
vol.Required('imagepath'): cv.string,
})
OTA_IMAGE_NOTIFY_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Optional('destination_enpoint'): cv.string,
vol.Optional('payload_type'): cv.string,
})
VIEW_SCENE_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('endpoint'): cv.string,
vol.Required('group_addr'): cv.string,
vol.Required('scene'): cv.string,
})
ADD_SCENE_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('endpoint'): cv.string,
vol.Required('group_addr'): cv.string,
vol.Required('scene'): cv.string,
vol.Required('name'): cv.string,
vol.Optional('transition'): cv.string,
})
REMOVE_SCENE_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('endpoint'): cv.string,
vol.Required('group_addr'): cv.string,
vol.Optional('scene'): cv.string,
})
STORE_SCENE_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('endpoint'): cv.string,
vol.Required('group_addr'): cv.string,
vol.Required('scene'): cv.string,
})
RECALL_SCENE_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('endpoint'): cv.string,
vol.Required('group_addr'): cv.string,
vol.Required('scene'): cv.string,
})
SCENE_MEMBERSHIP_REQUEST_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('endpoint'): cv.string,
vol.Required('group_addr'): cv.string,
})
COPY_SCENE_SCHEMA = vol.Schema({
vol.Optional(ADDR): cv.string,
vol.Optional(IEEE): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required('endpoint'): cv.string,
vol.Required('from_group_addr'): cv.string,
vol.Required('from_scene'): cv.string,
vol.Required('to_group_addr'): cv.string,
vol.Required('to_scene'): cv.string,
})
BUILD_NETWORK_TABLE_SCHEMA = vol.Schema({
vol.Optional('force'): cv.boolean,
})
def setup(hass, config):
"""Setup zigate platform."""
import zigate
port = config[DOMAIN].get(CONF_PORT)
host = config[DOMAIN].get(CONF_HOST)
gpio = config[DOMAIN].get('gpio', False)
enable_led = config[DOMAIN].get('enable_led', True)
polling = config[DOMAIN].get('polling', True)
channel = config[DOMAIN].get('channel')
persistent_file = os.path.join(hass.config.config_dir,
'zigate.json')
_LOGGER.debug('Port : %s', port)
_LOGGER.debug('Host : %s', host)
_LOGGER.debug('GPIO : %s', gpio)
_LOGGER.debug('Led : %s', enable_led)
_LOGGER.debug('Channel : %s', channel)
myzigate = zigate.connect(port=port, host=host,
path=persistent_file,
auto_start=False,
gpio=gpio
)
_LOGGER.debug('ZiGate object created %s', myzigate)
hass.data[DOMAIN] = myzigate
hass.data[DATA_ZIGATE_DEVICES] = {}
hass.data[DATA_ZIGATE_ATTRS] = {}
component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_ZIGATE)
component.setup(config)
entity = ZiGateComponentEntity(myzigate)
hass.data[DATA_ZIGATE_DEVICES]['zigate'] = entity
component.add_entities([entity])
def device_added(**kwargs):
device = kwargs['device']
_LOGGER.debug('Add device {}'.format(device))
ieee = device.ieee or device.addr # compatibility
if ieee not in hass.data[DATA_ZIGATE_DEVICES]:
entity = ZiGateDeviceEntity(hass, device, polling)
hass.data[DATA_ZIGATE_DEVICES][ieee] = entity
component.add_entities([entity])
if 'signal' in kwargs:
hass.components.persistent_notification.create(
('A new ZiGate device "{}"'
' has been added !'
).format(device),
title='ZiGate')
def device_removed(**kwargs):
# component.async_remove_entity
device = kwargs['device']
ieee = device.ieee or device.addr # compatibility
hass.components.persistent_notification.create(
'The ZiGate device {}({}) is gone.'.format(device.ieee,
device.addr),
title='ZiGate')
entity = hass.data[DATA_ZIGATE_DEVICES][ieee]
component.async_remove_entity(entity.entity_id)
del hass.data[DATA_ZIGATE_DEVICES][ieee]
def device_need_discovery(**kwargs):
device = kwargs['device']
hass.components.persistent_notification.create(
('The ZiGate device {}({}) needs to be discovered'
' (missing important'
' information)').format(device.ieee, device.addr),
title='ZiGate')
zigate.dispatcher.connect(device_added,
zigate.ZIGATE_DEVICE_ADDED, weak=False)
zigate.dispatcher.connect(device_removed,
zigate.ZIGATE_DEVICE_REMOVED, weak=False)
zigate.dispatcher.connect(device_need_discovery,
zigate.ZIGATE_DEVICE_NEED_DISCOVERY, weak=False)
def attribute_updated(**kwargs):
device = kwargs['device']
ieee = device.ieee or device.addr # compatibility
attribute = kwargs['attribute']
_LOGGER.debug('Update attribute for device {} {}'.format(device,
attribute))
entity = hass.data[DATA_ZIGATE_DEVICES].get(ieee)
event_data = attribute.copy()
if type(event_data.get('type')) == type:
event_data['type'] = event_data['type'].__name__
event_data['ieee'] = device.ieee
event_data['addr'] = device.addr
event_data['device_type'] = device.get_property_value('type')
if entity:
event_data['entity_id'] = entity.entity_id
hass.bus.fire('zigate.attribute_updated', event_data)
zigate.dispatcher.connect(attribute_updated,
zigate.ZIGATE_ATTRIBUTE_UPDATED, weak=False)
def device_updated(**kwargs):
device = kwargs['device']
_LOGGER.debug('Update device {}'.format(device))
ieee = device.ieee or device.addr # compatibility
entity = hass.data[DATA_ZIGATE_DEVICES].get(ieee)
if not entity:
_LOGGER.debug('Device not found {}, adding it'.format(device))
device_added(device=device)
event_data = {}
event_data['ieee'] = device.ieee
event_data['addr'] = device.addr
event_data['device_type'] = device.get_property_value('type')
if entity:
event_data['entity_id'] = entity.entity_id
hass.bus.fire('zigate.device_updated', event_data)
zigate.dispatcher.connect(device_updated,
zigate.ZIGATE_DEVICE_UPDATED, weak=False)
zigate.dispatcher.connect(device_updated,
zigate.ZIGATE_ATTRIBUTE_ADDED, weak=False)
zigate.dispatcher.connect(device_updated,
zigate.ZIGATE_DEVICE_ADDRESS_CHANGED, weak=False)
def zigate_reset(service):
myzigate.reset()
def permit_join(service):
myzigate.permit_join()
def zigate_cleanup(service):
'''
Remove missing device
'''
myzigate.cleanup_devices()
def start_zigate(service_event=None):
myzigate.autoStart(channel)
myzigate.start_auto_save()
myzigate.set_led(enable_led)
version = myzigate.get_version_text()
if version < '3.0f':
hass.components.persistent_notification.create(
('Your zigate firmware is outdated, '
'Please upgrade to 3.0f or later !'),
title='ZiGate')
# first load
for device in myzigate.devices:
device_added(device=device)
for platform in SUPPORTED_PLATFORMS:
load_platform(hass, platform, DOMAIN, {}, config)
hass.bus.fire('zigate.started')
def stop_zigate(service_event):
myzigate.save_state()
myzigate.close()
hass.bus.fire('zigate.stopped')
def refresh_devices_list(service):
myzigate.get_devices_list()
def generate_templates(service):
myzigate.generate_templates(hass.config.config_dir)
def _get_addr_from_service_request(service):
entity_id = service.data.get(ATTR_ENTITY_ID)
ieee = service.data.get(IEEE)
addr = service.data.get(ADDR)
if entity_id:
entity = component.get_entity(entity_id)
if entity:
addr = entity._device.addr
elif ieee:
device = myzigate.get_device_from_ieee(ieee)
if device:
addr = device.addr
return addr
def _to_int(value):
'''
Convert a str to an int; strings containing 'x' are parsed as hexadecimal.
'''
if 'x' in value:
return int(value, 16)
return int(value)
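# Illustrative usage (added for clarity, not part of the original source):
# _to_int('0x12') -> 18, _to_int('12') -> 12; any value containing 'x' is parsed as hex.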
def refresh_device(service):
addr = _get_addr_from_service_request(service)
if addr:
myzigate.refresh_device(addr, full=True)
else:
for device in myzigate.devices:
device.refresh_device(full=True)
def discover_device(service):
addr = _get_addr_from_service_request(service)
if addr:
myzigate.discover_device(addr, True)
def network_scan(service):
myzigate.start_network_scan()
def raw_command(service):
cmd = _to_int(service.data.get('cmd'))
data = service.data.get('data', '')
myzigate.send_data(cmd, data)
def identify_device(service):
addr = _get_addr_from_service_request(service)
myzigate.identify_device(addr)
def remove_device(service):
addr = _get_addr_from_service_request(service)
myzigate.remove_device(addr)
def initiate_touchlink(service):
myzigate.initiate_touchlink()
def touchlink_factory_reset(service):
myzigate.touchlink_factory_reset()
def read_attribute(service):
addr = _get_addr_from_service_request(service)
endpoint = _to_int(service.data.get('endpoint'))
cluster = _to_int(service.data.get('cluster'))
attribute_id = _to_int(service.data.get('attribute_id'))
manufacturer_code = _to_int(service.data.get('manufacturer_code', '0'))
myzigate.read_attribute_request(addr, endpoint, cluster, attribute_id,
manufacturer_code=manufacturer_code)
def write_attribute(service):
addr = _get_addr_from_service_request(service)
endpoint = _to_int(service.data.get('endpoint'))
cluster = _to_int(service.data.get('cluster'))
attribute_id = _to_int(service.data.get('attribute_id'))
attribute_type = _to_int(service.data.get('attribute_type'))
value = _to_int(service.data.get('value'))
attributes = [(attribute_id, attribute_type, value)]
manufacturer_code = _to_int(service.data.get('manufacturer_code', '0'))
myzigate.write_attribute_request(addr, endpoint, cluster, attributes,
manufacturer_code=manufacturer_code)
def add_group(service):
addr = _get_addr_from_service_request(service)
endpoint = _to_int(service.data.get('endpoint'))
groupaddr = service.data.get('group_addr')
myzigate.add_group(addr, endpoint, groupaddr)
def remove_group(service):
addr = _get_addr_from_service_request(service)
endpoint = _to_int(service.data.get('endpoint'))
groupaddr = service.data.get('group_addr')
myzigate.remove_group(addr, endpoint, groupaddr)
def get_group_membership(service):
addr = _get_addr_from_service_request(service)
endpoint = _to_int(service.data.get('endpoint'))
myzigate.get_group_membership(addr, endpoint)
def action_onoff(service):
addr = _get_addr_from_service_request(service)
onoff = _to_int(service.data.get('onoff'))
endpoint = _to_int(service.data.get('endpoint', '0'))
ontime = _to_int(service.data.get('on_time', '0'))
offtime = _to_int(service.data.get('off_time', '0'))
effect = _to_int(service.data.get('effect', '0'))
gradient = _to_int(service.data.get('gradient', '0'))
myzigate.action_onoff(addr, endpoint, onoff, ontime, offtime, effect, gradient)
def build_network_table(service):
table = myzigate.build_neighbours_table(service.data.get('force', False))
_LOGGER.debug('Neighbours table {}'.format(table))
entity = hass.data[DATA_ZIGATE_DEVICES].get('zigate')
if entity:
entity.network_table = table
def ota_load_image(service):
ota_image_path = service.data.get('imagepath')
myzigate.ota_load_image(ota_image_path)
def ota_image_notify(service):
addr = _get_addr_from_service_request(service)
destination_endpoint = _to_int(service.data.get('destination_endpoint', '1'))
payload_type = _to_int(service.data.get('payload_type', '0'))
myzigate.ota_image_notify(addr, destination_endpoint, payload_type)
def get_ota_status(service):
myzigate.get_ota_status()
def view_scene(service):
addr = _get_addr_from_service_request(service)
endpoint = _to_int(service.data.get('endpoint', '1'))
groupaddr = service.data.get('group_addr')
scene = _to_int(service.data.get('scene'))
myzigate.view_scene(addr, endpoint, groupaddr, scene)
def add_scene(service):
addr = _get_addr_from_service_request(service)
endpoint = _to_int(service.data.get('endpoint', '1'))
groupaddr = service.data.get('group_addr')
scene = _to_int(service.data.get('scene'))
name = service.data.get('scene_name')
transition = _to_int(service.data.get('transition', '0'))
myzigate.add_scene(addr, endpoint, groupaddr, scene, name, transition)
def remove_scene(service):
addr = _get_addr_from_service_request(service)
endpoint = _to_int(service.data.get('endpoint', '1'))
groupaddr = service.data.get('group_addr')
scene = _to_int(service.data.get('scene', -1))
if scene == -1:
scene = None
myzigate.remove_scene(addr, endpoint, groupaddr, scene)
def store_scene(service):
addr = _get_addr_from_service_request(service)
endpoint = _to_int(service.data.get('endpoint', '1'))
groupaddr = service.data.get('group_addr')
scene = _to_int(service.data.get('scene'))
myzigate.store_scene(addr, endpoint, groupaddr, scene)
def recall_scene(service):
addr = _get_addr_from_service_request(service)
endpoint = _to_int(service.data.get('endpoint', '1'))
groupaddr = service.data.get('group_addr')
scene = _to_int(service.data.get('scene'))
myzigate.recall_scene(addr, endpoint, groupaddr, scene)
def scene_membership_request(service):
addr = _get_addr_from_service_request(service)
endpoint = _to_int(service.data.get('endpoint', '1'))
groupaddr = service.data.get('group_addr')
myzigate.scene_membership_request(addr, endpoint, groupaddr)
def copy_scene(service):
addr = _get_addr_from_service_request(service)
endpoint = _to_int(service.data.get('endpoint', '1'))
fromgroupaddr = service.data.get('from_group_addr')
fromscene = _to_int(service.data.get('from_scene'))
togroupaddr = service.data.get('to_group_addr')
toscene = _to_int(service.data.get('to_scene'))
myzigate.copy_scene(addr, endpoint, fromgroupaddr, fromscene, togroupaddr, toscene)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_zigate)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_zigate)
hass.services.register(DOMAIN, 'refresh_devices_list',
refresh_devices_list)
hass.services.register(DOMAIN, 'generate_templates',
generate_templates)
hass.services.register(DOMAIN, 'reset', zigate_reset)
hass.services.register(DOMAIN, 'permit_join', permit_join)
hass.services.register(DOMAIN, 'start_zigate', start_zigate)
hass.services.register(DOMAIN, 'stop_zigate', stop_zigate)
hass.services.register(DOMAIN, 'cleanup_devices', zigate_cleanup)
hass.services.register(DOMAIN, 'refresh_device',
refresh_device,
schema=REFRESH_DEVICE_SCHEMA)
hass.services.register(DOMAIN, 'discover_device',
discover_device,
schema=DISCOVER_DEVICE_SCHEMA)
hass.services.register(DOMAIN, 'network_scan', network_scan)
hass.services.register(DOMAIN, 'raw_command', raw_command,
schema=RAW_COMMAND_SCHEMA)
hass.services.register(DOMAIN, 'identify_device', identify_device,
schema=IDENTIFY_SCHEMA)
hass.services.register(DOMAIN, 'remove_device', remove_device,
schema=REMOVE_SCHEMA)
hass.services.register(DOMAIN, 'initiate_touchlink', initiate_touchlink)
hass.services.register(DOMAIN, 'touchlink_factory_reset',
touchlink_factory_reset)
hass.services.register(DOMAIN, 'read_attribute', read_attribute,
schema=READ_ATTRIBUTE_SCHEMA)
hass.services.register(DOMAIN, 'write_attribute', write_attribute,
schema=WRITE_ATTRIBUTE_SCHEMA)
hass.services.register(DOMAIN, 'add_group', add_group,
schema=ADD_GROUP_SCHEMA)
hass.services.register(DOMAIN, 'get_group_membership', get_group_membership,
schema=GET_GROUP_MEMBERSHIP_SCHEMA)
hass.services.register(DOMAIN, 'remove_group', remove_group,
schema=REMOVE_GROUP_SCHEMA)
hass.services.register(DOMAIN, 'action_onoff', action_onoff,
schema=ACTION_ONOFF_SCHEMA)
hass.services.register(DOMAIN, 'build_network_table', build_network_table,
schema=BUILD_NETWORK_TABLE_SCHEMA)
hass.services.register(DOMAIN, 'ota_load_image', ota_load_image,
schema=OTA_LOAD_IMAGE_SCHEMA)
hass.services.register(DOMAIN, 'ota_image_notify', ota_image_notify,
schema=OTA_IMAGE_NOTIFY_SCHEMA)
hass.services.register(DOMAIN, 'ota_get_status', get_ota_status)
hass.services.register(DOMAIN, 'view_scene', view_scene,
schema=VIEW_SCENE_SCHEMA)
hass.services.register(DOMAIN, 'add_scene', add_scene,
schema=ADD_SCENE_SCHEMA)
hass.services.register(DOMAIN, 'remove_scene', remove_scene,
schema=REMOVE_SCENE_SCHEMA)
hass.services.register(DOMAIN, 'store_scene', store_scene,
schema=STORE_SCENE_SCHEMA)
hass.services.register(DOMAIN, 'recall_scene', recall_scene,
schema=RECALL_SCENE_SCHEMA)
hass.services.register(DOMAIN, 'scene_membership_request', scene_membership_request,
schema=SCENE_MEMBERSHIP_REQUEST_SCHEMA)
hass.services.register(DOMAIN, 'copy_scene', copy_scene,
schema=COPY_SCENE_SCHEMA)
track_time_change(hass, refresh_devices_list,
| |
:param biz_info: mapping of business IDs to their names
:param username: username
:return: list of subscription nodes
"""
# Node payloads used later by the subscription task to register hosts, run installs, etc.
subscription_nodes = []
for host in accept_list:
inner_ip = host["inner_ip"]
outer_ip = host.get("outer_ip", "")
login_ip = host.get("login_ip", "")
host_ap_id, host_node_type = self.check_ap_and_biz_scope(node_type, host, cloud_info)
instance_info = {
"is_manual": host["is_manual"],
"ap_id": host_ap_id,
"install_channel_id": host.get("install_channel_id"),
"bk_os_type": constants.BK_OS_TYPE[host["os_type"]],
"bk_host_innerip": inner_ip,
"bk_host_outerip": outer_ip,
"login_ip": login_ip,
"username": username,
"bk_biz_id": host["bk_biz_id"],
"bk_biz_name": biz_info.get(host["bk_biz_id"]),
"bk_cloud_id": host["bk_cloud_id"],
"bk_cloud_name": str(cloud_info.get(host["bk_cloud_id"], {}).get("bk_cloud_name")),
"bk_supplier_account": settings.DEFAULT_SUPPLIER_ACCOUNT,
"host_node_type": host_node_type,
"os_type": host["os_type"],
"auth_type": host.get("auth_type", "MANUAL"),
"account": host.get("account", "MANUAL"),
"port": host.get("port"),
"password": base64.b64encode(host.get("password", "").encode()).decode(),
"key": base64.b64encode(host.get("key", "").encode()).decode(),
"retention": host.get("retention", 1),
"peer_exchange_switch_for_agent": host.get("peer_exchange_switch_for_agent"),
"bt_speed_limit": host.get("bt_speed_limit"),
}
if host_node_type == constants.NodeType.PROXY and host.get("data_path"):
# For proxies, also record the data file path
instance_info.update({"data_path": host["data_path"]})
if host.get("bk_host_id"):
instance_info.update({"bk_host_id": host.get("bk_host_id")})
# Write the ticket
if host.get("auth_type") == constants.AuthType.TJJ_PASSWORD:
instance_info["extra_data"] = {"oa_ticket": host["ticket"]}
# Append to the subscription node payloads
subscription_nodes.append(
{
"bk_supplier_account": settings.DEFAULT_SUPPLIER_ACCOUNT,
"bk_cloud_id": host["bk_cloud_id"],
"ip": inner_ip,
"instance_info": instance_info,
}
)
return subscription_nodes
def update(self, accept_list: list, ip_filter_list: list, is_manual: bool = False):
"""
Update identity (authentication) information.
:param accept_list: all data that needs to be modified
:param ip_filter_list: filtered-out data
"""
identity_to_create = []
host_to_create = []
identity_id_to_delete = []
host_id_to_delete = []
# Get the retention of the identity info that needs to be modified
if not is_manual:
# Non-manual mode requires identity info
identity_info = {
identity["bk_host_id"]: {
"auth_type": identity["auth_type"],
"retention": identity["retention"],
"account": identity["account"],
"password": identity["password"],
"key": identity["key"],
"port": identity["port"],
"extra_data": identity["extra_data"],
}
for identity in models.IdentityData.objects.filter(
bk_host_id__in=[host["bk_host_id"] for host in accept_list]
).values("bk_host_id", "auth_type", "retention", "account", "password", "key", "port", "extra_data")
}
else:
# Manual mode does not need identity info
identity_info = {}
host_info = {
host["bk_host_id"]: {
"bk_host_id": host["bk_host_id"],
"bk_biz_id": host["bk_biz_id"],
"bk_cloud_id": host["bk_cloud_id"],
"inner_ip": host["inner_ip"],
"outer_ip": host["outer_ip"],
"login_ip": host["login_ip"],
"data_ip": host["data_ip"],
"os_type": host["os_type"],
"node_type": host["node_type"],
"ap_id": host["ap_id"],
"install_channel_id": host["install_channel_id"],
"upstream_nodes": host["upstream_nodes"],
"created_at": host["created_at"],
"updated_at": host["updated_at"],
"is_manual": host["is_manual"],
"extra_data": host["extra_data"],
}
for host in models.Host.objects.filter(bk_host_id__in=[host["bk_host_id"] for host in accept_list]).values()
}
# Validate identity info and hosts
update_data_info, ip_filter_list = validator.bulk_update_validate(
host_info, accept_list, identity_info, ip_filter_list, is_manual
)
# Prepare bulk_create for the identity data that needs to be modified
for host in update_data_info["modified_identity"]:
update_time = timezone.now()
the_identity = identity_info[host["bk_host_id"]]
# Update the ticket
if host.get("auth_type") == constants.AuthType.TJJ_PASSWORD:
extra_data = {"oa_ticket": host.get("ticket")}
else:
extra_data = the_identity["extra_data"]
identity_to_create.append(
models.IdentityData(
**{
"bk_host_id": host["bk_host_id"],
"auth_type": host.get("auth_type", the_identity["auth_type"]),
"account": host.get("account", the_identity["account"]),
"password": host.get("password", the_identity["password"]),
"port": host.get("port", the_identity["port"]),
"key": host.get("key", the_identity["key"]),
"retention": host.get("retention", the_identity["retention"]),
"extra_data": extra_data,
"updated_at": update_time,
}
)
)
identity_id_to_delete.append(host["bk_host_id"])
# Prepare bulk_create for the Host data that needs to be modified
for host in update_data_info["modified_host"]:
# If the OS type or the access point (AP) has changed
update_time = timezone.now()
origin_host = host_info[host["bk_host_id"]]
host_extra_data = {
"peer_exchange_switch_for_agent": host.get(
"peer_exchange_switch_for_agent",
origin_host["extra_data"].get("peer_exchange_switch_for_agent"),
),
"bt_speed_limit": host.get("bt_speed_limit", origin_host["extra_data"].get("bt_speed_limit")),
}
# Use the newly provided value, or fall back to the original data
if host.get("data_path") or origin_host["extra_data"].get("data_path"):
host_extra_data.update(
{"data_path": host.get("data_path") or origin_host["extra_data"].get("data_path")}
)
host_to_create.append(
models.Host(
**{
"bk_host_id": origin_host["bk_host_id"],
"bk_biz_id": origin_host["bk_biz_id"],
"bk_cloud_id": origin_host["bk_cloud_id"],
"inner_ip": origin_host["inner_ip"],
"outer_ip": origin_host["outer_ip"],
"login_ip": host.get("login_ip", origin_host["login_ip"]),
"data_ip": origin_host["data_ip"],
"os_type": host.get("os_type", origin_host["os_type"]),
"node_type": origin_host["node_type"],
"ap_id": host.get("ap_id", origin_host["ap_id"]),
"install_channel_id": host.get("install_channel_id", origin_host["install_channel_id"]),
"upstream_nodes": origin_host["upstream_nodes"],
"created_at": origin_host["created_at"],
"updated_at": update_time,
"is_manual": is_manual,
"extra_data": host_extra_data,
}
)
)
host_id_to_delete.append(host["bk_host_id"])
with transaction.atomic():
# Set the manual-install flag to is_manual
host_id_no_modified = [host["bk_host_id"] for host in update_data_info["not_modified_host"]]
models.Host.objects.filter(bk_host_id__in=host_id_no_modified).update(is_manual=is_manual)
# Delete the original records that are being modified
models.IdentityData.objects.filter(bk_host_id__in=identity_id_to_delete).delete()
models.Host.objects.filter(bk_host_id__in=host_id_to_delete).delete()
# Create the new records with bulk_create
models.IdentityData.objects.bulk_create(identity_to_create)
models.Host.objects.bulk_create(host_to_create)
return update_data_info["subscription_host_ids"], ip_filter_list
def operate(self, params: dict, username: str, is_superuser: bool):
"""
Offline, restart and other operations that only take bk_host_id parameters.
:param params: job type and host IDs
:param username: username
:param is_superuser: whether the user is a superuser
"""
# Get the status of currently running tasks
task_info = self.task_status_list()
if params["node_type"] == constants.NodeType.PROXY:
# Proxy-targeted operation; businesses the user is authorized to access
# Format: { bk_biz_id: bk_biz_name, ... }
user_biz = CmdbHandler().biz_id_name({"action": constants.IamActionType.proxy_operate})
filter_node_types = [constants.NodeType.PROXY]
is_proxy = True
else:
# Businesses the user is authorized to access
# Format: { bk_biz_id: bk_biz_name, ... }
user_biz = CmdbHandler().biz_id_name({"action": constants.IamActionType.agent_operate})
filter_node_types = [constants.NodeType.AGENT, constants.NodeType.PAGENT]
is_proxy = False
if params.get("exclude_hosts") is not None:
# Select-all across pages
db_host_sql = (
HostHandler()
.multiple_cond_sql(params, user_biz, proxy=is_proxy)
.exclude(bk_host_id__in=params.get("exclude_hosts", []))
.values("bk_host_id", "bk_biz_id", "bk_cloud_id", "inner_ip", "node_type", "os_type")
)
else:
# Not a select-all across pages
db_host_sql = models.Host.objects.filter(
bk_host_id__in=params["bk_host_id"], node_type__in=filter_node_types
).values("bk_host_id", "bk_biz_id", "bk_cloud_id", "inner_ip", "node_type", "os_type")
# Run the validator
db_host_ids, host_biz_scope = validator.operate_validator(
list(db_host_sql), user_biz, username, task_info, is_superuser
)
subscription = self.create_subscription(params["job_type"], db_host_ids)
# Create the Job
job = models.Job.objects.create(
job_type=params["job_type"],
subscription_id=subscription["subscription_id"],
task_id_list=[subscription["task_id"]],
statistics={
"success_count": 0,
"failed_count": 0,
"pending_count": len(db_host_ids),
"running_count": 0,
"total_count": len(db_host_ids),
},
error_hosts=[],
created_by=username,
bk_biz_scope=list(set(host_biz_scope)),
)
return {"job_id": job.id}
def create_subscription(self, job_type, nodes: list):
"""
Create a subscription task.
:param job_type: INSTALL_AGENT
:param nodes: task scope
1. Reinstall, uninstall and similar operations:
[{"bk_host_id": 1}, {"bk_host_id": 2}]
2. New install, replace:
[
{
"bk_supplier_account": "0",
"bk_cloud_id": 0,
"ip": "127.0.0.1",
"instance_info": {
"ap_id": 1,
"bk_os_type": "1",
"bk_host_innerip": "127.0.0.1",
"bk_host_outerip": "127.0.0.1",
"bk_biz_id": 2,
"bk_biz_name": "蓝鲸",
"bk_cloud_id": 0,
"bk_cloud_name": "default area",
"bk_supplier_account": "0",
"auth_type": "PASSWORD",
"account": "root",
"port": 22,
"auth_type": "PASSWORD",
"password": "<PASSWORD>",
"key": "",
"retention": 1
}
}
]
:return:
"""
params = {
"run_immediately": True,
"bk_app_code": "nodeman",
"bk_username": "admin",
"scope": {"node_type": "INSTANCE", "object_type": "HOST", "nodes": nodes},
"steps": [
{
"id": "agent",
"type": "AGENT",
"config": {"job_type": job_type},
"params": {"context": {}, "blueking_language": get_language()},
}
],
}
return NodeApi.create_subscription(params)
def retry(self, username: str, instance_id_list: List[str] = None):
"""
Retry selected instances or hosts.
:param username: username
:param instance_id_list: list of instance IDs to retry
:return: task_id_list
"""
# Check permission
self.check_job_permission(username, self.data.bk_biz_scope)
params = {
"subscription_id": self.data.subscription_id,
"task_id_list": self.data.task_id_list,
"instance_id_list": instance_id_list,
}
task_id = NodeApi.retry_subscription_task(params)["task_id"]
self.data.task_id_list.append(task_id)
if instance_id_list:
running_count = self.data.statistics["running_count"] + len(instance_id_list)
failed_count = self.data.statistics["failed_count"] - len(instance_id_list)
else:
running_count = self.data.statistics["failed_count"]
failed_count = 0
self.data.statistics.update({"running_count": running_count, "failed_count": failed_count})
self.data.status = constants.JobStatusType.RUNNING
self.data.save()
return self.data.task_id_list
def revoke(self, instance_id_list: list, username: str):
# Check permission
self.check_job_permission(username, self.data.bk_biz_scope)
params = {
"subscription_id": self.data.subscription_id,
}
if instance_id_list:
params["instance_id_list"] = instance_id_list
NodeApi.revoke_subscription_task(params)
self.data.status = constants.JobStatusType.TERMINATED
self.data.end_time = timezone.now()
self.data.save()
return self.data.task_id_list
def retrieve(self, params: Dict[str, Any], username: str):
"""
Job detail page API.
:param params: request parameters
:param username: username
"""
# Check permission
self.check_job_permission(username, self.data.bk_biz_scope)
if self.data.task_id_list:
try:
task_result = NodeApi.get_subscription_task_status(
tools.JobTools.parse2task_result_query_params(job=self.data, query_params=params)
)
except ApiResultError as err:
logger.exception(err)
if err.code != SubscriptionTaskNotReadyError().code:
raise err
# The task is not ready yet
task_result = {"list": [], "total": 0, "status_counter": {"total": 0}}
else:
# The task is ready but the execution count is 0, i.e. no host needs changes;
# insert an "ignored" host entry so the frontend can show a hint.
# task_result["total"] is the count after filtering; the full execution count
# is taken from the status counter - task_result["status_counter"]
if task_result["status_counter"].get("total", 0) == 0 and not self.data.error_hosts:
# The lazy translation object is saved to the DB via save(); without converting
# it to a string first this raises:
# TypeError at /en/ Object of type '__proxy__' is not JSON serializable
# See: https://stackoverflow.com/questions/48454398/
self.data.error_hosts = [
{"ip": "", "msg": str(_("没有需要变更的实例")), "status": constants.JobStatusType.IGNORED}
]
self.data.save(update_fields=["error_hosts"])
else:
# The task runs asynchronously; its status defaults to PENDING
task_result = {"list": [], "total": 0, "status_counter": {"total": 0}}
bk_host_ids = []
host_execute_status_list = []
for instance_status in task_result["list"]:
host_info = instance_status["instance_info"]["host"]
job_type_info = tools.JobTools.unzip_job_type(
tools.JobTools.get_job_type_in_inst_status(instance_status, self.data.job_type)
)
host_execute_status = {
"instance_id": instance_status["instance_id"],
"inner_ip": host_info["bk_host_innerip"],
"bk_host_id": host_info.get("bk_host_id"),
"bk_cloud_id": host_info["bk_cloud_id"],
"bk_cloud_name": host_info.get("bk_cloud_name"),
"bk_biz_id": host_info["bk_biz_id"],
"bk_biz_name": host_info["bk_biz_name"],
"status": instance_status["status"],
"start_time": local_dt_str2utc_dt(dt_str=instance_status["start_time"]),
"end_time": local_dt_str2utc_dt(dt_str=instance_status["finish_time"]),
**{"op_type": job_type_info["op_type"], "op_type_display": job_type_info["op_type_display"]},
**tools.JobTools.get_current_step_display(instance_status),
}
if host_execute_status["start_time"]:
end_time = host_execute_status["end_time"] or timezone.now()
# Do not use .seconds to get the total seconds of a timedelta; it is wrong
# when the interval exceeds one day.
# In the source, .seconds is computed as: days, seconds = divmod(seconds, 24*3600),
# i.e. it is only the remainder modulo one day's worth of seconds.
# The correct approach is .total_seconds():
# ((self.days * 86400 + self.seconds) * 10**6 + self.microseconds) / 10**6
# See: https://stackoverflow.com/questions/4362491/
host_execute_status["cost_time"] = (end_time - host_execute_status["start_time"]).total_seconds()
host_execute_status_list.append(host_execute_status)
bk_host_ids.append(host_info.get("bk_host_id"))
id__host_extra_info_map = {
host_extra_info["bk_host_id"]: host_extra_info
for host_extra_info in models.Host.objects.filter(bk_host_id__in=bk_host_ids).values(
"bk_host_id", "ap_id", "is_manual", "os_type", "cpu_arch"
)
}
for host in host_execute_status_list:
host["is_manual"] = id__host_extra_info_map.get(host.get("bk_host_id"), {}).get("is_manual", False)
host["ap_id"] = id__host_extra_info_map.get(host.get("bk_host_id"), {}).get("ap_id")
statuses_in_conditions = tools.JobTools.fetch_values_from_conditions(
conditions=params.get("conditions", []), key="status"
)
filter_hosts = []
for host in self.data.error_hosts:
status = host.get("status", constants.JobStatusType.FAILED)
# If conditions contains no status, or the filter list is empty, treat it as select-all; skip excluded hosts whose status is not in the filter
if statuses_in_conditions and status not in statuses_in_conditions:
continue
filter_hosts.append(
{
"filter_host": True,
"bk_host_id": host.get("bk_host_id"),
"inner_ip": host["ip"],
"bk_cloud_id": host.get("bk_cloud_id"),
"bk_cloud_name": host.get("bk_cloud_name"),
"bk_biz_id": host.get("bk_biz_id"),
"bk_biz_name": host.get("bk_biz_name"),
"job_id": host.get("job_id"),
"status": host.get("status") or constants.JobStatusType.FAILED,
"status_display": host.get("msg"),
"step": "",
}
)
host_execute_status_list.extend(filter_hosts)
# Fill in business names and cloud area names
cloud_id_name_map = models.Cloud.cloud_id_name_map()
biz_name_map = CmdbHandler.biz_id_name_without_permission()
for host_execute_status in host_execute_status_list:
host_execute_status.update(
bk_biz_name=biz_name_map.get(host_execute_status.get("bk_biz_id")),
bk_cloud_name=cloud_id_name_map.get(host_execute_status["bk_cloud_id"]),
)
tools.JobTools.update_job_statistics(self.data, task_result["status_counter"])
job_detail = {
"job_id": self.data.id,
"created_by": self.data.created_by,
"job_type": self.data.job_type,
"job_type_display": constants.JOB_TYPE_DICT.get(self.data.job_type, ""),
"ip_filter_list": [host["ip"] for host in self.data.error_hosts],
"total": task_result["total"],
"list": host_execute_status_list,
"statistics": self.data.statistics,
"status": self.data.status,
"end_time": self.data.end_time,
"start_time": self.data.start_time,
}
tools.JobTools.fill_cost_time(job_detail, job_detail)
tools.JobTools.fill_sub_info_to_job_detail(job=self.data, job_detail=job_detail)
if job_detail["meta"].get("category") != models.Subscription.CategoryType.POLICY:
return job_detail
# For policy-related jobs, fill in the target version and the current plugin version
policy_info = tools.PolicyTools.get_policy(self.data.subscription_id, show_deleted=True, need_steps=True)
os_cpu__config_map = tools.PolicyTools.get_os_cpu__config_map(policy_info)
bk_host_id__plugin_version_map = tools.HostV2Tools.get_bk_host_id_plugin_version_map(
project=policy_info["plugin_name"], bk_host_ids=bk_host_ids
)
for host_execute_status in job_detail["list"]:
host_extra_info = id__host_extra_info_map.get(host_execute_status["bk_host_id"])
if not host_extra_info:
host_execute_status.update({"current_version": None, "target_version": None})
continue
os_cpu_key = f"{host_extra_info['os_type'].lower()}_{host_extra_info['cpu_arch']}"
host_execute_status["current_version"] = bk_host_id__plugin_version_map.get(
host_execute_status["bk_host_id"]
)
host_execute_status["target_version"] = os_cpu__config_map.get(os_cpu_key, {}).get("version")
return job_detail
@staticmethod
def get_log_base(subscription_id: int, task_id_list: List[int], instance_id: str) -> list:
"""
Get logs by subscription task ID and instance ID.
:param subscription_id: subscription task ID
:param task_id_list: list of task IDs
:param instance_id: instance ID
:return: list of logs
"""
params = {"subscription_id": subscription_id, "instance_id": instance_id, "task_id_list": task_id_list}
task_result_detail = NodeApi.get_subscription_task_detail(params)
logs = []
if task_result_detail.get("steps"):
if task_result_detail["steps"][0].get("target_hosts"):
for step in task_result_detail["steps"][0]["target_hosts"][0].get("sub_steps"):
logs.append(
{
"step": step["node_name"],
"status": step["status"],
"log": step["log"],
"start_time": step.get("start_time"),
"finish_time": step.get("finish_time"),
}
)
return logs
def get_log(self, instance_id: str, username: str) -> list:
"""
Get logs.
:param instance_id: instance ID
:param username: username
:return: list of logs
"""
# Check permission
self.check_job_permission(username, self.data.bk_biz_scope)
# Get and return the logs
return JobHandler.get_log_base(self.data.subscription_id, self.data.task_id_list, instance_id)
def collect_log(self, instance_id: int, username: str) -> list:
self.check_job_permission(username, self.data.bk_biz_scope)
res = NodeApi.collect_subscription_task_detail({"job_id": self.job_id, "instance_id": instance_id})
return res
def retry_node(self, instance_id: str, username: str):
| |
= self._parent._succ[self._end][edge_type]
def extend(self, path):
r"""
Extends self with another path.
EXAMPLES::
sage: p = iet.Permutation('a b c d','d c b a')
sage: r = p.rauzy_diagram()
sage: g1 = r.path(p,'t','t')
sage: g2 = r.path(p.rauzy_move('t',iteration=2),'b','b')
sage: g = r.path(p,'t','t','b','b')
sage: g == g1 + g2
True
sage: g = copy(g1)
sage: g.extend(g2)
sage: g == g1 + g2
True
"""
if self._parent != path._parent:
raise ValueError("Not on the same Rauzy diagram")
if self._end != path._start:
raise ValueError("The end of the first path must the start of the second")
self._edge_types.extend(path._edge_types)
self._end = path._end
def _fast_extend(self, path):
r"""
Extension with no verification.
EXAMPLES::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram()
sage: p0, p1 = r[p]
sage: g = r.path(p)
sage: g._fast_extend(r.path(p0))
sage: g
Path of length 0 in a Rauzy diagram
sage: g._fast_extend(r.path(p1))
sage: g
Path of length 0 in a Rauzy diagram
"""
self._edge_types.extend(path._edge_types)
self._end = path._end
def __len__(self):
r"""
Returns the length of the path.
TEST::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram()
sage: len(r.path(p))
0
sage: len(r.path(p,0))
1
sage: len(r.path(p,1))
1
"""
return len(self._edge_types)
def __getitem__(self, i):
r"""
TESTS::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram()
sage: g = r.path(p,'t','b')
sage: g[0] == p
True
sage: g[1] == p.rauzy_move('t')
True
sage: g[2] == p.rauzy_move('t').rauzy_move('b')
True
sage: g[-1] == g[2]
True
sage: g[-2] == g[1]
True
sage: g[-3] == g[0]
True
"""
if i > len(self) or i < -len(self)-1:
raise IndexError("path index out of range")
if i == 0: return self.start()
if i < 0: i = i + len(self) + 1
v = self._start
for k in range(i):
v = self._parent._succ[v][self._edge_types[k]]
return self._parent._vertex_to_permutation(v)
def __add__(self, other):
r"""
Concatenation of paths.
EXAMPLES::
sage: p = iet.Permutation('a b','b a')
sage: r = p.rauzy_diagram()
sage: r.path(p) + r.path(p,'b') == r.path(p,'b')
True
sage: r.path(p,'b') + r.path(p) == r.path(p,'b')
True
sage: r.path(p,'t') + r.path(p,'b') == r.path(p,'t','b')
True
"""
if self._end != other._start:
raise ValueError("The end of the first path is not the start of the second")
res = copy(self)
res._fast_extend(other)
return res
def __mul__(self, n):
r"""
Multiple of a loop.
EXAMPLES::
sage: p = iet.Permutation('a b','b a')
sage: r = p.rauzy_diagram()
sage: l = r.path(p,'b')
sage: l * 2 == r.path(p,'b','b')
True
sage: l * 3 == r.path(p,'b','b','b')
True
"""
if not self.is_loop():
raise ValueError("Must be a loop to have multiple")
if not isinstance(n, (Integer,int)):
raise TypeError("The multiplier must be an integer")
if n < 0:
raise ValueError("The multiplier must be non negative")
res = copy(self)
for i in range(n-1):
res += self
return res
def is_loop(self):
r"""
Tests whether the path is a loop (start point = end point).
EXAMPLES::
sage: p = iet.Permutation('a b','b a')
sage: r = p.rauzy_diagram()
sage: r.path(p).is_loop()
True
sage: r.path(p,0,1,0,0).is_loop()
True
"""
return self._start == self._end
def winners(self):
r"""
Returns the list of winners associated to the edges of the path.
EXAMPLES::
sage: p = iet.Permutation('a b','b a')
sage: r = p.rauzy_diagram()
sage: r.path(p).winners()
[]
sage: r.path(p,0).winners()
['b']
sage: r.path(p,1).winners()
['a']
"""
return self.composition(
self._parent.edge_to_winner,
list.__add__)
def losers(self):
r"""
Returns a list of the losers on the path.
EXAMPLES::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram()
sage: g0 = r.path(p,'t','b','t')
sage: g0.losers()
['a', 'c', 'b']
sage: g1 = r.path(p,'b','t','b')
sage: g1.losers()
['c', 'a', 'b']
"""
return self.composition(
self._parent.edge_to_loser,
list.__add__)
def __iter__(self):
r"""
Iterator over the permutations of the path.
EXAMPLES::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram()
sage: g = r.path(p)
sage: for q in g:
....: print p
a b c
c b a
sage: g = r.path(p, 't', 't')
sage: for q in g:
....: print q, "\n*****"
a b c
c b a
*****
a b c
c a b
*****
a b c
c b a
*****
sage: g = r.path(p,'b','t')
sage: for q in g:
....: print q, "\n*****"
a b c
c b a
*****
a c b
c b a
*****
a c b
c b a
*****
"""
i = self._start
for edge_type in self._edge_types:
yield self._parent._vertex_to_permutation(i)
i = self._parent._succ[i][edge_type]
yield self.end()
def composition(self, function, composition = None):
r"""
Compose an edge function along the path.
INPUT:
- ``function`` - a function of the form (index, type) -> element; function(None, None) must return an identity element used for initialization
- ``composition`` - the composition function; if None (default), the result class's ``__mul__`` is used
AUTHOR:
- <NAME> (2009-09-29)
EXAMPLES::
sage: p = iet.Permutation('a b','b a')
sage: r = p.rauzy_diagram()
sage: def f(i,t):
....: if t is None: return []
....: return [t]
sage: g = r.path(p)
sage: g.composition(f,list.__add__)
[]
sage: g = r.path(p,0,1)
sage: g.composition(f, list.__add__)
[0, 1]
"""
result = function(None,None)
cur_vertex = self._start
p = self._parent._element
if composition is None: composition = result.__class__.__mul__
for i in self._edge_types:
self._parent._set_element(cur_vertex)
result = composition(result, function(p,i))
cur_vertex = self._parent._succ[cur_vertex][i]
return result
def right_composition(self, function, composition = None) :
r"""
Compose an edge function along the path, accumulating results on the left (i.e. in reverse order).
INPUT:
- ``function`` - a function of the form (index, type) -> element; function(None, None) must return an identity element used for initialization.
- ``composition`` - the composition function; if None (default), the result class's ``__mul__`` is used.
TEST::
sage: p = iet.Permutation('a b','b a')
sage: r = p.rauzy_diagram()
sage: def f(i,t):
....: if t is None: return []
....: return [t]
sage: g = r.path(p)
sage: g.right_composition(f,list.__add__)
[]
sage: g = r.path(p,0,1)
sage: g.right_composition(f, list.__add__)
[1, 0]
"""
result = function(None,None)
p = self._parent._element
cur_vertex = self._start
if composition is None: composition = result.__class__.__mul__
for i in self._edge_types:
self._parent._set_element(cur_vertex)
result = composition(function(p,i),result)
cur_vertex = self._parent._succ[cur_vertex][i]
return result
def __init__(self, p,
right_induction=True,
left_induction=False,
left_right_inversion=False,
top_bottom_inversion=False,
symmetric=False):
r"""
- ``self._succ`` contains successors
- ``self._pred`` contains predecessors
- ``self._element_class`` is the class of elements of ``self``
- ``self._element`` is an instance of this class (hence
contains the alphabet, the representation mode, ...). It is
used to store data about property of permutations and also as
a fast iterator.
INPUT:
- ``right_induction`` - boolean or 'top' or 'bottom': consider the
right induction
- ``left_induction`` - boolean or 'top' or 'bottom': consider the
left induction
- ``left_right_inversion`` - consider the left right inversion
- ``top_bottom_inversion`` - consider the top bottom inversion
- ``symmetric`` - consider the symmetric
TESTS::
sage: r1 = iet.RauzyDiagram('a b','b a')
sage: r2 = loads(dumps(r1))
"""
self._edge_types = []
self._index = {}
if right_induction is True:
self._index['rt_rauzy'] = len(self._edge_types)
self._edge_types.append(('rauzy_move',(0,-1)))
self._index['rb_rauzy'] = len(self._edge_types)
self._edge_types.append(('rauzy_move',(1,-1)))
elif isinstance(right_induction, str):
if right_induction == '':
raise ValueError("right_induction can not be empty string")
elif 'top'.startswith(right_induction):
self._index['rt_rauzy'] = len(self._edge_types)
self._edge_types.append(('rauzy_move',(0,-1)))
elif 'bottom'.startswith(right_induction):
self._index['rb_rauzy'] = len(self._edge_types)
self._edge_types.append(('rauzy_move',(1,-1)))
else:
raise ValueError("%s is not valid for right_induction" % (right_induction))
if left_induction is True:
self._index['lt_rauzy'] = len(self._edge_types)
self._edge_types.append(('rauzy_move',(0,0)))
self._index['lb_rauzy'] = len(self._edge_types)
self._edge_types.append(('rauzy_move',(1,0)))
elif isinstance(left_induction,str):
if left_induction == '':
raise ValueError("left_induction can not be empty string")
elif 'top'.startswith(left_induction):
self._index['lt_rauzy'] = len(self._edge_types)
self._edge_types.append(('rauzy_move', (0,0)))
elif 'bottom'.startswith(left_induction):
self._index['lb_rauzy'] = len(self._edge_types)
self._edge_types.append(('rauzy_move', (1,0)))
else:
raise ValueError("%s is not valid for left_induction" % (right_induction))
if left_right_inversion is True:
self._index['lr_inverse'] = len(self._edge_types)
self._edge_types.append(('left_right_inverse', ()))
if top_bottom_inversion is True:
self._index['tb_inverse'] = len(self._edge_types)
self._edge_types.append(('top_bottom_inverse', ()))
if symmetric is True:
self._index['symmetric'] = len(self._edge_types)
self._edge_types.append(('symmetric', ()))
self._n = len(p)
self._element_class = p.__class__
self._element = copy(p)
self._alphabet = self._element._alphabet
self._pred = {}
self._succ = {}
self.complete(p)
def __eq__(self, other):
r"""
Tests equality.
TESTS:
::
sage: iet.RauzyDiagram('a b','b a') == iet.RauzyDiagram('a b c','c b a')
False
sage: r = iet.RauzyDiagram('a b c','c b a')
sage: r1 = iet.RauzyDiagram('a c b','c b a', alphabet='abc')
sage: r2 = iet.RauzyDiagram('a b c','c a b', alphabet='abc')
sage: r == r1
True
sage: r == r2
True
sage: r1 == r2
True
::
sage: r = iet.RauzyDiagram('a b c d','d c b a')
sage: for p in r:
....: p.rauzy_diagram() == r
True
True
True
True
True
True
True
"""
return (
type(self) is type(other) and
self._edge_types == other._edge_types and
self._succ.keys()[0] in | |
<filename>vcfpy/parser.py
# -*- coding: utf-8 -*-
"""Parsing of VCF files from ``str``
"""
import ast
import functools
import math
import re
import warnings
from . import header
from . import record
from . import exceptions
from .exceptions import (
CannotConvertValue,
LeadingTrailingSpaceInKey,
UnknownFilter,
UnknownVCFVersion,
SpaceInChromLine,
)
from .compat import OrderedDict
__author__ = "<NAME> <<EMAIL>>"
# expected "#CHROM" header prefix when there are samples
REQUIRE_SAMPLE_HEADER = ("#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT")
# expected "#CHROM" header prefix when there are no samples
REQUIRE_NO_SAMPLE_HEADER = ("#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO")
#: Supported VCF versions, a warning will be issued otherwise
SUPPORTED_VCF_VERSIONS = ("VCFv4.0", "VCFv4.1", "VCFv4.2", "VCFv4.3")
class QuotedStringSplitter:
"""Helper class for splitting quoted strings
Supports interpreting quoted strings as well as bracketed arrays. Meant
for splitting the VCF header line mappings.
"""
#: state constant for normal
NORMAL = 0
#: state constant for quoted
QUOTED = 1
#: state constant for escaped
ESCAPED = 2
#: state constant for array
ARRAY = 3
#: state constant for delimiter
DELIM = 4
def __init__(self, delim=",", quote='"', brackets="[]"):
#: string delimiter
self.delim = delim
#: quote character
self.quote = quote
#: two-character string with opening and closing brackets
assert len(brackets) == 2
self.brackets = brackets
def run(self, s):
"""Split string ``s`` at delimiter, correctly interpreting quotes
Further, interprets arrays wrapped in one level of ``[]``. No
recursive brackets are interpreted (as this would make the grammar
non-regular and currently this complexity is not needed). Currently,
quoting inside of braces is not supported either. This is just to
support the example from VCF v4.3.
"""
begins, ends = [0], []
# transition table
DISPATCH = {
self.NORMAL: self._handle_normal,
self.QUOTED: self._handle_quoted,
self.ARRAY: self._handle_array,
self.DELIM: self._handle_delim,
self.ESCAPED: self._handle_escaped,
}
# run state automaton
state = self.NORMAL
for pos, c in enumerate(s):
state = DISPATCH[state](c, pos, begins, ends)
ends.append(len(s))
assert len(begins) == len(ends)
# Build resulting list
return [s[start:end] for start, end in zip(begins, ends)]
def _handle_normal(self, c, pos, begins, ends): # pylint: disable=W0613
if c == self.delim:
ends.append(pos)
return self.DELIM
elif c == self.quote:
return self.QUOTED
elif c == self.brackets[0]:
return self.ARRAY
else:
return self.NORMAL
def _handle_quoted(self, c, pos, begins, ends): # pylint: disable=W0613
if c == "\\":
return self.ESCAPED
elif c == self.quote:
return self.NORMAL
else:
return self.QUOTED
def _handle_array(self, c, pos, begins, ends): # pylint: disable=W0613
if c == self.brackets[1]:
return self.NORMAL
else:
return self.ARRAY
def _handle_delim(self, c, pos, begins, ends): # pylint: disable=W0613
begins.append(pos)
return self.NORMAL
def _handle_escaped(self, c, pos, begins, ends): # pylint: disable=W0613
return self.QUOTED
def split_quoted_string(s, delim=",", quote='"', brackets="[]"):
return QuotedStringSplitter(delim, quote, brackets).run(s)
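# Illustrative example (added for clarity, not part of the original source).
# The splitter breaks a header-mapping body at commas while leaving commas inside
# quotes and inside one level of brackets untouched:
#
#   split_quoted_string('ID=DP,Description="Total Depth, all samples",Values=[a, b]')
#   -> ['ID=DP', 'Description="Total Depth, all samples"', 'Values=[a, b]']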
def split_mapping(pair_str):
"""Split the ``str`` in ``pair_str`` at ``'='``
Warn if key needs to be stripped
"""
orig_key, value = pair_str.split("=", 1)
key = orig_key.strip()
if key != orig_key:
warnings.warn(
"Mapping key {} has leading or trailing space".format(repr(orig_key)),
LeadingTrailingSpaceInKey,
)
return key, value
def parse_mapping(value):
"""Parse the given VCF header line mapping
Such a mapping consists of "key=value" pairs, separated by commas and
wrapped into angular brackets ("<...>"). Strings are usually quoted,
for certain known keys, exceptions are made, depending on the tag key.
This, however, only becomes important when serializing.
:raises: :py:class:`vcfpy.exceptions.InvalidHeaderException` if
there was a problem parsing the file
"""
if not value.startswith("<") or not value.endswith(">"):
raise exceptions.InvalidHeaderException(
"Header mapping value was not wrapped in angular brackets"
)
# split the comma-separated list into pairs, ignoring commas in quotes
pairs = split_quoted_string(value[1:-1], delim=",", quote='"')
# split these pairs into key/value pairs, converting flags to mappings
# to True
key_values = []
for pair in pairs:
if "=" in pair:
key, value = split_mapping(pair)
if value.startswith('"') and value.endswith('"'):
value = ast.literal_eval(value)
elif value.startswith("[") and value.endswith("]"):
value = [v.strip() for v in value[1:-1].split(",")]
else:
key, value = pair, True
key_values.append((key, value))
# return completely parsed mapping as OrderedDict
return OrderedDict(key_values)
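# Illustrative example (added for clarity, not part of the original source):
#
#   parse_mapping('<ID=DP,Number=1,Type=Integer,Description="Total Depth">')
#   -> OrderedDict([('ID', 'DP'), ('Number', '1'), ('Type', 'Integer'),
#                   ('Description', 'Total Depth')])
#
# Values stay strings at this stage; numeric conversion happens later in
# convert_field_value() when record fields are parsed.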
class HeaderLineParserBase:
"""Parse into appropriate HeaderLine"""
def parse_key_value(self, key, value):
"""Parse the key/value pair
:param str key: the key to use in parsing
:param str value: the value to parse
:returns: :py:class:`vcfpy.header.HeaderLine` object
"""
raise NotImplementedError("Must be overridden")
class StupidHeaderLineParser(HeaderLineParserBase):
"""Parse into HeaderLine (no particular structure)"""
def parse_key_value(self, key, value):
return header.HeaderLine(key, value)
class MappingHeaderLineParser(HeaderLineParserBase):
"""Parse into HeaderLine (no particular structure)"""
def __init__(self, line_class):
"""Initialize the parser"""
#: the class to use for the VCF header line
self.line_class = line_class
def parse_key_value(self, key, value):
return self.line_class(key, value, parse_mapping(value))
def build_header_parsers():
"""Return mapping for parsers to use for each VCF header type
Inject the WarningHelper into the parsers.
"""
result = {
"ALT": MappingHeaderLineParser(header.AltAlleleHeaderLine),
"contig": MappingHeaderLineParser(header.ContigHeaderLine),
"FILTER": MappingHeaderLineParser(header.FilterHeaderLine),
"FORMAT": MappingHeaderLineParser(header.FormatHeaderLine),
"INFO": MappingHeaderLineParser(header.InfoHeaderLine),
"META": MappingHeaderLineParser(header.MetaHeaderLine),
"PEDIGREE": MappingHeaderLineParser(header.PedigreeHeaderLine),
"SAMPLE": MappingHeaderLineParser(header.SampleHeaderLine),
"__default__": StupidHeaderLineParser(), # fallback
}
return result
# Field value converters
_CONVERTERS = {
"Integer": int,
"Float": float,
"Flag": lambda x: True,
"Character": str,
"String": str,
}
def convert_field_value(type_, value):
"""Convert atomic field value according to the type"""
if value == ".":
return None
elif type_ in ("Character", "String"):
if "%" in value:
for k, v in record.UNESCAPE_MAPPING:
value = value.replace(k, v)
return value
else:
try:
return _CONVERTERS[type_](value)
except ValueError:
warnings.warn(
("{} cannot be converted to {}, keeping as " "string.").format(value, type_),
CannotConvertValue,
)
return value
def parse_field_value(field_info, value):
"""Parse ``value`` according to ``field_info``
"""
if field_info.id == "FT":
return [x for x in value.split(";") if x != "."]
elif field_info.type == "Flag":
return True
elif field_info.number == 1:
return convert_field_value(field_info.type, value)
else:
if value == ".":
return []
else:
return [convert_field_value(field_info.type, x) for x in value.split(",")]
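# Illustrative behaviour (added for clarity, not part of the original source),
# assuming ``field_info`` objects exposing ``id``/``type``/``number``:
#
#   type "Integer", number 1,   value "10"    -> 10
#   type "Float",   number ".", value "1.0,2" -> [1.0, 2.0]
#   id "FT",                    value "q10;." -> ['q10']
#   value "."                                 -> None (scalar) or [] (list-valued)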
# Regular expression for break-end
BREAKEND_PATTERN = re.compile("[\\[\\]]")
def parse_breakend(alt_str):
"""Parse breakend and return tuple with results, parameters for BreakEnd
constructor
"""
arr = BREAKEND_PATTERN.split(alt_str)
mate_chrom, mate_pos = arr[1].split(":", 1)
mate_pos = int(mate_pos)
if mate_chrom[0] == "<":
mate_chrom = mate_chrom[1:-1]
within_main_assembly = False
else:
within_main_assembly = True
FWD_REV = {True: record.FORWARD, False: record.REVERSE}
orientation = FWD_REV[alt_str[0] == "[" or alt_str[0] == "]"]
mate_orientation = FWD_REV["[" in alt_str]
if orientation == record.FORWARD:
sequence = arr[2]
else:
sequence = arr[0]
return (mate_chrom, mate_pos, orientation, mate_orientation, sequence, within_main_assembly)
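# Illustrative example (added for clarity, not part of the original source):
# for the ALT string "G]17:198982]" this returns the mate chromosome "17",
# mate position 198982, the two orientation constants, the attached sequence
# "G", and within_main_assembly=True (no "<...>" wrapper around the mate contig).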
def process_sub_grow(ref, alt_str):
"""Process substution where the string grows"""
if len(alt_str) == 0:
raise exceptions.InvalidRecordException("Invalid VCF, empty ALT")
elif len(alt_str) == 1:
if ref[0] == alt_str[0]:
return record.Substitution(record.DEL, alt_str)
else:
return record.Substitution(record.INDEL, alt_str)
else:
return record.Substitution(record.INDEL, alt_str)
def process_sub_shrink(ref, alt_str):
"""Process substution where the string shrink"""
if len(ref) == 0:
raise exceptions.InvalidRecordException("Invalid VCF, empty REF")
elif len(ref) == 1:
if ref[0] == alt_str[0]:
return record.Substitution(record.INS, alt_str)
else:
return record.Substitution(record.INDEL, alt_str)
else:
return record.Substitution(record.INDEL, alt_str)
def process_sub(ref, alt_str):
"""Process substitution"""
if len(ref) == len(alt_str):
if len(ref) == 1:
return record.Substitution(record.SNV, alt_str)
else:
return record.Substitution(record.MNV, alt_str)
elif len(ref) > len(alt_str):
return process_sub_grow(ref, alt_str)
else: # len(ref) < len(alt_str):
return process_sub_shrink(ref, alt_str)
def process_alt(header, ref, alt_str): # pylint: disable=W0613
"""Process alternative value using Header in ``header``"""
# By its nature, this function contains a large number of case distinctions
if "]" in alt_str or "[" in alt_str:
return record.BreakEnd(*parse_breakend(alt_str))
elif alt_str[0] == "." and len(alt_str) > 0:
return record.SingleBreakEnd(record.FORWARD, alt_str[1:])
elif alt_str[-1] == "." and len(alt_str) > 0:
return record.SingleBreakEnd(record.REVERSE, alt_str[:-1])
elif alt_str[0] == "<" and alt_str[-1] == ">":
inner = alt_str[1:-1]
return record.SymbolicAllele(inner)
else: # substitution
return process_sub(ref, alt_str)
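# Illustrative dispatch (added for clarity, not part of the original source):
#
#   process_alt(header, "A", "T")     -> record.Substitution(record.SNV, "T")
#   process_alt(header, "A", "<DEL>") -> record.SymbolicAllele("DEL")
#   process_alt(header, "G", "G]17:198982]") goes through parse_breakend()
#   and yields a record.BreakEnd.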
class HeaderParser:
"""Helper class for parsing a VCF header
"""
def __init__(self):
#: Sub parsers to use for parsing the header lines
self.sub_parsers = build_header_parsers()
def parse_line(self, line):
"""Parse VCF header ``line`` (trailing '\r\n' or '\n' is ignored)
:param str line: ``str`` with line to parse
:returns: appropriate :py:class:`HeaderLine` parsed from ``line``
:raises: :py:class:`vcfpy.exceptions.InvalidHeaderException` if
there was a problem parsing the file
"""
if not line or not line.startswith("##"):
raise exceptions.InvalidHeaderException(
'Invalid VCF header line (must start with "##") {}'.format(line)
)
if "=" not in line:
raise exceptions.InvalidHeaderException(
'Invalid VCF header line (must contain "=") {}'.format(line)
)
line = line[len("##") :].rstrip() # trim '^##' and trailing whitespace
# split key/value pair at "="
key, value = split_mapping(line)
sub_parser = self.sub_parsers.get(key, self.sub_parsers["__default__"])
return sub_parser.parse_key_value(key, value)
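# Illustrative example (added for clarity, not part of the original source):
#
#   HeaderParser().parse_line('##fileformat=VCFv4.3\n')
#   -> header.HeaderLine('fileformat', 'VCFv4.3')
#
# Structured lines such as '##INFO=<...>' are routed to the matching
# MappingHeaderLineParser built by build_header_parsers().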
class RecordParser:
"""Helper class for parsing VCF records"""
def __init__(self, header, samples, record_checks=None):
#: Header with the meta information
self.header = header
#: | |
between Entities, Specifications and
# attributes, please see @ref entities_specifications_and_attributes
# "this" page.
#
# The action of 'publishing' itself, is split into two parts, depending on
# the nature of the item to be published.
#
# @li **Preflight** When a Host is about to create some new media/asset.
# @li **Registration** When a Host is ready to publish media that exists.
#
# For examples of how to correctly call these parts of the
# API within a host, see the @ref examples page.
#
# @note The term '@ref publish' is somewhat loaded. It generally means
# something different depending on who you are talking to. See the @ref
# publish "Glossary entry" for more on this, but to help avoid confusion,
# this API provides the @ref updateTerminology call, in order to allow the
# implementation to standardize some of the language and terminology used in a
# Hosts presentation of the asset management system with other integrations
# of the system.
#
# @{
def preflight(self, targetEntityRefs, entitySpecs, context, hostSession):
"""
Prepares for some work to be done to create data for the
referenced entity. The entity may not yet exist (@ref
entity_reference). This call is designed to allow sanity
checking, placeholder creation or any other sundry preparatory
actions to be carried out.
Generally, this will be called before register() in any host
that creates media, where the return to @ref managementPolicy
has the constants.kWillManagePath bit set.
@param targetEntityRefs `List[str]` An @ref entity_reference
for each entity that it is desired to publish the forthcoming
data to. See the notes in the API documentation for the
specifics of this.
@param entitySpecs `List[`
specifications.EntitySpecification `]`
A description of each entity that is being published.
@param context Context The calling context. This is not
replaced with an array in order to simplify implementation.
Otherwise, transactional handling has the potential to be
extremely complex if different contexts are allowed.
@param hostSession HostSession The API session.
@return `List[Union[str,`
exceptions.PreflightError, exceptions.RetryableError `]]`
The preflight result for each corresponding entity. If
successful, this should be an @ref entity_reference that the
host should resolve to determine the path to write media to.
This may or may not be the same as the input reference. A host
will resolve this reference to get the working URL before
writing any files or other data. If preflight was
unsuccessful, the result for an entity should be either a
`PreflightError` if some fatal exception happens during
preflight, indicating the process should be aborted; or
`RetryableError` if any non-fatal error occurs that means the
host should retry from the beginning of any given process.
@note it is important for the implementation to pay attention
to @ref openassetio.Context.Context.retention
"Context.retention", as not all hosts will support the
reference changing at this point.
@see @ref register
"""
return targetEntityRefs
def register(self, primaryStrings, targetEntityRefs, entitySpecs, context, hostSession):
"""
Publish entities to the @ref asset_management_system.
This instructs the implementation to ensure a valid entity
exists for each given reference and spec. This will be called
either in isolation or after calling preflight, depending on the
nature of the data being published and the `kWillManagePath` bit
of the returned @ref managementPolicy.
This is an opportunity to do other things in the host as part of
publishing if required. The context's locale will tell you more
about the specifics of the calling application. Depending on the
implementation of your plugin, you can use this opportunity to
make use of the host-native SDK to extract additional
information or schedule additional processes to produce
derivative data.
@param primaryStrings `List[str]` The @ref primary_string that
each entity should resolve to if passed to a call to
resolveEntityReference(). This may be left blank if there is
no meaningful string representation of that entity (eg: a
'sequence' in a hierarchy). This must be stored by the
manager and a logically equivalent value returned by @ref
resolveEntityReference for the same reference. The meaning of
'logical' here, in the case of a URL, is that it points to
the same data. The manager is free to relocate data as
required. If a primary string is not a URL, then it should be
returned verbatim.
@param targetEntityRefs `List[str]` The @ref entity_reference
of each entity to publish. It is up to the manager to ensure
that this is meaningful, as it is most likely implementation
specific. For example, if a 'Shot' specification is requested
to be published to a reference that points to a 'Sequence' it
makes sense to interpret this as a 'add a shot of this spec
to the sequence'. For other types of entity, there may be
different constraints on what makes sense.
@param entitySpecs `List[`
specifications.EntitySpecification `]`
A description of each entity (or 'asset') that is being
published. It is *not* required for the implementation to
store any information contained in the specification, though
it may choose to use it if it is meaningful. The host will
separately call setEntityAttributes() if it wishes to persist
any other information in an entity.
@param context Context The calling context. This is not
replaced with an array in order to simplify implementation.
Otherwise, transactional handling has the potential to be
extremely complex if different contexts are allowed.
@param hostSession HostSession The API session.
@return `List[Union[str,`
exceptions.RegistrationError, exceptions.RetryableError `]]`
The publish result for each corresponding entity. This is
either an @ref entity_reference to the 'final' entity created
by the publish action (which does not need to be the same as
the corresponding entry in `targetEntityRefs`); a
`RegistrationError` if some fatal exception happens during
publishing, indicating the process should be aborted; or
`RetryableError` if any non-fatal error occurs that means the
host should retry from the beginning of any given process.
@note it is important for the implementation to pay attention to
openassetio.Context.Context.retention, as not all Hosts will
support the reference changing at this point.
@see @ref preflight
@see @ref resolveEntityReference
"""
raise NotImplementedError
## @}
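##
# A rough host-side sketch of how the two publishing phases above are usually
# combined (illustrative pseudocode only; the host-side variable names and the
# resolve call shown here are assumptions, not part of this interface):
#
#   refs = manager.preflight(targetRefs, specs, context, hostSession)
#   # resolve each returned reference to a working URL/path,
#   # e.g. via resolveEntityReference(...)
#   # ... the host writes its media/data to the working paths ...
#   finalRefs = manager.register(primaryStrings, refs, specs, context,
#                                hostSession)
#
# Per-entity errors (PreflightError, RegistrationError, RetryableError) should
# be checked in the returned lists after each call.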
##
# @name Manager State
#
# A single 'task' in a host, may require more than one interaction with
# the asset management system.
#
# Because the @ref openassetio.managerAPI.ManagerInterface
# "ManagerInterface" is effectively state-less. To simplify error
# handling, and allow an implementation to know which interactions
# are related, this API supports the concept of a @ref manager_state
# object. This is contained in every @ref Context and passed to
# relevant calls.
#
# This mechanism may be used for a variety of purposes. For example, it
# could ensure that queries are made from a coherent time stamp during a
# render, or to undo the publishing of multiple assets. It can also be used
# to define 'transactions' - groups of related actions that may be cancelled
# together/rolled back.
#
# @note Not all implementations may support transactions, there is no
# requirement for any of the functions in this group being implemented. The
# defaults are effectively no-ops.
#
# @{
def createState(self, hostSession, parentState=None):
"""
Create a new object to represent the state of the interface and
return it (or some handle that can be persisted within the
context). You are free to implement this however you like, as
long as it can be uniquely represented by the object returned
from this function.
        This method is called whenever a new @ref Context is made by a
        call to @ref openassetio.hostAPI.Session.Session.createContext. The
        returned object is then stored in the newly created Context, and is
        consequently available to all API calls in the ManagerInterface that
        take a Context instance, via @ref
        openassetio.Context.Context.managerInterfaceState
        "managerInterfaceState". Your implementation can then use this
        to anchor the API call to a particular snapshot of the state of
        the asset inventory.
This object is also extracted from the context and passed
# -*- coding: utf-8 -*-
# from __future__ import absolute_import, print_function, division
from future.utils import with_metaclass
from builtins import str
from builtins import range
import numpy as np
import scipy as sp
from abc import ABCMeta, abstractmethod
from scipy import integrate
import scipy.interpolate as interpolate
from . import core
from . import refstate
from . import _debye
__all__ = ['ThermalEos','ThermalCalc']
# 'CompressedThermalEos','CompressedThermalCalc']
#====================================================================
# Base Classes
#====================================================================
def set_calculator(eos_mod, kind, kind_opts, external_bcoef=False):
assert kind in kind_opts, (
kind + ' is not a valid thermal calculator. '+
'You must select one of: ' + str(kind_opts))
if kind=='Debye':
calc = _Debye(eos_mod)
elif kind=='Einstein':
calc = _Einstein(eos_mod)
elif kind=='PTherm':
calc = _PTherm(eos_mod)
elif kind=='GenRosenfeldTarazona':
calc = _GenRosenfeldTarazona(eos_mod, external_bcoef=external_bcoef)
elif kind=='ConstHeatCap':
calc = _ConstHeatCap(eos_mod)
elif kind=='Cp-Berman':
calc = _Cp_Berman(eos_mod)
elif kind=='Cp-Fei':
calc = _Cp_Fei(eos_mod)
elif kind=='Cp-Maier-Kelley':
calc = _Cp_Maier_Kelley(eos_mod)
else:
raise NotImplementedError(kind+' is not a valid '+\
'Thermal Calculator.')
eos_mod._add_calculator(calc, calc_type='thermal')
pass
#====================================================================
class ThermalEos(with_metaclass(ABCMeta, core.Eos)):
"""
EOS model for thermal energy heating path.
    The heating path can be either isochoric (V=const) or isobaric (P=const).
    For this restricted path, thermodynamic properties depend only on
    temperature.
"""
_path_opts = ['V','P']
_kind_opts = ['Debye','Einstein','GenRosenfeldTarazona','ConstHeatCap',
'Cp-Berman','Cp-Fei','Cp-Maier-Kelley']
def __init__(self, kind='Debye', natom=1, model_state={}):
ref_compress_state='P0'
ref_thermal_state='T0'
ref_energy_type='E0'
self._pre_init(natom=natom)
set_calculator(self, kind, self._kind_opts)
# self._set_ref_state()
refstate.set_calculator(self, ref_compress_state=ref_compress_state,
ref_thermal_state=ref_thermal_state,
ref_energy_type=ref_energy_type)
self._post_init(model_state=model_state)
pass
def __repr__(self):
calc = self.calculators['thermal']
return ("ThermalEos(kind={kind}, natom={natom}, "
"model_state={model_state}, "
")"
.format(kind=repr(calc.name),
natom=repr(self.natom),
model_state=self.model_state
)
)
def _set_ref_state(self):
calc = self.calculators['thermal']
path_const = calc.path_const
# Add needed extra parameters (depending on path_const)
if path_const=='V':
param_ref_names = ['V0']
param_ref_units = ['ang^3']
param_ref_defaults = [100]
param_ref_scales = [100]
elif path_const=='P':
P0 = 0
param_ref_names = ['P0']
param_ref_units = ['GPa']
param_ref_defaults = [0.0]
param_ref_scales = [100]
else:
raise NotImplementedError(
'path_const '+path_const+' is not valid for ThermalEos.')
self._path_const = path_const
self._param_ref_names = param_ref_names
self._param_ref_units = param_ref_units
self._param_ref_defaults = param_ref_defaults
self._param_ref_scales = param_ref_scales
pass
@property
def path_opts(self):
return self._path_opts
@property
def path_const(self):
return self._path_const
def energy(self, T_a):
calculator = self.calculators['thermal']
energy_a = calculator._calc_energy(T_a)
return energy_a
def heat_capacity(self, T_a):
calculator = self.calculators['thermal']
heat_capacity_a = calculator._calc_heat_capacity(T_a)
return heat_capacity_a
def entropy(self, T_a):
calculator = self.calculators['thermal']
entropy_a = calculator._calc_entropy(T_a)
return entropy_a
def dEdV_T(self, T_a):
pass
def dEdV_S(self, T_a):
pass
#====================================================================
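# A minimal usage sketch (illustrative only; it assumes the default Debye
# parameters defined by the calculators below and that the surrounding
# package imports cleanly):
#
#     eos = ThermalEos(kind='Debye', natom=5)
#     T = np.array([300., 1000., 2000.])
#     Cv = eos.heat_capacity(T)
#     S = eos.entropy(T)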
#====================================================================
# Calculators
#====================================================================
class ThermalCalc(with_metaclass(ABCMeta, core.Calculator)):
"""
    Abstract Equation of State calculator class for a reference thermal
    energy path. The path can be either isochoric (V=const) or isobaric
    (P=const); for this restricted path, thermodynamic properties depend
    only on temperature.
"""
_path_opts = ['V','P']
def __init__(self, eos_mod, path_const=None):
# assert path_const in self.path_opts, path_const + ' is not a valid ' + \
# 'path const. You must select one of: ' + path_opts
self._eos_mod = eos_mod
self._init_params()
self._required_calculators = None
self._path_const = path_const
pass
@property
def path_opts(self):
return self._path_opts
@property
def path_const(self):
return self._path_const
@property
def ndof(self):
return self._ndof
####################
# Required Methods #
####################
@abstractmethod
def _init_params(self):
"""Initialize list of calculator parameter names."""
pass
# @abstractmethod
# def _calc_heat_capacity(self, T_a):
# """Returns heat capacity as a function of temperature."""
# pass
def _get_Cv_limit(self):
Cvlimfac, = self.eos_mod.get_param_values(param_names=['Cvlimfac'])
ndof = self.ndof
natom = self.eos_mod.natom
# print('ndof = ',ndof)
# print('natom = ',natom)
# print('Cvlimfac = ',Cvlimfac)
Cvlim = Cvlimfac*ndof/2*natom*core.CONSTS['kboltz']
return Cvlim
# @abstractmethod
# def _calc_energy(self, T_a):
# """Returns thermal energy as a function of temperature."""
# pass
@abstractmethod
def _calc_entropy(self, T_a):
pass
@abstractmethod
def _calc_dEdV_T(self, T_a):
pass
@abstractmethod
def _calc_dEdV_S(self, T_a):
pass
####################
# Optional Methods #
####################
def _calc_param_deriv(self, fname, paramname, V_a, dxfrac=1e-6):
scale_a, paramkey_a = self.get_param_scale(apply_expand_adj=True )
scale = scale_a[paramkey_a==paramname][0]
# print 'scale: ' + np.str(scale)
#if (paramname is 'E0') and (fname is 'energy'):
# return np.ones(V_a.shape)
try:
fun = getattr(self, fname)
# Note that self is implicitly included
val0_a = fun(V_a)
except:
assert False, 'That is not a valid function name ' + \
'(e.g. it should be press or energy)'
try:
param = core.get_params([paramname])[0]
dparam = scale*dxfrac
# print 'param: ' + np.str(param)
# print 'dparam: ' + np.str(dparam)
except:
assert False, 'This is not a valid parameter name'
# set param value in eos_d dict
core.set_params([paramname,], [param+dparam,])
# Note that self is implicitly included
dval_a = fun(V_a) - val0_a
# reset param to original value
core.set_params([paramname], [param])
deriv_a = dval_a/dxfrac
return deriv_a
def _calc_energy_perturb(self, V_a):
"""Returns Energy pertubation basis functions resulting from fractional changes to EOS params."""
fname = 'energy'
scale_a, paramkey_a = self.get_param_scale(
apply_expand_adj=self.expand_adj)
Eperturb_a = []
for paramname in paramkey_a:
iEperturb_a = self._calc_param_deriv(fname, paramname, V_a)
Eperturb_a.append(iEperturb_a)
Eperturb_a = np.array(Eperturb_a)
return Eperturb_a, scale_a, paramkey_a
#====================================================================
#====================================================================
# Implementations
#====================================================================
class _Debye(ThermalCalc):
"""
    Implementation copied from BurnMan.
"""
_path_opts=['V']
_ndof = 6
def __init__(self, eos_mod):
super(_Debye, self).__init__(eos_mod, path_const='V')
pass
def _init_params(self):
"""Initialize list of calculator parameter names."""
T0 = 0
T0_scale = 300
theta0 = 1000
Cvlimfac = 1
param_names = ['theta0', 'Cvlimfac']
param_units = ['K', '1']
param_defaults = [theta0, Cvlimfac]
param_scales = [theta0, Cvlimfac]
self._set_params(param_names, param_units,
param_defaults, param_scales)
pass
def _calc_heat_capacity(self, T_a, theta=None):
"""Returns heat capacity as a function of temperature."""
T_a = core.fill_array(T_a)
Cvlim = self._get_Cv_limit()
if theta is None:
theta, = self.eos_mod.get_param_values(param_names=['theta0'])
x = theta/T_a
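        # Debye model: Cv(T) = Cvlim * f_D(theta/T), approaching the
        # Dulong-Petit-type limit Cvlim as T >> theta.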
Cv_values = Cvlim*_debye.debye_heat_capacity_fun(x)
return Cv_values
def _calc_energy(self, T_a, theta=None, T0=None):
"""Returns heat capacity as a function of temperature."""
T_a = core.fill_array(T_a)
Cvlim = self._get_Cv_limit()
if theta is None:
theta, = self.eos_mod.get_param_values(param_names=['theta0'])
if T0 is None:
T0 = self.eos_mod.refstate.ref_temp()
x = core.fill_array(theta/T_a)
xref = core.fill_array(theta/T0)
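        # Thermal energy measured relative to the reference temperature T0:
        #   E(T) = Cvlim * [T*D3(theta/T) - T0*D3(theta/T0)],
        # where D3 is the third-order Debye function.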
energy = Cvlim*(T_a*_debye.debye3_fun(x)
-T0*_debye.debye3_fun(xref))
return energy
def _calc_entropy(self, T_a, theta=None, T0=None, theta0=None):
"""Returns heat capacity as a function of temperature."""
T_a = core.fill_array(T_a)
Cvlim = self._get_Cv_limit()
if T0 is None:
T0 = self.eos_mod.refstate.ref_temp()
if theta is None:
theta, = self.eos_mod.get_param_values(param_names=['theta0'])
if theta0 is None:
theta0, = self.eos_mod.get_param_values(param_names=['theta0'])
x = core.fill_array(theta/T_a)
xref = core.fill_array(theta0/T0)
entropy = Cvlim*(+_debye.debye_entropy_fun(x)
-_debye.debye_entropy_fun(xref))
return entropy
def _calc_dEdV_T(self, V_a, T_a, theta_a, gamma_a):
Cvlim = self._get_Cv_limit()
x = theta_a/np.array(T_a)
dEdV_T = -Cvlim*gamma_a/V_a*theta_a*_debye.debye3_deriv_fun(x)
return dEdV_T
def _calc_dEdV_S(self, V_a, T_a, theta_a, gamma_a):
x = theta_a/np.array(T_a)
dEdV_S = 1/x*self._calc_dEdV_T(V_a, T_a, theta_a, gamma_a)
return dEdV_S
#====================================================================
class _Einstein(ThermalCalc):
_ndof = 6
_EPS = np.finfo(np.float).eps
_path_opts=['V']
def __init__(self, eos_mod):
super(_Einstein, self).__init__(eos_mod, path_const='V')
pass
def _init_params(self):
"""Initialize list of calculator parameter names."""
natom = self.eos_mod.natom
T0 = 0
T0_scale = 300
theta0 = 1000
Cvlimfac = 1
param_names = ['theta0', 'Cvlimfac']
param_units = ['K', '1']
param_defaults = [theta0, Cvlimfac]
param_scales = [theta0, Cvlimfac]
self._set_params(param_names, param_units,
param_defaults, param_scales)
pass
def _calc_energy_factor(self, x):
fac = 1/(np.exp(x)-1)
try:
fac[1/x < self._EPS] = 0
except TypeError:
if 1/x < self._EPS:
fac = 0
return fac
def _calc_flogf(self, x, Nosc):
f = Nosc*self._calc_energy_factor(x)
flogf = f*np.log(f)
try:
flogf[f==0] = 0.0
except TypeError:
if f==0:
flogf = 0
return flogf
def _calc_heat_capacity(self, T_a, theta=None):
"""Returns heat capacity as a function of temperature."""
theta0, = self.eos_mod.get_param_values(param_names=['theta0'])
Cvlim = self._get_Cv_limit()
if theta is None:
theta = theta0
T_a = np.array(T_a)
x = theta/T_a
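        # Einstein model: Cv = Cvlim * x**2 * exp(x) / (exp(x) - 1)**2 with
        # x = theta/T; entries where 1/x underflows (T << theta) are zeroed
        # below to avoid overflow in exp(x).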
Cv_a = Cvlim*x**2*np.exp(x)/(np.exp(x)-1)**2
Cv_a[1/x < self._EPS] = 0
return Cv_a
def _calc_energy(self, T_a, theta=None, T0=None):
"""Returns heat capacity as a function of temperature."""
T_a = core.fill_array(T_a)
Cvlim = self._get_Cv_limit()
if theta is None:
theta, = self.eos_mod.get_param_values(param_names=['theta0'])
if T0 is None:
T0 = self.eos_mod.refstate.ref_temp()
x = core.fill_array(theta/T_a)
xref = core.fill_array(theta/T0)
# NOTE: Cannot include zero-pt energy since we are using energy diff
energy = Cvlim*theta*(
self._calc_energy_factor(x)-self._calc_energy_factor(xref))
return energy
def _calc_entropy(self, T_a, theta=None, T0=None, theta0=None):
"""Returns heat capacity as a function of temperature."""
T_a = core.fill_array(T_a)
Cvlim = self._get_Cv_limit()
if T0 is None:
T0 = self.eos_mod.refstate.ref_temp()
if theta is None:
theta, = self.eos_mod.get_param_values(param_names=['theta0'])
if theta0 is None:
theta0, = self.eos_mod.get_param_values(param_names=['theta0'])
x = core.fill_array(theta/T_a)
xref = core.fill_array(theta0/T0)
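        # Count thermal quanta per oscillator at T and at the reference state,
        # then take the entropy difference by Boltzmann counting over the Nosc
        # oscillators.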
Nosc = Cvlim/core.CONSTS['kboltz']
Equanta = Nosc*self._calc_energy_factor(x)
Squanta = self._calc_flogf(x, Nosc)
Equanta0 = Nosc*self._calc_energy_factor(xref)
Squanta0 = self._calc_flogf(xref, Nosc)
entropy = core.CONSTS['kboltz']*(
(Nosc+Equanta)*np.log(Nosc+Equanta)
- (Nosc+Equanta0)*np.log(Nosc+Equanta0)
- (Squanta-Squanta0))
return entropy
def _einstein_fun(self, x):
energy_fac = 1/2 + 1/(np.exp(x)-1)
return energy_fac
def _einstein_deriv_fun(self, x):
deriv_fac = -np.exp(x)/(np.exp(x)-1)**2
return deriv_fac
# FIX THESE!!!!
def _calc_dEdV_T(self, V_a, T_a, theta_a, gamma_a, Cvmax=None):
Cvlim = self._get_Cv_limit()
# Cvmax, = self.eos_mod.get_param_values(
# param_names=['Cvmax'], overrides=[Cvmax])
x = theta_a/np.array(T_a)
dEdV_S = self._calc_dEdV_S(V_a, T_a, theta_a, gamma_a, Cvmax=Cvlim)
dEdV_T = dEdV_S - Cvlim*theta_a*gamma_a/V_a*x*self._einstein_deriv_fun(x)
return dEdV_T
def _calc_dEdV_S(self, V_a, T_a, theta_a, gamma_a, Cvmax=None):
Cvlim = self._get_Cv_limit()
# Cvmax, = self.eos_mod.get_param_values(
# param_names=['Cvmax'], overrides=[Cvmax])
x = theta_a/np.array(T_a)
dEdV_S = -Cvlim*theta_a*gamma_a/V_a*self._einstein_fun(x)
return dEdV_S
#====================================================================
class _GenRosenfeldTarazona(ThermalCalc):
    _ndof
"""Data Provider implementation module for constructing data based on standard stock indicators
The data provider in this module is not intended to be instantiated outside of this module. Instead, when this
module is imported, the provider will create an instance of itself and register itself with the global
DataProviderRegistry. After this, data consumers can register themselves as recipients of data from this provider
using the id located at data_provider_static_names.INDICATOR_BLOCK_PROVIDER_ID.
A detailed list of the arguments that can be provided to this provider can be found in the generate_data method.
TODO[<NAME>] Extract repeated code from generate_prediction_data and generate_data after global style rewrite
"""
import configparser
import datetime
import numpy
from data_providing_module import configurable_registry
from data_providing_module import data_provider_registry
from data_providing_module.data_providers import data_provider_static_names
from general_utils.config import config_util
from general_utils.logging import logger
from general_utils.mysql_management.mysql_tables import stock_data_table
from stock_data_analysis_module.data_processing_module.data_retrieval_module import ranged_data_retriever
from stock_data_analysis_module.indicators import moving_average
from stock_data_analysis_module.indicators import bollinger_band
from stock_data_analysis_module.indicators import stochastic_oscillator
_ENABLED_CONFIG_ID = "enabled"
def _standardize_price_data(price_data):
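    """Scale an array of prices (or volumes) into a bounded range.

    Each element is shifted by the array minimum and divided by the array
    maximum; note that this differs slightly from textbook min-max scaling,
    which divides by (max - min).
    """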
ret_data = numpy.copy(price_data)
ret_data = ret_data.flatten()
max_price = numpy.max(ret_data)
min_price = numpy.min(ret_data)
for i in range(len(ret_data)):
ret_data[i] = (ret_data[i]-min_price)/max_price
return ret_data.reshape(price_data.shape)
class IndicatorBlockProvider(data_provider_registry.DataProviderBase):
"""Data Provider that will provide data constructed using stock indicators normally used by stock traders
Details on these indicators can be found in the modules of the indicators package.
Additionally, this provider provides support for configurable parameters through the configuration file. These
parameters are listed in the Configurable Parameters section.
Configurable Parameters:
        enabled: Whether this provider is enabled for consumers to receive data from.
"""
def generate_prediction_data(self, *args, **kwargs):
"""Generates data for a Consumer wanting to make predictions about the next day's state.
This method is identical to generate_data for all but the return values. As such, for arguments
and further details, see generate_data.
Returns:
List[Tuple[str, numpy.ndarray, float, float]]. Broken down, for every stock, there is a tuple
containing the ticker, the data block generated, the average price, and the average volume.
            The average price and volume are included so that the original magnitudes of the prices and volumes can
            be reconstructed should the predictions require it.
For a breakdown of the rows in the data block, see generate_data's documentation in the Returns section.
"""
if len(args) < 1:
raise ValueError("Expected %d positional argument but received %d" % (1, len(args)))
data_block_length = args[0]
max_additional_period = 0
for key, value in self.default_kwargs.items():
if key not in kwargs:
kwargs[key] = self.default_kwargs[key]
if key.endswith("period") and value > max_additional_period:
max_additional_period = value
padded_data_block_length = max_additional_period + data_block_length
start_date = datetime.datetime.now() - datetime.timedelta(weeks=(padded_data_block_length + 360) // 5)
start_date = start_date.isoformat()[:10].replace('-', '/')
end_date = datetime.datetime.now().isoformat()[:10].replace('-', '/')
data_retriever = ranged_data_retriever.RangedDataRetriever(
[
stock_data_table.HIGH_PRICE_COLUMN_NAME,
stock_data_table.LOW_PRICE_COLUMN_NAME,
stock_data_table.CLOSING_PRICE_COLUMN_NAME,
stock_data_table.VOLUME_COLUMN_NAME
],
start_date,
end_date)
ret_blocks = []
for ticker, sources in data_retriever.data_sources.items():
ticker_data = data_retriever.retrieveData(ticker, sources[0])
ticker_data = numpy.array(ticker_data, dtype=numpy.float32)
high = ticker_data[:, 0]
low = ticker_data[:, 1]
close = ticker_data[:, 2]
volume = ticker_data[:, 3]
# high, low, close, volume = ticker_data # unpack manually
avg_high = numpy.average(high)
avg_low = numpy.average(low)
avg_close = numpy.average(close)
avg_price = ((avg_high * len(high)) + (avg_low * len(high)) + (avg_close * len(high))) / (len(high) * 3)
avg_vol = numpy.average(volume)
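            # Express prices and volume as fractional deviations from their
            # averages so that magnitudes are comparable across tickers.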
std_high = [(high[i] - avg_price) / avg_price
for i in range(len(high))]
std_low = [(low[i] - avg_price) / avg_price
for i in range(len(high))]
std_close = [(close[i] - avg_price) / avg_price
for i in range(len(high))]
volume = [(volume[i] - avg_vol) / avg_vol
for i in range(len(volume))]
if len(std_high) < padded_data_block_length:
len_warning = (
"Could not process %s into an indicator block, "
"needed %d days of trading data but received %d" %
(ticker, padded_data_block_length, len(std_high))
)
logger.logger.log(logger.WARNING, len_warning)
continue
sma = moving_average.SMA(std_close, kwargs['sma_period'])
sma = sma[-data_block_length:]
boll_band = bollinger_band.bollinger_band(std_high, std_low, std_close,
smoothing_period=kwargs["bollinger_band_period"],
standard_deviations=kwargs["bollinger_band_stdev"]
)
oscillator = stochastic_oscillator.stochastic_oscillator(close, high,
low, kwargs['oscillator_period'])
oscillator = oscillator[-data_block_length:]
oscillator /= 100
data_block = numpy.zeros((8, data_block_length), dtype=numpy.float32)
data_block[0] = std_high[-data_block_length:]
data_block[1] = std_low[-data_block_length:]
data_block[2] = std_close[-data_block_length:]
data_block[3] = volume[-data_block_length:]
data_block[4] = sma
data_block[5] = boll_band[0][-data_block_length:]
data_block[6] = boll_band[1][-data_block_length:]
data_block[7] = oscillator
ret_blocks.append((ticker, data_block, avg_price, avg_vol))
return ret_blocks
def write_default_configuration(self, section: "configparser.SectionProxy"):
"""Writes default configuration values into the SectionProxy provided.
For more details see abstract class documentation.
"""
section[_ENABLED_CONFIG_ID] = "True"
def load_configuration(self, parser: "configparser.ConfigParser"):
"""Attempts to load the configurable parameters for this provider from the provided parser.
For more details see abstract class documentation.
"""
section = config_util.create_type_section(parser, self)
if not parser.has_option(section.name, _ENABLED_CONFIG_ID):
self.write_default_configuration(section)
enabled = parser.getboolean(section.name, _ENABLED_CONFIG_ID)
if enabled:
data_provider_registry.registry.register_provider(
data_provider_static_names.INDICATOR_BLOCK_PROVIDER_ID, self)
def generate_data(self, *args, **kwargs):
"""Generates data using stock indicators over a set period of time
Generates blocks (numpy arrays) of data using indicators that are used by normal stock traders.
These include bollinger bands, simple moving average and the stochastic oscillator.
The types of data that get fed into these algorithms come from the high, low, closing, and volume columns
of the data tables in the database. Additionally, these values are standardized to allow algorithms to draw
conclusions based off the relative change in the stock, and not be blinded by the magnitude of the prices or
volumes.
This standardization process is performed by calculating the average price across the highs, lows, and closing
prices of the stock, then every element in each of the lists is updated according to the following equation
(assume that price is the high, low, or closing price being modified):
(price - avg_price) / avg_price
The same process is also performed on the volume data.
Additionally, consumers are required to pass in a positional argument through *args, and may pass in
keyword arguments. These are covered in the Arguments section below
Arguments:
*args:
Only one positional argument is required.
data_block_length: int This controls how many columns will
be present in the return data block. As a note the data block will always have 8 rows.
**kwargs:
Several keyword arguments are supported.
sma_period: int Controls how many days are considered in the calculation of the simple moving average.
For a given day x, the previous x-sma_period days will be used
bollinger_band_stdev: int Controls how many standard deviations will be used in the calculation
of the bollinger bands
bollinger_band_period: int Controls how many days will be used in the calculation of the bollinger
bands.
oscillator_period: int Controls the number of days used in the calculation of the stochastic oscillator
Returns:
Numpy.ndarray object with three dimensions. This is effectively a 3D matrix of data blocks, where each
data block will have 8 rows and data_block_length columns.
Each data block row corresponds to one data type or calculated indicator values, are listed below:
0: high price
1: low price
2: closing price
3: volume
4: simple moving average (SMA)
5: upper bollinger band
6: lower bollinger band
7: stochastic oscillator
"""
if len(args) < 1:
raise ValueError("Expected %d positional argument but received %d" % (1, len(args)))
data_block_length = args[0]
max_additional_period = 0
for key, value in self.default_kwargs.items():
if key not in kwargs:
kwargs[key] = self.default_kwargs[key]
if key.endswith("period") and value > max_additional_period:
max_additional_period = value
padded_data_block_length = max_additional_period + data_block_length
start_date = datetime.datetime.now() - datetime.timedelta(weeks=(padded_data_block_length + 360) // 5)
start_date = start_date.isoformat()[:10].replace('-', '/')
end_date = datetime.datetime.now().isoformat()[:10].replace('-', '/')
data_retriever = ranged_data_retriever.RangedDataRetriever(
[
stock_data_table.HIGH_PRICE_COLUMN_NAME,
stock_data_table.LOW_PRICE_COLUMN_NAME,
stock_data_table.CLOSING_PRICE_COLUMN_NAME,
stock_data_table.VOLUME_COLUMN_NAME
],
start_date,
end_date)
ret_blocks = []
for ticker, sources in data_retriever.data_sources.items():
ticker_data = data_retriever.retrieveData(ticker, sources[0])
ticker_data = numpy.array(ticker_data, dtype=numpy.float32)
high = ticker_data[:, 0]
low = ticker_data[:, 1]
close = ticker_data[:, 2]
volume = ticker_data[:, 3]
# high, low, close, volume = ticker_data # unpack manually
std_high = _standardize_price_data(high)
std_close = _standardize_price_data(close)
std_low = _standardize_price_data(low)
volume = _standardize_price_data(volume)
if len(std_high) < padded_data_block_length:
len_warning = (
"Could not process %s into an indicator block, "
"needed %d days of trading data but received %d" %
(ticker, padded_data_block_length, len(std_high))
)
logger.logger.log(logger.WARNING, len_warning)
continue
sma = moving_average.SMA(std_close, kwargs['sma_period'])
sma = sma[-data_block_length:]
boll_band = bollinger_band.bollinger_band(std_high, std_low, std_close,
smoothing_period=kwargs["bollinger_band_period"],
standard_deviations=kwargs["bollinger_band_stdev"]
)
oscillator = stochastic_oscillator.stochastic_oscillator(close, high,
low, kwargs['oscillator_period'])
oscillator = oscillator[-data_block_length:]
oscillator /= 100
data_block = numpy.zeros((8, data_block_length), dtype=numpy.float32)
data_block[0] = std_high[-data_block_length:]
data_block[1] = std_low[-data_block_length:]
data_block[2] = std_close[-data_block_length:]
data_block[3] = volume[-data_block_length:]
data_block[4] = sma
data_block[5] = boll_band[0][-data_block_length:]
data_block[6] = boll_band[1][-data_block_length:]
data_block[7] = oscillator
ret_blocks.append(data_block)
return numpy.array(ret_blocks, dtype=numpy.float32)
def __init__(self):
"""Initializes IndicatorBlockProvider and registers the | |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
import subprocess
import unittest
import PRESUBMIT
from PRESUBMIT_test_mocks import MockFile, MockAffectedFile
from PRESUBMIT_test_mocks import MockInputApi, MockOutputApi
_TEST_DATA_DIR = 'base/test/data/presubmit'
class VersionControlConflictsTest(unittest.TestCase):
def testTypicalConflict(self):
lines = ['<<<<<<< HEAD',
' base::ScopedTempDir temp_dir_;',
'=======',
' ScopedTempDir temp_dir_;',
'>>>>>>> master']
errors = PRESUBMIT._CheckForVersionControlConflictsInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(3, len(errors))
self.assertTrue('1' in errors[0])
self.assertTrue('3' in errors[1])
self.assertTrue('5' in errors[2])
def testIgnoresReadmes(self):
lines = ['A First Level Header',
'====================',
'',
'A Second Level Header',
'---------------------']
errors = PRESUBMIT._CheckForVersionControlConflictsInFile(
MockInputApi(), MockFile('some/polymer/README.md', lines))
self.assertEqual(0, len(errors))
class UmaHistogramChangeMatchedOrNotTest(unittest.TestCase):
def testTypicalCorrectlyMatchedChange(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Bla.Foo.Dummy", true)']
diff_java = [
'RecordHistogram.recordBooleanHistogram("Bla.Foo.Dummy", true)']
diff_xml = ['<histogram name="Bla.Foo.Dummy"> </histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testTypicalNotMatchedChange(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Bla.Foo.Dummy", true)']
diff_java = [
'RecordHistogram.recordBooleanHistogram("Bla.Foo.Dummy", true)']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual('warning', warnings[0].type)
self.assertTrue('foo.cc' in warnings[0].items[0])
self.assertTrue('foo.java' in warnings[0].items[1])
def testTypicalNotMatchedChangeViaSuffixes(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Bla.Foo.Dummy", true)']
diff_java = [
'RecordHistogram.recordBooleanHistogram("Bla.Foo.Dummy", true)']
diff_xml = ['<histogram_suffixes name="SuperHistogram">',
' <suffix name="Dummy"/>',
' <affected-histogram name="Snafu.Dummy"/>',
'</histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual('warning', warnings[0].type)
self.assertTrue('foo.cc' in warnings[0].items[0])
self.assertTrue('foo.java' in warnings[0].items[1])
def testTypicalCorrectlyMatchedChangeViaSuffixes(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Bla.Foo.Dummy", true)']
diff_java = [
'RecordHistogram.recordBooleanHistogram("Bla.Foo.Dummy", true)']
diff_xml = ['<histogram_suffixes name="SuperHistogram">',
' <suffix name="Dummy"/>',
' <affected-histogram name="Bla.Foo"/>',
'</histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testTypicalCorrectlyMatchedChangeViaSuffixesWithSeparator(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Snafu_Dummy", true)']
diff_java = ['RecordHistogram.recordBooleanHistogram("Snafu_Dummy", true)']
diff_xml = ['<histogram_suffixes name="SuperHistogram" separator="_">',
' <suffix name="Dummy"/>',
' <affected-histogram name="Snafu"/>',
'</histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testCorrectlyMatchedChangeViaSuffixesWithLineWrapping(self):
diff_cc = [
'UMA_HISTOGRAM_BOOL("LongHistogramNameNeedsLineWrapping.Dummy", true)']
diff_java = ['RecordHistogram.recordBooleanHistogram(' +
'"LongHistogramNameNeedsLineWrapping.Dummy", true)']
diff_xml = ['<histogram_suffixes',
' name="LongHistogramNameNeedsLineWrapping"',
' separator=".">',
' <suffix name="Dummy"/>',
' <affected-histogram',
' name="LongHistogramNameNeedsLineWrapping"/>',
'</histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testNameMatch(self):
# Check that the detected histogram name is "Dummy" and not, e.g.,
# "Dummy\", true); // The \"correct"
diff_cc = ['UMA_HISTOGRAM_BOOL("Dummy", true); // The "correct" histogram']
diff_java = [
'RecordHistogram.recordBooleanHistogram("Dummy", true);' +
' // The "correct" histogram']
diff_xml = ['<histogram name="Dummy"> </histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testSimilarMacroNames(self):
diff_cc = ['PUMA_HISTOGRAM_COOL("Mountain Lion", 42)']
diff_java = [
'FakeRecordHistogram.recordFakeHistogram("Mountain Lion", 42)']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testMultiLine(self):
diff_cc = ['UMA_HISTOGRAM_BOOLEAN(', ' "Multi.Line", true)']
diff_cc2 = ['UMA_HISTOGRAM_BOOLEAN(', ' "Multi.Line"', ' , true)']
diff_java = [
'RecordHistogram.recordBooleanHistogram(',
' "Multi.Line", true);',
]
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo2.cc', diff_cc2),
MockFile('some/path/foo.java', diff_java),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual('warning', warnings[0].type)
self.assertTrue('foo.cc' in warnings[0].items[0])
self.assertTrue('foo2.cc' in warnings[0].items[1])
class BadExtensionsTest(unittest.TestCase):
def testBadRejFile(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', ''),
MockFile('some/path/foo.cc.rej', ''),
MockFile('some/path2/bar.h.rej', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(2, len(results[0].items))
self.assertTrue('foo.cc.rej' in results[0].items[0])
self.assertTrue('bar.h.rej' in results[0].items[1])
def testBadOrigFile(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('other/path/qux.h.orig', ''),
MockFile('other/path/qux.h', ''),
MockFile('other/path/qux.cc', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(1, len(results[0].items))
self.assertTrue('qux.h.orig' in results[0].items[0])
def testGoodFiles(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('other/path/qux.h', ''),
MockFile('other/path/qux.cc', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(0, len(results))
class CheckSingletonInHeadersTest(unittest.TestCase):
def testSingletonInArbitraryHeader(self):
diff_singleton_h = ['base::subtle::AtomicWord '
'base::Singleton<Type, Traits, DifferentiatingType>::']
diff_foo_h = ['// base::Singleton<Foo> in comment.',
'friend class base::Singleton<Foo>']
diff_foo2_h = [' //Foo* bar = base::Singleton<Foo>::get();']
diff_bad_h = ['Foo* foo = base::Singleton<Foo>::get();']
mock_input_api = MockInputApi()
mock_input_api.files = [MockAffectedFile('base/memory/singleton.h',
diff_singleton_h),
MockAffectedFile('foo.h', diff_foo_h),
MockAffectedFile('foo2.h', diff_foo2_h),
MockAffectedFile('bad.h', diff_bad_h)]
warnings = PRESUBMIT._CheckSingletonInHeaders(mock_input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual(1, len(warnings[0].items))
self.assertEqual('error', warnings[0].type)
self.assertTrue('Found base::Singleton<T>' in warnings[0].message)
def testSingletonInCC(self):
diff_cc = ['Foo* foo = base::Singleton<Foo>::get();']
mock_input_api = MockInputApi()
mock_input_api.files = [MockAffectedFile('some/path/foo.cc', diff_cc)]
warnings = PRESUBMIT._CheckSingletonInHeaders(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
class InvalidOSMacroNamesTest(unittest.TestCase):
def testInvalidOSMacroNames(self):
lines = ['#if defined(OS_WINDOWS)',
' #elif defined(OS_WINDOW)',
' # if defined(OS_MACOSX) || defined(OS_CHROME)',
'# else // defined(OS_MAC)',
'#endif // defined(OS_MACOS)']
errors = PRESUBMIT._CheckForInvalidOSMacrosInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(len(lines), len(errors))
self.assertTrue(':1 OS_WINDOWS' in errors[0])
self.assertTrue('(did you mean OS_WIN?)' in errors[0])
def testValidOSMacroNames(self):
lines = ['#if defined(%s)' % m for m in PRESUBMIT._VALID_OS_MACROS]
errors = PRESUBMIT._CheckForInvalidOSMacrosInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(0, len(errors))
class InvalidIfDefinedMacroNamesTest(unittest.TestCase):
def testInvalidIfDefinedMacroNames(self):
lines = ['#if defined(TARGET_IPHONE_SIMULATOR)',
'#if !defined(TARGET_IPHONE_SIMULATOR)',
'#elif defined(TARGET_IPHONE_SIMULATOR)',
'#ifdef TARGET_IPHONE_SIMULATOR',
' # ifdef TARGET_IPHONE_SIMULATOR',
'# if defined(VALID) || defined(TARGET_IPHONE_SIMULATOR)',
'# else // defined(TARGET_IPHONE_SIMULATOR)',
'#endif // defined(TARGET_IPHONE_SIMULATOR)']
errors = PRESUBMIT._CheckForInvalidIfDefinedMacrosInFile(
MockInputApi(), MockFile('some/path/source.mm', lines))
self.assertEqual(len(lines), len(errors))
def testValidIfDefinedMacroNames(self):
lines = ['#if defined(FOO)',
'#ifdef BAR']
errors = PRESUBMIT._CheckForInvalidIfDefinedMacrosInFile(
MockInputApi(), MockFile('some/path/source.cc', lines))
self.assertEqual(0, len(errors))
class CheckAddedDepsHaveTestApprovalsTest(unittest.TestCase):
def calculate(self, old_include_rules, old_specific_include_rules,
new_include_rules, new_specific_include_rules):
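    # Renders the old and new rules as DEPS file contents and returns the set
    # of DEPS paths that PRESUBMIT reports as newly added dependencies.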
return PRESUBMIT._CalculateAddedDeps(
os.path, 'include_rules = %r\nspecific_include_rules = %r' % (
old_include_rules, old_specific_include_rules),
'include_rules = %r\nspecific_include_rules = %r' % (
new_include_rules, new_specific_include_rules))
def testCalculateAddedDeps(self):
old_include_rules = [
'+base',
'-chrome',
'+content',
'-grit',
'-grit/",',
'+jni/fooblat.h',
'!sandbox',
]
old_specific_include_rules = {
'compositor\.*': {
'+cc',
},
}
new_include_rules = [
'-ash',
'+base',
'+chrome',
'+components',
'+content',
'+grit',
'+grit/generated_resources.h",',
'+grit/",',
'+jni/fooblat.h',
'+policy',
'+' + os.path.join('third_party', 'WebKit'),
]
new_specific_include_rules = {
'compositor\.*': {
'+cc',
},
'widget\.*': {
'+gpu',
},
}
expected = set([
os.path.join('chrome', 'DEPS'),
os.path.join('gpu', 'DEPS'),
os.path.join('components', 'DEPS'),
os.path.join('policy', 'DEPS'),
os.path.join('third_party', 'WebKit', 'DEPS'),
])
self.assertEqual(
expected,
self.calculate(old_include_rules, old_specific_include_rules,
new_include_rules, new_specific_include_rules))
def testCalculateAddedDepsIgnoresPermutations(self):
old_include_rules = [
'+base',
'+chrome',
]
new_include_rules = [
'+chrome',
'+base',
]
self.assertEqual(set(),
self.calculate(old_include_rules, {}, new_include_rules,
{}))
class JSONParsingTest(unittest.TestCase):
def testSuccess(self):
input_api = MockInputApi()
filename = 'valid_json.json'
contents = ['// This is a comment.',
'{',
' "key1": ["value1", "value2"],',
' "key2": 3 // This is an inline comment.',
'}'
]
input_api.files = [MockFile(filename, contents)]
self.assertEqual(None,
PRESUBMIT._GetJSONParseError(input_api, filename))
def testFailure(self):
input_api = MockInputApi()
test_data = [
('invalid_json_1.json',
['{ x }'],
'Expecting property name:'),
('invalid_json_2.json',
['// Hello world!',
'{ "hello": "world }'],
'Unterminated string starting at:'),
('invalid_json_3.json',
['{ "a": "b", "c": "d", }'],
'Expecting property name:'),
('invalid_json_4.json',
['{ "a": "b" "c": "d" }'],
'Expecting , delimiter:'),
]
input_api.files = [MockFile(filename, contents)
for (filename, contents, _) in test_data]
for (filename, _, expected_error) in test_data:
actual_error = PRESUBMIT._GetJSONParseError(input_api, filename)
self.assertTrue(expected_error in str(actual_error),
"'%s' not found in '%s'" % (expected_error, actual_error))
def testNoEatComments(self):
input_api = MockInputApi()
file_with_comments = 'file_with_comments.json'
contents_with_comments = ['// This is a comment.',
'{',
' "key1": ["value1", "value2"],',
' "key2": 3 // This is an inline comment.',
'}'
]
file_without_comments = 'file_without_comments.json'
contents_without_comments = ['{',
' "key1": ["value1", "value2"],',
' "key2": 3',
'}'
]
input_api.files = [MockFile(file_with_comments, contents_with_comments),
MockFile(file_without_comments,
contents_without_comments)]
self.assertEqual('No JSON object could be decoded',
str(PRESUBMIT._GetJSONParseError(input_api,
file_with_comments,
eat_comments=False)))
self.assertEqual(None,
PRESUBMIT._GetJSONParseError(input_api,
file_without_comments,
eat_comments=False))
class IDLParsingTest(unittest.TestCase):
def testSuccess(self):
input_api = MockInputApi()
filename = 'valid_idl_basics.idl'
contents = ['// Tests a valid IDL file.',
'namespace idl_basics {',
' enum EnumType {',
' name1,',
' name2',
' };',
'',
' dictionary MyType1 {',
' DOMString a;',
' };',
'',
' callback Callback1 = void();',
' callback Callback2 = void(long x);',
' callback Callback3 = void(MyType1 arg);',
' callback Callback4 = void(EnumType type);',
'',
' interface Functions {',
' static void function1();',
' static void function2(long x);',
' static void function3(MyType1 arg);',
' static void function4(Callback1 cb);',
' static void function5(Callback2 cb);',
' static void function6(Callback3 cb);',
' static void function7(Callback4 cb);',
' };',
'',
' interface Events {',
' static void onFoo1();',
' static void onFoo2(long x);',
' static void onFoo2(MyType1 arg);',
' static void onFoo3(EnumType type);',
' };',
'};'
]
input_api.files = [MockFile(filename, contents)]
self.assertEqual(None,
PRESUBMIT._GetIDLParseError(input_api, filename))
def testFailure(self):
input_api = MockInputApi()
test_data = [
('invalid_idl_1.idl',
['//',
'namespace test {',
' dictionary {',
' DOMString s;',
' };',
'};'],
'Unexpected "{" after keyword "dictionary".\n'),
# TODO(yoz): Disabled because it causes the IDL parser to hang.
# See crbug.com/363830.
# ('invalid_idl_2.idl',
# (['namespace test {',
# ' dictionary MissingSemicolon {',
# ' DOMString a',
# ' DOMString b;',
# ' };',
# '};'],
# 'Unexpected symbol DOMString after symbol a.'),
('invalid_idl_3.idl',
['//',
'namespace | |
stdout, and stderr is returned.
"""
p = subprocess.Popen(
self.composeCmdString(),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return [p.returncode,stdout.strip(),stderr.strip()]
def __dir__(self):
keys = self.__dict__.keys()
if "parameterdefs" in self.__dict__:
keys = list(set(keys + self.parameterdefs.keys()))
return sorted(keys)
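    # Parameters declared in self.parameterdefs are exposed as instance
    # attributes: reads fall back to self.cmdparametervalues and writes are
    # routed through self.setArgValue().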
def __getattr__(self,name):
if "parameterdefs" in self.__dict__ and name in self.parameterdefs:
if name in self.cmdparametervalues:
return self.cmdparametervalues[name]
else:
return None
else:
return self.__dict__[name]
def __setattr__(self,name,value):
if "parameterdefs" in self.__dict__ and name in self.parameterdefs:
self.setArgValue(name, value)
else:
self.__dict__[name] = value
class SbatchCommand(Command):
"""
Modifications specific to Sbatch, including script generation
and setting dependencies
"""
def __init__(self,*args,**kwargs):
"""
Set a script path, so that *.sbatch scripts can be written. Default is cwd.
"""
if "scriptpath" in kwargs:
self.scriptpath = kwargs["scriptpath"]
del kwargs["scriptpath"]
else:
self.scriptpath = "./"
super(self.__class__,self).__init__(*args,**kwargs)
def composeCmdString(self):
# If options like --help or --usage are set, use parent for command processing
for option in SBATCH_NOSUBMIT_OPTIONS:
if option in self.cmdparametervalues and self.cmdparametervalues[option]:
return super(self.__class__,self).composeCmdString()
cmdstring = "#!/bin/bash\n"
# Determines if the argument pattern is an optional one
optionalargre = re.compile("\?.+?\?")
# Determines if the argument pattern has quoting of the <VALUE>
quotecheckre = re.compile("(\S)<VALUE>(\S)")
# Go through the parameter defs in order and
# for any parameter with a value, substitute the value into the
# "pattern"
# Sort the parameterdef keys based on pdef.order
sortednames = sorted(self.parameterdefs.iterkeys(),key=lambda name: int(self.parameterdefs[name].order))
scriptname = None
commands = []
for pname in sortednames:
pdef = self.parameterdefs[pname]
if pname in self.cmdparametervalues or (hasattr(pdef,'default') and pdef.default is not None):
value = None
if pname in self.cmdparametervalues:
value = self.cmdparametervalues[pname]
elif hasattr(pdef,'default'):
value = pdef.default
else:
continue
if value == False:
continue
# Process scriptname
if pname == "scriptname":
scriptname = value
continue
# Process command(s)
if pname == "command":
if isinstance(value,basestring):
commands.append(value + "\n")
else:
if not isinstance(value,list):
value = [value]
for command in value:
if isinstance(command,Command):
commands.append("%s\n" % command.composeCmdString())
elif isinstance(command,basestring):
commands.append(command + "\n")
else:
raise Exception("Why are you using %s as an sbatch command?" % command.__class__.__name__)
continue
# If <VALUE> is surrounded by something (e.g. single quotes)
# then we should make sure that char is escaped in the value
quotestring = None
match = quotecheckre.search(pdef.pattern)
if match is not None:
if len(match.groups()) == 2:
if match.group(1) == match.group(2):
quotestring = match.group(1)
# Do some courtesy escaping
if isinstance(value,basestring) and quotestring is not None:
# Remove existing escapes
value = value.replace("\\" + quotestring,quotestring)
# Escape the quote
value = value.replace(quotestring,"\\" + quotestring)
# Substitute the value into the pattern
if optionalargre.search(pdef.pattern) is not None:
# This is the case of a switch with an optional argument
if value == True:
# Adding the switch with no argument
cmdstring += "#SBATCH %s\n" % optionalargre.sub("",pdef.pattern)
else:
# Remove the question marks and substitute the VALUE
cmdstring += "#SBATCH %s\n" % pdef.pattern.replace("?","").replace("<VALUE>",value)
else:
if value == True:
cmdstring += "#SBATCH %s\n" % pdef.pattern
else:
cmdstring += "#SBATCH %s\n" % pdef.pattern.replace("<VALUE>",value)
cmdstring += "\n".join(commands)
scriptfile = None
if scriptname is None:
# Generate a tempfile scriptname
scriptfile = tempfile.NamedTemporaryFile(mode='w',suffix='.sbatch', dir=self.scriptpath,delete=False)
scriptname = scriptfile.name
else:
if scriptname.startswith("/"):
scriptfile = open(scriptname,'w')
else:
scriptname = os.path.join(self.scriptpath,scriptname)
scriptfile = open(scriptname,'w')
scriptfile.write(cmdstring)
scriptfile.close()
newcmdstring = ' '.join([self.bin,scriptname])
return newcmdstring.encode('ascii','ignore')
def __str__(self):
s = "%s\n" % self.composeCmdString()
for k,v in self.cmdparametervalues.iteritems():
s += '-- %s : %s\n' % (k,v)
return s
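# Illustrative sketch only -- the Command constructor and the loading of
# parameterdefs from the JSON definitions below are defined elsewhere, so the
# exact call shown here is an assumption:
#
#     sbatch = SbatchCommand(parameterdefs, scriptpath="/tmp")  # hypothetical
#     sbatch.job_name = "jobTree-task"
#     sbatch.command = "echo hello"
#     returncode, stdout, stderr = sbatch.run()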
DEFAULT_SLURM_CONF_FILE="/etc/slurm/slurm.conf"
class Slurm(object):
"""
Encapsulation of Slurmy things. Uses Command objects populated with truncated
ParameterDefs. Mostly static, stateless methods
"""
conf = None
sbatchstr = """
{
"name" : "sbatch",
"version" : "14.03.8",
"bin" : "sbatch",
"description" : "sbatch submits a batch script to SLURM.",
"cmdclass" : "jobTree.batchSystems.slurm.SbatchCommand",
"parameterdefs" : [
{
"name" : "scriptname",
"description" : "Name of the script that will be submitted with sbatch",
"pattern" : "<VALUE>",
"required" : "yes",
"order" : "1000"
},
{
"name" : "error",
"description" : "Instruct SLURM to connect the batch script standard error directly to the file name specified",
"switches" : ["--error","-e"],
"pattern" : "--error='<VALUE>'",
"required" : "no"
},
{
"name" : "job_name",
"description" : "Instruct SLURM to connect the batch script standard input directly to the file name specified",
"switches" : ["--job-name","-J"],
"pattern" : "--job-name=<VALUE>",
"required" : "no"
},
{
"name" : "mem",
"description" : "Specify the real memory required per node in MegaBytes.",
"switches" : ["--mem"],
"pattern" : "--mem=<VALUE>",
"required" : "no"
},
{
"name" : "mem_per_cpu",
"description" : "Mimimum memory required per allocated CPU in MegaBytes.",
"switches" : ["--mem-per-cpu"],
"pattern" : "--mem-per-cpu=<VALUE>",
"required" : "no"
},
{
"name" : "nodes",
"description" : "Request that a minimum of nodes be allocated to this job. A maximum node count may also be specified.",
"switches" : ["--nodes","-N"],
"pattern" : "--nodes=<VALUE>",
"required" : "no"
},
{
"name" : "ntasks",
"description" : "This option advises the SLURM controller that job steps run within the allocation will launch a maximum of number tasks and to provide for sufficient resources",
"switches" : ["--ntasks","-n"],
"pattern" : "--ntasks=<VALUE>",
"required" : "no"
},
{
"name" : "output",
"description" : "Instruct SLURM to connect the batch script's standard output directly to the file name specified.",
"switches" : ["--output","-o"],
"pattern" : "--output='<VALUE>'",
"required" : "no"
},
{
"name" : "partition",
"description" : "Open the output and error files using append or truncate mode as specified.",
"switches" : ["--partition","-p"],
"pattern" : "--partition=<VALUE>",
"required" : "no"
},
{
"name" : "qos",
"description" : "Request a quality of service for the job. ",
"switches" : ["--qos"],
"pattern" : "--qos=<VALUE>",
"required" : "no"
},
{
"name" : "constraint",
"description" : "Limit nodes to those with matching constraint values.",
"switches" : ["--constraint","-C"],
"pattern" : "--constraint='<VALUE>'",
"required" : "no"
},
{
"name" : "time",
"description" : "Set a limit on the total run time of the job allocation.",
"switches" : ["--time","-t"],
"pattern" : "--time=<VALUE>",
"required" : "no"
},
{
"name" : "tmp",
"description" : "Specify a minimum amount of temporary disk space (in MB).",
"switches" : ["--tmp"],
"pattern" : "--tmp=<VALUE>",
"required" : "no"
},
{
"name" : "command",
"description" : "Command to be submitted to Slurm cluster",
"switches" : [""],
"pattern" : "<VALUE>",
"required" : "no",
"order" : "1000"
}
]
}
"""
scancelstr = """
{
"name" : "scancel",
"version" : "14.03.8",
"bin" : "scancel",
"description" : "Used to signal jobs or job steps that are under the control of Slurm"
"cmdclass" : "jobTree.batchSystems.slurm.Command",
"parameterdefs" : [
{
"name" : "jobid",
"description" : "The Slurm job ID to be signaled.",
"pattern" : "<VALUE>",
"required" : "yes",
"order" : "100"
}
]
}
"""
sacctcmdstr = """
{
"name" : "sacct",
"version" : "14.03.8",
"bin" : "sacct",
"description" : "Displays accounting data for all jobs and job steps in the SLURM job accounting log or SLURM database.",
"cmdclass" : "jobTree.batchSystems.slurm.Command",
"parameterdefs" : [
{
"name" : "jobs",
"description" : "Displays information about the specified job(.step) or list of job(.step)s.",
"switches" : ["--jobs","-j"],
"pattern" : "--jobs='<VALUE>'",
"required" : "no"
},
{
"name" : "noheader",
"description" : "No heading will be added to the output. The default action is to display a header.",
"switches" : ["--noheader","-n"],
"pattern" : "--noheader",
"required" : "no"
},
{
"name" : "format",
"description" : "Comma separated list of fields.",
"switches" : ["--format","-o"],
"pattern" : "--format='<VALUE>'",
"required" : "no"
}
]
}
"""
squeuecmdstr = """
{
"name" : "squeue",
"version" : "14.03.8",
"bin" : "/usr/local/bin/squeue",
"description" : "squeue is used to view job and job step information for jobs managed by SLURM..",
"cmdclass" : "jobTree.batchSystems.slurm.Command",
"parameterdefs" : [
{
"name" : "noheader",
"description" : "Do not print a header on the output",
"switches" : ["--noheader","-h"],
"pattern" : "--noheader",
"required" : "no"
},
{
"name" : "jobs",
"description" : "Requests a comma separated list of job IDs to display. Defaults to all jobs.",
"switches" : | |
by self.eval_symbol())
"""
if tup in self.memo_C:
# result from memoization dictionary
return self.memo_C[tup]
else:
# iteratively determine coefficients by
# expansion of the Lyapunov equation
coeff = multinomial_coefficient(tup)
A0 = self.eval_symbol('A', ())
D = None # second matrix in Lyapunov equation
ind_1 = ()
ind_2 = ()
for i in tup:
if i[1] == 1:
ind_1 += (i,)
elif i[1] == 2:
ind_2 += (i,)
else:
raise KeyError("Unknown upper index for eta: %s." % i[1])
# B * B.T term
ind_B1 = [i[0] for i in ind_1]
ind_B2 = [i[0] for i in ind_2]
B1 = self.eval_symbol('B', ind_B1) * multinomial_coefficient(ind_1)
B2 = self.eval_symbol('B', ind_B2) * multinomial_coefficient(ind_2)
D = B1 * B2.T
# A * C terms
ind_A = list(ind_1)
ind_C = list(ind_2)
for i in range(len(ind_A)):
if i > 0:
ind_C.insert(0, ind_A.pop(-1))
D += ( self.eval_symbol('A', [i[0] for i in ind_A])
* self._Taylor_C(tuple(ind_C), chop_imag=chop_imag)
* multinomial_coefficient(ind_A)
* multinomial_coefficient(ind_C))
# C * A.T terms
ind_A = list(ind_2)
ind_C = list(ind_1)
for i in range(len(ind_A)):
if i > 0:
ind_C.append(ind_A.pop(0))
D += ( self._Taylor_C(tuple(ind_C), chop_imag=chop_imag)
* self.eval_symbol('A', [i[0] for i in ind_A]).T
* multinomial_coefficient(ind_A)
* multinomial_coefficient(ind_C))
            # Solve the Lyapunov equation defined by coeff*A0 and the
            # accumulated inhomogeneity D to obtain this Taylor coefficient.
func = lyapunov_equation(coeff*A0, D, chop_imag=chop_imag)
self.memo_C.update({tup: func})
return func
#-----------------------------------------
class ReactionSystem(ReactionSystemBase):
"""
Class to define a reaction system and calculate Taylor coefficients.
"""
# symbols to be evaluated by self.eval_at_phis:
_EVAL = ["eta", "S", "f", "g", "A", "C", "B", "DM"]
#-----------------------------------------
def __init__(self, data, map_dict=False, C_attempt=False, factorize=False,
omega=FREQ, Omega=SIZE, verbose=0):
"""
Class to define a reaction system and calculate Taylor coefficients.
The matrices 'A' and, if possible, 'C' as well as intermediate results
are calculated if they are not provided directly.
:Parameters:
- `data`: Dictionary that defines the reaction system.
- `map_dict`: Optional dictionary to map strings to symbols.
It may also be added to the 'data' dictionary. It is
created automatically by the self.from_string() method.
The key/value pairs are also mapped to self.s.key.
- `C_attempt`: If True, the calculation of C is attempted.
This is in general not possible and may be
unnecessarily time consuming.
- `factorize`: factorize terms with factor() function
- `omega`: frequency symbol string for the spectrum
- `Omega`: symbol string for the system size parameter
- `verbose`: print phis, A, B, DM and C (0: not at all, or
with 1: print, 2: sympy pprint, 3: IPython display)
The following keys are accepted in 'data' (see DATA_KEYS).
- `phi`: Vector of symbols for macroscopic concentrations
- `phis`: Macroscopic stationary state concentrations
- `eta`: Vector of symbols for external fluctuations
- `etavars`: Vector of variances of the external fluctuations
- `etaKs`: Vector of inverse correlation times of the external
fluctuations
- `S`: Stoichiometric matrix
- `f`: Macroscopic transition rates f_i({phi_j}, {eta_k})
- `g`: Reaction rate equations g_i({phi_j}, {eta_k})
- `A`: Jacobian matrix
- `C`: Lyapunov matrix
- `B`: Cholesky matrix
- `DM`: Diffusion matrix
One of the following sets should be provided to unambiguously
define a reaction system at macroscopic stationary state.
- eta, phi, f(phi), S
- eta, phi, g(phi), C or B or DM
- eta, A, C or B or DM, optionally phi
- eta, A, S, f, optionally phi
If any function depends on phi, the latter should be provided to
have the function evaluated at stationary concentration phis.
        Therefore, self.eval_at_phis() should be called.
:Raises: `DefinitionError` if system definition fails.
:Example: see module docstring
"""
eta = data.get('eta')
if not eta:
raise DefinitionError("'eta' is not defined")
for key in data.keys():
if not key in DATA_KEYS:
raise DefinitionError("Key '%s' is not recognized." % key)
ReactionSystemBase.__init__(self, len(eta), omega=omega,
Omega=Omega, factorize=factorize)
self.phis = None
self.eta = eta
self.C_attempt = C_attempt
# read data
phi = data.get('phi')
phis = data.get('phis')
S = data.get('S')
f = data.get('f')
g = data.get('g')
A = data.get('A')
C = data.get('C')
B = data.get('B')
DM = data.get('DM')
# remove keys without value
remove_keys = []
for key, value in data.items():
if not value:
remove_keys.append(key)
for key in remove_keys:
data.pop(key)
# get optional keys
self.etavars = data.get('etavars', self.etavars)
self.etaKs = data.get('etaKs', self.etaKs)
self.omega = data.get('omega', self.omega)
self.Omega = data.get('Omega', self.Omega)
map_dict = data.get('map_dict', {})
self.map_dict = {}
# add keys that might be missing in map_dict
for i in list(self.etavars) \
+ list(self.etaKs) \
+ [self.omega, self.Omega]:
symbol = i.as_base_exp()[0]
self.map_dict.update({symbol.name: symbol})
# map_dict has priority
self.map_dict.update(map_dict)
# make key/symbol pairs available as self.s.key
class s(object):
"""
class with symbol attributes
"""
def __init__(self,dictionary):
for key, value in dictionary.items():
setattr(self, key, value)
self.s = s(map_dict)
# dictionaries for eta with and without upper indices
ReactionSystemBase._update_eta_dicts(self)
# Jacobian at stationary state
if not A:
if not phi:
error = "'A' cannot be determined "
if S and f or g:
error += "('phi' is missing)"
else:
error = "'A' is missing"
raise DefinitionError(error)
else:
if not g:
if S and f:
g = S*f
A = g.jacobian(phi)
else:
error = "'A' cannot be determined "
if S or f:
error += "('S' or 'f' is missing)"
elif B or C or DM:
error += "('g' is missing)"
else:
error += "('A' or 'S' and 'f' are missing)"
raise DefinitionError(error)
else:
A = g.jacobian(phi)
# Lyapunov matrix at stationary state
if not C and not B:
if not DM:
if S and f:
# diffusion matrix
DM = S*diag(f)*S.T
else:
error = "'C' cannot be determined "
if g:
error += "(provide 'C', 'B' or 'DM')"
else:
error += "(provide 'C', 'B', 'DM' or 'S' and 'f')"
raise DefinitionError(error)
B = DM.cholesky()
# set existing variables as class attributes
if phis:
self.phis = Matrix(phis)
if phi:
self.phi = Matrix(phi)
if eta:
self.eta = Matrix(eta)
if S:
self.S = Matrix(S)
if f:
self.f = Matrix(f)
if g:
self.g = Matrix(g)
if A:
self.A = Matrix(A)
if C:
self.C = Matrix(C)
if B:
self.B = Matrix(B)
if DM:
self.DM = Matrix(DM)
# attempt to calculate C directly
# definition: diploma thesis equation (3.30)
if self.C_attempt:
self.C = _lyapunov_equation_C(self, self.A, self.B)
if self.C:
self.C = matsimp(self.C, factorize=self.factorize)
else:
# instead the Taylor coefficients will be calculated if needed
self.C = None
if verbose:
self.print_out("ReactionSystem", verbose)
#-----------------------------------------
@staticmethod
def from_string(data=None, yaml_file=None, C_attempt=False,
factorize=False, verbose=0):
"""
Create object from strings in a dictionary or file.
:Parameters:
- `data`: dictionary of strings
- `yaml_file`: yaml file defining a dictionary of strings
- `C_attempt`: If True, the calculation of C is attempted.
This is in general not possible and may be
unnecessarily time consuming.
- `factorize`: factorize terms with factor() function
        - `verbose`: print the obtained system definition (0: not at all,
          1: print, 2: sympy pprint, 3: IPython display)
The keys 'concentrations', 'extrinsic_variables', 'transition_rates'
        and 'stoichiometric_matrix' are required and may in part be defined by
'data' and by 'yaml_file'. To choose non-default symbols, there are
the optional keys 'normal_variances', 'inverse_correlation_times',
'frequency' and 'system_size'.
:Returns: ReactionSystem object
:Example:
see module docstring
"""
data = string_parser(data=data, yaml_file=yaml_file, verbose=verbose)
return ReactionSystem(data, C_attempt=C_attempt,
factorize=factorize, verbose=verbose)
#-----------------------------------------
def copy(self):
"""
Returns a clone of self.
"""
data = {'eta': self.eta, 'A': self.A, 'B': self.B}
new = ReactionSystem(data, map_dict=self.map_dict)
for key, item in self.__dict__.items():
setattr(new, key, item)
return new
#-----------------------------------------
def eval_at_phis(self, phis=None, solver=None, select=None,
C_attempt=False, verbose=0):
"""
Substitute concentrations phi by phis.
A warning is printed if g(phis) is not zero.
:Warning:
            Be sure that phis is the correct stationary state!
:Parameters:
- `phis`: Macroscopic stationary state concentrations
If not defined here, self.phis will be used instead.
- `solver`: solver, phis = solver(self.g, self.phi, select)
or phis = solver(self.g, self.phi), e.g. sympy.solve
- `select`: selected solution from solver (may be ambiguous)
- `C_attempt`: If True, the calculation of C is attempted.
This is forced, if the object was created with the
option C_attempt=True. The calculation is in general
not possible and may be unnecessarily time consuming.
- `verbose`: print phis, A, B, DM and C (0: | |
#!/usr/bin/python3
# Copyright (c) 2019
# Physics, Computer Science and Engineering (PCSE)
# Christopher Newport University
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author: <NAME>
from gitlab_utils import gl_auth
import gitlab_utils
import argparse
import gitlab
import sys
import datetime
class Student:
def __init__(self, email_line):
student_info = email_line.split('@')
self.email_line = email_line.strip()
student_name = student_info[0].split('.')
self.username = student_info[0].strip()
self.first_name = student_name[0].strip()
self.last_name = student_name[1].strip()
self.year = student_name[2].strip()
def get_email(self):
return self.email_line
def get_last_name(self):
return self.last_name
def get_first_name(self):
return self.first_name
def get_input(self):
return self.email_line
def mk_gl_username(self):
return self.username
def __str__(self):
        return '{}, {} ({}): {}'.format(self.last_name, self.first_name, self.year, self.email_line)
def __repr__(self):
return self.__str__()
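# Example of the expected e-mail line format (hypothetical address):
#   "jane.doe.23@example.edu" -> username "jane.doe.23",
#   first name "jane", last name "doe", year "23"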
today = datetime.datetime.now()
delta = datetime.timedelta(5*365/12) # 5 months in future
expire = today+delta
parser = argparse.ArgumentParser()
parser.add_argument('token', help='Your private access token from Gitlab')
parser.add_argument('email_input', help='A text file with 1 column of email address')
parser.add_argument('master_student_group', help='The student group on Gitlab to add all students to')
parser.add_argument('--personal_group_suffix', default='', help='The suffix to append to all student personal groups (default: no personal group)')
parser.add_argument('--expire', default=expire.isoformat()[:10],
help='The date that you want to be removed from all of these groups. Format: yyyy-mm-dd. Default 5 months from today.')
parser.add_argument('--instructor', default='', help='The Gitlab username of the instructor')
parser.add_argument('--delete_admin', default=False, help='Remove the admin from created groups (default:False)')
parser.add_argument('--gitlab_url', default='https://gitlab.pcs.cnu.edu', help='The url for the Gitlab server.')
args = parser.parse_args()
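# Typical invocation (script and file names are hypothetical):
#   python add_students.py <token> students.txt cpsc150-students-s2019 \
#       --personal_group_suffix cpsc150-s2019 --instructor jdoe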
user_is_instructor = (args.instructor == '')
delete_admin = False
student_group_name = args.master_student_group
personal_group_suffix = args.personal_group_suffix
expire_date = args.expire
print(" expiration date:"+expire_date )
print( "Args:"+str(args))
# Authenticate
try:
with gl_auth(args.gitlab_url, args.token, admin=True) as gl:
instructor_name = args.instructor if not user_is_instructor else gl.users.get(gl.user.id).username
print("instructor name: "+instructor_name)
admin_id = gl.user.id
admin_name = gl.users.get(gl.user.id).username
print(" admin name: "+admin_name)
if instructor_name == admin_name:
print(" API user is both admin and instructor ")
user_is_instructor = True
delete_admin = False
else:
try:
tmp_del_admin = args.delete_admin
if type(tmp_del_admin) == str:
                    tmp_del_admin = tmp_del_admin.lower() in ["yes", "true", "t", "1"]
elif type(tmp_del_admin) != bool:
tmp_del_admin = False
if (tmp_del_admin):
print("Delete the admin from any created groups")
delete_admin = True
except:
print("Do not delete admin from created groups!")
# Find master student group (e.g. cpsc150-students-s2018)
gl_student_group = None
try:
gl_student_group = gitlab_utils.gl_get_group_by_name(gl, student_group_name)
print(" Found existing student group "+student_group_name)
# If we can't find it, make it.
except gitlab.exceptions.GitlabHttpError as e:
print(" Create the new student group "+student_group_name)
gl_student_group = gitlab_utils.gl_create_group(gl, student_group_name)
except gitlab.exceptions.GitlabGetError as e:
print(" Create the new student group "+student_group_name)
gl_student_group = gitlab_utils.gl_create_group(gl, student_group_name)
# If we found or made the student group
if gl_student_group:
# Get the admin's account
current_user = gl.user
# Get list of members of student group (possibly slow, depending on size of group)
master_group_members = gl_student_group.members.list(as_list=False)
# Construct list of students
student_list = []
with open(args.email_input) as csv_in:
for line in csv_in:
if not line.startswith('#'):
student_list.append(Student(line))
# Get a list of all users from GL instance. SLOW.
# There does exist a better way to do this though, I think.
print(" Retrieve master list from Gitlab ... (slow ...)")
all_user_list = gl.users.list(as_list=False)
# Find existing users
# Add existing users to student group and create their personal group
gl_instructor = None
gl_existing_users = {}
bad_account = []
bad_group = []
bad_add = []
for gl_user in all_user_list:
if gl_user.username == instructor_name:
gl_instructor = gl_user
for student in student_list:
if gl_user.email == student.get_email():
gl_existing_users[student.get_email()] = gl_user
if not user_is_instructor:
# Add instructor as owner
print(" Adding instructor "+gl_instructor.username+" as owner of student group ...")
try:
gl_instructor_member = gitlab_utils.gl_add_user_group_project(gl_student_group,
gl_instructor,
gitlab.OWNER_ACCESS)
gl_instructor_member.save()
except gitlab.exceptions.GitlabCreateError as ex:
print(" GitlabCreateError: Could not add instructor as owner of student group ... " + str(ex))
except Exception as ex:
print(" Exception: Could not add instructor as owner of student group ... " + str(ex))
if (delete_admin):
# Remove admin
print("Removing admin "+admin_name+" from student group ...")
try:
gl_student_group.members.get(admin_id).delete()
except Exception as ex:
print("Could not remove api user as admin from the student group ... "+str(ex))
# Set up accounts
for student in student_list:
gl_user = None
print(student.get_email())
# If the user exists, grab its GL handle
if student.get_email() in gl_existing_users.keys():
print(" Found existing user "+student.mk_gl_username()+"!")
gl_user = gl_existing_users[student.get_email()]
# Otherwise, create missing user
else:
gl_user = None
try:
# Use email as email, the prescribed username as password and username
print(" Create new user "+student.mk_gl_username()+" to group ...")
gl_user = gitlab_utils.gl_create_user(gl, student.get_email(), student.mk_gl_username(),
student.mk_gl_username(), student.get_last_name(),
student.get_first_name())
except Exception as e:
print('Could not create account for email {}'.format(student.get_email()))
bad_account.append(student.get_email())
# If we found the user or created the user
if gl_user is not None:
# Check if student is in student group
in_student_group = False
for group_member in master_group_members:
if group_member.id == gl_user.id:
print(" user already in student group!")
in_student_group = True
break
# Add user to student group, if not already in it
if not in_student_group:
try:
print(" Adding "+student.mk_gl_username()+" to group")
member = gitlab_utils.gl_add_user_group_project(gl_student_group, gl_user, gitlab.REPORTER_ACCESS)
member.expires_at = expire_date
member.save()
except Exception as ex:
print(' Could not add {} to student group'.format(student.get_email())+" "+str(ex))
bad_add.append(student.get_email())
# Check if the student's personal group exists
if (len(personal_group_suffix) == 0):
pass
else:
personal_group_name = student.mk_gl_username() + '-' + personal_group_suffix
personal_group_exists = False
gl_personal_group = None
try:
gl_personal_group = gitlab_utils.gl_get_group_by_name(gl, personal_group_name)
personal_group_exists = True
except gitlab.exceptions.GitlabHttpError as e:
personal_group_exists = False
except Exception as ex:
print(" Exception searching for "+personal_group_name+" " + str(ex))
personal_group_exists = False
# if it does not exist
if not personal_group_exists:
# Create student personal group
print(" Create personal group "+personal_group_name)
try:
gl_personal_group = gitlab_utils.gl_create_group(gl, personal_group_name)
if gl_personal_group is not None:
print(" now add student to group ...")
gitlab_utils.gl_add_user_group_project(gl_personal_group, gl_user, gitlab.OWNER_ACCESS)
print(" set as private access ...")
gl_personal_group.visibility = "private"
gl_personal_group.save()
else:
print(' Could not create personal group for email {}'.format(student.get_email()))
bad_group.append(student.get_email())
except Exception as e:
print(' Exception creating personal group for email {}'.format(student.get_email())," ", str(e))
bad_group.append(student.get_email())
# If we successfully created the student's personal group
if gl_personal_group is not None:
try:
if user_is_instructor:
# Demote instructor to reporter
try:
gl_instructor_member = gl_personal_group.members.get(admin_id)
if gl_instructor_member is not None:
print(" Demoting admin "+admin_name+" to reporter access ...")
gl_instructor_member.access_level = gitlab.REPORTER_ACCESS
gl_instructor_member.expires_at = expire_date
gl_instructor_member.save()
except Exception as ex:
print(" group not created by this admin - cannot demote to reporter! "+str(ex))
try:
print(" add admin as reporter to existing group")
gl_instructor_member = gitlab_utils.gl_add_user_group_project(gl_personal_group,
gl_instructor,
gitlab.REPORTER_ACCESS)
print(" Set the expiration date ...")
gl_instructor_member.expires_at = expire_date
gl_instructor_member.save()
except gitlab.exceptions.GitlabCreateError as ex:
print(" GitlabCreateError: Could not add admin as reporter ... "+str(ex))
except Exception as ex:
print(" Exception: Could not add admin as reporter ... "+str(ex))
else:
# Add instructor as reporter
print(" Adding instructor "+gl_instructor.username+" as reporter ...")
try:
gl_instructor_member = gitlab_utils.gl_add_user_group_project(gl_personal_group,
gl_instructor,
gitlab.REPORTER_ACCESS)
# Set expiration date
print(" Set the expiration date ...")
gl_instructor_member.expires_at = expire_date
gl_instructor_member.save()
except gitlab.exceptions.GitlabCreateError as ex:
print(" GitlabCreateError: Could not add instructor as reporter ... "+str(ex))
except Exception as ex:
print(" Exception: Could not add instructor as reporter ... "+str(ex))
if | |
# examples/plot_physical_vs_observed/plot_underlying_vs_observed_period_radius_credible.py
# To import required modules:
import numpy as np
import time
import os
import sys
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
from scipy.special import erf #error function, used in computing CDF of normal distribution
import scipy.interpolate #for interpolation functions
import corner #corner.py package for corner plots
#matplotlib.rc('text', usetex=True)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from src.functions_general import *
from src.functions_compare_kepler import *
from src.functions_load_sims import *
from src.functions_plot_catalogs import *
from src.functions_plot_params import *
from src.functions_compute_RVs import *
##### To load the underlying and observed populations:
savefigures = False
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_med/'
savefigures_directory = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/Figures/Model_Optimization/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/Best_models/GP_best_models/PR_grids/'
run_number = ''
model_name = 'Maximum_AMD_model' + run_number
compute_ratios = compute_ratios_adjacent
AD_mod = True
weights_all = load_split_stars_weights_only()
dists_include = ['delta_f',
'mult_CRPD_r',
'periods_KS',
'period_ratios_KS',
#'durations_KS',
#'durations_norm_circ_KS',
'durations_norm_circ_singles_KS',
'durations_norm_circ_multis_KS',
'duration_ratios_nonmmr_KS',
'duration_ratios_mmr_KS',
'depths_KS',
'radius_ratios_KS',
'radii_partitioning_KS',
'radii_monotonicity_KS',
'gap_complexity_KS',
]
N_sim, cos_factor, P_min, P_max, radii_min, radii_max = read_targets_period_radius_bounds(loadfiles_directory + 'periods%s.out' % run_number)
param_vals_all = read_sim_params(loadfiles_directory + 'periods%s.out' % run_number)
sssp_per_sys, sssp = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory, run_number=run_number, load_full_tables=True)
sss_per_sys, sss = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory, run_number=run_number, compute_ratios=compute_ratios)
ssk_per_sys, ssk = compute_summary_stats_from_Kepler_catalog(P_min, P_max, radii_min, radii_max, compute_ratios=compute_ratios)
dists, dists_w = compute_distances_sim_Kepler(sss_per_sys, sss, ssk_per_sys, ssk, weights_all['all'], dists_include, N_sim, cos_factor=cos_factor, AD_mod=AD_mod, compute_ratios=compute_ratios)
##### To load many catalogs:
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_best_models/'
runs = 100
PRK_obs_all = [] # list (one entry per catalog) of 2D arrays with one row per observed planet: [period, radius, K status, intrinsic multiplicity, multiplicity with K > 0.1 m/s, multiplicity with K > 1 m/s, number of missed (undetected) interior planets, missed interior planets with K > 0.1 m/s, missed interior planets with K > 1 m/s], where K status = 1 (intrinsic single), 2 (largest K in its multiplanet system), 3 (NOT the largest K in its multiplanet system)
sssp_per_sys_P_all = []
sssp_per_sys_R_all = []
for i in range(runs): #range(1,runs+1)
run_number = i+1
print(i)
N_sim_i = read_targets_period_radius_bounds(loadfiles_directory + 'periods%s.out' % run_number)[0]
param_vals_i = read_sim_params(loadfiles_directory + 'periods%s.out' % run_number)
sssp_per_sys_i, sssp_i = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory, run_number=run_number, load_full_tables=True)
    PRK_obs = [] # 2D array with one row per observed planet in this catalog: [period, radius, K status, intrinsic multiplicity, multiplicity with K > 0.1 m/s, multiplicity with K > 1 m/s, number of missed (undetected) interior planets, missed interior planets with K > 0.1 m/s, missed interior planets with K > 1 m/s]; K status codes as above
for i,det_sys in enumerate(sssp_per_sys_i['det_all']):
n_pl_det = np.sum(det_sys)
if n_pl_det > 0:
P_sys = sssp_per_sys_i['P_all'][i]
det_sys = det_sys[P_sys > 0]
Mp_sys = sssp_per_sys_i['mass_all'][i][P_sys > 0]
Rp_sys = sssp_per_sys_i['radii_all'][i][P_sys > 0]
e_sys = sssp_per_sys_i['e_all'][i][P_sys > 0]
incl_sys = sssp_per_sys_i['incl_all'][i][P_sys > 0]
P_sys = P_sys[P_sys > 0]
n_pl = len(P_sys)
K_sys = rv_K(Mp_sys, P_sys, e=e_sys, i=incl_sys, Mstar=sssp_i['Mstar_all'][i])
n_pl_K0p1 = np.sum(K_sys > 0.1)
n_pl_K1 = np.sum(K_sys > 1.)
if n_pl == 1:
PRK_obs.append([P_sys[0], Rp_sys[0], 1, n_pl, n_pl_K0p1, n_pl_K1, 0, 0, 0])
else:
j_Kmax = np.argsort(K_sys)[-1]
for j in np.arange(n_pl)[det_sys == 1]:
n_pl_miss_interior = np.sum(det_sys[:j] == 0)
n_pl_miss_interior_K0p1 = np.sum((det_sys[:j] == 0) & (K_sys[:j] > 0.1))
n_pl_miss_interior_K1 = np.sum((det_sys[:j] == 0) & (K_sys[:j] > 1.))
if j == j_Kmax:
PRK_obs.append([P_sys[j], Rp_sys[j], 2, n_pl, n_pl_K0p1, n_pl_K1, n_pl_miss_interior, n_pl_miss_interior_K0p1, n_pl_miss_interior_K1])
else:
PRK_obs.append([P_sys[j], Rp_sys[j], 3, n_pl, n_pl_K0p1, n_pl_K1, n_pl_miss_interior, n_pl_miss_interior_K0p1, n_pl_miss_interior_K1])
PRK_obs = np.array(PRK_obs)
PRK_obs_all.append(PRK_obs)
sssp_per_sys_P_all.append(sssp_per_sys_i['P_all'])
sssp_per_sys_R_all.append(sssp_per_sys_i['radii_all'])
##### To plot period-radius diagrams:
afs = 20 # axes labels font size
tfs = 20 # text labels font size
lfs = 16 # legend labels font size
mfs = 16 # main numbers font size
sfs = 12 # secondary numbers font size
# Period-radius grids (custom bins):
#P_bins = np.logspace(np.log10(P_min), np.log10(P_max), 5+1)
#R_bins = np.array([0.5, 1., 1.5, 2., 3., 5., 10.])
P_bins = np.array([4., 8., 16., 32., 64., 128., 256.])
R_bins = np.array([0.5, 1., 1.5, 2., 3., 4., 6.])
n_P_bins, n_R_bins = len(P_bins)-1, len(R_bins)-1
# Specify edges of GridSpec panels to ensure that the legend is the same size as a cell:
bgrid, tgrid = 0.1, 0.95 # bottom and top of grid
lgrid, rleg, wcb = 0.08, 0.97, 0.09 # left of grid, width of space for colorbar, and right of legend
rgrid = (rleg-lgrid-wcb)*(n_P_bins/(n_P_bins+1)) + lgrid
lleg = rgrid + wcb
bleg, tleg = tgrid - (tgrid-bgrid)/n_R_bins, tgrid
# Occurrence rates (intrinsic mean number of planets per star and fraction of stars with planets) in each bin:
fig = plt.figure(figsize=(16,8))
plot = GridSpec(1,1,left=lgrid,bottom=bgrid,right=rgrid,top=tgrid)
ax = plt.subplot(plot[:,:])
mean_pl_grid = np.zeros((n_R_bins, n_P_bins))
fswp_grid = np.zeros((n_R_bins, n_P_bins))
for j in range(n_R_bins):
for i in range(n_P_bins):
pl_tot_cell_all = []
sys_tot_cell_all = []
for k in range(runs):
pl_cell_bools = (sssp_per_sys_P_all[k] > P_bins[i]) & (sssp_per_sys_P_all[k] < P_bins[i+1]) & (sssp_per_sys_R_all[k] > R_bins[j]) & (sssp_per_sys_R_all[k] < R_bins[j+1])
sys_cell_bools = np.any(pl_cell_bools, axis=1)
pl_tot_cell_all.append(np.sum(pl_cell_bools))
sys_tot_cell_all.append(np.sum(sys_cell_bools))
pl_tot_cell_all = np.array(pl_tot_cell_all)
sys_tot_cell_all = np.array(sys_tot_cell_all)
mean_pl_cell_all = pl_tot_cell_all/N_sim_i # mean number of such planets per star
mean_pl_sys_cell_all = pl_tot_cell_all/sys_tot_cell_all # mean number of such planets per system with at least one such planet
fswp_cell_all = sys_tot_cell_all/N_sim_i # fraction of stars with such planets
mean_pl_cell_qtls = np.quantile(mean_pl_cell_all, [0.16,0.5,0.84])
mean_pl_sys_cell_qtls = np.quantile(mean_pl_sys_cell_all, [0.16,0.5,0.84])
fswp_cell_qtls = np.quantile(fswp_cell_all, [0.16,0.5,0.84])
mean_pl_grid[j,i] = mean_pl_cell_qtls[1]
fswp_grid[j,i] = fswp_cell_qtls[1]
plt.text(x=-0.01+(i+1)*(1./n_P_bins), y=(j+1)*(1./n_R_bins)-0.025, s=r'${:.2f}$'.format(np.round(mean_pl_cell_qtls[1], 2)), ha='right', va='top', color='b', fontsize=sfs, transform=ax.transAxes)
plt.text(x=-0.01+(i+1)*(1./n_P_bins), y=(j+1)*(1./n_R_bins)-0.075, s=r'${:.2f}$'.format(np.round(mean_pl_sys_cell_qtls[1], 2)), ha='right', va='top', color='r', fontsize=sfs, transform=ax.transAxes)
snum = r'${:.2f}_{{-{:.2f}}}^{{+{:.2f}}}$'.format(np.round(fswp_cell_qtls[1], 2), np.round(fswp_cell_qtls[1]-fswp_cell_qtls[0], 2), np.round(fswp_cell_qtls[2]-fswp_cell_qtls[1], 2))
plt.text(x=0.01+i*(1./n_P_bins), y=(j+1)*(1./n_R_bins)-0.09, s=snum, ha='left', va='center', color='k', fontsize=mfs, fontweight='bold', transform=ax.transAxes)
img = plt.imshow(fswp_grid, cmap='coolwarm', norm=matplotlib.colors.LogNorm(), aspect='auto', interpolation="nearest", origin='lower') #cmap='coolwarm'
ax.tick_params(axis='both', labelsize=afs)
plt.xticks(np.linspace(-0.5, n_P_bins-0.5, n_P_bins+1), P_bins)
plt.yticks(np.linspace(-0.5, n_R_bins-0.5, n_R_bins+1), R_bins)
plt.xlabel(r'Period of conditioned planet, $P_{\rm cond}$ (days)', fontsize=tfs)
plt.ylabel(r'Radius of conditioned planet, $R_{p,\rm cond}$ ($R_\oplus$)', fontsize=tfs)
plot = GridSpec(1,1,left=rgrid+0.01,bottom=bgrid,right=rgrid+0.03,top=tgrid) # colorbar
cax = plt.subplot(plot[:,:])
cbar = plt.colorbar(img, cax=cax)
cbar.ax.tick_params(labelsize=lfs)
cbar.set_label(r'$f_{\rm swp} = \frac{\bar{n}_{\rm star}}{\bar{n}_{\rm sys}}$', rotation=270, va='bottom', fontsize=tfs)
plot = GridSpec(1,1,left=lleg,bottom=bleg,right=rleg,top=tleg) # legend
ax = plt.subplot(plot[:,:])
plt.text(x=0.95, y=0.9, s=r'$\bar{n}_{\rm star}$', ha='right', va='top', color='b', fontsize=sfs, transform=ax.transAxes)
plt.text(x=0.95, y=0.6, s=r'$\bar{n}_{\rm sys}$', ha='right', va='top', color='r', fontsize=sfs, transform=ax.transAxes)
plt.text(x=0.05, y=0.5, s=r'$f_{\rm swp} = \frac{\bar{n}_{\rm star}}{\bar{n}_{\rm sys}}$', ha='left', va='center', color='k', fontsize=mfs, transform=ax.transAxes)
plt.xticks([])
plt.yticks([])
plt.xlabel('Legend', fontsize=tfs)
if savefigures:
plt.savefig(savefigures_directory + model_name + '_PR_grid_mean_pl_fswp.pdf')
plt.close()
#plt.show()
# Observed counts in each bin:
fig = plt.figure(figsize=(16,8))
plot = GridSpec(1,1,left=lgrid,bottom=bgrid,right=rgrid,top=tgrid)
ax = plt.subplot(plot[:,:])
counts_sim_grid = np.zeros((n_R_bins, n_P_bins))
counts_Kep_grid = np.zeros((n_R_bins, n_P_bins))
for j in range(n_R_bins):
for i in range(n_P_bins):
counts_cell_all = []
for PRK_obs in PRK_obs_all:
PRK_obs_cell = PRK_obs[(PRK_obs[:,0] > P_bins[i]) & (PRK_obs[:,0] < P_bins[i+1]) & (PRK_obs[:,1] > R_bins[j]) & (PRK_obs[:,1] < R_bins[j+1])]
counts_cell = len(PRK_obs_cell) # number of observed planets in cell
counts_cell_all.append(counts_cell)
counts_cell_all = np.array(counts_cell_all)
counts_sim_cell_qtls = np.quantile(counts_cell_all, [0.16,0.5,0.84])
counts_Kep_cell = np.sum((ssk['P_obs'] > P_bins[i]) & (ssk['P_obs'] < P_bins[i+1]) & (ssk['radii_obs'] > R_bins[j]) & (ssk['radii_obs'] < R_bins[j+1]))
counts_ratio_cell_qtls = np.quantile(counts_cell_all/counts_Kep_cell, [0.16,0.5,0.84])
counts_sim_grid[j,i] = counts_sim_cell_qtls[1]
counts_Kep_grid[j,i] = counts_Kep_cell
plt.text(x=-0.01+(i+1)*(1./n_P_bins), y=(j+1)*(1./n_R_bins)-0.025, s=r'${:.1f}$'.format(np.round(counts_sim_cell_qtls[1], 1)), ha='right', va='top', color='b', fontsize=sfs, transform=ax.transAxes)
plt.text(x=-0.01+(i+1)*(1./n_P_bins), y=(j+1)*(1./n_R_bins)-0.075, s='%s' % counts_Kep_cell, ha='right', va='top', color='r', fontsize=sfs, transform=ax.transAxes)
snum = r'${:.2f}_{{-{:.2f}}}^{{+{:.2f}}}$'.format(np.round(counts_ratio_cell_qtls[1], 2), np.round(counts_ratio_cell_qtls[1]-counts_ratio_cell_qtls[0], 2), np.round(counts_ratio_cell_qtls[2]-counts_ratio_cell_qtls[1], 2)) if counts_Kep_cell > 0 else r'$-$'
plt.text(x=0.01+i*(1./n_P_bins), y=(j+1)*(1./n_R_bins)-0.09, s=snum, ha='left', va='center', color='k', fontsize=mfs, fontweight='bold', transform=ax.transAxes)
img = plt.imshow(counts_sim_grid/counts_Kep_grid, cmap='coolwarm', aspect='auto', interpolation="nearest", vmin=0., vmax=2., origin='lower') #cmap='coolwarm'
ax.tick_params(axis='both', labelsize=afs)
plt.xticks(np.linspace(-0.5, n_P_bins-0.5, n_P_bins+1), P_bins)
plt.yticks(np.linspace(-0.5, n_R_bins-0.5, n_R_bins+1), R_bins)
plt.xlabel(r'Period of conditioned planet, $P_{\rm cond}$ (days)', fontsize=tfs)
plt.ylabel(r'Radius of conditioned planet, $R_{p,\rm cond}$ ($R_\oplus$)', fontsize=tfs)
plot = GridSpec(1,1,left=rgrid+0.01,bottom=bgrid,right=rgrid+0.03,top=tgrid) # colorbar
cax = plt.subplot(plot[:,:])
cbar = plt.colorbar(img, cax=cax)
cbar.ax.tick_params(labelsize=lfs)
cbar.set_label(r'$N_{\rm Sim}/N_{\rm Kep}$', rotation=270, va='bottom', fontsize=tfs)
plot = GridSpec(1,1,left=lleg,bottom=bleg,right=rleg,top=tleg) # legend
ax = plt.subplot(plot[:,:])
plt.text(x=0.9, y=0.9, s=r'$N_{\rm Sim}$', ha='right', va='top', color='b', fontsize=sfs, transform=ax.transAxes)
plt.text(x=0.9, y=0.6, s=r'$N_{\rm Kep}$', ha='right', va='top', color='r', fontsize=sfs, transform=ax.transAxes)
plt.text(x=0.1, y=0.5, s=r'$\frac{N_{\rm Sim}}{N_{\rm Kep}}$', ha='left', va='center', color='k', fontsize=mfs, transform=ax.transAxes)
plt.xticks([])
plt.yticks([])
plt.xlabel('Legend', fontsize=tfs)
if savefigures:
plt.savefig(savefigures_directory + model_name + '_PR_grid_counts_obs.pdf')
plt.close()
#plt.show()
# Fraction of time when observed planet is the maximum K:
fig = plt.figure(figsize=(16,8))
plot = GridSpec(1,1,left=lgrid,bottom=bgrid,right=rgrid,top=tgrid)
ax = plt.subplot(plot[:,:])
frac_K3_grid = np.zeros((n_R_bins, n_P_bins))
for j in range(n_R_bins):
for i in range(n_P_bins):
counts_cell_all = []
counts_K1_cell_all = []
counts_K2_cell_all = []
counts_K3_cell_all = []
for PRK_obs in PRK_obs_all:
PRK_obs_cell = PRK_obs[(PRK_obs[:,0] > P_bins[i]) & (PRK_obs[:,0] < P_bins[i+1]) & (PRK_obs[:,1] > R_bins[j]) & (PRK_obs[:,1] < R_bins[j+1])]
counts_cell = len(PRK_obs_cell) # number of observed planets in cell
counts_K1_cell = np.sum(PRK_obs_cell[:,2] == 1) # number of observed planets in cell that are intrinsic singles
counts_K2_cell = np.sum(PRK_obs_cell[:,2] == 2) # number of observed planets in cell that are the largest K in their multiplanet systems
counts_K3_cell = np.sum(PRK_obs_cell[:,2] == 3) # number of observed planets in cell that are NOT the largest K in their multiplanet systems
counts_cell_all.append(counts_cell)
counts_K1_cell_all.append(counts_K1_cell)
counts_K2_cell_all.append(counts_K2_cell)
counts_K3_cell_all.append(counts_K3_cell)
counts_cell_all = np.array(counts_cell_all)
counts_K1_cell_all = np.array(counts_K1_cell_all)
counts_K2_cell_all = np.array(counts_K2_cell_all)
counts_K3_cell_all = np.array(counts_K3_cell_all)
counts_cell_qtls = np.quantile(counts_cell_all, [0.16,0.5,0.84])
counts_K1_cell_qtls = np.quantile(counts_K1_cell_all, [0.16,0.5,0.84])
counts_K2_cell_qtls = np.quantile(counts_K2_cell_all, [0.16,0.5,0.84])
counts_K3_cell_qtls = np.quantile(counts_K3_cell_all, [0.16,0.5,0.84])
plt.text(x=-0.01+(i+1)*(1./n_P_bins), y=(j+1)*(1./n_R_bins)-0.025, s=r'${:.2f}$'.format(np.round(np.sum(counts_K1_cell_all)/np.sum(counts_cell_all), 2)), ha='right', va='top', color='darkblue', fontsize=sfs, transform=ax.transAxes)
plt.text(x=-0.01+(i+1)*(1./n_P_bins), y=(j+1)*(1./n_R_bins)-0.075, s=r'${:.2f}$'.format(np.round(np.sum(counts_K2_cell_all)/np.sum(counts_cell_all), 2)), ha='right', va='top', color='maroon', fontsize=sfs, transform=ax.transAxes)
#plt.text(x=-0.01+(i+1)*(1./n_P_bins), y=(j+1)*(1./n_R_bins)-0.125, s=r'${:.1f}$'.format(np.round(counts_K3_cell_qtls[1], 1)), ha='right', | |
# see if POSIX standard variables will work
return (int(os.environ.get('LINES')),
int(os.environ.get('COLUMNS')))
except TypeError:
# fall back on configuration variables, or if not
# set, (25, 80)
lines = options.console.max_lines
width = options.console.max_width
if lines is None:
lines = 25
if width is None:
width = 80
return lines, width
def _color_text(text, color):
"""
Returns a string wrapped in ANSI color codes for coloring the
text in a terminal::
        colored_text = _color_text('Here is a message', 'blue')
    This won't actually affect the text until it is printed to the
terminal.
Parameters
----------
text : str
The string to return, bounded by the color codes.
color : str
An ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
"""
color_mapping = {
'black': '0;30',
'red': '0;31',
'green': '0;32',
'brown': '0;33',
'blue': '0;34',
'magenta': '0;35',
'cyan': '0;36',
'lightgrey': '0;37',
'default': '0;39',
'darkgrey': '1;30',
'lightred': '1;31',
'lightgreen': '1;32',
'yellow': '1;33',
'lightblue': '1;34',
'lightmagenta': '1;35',
'lightcyan': '1;36',
'white': '1;37'}
if sys.platform == 'win32' and OutStream is None:
# On Windows do not colorize text unless in IPython
return text
color_code = color_mapping.get(color, '0;39')
return '\033[{0}m{1}\033[0m'.format(color_code, text)
def _decode_preferred_encoding(s):
"""Decode the supplied byte string using the preferred encoding
for the locale (`locale.getpreferredencoding`) or, if the default encoding
is invalid, fall back first on utf-8, then on latin-1 if the message cannot
be decoded with utf-8.
"""
enc = locale.getpreferredencoding()
try:
try:
return s.decode(enc)
except LookupError:
enc = _DEFAULT_ENCODING
return s.decode(enc)
except UnicodeDecodeError:
return s.decode('latin-1')
def _write_with_fallback(s, write, fileobj):
"""Write the supplied string with the given write function like
``write(s)``, but use a writer for the locale's preferred encoding in case
of a UnicodeEncodeError. Failing that attempt to write with 'utf-8' or
'latin-1'.
"""
if IPythonIOStream is not None and isinstance(fileobj, IPythonIOStream):
# If the output stream is an IPython.utils.io.IOStream object that's
# not going to be very helpful to us since it doesn't raise any
# exceptions when an error occurs writing to its underlying stream.
# There's no advantage to us using IOStream.write directly though;
# instead just write directly to its underlying stream:
write = fileobj.stream.write
try:
write(s)
return write
except UnicodeEncodeError:
# Let's try the next approach...
pass
enc = locale.getpreferredencoding()
try:
Writer = codecs.getwriter(enc)
except LookupError:
Writer = codecs.getwriter(_DEFAULT_ENCODING)
f = Writer(fileobj)
write = f.write
try:
write(s)
return write
except UnicodeEncodeError:
Writer = codecs.getwriter('latin-1')
f = Writer(fileobj)
write = f.write
# If this doesn't work let the exception bubble up; I'm out of ideas
write(s)
return write
def color_print(*args, **kwargs):
"""
    Prints colors and styles to the terminal using ANSI escape
    sequences.
::
color_print('This is the color ', 'default', 'GREEN', 'green')
Parameters
----------
positional args : str
The positional arguments come in pairs (*msg*, *color*), where
*msg* is the string to display and *color* is the color to
display it in.
*color* is an ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
file : writeable file-like object, optional
Where to write to. Defaults to `sys.stdout`. If file is not
a tty (as determined by calling its `isatty` member, if one
exists), no coloring will be included.
end : str, optional
The ending of the message. Defaults to ``\\n``. The end will
be printed after resetting any color or font state.
"""
file = kwargs.get('file', _get_stdout())
end = kwargs.get('end', '\n')
write = file.write
if isatty(file) and options.console.use_color:
for i in range(0, len(args), 2):
msg = args[i]
if i + 1 == len(args):
color = ''
else:
color = args[i + 1]
if color:
msg = _color_text(msg, color)
# Some file objects support writing unicode sensibly on some Python
# versions; if this fails try creating a writer using the locale's
# preferred encoding. If that fails too give up.
if not six.PY3 and isinstance(msg, bytes):
msg = _decode_preferred_encoding(msg)
write = _write_with_fallback(msg, write, file)
write(end)
else:
for i in range(0, len(args), 2):
msg = args[i]
if not six.PY3 and isinstance(msg, bytes):
# Support decoding bytes to unicode on Python 2; use the
# preferred encoding for the locale (which is *sometimes*
# sensible)
msg = _decode_preferred_encoding(msg)
write(msg)
write(end)
def human_time(seconds):
"""
Returns a human-friendly time string that is always exactly 6
characters long.
Depending on the number of seconds given, can be one of::
1w 3d
2d 4h
1h 5m
1m 4s
15s
Will be in color if console coloring is turned on.
Parameters
----------
seconds : int
The number of seconds to represent
Returns
-------
time : str
A human-friendly representation of the given number of seconds
that is always exactly 6 characters.
"""
units = [
('y', 60 * 60 * 24 * 7 * 52),
('w', 60 * 60 * 24 * 7),
('d', 60 * 60 * 24),
('h', 60 * 60),
('m', 60),
('s', 1),
]
seconds = int(seconds)
if seconds < 60:
return ' {0:2d}s'.format(seconds)
for i in range(len(units) - 1):
unit1, limit1 = units[i]
unit2, limit2 = units[i + 1]
if seconds >= limit1:
return '{0:2d}{1}{2:2d}{3}'.format(
seconds // limit1, unit1,
(seconds % limit1) // limit2, unit2)
return ' ~inf'
def human_file_size(size):
"""
Returns a human-friendly string representing a file size
that is 2-4 characters long.
For example, depending on the number of bytes given, can be one
of::
256b
64k
1.1G
Parameters
----------
size : int
The size of the file (in bytes)
Returns
-------
size : str
A human-friendly representation of the size of the file
"""
suffixes = ' kMGTPEZY'
if size == 0:
num_scale = 0
else:
num_scale = int(math.floor(math.log(size) / math.log(1000)))
num_scale = max(num_scale, 0)
if num_scale > 7:
suffix = '?'
else:
suffix = suffixes[num_scale]
num_scale = int(math.pow(1000, num_scale))
value = float(size) / num_scale
str_value = str(value)
if suffix == ' ':
str_value = str_value[:str_value.index('.')]
elif str_value[2] == '.':
str_value = str_value[:2]
else:
str_value = str_value[:3]
return "{0:>3s}{1}".format(str_value, suffix)
def create_progress_widget():
# Import only if an IPython widget, i.e., widget in iPython NB
if ipython_major_version < 4:
widget_cls = widgets.FloatProgressWidget
from IPython.utils.traitlets import Unicode
else:
widget_cls = widgets.FloatProgress
from traitlets import Unicode
from .ui.common import build_trait
class TransientProgressBar(widget_cls):
_view_name = build_trait(Unicode, 'TransientProgressView', sync=True)
_view_module = build_trait(Unicode, 'pyodps/progress', sync=True)
return TransientProgressBar()
class ProgressBar(six.Iterator):
"""
A class to display a progress bar in the terminal.
It is designed to be used either with the ``with`` statement::
with ProgressBar(len(items)) as bar:
for item in enumerate(items):
bar.update()
or as a generator::
for item in ProgressBar(items):
item.process()
"""
def __init__(self, total_or_items, ipython_widget=False, file=None):
"""
Parameters
----------
total_or_items : int or sequence
If an int, the number of increments in the process being
tracked. If a sequence, the items to iterate over.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like object, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If `file` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the progress bar will be
completely silent.
"""
if ipython_widget:
# Import only if ipython_widget, i.e., widget in IPython
# notebook
try:
if ipython_major_version < 4:
from IPython.html import widgets
else:
from ipywidgets import widgets
from IPython.display import display
ipython_widget = is_widgets_available()
except ImportError:
ipython_widget = False
if file is None:
file = _get_stdout()
if not isatty(file) and not ipython_widget:
self.update = self._silent_update
self._silent = True
else:
self._silent = False
if isinstance(total_or_items, collections.Iterable):
self._items = iter(total_or_items)
self._total = len(total_or_items)
else:
try:
self._total = int(total_or_items)
except TypeError:
raise TypeError("First argument must be int or sequence")
else:
self._items = iter(range(self._total))
self._file = file
self._start_time = time.time()
self._human_total = human_file_size(self._total)
self._ipython_widget = ipython_widget
self._signal_set = False
if not | |
**ReservedNode** *(dict) --*
Describes a reserved node. You can call the DescribeReservedNodeOfferings API to obtain the available reserved node offerings.
- **ReservedNodeId** *(string) --*
The unique identifier for the reservation.
- **ReservedNodeOfferingId** *(string) --*
The identifier for the reserved node offering.
- **NodeType** *(string) --*
The node type of the reserved node.
- **StartTime** *(datetime) --*
The time the reservation started. You purchase a reserved node offering for a duration. This is the start time of that duration.
- **Duration** *(integer) --*
The duration of the node reservation in seconds.
- **FixedPrice** *(float) --*
The fixed cost Amazon Redshift charges you for this reserved node.
- **UsagePrice** *(float) --*
The hourly rate Amazon Redshift charges you for this reserved node.
- **CurrencyCode** *(string) --*
The currency code for the reserved cluster.
- **NodeCount** *(integer) --*
The number of reserved compute nodes.
- **State** *(string) --*
The state of the reserved compute node.
Possible Values:
* pending-payment-This reserved node has recently been purchased, and the sale has been approved, but payment has not yet been confirmed.
* active-This reserved node is owned by the caller and is available for use.
* payment-failed-Payment failed for the purchase attempt.
* retired-The reserved node is no longer available.
* exchanging-The owner is exchanging the reserved node for another reserved node.
- **OfferingType** *(string) --*
The anticipated utilization of the reserved node, as defined in the reserved node offering.
- **RecurringCharges** *(list) --*
The recurring charges for the reserved node.
- *(dict) --*
Describes a recurring charge.
- **RecurringChargeAmount** *(float) --*
The amount charged per the period of time specified by the recurring charge frequency.
- **RecurringChargeFrequency** *(string) --*
The frequency at which the recurring charge amount is applied.
- **ReservedNodeOfferingType** *(string) --*
:type ReservedNodeOfferingId: string
:param ReservedNodeOfferingId: **[REQUIRED]**
The unique identifier of the reserved node offering you want to purchase.
:type NodeCount: integer
:param NodeCount:
The number of reserved nodes that you want to purchase.
Default: ``1``
:rtype: dict
:returns:
"""
pass
def reboot_cluster(self, ClusterIdentifier: str) -> Dict:
"""
Reboots a cluster. This action is taken as soon as possible. It results in a momentary outage to the cluster, during which the cluster status is set to ``rebooting`` . A cluster event is created when the reboot is completed. Any pending cluster modifications (see ModifyCluster ) are applied at this reboot. For more information about managing clusters, go to `Amazon Redshift Clusters <https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html>`__ in the *Amazon Redshift Cluster Management Guide* .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/RebootCluster>`_
**Request Syntax**
::
response = client.reboot_cluster(
ClusterIdentifier='string'
)
**Response Syntax**
::
{
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': '<PASSWORD>',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'DataTransferProgress': {
'Status': 'string',
'CurrentRateInMegaBytesPerSecond': 123.0,
'TotalDataInMegaBytes': 123,
'DataTransferredInMegaBytes': 123,
'EstimatedTimeToCompletionInSeconds': 123,
'ElapsedTimeInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
],
'PendingActions': [
'string',
],
'MaintenanceTrackName': 'string',
'ElasticResizeNumberOfNodeOptions': 'string',
'DeferredMaintenanceWindows': [
{
'DeferMaintenanceIdentifier': 'string',
'DeferMaintenanceStartTime': datetime(2015, 1, 1),
'DeferMaintenanceEndTime': datetime(2015, 1, 1)
},
],
'SnapshotScheduleIdentifier': 'string',
'SnapshotScheduleState': 'MODIFYING'|'ACTIVE'|'FAILED',
'ResizeInfo': {
'ResizeType': 'string',
'AllowCancelResize': True|False
}
}
}
**Response Structure**
- *(dict) --*
- **Cluster** *(dict) --*
Describes a cluster.
- **ClusterIdentifier** *(string) --*
The unique identifier of the cluster.
- **NodeType** *(string) --*
The node type for the nodes in the cluster.
- **ClusterStatus** *(string) --*
The current state of the cluster. Possible values are the following:
* ``available``
* ``available, prep-for-resize``
* ``available, resize-cleanup``
* ``cancelling-resize``
* ``creating``
* ``deleting``
* ``final-snapshot``
* ``hardware-failure``
* ``incompatible-hsm``
* ``incompatible-network``
* ``incompatible-parameters``
* ``incompatible-restore``
* ``modifying``
* ``rebooting``
* ``renaming``
* ``resizing``
* ``rotating-keys``
* ``storage-full``
* ``updating-hsm``
- **ModifyStatus** *(string) --*
The status of a modify operation, if any, initiated for the cluster.
- **MasterUsername** *(string) --*
The master user name for the cluster. This name is used to connect to the database that is specified in the **DBName** parameter.
- **DBName** *(string) --*
                The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named ``dev`` was created by default.
- **Endpoint** *(dict) --*
The connection endpoint.
- **Address** *(string) --*
The DNS address of the Cluster.
- **Port** *(integer) --*
The port that the database engine is listening on.
- **ClusterCreateTime** *(datetime) --*
The date and time that the cluster was created.
- **AutomatedSnapshotRetentionPeriod** *(integer) --*
The number of days that automatic cluster snapshots are retained.
- **ManualSnapshotRetentionPeriod** *(integer) --*
The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots.
The value must be either -1 or an integer between 1 and 3,653.
- **ClusterSecurityGroups** *(list) --*
A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ``ClusterSecurityGroup.Name`` and ``ClusterSecurityGroup.Status`` subelements.
Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the **VpcSecurityGroups** parameter.
- *(dict) --*
Describes a cluster security group.
- **ClusterSecurityGroupName** *(string) --*
The name of the cluster security group.
- **Status** *(string) --*
The status of the cluster security group.
- **VpcSecurityGroups** *(list) --*
A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.
- *(dict) --*
Describes the members of a VPC security group.
- **VpcSecurityGroupId** *(string) --*
The identifier of the VPC security group.
- **Status** *(string) --*
The status of the VPC security group.
- **ClusterParameterGroups** *(list) --*
The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.
- *(dict) --*
Describes the status of a parameter group.
- **ParameterGroupName** *(string) --*
The name of the cluster parameter group.
- **ParameterApplyStatus** *(string) --*
The status of parameter updates.
- **ClusterParameterStatusList** *(list) --*
The list of parameter statuses.
For more information about parameters and parameter groups, go to `Amazon Redshift Parameter Groups <https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html>`__ in the *Amazon Redshift Cluster Management Guide* .
- *(dict) --*
Describes | |
# Scale factor of image 4
f'{msid_prefix}CA01176', # Scale factor of image 5
f'{msid_prefix}CA01392', # Scale factor of image 6
f'{msid_prefix}CA01608'], # Scale factor of image 7
'image_status': [f'AOIMAGE{i}' for i in range(8)], # IMAGE STATUS FLAG
'fiducial_flag': [f'AOACFID{i}' for i in range(8)], # FIDUCIAL LIGHT FLAG (OBC)
'image_function': [f'AOACFCT{i}' for i in range(8)], # IMAGE FUNCTION (OBC)
# this one exists also as FUNCTION2/3/4
# 'image_function_pea':
# [f'{msid_prefix}AIMGF{i}1' for i in range(8)], # IMAGE FUNCTION1 (PEA)
        'saturated_pixel': [f'{msid_prefix}ASPXF{i}' for i in range(8)], # SATURATED PIXEL FLAG
        'defective_pixel': [f'{msid_prefix}ADPXF{i}' for i in range(8)], # DEFECTIVE PIXEL FLAG
'quad_bound': [f'{msid_prefix}QBNDF{i}' for i in range(8)], # QUADRANT BOUNDRY FLAG
'common_col': [f'{msid_prefix}ACOLF{i}' for i in range(8)], # COMMON COLUMN FLAG
'multi_star': [f'{msid_prefix}AMSTF{i}' for i in range(8)], # MULTIPLE STAR FLAG
'ion_rad': [f'{msid_prefix}AIRDF{i}' for i in range(8)], # IONIZING RADIATION FLAG
'background_rms': [f'{msid_prefix}CRMSBG{i}' for i in range(8)],
'background_avg': [f'{msid_prefix}CA00110', f'{msid_prefix}CA00326',
f'{msid_prefix}CA00542', f'{msid_prefix}CA00758',
f'{msid_prefix}CA00974', f'{msid_prefix}CA01190',
f'{msid_prefix}CA01406', f'{msid_prefix}CA01622'],
'housing_temperature':
[f'{msid_prefix}ACH1T{i}2' for i in range(8)], # AC HOUSING TEMPERATURE
'ccd_temperature': [f'{msid_prefix}CCDPT{i}2' for i in range(8)], # CCD TEMPERATURE
'primary_temperature':
[f'{msid_prefix}QTAPMT{i}' for i in range(8)], # PRIMARY MIRROR/LENS CELL TEMP
'secondary_temperature':
[f'{msid_prefix}QTH2MT{i}' for i in range(8)], # AC SECONDARY MIRROR TEMPERATURE
'magnitude': [f'AOACMAG{i}' for i in range(8)], # STAR OR FIDUCIAL MAGNITUDE (OBC)
'centroid_ang_y': [f'AOACYAN{i}' for i in range(8)], # YAG CENTROID Y ANGLE (OBC)
'centroid_ang_z': [f'AOACZAN{i}' for i in range(8)], # ZAG CENTROID Z ANGLE (OBC)
'bgd_stat_pixels': [[f'ACBPX{j}1{i}' for j in 'ABGH'] +
[f'ACBPX{j}4{i}' for j in 'IJOP']
for i in range(8)]
}
return [{k: res[k][i] for k in res.keys()} for i in range(8)]
ACA_MSID_LIST = {i + 1: _aca_msid_list(i + 1) for i in range(2)}
ACA_SLOT_MSID_LIST = {i + 1: _aca_image_msid_list(i + 1) for i in range(2)}
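# For example, ACA_SLOT_MSID_LIST[1][0]['magnitude'] is 'AOACMAG0': the outer
# key selects the MSID prefix (1 or 2), the list index selects the image slot.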
_a2p = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P']
_IMG_INDICES = [
np.array([PIXEL_MAP_INV['4x4'][f'{k}1'] for k in _a2p]).T,
np.array([PIXEL_MAP_INV['6x6'][f'{k}1'] for k in _a2p]).T,
np.array([PIXEL_MAP_INV['6x6'][f'{k}2'] for k in _a2p]).T,
[],
np.array([PIXEL_MAP_INV['8x8'][f'{k}1'] for k in _a2p]).T,
np.array([PIXEL_MAP_INV['8x8'][f'{k}2'] for k in _a2p]).T,
np.array([PIXEL_MAP_INV['8x8'][f'{k}3'] for k in _a2p]).T,
np.array([PIXEL_MAP_INV['8x8'][f'{k}4'] for k in _a2p]).T
]
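# _IMG_INDICES[imgtype] holds the (row, col) index arrays used in
# _combine_aca_packets() below to scatter the 16 telemetered pixel values of a
# sub-image into the full 8x8 image array (index 3 is unused).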
######################
# VCDU-based functions
######################
# these are used multiple times
_aca_front_fmt = Struct('>HBBBBBB')
_size_bits = np.zeros((8, 8), dtype=np.uint8)
_pixel_bits = np.zeros((16, 16), dtype=np.uint16)
_bits = np.array([1 << i for i in range(64)], dtype=np.uint64)[::-1]
# I'm sure there is a better way...
def _packbits(a, unsigned=True):
# take something like this: [1,0,1,1,0] and return 2^4 + 2^2 + 2
# This handles integer types only
n = len(a)
if not unsigned and a[0]:
return np.int64(np.sum(a * _bits[-n:]) - (1 << n))
return np.sum(a * _bits[-n:])
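# For example, _packbits(np.array([1, 0, 1, 1, 0])) returns 22 (0b10110), while
# _packbits(np.array([1, 0, 1, 1, 0]), unsigned=False) treats the leading 1 as
# a sign bit and returns 22 - 2**5 = -10 (two's complement).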
class _AcaImageHeaderDecom:
"""
    Class to decommutate ACA image telemetry headers.
    These methods are grouped into a class because the header 3 packet is split into up to 8 parts.
    The __call__ method accumulates the partial packets and, once all the image types and packets
    are known, returns the header 3 data.
"""
def __init__(self):
self._header = [
self._aca_header_1, self._aca_header_1, self._aca_header_2, lambda b: {},
self._aca_header_1, self._aca_header_2, self._aca_header_3, self._aca_header_3,
]
def __call__(self, imgnum, imgtype, byte_array):
return self._header[imgtype](byte_array)
@staticmethod
def _aca_header_1(bits):
"""
Unpack ACA header 1 (ACA User Manual 5.3.2.2.1).
:param bits: bytes-like object of length 7
:return: dict
"""
bits = np.unpackbits(np.array(_unpack('BBBbbBB', bits), dtype=np.uint8))
return {
'IMGFID': bool(bits[0]),
'IMGNUM': _packbits(bits[1:4]),
'IMGFUNC': _packbits(bits[4:6]),
'IMGSTAT': _packbits(bits[6:12]),
'SAT_PIXEL': bool(bits[6]),
'DEF_PIXEL': bool(bits[7]),
'QUAD_BOUND': bool(bits[8]),
'COMMON_COL': bool(bits[9]),
'MULTI_STAR': bool(bits[10]),
'ION_RAD': bool(bits[11]),
'IMGROW0': _packbits(bits[12:22], unsigned=False),
'IMGCOL0': _packbits(bits[22:32], unsigned=False),
'IMGSCALE': _packbits(bits[32:46]),
'BGDAVG': _packbits(bits[46:56])
}
@staticmethod
def _aca_header_2(bits):
"""
Unpack ACA header 2 (ACA User Manual 5.3.2.2.2).
:param bits: bytes-like object of length 7
:return: dict
"""
bits = _unpack('BbbbbBB', bits)
c = np.unpackbits(np.array(bits[:2], dtype=np.uint8))
return {
# do we want these?
# 'FID2': bool(bits[0]),
# 'IMGNUM2': _packbits(bits[1:4]),
# 'IMGFUNC2': _packbits(bits[4:6]),
'BGDRMS': _packbits(c[6:16]),
'TEMPCCD': bits[2],
'TEMPHOUS': bits[3],
'TEMPPRIM': bits[4],
'TEMPSEC': bits[5],
'BGDSTAT': bits[6],
'BGDSTAT_PIXELS': np.unpackbits(np.array(bits[-1:], dtype=np.uint8)[-1:])
}
@staticmethod
def _aca_header_3(bits):
"""
Unpack ACA header 3 (ACA User Manual 5.3.2.2.3).
:param bits: bytes-like object of length 7
:return: dict
"""
return {'DIAGNOSTIC': _unpack('BBBBBB', bits[1:])}
def unpack_aca_telemetry(packet):
"""
Unpack ACA telemetry encoded in 225-byte packets.
:param packet: bytes
:return: list of dict
A list of length 8, one entry per slot, where each entry is a dictionary.
"""
s1, s2, s3 = _unpack('BBB', packet[5:8])
_size_bits[:, -3:] = np.unpackbits(np.array([[s1, s2, s3]], dtype=np.uint8).T, axis=1).reshape(
(8, -1))
img_types = np.packbits(_size_bits, axis=1).T[0]
slots = []
header_decom = _AcaImageHeaderDecom()
for img_num, i in enumerate(range(8, len(packet), 27)):
img_header = {'IMGTYPE': img_types[img_num]}
img_header.update(header_decom(img_num, img_types[img_num], packet[i:i + 7]))
img_pixels = _unpack('B' * 20, packet[i + 7:i + 27])
_pixel_bits[:, -10:] = np.unpackbits(np.array([img_pixels], dtype=np.uint8).T,
axis=1).reshape((-1, 10))
img_pixels = np.sum(np.packbits(_pixel_bits, axis=1) * [[2 ** 8, 1]], axis=1)
img_header['pixels'] = img_pixels
slots.append(img_header)
# Before the dynamic background patch, the first two bytes contained INTEG in those
# 16 bits (named integbits). After the dynamic background patch, the first 6 bits of
# integbits will be repurposed: two bits for PIXTLM, next bit for BGDTYP, 3 spares,
# and 10 bits for INTEG. This telem/decom change is back-compatible and can be promoted
# before the dynamic background patch is in use onboard.
integbits = np.unpackbits(np.array(_unpack('BB', packet[0:2]), dtype=np.uint8))
pixtlm = _packbits(integbits[0:2])
bgdtyp = integbits[2]
integ = _packbits(integbits[6:])
glbstat = _unpack('B', packet[2:3])[0]
bits = np.unpackbits(np.array(_unpack('BBB', packet[2:5]), dtype=np.uint8))
res = {
'PIXTLM': pixtlm,
'BGDTYP': bgdtyp,
'INTEG': integ,
'GLBSTAT': glbstat,
'HIGH_BGD': bool(bits[0]),
'RAM_FAIL': bool(bits[1]),
'ROM_FAIL': bool(bits[2]),
'POWER_FAIL': bool(bits[3]),
'CAL_FAIL': bool(bits[4]),
'COMM_CHECKSUM_FAIL': bool(bits[5]),
'RESET': bool(bits[6]),
'SYNTAX_ERROR': bool(bits[7]),
'COMMCNT': _packbits(bits[8:14], unsigned=False),
'COMMCNT_SYNTAX_ERROR': bool(bits[14]),
'COMMCNT_CHECKSUM_FAIL': bool(bits[15]),
'COMMPROG': _packbits(bits[16:22], unsigned=False),
'COMMPROG_REPEAT': _packbits(bits[22:24], unsigned=False),
}
for i, s in enumerate(slots):
s.update(res)
return slots
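# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original module). It shows the
# np.unpackbits-based bit-field extraction used above on the two INTEG bytes:
# 2 bits of PIXTLM, 1 bit of BGDTYP, 3 spare bits and 10 bits of INTEG. The
# byte values are made up, and only numpy is used (none of the module-level
# helpers such as _unpack or _packbits).
def _demo_integ_bit_split():
    import numpy as np
    two_bytes = np.array([0b01100000, 0b00101101], dtype=np.uint8)  # hypothetical bytes
    bits = np.unpackbits(two_bytes)  # 16 bits, most-significant bit first
    pixtlm = int(bits[0:2] @ [2, 1])  # first 2 bits
    bgdtyp = int(bits[2])  # next bit
    integ = int(bits[6:16] @ (2 ** np.arange(9, -1, -1)))  # last 10 bits
    return pixtlm, bgdtyp, integ  # -> (1, 1, 45) for the bytes above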
def _combine_aca_packets(aca_packets):
"""
Combine a list of ACA packets into a single record.
This is intended to combine the two 6X6 packets or the four 8X8 packets.
:param aca_packets: list of dict
:return: dict
"""
# note that they are reverse-sorted so the first frame overwrites the others if they collide
aca_packets = sorted(aca_packets, key=lambda p: p['TIME'], reverse=True)
res = {}
pixels = np.ma.masked_all((8, 8))
pixels.data[:] = np.nan
for f in aca_packets:
pixels[_IMG_INDICES[f['IMGTYPE']][0], _IMG_INDICES[f['IMGTYPE']][1]] = f['pixels']
for f in aca_packets:
res.update(f)
del res['pixels']
res['IMG'] = pixels
return res
def _group_packets(packets, discard=True):
"""
    ACA telemetry is packed in packets of 225 bytes. Each of these is split across four VCDU frames.
    Before decommutating an ACA packet we group the ACA-related portions of the VCDU frames to form
    the single 225-byte ACA packet.
:param packets: list of ACA sub-packets
:param discard: bool to discard incomplete ACA packets
:return: list of ACA packets
"""
res = []
n = None
s = None
for packet in packets:
if res and (packet['MJF'] * 128 + packet['MNF'] > n):
if not discard or len(res) == s:
yield res
res = []
if not res:
# the number of minor frames expected within the same ACA packet
s = {0: 1, 1: 2, 2: 2, 4: 4, 5: 4, 6: 4, 7: 4}[packet['IMGTYPE']]
# the number of minor frames within the same ACA packet expected after this minor frame
remaining = {0: 0, 1: 1, 2: 0, 4: 3, 5: 2, 6: 1, 7: 0}[packet['IMGTYPE']]
n = packet['MJF'] * 128 + packet['MNF'] + 4 * remaining
res.append(packet)
if res and (not discard or len(res) == s):
yield res
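# ---------------------------------------------------------------------------
# Usage sketch (added; not part of the original module). The sub-packet dicts
# below are made up and contain only the keys _group_packets() looks at
# (MJF, MNF, IMGTYPE); real sub-packets carry the full decommutated content.
def _demo_group_packets():
    sub_packets = [
        {'MJF': 10, 'MNF': 0, 'IMGTYPE': 4},   # first of four 8X8 sub-images
        {'MJF': 10, 'MNF': 4, 'IMGTYPE': 5},
        {'MJF': 10, 'MNF': 8, 'IMGTYPE': 6},
        {'MJF': 10, 'MNF': 12, 'IMGTYPE': 7},  # last of four
    ]
    # yields a single complete group of four; with discard=True an incomplete
    # group (e.g. a missing minor frame) would be dropped instead of yielded
    return list(_group_packets(sub_packets))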
def get_raw_aca_packets(start, stop, maude_result=None, **maude_kwargs):
"""
Fetch 1025-byte VCDU frames using MAUDE and extract a list of 225-byte ACA packets.
If the first minor frame in a group of four ACA packets is within (start, stop),
the three following minor frames are included if present.
returns a dictionary with keys ['TIME', 'MNF', 'MJF', 'packets', 'flags'].
These correspond to the minor frame time, minor frame count, major frame count,
the list of packets, and flags returned by MAUDE respectively.
:param start: timestamp interpreted as a Chandra.Time.DateTime
:param stop: timestamp interpreted as a Chandra.Time.DateTime
:param maude_result: the result of calling maude.get_frames. Optional.
:param maude_kwargs: keyword args passed to maude.get_frames()
:return: dict
{'flags': int, 'packets': [],
'TIME': np.array([]), 'MNF': np.array([]), 'MJF': np.array([])}
"""
    date_start, date_stop = DateTime(start), DateTime(stop)  # ensure inputs are DateTime objects
# coding: utf8
"""
ref
1. http://disi.unitn.it/moschitti/Tree-Kernel.htm
2. http://disi.unitn.it/moschitti/Teaching-slides/slides-AINLP-2016/SVMs-Kernel-Methods.pdf
3. code: http://joedsm.altervista.org/pythontreekernels.htm
4. wiki: https://en.wikipedia.org/wiki/Tree_kernel
"""
from __future__ import print_function
import math
from copy import deepcopy
from . import tree
class Kernel():
#Common routines for kernel functions
def kernel(self,a,b):
#compute the tree kernel on the trees a and b
if not isinstance(a, tree.Tree):
print("ERROR: first parameter has to be a Tree Object")
return ""
if not isinstance(b, tree.Tree):
print("ERROR: second parameter has to be a Tree Object")
return ""
self.preProcess(a)
self.preProcess(b)
return self.evaluate(a,b)
def preProcess(self,a):
#Create any data structure useful for computing the kernel
#To be instantiated in subclasses
print("ERROR: prepProcess() must be executed in subclasses")
pass
def evaluate(self,a,b):
#To be instantiated in subclasses
print("ERROR: evaluated() must be executed in subclasses")
pass
def printKernelMatrix(self,dataset):
if not isinstance(dataset, tree.Dataset):
print("ERROR: the first Parameter must be a Dataset object")
return
ne = len(dataset)
for i in range(ne):
for j in range(i,ne):
print("%d %d %.2f" % (i, j, self.kernel(dataset.getExample(i), dataset.getExample(j))))
class KernelST(Kernel):
def __init__(self,l,savememory=1,hashsep="#"):
self.l = float(l)
self.hashsep = hashsep
self.savememory = savememory
def preProcess(self,a):
if hasattr(a,'kernelstrepr'): #already preprocessed
return
if not hasattr(a.root, 'stsize'):
a.root.setSubtreeSize()
a.root.setHashSubtreeIdentifier(self.hashsep)
a.kernelstrepr = tree.SubtreeIDSubtreeSizeList(a.root)
a.kernelstrepr.sort()
if self.savememory==1:
a.deleteRootTreeNode()
def evaluate(self,a,b):
ha, hb = (a.kernelstrepr, b.kernelstrepr)
#Assumes ha and hb are ordered list of pairs (subtreeid, subtreesize)
#a.kernelreprst,b.kernelreprst are checked or created in preProcess()
i,j,k,toti,totj = (0,0,0,len(ha), len(hb))
while i < toti and j < totj:
if ha.getSubtreeID(i) == hb.getSubtreeID(j):
ci,cj=(i,j)
while i < toti and ha.getSubtreeID(i)==ha.getSubtreeID(ci):
i += 1
while j < totj and hb.getSubtreeID(j)==hb.getSubtreeID(cj):
j += 1
k += (i-ci)*(j-cj)*(self.l**ha.getSubtreeSize(ci))
elif ha.getSubtreeID(i) < hb.getSubtreeID(j):
i += 1
else:
j += 1
return k
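# ---------------------------------------------------------------------------
# Worked example (added comment; not in the original source) of the merge in
# KernelST.evaluate(). With the sorted (subtree-id, size) lists
#     ha = [(A, 1), (A, 1), (B, 3)]    and    hb = [(A, 1), (B, 3), (B, 3)]
# the block of A's contributes 2*1*l**1 and the block of B's contributes
# 1*2*l**3: every pair of identical subtrees shared by the two trees is
# counted once and weighted by l raised to the subtree size.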
class KernelSST(Kernel):
def __init__(self,l,hashsep="#"):
self.l = float(l)
self.hashsep = hashsep
self.cache = Cache()
def preProcess(self,a):
if hasattr(a,'kernelsstrepr'): #already preprocessed
return
a.root.setHashSubtreeIdentifier(self.hashsep)
a.kernelsstrepr = tree.ProdSubtreeList(a.root)
a.kernelsstrepr.sort()
def CSST(self,c,d):
if c.getSubtreeID() < d.getSubtreeID():
tmpkey = str(c.getSubtreeID()) + "#" + str(d.getSubtreeID())
else:
tmpkey = str(d.getSubtreeID()) + "#" + str(c.getSubtreeID())
if self.cache.exists(tmpkey):
return float(self.cache.read(tmpkey))
else:
prod = self.l
nc = c.getOutdegree()
if nc==d.getOutdegree():
for ci in range(nc):
if c.getChild(ci).getProduction() == d.getChild(ci).getProduction():
prod *= (1 + self.CSST(c.getChild(ci),d.getChild(ci)))
else:
cid, did = (c.getChild(ci).getSubtreeID(),d.getChild(ci).getSubtreeID())
if cid < did:
self.cache.insert(str(cid) + str(did), 0)
else:
self.cache.insert(str(did) + str(cid), 0)
self.cache.insert(tmpkey, prod)
return float(prod)
def evaluate(self,a,b):
pa,pb=(a.kernelsstrepr, b.kernelsstrepr)
self.cache.removeAll()
i,j,k,toti,totj = (0,0,0,len(pa),len(pb))
while i < toti and j < totj:
if pa.getProduction(i) == pb.getProduction(j):
ci,cj=(i,j)
while i < toti and pa.getProduction(i)==pa.getProduction(ci):
j = cj
while j < totj and pb.getProduction(j)==pb.getProduction(cj):
k += self.CSST(pa.getTree(i),pb.getTree(j))
j += 1
i += 1
elif len(pa.getProduction(i))<len(pb.getProduction(j)) or (len(pa.getProduction(i))==len(pb.getProduction(j)) and pa.getProduction(i) < pb.getProduction(j)):
i += 1
else:
j += 1
return k
class KernelPT(Kernel):
def __init__(self,l,m,hashsep="#"):
self.l = float(l)
self.m = float(m)
self.hashsep = hashsep
self.cache = Cache()
def preProcess(self,a):
if hasattr(a,'kernelptrepr'): #already preprocessed
return
a.root.setHashSubtreeIdentifier(self.hashsep)
a.kernelptrepr = tree.LabelSubtreeList(a.root)
a.kernelptrepr.sort()
def DeltaSk(self, a, b,nca, ncb):
DPS = [[0 for i in range(ncb+1)] for j in range(nca+1)]
DP = [[0 for i in range(ncb+1)] for j in range(nca+1)]
kmat = [0]*(nca+1)
for i in range(1,nca+1):
for j in range(1,ncb+1):
if a.getChild(i-1).getLabel() == b.getChild(j-1).getLabel():
DPS[i][j] = self.CPT(a.getChild(i-1),b.getChild(j-1))
kmat[0] += DPS[i][j]
else:
DPS[i][j] = 0
for s in range(1,min(nca,ncb)):
for i in range(nca+1):
DP[i][s-1] = 0
for j in range(ncb+1):
DP[s-1][j] = 0
for i in range(s,nca+1):
for j in range(s,ncb+1):
DP[i][j] = DPS[i][j] + self.l*DP[i-1][j] + self.l*DP[i][j-1] - self.l**2*DP[i-1][j-1]
if a.getChild(i-1).getLabel() == b.getChild(j-1).getLabel():
DPS[i][j] = self.CPT(a.getChild(i-1),b.getChild(j-1))*DP[i-1][j-1]
kmat[s] += DPS[i][j]
return sum(kmat)
def CPT(self,c,d):
if c.getSubtreeID() < d.getSubtreeID():
tmpkey = str(c.getSubtreeID()) + "#" + str(d.getSubtreeID())
else:
tmpkey = str(d.getSubtreeID()) + "#" + str(c.getSubtreeID())
if self.cache.exists(tmpkey):
return self.cache.read(tmpkey)
else:
if c.getOutdegree()==0 or d.getOutdegree()==0:
prod = self.m*self.l**2
else:
prod = self.m*(self.l**2+self.DeltaSk(c, d,c.getOutdegree(),d.getOutdegree()))
self.cache.insert(tmpkey, prod)
return prod
def evaluate(self,a,b):
self.cache.removeAll()
la,lb = (a.kernelptrepr,b.kernelptrepr)
i,j,k,toti,totj = (0,0,0,len(la),len(lb))
while i < toti and j < totj:
if la.getLabel(i) == lb.getLabel(j):
ci,cj=(i,j)
while i < toti and la.getLabel(i) == la.getLabel(ci):
j = cj
while j < totj and lb.getLabel(j) == lb.getLabel(cj):
k += self.CPT(la.getTree(i),lb.getTree(j))
j += 1
i += 1
elif la.getLabel(i) <= lb.getLabel(j):
i += 1
else:
j += 1
return k
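# ---------------------------------------------------------------------------
# Added note (not in the original source): KernelPT is the partial tree
# kernel. As implemented above, CPT(c, d) = m * l**2 when either node is a
# leaf, and CPT(c, d) = m * (l**2 + DeltaSk(c, d, outdeg(c), outdeg(d)))
# otherwise, where DeltaSk() is the dynamic program that sums CPT over
# matching child subsequences of every length s, penalising skipped children
# by powers of l.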
class KernelPdak(Kernel):
def __init__(self, l, gamma, beta, hashsep="#"):
self.l = float(l)
self.gamma = float(gamma)
self.beta = float(beta)
self.hashsep = hashsep
def preProcess(self, t):
if hasattr(t,'kernelpdakrepr'): #already preprocessed
return
if not hasattr(t.root, 'stsize'):
t.root.setSubtreeSize()
t.root.setHashSubtreeIdentifier(self.hashsep)
t.computeNodesDepth()
t.kernelpdakrepr = tree.SubtreePositionIDLabelSubtreeSizeList(t.root)
def mergetrees_with_depth(self, tree1, tree2):
merge = {}
for key in tree1:
if key in tree2:
merge[key] = ({(tree1[key][0],tree1[key][2]):{tree1[key][1]:1}},{(tree2[key][0],tree2[key][2]):{tree2[key][1]:1}})
del tree2[key]
else: merge[key] = ({(tree1[key][0],tree1[key][2]):{tree1[key][1]:1}},None)
for key in tree2:
merge[key] = (None,{(tree2[key][0],tree2[key][2]):{tree2[key][1]:1}})
return merge
def visit_with_depth(self,jtree,node,depth,param,lambda_par,gamma_par):
kvalue = 0
if node is not None :
child = 0
key = str(hash(node+'#'+str(child)))
while key in jtree :
kvalue = kvalue + self.visit_with_depth(jtree,key,depth+1,param,lambda_par,gamma_par)
if jtree[key][0] is not None:
if jtree[node][0] is None:
#jtree[node][0] = jtree[key][0]
jtree[node] = (jtree[key][0], jtree[node][1])
else:
for tmpkey in jtree[key][0]:
if tmpkey in jtree[node][0]:
for tmpkey2 in jtree[key][0][tmpkey]:
if tmpkey2 in jtree[node][0][tmpkey]:
jtree[node][0][tmpkey][tmpkey2] = jtree[node][0][tmpkey][tmpkey2] + jtree[key][0][tmpkey][tmpkey2]
else: jtree[node][0][tmpkey][tmpkey2] = jtree[key][0][tmpkey][tmpkey2]
else: jtree[node][0][tmpkey] = jtree[key][0][tmpkey]
if jtree[key][1] is not None:
if jtree[node][1] is None:
#jtree[node][1]=jtree[key][1]
jtree[node]=(jtree[node][0],jtree[key][1])
else:
for tmpkey in jtree[key][1]:
if tmpkey in jtree[node][1]:
for tmpkey2 in jtree[key][1][tmpkey]:
if tmpkey2 in jtree[node][1][tmpkey]:
jtree[node][1][tmpkey][tmpkey2] = jtree[node][1][tmpkey][tmpkey2] + jtree[key][1][tmpkey][tmpkey2]
else: jtree[node][1][tmpkey][tmpkey2] = jtree[key][1][tmpkey][tmpkey2]
else: jtree[node][1][tmpkey] = jtree[key][1][tmpkey]
child = child + 1
key = str(hash(node+'#'+str(child)))
# print jtree[node]
if (jtree[node][0] is not None) and (jtree[node][1] is not None):
for lkey in jtree[node][0]:
if lkey in jtree[node][1]:
tmpk = 0
for fkey1 in jtree[node][0][lkey]:
for fkey2 in jtree[node][1][lkey]:
tmpk = tmpk + lambda_par**lkey[1]*jtree[node][0][lkey][fkey1]*jtree[node][1][lkey][fkey2]*math.exp(-param*(fkey1 + fkey2))
kvalue = kvalue + (gamma_par**depth)*tmpk*math.exp(2*param*depth)
return kvalue
def evaluate(self,a,b):
tree1 = deepcopy(a.kernelpdakrepr.sids)
tree2 = deepcopy(b.kernelpdakrepr.sids)
m = self.mergetrees_with_depth(tree1,tree2)
kvalue = self.visit_with_depth(m,str(hash('0')),1,self.l, self.gamma, self.beta)
del m, tree1, tree2
return kvalue
class KernelPdakMine(Kernel):
def __init__(self, l, gamma, beta, hashsep="#"):
self.l = float(l)
self.gamma = float(gamma)
self.beta = float(beta)
self.hashsep = hashsep
self.cache = Cache()
self.cachesize = 10000
def preProcess(self, t):
if hasattr(t,'kernelpdakrepr'): #already preprocessed
return
if not hasattr(t.root, 'stsize'):
t.root.setSubtreeSize()
t.root.setHashSubtreeIdentifier(self.hashsep)
t.computeNodesDepth()
t.computeRoutes()
t.kernelpdakrepr = tree.SubtreeIDSubtreeSizeRouteList(t.root)
t.kernelpdakrepr.sort()
#print t.kernelpdakrepr.sids
def ntk(self, ra, da, rb, db, hra, hrb):
if hra < hrb:
tmpkey = str(hra) + "#" + str(hrb)
else:
tmpkey = str(hrb) + "#" + str(hra)
if self.cache.exists(tmpkey):
return float(self.cache.read(tmpkey))
lena,lenb = len(ra), len(rb)
c, p, minlen = 0, 0, min(lena,lenb)
while c < minlen and ra[c] == rb[c]:
if ra[c] == "#": p += 1
c += 1
#print "p = ", p, "da, db", da, db, ra, rb
if self.gamma == 1:
r = (p+1)*(math.e**(-self.beta*(da + db - 2*p)))
else:
r = (1-self.gamma**(p+1))/(1-self.gamma)*(math.e**(-self.beta*(da + db - 2*p)))
if len(self.cache) > self.cachesize:
self.cache.removeAll()
self.cache.insert(tmpkey,r)
return r
# if self.gamma == 1:
# return (p+1)*(math.e**(-self.beta*(da + db - 2*p)))
# else:
# return (1-self.gamma**(p+1))/(1-self.gamma)*(math.e**(-self.beta*(da + db - 2*p)))
def evaluate(self,a,b):
ha, hb = (a.kernelpdakrepr, b.kernelpdakrepr)
#print ha, hb
#Assumes ha and hb are ordered list of pairs (subtreeid, subtreesize, route)
#a.kernelreprst,b.kernelreprst are checked or created in preProcess()
i,j,k,toti,totj = (0,0,0,len(ha), len(hb))
while i < toti and j < totj:
if ha.getLabel(i) == hb.getLabel(j):
ci, cj = (i, j)
while i < toti and ha.getLabel(i)==ha.getLabel(ci):
j = cj
while j < totj and hb.getLabel(j)==hb.getLabel(cj):
cst = self.l
if ha.getSubtreeID(i)==hb.getSubtreeID(j):
cst += self.l**ha.getSubtreeSize(i)
#print ha.getLabel(i), hb.getLabel(j), cst, self.ntk(ha.getRoute(i), ha.getDepth(i), hb.getRoute(j), hb.getDepth(j))
k += cst*self.ntk(ha.getRoute(i), ha.getDepth(i), hb.getRoute(j), hb.getDepth(j), ha.getRouteHash(i), hb.getRouteHash(j))
j += 1
i += 1
elif ha.getLabel(i) < hb.getLabel(j):
i += 1
else:
j += 1
return k
class KernelPdakFast(KernelPdak):
def preProcess(self, t):
if hasattr(t,'kernelpdakrepr'): #already preprocessed
return
if not hasattr(t.root, 'stsize'):
t.root.setSubtreeSize()
t.root.setHashSubtreeIdentifier(self.hashsep)
t.computeNodesDepth()
a = tree.SubtreePositionIDSubtreeIDSubtreeSizeListLabel(t.root)
t.kernelpdakrepr = (a.sids, a.pinv)
def mergetrees_with_depth_del_labels(self, tree1,labels1, tree2,labels2):
merge = {}
match = 0
for key in tree1:
if key in tree2:
if tree1[key][0] in labels2:
match = match+1
if tree2[key][0] in labels1:
merge[key] = ({(tree1[key][0],tree1[key][1]):0},{(tree2[key][0],tree2[key][1]):0})
else:
merge[key] = ({(tree1[key][0],tree1[key][1]):0},{})
else:
if tree2[key][0] in labels1:
merge[key] = ({},{(tree2[key][0],tree2[key][1]):0})
match = match+1
else: merge[key] = ({},{})
del tree2[key]
else:
if tree1[key][0] in labels2:
merge[key] = ({(tree1[key][0],tree1[key][1]):0},{})
                    match = match + 1
"""Module with utilities that ease the transformation of word sets to embeddings."""
import logging
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
from sklearn.feature_extraction.text import strip_accents_ascii, strip_accents_unicode
from wefe.query import Query
from wefe.word_embedding_model import WordEmbeddingModel
EmbeddingDict = Dict[str, np.ndarray]
EmbeddingSets = Dict[str, EmbeddingDict]
def preprocess_word(
word: str,
options: Dict[str, Union[str, bool, Callable]] = {},
vocab_prefix: Optional[str] = None,
) -> str:
"""pre-processes a word before it is searched in the model's vocabulary.
Parameters
----------
word : str
Word to be preprocessed.
options : Dict[str, Union[str, bool, Callable]], optional
Dictionary with arguments that specifies how the words will be preprocessed,
The available word preprocessing options are as follows:
- ```lowercase```: bool. Indicates if the words are transformed to lowercase.
- ```uppercase```: bool. Indicates if the words are transformed to uppercase.
- ```titlecase```: bool. Indicates if the words are transformed to titlecase.
- ```strip_accents```: `bool`, `{'ascii', 'unicode'}`: Specifies if the accents of
the words are eliminated. The stripping type can be
specified. True uses 'unicode' by default.
- ```preprocessor```: Callable. It receives a function that operates on each
word. In the case of specifying a function, it overrides
the default preprocessor (i.e., the previous options
stop working).
        By default, no preprocessing is applied, which is equivalent to {}
Returns
-------
str
The pre-processed word according to the given parameters.
"""
preprocessor = options.get("preprocessor", None)
# if the preprocessor is specified, it takes precedence over all other operations.
if preprocessor is not None and callable(preprocessor):
word = preprocessor(word)
else:
strip_accents = options.get("strip_accents", False)
lowercase = options.get("lowercase", False)
uppercase = options.get("uppercase", False)
titlecase = options.get("titlecase", False)
if lowercase:
word = word.lower()
elif uppercase:
word = word.upper()
elif titlecase:
word = word.title()
# by default, if strip_accents is True, run strip_accents_unicode
if strip_accents is True:
word = strip_accents_unicode(word)
elif strip_accents == "ascii":
word = strip_accents_ascii(word)
elif strip_accents == "unicode":
word = strip_accents_unicode(word)
if vocab_prefix is not None and isinstance(vocab_prefix, str):
return vocab_prefix + word
return word
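# ---------------------------------------------------------------------------
# Minimal usage sketch (added; not part of the original module). The words are
# arbitrary examples chosen only to show the effect of each option:
#
# >>> preprocess_word("Ángel", options={"lowercase": True, "strip_accents": True})
# 'angel'
# >>> preprocess_word("woman", options={"preprocessor": str.upper})
# 'WOMAN'
# >>> preprocess_word("woman", vocab_prefix="/c/en/")
# '/c/en/woman'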
def get_embeddings_from_set(
model: WordEmbeddingModel,
word_set: Sequence[str],
preprocessors: List[Dict[str, Union[str, bool, Callable]]] = [{}],
strategy: str = "first",
normalize: bool = False,
verbose: bool = False,
) -> Tuple[List[str], Dict[str, np.ndarray]]:
"""Transform a sequence of words into dictionary that maps word - word embedding.
The method discard out words that are not in the model's vocabulary
(according to the rules specified in the preprocessors).
Parameters
----------
model : WordEmbeddingModel
        A word embedding model.
word_set : Sequence[str]
A sequence with the words that this function will convert to embeddings.
preprocessors : List[Dict[str, Union[str, bool, Callable]]]
A list with preprocessor options.
A ``preprocessor`` is a dictionary that specifies what processing(s) are
performed on each word before it is looked up in the model vocabulary.
For example, the ``preprocessor``
        ``{'lowercase': True, 'strip_accents': True}`` allows you to lowercase
and remove the accent from each word before searching for them in the
model vocabulary. Note that an empty dictionary ``{}`` indicates that no
preprocessing is done.
The possible options for a preprocessor are:
* ``lowercase``: ``bool``. Indicates that the words are transformed to
lowercase.
* ``uppercase``: ``bool``. Indicates that the words are transformed to
uppercase.
* ``titlecase``: ``bool``. Indicates that the words are transformed to
titlecase.
* ``strip_accents``: ``bool``, ``{'ascii', 'unicode'}``: Specifies that
the accents of the words are eliminated. The stripping type can be
specified. True uses ‘unicode’ by default.
* ``preprocessor``: ``Callable``. It receives a function that operates
on each word. In the case of specifying a function, it overrides the
default preprocessor (i.e., the previous options stop working).
A list of preprocessor options allows you to search for several
variants of the words into the model. For example, the preprocessors
``[{}, {"lowercase": True, "strip_accents": True}]``
``{}`` allows first to search for the original words in the vocabulary of
the model. In case some of them are not found,
``{"lowercase": True, "strip_accents": True}`` is executed on these words
and then they are searched in the model vocabulary.
by default [{}]
strategy : str, optional
        The strategy indicates how the preprocessed words will be used: 'first' will
        include only the first transformed word found; 'all' will include all
        transformed words found. By default "first".
normalize : bool, optional
True indicates that embeddings will be normalized, by default False
verbose : bool, optional
Indicates whether the execution status of this function is printed,
by default False
Returns
-------
Tuple[List[str], Dict[str, np.ndarray]]
A tuple containing the words that could not be found and a dictionary with
the found words and their corresponding embeddings.
"""
# ----------------------------------------------------------------------------------
# type verifications.
if not isinstance(model, WordEmbeddingModel):
raise TypeError(f"model should be a WordEmbeddingModel instance, got {model}.")
if not isinstance(word_set, (list, tuple, np.ndarray)):
raise TypeError(
"word_set should be a list, tuple or np.array of strings"
f", got {word_set}."
)
if not isinstance(preprocessors, list):
raise TypeError(
"preprocessors should be a list of dicts which contains preprocessor options"
f", got {preprocessors}."
)
if len(preprocessors) == 0:
raise TypeError(
"preprocessors must indicate at least one preprocessor, even if it is "
"an empty dictionary {}, "
f"got: {preprocessors}."
)
for idx, p in enumerate(preprocessors):
if not isinstance(p, dict):
raise TypeError(
f"each preprocessor should be a dict, got {p} at index {idx}."
)
if strategy != "first" and strategy != "all":
raise ValueError(f"strategy should be 'first' or 'all', got {strategy}.")
# ----------------------------------------------------------------------------------
# filter the words
selected_embeddings = {}
not_found_words = []
for word in word_set:
for preprocessor in preprocessors:
preprocessed_word = preprocess_word(
word, options=preprocessor, vocab_prefix=model.vocab_prefix
)
embedding = model[preprocessed_word]
if embedding is not None:
selected_embeddings[preprocessed_word] = embedding
# if the selected strategy is first, then it stops on the first
# word encountered.
if strategy == "first":
break
else:
not_found_words.append(preprocessed_word)
# if requested, normalize embeddings.
if normalize:
selected_embeddings = {
k: v / np.linalg.norm(v) for k, v in selected_embeddings.items()
}
if verbose:
print(
f"Word(s) found: {list(selected_embeddings.keys())}, "
f"not found: {not_found_words}"
)
return not_found_words, selected_embeddings
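# ---------------------------------------------------------------------------
# Minimal usage sketch (added; not part of the original module). It assumes
# `model` is a WordEmbeddingModel wrapping some pre-trained vectors; the word
# list and preprocessors are arbitrary examples:
#
# not_found, embeddings = get_embeddings_from_set(
#     model,
#     ["Woman", "mán", "she"],
#     preprocessors=[{}, {"lowercase": True, "strip_accents": True}],
#     strategy="first",
# )
# # `embeddings` maps each found (possibly preprocessed) word to its vector;
# # `not_found` lists every variant that was still missing from the vocabulary.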
def _warn_not_found_words(
warn_not_found_words: bool,
not_found_words: List[str],
model_name: str,
set_name: str,
) -> None:
if not isinstance(warn_not_found_words, bool):
raise TypeError(
"warn_not_found_words should be a boolean, got {}.".format(
warn_not_found_words
)
)
if warn_not_found_words:
if len(not_found_words) > 0:
logging.warning(
"The following words from set '{}' do not exist within the vocabulary "
"of {}: {}".format(set_name, model_name, not_found_words)
)
def _check_lost_vocabulary_threshold(
model: WordEmbeddingModel,
embeddings: EmbeddingDict,
word_set: List[str],
word_set_name: str,
lost_vocabulary_threshold: float,
):
if not isinstance(lost_vocabulary_threshold, (float, np.floating)):
raise TypeError(
"lost_vocabulary_threshold should be float, "
"got {}.".format(lost_vocabulary_threshold)
)
remaining_words = list(embeddings.keys())
number_of_lost_words = len(word_set) - len(remaining_words)
percentage_of_lost_words = number_of_lost_words / len(word_set)
# if the percentage of filtered words are greater than the
# threshold, log and return False
if percentage_of_lost_words > lost_vocabulary_threshold:
logging.warning(
"The transformation of '{}' into {} embeddings lost proportionally more "
"words than specified in 'lost_words_threshold': {} lost with respect "
"to {} maximum loss allowed.".format(
word_set_name,
model.name,
round(percentage_of_lost_words, 2),
lost_vocabulary_threshold,
)
)
return True
return False
def get_embeddings_from_sets(
model: WordEmbeddingModel,
sets: Sequence[Sequence[str]],
sets_name: Union[str, None] = None,
preprocessors: List[Dict[str, Union[str, bool, Callable]]] = [{}],
strategy: str = "first",
normalize: bool = False,
discard_incomplete_sets: bool = True,
warn_lost_sets: bool = True,
verbose: bool = False,
) -> List[EmbeddingDict]:
"""Given a sequence of word sets, obtain their corresponding embeddings.
Parameters
----------
model
sets : Sequence[Sequence[str]]
A sequence containing word sets.
Example: `[['woman', 'man'], ['she', 'he'], ['mother', 'father'] ...]`.
    sets_name : str, optional
        The name of the set of word sets. Example: `defining sets`.
This parameter is used only for printing.
by default None
preprocessors : List[Dict[str, Union[str, bool, Callable]]]
A list with preprocessor options.
A ``preprocessor`` is a dictionary that specifies what processing(s) are
performed on each word before it is looked up in the model vocabulary.
For example, the ``preprocessor``
        ``{'lowercase': True, 'strip_accents': True}`` allows you to lowercase
and remove the accent from each word before searching for them in the
model vocabulary. Note that an empty dictionary ``{}`` indicates that no
preprocessing is done.
The possible options for a preprocessor are:
        * ``lowercase``: ``bool``. Indicates that the words are transformed to lowercase.
#coding: UTF-8
#Before running, select everything in the doc and press Ctrl + Shift + F9 to remove all hyperlinks, then save.
#If any hyperlink remains, the conversion will fail.
#Run the Word macro that converts the automatic numbering in the doc into plain text.
#Format of the compiled gazetteer:
#第一章 XXX
#第二章 XXX
#第三章 XX鎮、鄉、區                         (chapter: a town / township / district)
# 第一節 鎮名緣起                            (section: origin of the town name)
# 第二節 自然環境                            (section: natural environment)
# 第三節 區域特色                            (section: regional characteristics)
# 第四節 各里地名釋義                        (section: place-name explanations for each village)
#  第一項 XX里(村) <-------------------------------starting position for extraction
#   里(村)名由來                             (origin of the village name)
#   description of the village, several lines
#   地名釋義                                 (explanations of place names)
#   (一)specific place name 1
#   description of place name 1, several lines
#   (二)specific place name 2
#   description of place name 2, several lines
#   ......
#   其他                                     (others)
#   (一)specific place name 1
#   description of place name 1, several lines
#   (二)specific place name 2
#   description of place name 2, several lines
#   ......
#  第二項 XX里(村)
#   ......
#  第三項 XX里(村)
#   ......
#第四章 XX鎮、鄉、區
#Basic algorithm:
# step1: use the "第X項" headings to locate each village as a starting point, and a "章" heading to close a
#        town/township/district; this gives the lower and upper line bounds of each village's content
# step2: within each village, use "里(村)名由來" to find the bounds of the village description
# step3: within each village, use "地名釋義" and "()" to find each specific place name and its bounds
# step4: go back to the text, grab the content between the bounds, and write it to fixed columns of the csv
#***************************************************************************************************************
import numpy as np
import pandas as pd
from docx import Document
import win32com.client as wc
import os
import re
from tqdm import tqdm
#Data processing: convert doc -> docx -> txt and save locally
class data_processor():
def __init__(self):
self.dir_name = 'F:/street_name/book/'
#self.dir_name = 'D:/street_name/book/'
def doc2docx(self):
        #convert doc to docx
word = wc.Dispatch("Word.Application")
for i in os.listdir(self.dir_name):
if i.endswith('.doc') and not i .startswith('~$'):
doc_path = os.path.join(self.dir_name, i)
doc = word.Documents.Open(doc_path)
rename = os.path.splitext(i)
save_path = os.path.join(self.dir_name, rename[0] + '.docx')
doc.SaveAs(save_path, 12)
doc.Close()
print(i)
word.Quit()
def docx2txt(self):
        #convert docx to txt, stripping all unnecessary formatting
for i in os.listdir(self.dir_name):
if i.endswith('.docx') and not i.startswith('~$'):
docx_path = os.path.join(self.dir_name, i)
document = Document(docx_path)
txt_path = os.path.join(self.dir_name, str(i).replace('.docx', '.txt'))
txt_file = open(txt_path, 'w', encoding = 'utf-8')
mode = False
for paragraph in tqdm(document.paragraphs):
new_paragraph = paragraph.text.strip('/r')
new_paragraph = new_paragraph.strip()
new_paragraph = new_paragraph.replace(' ', '')
new_paragraph = new_paragraph.replace(' ', '')
if new_paragraph == '註:':
mode = True
continue
if mode:
if new_paragraph.startswith('('):
continue
else:
mode = False
if new_paragraph != '':
txt_file.write(new_paragraph + '\n')
txt_file.close()
                #delete the docx that has been processed
os.remove(docx_path)
#split the text into rows
class word_cut():
def __init__(self):
        #initialize global variables
        #working directory
self.dir_name = 'F:/street_name/book/'
#self.dir_name = 'D:/street_name/book/'
        #Chinese numeral character constants
self.chinese = ['一', '二', '三', '四', '五', '六', '七', '八', '九', '十']
self.img_id = []
self.tab_id = []
self.tab_id_xiang = []
self.img_id2 = []
self.tab_id2 = []
        for i in range(1, 30):
            for j in range(1, 100):
                self.img_id.append(f'(如圖{i}-{j}所示)')
                self.tab_id.append(f'(如表{i}-{j})')
                self.tab_id_xiang.append(f'(詳如表{i}-{j})')
                self.img_id2.append(f'圖{i}-{j}')
                self.tab_id2.append(f'表{i}-{j}')
def run(self):
for i in os.listdir(self.dir_name):
if i.endswith('.txt') and not i .startswith('~$'):
self.save_name = i.replace('txt', 'csv')
print('Begin read ' + str(i))
self.get_txt(i)
self.get_vil_index_up_down()
self.cut_vli_by_index()
self.get_small_name()
                self.re_index() #if this line is commented out, No is numbered per village (里) instead
self.split_taipei()
self.save_csv()
def get_txt(self, file_name):
txt_path = os.path.join(self.dir_name, str(file_name))
with open(txt_path, 'r', encoding = 'utf-8') as txt_file:
            #read the txt text into a list
self.document_list = txt_file.readlines()
txt_file.close()
        #define a DataFrame that holds the lower/upper line-number bounds for each village
self.vil_df_index = pd.DataFrame(columns = ['No', 'dist_name', 'vil_name', 'vil_index_down', 'vil_index_up'], dtype = int)
        #define a DataFrame for the data to be saved
self.df_save = pd.DataFrame(columns = ['No', 'name_dist', 'name_li', 'name', 'name_eng', 'location', 'description'])
    #get the lower and upper bounds of each village's content
def get_vil_index_up_down(self):
for line_index in tqdm(range(len(self.document_list))):
            #record each village's line number as an index and store it in df_index
line = self.document_list[line_index]
vil_name = self.get_vil_name(line)
if vil_name is not None:
if 'end' not in vil_name :
cache_index = pd.DataFrame({'No': vil_pointer + 1,
'dist_name': dist_name,
'vil_name': vil_name,
'vil_index_down': line_index,
'vil_index_up': 0}, index = [0])
vil_df_cache_index = vil_df_cache_index.append(cache_index, ignore_index = True)
try:
vil_df_cache_index.iloc[vil_pointer - 1, 4] = line_index - 1
except:
pass
vil_pointer += 1
else:
if vil_name[1] != '結論':
dist_name = vil_name[1]
try:
vil_df_cache_index.iloc[vil_pointer - 1, 4] = line_index - 1
self.vil_df_index = self.vil_df_index.append(vil_df_cache_index, ignore_index = True)
except:
pass
                    #reset the pointer and the temporary DataFrame
vil_pointer = 0
vil_df_cache_index = pd.DataFrame(columns = ['No', 'dist_name', 'vil_name', 'vil_index_down', 'vil_index_up'], dtype = int)
def get_vil_name(self, line):
        #find paragraphs of the form "第X項 XX村(里)" based on content and length
if line.startswith('第') and '項' in line and (len(line) <= 24 or '(' in line) and '。' not in line and ':' not in line:
tmp = line.split('項')
tmp[1] = tmp[1].replace('\n', '')
return tmp[1].strip()
elif '第' in line and '章' in line and any(s in line for s in self.chinese) and len(line) <= 12:
tmp = line.split('章')
return 'end', tmp[1].strip()
else:
return None
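    #Added note (not in the original source): examples of how get_vil_name()
    #classifies a line of the gazetteer text (the place names are made up):
    #  '第一項 大安里'  -> '大安里'            (a village heading: returns the village name)
    #  '第三章 中山區'  -> ('end', '中山區')   (a chapter heading: closes the current district)
    #  any other line  -> None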
def cut_vli_by_index(self):
        #initialize a DataFrame for the indices of village names, place names and descriptions
useful_index = pd.DataFrame(columns = ['No', 'dist_name', 'vil_name', 'useful_name', 'useful_index_down', 'useful_index_up'])
        #iterate over vil_df_index; for every village, find the place names below it and the line numbers of the corresponding descriptions, and store them in useful_index
for i in tqdm(range(len(self.vil_df_index))):
no = self.vil_df_index.iloc[i, 0]
dist_name = self.vil_df_index.iloc[i, 1]
vil_name = self.vil_df_index.iloc[i, 2]
line_index_down = self.vil_df_index.iloc[i, 3]
line_index_up = self.vil_df_index.iloc[i, 4]
cache_index = self.get_name_and_description_index(no, dist_name, vil_name, line_index_down, line_index_up)
useful_index = useful_index.append(cache_index, ignore_index = True)
self.get_description_main(useful_index)
    #further subdivide each village according to its bounds
def get_name_and_description_index(self, no, dist_name, vil_name, line_index_down, line_index_up):
useful_index = pd.DataFrame(columns = ['No', 'dist_name', 'vil_name', 'useful_name', 'useful_index_down', 'useful_index_up'])
line_pointer = 0
for i in range(line_index_down, line_index_up + 1):
line = self.document_list[i].strip()
line = line.replace('\r', '')
line = line.replace('\n', '')
line = line.replace(' ', '')
            #cut at each marker in turn
            #the village description follows "里(村)名由來"
if ('名由來' in line or '名緣起' in line) and len(line) <= 8 and '。' not in line:
try:
line = line.split('、')[1]
except:
pass
cache_index = pd.DataFrame({'No': line_pointer + 1,
'dist_name': dist_name,
'vil_name': vil_name,
'useful_name': line,
'useful_index_down': i,
'useful_index_up': 0}, index = [0])
useful_index = useful_index.append(cache_index, ignore_index = True)
line_pointer += 1
            #"地名釋義" ends the village description, and specific place names follow
elif ('二、地名釋義' in line or '二、其他' in line) and len(line) <=10:
useful_index.iloc[line_pointer - 1, 5] = i - 1
            #specific place names start with a parenthesis plus a Chinese numeral
            #if it is a short sentence it always ends with '。'
            #the description immediately follows the specific place name
elif '(' in line and ')' in line \
and any(s in line for s in self.chinese) \
and '。' not in line\
and '本里於' not in line\
and ':' not in line:
try:
                    #split on ')' to extract the specific place name
line = line.split(')', 1)[1]
except:
pass
cache_index = pd.DataFrame({'No': line_pointer + 1,
'dist_name': dist_name,
'vil_name': vil_name,
'useful_name': line,
'useful_index_down': i,
'useful_index_up': 0}, index = [0])
useful_index = useful_index.append(cache_index, ignore_index = True)
try:
useful_index.iloc[line_pointer - 1, 5] = i - 1
except:
pass
line_pointer += 1
useful_index.iloc[line_pointer - 1, 5] = line_index_up
return useful_index
    #get the description
def get_description_main(self, useful_index):
for i in tqdm(range(len(useful_index))):
            #initialize the fields that go into save_df
no = useful_index.iloc[i, 0]
dist_name = useful_index.iloc[i, 1]
vil_name = useful_index.iloc[i, 2]
name = ''
name_eng = ''
location = ''
description = ''
            #get the content
            #the village description directly follows "里(村)名由來"
if '名由來' in useful_index.iloc[i, 3] and len(useful_index.iloc[i, 3]) == 4:
description = self.get_description(useful_index.iloc[i, 4], useful_index.iloc[i, 5])
            #otherwise the entry is a place name followed by its description
else:
name = useful_index.iloc[i, 3]
description = self.get_description(useful_index.iloc[i, 4], useful_index.iloc[i, 5])
cache_description = pd.DataFrame({'No': no,
'name_dist': dist_name,
'name_li': vil_name,
'name': name,
'name_eng': name_eng,
'location': location,
'description': description}, index = [0])
            #write to the DataFrame
self.df_save = self.df_save.append(cache_description, ignore_index = True)
def get_description(self, index_down, index_up):
        #get the description between the index bounds
description = ''
        #if the description is a single line, write it directly
if index_down == index_up - 1:
description = self.clear_description(self.document_list[index_up])
else:
for i in range(index_down + 1, index_up + 1):
if self.document_list[i].startswith(tuple(self.img_id2))\
or self.document_list[i].startswith(tuple(self.tab_id2)):
pass
else:
description = description + self.document_list[i]
description = self.clear_description(description)
return description
def clear_description(self, description):
        #clean the body text: remove line breaks, spaces, the trailing "其他" marker (a small place name may come in two parts separated by "其他"; since it is only one line it is merged into the part above) and other unnecessary characters
#description = description.strip()
description = description.replace('\r', '')
#description = description.replace('\n', '')
description = description.replace(' ', '')
description = description.replace('三、其他', '')
description = description.replace('二、地名釋義', '')
description = description.replace('二、其他', '')
for i in range(len(self.img_id)):
description = description.replace(self.img_id[i], '')
description = description.replace(self.tab_id[i], '')
description = description.replace(self.tab_id_xiang[i], '')
return description
def get_small_name(self):
self.df_save2 = pd.DataFrame(columns = ['No', 'name_dist', 'name_li', 'name', 'name_eng', 'location', 'description'])
cnt = 0
for i in tqdm(range(len(self.df_save))):
if '小地名' in self.df_save.iloc[i, 6] and '1.' in self.df_save.iloc[i, 6]:
tmp = self.df_save.iloc[i, 6].split('小地名')[-2]
cache_new_df = pd.DataFrame({'No': self.df_save.iloc[i, 0],
'name_dist': self.df_save.iloc[i, 1],
'name_li': self.df_save.iloc[i, 2],
'name': self.df_save.iloc[i, 3],
'name_eng': self.df_save.iloc[i, 4],
'location': self.df_save.iloc[i, 5],
'description': tmp.replace('\n', '').replace(' ', '')}, index = [0])
self.df_save2 = self.df_save2.append(cache_new_df, ignore_index = True)
tmp = self.df_save.iloc[i, 6].split('小地名')[-1]
tmp = tmp.split(':', 1)[1]
tmp2 = tmp.split('.')
for j in range(1, len(tmp2)):
try:
cache_new_df = pd.DataFrame({'No': self.df_save.iloc[i, 0],
'name_dist': self.df_save.iloc[i, 1],
'name_li': self.df_save.iloc[i, 2],
'name': tmp2[j].split(':')[0],
'name_eng': self.df_save.iloc[i, 4],
'location': self.df_save.iloc[i, 5],
'description': self.clear_small_description(tmp2[j].split(':')[1])}, index = [0])
self.df_save2 = self.df_save2.append(cache_new_df, ignore_index = True)
except:
pass
elif '消失的聚落' in self.df_save.iloc[i, 3]:
tmp2 = self.df_save.iloc[i, 6].split('.')
for j in range(1, len(tmp2)):
try:
cache_new_df = pd.DataFrame({'No': self.df_save.iloc[i, 0],
'name_dist': self.df_save.iloc[i, 1],
'name_li': self.df_save.iloc[i, 2],
'name': tmp2[j].split('\n')[0],
'name_eng': self.df_save.iloc[i, 4],
'location': self.df_save.iloc[i, 5],
'description': self.clear_small_description(tmp2[j].split('\n')[1])}, index = [0])
self.df_save2 = self.df_save2.append(cache_new_df, ignore_index = True)
except:
self.df_save2.iloc[cnt, 6] = self.df_save2.iloc[cnt, 6] + tmp2[j].replace('\n', '')
else:
cache_new_df = pd.DataFrame({'No': self.df_save.iloc[i, 0],
'name_dist': self.df_save.iloc[i, 1],
'name_li': self.df_save.iloc[i, 2],
'name': self.df_save.iloc[i, 3],
'name_eng': self.df_save.iloc[i, 4],
'location': self.df_save.iloc[i, 5],
'description': self.df_save.iloc[i, 6].replace('\n', '').replace(' ', '')}, index = [0])
                #write to the DataFrame
self.df_save2 = self.df_save2.append(cache_new_df, ignore_index = True)
cnt += 1
def clear_small_description(self, description):
for i in range(10):
description = description.strip(str(i))
description = description.replace('\n' ,'')
return description
def re_index(self):
        #renumber, using the town/township/district as the numbering unit
count = 0
for i in range(len(self.df_save2)):
count += 1
self.df_save2.iloc[i, 0] = count
try:
if self.df_save2.iloc[i, 1] != self.df_save2.iloc[i + 1, 1]:
count = 0
except:
pass
def split_taipei(self):
zhmodle =re.compile(u'[\u4e00-\u9fa5]')
for i in tqdm(range(len(self.df_save2))):
name_line = self.df_save2.iloc[i, 3]
name_line = name_line.replace('(', '(')
name_line = name_line.replace(')', ')')
tmp = name_line.split('(')
self.df_save2.iloc[i, 3] = tmp[0]
for j in range(1, len(tmp)):
if ',' in tmp[j]:
self.df_save2.iloc[i, 5] = tmp[j].replace(')', '')
elif any(c.islower() for c in tmp[j]):
self.df_save2.iloc[i, 4] = tmp[j].replace(')', '')
elif zhmodle.search(tmp[j]):
self.df_save2.iloc[i, 3] = str(tmp[0] + '(' + tmp[j])
def save_csv(self):
self.df_save2.to_csv(self.dir_name + self.save_name, header=1, index = False, encoding='utf-8-sig')
if __name__ == '__main__':
dp = data_processor()
    #convert doc to txt
try:
" to method report_folder_and_file_get_folders_and_files" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `report_folder_and_file_get_folders_and_files`") # noqa: E501
if self.api_client.client_side_validation and 'id' in local_var_params and not re.search(r'^[A-Fa-f0-9]{24}$', local_var_params['id']): # noqa: E501
raise ApiValueError("Invalid value for parameter `id` when calling `report_folder_and_file_get_folders_and_files`, must conform to the pattern `/^[A-Fa-f0-9]{24}$/`") # noqa: E501
if self.api_client.client_side_validation and 'skip' in local_var_params and local_var_params['skip'] > 2147483647: # noqa: E501
raise ApiValueError("Invalid value for parameter `skip` when calling `report_folder_and_file_get_folders_and_files`, must be a value less than or equal to `2147483647`") # noqa: E501
if self.api_client.client_side_validation and 'skip' in local_var_params and local_var_params['skip'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `skip` when calling `report_folder_and_file_get_folders_and_files`, must be a value greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'take' in local_var_params and local_var_params['take'] > 120: # noqa: E501
raise ApiValueError("Invalid value for parameter `take` when calling `report_folder_and_file_get_folders_and_files`, must be a value less than or equal to `120`") # noqa: E501
if self.api_client.client_side_validation and 'take' in local_var_params and local_var_params['take'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `take` when calling `report_folder_and_file_get_folders_and_files`, must be a value greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and ('search_pattern' in local_var_params and # noqa: E501
len(local_var_params['search_pattern']) > 100): # noqa: E501
raise ApiValueError("Invalid value for parameter `search_pattern` when calling `report_folder_and_file_get_folders_and_files`, length must be less than or equal to `100`") # noqa: E501
if self.api_client.client_side_validation and ('search_pattern' in local_var_params and # noqa: E501
len(local_var_params['search_pattern']) < 0): # noqa: E501
raise ApiValueError("Invalid value for parameter `search_pattern` when calling `report_folder_and_file_get_folders_and_files`, length must be greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'skip' in local_var_params and local_var_params['skip'] is not None: # noqa: E501
query_params.append(('skip', local_var_params['skip'])) # noqa: E501
if 'take' in local_var_params and local_var_params['take'] is not None: # noqa: E501
query_params.append(('take', local_var_params['take'])) # noqa: E501
if 'order_by' in local_var_params and local_var_params['order_by'] is not None: # noqa: E501
query_params.append(('orderBy', local_var_params['order_by'])) # noqa: E501
if 'desc' in local_var_params and local_var_params['desc'] is not None: # noqa: E501
query_params.append(('desc', local_var_params['desc'])) # noqa: E501
if 'search_pattern' in local_var_params and local_var_params['search_pattern'] is not None: # noqa: E501
query_params.append(('searchPattern', local_var_params['search_pattern'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey', 'JWT'] # noqa: E501
response_types_map = {
200: "FilesVM",
400: "ProblemDetails",
403: "ProblemDetails",
404: "ProblemDetails",
}
return self.api_client.call_api(
'/api/rp/v1/Reports/Folder/{id}/ListFolderAndFiles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
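    # Usage sketch (added comment; not part of the generated client). The folder id is a
    # made-up 24-character hex string of the kind the `id` pattern expects:
    #
    #   files = api.report_folder_and_file_get_folders_and_files(
    #       "5f1e2a3b4c5d6e7f8a9b0c1d", skip=0, take=20, order_by="name", desc=False)
    #
    #   # asynchronous variant: returns a thread whose .get() yields the same FilesVM
    #   thread = api.report_folder_and_file_get_folders_and_files(
    #       "5f1e2a3b4c5d6e7f8a9b0c1d", async_req=True)
    #   files = thread.get()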
def report_folders_copy_folder(self, id, folder_id, **kwargs): # noqa: E501
"""Move folder to a specified folder # noqa: E501
User with a Update Place permission for a folder and Create Entity for a Parent Folder can access this method. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.report_folders_copy_folder(id, folder_id, async_req=True)
>>> result = thread.get()
:param id: moving folder id (required)
:type id: str
:param folder_id: destination folder id (required)
:type folder_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: FileVM
"""
kwargs['_return_http_data_only'] = True
return self.report_folders_copy_folder_with_http_info(id, folder_id, **kwargs) # noqa: E501
def report_folders_copy_folder_with_http_info(self, id, folder_id, **kwargs): # noqa: E501
"""Move folder to a specified folder # noqa: E501
User with a Update Place permission for a folder and Create Entity for a Parent Folder can access this method. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.report_folders_copy_folder_with_http_info(id, folder_id, async_req=True)
>>> result = thread.get()
:param id: moving folder id (required)
:type id: str
:param folder_id: destination folder id (required)
:type folder_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(FileVM, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'id',
'folder_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method report_folders_copy_folder" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `report_folders_copy_folder`") # noqa: E501
# verify the required parameter 'folder_id' is set
if self.api_client.client_side_validation and ('folder_id' not in local_var_params or # noqa: E501
local_var_params['folder_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `folder_id` when calling `report_folders_copy_folder`") # noqa: E501
if self.api_client.client_side_validation and 'id' in local_var_params and not re.search(r'^[A-Fa-f0-9]{24}$', local_var_params['id']): # noqa: E501
raise ApiValueError("Invalid value for parameter `id` when calling `report_folders_copy_folder`, must conform to the pattern `/^[A-Fa-f0-9]{24}$/`") # noqa: E501
if self.api_client.client_side_validation and 'folder_id' in local_var_params and not re.search(r'^[A-Fa-f0-9]{24}$', local_var_params['folder_id']): # noqa: E501
raise ApiValueError("Invalid value for parameter `folder_id` when calling `report_folders_copy_folder`, must conform to the pattern `/^[A-Fa-f0-9]{24}$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
if 'folder_id' in local_var_params:
path_params['folderId'] = local_var_params['folder_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey', 'JWT'] # noqa: E501
response_types_map = {
200: "FileVM",
400: "ProblemDetails",
403: "ProblemDetails",
402: "ProblemDetails",
404: "ProblemDetails",
}
return self.api_client.call_api(
'/api/rp/v1/Reports/Folder/{id}/Copy/{folderId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def report_folders_delete_folder(self, id, **kwargs): # noqa: E501
"""Delete specified folder # noqa: E501
User with a Delete Entity permission can access this method. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.report_folders_delete_folder(id, async_req=True)
>>> result = thread.get()
:param id: folder id (required)
:type id: str
        :param recursive: delete all children
:type recursive: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
import random
from typing import cast, List, Optional, Set, Union
from cytoolz import itertoolz
from .. import errors, utils
from . import utils as aug_utils
def substitute_word_synonyms(
aug_toks: List[aug_utils.AugTok],
*,
num: Union[int, float] = 1,
pos: Optional[Union[str, Set[str]]] = None,
) -> List[aug_utils.AugTok]:
"""
Randomly substitute words for which synonyms are available
with a randomly selected synonym,
up to ``num`` times or with a probability of ``num``.
Args:
aug_toks: Sequence of tokens to augment through synonym substitution.
num: If int, maximum number of words with available synonyms
to substitute with a randomly selected synonym; if float, probability
that a given word with synonyms will be substituted.
pos: Part of speech tag(s) of words to be considered for augmentation.
If None, all words with synonyms are considered.
Returns:
New, augmented sequence of tokens.
Note:
This transform requires :class:`textacy.resources.ConceptNet` to be downloaded
to work properly, since this is the data source for word synonyms to be substituted.
"""
_validate_aug_toks(aug_toks)
pos = cast(Set[str], utils.to_collection(pos, str, set))
cand_idxs = [
idx
for idx, aug_tok in enumerate(aug_toks)
if aug_tok.syns and (pos is None or aug_tok.pos in pos)
]
rand_idxs = set(_select_random_candidates(cand_idxs, num))
if not rand_idxs:
return aug_toks[:]
new_aug_toks = []
for idx, aug_tok in enumerate(aug_toks):
if idx in rand_idxs:
new_aug_toks.append(
aug_utils.AugTok(
text=random.choice(aug_tok.syns),
ws=aug_tok.ws,
pos=aug_tok.pos,
is_word=aug_tok.is_word,
syns=aug_tok.syns, # TODO: re-fetch syns? use []?
)
)
else:
new_aug_toks.append(aug_tok)
return new_aug_toks
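# ---------------------------------------------------------------------------
# Minimal usage sketch (added; not part of the original module). The tokens,
# POS tags and synonym lists are hand-made stand-ins for what textacy normally
# derives from a spaCy doc plus the ConceptNet resource.
def _demo_substitute_word_synonyms():
    toks = [
        aug_utils.AugTok(text="The", ws=" ", pos="DET", is_word=True, syns=[]),
        aug_utils.AugTok(text="quick", ws=" ", pos="ADJ", is_word=True, syns=["speedy", "fast"]),
        aug_utils.AugTok(text="fox", ws="", pos="NOUN", is_word=True, syns=["vixen"]),
        aug_utils.AugTok(text=".", ws="", pos="PUNCT", is_word=False, syns=[]),
    ]
    # substitute at most one word that has synonyms, considering adjectives only
    return substitute_word_synonyms(toks, num=1, pos="ADJ")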
def insert_word_synonyms(
aug_toks: List[aug_utils.AugTok],
*,
num: Union[int, float] = 1,
pos: Optional[Union[str, Set[str]]] = None,
) -> List[aug_utils.AugTok]:
"""
Randomly insert random synonyms of tokens for which synonyms are available,
up to ``num`` times or with a probability of ``num``.
Args:
aug_toks: Sequence of tokens to augment through synonym insertion.
num: If int, maximum number of words with available synonyms
from which a random synonym is selected and randomly inserted; if float,
probability that a given word with synonyms will provide a synonym
to be inserted.
pos: Part of speech tag(s) of words to be considered for augmentation.
If None, all words with synonyms are considered.
Returns:
New, augmented sequence of tokens.
Note:
This transform requires :class:`textacy.resources.ConceptNet` to be downloaded
to work properly, since this is the data source for word synonyms to be inserted.
"""
_validate_aug_toks(aug_toks)
pos = cast(Set[str], utils.to_collection(pos, str, set))
# bail out on very short sentences to avoid clobbering meaning
if len(aug_toks) < 3:
return aug_toks[:]
cand_aug_toks = [
aug_tok
for aug_tok in aug_toks
if aug_tok.syns and (pos is None or aug_tok.pos in pos)
]
rand_aug_toks = _select_random_candidates(cand_aug_toks, num)
rand_idxs = random.sample(range(len(aug_toks)), len(rand_aug_toks))
if not rand_idxs:
return aug_toks[:]
rand_aug_toks = iter(rand_aug_toks)
new_aug_toks: List[aug_utils.AugTok] = []
# NOTE: https://github.com/python/mypy/issues/5492
padded_pairs = itertoolz.sliding_window(2, [None] + aug_toks) # type: ignore
for idx, (prev_tok, curr_tok) in enumerate(padded_pairs):
if idx in rand_idxs:
rand_aug_tok = next(rand_aug_toks)
if prev_tok:
# use previous token's whitespace for inserted synonym
new_tok_ws = prev_tok.ws
if prev_tok.is_word and not prev_tok.ws:
# previous token should have whitespace, if a word
new_aug_toks[-1] = aug_utils.AugTok(
text=prev_tok.text,
ws=" ",
pos=prev_tok.pos,
is_word=True,
syns=prev_tok.syns,
)
else:
new_tok_ws = " "
new_aug_toks.append(
aug_utils.AugTok(
text=random.choice(rand_aug_tok.syns),
ws=new_tok_ws,
pos=rand_aug_tok.pos,
is_word=rand_aug_tok.is_word,
syns=rand_aug_tok.syns, # TODO: re-fetch syns? use []?
)
)
new_aug_toks.append(curr_tok)
return new_aug_toks
def swap_words(
aug_toks: List[aug_utils.AugTok],
*,
num: Union[int, float] = 1,
pos: Optional[Union[str, Set[str]]] = None,
) -> List[aug_utils.AugTok]:
"""
Randomly swap the positions of two *adjacent* words,
up to ``num`` times or with a probability of ``num``.
Args:
aug_toks: Sequence of tokens to augment through position swapping.
num: If int, maximum number of adjacent word pairs to swap;
if float, probability that a given word pair will be swapped.
pos: Part of speech tag(s) of words to be considered for augmentation.
If None, all words are considered.
Returns:
New, augmented sequence of tokens.
"""
_validate_aug_toks(aug_toks)
pos = cast(Set[str], utils.to_collection(pos, str, set))
# if we don't require _adjacent_ words, this does the trick
# if not pos:
# pos = set(aug_tok.pos for aug_tok in aug_toks if aug_tok.is_word)
# cand_idx_pairs = list(
# itertools.chain.from_iterable(
# itertools.combinations(
# (idx for idx, aug_tok in enumerate(aug_toks) if aug_tok.pos == pos_),
# 2,
# )
# for pos_ in pos
# )
# )
cand_idxs = (
idx
for idx, aug_tok in enumerate(aug_toks)
if aug_tok.is_word and (pos is None or aug_tok.pos in pos)
)
cand_idx_pairs = [
(idx1, idx2)
for idx1, idx2 in itertoolz.sliding_window(2, cand_idxs)
if idx2 - idx1 == 1
]
rand_idx_pairs = _select_random_candidates(cand_idx_pairs, num)
if not rand_idx_pairs:
return aug_toks[:]
new_aug_toks = aug_toks[:]
for idx1, idx2 in rand_idx_pairs:
tok1 = new_aug_toks[idx1]
tok2 = new_aug_toks[idx2]
new_aug_toks[idx1] = aug_utils.AugTok(
text=tok2.text,
ws=tok1.ws,
pos=tok2.pos,
is_word=tok2.is_word,
syns=tok2.syns,
)
new_aug_toks[idx2] = aug_utils.AugTok(
text=tok1.text,
ws=tok2.ws,
pos=tok1.pos,
is_word=tok1.is_word,
syns=tok1.syns,
)
return new_aug_toks
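# ---------------------------------------------------------------------------
# Added note / usage sketch (not part of the original module): `num` means the
# same thing for every transform in this file -- an int caps how many
# candidates are changed, a float is treated as a per-candidate probability.
# Assuming `toks` is a list of AugTok as above:
#
#   swap_words(toks, num=2)                # swap at most two adjacent word pairs
#   swap_words(toks, num=0.3, pos="NOUN")  # each adjacent noun pair swapped with probability 0.3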
def delete_words(
aug_toks: List[aug_utils.AugTok],
*,
num: Union[int, float] = 1,
pos: Optional[Union[str, Set[str]]] = None,
) -> List[aug_utils.AugTok]:
"""
Randomly delete words,
up to ``num`` times or with a probability of ``num``.
Args:
aug_toks: Sequence of tokens to augment through word deletion.
num: If int, maximum number of words to delete;
if float, probability that a given word will be deleted.
pos: Part of speech tag(s) of words to be considered for augmentation.
If None, all words are considered.
Returns:
New, augmented sequence of tokens.
"""
_validate_aug_toks(aug_toks)
pos = cast(Set[str], utils.to_collection(pos, str, set))
# bail out on very short sentences to avoid clobbering meaning
if len(aug_toks) < 3:
return aug_toks[:]
cand_idxs = [
idx
for idx, aug_tok in enumerate(aug_toks)
if aug_tok.is_word and (pos is None or aug_tok.pos in pos) and idx > 0
]
rand_idxs = set(_select_random_candidates(cand_idxs, num))
if not rand_idxs:
return aug_toks[:]
new_aug_toks: List[aug_utils.AugTok] = []
# NOTE: https://github.com/python/mypy/issues/5492
padded_triplets = itertoolz.sliding_window(
3, [None] + aug_toks + [None], # type: ignore
)
for idx, (prev_tok, curr_tok, next_tok) in enumerate(padded_triplets):
if idx in rand_idxs:
# special case: word then [deleted word] then punctuation
# give deleted word's whitespace to previous word
if prev_tok and next_tok and prev_tok.is_word and not next_tok.is_word:
new_aug_toks[-1] = aug_utils.AugTok(
text=prev_tok.text,
ws=curr_tok.ws,
pos=prev_tok.pos,
is_word=prev_tok.is_word,
syns=prev_tok.syns,
)
else:
new_aug_toks.append(curr_tok)
return new_aug_toks
def substitute_chars(
aug_toks: List[aug_utils.AugTok],
*,
num: Union[int, float] = 1,
lang: Optional[str] = None,
) -> List[aug_utils.AugTok]:
"""
Randomly substitute a single character in randomly-selected words with another,
up to ``num`` times or with a probability of ``num``.
Args:
aug_toks: Sequence of tokens to augment through character substitution.
num: If int, maximum number of words to modify with a random character substitution;
if float, probability that a given word will be modified.
lang: Standard, two-letter language code corresponding to ``aug_toks``.
Used to load a weighted distribution of language-appropriate characters
that are randomly selected for substitution. More common characters
are more likely to be substituted. If not specified, ascii letters and
digits are randomly selected with equal probability.
Returns:
New, augmented sequence of tokens.
Note:
This transform requires :class:`textacy.datasets.UDHR` to be downloaded
to work properly, since this is the data source for character weights when
deciding which char(s) to substitute.
"""
_validate_aug_toks(aug_toks)
char_weights = aug_utils.get_char_weights(lang or "xx")
cand_idxs = [
idx
for idx, aug_tok in enumerate(aug_toks)
if aug_tok.is_word and len(aug_tok.text) >= 3
]
rand_idxs = set(_select_random_candidates(cand_idxs, num))
if not rand_idxs:
return aug_toks[:]
rand_chars = iter(
random.choices(
[char for char, _ in char_weights],
weights=[weight for _, weight in char_weights],
k=len(rand_idxs),
)
)
new_aug_toks = []
for idx, aug_tok in enumerate(aug_toks):
if idx in rand_idxs:
text_list = list(aug_tok.text)
rand_char_idx = random.choice(range(len(text_list)))
text_list[rand_char_idx] = next(rand_chars)
new_aug_toks.append(
aug_utils.AugTok(
text="".join(text_list),
ws=aug_tok.ws,
pos=aug_tok.pos,
is_word=aug_tok.is_word,
syns=aug_tok.syns,
)
)
else:
new_aug_toks.append(aug_tok)
return new_aug_toks
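# A minimal usage sketch: ``char_weights`` is a sequence of ``(char, weight)``
# pairs fed to ``random.choices`` above, so e.g.
#
#     substitute_chars(aug_toks, num=0.05, lang="en")
#
# swaps one character in roughly 5% of the words that are at least 3 characters
# long, preferring characters that are common in English text.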
def insert_chars(
aug_toks: List[aug_utils.AugTok],
*,
num: Union[int, float] = 1,
lang: Optional[str] = None,
) -> List[aug_utils.AugTok]:
"""
Randomly insert a character into randomly-selected words,
up to ``num`` times or with a probability of ``num``.
Args:
aug_toks: Sequence of tokens to augment through character insertion.
num: If int, maximum number of words to modify with a random character insertion;
if float, probability that a given word will be modified.
lang: Standard, two-letter language code corresponding to ``aug_toks``.
Used to load a weighted distribution of language-appropriate characters
that are randomly selected for insertion. More common characters
are more likely to be inserted. If not specified, ascii letters and
digits are randomly selected with equal probability.
the sum is smaller.
[200. if drop_remainder else 310.] * 3)
for_sums = defun(sum_for_loop)(dataset)
# For loops always call get next as optional inside tf functions, so we
# expect 310 here when using an input function (as there are 5 batches of
# size 4 round-robined over 2 replicas).
expected_for_sum = 200.
if (not drop_remainder or (
defun_type == "tf_function" and input_type == "input_fn")):
expected_for_sum = 310.
self.assertAllEqual(nest.flatten(for_sums), [expected_for_sum] * 3)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu
],
input_type=["dataset", "input_fn"],
drop_remainder=[False, True],
tensor_type=["sparse", "ragged"],
enable_get_next_as_optional=[True, False]
))
def testRaggedSparseGetNextAsOptional(
self, distribution, input_type, drop_remainder, tensor_type,
enable_get_next_as_optional):
"""Test with `RaggedTensor`s and `SparseTensor`s."""
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
global_batch_size = 8
def dataset_fn(ctx=None):
ctx = ctx or distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
tensor_type: (ragged_tensor if tensor_type == "ragged" else
ragged_tensor.to_sparse()),
})
dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
return dataset.batch(batch_size, drop_remainder=drop_remainder)
if input_type == "dataset":
ds = distribution.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
else:
ds = distribution.distribute_datasets_from_function(dataset_fn)
iterator = iter(ds)
self.assertEqual(iterator._enable_get_next_as_optional,
(not drop_remainder) and enable_get_next_as_optional)
@combinations.generate(
combinations.combine(
tf_api_version=2,
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
# TODO(mdan): Add these?
# strategy_combinations.multi_worker_mirrored_2x1_cpu,
# strategy_combinations.multi_worker_mirrored_2x1_gpu,
# strategy_combinations.multi_worker_mirrored_2x2_gpu,
],
input_type=["dataset", "input_fn"],
drop_remainder=[False, True],
))
def testRaggedSparseGetNextAsOptionalInLoop(
self, distribution, input_type, drop_remainder):
"""Test with `RaggedTensor`s and `SparseTensor`s."""
self.skipTest("b/323359921")
global_batch_size = 8
def dataset_fn(ctx=None):
ctx = ctx or distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"dense": ragged_tensor.to_tensor(),
"ragged": ragged_tensor,
"sparse": ragged_tensor.to_sparse(),
})
dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
return dataset.batch(batch_size, drop_remainder=drop_remainder)
if input_type == "dataset":
ds = distribution.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
else:
ds = distribution.distribute_datasets_from_function(dataset_fn)
# Iterate through all the batches and sum them up.
def sum_batch(per_replica_features):
"""Sums the `PerReplica` values in the `per_replica_features` map."""
def map_fn(per_replica_values):
per_replica_sums = distribution.run(
(lambda x: math_ops.reduce_sum(x.values)) if all(
map(sparse_tensor.is_sparse, per_replica_values.values)) else
math_ops.reduce_sum, (per_replica_values,))
return distribution.reduce(
reduce_util.ReduceOp.SUM, per_replica_sums, axis=None)
return nest.map_structure(map_fn, per_replica_features)
def _reduce(state, batch):
sums = sum_batch(batch)
return {name: value + sums[name] for name, value in state.items()}
def sum_while_loop(ds):
iterator = iter(ds)
sums = {"dense": 0., "ragged": 0., "sparse": 0.}
try_next = constant_op.constant(True)
while try_next:
opt_iterate = iterator.get_next_as_optional()
if opt_iterate.has_value():
sums = _reduce(sums, opt_iterate.get_value())
else:
try_next = False
return sums
sums = def_function.function(sum_while_loop)(ds)
# For loops always call get next as optional inside tf functions, so we
# expect 310 here when using an input function (as there are 5 batches of
# size 4 round-robined over 2 replicas).
expected_for_sum = 200.
if not drop_remainder or input_type == "input_fn":
expected_for_sum = 310.
self.assertAllEqual(nest.flatten(sums), [expected_for_sum] * 3)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
]))
def testMWMSPartialBatch(self, input_type, api_type, iteration_type,
distribution):
# Test case: 2 workers, 1 replica each.
# This test simulates the sharded behavior when we have two files each with
# 12 elements and a global batch size of 8. When we consider the dataset in
# aggregate (non-distributed), there are 24 elements divided into 3 batches
# of size 8. Hence, the correct distributed behavior is for each replica to
# see sub-batches of size 4, over three steps.
def dataset_fn(ctx):
del ctx
dataset = dataset_ops.Dataset.range(12).batch(8)
# Set the sharding behavior to OFF for simplicity of test setup; namely,
# `dataset` defines the per-worker dataset and will not be further
# sharded. Each worker will see a dataset that is
# tf.data.Dataset.range(12).batch(8).rebatch(...).
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
dataset = dataset.with_options(options)
return dataset
dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
# Actual devices don't matter in this test as long as there is 1 local
# replica.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
# Each test runs individually on each worker, so we compare the
# values on each worker. Each worker should rebatch its dataset into
# smaller batches of size 4.
expected_values = [[[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9, 10, 11]]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
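# Sanity sketch of the arithmetic above: each worker's 12 elements batched by 8
# yield per-worker batches of sizes 8 and 4, which rebatch into per-replica
# batches of 4 over exactly 3 steps:
#   elements = list(range(12))
#   assert [elements[i:i + 4] for i in range(0, 12, 4)] == [
#       [0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]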
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
]))
def testMWMSPartialBatchWithLegacyRebatch(self, input_type, api_type,
iteration_type, distribution):
# Test case: 2 workers, 1 replica each.
# This test simulates the sharded behavior when we have two files each with
# 12 elements and a global batch size of 8. When we consider the dataset in
# aggregate (non-distributed), there are 24 elements divided into 3 batches
# of size 8. Hence, the correct distributed behavior is for each replica to
# see sub-batches of size 4, over three steps. However, when we create a
# DistributedDataset and cannot statically infer the intended global batch
# size (e.g. if the user does not use a batching dataset), each worker will
# rebatch based on the dynamic batch size of the data encountered, even when
# it encounters partial batches. The last per-worker partial batch (size 4)
# ends up being split into two replicas, resulting in 4 steps in total, of
# (global) batch sizes 8, 8, 4, 4.
def dataset_fn(ctx):
del ctx
# The following dataset is equivalent to
# tf.data.Dataset.range(12).batch(8), but does not use a batching dataset.
# This causes DistributedDataset to use LegacyRebatch instead.
batch_sizes = dataset_ops.Dataset.from_tensor_slices([8, 4])
offsets = dataset_ops.Dataset.from_tensor_slices([0, 8])
dataset = dataset_ops.Dataset.zip((offsets, batch_sizes))
def map_fn(offset, batch_size):
return math_ops.range(offset, offset + batch_size)
dataset = dataset.map(map_fn)
# Set the sharding behavior to OFF for simplicity of test setup; namely,
# `dataset` defines the per-worker dataset and will not be further
# sharded. Each worker will see a dataset that is equivalent to
# tf.data.Dataset.range(12).batch(8).rebatch(...).
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
dataset = dataset.with_options(options)
return dataset
dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
# Actual devices don't matter in this test as long as the number of global
# replicas is 2.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
# Each test runs individually on each worker, so we compare the
# values on each worker. Each worker should rebatch its dataset into
# smaller batches; the final per-worker partial batch of 4 is itself split
# across replicas, giving per-replica batches of sizes 4, 4, 2, 2.
expected_values = [[[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9]], [[10, 11]]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
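# Sanity sketch: with LegacyRebatch each encountered batch is split by the
# replica count, so the per-worker batch sizes [8, 4] become per-replica sizes
# [4, 4, 2, 2], i.e. 4 steps instead of 3:
#   assert [s // 2 for s in [8, 4] for _ in range(2)] == [4, 4, 2, 2]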
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
auto_shard_policy=[AutoShardPolicy.AUTO, AutoShardPolicy.DATA]))
def testMWMSWithDataSharding(self, input_type, api_type, iteration_type,
distribution, auto_shard_policy):
# Test case: 2 workers, 1 replica each.
# This test simulates the sharded behavior the dataset is sharded by data
# and the batch size is indivisible by the number of replicas. This checks
# that the elements are as expected and the batch size across all workers
# adds up to 3. This test will only pass if the autoshard rewrite rewrites
# RebatchDatasetV2 to legacy RebatchDataset when sharding by data.
def dataset_fn(ctx):
del ctx
dataset = dataset_ops.Dataset.range(8).batch(3)
# Set the sharding behavior to OFF for simplicity of test setup; namely,
# `dataset` defines the per-worker dataset and will not be further
# sharded. Each worker will see a dataset that is
# tf.data.Dataset.range(8).batch(3).rebatch(...).
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = auto_shard_policy
dataset = dataset.with_options(options)
return dataset
dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
# Actual devices don't matter in this test as long as there is 1 local
# replica.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
# Each test runs individually on each worker, so we compare the
# values on each worker. We expect each worker to see different shards of
# data.
cr = distribution.cluster_resolver
worker_id = multi_worker_util.id_in_cluster(cr.cluster_spec(), cr.task_type,
cr.task_id)
if worker_id == 0:
expected_values = [[[0, 1]], [[3, 4]], [[6]]]
elif worker_id == 1:
expected_values = [[[2]], [[5]], [[7]]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
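# Sanity sketch: with DATA sharding the global batches [0-2], [3-5], [6-7] are
# each split contiguously between the two workers, so the per-step batch sizes
# across workers still add up to the global batch size of 3 (or 2 at the end):
#   steps = [[0, 1, 2], [3, 4, 5], [6, 7]]
#   split = [(s[:(len(s) + 1) // 2], s[(len(s) + 1) // 2:]) for s in steps]
#   # worker 0 sees [0, 1], [3, 4], [6]; worker 1 sees [2], [5], [7]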
class DistributedIteratorPerDeviceTest(DistributedIteratorTestBase,
parameterized.TestCase):
"""Tests for PER_WORKER and PER_REPLICA's InputOptions variants."""
def setUp(self):
context._reset_context()
strategy_combinations.set_virtual_cpus_to_at_least(3)
super(DistributedIteratorPerDeviceTest, self).setUp()
@combinations.generate(
combinations.combine(
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_prefetch_to_device=True,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_WORKER),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
# io_scene_sphnx/export_ese.py
# Copyright (c) 2020-2021 Swyter <<EMAIL>>
# SPDX-License-Identifier: Zlib
"""
Name: 'Eurocom Scene Export'
Blender: 2.90.1
Group: 'Export'
Tooltip: 'Blender ESE Exporter for EuroLand'
Authors: Swyter and Jmarti856
"""
import bpy
import os
import math
import bmesh
import datetime
from mathutils import *
from math import *
from pathlib import Path
from bpy_extras.io_utils import axis_conversion
from . import bl_info
block_level = 0
def _write(context, filepath,
EXPORT_FLIP_POLYGONS,
EXPORT_OBJECTTYPES,
EXPORT_MATERIALS,
EXPORT_CAMERALIGHTANIMS,
EXPORT_VERTEXCOLORS,
EXPORT_ANIMATION,
EXPORT_GLOBAL_MATRIX,
):
#===============================================================================================
# FUNCTIONS
#===============================================================================================
def GetMaterialCount():
Materials_Number = 0
for indx, MeshObj in enumerate(bpy.context.scene.objects):
if MeshObj.type == 'MESH':
Materials_Number += 1
return Materials_Number
#===============================================================================================
# MAIN
#===============================================================================================
def WriteFile():
# Stop edit mode
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT')
#Create new file
with open(filepath, 'w') as out:
# swy: reset the indentation level to zero
global block_level; block_level = 0
# swy: make a no-carriage-return version
def write_scope_no_cr(dump):
out.write(('\t' * block_level) + dump)
def write_scope(dump):
write_scope_no_cr(dump + '\n')
def w_new_block(dump):
write_scope(dump)
global block_level; block_level += 1
def w_end_block(dump):
global block_level; block_level -= 1
write_scope(dump)
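# Quick sketch of how these helpers nest: w_new_block() writes a line and then
# indents everything until the matching w_end_block(), e.g.
#   w_new_block('*SCENE {')             # -> "*SCENE {"
#   write_scope('*SCENE_LASTFRAME 10')  # -> "\t*SCENE_LASTFRAME 10"
#   w_end_block('}')                    # -> "}"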
def PrintNODE_TM(tag_name, object):
bpy.context.scene.frame_set(bpy.context.scene.frame_start)
w_new_block('*' + tag_name + ' {')
#Write Matrix
write_scope('*NODE_NAME "%s"' % object.name)
write_scope('*TM_ROW0 %.4f %.4f %.4f' % (obj.matrix_world[0].x, obj.matrix_world[0].y, obj.matrix_world[0].z))
write_scope('*TM_ROW1 %.4f %.4f %.4f' % (obj.matrix_world[1].x, obj.matrix_world[1].y, obj.matrix_world[1].z))
write_scope('*TM_ROW2 %.4f %.4f %.4f' % (obj.matrix_world[2].x, obj.matrix_world[2].y, obj.matrix_world[2].z))
write_scope('*TM_ROW3 %.4f %.4f %.4f' % (obj.matrix_world[0].w, obj.matrix_world[1].w, obj.matrix_world[2].w))
w_end_block('}')
def PrintTM_ANIMATION(object, TimeValue):
w_new_block('*TM_ANIMATION {')
write_scope('*NODE_NAME "%s"' % object.name)
w_new_block('*TM_ANIM_FRAMES {')
last_matrix = False; TimeValueCounter = 0
for f in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end + 1):
bpy.context.scene.frame_set(f)
if obj.matrix_world != last_matrix:
#Write Time Value
write_scope_no_cr('*TM_FRAME %5u' % f)
#Write Matrix
out.write(' %.4f %.4f %.4f' % (obj.matrix_world[0].x, obj.matrix_world[0].y, obj.matrix_world[0].z))
out.write(' %.4f %.4f %.4f' % (obj.matrix_world[1].x, obj.matrix_world[1].y, obj.matrix_world[1].z))
out.write(' %.4f %.4f %.4f' % (obj.matrix_world[2].x, obj.matrix_world[2].y, obj.matrix_world[2].z))
out.write(' %.4f %.4f %.4f' % (obj.matrix_world[0].w, obj.matrix_world[1].w, obj.matrix_world[2].w))
out.write('\n')
#Update counter
TimeValueCounter += TimeValue
last_matrix = obj.matrix_world.copy()
w_end_block('}') # NODE_NAME
w_end_block('}') # TM_ANIMATION
# here's the first line that gets written; we start here, with the basic header
write_scope('*3DSMAX_EUROEXPORT 300')
# swy: turn a (2021, 8, 16) tuple into "2021.08.16"
version_date = '.'.join(('%02u' % x) for x in bl_info['version'])
write_scope('*COMMENT "Eurocom Export Version %s - %s"' % (version_date, datetime.datetime.utcnow().strftime('%a %b %d %H:%M:%S %Y')))
write_scope('*COMMENT "Version of Blender that output this file: %s"' % bpy.app.version_string)
write_scope('*COMMENT "Version of ESE Plug-in: 5.0.0.13"')
write_scope('')
#===============================================================================================
# SCENE INFO
#===============================================================================================
TimeValue = 1 # 4800 / bpy.context.scene.render.fps
frame_count = bpy.context.scene.frame_end - bpy.context.scene.frame_start + 1
w_new_block('*SCENE {')
write_scope('*SCENE_FILENAME "%s"' % os.path.basename(bpy.data.filepath))
write_scope('*SCENE_FIRSTFRAME %u ' % bpy.context.scene.frame_start)
write_scope('*SCENE_LASTFRAME %u ' % bpy.context.scene.frame_end)
write_scope('*SCENE_FRAMESPEED %u ' % bpy.context.scene.render.fps)
write_scope('*SCENE_TICKSPERFRAME %u ' % TimeValue)
w_end_block('}') # SCENE
#===============================================================================================
# GEOM OBJECT
#===============================================================================================
def duplicate(obj, data=True, actions=True, collection=None):
obj_copy = obj.copy()
if data:
obj_copy.data = obj_copy.data.copy()
if actions and obj_copy.animation_data:
obj_copy.animation_data.action = obj_copy.animation_data.action.copy()
#collection.objects.link(obj_copy)
return obj_copy
if 'MESH' in EXPORT_OBJECTTYPES:
for indx, obj_orig in enumerate(bpy.context.scene.objects):
obj = (obj_orig)
# swy: convert from the blender to the euroland coordinate system; we can't do that with the
# standard matrix transformations
# swy: this does the Z-up to Y-up coordinate conversion, so that things don't appear oriented sideways
obj.matrix_world = Matrix(([-1, 0, 0], [0, 0, 1], [0, -1, 0])).to_4x4() @ obj.matrix_world
# swy: this fixes only the reversed rotations (they were facing in the wrongest way) without affecting positions or scaling, which look alright
# decompose the transformation, fix rotations and compose it back as normal; the .inverted() call here is the magic word.
translation_vec, rotation_quat, scale_vec = obj.matrix_world.decompose()
obj.matrix_world = Matrix.Translation(translation_vec) @ rotation_quat.to_matrix().inverted().to_4x4() @ Matrix.Diagonal(scale_vec.to_4d())
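# In effect the two steps above map a Blender world-space point (x, y, z) to
# (-x, z, -y): Blender's +Z (up) becomes Euroland's +Y, +Y becomes -Z, and X is
# mirrored; the rotation part is additionally inverted while translation and
# scale are left untouched.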
if obj.type == 'MESH':
if hasattr(obj, 'data'):
#===========================================[Clone Object]====================================================
depsgraph = bpy.context.evaluated_depsgraph_get()
ob_for_convert = obj.evaluated_get(depsgraph)
try:
MeshObject = obj.to_mesh()
except RuntimeError:
MeshObject = None
if MeshObject is None:
continue
#===========================================[Triangulate Object]====================================================
bm = bmesh.new()
bm.from_mesh(MeshObject)
tris = bm.calc_loop_triangles()
#===========================================[Get Object Data]====================================================
#Get UV Layer Active
UVVertexList = []
for name, uvl in bm.loops.layers.uv.items():
for i, tri in enumerate(tris):
for loop in tri:
DataToAppend = loop[uvl].uv
if DataToAppend not in UVVertexList:
UVVertexList.append(DataToAppend)
if True:
#Get Vertex Colors List
VertexColorList = []
for name, cl in bm.loops.layers.color.items():
for tri in tris:
for loop in tri:
color = loop[cl] # gives a Vector((R, G, B, A))
if color not in VertexColorList:
VertexColorList.append(color)
#===============================================================================================
# MATERIAL LIST
#===============================================================================================
w_new_block('*MATERIAL_LIST {')
write_scope('*MATERIAL_COUNT %u' % GetMaterialCount())
w_new_block('*MATERIAL %u {' % indx)
#Mesh Materials
if len(obj.material_slots) > 0:
currentSubMat = 0
#Material Info
MatData = bpy.data.materials[0]
DiffuseColor = MatData.diffuse_color
write_scope('*MATERIAL_NAME "%s"' % MatData.name)
if not MatData.use_backface_culling:
write_scope('*MATERIAL_TWOSIDED')
write_scope('*NUMSUBMTLS %u ' % len(obj.material_slots))
#Loop through submaterials
for indx, Material_Data in enumerate(obj.material_slots):
if Material_Data.name == '':
continue
MatData = bpy.data.materials[Material_Data.name]
#Material has texture
if MatData.node_tree.nodes.get('Image Texture', None):
ImageNode = MatData.node_tree.nodes.get('Image Texture', None)
ImageName = ImageNode.image.name
DiffuseColor = MatData.diffuse_color
#Submaterial
w_new_block('*SUBMATERIAL %u {' % currentSubMat)
write_scope('*MATERIAL_NAME "%s"' % (os.path.splitext(ImageName)[0]))
write_scope('*MATERIAL_DIFFUSE %.4f %.4f %.4f' % (DiffuseColor[0], DiffuseColor[1], DiffuseColor[2]))
write_scope('*MATERIAL_SPECULAR %u %u %u' % (MatData.specular_color[0], MatData.specular_color[1], MatData.specular_color[2]))
write_scope('*MATERIAL_SHINE %.1f' % MatData.metallic)
write_scope('*MATERIAL_SELFILLUM %u' % int(MatData.use_preview_world))
#Map Diffuse
w_new_block('*MAP_DIFFUSE {')
write_scope('*MAP_NAME "%s"' % (os.path.splitext(ImageName)[0]))
write_scope('*MAP_CLASS "%s"' % "Bitmap")
write_scope('*MAP_AMOUNT "%u"' % 1)
write_scope('*BITMAP "%s"' % (bpy.path.abspath(ImageNode.image.filepath)))
w_end_block('}')
w_end_block('}') # SUBMATERIAL
#Material has no texture
else:
#Submaterial
principled = [n for n in MatData.node_tree.nodes if n.type == 'BSDF_PRINCIPLED']
if principled:
principled = next(n for n in MatData.node_tree.nodes if n.type == 'BSDF_PRINCIPLED')
base_color = principled.inputs['Base Color']
color = base_color.default_value
w_new_block('*SUBMATERIAL %u {' % currentSubMat)
write_scope('*MATERIAL_NAME "%s"' % MatData.name)
write_scope('*MATERIAL_DIFFUSE %.4f %.4f %.4f' % ((color[0] * .5), (color[1] * .5), (color[2] * .5)))
write_scope('*MATERIAL_SPECULAR %u %u %u' % (MatData.specular_color[0], MatData.specular_color[1], MatData.specular_color[2]))
write_scope('*MATERIAL_SHINE %.1f' % MatData.metallic)
write_scope('*MATERIAL_SELFILLUM %u' % int(MatData.use_preview_world))
w_end_block('}') # SUBMATERIAL
currentSubMat += 1
w_end_block('}') # MATERIAL
w_end_block('}') # MATERIAL_LIST
#===========================================[Print Object Data]====================================================
w_new_block('*GEOMOBJECT {')
write_scope('*NODE_NAME "%s"' % obj.name)
#Print Matrix Rotation
PrintNODE_TM('NODE_TM', obj)
#Print Matrix Rotation again ¯\_(ツ)_/¯
#PrintNODE_TM('PIVOT_TM', obj)
#MESH Section
w_new_block('*MESH {')
write_scope('*MESH_NUMVERTEX %u' % len(bm.verts))
write_scope('*MESH_NUMFACES %u' % len(tris))
#Print Vertex List
w_new_block('*MESH_VERTEX_LIST {')
for idx, vert in enumerate(bm.verts):
vtx = vert.co # @ euroland_mtx
write_scope('*MESH_VERTEX %5u %4.4f %4.4f %4.4f' % (idx, vtx.x, vtx.y, vtx.z))
w_end_block('}') # MESH_VERTEX_LIST
# swy: the calc_loop_triangles() doesn't modify the original faces, and instead does temporary ad-hoc triangulation
# returning us a list of three loops per "virtual triangle" that only exists in the returned thingie
# i.e. len(tri_loop) should always be 3, but internally, for each loop .face we're a member of
# still has 4 vertices and the four (different) loops of an n-gon, and .link_loop_next
# points to the original model's loop chain; the loops of our triangle aren't really linked
def tri_edge_is_from_ngon(tri_loop, tri_idx):
return tri_loop[(tri_idx + 1) % len(tri_loop)] == tri_loop[tri_idx].link_loop_next
#Face Vertex Index
w_new_block('*MESH_FACE_LIST {')
for i, tri in enumerate(tris):
write_scope_no_cr('*MESH_FACE %3u:' % i)
out.write(' A: %3u B: %3u C: %3u' % (tri[0].vert.index, tri[1].vert.index, tri[2].vert.index))
out.write(' AB: %u BC: %u CA: %u' % (tri_edge_is_from_ngon(tri, 0), tri_edge_is_from_ngon(tri, 1), tri_edge_is_from_ngon(tri, 2)))
out.write(' *MESH_MTLID %u\n' % tri[0].face.material_index)
w_end_block('}') # MESH_FACE_LIST
#Texture UVs
if len(UVVertexList) > 0:
write_scope('*MESH_NUMTVERTEX %u' % len(UVVertexList))
w_new_block('*MESH_TVERTLIST {')
for idx, TextUV in enumerate(UVVertexList):
write_scope('*MESH_TVERT %3u %.4f %.4f' % (idx, TextUV[0], TextUV[1]))
w_end_block('}') # MESH_TVERTLIST
#Face Layers UVs Index
layerIndex = 0
if len(bm.loops.layers.uv) == 1:
#write_scope('*MESH_NUMTFACELAYERS %u' % len(bm.loops.layers.uv.items()))
for name, uv_lay in bm.loops.layers.uv.items():
#w_new_block('*MESH_TFACELAYER %u {' % layerIndex)
write_scope('*MESH_NUMTVFACES %u' % len(bm.faces))
w_new_block('*MESH_TFACELIST {')
for i, tri in enumerate(tris):
write_scope('*MESH_TFACE %u' % i)
write_scope('%u %u %u' % (UVVertexList.index(tri[0][uv_lay].uv), UVVertexList.index(tri[1][uv_lay].uv), UVVertexList.index(tri[2][uv_lay].uv)))
w_end_block("}") # MESH_TFACELIST
#w_end_block("}")
layerIndex += 1
# swy: refresh the custom mesh layer/attributes in case they don't exist
if 'euro_vtx_flags' not in bm.verts.layers.int:
bm.verts.layers.int.new('euro_vtx_flags')
if 'euro_fac_flags' not in bm.faces.layers.int:
bm.faces.layers.int.new('euro_fac_flags')
euro_vtx_flags = bm.verts.layers.int['euro_vtx_flags']
euro_fac_flags = bm.faces.layers.int['euro_fac_flags']
# swy: add the custom mesh attributes here
write_scope("*MESH_NUMFACEFLAGS %u" % len(bm.faces))
w_new_block("*MESH_FACEFLAGLIST {")
for face in bm.faces:
a = face[euro_fac_flags]
# swy: don't set it where it isn't needed
if face[euro_fac_flags] != 0:
write_scope('*MESH_FACEFLAG %u %u' % (face.index, face[euro_fac_flags]))
w_end_block("}") # MESH_NUMFACEFLAGS
w_new_block('*MESH_VERTFLAGSLIST {')
for idx, vert in enumerate(bm.verts):
# swy: don't set it where it isn't needed
if vert[euro_vtx_flags] != 0:
write_scope('*VFLAG %u %u' % (idx, vert[euro_vtx_flags]))
w_end_block('}') # MESH_VERTFLAGSLIST
if len(VertexColorList) > 0:
#Vertex Colors List
write_scope('*MESH_NUMCVERTEX %u' % len(VertexColorList))
w_new_block('*MESH_CVERTLIST {')
for idx, ColorArray in enumerate(VertexColorList):
write_scope('*MESH_VERTCOL %3u %.4f %.4f %.4f' % (idx, ColorArray[0], ColorArray[1], ColorArray[2]))
redirected to one of the other fault domains
and the MySQL instance in that domain is promoted to the primary instance.
This redirection does not affect the IP address of the DB System in any way.
For a standalone DB System, this defines the fault domain in which the DB System is placed.
:param fault_domain: The fault_domain of this CreateDbSystemDetails.
:type: str
"""
self._fault_domain = fault_domain
@property
def configuration_id(self):
"""
Gets the configuration_id of this CreateDbSystemDetails.
The OCID of the Configuration to be used for this DB System.
:return: The configuration_id of this CreateDbSystemDetails.
:rtype: str
"""
return self._configuration_id
@configuration_id.setter
def configuration_id(self, configuration_id):
"""
Sets the configuration_id of this CreateDbSystemDetails.
The OCID of the Configuration to be used for this DB System.
:param configuration_id: The configuration_id of this CreateDbSystemDetails.
:type: str
"""
self._configuration_id = configuration_id
@property
def shape_name(self):
"""
**[Required]** Gets the shape_name of this CreateDbSystemDetails.
The name of the shape. The shape determines the resources allocated
- CPU cores and memory for VM shapes; CPU cores, memory and storage
for non-VM (or bare metal) shapes. To get a list of shapes, use the
:func:`list_shapes` operation.
:return: The shape_name of this CreateDbSystemDetails.
:rtype: str
"""
return self._shape_name
@shape_name.setter
def shape_name(self, shape_name):
"""
Sets the shape_name of this CreateDbSystemDetails.
The name of the shape. The shape determines the resources allocated
- CPU cores and memory for VM shapes; CPU cores, memory and storage
for non-VM (or bare metal) shapes. To get a list of shapes, use the
:func:`list_shapes` operation.
:param shape_name: The shape_name of this CreateDbSystemDetails.
:type: str
"""
self._shape_name = shape_name
@property
def mysql_version(self):
"""
Gets the mysql_version of this CreateDbSystemDetails.
The specific MySQL version identifier.
:return: The mysql_version of this CreateDbSystemDetails.
:rtype: str
"""
return self._mysql_version
@mysql_version.setter
def mysql_version(self, mysql_version):
"""
Sets the mysql_version of this CreateDbSystemDetails.
The specific MySQL version identifier.
:param mysql_version: The mysql_version of this CreateDbSystemDetails.
:type: str
"""
self._mysql_version = mysql_version
@property
def subnet_id(self):
"""
**[Required]** Gets the subnet_id of this CreateDbSystemDetails.
The OCID of the subnet the DB System is associated with.
:return: The subnet_id of this CreateDbSystemDetails.
:rtype: str
"""
return self._subnet_id
@subnet_id.setter
def subnet_id(self, subnet_id):
"""
Sets the subnet_id of this CreateDbSystemDetails.
The OCID of the subnet the DB System is associated with.
:param subnet_id: The subnet_id of this CreateDbSystemDetails.
:type: str
"""
self._subnet_id = subnet_id
@property
def admin_username(self):
"""
**[Required]** Gets the admin_username of this CreateDbSystemDetails.
The username for the administrative user.
:return: The admin_username of this CreateDbSystemDetails.
:rtype: str
"""
return self._admin_username
@admin_username.setter
def admin_username(self, admin_username):
"""
Sets the admin_username of this CreateDbSystemDetails.
The username for the administrative user.
:param admin_username: The admin_username of this CreateDbSystemDetails.
:type: str
"""
self._admin_username = admin_username
@property
def admin_password(self):
"""
**[Required]** Gets the admin_password of this CreateDbSystemDetails.
The password for the administrative user. The password must be
between 8 and 32 characters long, and must contain at least 1
numeric character, 1 lowercase character, 1 uppercase character, and
1 special (nonalphanumeric) character.
:return: The admin_password of this CreateDbSystemDetails.
:rtype: str
"""
return self._admin_password
@admin_password.setter
def admin_password(self, admin_password):
"""
Sets the admin_password of this CreateDbSystemDetails.
The password for the administrative user. The password must be
between 8 and 32 characters long, and must contain at least 1
numeric character, 1 lowercase character, 1 uppercase character, and
1 special (nonalphanumeric) character.
:param admin_password: The admin_password of this CreateDbSystemDetails.
:type: str
"""
self._admin_password = admin_password
@property
def data_storage_size_in_gbs(self):
"""
Gets the data_storage_size_in_gbs of this CreateDbSystemDetails.
Initial size of the data volume in GBs that will be created and attached.
Keep in mind that this only specifies the size of the database data volume,
the log volume for the database will be scaled appropriately with its shape.
:return: The data_storage_size_in_gbs of this CreateDbSystemDetails.
:rtype: int
"""
return self._data_storage_size_in_gbs
@data_storage_size_in_gbs.setter
def data_storage_size_in_gbs(self, data_storage_size_in_gbs):
"""
Sets the data_storage_size_in_gbs of this CreateDbSystemDetails.
Initial size of the data volume in GBs that will be created and attached.
Keep in mind that this only specifies the size of the database data volume,
the log volume for the database will be scaled appropriately with its shape.
:param data_storage_size_in_gbs: The data_storage_size_in_gbs of this CreateDbSystemDetails.
:type: int
"""
self._data_storage_size_in_gbs = data_storage_size_in_gbs
@property
def hostname_label(self):
"""
Gets the hostname_label of this CreateDbSystemDetails.
The hostname for the primary endpoint of the DB System. Used for DNS.
The value is the hostname portion of the primary private IP's fully qualified domain name (FQDN)
(for example, \"dbsystem-1\" in FQDN \"dbsystem-1.subnet123.vcn1.oraclevcn.com\").
Must be unique across all VNICs in the subnet and comply with RFC 952 and RFC 1123.
:return: The hostname_label of this CreateDbSystemDetails.
:rtype: str
"""
return self._hostname_label
@hostname_label.setter
def hostname_label(self, hostname_label):
"""
Sets the hostname_label of this CreateDbSystemDetails.
The hostname for the primary endpoint of the DB System. Used for DNS.
The value is the hostname portion of the primary private IP's fully qualified domain name (FQDN)
(for example, \"dbsystem-1\" in FQDN \"dbsystem-1.subnet123.vcn1.oraclevcn.com\").
Must be unique across all VNICs in the subnet and comply with RFC 952 and RFC 1123.
:param hostname_label: The hostname_label of this CreateDbSystemDetails.
:type: str
"""
self._hostname_label = hostname_label
@property
def ip_address(self):
"""
Gets the ip_address of this CreateDbSystemDetails.
The IP address the DB System is configured to listen on.
A private IP address of your choice to assign to the primary endpoint of the DB System.
Must be an available IP address within the subnet's CIDR. If you don't specify a value,
Oracle automatically assigns a private IP address from the subnet. This should be a
\"dotted-quad\" style IPv4 address.
:return: The ip_address of this CreateDbSystemDetails.
:rtype: str
"""
return self._ip_address
@ip_address.setter
def ip_address(self, ip_address):
"""
Sets the ip_address of this CreateDbSystemDetails.
The IP address the DB System is configured to listen on.
A private IP address of your choice to assign to the primary endpoint of the DB System.
Must be an available IP address within the subnet's CIDR. If you don't specify a value,
Oracle automatically assigns a private IP address from the subnet. This should be a
\"dotted-quad\" style IPv4 address.
:param ip_address: The ip_address of this CreateDbSystemDetails.
:type: str
"""
self._ip_address = ip_address
@property
def port(self):
"""
Gets the port of this CreateDbSystemDetails.
The port for primary endpoint of the DB System to listen on.
:return: The port of this CreateDbSystemDetails.
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port of this CreateDbSystemDetails.
The port for primary endpoint of the DB System to listen on.
:param port: The port of this CreateDbSystemDetails.
:type: int
"""
self._port = port
@property
def port_x(self):
"""
Gets the port_x of this CreateDbSystemDetails.
The TCP network port on which X Plugin listens for connections. This is the X Plugin equivalent of port.
:return: The port_x of this CreateDbSystemDetails.
:rtype: int
"""
return self._port_x
@port_x.setter
def port_x(self, port_x):
"""
Sets the port_x of this CreateDbSystemDetails.
The TCP network port on which X Plugin listens for connections. This is the X Plugin equivalent of port.
:param port_x: The port_x of this CreateDbSystemDetails.
:type: int
"""
self._port_x = port_x
@property
def backup_policy(self):
"""
Gets the backup_policy of this CreateDbSystemDetails.
:return: The backup_policy of this CreateDbSystemDetails.
:rtype: oci.mysql.models.CreateBackupPolicyDetails
"""
return self._backup_policy
@backup_policy.setter
def backup_policy(self, backup_policy):
"""
Sets the backup_policy of this CreateDbSystemDetails.
:param backup_policy: The backup_policy of this CreateDbSystemDetails.
:type: oci.mysql.models.CreateBackupPolicyDetails
"""
self._backup_policy = backup_policy
@property
def source(self):
"""
Gets the source of this CreateDbSystemDetails.
:return: The source of this CreateDbSystemDetails.
:rtype: oci.mysql.models.CreateDbSystemSourceDetails
"""
return self._source
@source.setter
def source(self, source):
"""
Sets the source of this CreateDbSystemDetails.
:param source: The source of this CreateDbSystemDetails.
:type: oci.mysql.models.CreateDbSystemSourceDetails
"""
self._source = source
@property
def maintenance(self):
"""
Gets the maintenance of this CreateDbSystemDetails.
:return: The maintenance of this CreateDbSystemDetails.
:rtype: oci.mysql.models.CreateMaintenanceDetails
"""
return self._maintenance
@maintenance.setter
def maintenance(self, maintenance):
"""
Sets the maintenance of this CreateDbSystemDetails.
:param maintenance: The maintenance of this CreateDbSystemDetails.
:type: oci.mysql.models.CreateMaintenanceDetails
"""
self._maintenance = maintenance
import abstract_component
import os
import subprocess
import numpy as np
import exodus
from output_suppression import Suppressor
from output_suppression import suppress_stdout_stderr
import sync_times
import sys
sys.path.append("/gpfs1/jtencer/UQTk_v3.0.4-install")
import PyUQTk.pce as uqtkpce
import PyUQTk.uqtkarray as uqtkarray
class exogenous_port:
"""
Uncertain parameters set using aprepro
"""
def __init__(self, varname, nominal_value, uncertainty):
self.varname = varname
self.mean = nominal_value
self.std = uncertainty
self.value = nominal_value
def __str__(self):
return str(self.__class__) + ": " + str(self.__dict__)
def __repr__(self):
return "exogenous_port(%s, %s, %s)" % (self.varname, self.mean, self.std)
class endogenous_port:
"""
Connections to other components
"""
def __init__(self, sideset_name, field_name):
self.ssname = sideset_name
self.varname = field_name
def __str__(self):
return str(self.__class__) + ":" + str(self.__dict__)
def __repr__(self):
return "endogenous_port(%s, %s)" % (self.ssname, self.varname)
class simple_pressio_aria_component(abstract_component.Component):
"""
Single component in DDUQ network with foward problem implemented as an aria model
"""
def __init__(self, inputdeckfilename, outputfilename, np=1, output_times=None):
"""
Initialize component object.
Run initial simulation to make sure output exodus file exists.
"""
self.inputdeck = inputdeckfilename
self.outfile = outputfilename
self.pce_file = "%s.pce" % outputfilename.split('.')[:-1][0]
self.endogenous_output_ports = []
self.exogenous_ports = []
self.QoIs = []
self.num_procs = np
self.required_files = []
self.pc_model = None
self.num_endogenous_nodes = 0
self.num_timesteps = 0
self.output_times = output_times
if not os.path.isfile(self.outfile):
print("initializing component: %s" % inputdeckfilename)
aprfile = "%s.apr" % self.inputdeck.split('.')[:-1][0]
with open(os.devnull, 'w') as devnull:
subprocess.check_output(["aprepro", "initialize=1", self.inputdeck, aprfile], stderr=devnull)
subprocess.check_output(["sierra", "--pre", "-n", str(self.num_procs), "aria", "-i", aprfile])
subprocess.check_output(["sierra", "--run", "-n", str(self.num_procs), "aria", "-i", aprfile])
subprocess.check_output(["rm", aprfile])
self.required_files.append(inputdeckfilename)
self.required_files.append(outputfilename)
def __str__(self):
return str(self.__class__) + ": " + str(self.__dict__)
def __call__(self):
return self.execute()
def execute(self):
"""
Perform forward problem
"""
aprfile = "%s.apr" % self.inputdeck.split('.')[:-1][0]
apr_command=["aprepro"]
for port in self.exogenous_ports:
apr_command.append("%s=%f" % (port.varname, port.value))
apr_command.append(self.inputdeck)
apr_command.append(aprfile)
with open(os.devnull, 'w') as devnull:
subprocess.check_output(apr_command, stderr=devnull)
if self.num_procs > 1:
subprocess.check_output(["sierra", "--pre", "-n", str(self.num_procs), "aria", "-i", aprfile])
with open(os.devnull, 'w') as devnull:
p = subprocess.Popen(["sierra", "--run", "-n", str(self.num_procs), "aria", "-i", aprfile], stdout=devnull)
return p
def add_endogenous_port(self, ssname, varname):
"""
Add an endogenous port between two aria_component instances
Port is specified on the *sending* component
"""
with Suppressor():
e = exodus.exodus(self.outfile, mode='r')
ssnames = e.get_side_set_names()
e.close()
assert ssname in ssnames, "%s not a sideset in %s." % (ssname, self.outfile)
my_port = endogenous_port(ssname,varname)
self.endogenous_output_ports.append(my_port)
def add_exogenous_port(self, varname, nominal_value, uncertainty):
"""
Specify aprepro variable that will be used as an exogenous input
"""
self.exogenous_ports.append(exogenous_port(varname, nominal_value, uncertainty))
def add_QoI(self, varname):
"""
Specify global variables from exodus output to be treated at QoIs
"""
with Suppressor():
e = exodus.exodus(self.outfile, mode='r')
gnames = e.get_global_variable_names()
e.close()
assert varname in gnames, "%s not a global variable in %s." % (varname, self.outfile)
self.QoIs.append(varname)
def get_num_endogenous_ports(self):
nodes = self.get_num_endogenous_nodes()
steps = self.get_num_timesteps()
return nodes * steps
def get_num_endogenous_nodes(self):
if (self.num_endogenous_nodes == 0) and (len(self.endogenous_output_ports) > 0):
self.get_endogenous_data()
return self.num_endogenous_nodes
def get_num_timesteps(self):
if self.num_timesteps == 0:
self.num_timesteps = len(self.get_solution_times())
return self.num_timesteps
def set_solution_times(self, times):
self.output_times = times
self.num_timesteps = len(times)
def get_solution_times(self):
if self.output_times:
print("using self.output_times")
times = self.output_times
else:
print("no output times found, reading from exodus")
with Suppressor():
e = exodus.exodus(self.outfile, mode='r')
times = e.get_times()
e.close()
return times
def get_num_exogenous_ports(self):
return len(self.exogenous_ports)
def get_output_filename(self):
return self.outfile
def get_exogenous_ports(self):
return self.exogenous_ports
def get_num_QoI(self):
return len(self.QoIs)
def get_QoIs(self):
return self.QoIs
def get_num_pc_terms(self):
return self.pc_model.GetNumberPCTerms()
def get_pc_model(self):
return self.pc_model
def get_num_pc_coeffs(self):
return self.get_num_pc_terms()*self.get_num_endogenous_ports()
def get_endogenous_data(self):
"""
Retrieve my output data at endogenous nodes
"""
if self.output_times:
sync_times.interpolate_to_timeline(self.outfile, self.outfile+".tmp", self.output_times)
os.rename(self.outfile+".tmp", self.outfile)
with Suppressor():
e = exodus.exodus(self.outfile, mode='r')
ss_ids = e.get_side_set_ids()
ss_names = e.get_side_set_names()
dictionary = dict(zip(ss_names, ss_ids))
# Get list of time steps for which to provide data
times = e.get_times()
self.num_timesteps = len(times)
vals=[]
for timestep in range(self.num_timesteps):
self.num_endogenous_nodes = 0
for port in self.endogenous_output_ports:
endogenous_vals = {}
ssid = e.get_side_set_node_list(dictionary[port.ssname])[1]
nodal_values = e.get_node_variable_values(port.varname,timestep+1)
side_set_unique_node_ids = set(ssid)
for nid in side_set_unique_node_ids:
endogenous_vals[nid] = nodal_values[nid-1]
vals.append(endogenous_vals)
self.num_endogenous_nodes += len(endogenous_vals)
with Suppressor():
e.close()
return vals
def get_all_data(self, varname, filename=None):
"""
Retrieve my output data at all nodes
"""
if filename==None:
filename = self.outfile
with Suppressor():
e = exodus.exodus(filename, mode='r')
# Get list of time steps for which to provide data
times = e.get_times()
self.num_timesteps = len(times)
vals=[]
for timestep in range(self.num_timesteps):
nodal_values = e.get_node_variable_values(varname,timestep+1)
vals.append(nodal_values)
e.close()
return vals
def get_QoI_data(self):
with Suppressor():
e = exodus.exodus(self.outfile, mode='r')
QoI_vals = {}
for QoI in self.QoIs:
QoI_vals[QoI] = e.get_global_variable_values(QoI)
with Suppressor():
e.close()
return QoI_vals
def get_required_files(self):
return self.required_files
def add_required_files(self, files):
for f in files:
if f not in self.required_files:
self.required_files.append(f)
def generate_pc_model(self, pce_dim, nord=3, pc_type="HG", pc_alpha=0.0, pc_beta=1.0, quadtype='full'):
"""
Wrapper for uqtk PCSet with default values
"""
param = nord+1 # Parameter for quadrature point generation. Equal to number of quad points per dimension for full quadrature
self.pc_model = uqtkpce.PCSet("NISP", nord,pce_dim,pc_type, pc_alpha,pc_beta)
self.pc_model.SetQuadRule(pc_type, quadtype, param)
def initialize_PCE(self):
if os.path.isfile(self.pce_file):
# Read initial PCE values from exodus file
my_endo_pce_coeffs = np.zeros(( self.get_num_endogenous_ports(), self.get_num_pc_terms() ))
varnames = []
for coeff_idx in range(self.get_num_pc_terms()):
varnames.append('PCE_%d' % coeff_idx)
e = exodus.exodus(self.pce_file, mode='r')
ss_ids = e.get_side_set_ids()
ss_names = e.get_side_set_names()
dictionary = dict(zip(ss_names, ss_ids))
# Get list of nodes for which to provide data
#TODO: This likely broken from port change
all_side_set_node_ids = []
for port in self.endogenous_output_ports:
side_set_node_ids = e.get_side_set_node_list(dictionary[port.ssname])[1]
all_side_set_node_ids.append(side_set_node_ids)
endo_map = self.get_endogenous_data()
for timestep, node_map in enumerate(endo_map):
print("timestep: %d" % timestep)
for coeff_idx in range(self.get_num_pc_terms()):
varname = varnames[coeff_idx]
nodal_values = e.get_node_variable_values(varname,1)
for ssid in all_side_set_node_ids:
side_set_unique_node_ids = set(ssid)
for nid in side_set_unique_node_ids:
index = timestep*self.num_endogenous_nodes + node_map.keys().index(nid)
my_endo_pce_coeffs[index,coeff_idx] = nodal_values[nid-1]
e.close()
else:
endo_init = self.get_endogenous_data()
my_endo_pce_coeffs = np.zeros(( self.get_num_endogenous_ports(), self.get_num_pc_terms() ))
index = 0
for timestep in range(self.num_timesteps):
for portid,port in enumerate(self.endogenous_output_ports):
nodal_data = endo_init[timestep*len(self.endogenous_output_ports) + portid]
for nid in nodal_data:
my_endo_pce_coeffs[index,0] = nodal_data[nid]
index += 1
return my_endo_pce_coeffs
def GalerkinProjection(self,f_evaluations):
"""
Obtain PC coefficients by Galerkin Projection
Input:
f_evaluations: 1D numpy array (vector) with function to be projected,
evaluated at the quadrature points
Output:
Numpy array with PC coefficients
"""
# Get parameters
if len(f_evaluations.shape) > 1:
print("This function can only project single variables for now")
exit(1)
npce = self.pc_model.GetNumberPCTerms()
nqp = f_evaluations.shape[0] # Number of quadrature points
# UQTk array for PC coefficients for one variable
c_k_1d_uqtk = uqtkarray.dblArray1D(npce,0.0)
# UQTk array for function evaluations at quadrature points for that variable
f_uqtk = uqtkarray.dblArray1D(nqp,0.0)
for ipt in range(nqp):
f_uqtk[ipt]=f_evaluations[ipt]
# Galerkin Projection
self.pc_model.GalerkProjection(f_uqtk,c_k_1d_uqtk)
# Put PC coefficients in numpy array
c_k = np.zeros(npce)
for ip in range(npce):
c_k[ip] = c_k_1d_uqtk[ip]
# Return numpy array of PC coefficients
return c_k
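# Minimal usage sketch (only the methods defined above are assumed; array
# contents are placeholders): build the PC model first, then evaluate the
# forward model once per quadrature point and project:
#   comp.generate_pc_model(pce_dim=2, nord=3)
#   f_at_quad = np.array([...])                  # one evaluation per quad point
#   coeffs = comp.GalerkinProjection(f_at_quad)  # len(coeffs) == comp.get_num_pc_terms()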
def evaluate_pce(self, pc_coeffs,germ_samples):
"""
Evaluate PCE at a set of samples of the germ of this PCE
Input:
pc_coeffs: 1D numpy array with PC coefficients of the RVs to be evaluated.
Each column corresponds to one RV.
germ_samples: numpy array with samples of the PCE germ at which the RVs
are to be evaluated. Each line is one sample. The number
of columns is the number of RVs.
Output:
Numpy array with PCE evaluations
"""
# Get data set dimensions etc.
n_test_samples = germ_samples.shape[0]
ndim = germ_samples.shape[1]
npce = self.pc_model.GetNumberPCTerms()
# Put PC germ samples in a UQTk array
std_samples_uqtk = uqtkarray.dblArray2D(n_test_samples, ndim)
std_samples_uqtk.setnpdblArray(np.asfortranarray(germ_samples))
# Numpy array to store all RVs evaluated from sampled PCEs
rvs_sampled = np.zeros(n_test_samples)
# Evaluate PCE for RVs in each dimension
# Create and fill UQTk array for PC coefficients
c_k_1d_uqtk = uqtkarray.dblArray1D(npce,0.0)
for ip in range(npce):
c_k_1d_uqtk[ip] = pc_coeffs[ip]
# Create UQTk array to store outputs in
rv_from_pce_uqtk = uqtkarray.dblArray1D(n_test_samples,0.0)
# Evaluate the PCEs for each input RV at those random samples
self.pc_model.EvalPCAtCustPoints(rv_from_pce_uqtk,std_samples_uqtk,c_k_1d_uqtk)
# Put evaluated samples in numpy array
for isamp in range(n_test_samples):
rvs_sampled[isamp] = rv_from_pce_uqtk[isamp]
# Return numpy array of PCE evaluations
return rvs_sampled
def save_nodal_pce(self, pc_coeffs, meshfilename, outputfilename):
if os.path.isfile(outputfilename): os.remove(outputfilename)
print("Save nodal PCE %s" % outputfilename)
times = self.get_solution_times()
varnames = []
for coeff_idx in range(pc_coeffs.shape[1]):
varnames.append('PCE_%d' % coeff_idx)
e = exodus.copy_mesh(meshfilename, outputfilename)
e.close()
e = exodus.exodus(outputfilename, mode='a')
exodus.add_variables(e, nodal_vars=varnames)
numnodes = pc_coeffs.shape[0]/self.num_timesteps
for timestep in range(self.num_timesteps):
for coeff_idx in range(pc_coeffs.shape[1]):
varname = varnames[coeff_idx]
nodal_values = e.get_node_variable_values(varname,1)
for nidx in range(numnodes):
index = timestep*numnodes + nidx
nodal_values[nidx] = pc_coeffs[index,coeff_idx]
e.put_node_variable_values(varname,timestep+1,nodal_values)
e.put_time(timestep+1,times[timestep])
e.close()
def set_all_endogenous_values(self, pc_coeffs, germ):
"""
Sample polynomial chaos expansion for endogenous values at germ
| |
# --------------------------------------------------------
# Deep Feature Flow
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import numpy as np
import os
import cv2
import random
from PIL import Image
from bbox.bbox_transform import clip_boxes
import coviar_py2
GOP_SIZE = 12
# TODO: These two functions should be merged with the individual data loaders
def get_image(roidb, config, cur_frame_id):
"""
preprocess image and return processed roidb
:param roidb: a list of roidb
:return: list of img as in mxnet format
roidb add new item['im_info']
0 --- x (width, second dim of im)
|
y (height, first dim of im)
"""
num_images = len(roidb)
processed_ims = []
processed_roidb = []
processed_motion_vector = []
processed_res_diff = []
for i in range(num_images):
roi_rec = roidb[i]
im_h = int(roi_rec['height'])
im_w = int(roi_rec['width'])
motion_vector = np.zeros((im_h, im_w, 2), dtype=np.float32)
res_diff = np.zeros((im_h, im_w, 3), dtype=np.float32)
num_frames = roi_rec['frame_seg_len']
gop_id = cur_frame_id // GOP_SIZE
pos_id = cur_frame_id % GOP_SIZE
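# e.g. with GOP_SIZE = 12, frame 26 lives in GOP 2 at position 2 within that GOP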
#assert os.path.exists(roi_rec['image']), '%s does not exist'.format(roi_rec['image'])
if cur_frame_id + 1 == num_frames:
im = cv2.imread(roi_rec['image'], cv2.IMREAD_COLOR|cv2.IMREAD_IGNORE_ORIENTATION)
else:
image_dirs = roi_rec['image'].split('/')
video_name = image_dirs[-2] + '.mp4'
video_dir = os.path.join(image_dirs[0], image_dirs[1], image_dirs[2], image_dirs[3], image_dirs[4], 'mpeg4_snippets', image_dirs[5], video_name)
assert os.path.exists(video_dir), '%s does not exist' % video_dir
im = coviar_py2.load(video_dir, gop_id, pos_id, 0, True).astype(np.float32)
motion_vector = coviar_py2.load(video_dir, gop_id, pos_id, 1, True).astype(np.float32)
motion_vector = - motion_vector
res_diff = coviar_py2.load(video_dir, gop_id, pos_id, 2, True).astype(np.float32)
if roidb[i]['flipped']:
im = im[:, ::-1, :]
motion_vector = motion_vector[:, ::-1]
motion_vector[:, :, 0] = - motion_vector[:, :, 0]
res_diff = res_diff[:, ::-1, :]
new_rec = roi_rec.copy()
scale_ind = random.randrange(len(config.SCALES))
target_size = config.SCALES[scale_ind][0]
max_size = config.SCALES[scale_ind][1]
im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
im_tensor = transform(im, config.network.PIXEL_MEANS, config.network.PIXEL_SCALE)
motion_vector_tensor, res_diff_tensor = transform_mv_res(motion_vector, res_diff, im_scale, config.network.PIXEL_MEANS, config.network.PIXEL_SCALE)
processed_ims.append(im_tensor)
processed_motion_vector.append(motion_vector_tensor)
processed_res_diff.append(res_diff_tensor)
im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
new_rec['boxes'] = clip_boxes(np.round(roi_rec['boxes'].copy() * im_scale), im_info[:2])
new_rec['im_info'] = im_info
processed_roidb.append(new_rec)
return processed_ims, processed_roidb, processed_motion_vector, processed_res_diff
def check_reconstruction(ref_im, im, motion_vector, res_diff, video_dir, cur_frame_id, ref_id, gop_id, pos_id, ref_gop_id, ref_pos_id):
im_h, im_w, _ = im.shape
for i in range(im_w):
for j in range(im_h):
mv_i_, mv_j_ = motion_vector[j, i]
mv_i = i - mv_i_
mv_j = j - mv_j_
res = res_diff[j, i]
if not (ref_im[mv_j, mv_i] + res == im[j, i]).all():
import pdb;pdb.set_trace()
return True
def get_pair_image(roidb, config):
"""
preprocess image and return processed roidb
:param roidb: a list of roidb
:return: list of img as in mxnet format
roidb add new item['im_info']
0 --- x (width, second dim of im)
|
y (height, first dim of im)
"""
num_images = len(roidb)
processed_ims = []
processed_ref_ims = []
processed_old_ref_ims = []
processed_eq_flags = []
processed_eq_flags_old = []
processed_roidb = []
processed_motion_vector = []
processed_res_diff = []
for i in range(num_images):
roi_rec = roidb[i]
eq_flag = 0 # 0 for unequal, 1 for equal
eq_flag_old = 0
assert os.path.exists(roi_rec['image']), '%s does not exist' % roi_rec['image']
#im = cv2.imread(roi_rec['image'], cv2.IMREAD_COLOR|cv2.IMREAD_IGNORE_ORIENTATION)
im_h = int(roi_rec['height'])
im_w = int(roi_rec['width'])
motion_vector = np.zeros((im_h, im_w, 2))
res_diff = np.zeros((im_h, im_w, 3))
#import pdb;pdb.set_trace()
if roi_rec.has_key('pattern'):
ref_id = min(max(roi_rec['frame_seg_id'] + np.random.randint(config.TRAIN.MIN_OFFSET, config.TRAIN.MAX_OFFSET+1), 0),roi_rec['frame_seg_len']-1)
cur_frame_id = roi_rec['frame_seg_id']
gop_id = cur_frame_id // GOP_SIZE
pos_id = cur_frame_id % GOP_SIZE
image_dirs = roi_rec['image'].split('/')
video_name = image_dirs[-2] + '.mp4'
video_dir = os.path.join(image_dirs[0], image_dirs[1], image_dirs[2], image_dirs[3], image_dirs[4], 'mpeg4_snippets', image_dirs[5], image_dirs[6], video_name)
assert os.path.exists(video_dir), '%s does not exist' % video_dir
num_frames = coviar_py2.get_num_frames(video_dir)
if num_frames == cur_frame_id: #last video frame. coviar can not decode the last frame
ref_id = cur_frame_id
im = cv2.imread(roi_rec['image'], cv2.IMREAD_COLOR|cv2.IMREAD_IGNORE_ORIENTATION)
ref_im = im.copy()
old_ref_im = im.copy()
else: #this frame is not the last frame, then load from coviar
im = coviar_py2.load(video_dir, gop_id, pos_id, 0, True).astype(np.float32)
if pos_id == 0 or ref_id == cur_frame_id: #key frame or last frame or just random key frame
ref_id = cur_frame_id
eq_flag = 1
ref_im = im.copy()
old_ref_im = im.copy()
else:
ref_id = gop_id * GOP_SIZE
#ref_image = roi_rec['pattern'] % ref_id
#assert os.path.exists(ref_image), '%s does not exist'.format(ref_image)
#ref_im = cv2.imread(ref_image, cv2.IMREAD_COLOR|cv2.IMREAD_IGNORE_ORIENTATION)
ref_gop_id = ref_id // GOP_SIZE
ref_pos_id = ref_id % GOP_SIZE
old_ref_gop_id = ref_gop_id - 1 if ref_gop_id > 0 else 0
eq_flag_old = 1 if old_ref_gop_id == ref_gop_id else 0
old_ref_im = coviar_py2.load(video_dir, old_ref_gop_id, ref_pos_id, 0, True).astype(np.float32)
ref_im = coviar_py2.load(video_dir, ref_gop_id, ref_pos_id, 0, True).astype(np.float32)
motion_vector = coviar_py2.load(video_dir, gop_id, pos_id, 1, True)
motion_vector = - motion_vector
res_diff = coviar_py2.load(video_dir, gop_id, pos_id, 2, True)
else:
im = cv2.imread(roi_rec['image'], cv2.IMREAD_COLOR|cv2.IMREAD_IGNORE_ORIENTATION)
ref_im = im.copy()
old_ref_im = im.copy()
eq_flag = 1
if roidb[i]['flipped']:
im = im[:, ::-1, :]
ref_im = ref_im[:, ::-1, :]
old_ref_im = old_ref_im[:, ::-1, :]
motion_vector = motion_vector[:, ::-1]
motion_vector[:, :, 0] = - motion_vector[:, :, 0]
res_diff = res_diff[:, ::-1, :]
#check motion vector and residual difference
#if eq_flag == 0:
# print roidb[i]['flipped']
# check_reconstruction(ref_im, im, motion_vector, res_diff, video_dir, cur_frame_id, ref_id, gop_id, pos_id, ref_gop_id, ref_pos_id)
new_rec = roi_rec.copy()
scale_ind = random.randrange(len(config.SCALES))
target_size = config.SCALES[scale_ind][0]
max_size = config.SCALES[scale_ind][1]
im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
ref_im, im_scale = resize(ref_im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
old_ref_im, im_scale = resize(old_ref_im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
im_tensor = transform(im, config.network.PIXEL_MEANS, config.network.PIXEL_SCALE)
ref_im_tensor = transform(ref_im, config.network.PIXEL_MEANS, config.network.PIXEL_SCALE)
old_ref_im_tensor = transform(old_ref_im, config.network.PIXEL_MEANS, config.network.PIXEL_SCALE)
motion_vector_tensor, res_diff_tensor = transform_mv_res(motion_vector, res_diff, im_scale, config.network.PIXEL_MEANS, config.network.PIXEL_SCALE)
processed_ims.append(im_tensor)
processed_ref_ims.append(ref_im_tensor)
processed_old_ref_ims.append(old_ref_im_tensor)
processed_eq_flags.append(eq_flag)
processed_eq_flags_old.append(eq_flag_old)
processed_motion_vector.append(motion_vector_tensor)
processed_res_diff.append(res_diff_tensor)
im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
new_rec['boxes'] = roi_rec['boxes'].copy() * im_scale
new_rec['im_info'] = im_info
processed_roidb.append(new_rec)
return processed_ims, processed_ref_ims, processed_old_ref_ims, processed_eq_flags, processed_eq_flags_old, processed_roidb, processed_motion_vector, processed_res_diff
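# Usage sketch (illustrative only; the roidb entries and config come from the
# surrounding training code and are not constructed here):
#
#   ims, ref_ims, old_ref_ims, eq_flags, eq_flags_old, rec, mvs, res_diffs = \
#       get_pair_image(roidb, config)
#   # each ims[i] / ref_ims[i] / old_ref_ims[i] is a (1, 3, H, W) image tensor,
#   # each mvs[i] is (1, 2, ~H/16, ~W/16) and each res_diffs[i] is (1, 3, ~H/16, ~W/16),
#   # and eq_flags[i] == 1 when the current frame serves as its own reference.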
def transform_mv_res(motion_vector, res_diff, im_scale, pixel_means, pixel_scale, rcnn_stride=16, interpolation = cv2.INTER_LINEAR):
motion_vector = cv2.resize(motion_vector.astype(np.float32), None, None, fx=im_scale, fy=im_scale, interpolation=interpolation)
res_diff = cv2.resize(res_diff.astype(np.float32), None, None, fx=im_scale, fy=im_scale, interpolation=interpolation)
im_h, im_w, _ = res_diff.shape
p_im_h = int(np.ceil(im_h / float(rcnn_stride)) * rcnn_stride)
p_im_w = int(np.ceil(im_w / float(rcnn_stride)) * rcnn_stride)
padded_motion_vector = np.zeros((p_im_h, p_im_w, 2))
padded_res_diff = np.zeros((p_im_h, p_im_w, 3))
padded_motion_vector[:im_h, :im_w] = motion_vector
padded_res_diff[:im_h, :im_w] = res_diff
    # swap channels (BGR -> RGB) while normalizing; copy first so the in-place
    # writes below do not read channels that were already overwritten
    bgr_res_diff = padded_res_diff.copy()
    for i in range(3):
        padded_res_diff[:, :, i] = (bgr_res_diff[:, :, 2 - i] - pixel_means[2 - i]) * pixel_scale
rcnn_scale = 1.0 / rcnn_stride
resize_motion_vector = cv2.resize(padded_motion_vector, None, None, fx=rcnn_scale, fy=rcnn_scale, interpolation=interpolation)
resize_res_diff = cv2.resize(padded_res_diff, None, None, fx=rcnn_scale, fy=rcnn_scale, interpolation=interpolation)
scale = im_scale * rcnn_scale
resize_motion_vector *= scale
tensor_h, tensor_w, _ = resize_res_diff.shape
motion_vector_tensor = resize_motion_vector.transpose((2, 0, 1)).reshape(1, 2, tensor_h, tensor_w)
res_diff_tensor = resize_res_diff.transpose((2, 0, 1)).reshape(1, 3, tensor_h, tensor_w)
#motion_vector_tensor[:] = np.random.randint(-10, 10)
#motion_vector_tensor[:] = 0.0
#res_diff_tensor[:] = 0.0
return motion_vector_tensor, res_diff_tensor
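# Minimal runnable sketch of transform_mv_res (the pixel means below are
# placeholders, not the real config.network.PIXEL_MEANS):
def _transform_mv_res_usage_sketch():
    mv = np.zeros((600, 800, 2), dtype=np.float32)
    res = np.zeros((600, 800, 3), dtype=np.float32)
    # at im_scale=1.0 the 600x800 inputs are padded to 608x800 (stride 16)
    # and then downsampled by 1/16, giving 38x50 feature-map-sized tensors
    mv_t, res_t = transform_mv_res(mv, res, 1.0, np.array([103.0, 116.0, 123.0]), 1.0)
    assert mv_t.shape == (1, 2, 38, 50)
    assert res_t.shape == (1, 3, 38, 50)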
def resize(im, target_size, max_size, stride=0, interpolation = cv2.INTER_LINEAR):
"""
only resize input image to target size and return scale
:param im: BGR image input by opencv
:param target_size: one dimensional size (the short side)
:param max_size: one dimensional max size (the long side)
:param stride: if given, pad the image to designated stride
:param interpolation: if given, using given interpolation method to resize image
:return:
"""
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=interpolation)
if stride == 0:
return im, im_scale
else:
# pad to product of stride
im_height = int(np.ceil(im.shape[0] / float(stride)) * stride)
im_width = int(np.ceil(im.shape[1] / float(stride)) * stride)
im_channel = im.shape[2]
padded_im = np.zeros((im_height, im_width, im_channel))
padded_im[:im.shape[0], :im.shape[1], :] = im
        return padded_im, im_scale
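# Minimal runnable sketch of resize (target_size/max_size/stride values are
# assumptions for illustration, not taken from any real config):
def _resize_usage_sketch():
    im = np.zeros((480, 640, 3), dtype=np.float32)  # H x W x C, BGR
    out, scale = resize(im, target_size=600, max_size=1000, stride=32)
    # short side 480 -> 600 gives scale 1.25; 1.25 * 640 = 800 <= max_size,
    # so the image becomes 600x800 and is then zero-padded to 608x800
    assert scale == 1.25
    assert out.shape == (608, 800, 3)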
# Repository: cartertinney/azure-iot-sdk-python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import abc
import six
import threading
import sys
from azure.iot.device import ProxyOptions
from azure.iot.device import constant
from azure.iot.device.common.pipeline.config import DEFAULT_KEEPALIVE
# Python 2 doesn't define this constant, so manually do it
if sys.version_info < (3,):
if not hasattr(threading, "TIMEOUT_MAX"):
threading.TIMEOUT_MAX = 4294967.0
@six.add_metaclass(abc.ABCMeta)
class PipelineConfigInstantiationTestBase(object):
"""All PipelineConfig instantiation tests should inherit from this base class.
It provides tests for shared functionality among all PipelineConfigs, derived from
the BasePipelineConfig class.
"""
@abc.abstractmethod
def config_cls(self):
"""This must be implemented in the child test class.
It returns the child class under test"""
pass
@abc.abstractmethod
def required_kwargs(self):
"""This must be implemented in the child test class.
It returns required kwargs for the child class under test"""
pass
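    # Sketch of a concrete child class (the class, config type, and kwargs below
    # are illustrative, not taken verbatim from the SDK test suite):
    #
    #   class TestMyPipelineConfigInstantiation(PipelineConfigInstantiationTestBase):
    #       @pytest.fixture
    #       def config_cls(self):
    #           return MyPipelineConfig
    #
    #       @pytest.fixture
    #       def required_kwargs(self):
    #           return {"hostname": "fake-hostname.azure-devices.net"}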
# PipelineConfig objects require exactly one auth mechanism, sastoken or x509.
# For the sake of ease of testing, we will assume sastoken is being used unless
# otherwise specified.
# It does not matter which is used for the purposes of these tests.
@pytest.fixture
def sastoken(self, mocker):
return mocker.MagicMock()
@pytest.fixture
def x509(self, mocker):
return mocker.MagicMock()
@pytest.mark.it(
"Instantiates with the 'hostname' attribute set to the provided 'hostname' parameter"
)
def test_hostname_set(self, config_cls, required_kwargs, sastoken):
        # Hostname is expected to be among the required kwargs, since every child config class requires it
config = config_cls(sastoken=sastoken, **required_kwargs)
assert config.hostname == required_kwargs["hostname"]
@pytest.mark.it(
"Instantiates with the 'gateway_hostname' attribute set to the provided 'gateway_hostname' parameter"
)
def test_gateway_hostname_set(self, config_cls, required_kwargs, sastoken):
fake_gateway_hostname = "gateway-hostname.some-domain.net"
config = config_cls(
sastoken=sastoken, gateway_hostname=fake_gateway_hostname, **required_kwargs
)
assert config.gateway_hostname == fake_gateway_hostname
@pytest.mark.it(
"Instantiates with the 'gateway_hostname' attribute set to 'None' if no 'gateway_hostname' parameter is provided"
)
def test_gateway_hostname_default(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, **required_kwargs)
assert config.gateway_hostname is None
@pytest.mark.it(
"Instantiates with the 'keep_alive' attribute set to the provided 'keep_alive' parameter (converting the value to int)"
)
@pytest.mark.parametrize(
"keep_alive",
[
pytest.param(1, id="int"),
pytest.param(35.90, id="float"),
pytest.param(0b1010, id="binary"),
pytest.param(0x9, id="hexadecimal"),
pytest.param("7", id="Numeric string"),
],
)
def test_keep_alive_valid_with_conversion(
self, mocker, required_kwargs, config_cls, sastoken, keep_alive
):
config = config_cls(sastoken=sastoken, keep_alive=keep_alive, **required_kwargs)
assert config.keep_alive == int(keep_alive)
@pytest.mark.it(
"Instantiates with the 'keep_alive' attribute to 'None' if no 'keep_alive' parameter is provided"
)
def test_keep_alive_default(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, **required_kwargs)
assert config.keep_alive == DEFAULT_KEEPALIVE
@pytest.mark.it("Raises TypeError if the provided 'keep_alive' parameter is not numeric")
@pytest.mark.parametrize(
"keep_alive",
[
pytest.param("sectumsempra", id="non-numeric string"),
pytest.param((1, 2), id="tuple"),
pytest.param([1, 2], id="list"),
pytest.param(object(), id="object"),
],
)
def test_keep_alive_invalid_type(self, config_cls, required_kwargs, sastoken, keep_alive):
with pytest.raises(TypeError):
config_cls(sastoken=sastoken, keep_alive=keep_alive, **required_kwargs)
@pytest.mark.it("Raises ValueError if the provided 'keep_alive' parameter has an invalid value")
@pytest.mark.parametrize(
"keep_alive",
[
pytest.param(9876543210987654321098765432109876543210, id="> than max"),
pytest.param(-2001, id="negative"),
pytest.param(0, id="zero"),
],
)
def test_keep_alive_invalid_value(
self, mocker, required_kwargs, config_cls, sastoken, keep_alive
):
with pytest.raises(ValueError):
config_cls(sastoken=sastoken, keep_alive=keep_alive, **required_kwargs)
@pytest.mark.it(
"Instantiates with the 'sastoken' attribute set to the provided 'sastoken' parameter"
)
def test_sastoken_set(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, **required_kwargs)
assert config.sastoken is sastoken
@pytest.mark.it(
"Instantiates with the 'sastoken' attribute set to 'None' if no 'sastoken' parameter is provided"
)
def test_sastoken_default(self, config_cls, required_kwargs, x509):
config = config_cls(x509=x509, **required_kwargs)
assert config.sastoken is None
@pytest.mark.it("Instantiates with the 'x509' attribute set to the provided 'x509' parameter")
def test_x509_set(self, config_cls, required_kwargs, x509):
config = config_cls(x509=x509, **required_kwargs)
assert config.x509 is x509
@pytest.mark.it(
"Instantiates with the 'x509' attribute set to 'None' if no 'x509 paramater is provided"
)
def test_x509_default(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, **required_kwargs)
assert config.x509 is None
@pytest.mark.it(
"Raises a ValueError if neither the 'sastoken' nor 'x509' parameter is provided"
)
def test_no_auths_provided(self, config_cls, required_kwargs):
with pytest.raises(ValueError):
config_cls(**required_kwargs)
@pytest.mark.it("Raises a ValueError if both the 'sastoken' and 'x509' parameters are provided")
def test_both_auths_provided(self, config_cls, required_kwargs, sastoken, x509):
with pytest.raises(ValueError):
config_cls(sastoken=sastoken, x509=x509, **required_kwargs)
@pytest.mark.it(
"Instantiates with the 'server_verification_cert' attribute set to the provided 'server_verification_cert' parameter"
)
def test_server_verification_cert_set(self, config_cls, required_kwargs, sastoken):
svc = "fake_server_verification_cert"
config = config_cls(sastoken=sastoken, server_verification_cert=svc, **required_kwargs)
assert config.server_verification_cert == svc
@pytest.mark.it(
"Instantiates with the 'server_verification_cert' attribute set to 'None' if no 'server_verification_cert' paramater is provided"
)
def test_server_verification_cert_default(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, **required_kwargs)
assert config.server_verification_cert is None
@pytest.mark.it(
"Instantiates with the 'websockets' attribute set to the provided 'websockets' parameter"
)
@pytest.mark.parametrize(
"websockets", [True, False], ids=["websockets == True", "websockets == False"]
)
def test_websockets_set(self, config_cls, required_kwargs, sastoken, websockets):
config = config_cls(sastoken=sastoken, websockets=websockets, **required_kwargs)
assert config.websockets is websockets
@pytest.mark.it(
"Instantiates with the 'websockets' attribute to 'False' if no 'websockets' parameter is provided"
)
def test_websockets_default(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, **required_kwargs)
assert config.websockets is False
@pytest.mark.it(
"Instantiates with the 'cipher' attribute set to OpenSSL list formatted version of the provided 'cipher' parameter"
)
@pytest.mark.parametrize(
"cipher_input, expected_cipher",
[
pytest.param(
"DHE-RSA-AES128-SHA",
"DHE-RSA-AES128-SHA",
id="Single cipher suite, OpenSSL list formatted string",
),
pytest.param(
"DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-GCM-SHA256",
"DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-GCM-SHA256",
id="Multiple cipher suites, OpenSSL list formatted string",
),
pytest.param(
"DHE_RSA_AES128_SHA",
"DHE-RSA-AES128-SHA",
id="Single cipher suite, as string with '_' delimited algorithms/protocols",
),
pytest.param(
"DHE_RSA_AES128_SHA:DHE_RSA_AES256_SHA:ECDHE_ECDSA_AES128_GCM_SHA256",
"DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-GCM-SHA256",
id="Multiple cipher suites, as string with '_' delimited algorithms/protocols and ':' delimited suites",
),
pytest.param(
["DHE-RSA-AES128-SHA"],
"DHE-RSA-AES128-SHA",
id="Single cipher suite, in a list, with '-' delimited algorithms/protocols",
),
pytest.param(
["DHE-RSA-AES128-SHA", "DHE-RSA-AES256-SHA", "ECDHE-ECDSA-AES128-GCM-SHA256"],
"DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-GCM-SHA256",
id="Multiple cipher suites, in a list, with '-' delimited algorithms/protocols",
),
pytest.param(
["DHE_RSA_AES128_SHA"],
"DHE-RSA-AES128-SHA",
id="Single cipher suite, in a list, with '_' delimited algorithms/protocols",
),
pytest.param(
["DHE_RSA_AES128_SHA", "DHE_RSA_AES256_SHA", "ECDHE_ECDSA_AES128_GCM_SHA256"],
"DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-GCM-SHA256",
id="Multiple cipher suites, in a list, with '_' delimited algorithms/protocols",
),
],
)
def test_cipher(self, config_cls, required_kwargs, sastoken, cipher_input, expected_cipher):
config = config_cls(sastoken=sastoken, cipher=cipher_input, **required_kwargs)
assert config.cipher == expected_cipher
@pytest.mark.it(
"Raises TypeError if the provided 'cipher' attribute is neither list nor string"
)
@pytest.mark.parametrize(
"cipher",
[
pytest.param(123, id="int"),
pytest.param(
{"cipher1": "DHE-RSA-AES128-SHA", "cipher2": "DHE_RSA_AES256_SHA"}, id="dict"
),
pytest.param(object(), id="complex object"),
],
)
def test_invalid_cipher_param(self, config_cls, required_kwargs, sastoken, cipher):
with pytest.raises(TypeError):
config_cls(sastoken=sastoken, cipher=cipher, **required_kwargs)
@pytest.mark.it(
"Instantiates with the 'cipher' attribute to empty string ('') if no 'cipher' parameter is provided"
)
def test_cipher_default(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, **required_kwargs)
assert config.cipher == ""
@pytest.mark.it(
"Instantiates with the 'proxy_options' attribute set to the ProxyOptions object provided in the 'proxy_options' parameter"
)
def test_proxy_options(self, mocker, required_kwargs, config_cls, sastoken):
proxy_options = ProxyOptions(
proxy_type=mocker.MagicMock(), proxy_addr="127.0.0.1", proxy_port=8888
)
config = config_cls(sastoken=sastoken, proxy_options=proxy_options, **required_kwargs)
assert config.proxy_options is proxy_options
@pytest.mark.it(
"Instantiates with the 'proxy_options' attribute to 'None' if no 'proxy_options' parameter is provided"
)
def test_proxy_options_default(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, **required_kwargs)
assert config.proxy_options is None
@pytest.mark.it(
"Instantiates with the 'auto_connect' attribute set to the provided 'auto_connect' parameter"
)
def test_auto_connect_set(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, auto_connect=False, **required_kwargs)
assert config.auto_connect is False
@pytest.mark.it(
"Instantiates with the 'auto_connect' attribute set to 'None' if no 'auto_connect' parameter is provided"
)
def test_auto_connect_default(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, **required_kwargs)
assert config.auto_connect is True
@pytest.mark.it(
"Instantiates with the 'connection_retry' attribute set to the provided 'connection_retry' parameter"
)
def test_connection_retry_set(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, connection_retry=False, **required_kwargs)
assert config.connection_retry is False
@pytest.mark.it(
"Instantiates with the 'connection_retry' attribute set to 'True' if no 'connection_retry' parameter is provided"
)
def test_connection_retry_default(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, **required_kwargs)
assert config.connection_retry is True
@pytest.mark.it(
"Instantiates with the 'connection_retry_interval' attribute set to the provided 'connection_retry_interval' parameter (converting the value to int)"
)
@pytest.mark.parametrize(
"connection_retry_interval",
[
pytest.param(1, id="int"),
pytest.param(35.90, id="float"),
pytest.param(0b1010, id="binary"),
pytest.param(0x9, id="hexadecimal"),
pytest.param("7", id="Numeric string"),
],
)
def test_connection_retry_interval_set(
self, connection_retry_interval, config_cls, required_kwargs, sastoken
):
config = config_cls(
sastoken=sastoken,
connection_retry_interval=connection_retry_interval,
**required_kwargs
)
assert config.connection_retry_interval == int(connection_retry_interval)
@pytest.mark.it(
"Instantiates with the 'connection_retry_interval' attribute set to 10 if no 'connection_retry_interval' parameter is provided"
)
def test_connection_retry_interval_default(self, config_cls, required_kwargs, sastoken):
config = config_cls(sastoken=sastoken, **required_kwargs)
assert config.connection_retry_interval == 10
@pytest.mark.it(
"Raises a TypeError if the provided 'connection_retry_interval' paramater is not numeric"
)
@pytest.mark.parametrize(
"connection_retry_interval",
[
pytest.param("non-numeric-string", id="non-numeric string"),
pytest.param((1, 2), id="tuple"),
pytest.param([1, 2], id="list"),
pytest.param(object(), id="object"),
],
)
def test_connection_retry_interval_invalid_type(
self, config_cls, sastoken, required_kwargs, connection_retry_interval
):
with pytest.raises(TypeError):
config_cls(
sastoken=sastoken,
connection_retry_interval=connection_retry_interval,
**required_kwargs
)
@pytest.mark.it(
"Raises a ValueError if the provided 'connection_retry_interval' parameter has an invalid value"
)
@pytest.mark.parametrize(
"connection_retry_interval",
[
pytest.param(threading.TIMEOUT_MAX + 1, id="> than max"),
pytest.param(-1, id="negative"),
pytest.param(0, id="zero"),
],
)
def test_connection_retry_interval_invalid_value(
        self, config_cls, sastoken, required_kwargs, connection_retry_interval
    ):
        with pytest.raises(ValueError):
            config_cls(
                sastoken=sastoken,
                connection_retry_interval=connection_retry_interval,
                **required_kwargs
            )
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 5.3
# in conjunction with Tcl version 8.6
# May 26, 2020 02:41:41 PM JST platform: Windows NT
import tkinter as tk
from tkinter import ttk
import threading
import time
from rmslogin import RakutenRms
class SearchWindow:
def __init__(self, parent, top, rms):
self.items = []
self.rms = rms
self.parent = parent
self.search_type_dict = {"商品名": "itemName",
"PC用キャッチコピー": "catchcopy",
"商品管理番号": "itemUrl",
"カタログID": "catalogId",
"全商品ディレクトリID": "genreId",
"価格下限": "itemPriceFrom",
"価格上限": "itemPriceTo",
"倉庫フラグ": "depotFlg",
"商品モバイルフラグ": "itemMobileFlg",
"闇市フラグ": "limitedFlg",
"送料別フラグ": "postageFlg"}
self.top = top
self.top.geometry("600x450+650+150")
self.top.minsize(120, 1)
self.top.maxsize(3204, 1061)
self.top.resizable(1, 1)
self.top.title("検索")
self.Frame1 = tk.Frame(self.top)
self.Frame1.place(
relx=0.017,
rely=0.022,
relheight=0.878,
relwidth=0.958)
self.Frame1.configure(relief='groove')
self.Frame1.configure(borderwidth="2")
self.Frame1.configure(relief="groove")
self.result_box_var = tk.StringVar()
self.result_box = tk.Listbox(self.Frame1, listvariable=self.result_box_var, selectmode=tk.EXTENDED)
self.result_box.place(
relx=0.017,
rely=0.203,
relheight=0.785,
relwidth=0.963)
self.keyword_entry = tk.Entry(self.Frame1)
self.keyword_entry.place(
relx=0.209,
rely=0.076,
height=27,
relwidth=0.494)
self.search_button = tk.Button(self.Frame1, command=self.search)
self.search_button.place(relx=0.73, rely=0.051, height=44, width=67)
self.search_button.configure(text='''検索''')
self.search_type_var = tk.StringVar()
self.search_type = ttk.Combobox(self.Frame1, textvariable=self.search_type_var)
self.search_type.place(
relx=0.017,
rely=0.084,
relheight=0.053,
relwidth=0.162)
self.search_type["values"] = list(self.search_type_dict.keys())
self.search_type.set("商品名")
self.next_button = tk.Button(self.Frame1, command=self.nextsearch)
self.next_button.place(relx=0.87, rely=0.051, height=44, width=67)
self.next_button.configure(text='''続けて検索''')
self.enter_button = tk.Button(self.top, command=self.update_item)
self.enter_button.place(relx=0.683, rely=0.911, height=34, width=67)
self.enter_button.configure(text='''決定''')
self.cancel_button = tk.Button(self.top, command=self.destroywindow)
self.cancel_button.place(relx=0.833, rely=0.911, height=34, width=67)
self.cancel_button.configure(text='''キャンセル''')
self.search_num_var = tk.StringVar()
self.search_num_var.set("0件")
self.search_num = tk.Label(self.top, textvariable=self.search_num_var)
self.search_num.place(relx=0.017, rely=0.911, height=31, width=134)
def update_item(self):
if self.items:
self.parent.update_item(self.items)
self.top.destroy()
def search(self):
self.search_thread = threading.Thread(target=self.__search)
        self.search_thread.daemon = True
self.search_thread.start()
self.enter_button["state"] = tk.DISABLED
self.next_button["state"] = tk.DISABLED
self.search_button["state"] = tk.DISABLED
def __search(self):
if self.keyword_entry.get():
chunk_gen = self.rms.search(
searchkey={
self.search_type_dict[self.search_type.get()]: self.keyword_entry.get()})
            for items, result_num in chunk_gen:
self.items.extend(items)
self.search_num_var.set(str(len(self.items)) + "件")
if self.items:
self.items = list(set(self.items))
result_items = [item.find("itemName").text for item in self.items]
self.result_box.delete(0, tk.END)
for item in result_items:
self.result_box.insert(tk.END, item)
self.enter_button["state"] = tk.NORMAL
self.next_button["state"] = tk.NORMAL
self.search_button["state"] = tk.NORMAL
def nextsearch(self):
if self.items and self.keyword_entry.get():
self.items = self.rms.nextsearch(self.items, searchkey={
self.search_type_dict[self.search_type.get()]: self.keyword_entry.get()})
result_items = [item.find("itemName").text for item in self.items]
self.result_box.delete(0, tk.END)
for item in result_items:
self.result_box.insert(tk.END, item)
self.search_num_var.set(str(len(self.items)) + "件")
def destroywindow(self):
self.top.destroy()
class MainWindow:
def __init__(self, rms, top):
self.rms = rms
self.items = []
self.toggles = {}
self.entries = {}
self.selected = {}
self.update_flag = False
top.geometry("889x909+708+81")
top.minsize(120, 1)
top.maxsize(3204, 1041)
top.resizable(1, 1)
top.title("rakuten-item-update")
self.menubar = tk.Menu(top)
top.configure(menu=self.menubar)
self.menubar.add_command(
label="検索", command=self.create_search_window)
self.Frame1 = tk.Frame(top)
self.Frame1.place(relx=0.0, rely=0.0, relheight=1.0, relwidth=1.0)
self.Frame1.configure(relief='groove')
self.Frame1.configure(borderwidth="2")
self.Frame1.configure(relief="groove")
self.__item_name()
self.__sale_time()
self.__catchcopy()
self.__mb_catchcopy()
self.__tax_rate()
self.__tax_flag()
self.__delivery_set_id()
self.__postage_flag()
self.__daibiki_flag()
self.__catalog_caption()
self.__smart_caption()
self.__display_caption()
self.__depot_flag()
self.__limited_flag()
self.__item_box()
self.__item_updates()
for k, sel in self.selected.items():
sel["values"] = [v for k, v in self.rms.config.settings[k]["options"].items()]
def create_search_window(self):
SearchWindow(parent=self, top=tk.Toplevel(), rms=self.rms)
def update_item(self, items):
self.items = items
if self.items:
result_items = [item.find("itemName").text for item in self.items]
self.result_box.delete(0, tk.END)
for item in result_items:
self.result_box.insert(tk.END, item)
self.item_num_label_var.set(str(len(self.items)) + "件")
def exclusion_items(self):
if self.items:
selection = sorted(list(self.result_box.curselection()), reverse=True)
for sel in selection:
self.result_box.delete(sel)
del self.items[sel]
self.update_item(self.items)
def update_rms_items(self):
self.update_item_thread = threading.Thread(target=self.__update_rms_items)
self.update_item_thread.start()
self.update_flag = True
self.update_button["state"] = tk.DISABLED
def __update_rms_items(self):
self.update_flag = True
for item in self.items:
if not self.update_flag:
break
for k, toggle in self.toggles.items():
toggle_var = toggle.get()
self.rms.config.settings[k]["toggle"] = toggle_var
if toggle_var:
value = self.entries[k]
if type(value) is list:
self.rms.config.settings[k]["insert"] = [
self.entries[k][0].get(), self.entries[k][1].get()]
self.rms.config.settings[k]["replace"] = [
self.entries[k][2].get(), self.entries[k][3].get()]
else:
if self.rms.config.settings[k]["type"] == "select":
options = {v: k for k, v in self.rms.config.settings[k]["options"].items()}
self.rms.config.settings[k]["value"] = options[self.entries[k].get()]
else:
self.rms.config.settings[k]["value"] = self.entries[k].get()
self.rms.update(item.find("itemUrl").text)
self.update_button["state"] = tk.NORMAL
def cancel_updates(self):
self.update_flag = False
def __item_name(self, toggle=False):
self.Frame2 = tk.Frame(self.Frame1)
self.Frame2.place(relx=0.0, rely=0.0, relheight=0.087, relwidth=0.649)
self.Frame2.configure(relief='groove')
self.Frame2.configure(borderwidth="2")
self.Frame2.configure(relief="groove")
self.Label1 = tk.Label(self.Frame2)
self.Label1.place(relx=0.069, rely=0.101, height=64, width=83)
self.Label1.configure(text='''商品名''')
self.Label1.configure(wraplength="50")
self.item_name_insert_1 = tk.Entry(self.Frame2)
self.item_name_insert_1.place(
relx=0.31, rely=0.152, height=27, relwidth=0.267)
self.item_name_insert_2 = tk.Entry(self.Frame2)
self.item_name_insert_2.place(
relx=0.716, rely=0.152, height=27, relwidth=0.267)
self.Label2 = tk.Label(self.Frame2)
self.Label2.place(relx=0.208, rely=0.152, height=22, width=43)
self.Label2.configure(text='''挿入前''')
self.Label3 = tk.Label(self.Frame2)
self.Label3.place(relx=0.622, rely=0.152, height=22, width=43)
self.Label3.configure(text='''挿入後''')
self.item_name_replace_1 = tk.Entry(self.Frame2)
self.item_name_replace_1.place(
relx=0.31, rely=0.557, height=27, relwidth=0.267)
self.item_name_replace_2 = tk.Entry(self.Frame2)
self.item_name_replace_2.place(
relx=0.716, rely=0.557, height=27, relwidth=0.267)
self.Label4 = tk.Label(self.Frame2)
self.Label4.place(relx=0.208, rely=0.557, height=20, width=43)
self.Label4.configure(text='''置換前''')
self.Label5 = tk.Label(self.Frame2)
self.Label5.place(relx=0.622, rely=0.557, height=20, width=43)
self.Label5.configure(text='''置換後''')
self.item_name_toggle_var = tk.BooleanVar()
self.item_name_toggle_var.set(toggle)
self.item_name_toggle = tk.Checkbutton(self.Frame2)
self.item_name_toggle.place(
relx=0.029,
rely=0.316,
relheight=0.392,
relwidth=0.071)
self.item_name_toggle.configure(justify='left')
self.item_name_toggle.configure(variable=self.item_name_toggle_var)
self.toggles["item_name"] = self.item_name_toggle_var
self.entries["item_name"] = [
self.item_name_insert_1,
self.item_name_insert_2,
self.item_name_replace_1,
self.item_name_replace_2]
def __sale_time(
self,
toggle=False):
self.Frame3 = tk.Frame(self.Frame1)
self.Frame3.place(
relx=0.0,
rely=0.509,
relheight=0.099,
relwidth=0.649)
self.Frame3.configure(relief='groove')
self.Frame3.configure(borderwidth="2")
self.Frame3.configure(relief="groove")
self.Label6 = tk.Label(self.Frame3, text="販売終了日時")
self.Label6.place(relx=0.087, rely=0.1, height=29, width=99)
self.sale_time_toggle_var = tk.BooleanVar()
self.sale_time_toggle_var.set(toggle)
self.sale_time_toggle = tk.Checkbutton(
self.Frame3, variable=self.sale_time_toggle_var, justify="left")
self.sale_time_toggle.place(
relx=0.029,
rely=0.311,
relheight=0.4,
relwidth=0.071)
self.Label7 = tk.Label(self.Frame3, text="販売終了日時")
self.Label7.place(relx=0.087, rely=0.522, height=29, width=99)
self.s_time_year_var = tk.StringVar()
self.s_time_year = ttk.Combobox(
self.Frame3, textvariable=self.s_time_year_var)
self.s_time_year.place(
relx=0.295,
rely=0.211,
relheight=0.211,
relwidth=0.125)
self.s_time_month_var = tk.StringVar()
self.s_time_month = ttk.Combobox(
self.Frame3, textvariable=self.s_time_month_var)
self.s_time_month.place(
relx=0.466,
rely=0.211,
relheight=0.211,
relwidth=0.075)
self.s_time_day_var = tk.StringVar()
self.s_time_day = ttk.Combobox(
self.Frame3, textvariable=self.s_time_day_var)
self.s_time_day.place(
relx=0.588,
rely=0.211,
relheight=0.211,
relwidth=0.075)
self.s_time_hour_var = tk.StringVar()
self.s_time_hour = ttk.Combobox(
self.Frame3, textvariable=self.s_time_hour_var)
self.s_time_hour.place(
relx=0.709,
rely=0.211,
relheight=0.211,
relwidth=0.075)
self.s_time_min_var = tk.StringVar()
self.s_time_min = ttk.Combobox(
self.Frame3, textvariable=self.s_time_min_var)
self.s_time_min.place(
relx=0.83,
rely=0.211,
relheight=0.211,
relwidth=0.075)
self.Label8 = tk.Label(self.Frame3, text="年")
self.Label8.place(relx=0.433, rely=0.211, height=19, width=14)
self.Label9 = tk.Label(self.Frame3, text="月")
self.Label9.place(relx=0.555, rely=0.211, height=19, width=14)
self.Label10 = tk.Label(self.Frame3, text="日")
self.Label10.place(relx=0.674, rely=0.211, height=19, width=14)
self.Label11 = tk.Label(self.Frame3, text="時")
self.Label11.place(relx=0.795, rely=0.211, height=19, width=14)
self.Label12 = tk.Label(self.Frame3, text="分")
self.Label12.place(relx=0.917, rely=0.211, height=19, width=14)
self.e_time_year_var = tk.StringVar()
self.e_time_year = ttk.Combobox(
self.Frame3, textvariable=self.e_time_year_var)
self.e_time_year.place(
relx=0.295,
rely=0.622,
relheight=0.222,
relwidth=0.125)
self.e_time_month_var = tk.StringVar()
self.e_time_month = ttk.Combobox(
self.Frame3, textvariable=self.e_time_month_var)
self.e_time_month.place(
relx=0.466,
rely=0.622,
relheight=0.222,
relwidth=0.075)
self.e_time_day_var = tk.StringVar()
self.e_time_day = ttk.Combobox(
self.Frame3, textvariable=self.e_time_day_var)
self.e_time_day.place(
relx=0.588,
rely=0.622,
relheight=0.222,
relwidth=0.075)
self.e_time_hour_var = tk.StringVar()
self.e_time_hour = ttk.Combobox(
self.Frame3, textvariable=self.e_time_hour_var)
self.e_time_hour.place(
relx=0.709,
rely=0.622,
relheight=0.222,
relwidth=0.075)
self.e_time_min_var = tk.StringVar()
self.e_time_min = ttk.Combobox(
self.Frame3, textvariable=self.e_time_min_var)
self.e_time_min.place(
relx=0.83,
rely=0.622,
relheight=0.222,
relwidth=0.075)
self.Label13 = tk.Label(self.Frame3, text="年")
self.Label13.place(relx=0.433, rely=0.622, height=20, width=14)
self.Label14 = tk.Label(self.Frame3, text="月")
self.Label14.place(relx=0.555, rely=0.622, height=20, width=14)
self.Label15 = tk.Label(self.Frame3, text="日")
self.Label15.place(relx=0.674, rely=0.622, height=20, width=14)
self.Label16 = tk.Label(self.Frame3, text="時")
self.Label16.place(relx=0.795, rely=0.622, height=20, width=14)
self.Label17 = tk.Label(self.Frame3, text="分")
self.Label17.place(relx=0.917, rely=0.622, height=20, width=14)
self.toggles["sale_stime_year"] = self.sale_time_toggle_var
self.toggles["sale_stime_month"] = self.sale_time_toggle_var
self.toggles["sale_stime_day"] = self.sale_time_toggle_var
self.toggles["sale_stime_hour"] = self.sale_time_toggle_var
self.toggles["sale_stime_min"] = self.sale_time_toggle_var
self.toggles["sale_etime_year"] = self.sale_time_toggle_var
self.toggles["sale_etime_month"] = self.sale_time_toggle_var
self.toggles["sale_etime_day"] = self.sale_time_toggle_var
self.toggles["sale_etime_hour"] = self.sale_time_toggle_var
self.toggles["sale_etime_min"] = self.sale_time_toggle_var
self.selected["sale_stime_year"] = self.s_time_year
self.selected["sale_stime_month"] = self.s_time_month
self.selected["sale_stime_day"] = self.s_time_day
self.selected["sale_stime_hour"] = self.s_time_hour
self.selected["sale_stime_min"] = self.s_time_min
self.selected["sale_etime_year"] = self.e_time_year
self.selected["sale_etime_month"] = self.e_time_month
self.selected["sale_etime_day"] = self.e_time_day
self.selected["sale_etime_hour"] = self.e_time_hour
self.selected["sale_etime_min"] = self.e_time_min
self.entries["sale_stime_year"] = self.s_time_year_var
self.entries["sale_stime_month"] = self.s_time_month_var
self.entries["sale_stime_day"] = self.s_time_day_var
self.entries["sale_stime_hour"] = self.s_time_hour_var
self.entries["sale_stime_min"] = self.s_time_min_var
self.entries["sale_etime_year"] = self.e_time_year_var
self.entries["sale_etime_month"] = self.e_time_month_var
self.entries["sale_etime_day"] = self.e_time_day_var
self.entries["sale_etime_hour"] = self.e_time_hour_var
self.entries["sale_etime_min"] = self.e_time_min_var
def __catchcopy(self, toggle=False):
self.Frame2_7 = tk.Frame(self.Frame1)
self.Frame2_7.place(
relx=0.0,
rely=0.085,
relheight=0.087,
relwidth=0.649)
self.Frame2_7.configure(relief='groove')
self.Frame2_7.configure(borderwidth="2")
self.Frame2_7.configure(relief="groove")
self.Label1_8 = tk.Label(
self.Frame2_7,
text="PC用キャッチコピー",
wraplength="50")
self.Label1_8.place(relx=0.069, rely=0.101, height=64, width=83)
self.catchcopy_insert_1 = tk.Entry(self.Frame2_7)
self.catchcopy_insert_1.place(
relx=0.31, rely=0.152, height=27, relwidth=0.267)
self.catchcopy_insert_2 = tk.Entry(self.Frame2_7)
self.catchcopy_insert_2.place(
relx=0.716, rely=0.152, height=27, relwidth=0.267)
self.Label2_4 = tk.Label(self.Frame2_7, text="挿入前")
self.Label2_4.place(relx=0.208, rely=0.152, height=22, width=43)
self.Label3_5 = tk.Label(self.Frame2_7, text="挿入後")
self.Label3_5.place(relx=0.622, rely=0.152, height=22, width=43)
self.catchcopy_replace_1 = tk.Entry(self.Frame2_7)
self.catchcopy_replace_1.place(
relx=0.31, rely=0.557, height=27, relwidth=0.267)
self.catchcopy_replace_2 = tk.Entry(self.Frame2_7)
self.catchcopy_replace_2.place(
relx=0.716, rely=0.557, height=27, relwidth=0.267)
self.Label4_4 = tk.Label(self.Frame2_7, text="置換前")
self.Label4_4.place(relx=0.208, rely=0.557, height=20, width=43)
self.Label5_5 = tk.Label(self.Frame2_7, text="置換後")
self.Label5_5.place(relx=0.622, rely=0.557, height=20, width=43)
self.catchcopy_toggle_var = tk.BooleanVar()
self.catchcopy_toggle_var.set(toggle)
self.catchcopy_toggle = tk.Checkbutton(
self.Frame2_7, variable=self.catchcopy_toggle_var)
self.catchcopy_toggle.place(
relx=0.029,
rely=0.316,
relheight=0.392,
relwidth=0.071)
self.toggles["catch_copy"] = self.catchcopy_toggle_var
self.entries["catch_copy"] = [
self.catchcopy_insert_1,
self.catchcopy_insert_2,
self.catchcopy_replace_1,
self.catchcopy_replace_2]
def __mb_catchcopy(self, toggle=False):
self.Frame2_8 = tk.Frame(self.Frame1)
self.Frame2_8.place(
relx=0.0,
rely=0.169,
relheight=0.087,
relwidth=0.649)
self.Frame2_8.configure(relief='groove')
self.Frame2_8.configure(borderwidth="2")
self.Frame2_8.configure(relief="groove")
self.Label1_9 = tk.Label(self.Frame2_8)
self.Label1_9.place(relx=0.069, rely=0.101, height=64, width=83)
self.Label1_9.configure(text='''モバイル用キャッチコピー''')
self.Label1_9.configure(wraplength="50")
self.mb_catchcopy_insert_1 = tk.Entry(self.Frame2_8)
self.mb_catchcopy_insert_1.place(
relx=0.31, rely=0.152, height=27, relwidth=0.267)
self.mb_catchcopy_insert_2 = tk.Entry(self.Frame2_8)
self.mb_catchcopy_insert_2.place(
relx=0.716, rely=0.152, height=27, relwidth=0.267)
self.Label2_5 = tk.Label(self.Frame2_8)
self.Label2_5.place(relx=0.208, rely=0.152, height=22, width=43)
self.Label2_5.configure(text='''挿入前''')
self.Label3_6 = tk.Label(self.Frame2_8)
self.Label3_6.place(relx=0.622, rely=0.152, height=22, width=43)
self.Label3_6.configure(text='''挿入後''')
self.mb_catchcopy_replace_1 = tk.Entry(self.Frame2_8)
self.mb_catchcopy_replace_1.place(
relx=0.31, rely=0.557, height=27, relwidth=0.267)
self.mb_catchcopy_replace_2 = tk.Entry(self.Frame2_8)
self.mb_catchcopy_replace_2.place(
relx=0.716, rely=0.557, height=27, relwidth=0.267)
self.Label4_5 = tk.Label(self.Frame2_8)
self.Label4_5.place(relx=0.208, rely=0.557, height=20, width=43)
self.Label4_5.configure(text='''置換前''')
self.Label5_6 = tk.Label(self.Frame2_8)
self.Label5_6.place(relx=0.622, rely=0.557, height=20, width=43)
self.Label5_6.configure(text='''置換後''')
self.mb_catchcopy_toggle_var = tk.BooleanVar()
self.mb_catchcopy_toggle_var.set(toggle)
self.mb_catchcopy_toggle = tk.Checkbutton(
self.Frame2_8, variable=self.mb_catchcopy_toggle_var)
self.mb_catchcopy_toggle.place(
relx=0.029,
rely=0.316,
relheight=0.392,
relwidth=0.071)
self.toggles["mobile_catch_copy"] = self.mb_catchcopy_toggle_var
self.entries["mobile_catch_copy"] = [
self.mb_catchcopy_insert_1,
self.mb_catchcopy_insert_2,
self.mb_catchcopy_replace_1,
self.mb_catchcopy_replace_2]
def __tax_rate(self, toggle=False):
self.Frame2_9 = tk.Frame(self.Frame1)
self.Frame2_9.place(
relx=0.0,
rely=0.254,
relheight=0.052,
relwidth=0.649)
self.Frame2_9.configure(relief='groove')
self.Frame2_9.configure(borderwidth="2")
self.Frame2_9.configure(relief="groove")
self.Label1_10 = tk.Label(self.Frame2_9)
self.Label1_10.place(relx=0.069, rely=0.106, height=38, width=83)
self.Label1_10.configure(text='''消費税率''')
self.Label1_10.configure(wraplength="50")
self.tax_rate_toggle_var = tk.BooleanVar()
self.tax_rate_toggle_var.set(toggle)
self.tax_rate_toggle = tk.Checkbutton(
self.Frame2_9, variable=self.tax_rate_toggle_var)
self.tax_rate_toggle.place(
relx=0.029,
rely=0.319,
relheight=0.383,
relwidth=0.071)
self.tax_rate_var = tk.StringVar()
self.tax_rate = ttk.Combobox(
self.Frame2_9, textvariable=self.tax_rate_var)
self.tax_rate.place(
relx=0.312,
rely=0.213,
relheight=0.447,
relwidth=0.248)
self.toggles["tax_rate"] = self.tax_rate_toggle_var
self.selected["tax_rate"] = self.tax_rate
self.entries["tax_rate"] = self.tax_rate_var
def __tax_flag(self, toggle=False):
self.Frame2_10 = tk.Frame(self.Frame1)
self.Frame2_10.place(
relx=0.0,
rely=0.304,
relheight=0.054,
relwidth=0.649)
self.Frame2_10.configure(relief='groove')
self.Frame2_10.configure(borderwidth="2")
self.Frame2_10.configure(relief="groove")
self.Label1_11 = tk.Label(self.Frame2_10)
self.Label1_11.place(relx=0.069, rely=0.102, height=40, width=83)
self.Label1_11.configure(text='''消費税''')
self.Label1_11.configure(wraplength="50")
self.tax_flag_toggle_var = tk.BooleanVar()
self.tax_flag_toggle_var.set(toggle)
self.tax_flag_toggle = tk.Checkbutton(
self.Frame2_10, variable=self.tax_flag_toggle_var)
self.tax_flag_toggle.place(
relx=0.029,
rely=0.306,
relheight=0.408,
relwidth=0.071)
        self.tax_flag_var
import argparse
from astropy.io import ascii
from pandeia.engine.perform_calculation import perform_calculation
import json
import os
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.table import Table
from scipy.optimize import minimize
import numpy as np
import warnings
import pysynphot as S #
from extinction import fm07, fitzpatrick99, apply # https://pypi.org/project/extinction/
from astropy import units as u
from astropy.cosmology import WMAP9 as cosmo
from astropy.coordinates import Distance
from background4jwst import background4jwstclass
from pandeia.engine.calc_utils import build_default_calc # or alternatively, load ETC calculation parameters
package_directory = os.path.dirname(os.path.abspath(__file__))
class NIRSpec_SNR(object):
"""
Wapper for the Pandeia engine to calculate the signal to noise and exposure times for
NIRSpec observations. Numerous options are available for readout mode, grating, filter,
and observing mode.
Important:
ref_wave must be provided, wavelength the SN is calculated at
A spectrum must be provided.
The distance is measured in Mpc
Note:
The spectrum input units should be microns for wavelength and mJy for flux.
The normalising magnitude is as measured by NIRCam with mode = sw_imaging filter = f115w
Key functions:
Calculate_SNR()
            Calculates the expected signal to noise for a given configuration and exposure time.
The configuration is automatically determined from the input exposure time
Calculate_exposure_for_SN()
Calculates the exposure time needed for the desired signal to noise.
The instrument configuration is set to the values that achieve the shortest exposure
time for the same signal to noise.
Missing components:
Input spectra can be normalised by a magnitude in a given NIRCam configuration.
The default is set to "nircam,sw_imaging,f115w" and for now can only be changed by editing the
template "nirspec_fs_default.json".
It needs a more efficient way to calculate the optimal setup in Calculate_exposure_for_SN(),
currently it loops through group values. Scipy minimize didn't work so well here.
Different NIRSpec observing modes. Currently only long_slit is implemented, need to find
documentation on the other modes and their calls in Pandeia.
Commandline interface?
TBD
"""
def __init__(self,ref_wave=None,av=None,dist=None,
z=None,
grating='prism',filt=None,
read_mode='nrsirs2rapid',mode='fixed_slit',
spec = None,exptime=None):
self.grating = grating.lower() # grating name
self.Check_grating()
if filt is None:
self.Assign_filter() # filter name
else:
self.filter = filt # filter name
self.ngroups = None # number of frame groups
self.nint = 1 # Number of integrations
self.nexp = 4 # Number of exposures
self.mode = mode # Spectrograph observation mode
self.read_mode = read_mode # readout mode
self.exp_time = exptime # Desired exposure time
self.pandeiacfg = None
self.mag = None
self.ref_filter = 'f200w'
        self.ref_sn = None # signal to noise
if ref_wave is not None:
            self.ref_wave = ref_wave # reference wavelength in microns
else:
            self.ref_wave = 2 # reference wavelength in microns
self.av = av # v band extinction
self.dist = dist
self.z = z
self.spec = spec
self.background4jwst = background4jwstclass()
self.lambkg4ETC = None
# calculated by pandeia
self.calc_flux = None # units e-/s
self.calc_wave = None # units micron
self.calc_err = None # units e-/s
        self.calc_sn = None # signal to noise of full spec
self.calc_exp = None
def Assign_filter(self):
grating = self.grating
if (grating == 'g140h') | (grating == 'g140m'):
            message = ('Two filters are available: f070lp or f100lp\n' +
                       'Assigning f070lp')
warnings.warn(message)
filt = 'f070lp'
elif (grating == 'g235h') | (grating == 'g235m'):
filt = 'f170lp'
elif (grating == 'g395h') | (grating == 'g395m'):
filt = 'f290lp'
elif grating == 'prism':
filt = 'clear'
self.filter = filt
def Check_grating(self):
"""
Check to see if the input grating value is a valid option
"""
allowed = ['prism', 'g140h', 'g140m',
'g235h', 'g235m', 'g395h', 'g395m']
for i in range(len(allowed)):
if self.grating == allowed[i]:
return
message = ('No such grating available, please choose from:\n '
+ 'prism\n g140h\n g140m\n g235h\n g235m\n g395h\n g395m')
raise(ValueError(message))
def Check_filter(self):
"""
Check to see if the input filter value is a valid option
"""
allowed = ['clear', 'f070lp', 'f100lp',
'f110w', 'f140x', 'f170lp', 'f290lp']
for i in range(len(allowed)):
if self.filter == allowed[i]:
return
message = ('No such filter available, please choose from:\n '
+ 'clear\n f070lp\n f100lp\n f110w\n f140x\n f170lp\n f290lp')
raise(ValueError(message))
def Check_read_mode(self):
"""
Check to see if the input read mode value is a valid option
"""
allowed = ['nrsrapid', 'nrsrapidd6','nrs',
'nrsirs2rapid','nrsirs2']
for i in range(len(allowed)):
if self.read_mode == allowed[i]:
return
message = ('No such readout mode available, please choose from:\n '
+ 'nrsrapid\n nrsrapidd6\n nrs\n nrsirs2rapid\n nrsirs2')
raise(ValueError(message))
def Check_mode(self):
"""
Check to see if the input mode value is a valid option
"""
allowed = ['fixed_slit','ifu']
for i in range(len(allowed)):
if self.mode == allowed[i]:
return
message = ('No such mode available, please choose from:\n '
+ 'fixed_slit\n ifu\n')
raise(ValueError(message))
def Check_exposure_time(self):
"""
Check to see if the input exposure time value is a valid option
"""
if self.exp_time is None:
message = "Exposure time can't be None"
raise(ValueError(message))
#if (type(self.exp_time) != float) & (type(self.exp_time) !=int):
# print(type(self.exp_time))
# message = "Exposure time must be either a float or int"
# raise ValueError(message)
return
def NIRSpec_exp_2_groups(self):
"""
Calculate the number of groups that can be used recorded for a
given exposure time and configuration.
"""
self.Check_read_mode()
self.Check_exposure_time()
mode = self.read_mode
exp = self.exp_time
        if mode == 'nrsrapid':
            tframe = 10.737
        elif mode == 'nrsrapidd6':
            tframe = 75.159
        elif mode == 'nrs':
            tframe = 42.947
        elif mode == 'nrsirs2rapid':
            tframe = 14.589
        elif mode == 'nrsirs2':
            tframe = 72.944
        ng = np.floor(exp / (self.nint * self.nexp * tframe + 58.35 * self.nint))
        if ng > 100:
            self.nint += 1
            # recompute with the per-group frame time, not the previous group count
            ng = np.floor(exp / (self.nint * self.nexp * tframe + 58.35 * self.nint))
        if ng < 1:
            warnings.warn('exposure is too short for even one group! '
                          'setting ng = 1')
            ng = 1
self.ngroups = ng
return
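    # Worked example of the group calculation above, using the class defaults
    # (read_mode='nrsirs2rapid', nint=1, nexp=4) and an assumed 3000 s exposure:
    #   frame time = 14.589 s
    #   ngroups = floor(3000 / (1 * 4 * 14.589 + 58.35 * 1))
    #           = floor(3000 / 116.706) = 25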
def NIRSpec_groups_2_exp(self):
"""
Calculate the total exposure time for a given configuration.
"""
self.Check_read_mode()
self.Check_exposure_time()
mode = self.read_mode
if mode == 'nrsrapid':
exp = 10.737
elif mode == 'nrsrapidd6':
exp = 75.159
elif mode == 'nrs':
exp = 42.947
elif mode == 'nrsirs2rapid':
exp = 14.589
elif mode == 'nrsirs2':
exp = 72.944
exp = exp * self.nint * self.nexp * self.ngroups * 14.6
self.exp_time = exp
return
def Check_all_fields(self):
"""
Check if all parameters have been filled in correctly to enter into Pandeia
"""
self.Check_grating()
self.Check_filter()
self.Check_read_mode()
self.Check_mode()
message = ''
if self.ngroups is None:
message += 'Number of groups must be defined\n'
elif self.ngroups < 4:
            m = ('Number of groups is only {}, a value >4 is preferred,'.format(self.ngroups) +
' so increasing exposure time is advised.')
warnings.warn(m)
if self.nint is None:
message += 'Number of integrations must be defined\n'
if self.nexp is None:
            message += 'Number of exposures must be defined\n'
if self.read_mode is None:
message += 'Read mode must be defined\n'
if self.spec is None:
message += 'No spectrum provided\n'
if self.ref_wave is None:
message += 'No reference wavelength specified (ref_wave = None)\n'
if message != '':
raise(ValueError(message))
return
def Get_background(self):
"""
Add the background model to the pandeia table.
The fits file was provided by <NAME> via <NAME>
"""
file = os.path.join(package_directory,'data/minzodi12_12052016.fits')
hdu = fits.open(file)
table = hdu[1].data
back_lam = table.field('wavelength')
back_all = (table.field('background') + table.field('thermal') +
table.field('straylight') + table.field('infield'))
self.pandeiacfg['background'] = [list(back_lam),list(back_all)]
return
def Normalise_spec(self):
imgr_data = self.imgr_data
imgr_data['scene'][0]['spectrum']["normalization"] = {}
imgr_data['scene'][0]['spectrum']["normalization"]["bandpass"]= "nircam,lw_imaging," + self.ref_filt
imgr_data['scene'][0]['spectrum']["normalization"]["norm_flux"] = self.mag
imgr_data['scene'][0]['spectrum']["normalization"]["norm_fluxunit"] = "abmag"
imgr_data['scene'][0]['spectrum']["normalization"]["type"] = "jwst"
self.imgr_data = imgr_data
def Make_config(self):
"""
Put the configuration data into the format expected by Pandeia,
using the default template provided by <NAME>.
"""
if self.ngroups is None:
self.NIRSpec_exp_2_groups()
self.Check_all_fields()
if self.pandeiacfg is None:
self.pandeiacfg = build_default_calc('jwst','nirspec','fixed_slit')
self.pandeiacfg['configuration']['detector']['ngroup'] = self.ngroups
self.pandeiacfg['configuration']['detector']['nint'] = self.nint
self.pandeiacfg['configuration']['detector']['nexp'] = self.nexp
self.pandeiacfg['configuration']['instrument']['mode'] = self.mode
self.pandeiacfg['configuration']['instrument']['filter'] = self.filter
self.pandeiacfg['configuration']['instrument']['disperser'] = self.grating
self.pandeiacfg['configuration']['detector']['readout_pattern'] = self.read_mode
self.pandeiacfg['configuration']['detector']['subarray'] = 'full'
self.spec.convert('micron')
self.spec.convert('mJy')
self.pandeiacfg['scene'][0]['spectrum']['sed']['sed_type'] = 'input'
self.pandeiacfg['scene'][0]['spectrum']['sed']['spectrum'] = [self.spec.wave,self.spec.flux]
self.pandeiacfg['scene'][0]['spectrum']['sed']['unit'] = 'flam'
if self.mag is not None:
self.pandeiacfg['scene'][0]['spectrum']['normalization']['type'] = 'jwst'
self.pandeiacfg['scene'][0]['spectrum']['normalization']['bandpass'] = 'nircam,sw_imaging,' + self.ref_filter
self.pandeiacfg['scene'][0]['spectrum']['normalization']['norm_flux'] = self.mag
# Repository: bogomhd/BeoLightControl
#!/usr/bin/python3
import yaml
import json
import requests
import time
import sys
import shutil
import signal
from os import system, path
from threading import Thread
from zeroconf import ServiceBrowser, Zeroconf
import ipaddress
import http.client
configuration_file = "BeoLightControl.yaml"
config_hue_bridge = "Hue bridge"
config_hue_token = "token"
config_product = "Beo product"
config_product_name = "name"
config_light = "Light"
config_light_url = "light_url"
config_light_control_url = "control_url"
config_ip = "ip"
headers = {"Content-Type": "application/json"}
brightness_steps = 20
class ConnectionData:
def __init__(self, hue_api_url_light, hue_control_url, beo_device_ip, beo_device_name):
# http://192.168.195.141/api/*token*/lights/12
# http://192.168.195.141/api/*token*/groups/1/
self.hue_id_url = hue_api_url_light
self.hue_control_url = hue_control_url
# http://192.168.195.141/api/*token*/lights/12/state
# http://192.168.195.141/api/*token*/groups/1/action
self.hue_control_url_full = hue_api_url_light + "/" + hue_control_url
self.beo_notifications_url = 'http://' + beo_device_ip + ":8080/BeoNotify/Notifications?lastId="
self.beo_device_ip = beo_device_ip
self.beo_device_name = beo_device_name
self.headers = {'Content-Type': 'application/json'}
self.interrupt = False
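# Usage sketch (addresses and IDs below are placeholders): a single Hue light is
# controlled through ".../lights/<id>" with control path "state", while a group
# is controlled through ".../groups/<id>" with control path "action".
#
#   light = ConnectionData("http://192.168.1.10/api/*token*/lights/12", "state",
#                          "192.168.1.20", "Living room speaker")
#   group = ConnectionData("http://192.168.1.10/api/*token*/groups/1", "action",
#                          "192.168.1.20", "Living room speaker")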
class BeoLightControl:
def __init__(self):
self.connection_dict = {}
self.conn_data = None
self.devices_discovered = {}
self.beo_device_ip = ""
self.beo_device_name = ""
self.hue_api_ip = ""
self.hue_api_url_base = ""
self.hue_api_url_groups = ""
self.hue_api_url_light = ""
self.hue_api_token = ""
self.hue_light_name = ""
self.hue_api_url_selected_light = ""
self.hue_control_path = ""
self.listner_thread = None
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGINT, self.signal_handler)
def toggle_light(self):
dump = {}
response = requests.get(self.conn_data.hue_id_url, headers=headers)
if response.status_code == 200:
dump = json.loads(response.content.decode('utf-8'))
else:
print ("Something went wrong http status code: " + response.status_code)
return
current_state = bool(dump[self.conn_data.hue_control_url]['on'])
if not current_state:
data = '{"on":' + str(not current_state).lower() + ', "bri": 254}'
else:
data = '{"on":' + str(not current_state).lower() + '}'
#print (data)
requests.put(self.conn_data.hue_control_url_full, headers=headers, data=data)
def change_brightness(self, event_key: str, key_press: bool):
if key_press:
dump = {}
response = requests.get(self.conn_data.hue_id_url, headers=headers)
if response.status_code == 200:
dump = json.loads(response.content.decode('utf-8'))
else:
print ("Something went wrong http status code: " + response.status_code)
return
if event_key == "Down":
data = '{"bri_inc":-254, "transitiontime":30}'
else:
if not bool(dump[self.conn_data.hue_control_url]['on']):
# If the light is off we turn it on and start the transistion
requests.put(self.conn_data.hue_control_url_full, headers=headers, data='{"on": true, "bri": 0}')
data = '{"bri_inc":254, "transitiontime":30}'
else:
data = '{"bri_inc":254, "transitiontime":30}'
else:
data = '{"bri_inc":0}'
requests.put(self.conn_data.hue_control_url_full, headers=headers, data=data)
def handle_event(self, event_key: str, key_press: bool):
#print ("Light key:" + event_key + " press: " + str(key_press))
if key_press and event_key == "Select":
self.toggle_light()
if event_key == "Up" or event_key == "Down":
self.change_brightness(event_key, key_press)
def product_select(self, message):
selection = 0
_ = system('clear')
print (message)
product_list = {}
for i, product in enumerate(self.devices_discovered):
product_list[i] = {}
product_list[i][product] = self.devices_discovered[product]
for product in product_list:
print (str(product) + ": " + str(product_list[product]))
while True:
selection = ""
try:
selection = int(input(""))
except ValueError:
pass
if selection in product_list:
break
else:
print("Invalid selection. Pick another!")
return list(product_list[selection].keys())[0]
def group_select(self):
_ = system('clear')
print ("Please select which group:")
response = requests.get(self.hue_api_url_groups, headers=headers)
if response.status_code == 200:
dump = json.loads(response.content.decode('utf-8'))
else:
print ("Error talking to Hue Bridge!")
return ""
groups = {}
for element in dump:
groups[element] = dump[element]['name']
print (element + ": " + dump[element]['name'])
return input("")
def light_select(self):
        _ = system('clear')
        print ("Please select which light:")
response = requests.get(self.hue_api_url_light, headers=headers)
if response.status_code == 200:
dump = json.loads(response.content.decode('utf-8'))
else:
print ("Error talking to Hue Bridge!")
return ""
lights = {}
for element in dump:
lights[element] = dump[element]['name']
print (element + ": " + dump[element]['name'])
return input("")
def get_light_name(self):
if self.hue_api_url_selected_light:
response = requests.get(self.hue_api_url_selected_light, headers=headers)
if response.status_code == 200:
dump = json.loads(response.content.decode('utf-8'))
else:
return ""
return dump['name']
else:
return ""
def store_light_data(self):
config = {}
with open(configuration_file) as file:
config = yaml.load(file, Loader=yaml.FullLoader)
config[config_light] = {config_light_url: self.hue_api_url_selected_light, config_light_control_url: self.hue_control_path}
with open(configuration_file, "w") as file:
yaml.dump(config, file)
def listner(self):
last_id = "0"
while True:
try:
#print ("notification url: " + self.conn_data.beo_notifications_url + last_id)
r = requests.get(self.conn_data.beo_notifications_url + last_id, stream=True, timeout=20)
for line in r.iter_lines():
if self.conn_data.interrupt:
return
# Skip keep-alive new lines
if line:
decoded_line = line.decode('utf-8')
new_event = json.loads(decoded_line)
if 'notification' in new_event:
if 'data' in new_event['notification']:
data = new_event['notification']['data']
if 'category' in data:
if data['category'] == 'Light':
self.handle_event(data['key'], (data['event'] == 'KeyPress'))
last_id = str(new_event['notification']['id'])
time.sleep(0.05)
            except Exception:
                last_id = "0"
                print ("Problem with the connection to the product! Error: " + str(sys.exc_info()[0]) + "... retrying in 5 sec.")
                time.sleep(5)
def remove_service(self, zeroconf, type, name):
pass
def add_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
self.devices_discovered[str(ipaddress.IPv4Address(info.addresses[0]))] = info.get_name()
def discover_devices(self, service):
self.devices_discovered = {}
zeroconf = Zeroconf()
ServiceBrowser(zeroconf, service, self)
try:
print ("Updating list of devices in the network...")
l = 50
# Initial call to print 0% progress
self.printProgressBar(0, l, prefix='Progress:', suffix='Complete', length=50, autosize=True)
for i in range(0,l):
time.sleep(0.1)
# Update Progress Bar
self.printProgressBar(i + 1, l, prefix='Progress:', suffix='Complete', length=50, autosize=True)
finally:
zeroconf.close()
def select_hue_bridge(self):
self.discover_devices("_hap._tcp.local.")
self.hue_api_ip = self.product_select("Please select your Philps Hue Bridge:")
print ("Hue Bridge IP: " + self.hue_api_ip)
def select_beo_product(self):
self.discover_devices("_beoremote._tcp.local.")
self.beo_device_ip = self.product_select("Please select which product you want to configure:")
self.beo_device_name = self.devices_discovered[self.beo_device_ip]
config = {}
with open(configuration_file) as file:
config = yaml.load(file, Loader=yaml.FullLoader)
config[config_product] = {config_product_name: self.beo_device_name, config_ip: self.beo_device_ip}
with open(configuration_file, "w") as file:
yaml.dump(config, file)
def generate_hue_urls(self):
self.hue_api_url_base = "http://" + self.hue_api_ip + "/api/" + self.hue_api_token + "/"
self.hue_api_url_groups = self.hue_api_url_base + "groups"
self.hue_api_url_light = self.hue_api_url_base + "lights"
def load_stored_config(self):
        conf_file_exists = path.exists(configuration_file)
        #print ("conf file exists: " + str(conf_file_exists))
        if conf_file_exists:
#Load data
with open(configuration_file) as file:
config = yaml.load(file, Loader=yaml.FullLoader)
if config_ip in config[config_hue_bridge]:
self.hue_api_ip = config[config_hue_bridge][config_ip]
else:
print ("Error with ip in config file")
return False
if config_hue_token in config[config_hue_bridge]:
self.hue_api_token = config[config_hue_bridge][config_hue_token]
else:
print ("Error with token in config file")
return False
if config_product in config:
if config_product_name in config[config_product]:
self.beo_device_name = config[config_product][config_product_name]
if config_ip in config[config_product]:
self.beo_device_ip = config[config_product][config_ip]
if config_light in config:
if config_light_control_url in config[config_light] and config_light_url in config[config_light]:
self.hue_api_url_selected_light = config[config_light][config_light_url]
self.hue_control_path = config[config_light][config_light_control_url]
else:
return False
return True
def setup_hue_config(self):
self.select_hue_bridge()
data = '{"devicetype":"BeoLightControl"}'
button_pressed = False
while not button_pressed:
response = requests.post("http://" + self.hue_api_ip + "/api", headers=headers, data=data)
if response.status_code == 200:
dump = json.loads(response.content.decode('utf-8'))[0]
if 'error' in dump:
input("Please press the button on the Philips Hue Bridge and press enter\n")
else:
print ("Connected to Philips Hue Bridge successfully!")
self.hue_api_token = dump['success']['username']
time.sleep(3)
button_pressed = True
else:
print ("Error! HTTP Connection error code: " + response.status_code)
if self.hue_api_token == "":
print ("Error! No Hue token")
return
dict_file = {config_hue_bridge : {config_ip : self.hue_api_ip, config_hue_token : self.hue_api_token}}
with open(configuration_file, "w") as file:
yaml.dump(dict_file, file)
def start_listner(self):
if self.hue_api_url_selected_light:
self.conn_data = ConnectionData(self.hue_api_url_selected_light, self.hue_control_path, self.beo_device_ip, self.beo_device_name)
try:
self.listner_thread = Thread(target=self.listner)
except:
print ("ERROR! " + str(sys.exc_info()[0]))
print ("Started to listen to events from " + self.beo_device_ip)
self.listner_thread.start()
else:
print ("No light selected! Don't start listner!")
def stop_listner(self):
if self.listner_thread:
            if self.listner_thread.is_alive():
self.conn_data.interrupt = True
print ("Stopping listner...")
self.listner_thread.join()
def ui(self):
while True:
_= system('clear')
print ("Current settings:\nProduct: " + self.beo_device_name + "\nLight/Group: " + self.get_light_name())
val = input("\nWhat do you want to do?\n1: Select product\n2: Select Light or Group\n3: Start/Stop listner\n4: Quit\n")
if val == "1":
_= system('clear')
self.select_beo_product()
elif val == "2":
_= system('clear')
val = input("What to you want to control?\n1: Light\n2: Group\n")
if val == "1":
self.hue_control_path = "state"
self.hue_api_url_selected_light = self.hue_api_url_light + "/" + self.light_select()
else:
self.hue_control_path = "action"
self.hue_api_url_selected_light = self.hue_api_url_groups + "/" + self.group_select()
self.store_light_data()
_ = system('clear')
elif val == "3":
_= system('clear')
val = input("Do you want to start or stop the listner?\n1: Start\n2: Stop\n")
if val == "1":
self.start_listner()
time.sleep(5)
else:
self.stop_listner()
_= system('clear')
print ("Listner stopped!")
time.sleep(3)
else:
self.stop_listner()
return
def start(self, listner_mode):
if not self.load_stored_config():
self.setup_hue_config()
self.generate_hue_urls()
#print ("IP: " + self.hue_api_ip + " Token: " + self.hue_api_token)
_= system('clear')
if listner_mode:
self.start_listner()
if self.listner_thread:
                if self.listner_thread.is_alive():
self.listner_thread.join()
else:
self.ui()
def signal_handler(self, signum, frame):
print('Signal handler called with signal', signum)
self.stop_listner()
sys.exit()
# Borrowed from: https://gist.github.com/greenstick/b23e475d2bfdc3a82e34eaa1f6781ee4
def printProgressBar (self, iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', autosize = False):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character | |
    def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this PeerResponseResources object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'PeerResponseResources') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'PeerResponseResources') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class PeerResponseStorage():
"""
The **cached** Kubernetes storage attributes for this component. Not available if peer
was imported.
:attr StorageObject peer: (optional)
:attr StorageObject statedb: (optional)
"""
def __init__(self,
*,
peer: 'StorageObject' = None,
statedb: 'StorageObject' = None) -> None:
"""
Initialize a PeerResponseStorage object.
:param StorageObject peer: (optional)
:param StorageObject statedb: (optional)
"""
self.peer = peer
self.statedb = statedb
@classmethod
def from_dict(cls, _dict: Dict) -> 'PeerResponseStorage':
"""Initialize a PeerResponseStorage object from a json dictionary."""
args = {}
if 'peer' in _dict:
args['peer'] = StorageObject.from_dict(_dict.get('peer'))
if 'statedb' in _dict:
args['statedb'] = StorageObject.from_dict(_dict.get('statedb'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a PeerResponseStorage object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'peer') and self.peer is not None:
_dict['peer'] = self.peer.to_dict()
if hasattr(self, 'statedb') and self.statedb is not None:
_dict['statedb'] = self.statedb.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this PeerResponseStorage object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'PeerResponseStorage') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'PeerResponseStorage') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class RemoveMultiComponentsResponse():
"""
RemoveMultiComponentsResponse.
:attr List[DeleteComponentResponse] removed: (optional)
"""
def __init__(self,
*,
removed: List['DeleteComponentResponse'] = None) -> None:
"""
Initialize a RemoveMultiComponentsResponse object.
:param List[DeleteComponentResponse] removed: (optional)
"""
self.removed = removed
@classmethod
def from_dict(cls, _dict: Dict) -> 'RemoveMultiComponentsResponse':
"""Initialize a RemoveMultiComponentsResponse object from a json dictionary."""
args = {}
if 'removed' in _dict:
args['removed'] = [DeleteComponentResponse.from_dict(x) for x in _dict.get('removed')]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a RemoveMultiComponentsResponse object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'removed') and self.removed is not None:
_dict['removed'] = [x.to_dict() for x in self.removed]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this RemoveMultiComponentsResponse object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'RemoveMultiComponentsResponse') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'RemoveMultiComponentsResponse') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ResourceLimits():
"""
ResourceLimits.
:attr str cpu: (optional) Maximum CPU for subcomponent. Must be >=
"requests.cpu". Defaults to the same value in "requests.cpu". [Resource
details](/docs/blockchain?topic=blockchain-ibp-console-govern-components#ibp-console-govern-components-allocate-resources).
:attr str memory: (optional) Maximum memory for subcomponent. Must be >=
"requests.memory". Defaults to the same value in "requests.memory". [Resource
details](/docs/blockchain?topic=blockchain-ibp-console-govern-components#ibp-console-govern-components-allocate-resources).
"""
def __init__(self,
*,
cpu: str = None,
memory: str = None) -> None:
"""
Initialize a ResourceLimits object.
:param str cpu: (optional) Maximum CPU for subcomponent. Must be >=
"requests.cpu". Defaults to the same value in "requests.cpu". [Resource
details](/docs/blockchain?topic=blockchain-ibp-console-govern-components#ibp-console-govern-components-allocate-resources).
:param str memory: (optional) Maximum memory for subcomponent. Must be >=
"requests.memory". Defaults to the same value in "requests.memory".
[Resource
details](/docs/blockchain?topic=blockchain-ibp-console-govern-components#ibp-console-govern-components-allocate-resources).
"""
self.cpu = cpu
self.memory = memory
@classmethod
def from_dict(cls, _dict: Dict) -> 'ResourceLimits':
"""Initialize a ResourceLimits object from a json dictionary."""
args = {}
if 'cpu' in _dict:
args['cpu'] = _dict.get('cpu')
if 'memory' in _dict:
args['memory'] = _dict.get('memory')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ResourceLimits object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cpu') and self.cpu is not None:
_dict['cpu'] = self.cpu
if hasattr(self, 'memory') and self.memory is not None:
_dict['memory'] = self.memory
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ResourceLimits object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ResourceLimits') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ResourceLimits') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
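# --- Added illustrative sketch (not part of the generated SDK models) -------
# Every model in this module follows the same from_dict()/to_dict() pattern; a
# hedged example of that round trip for ResourceLimits is shown below. The
# concrete values ("500m", "1G") are illustrative assumptions, not service
# defaults.
def _example_resource_limits_roundtrip() -> bool:
    """Illustrative only: build a ResourceLimits from JSON-like data and back."""
    raw = {'cpu': '500m', 'memory': '1G'}
    limits = ResourceLimits.from_dict(raw)
    # to_dict() only emits attributes that were set, so the result mirrors `raw`.
    return limits.to_dict() == raw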
class ResourceObject():
"""
This field requires the use of Fabric v1.4.* and higher.
:attr ResourceRequests requests:
:attr ResourceLimits limits: (optional)
"""
def __init__(self,
requests: 'ResourceRequests',
*,
limits: 'ResourceLimits' = None) -> None:
"""
Initialize a ResourceObject object.
:param ResourceRequests requests:
:param ResourceLimits limits: (optional)
"""
self.requests = requests
self.limits = limits
@classmethod
def from_dict(cls, _dict: Dict) -> 'ResourceObject':
"""Initialize a ResourceObject object from a json dictionary."""
args = {}
if 'requests' in _dict:
args['requests'] = ResourceRequests.from_dict(_dict.get('requests'))
else:
raise ValueError('Required property \'requests\' not present in ResourceObject JSON')
if 'limits' in _dict:
args['limits'] = ResourceLimits.from_dict(_dict.get('limits'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ResourceObject object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'requests') and self.requests is not None:
_dict['requests'] = self.requests.to_dict()
if hasattr(self, 'limits') and self.limits is not None:
_dict['limits'] = self.limits.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ResourceObject object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ResourceObject') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ResourceObject') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
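# --- Added illustrative sketch (not part of the generated SDK models) -------
# ResourceObject.from_dict() enforces the required 'requests' property. The
# example below assumes ResourceRequests accepts the same cpu/memory keys as
# ResourceLimits above; the values are illustrative only.
def _example_resource_object_from_dict() -> dict:
    """Illustrative only: a missing 'requests' raises, a complete dict round-trips."""
    try:
        ResourceObject.from_dict({'limits': {'cpu': '800m'}})
    except ValueError:
        pass  # 'Required property requests not present in ResourceObject JSON'
    full = {'requests': {'cpu': '400m', 'memory': '800M'},
            'limits': {'cpu': '800m', 'memory': '1G'}}
    return ResourceObject.from_dict(full).to_dict()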
class ResourceObjectCouchDb():
"""
*Legacy field name* Use the field `statedb` instead. This field requires the use of
Fabric v1.4.* and higher.
:attr ResourceRequests requests:
:attr ResourceLimits limits: (optional)
"""
def __init__(self,
requests: 'ResourceRequests',
*,
limits: 'ResourceLimits' = None) -> None:
"""
Initialize a ResourceObjectCouchDb object.
:param ResourceRequests requests:
:param ResourceLimits limits: (optional)
"""
self.requests = requests
self.limits = limits
@classmethod
def from_dict(cls, _dict: Dict) -> 'ResourceObjectCouchDb':
"""Initialize a ResourceObjectCouchDb object from a json dictionary."""
args = {}
if 'requests' in _dict:
args['requests'] = ResourceRequests.from_dict(_dict.get('requests'))
else:
raise ValueError('Required property \'requests\' not present in ResourceObjectCouchDb JSON')
if 'limits' in _dict:
args['limits'] = ResourceLimits.from_dict(_dict.get('limits'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ResourceObjectCouchDb object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'requests') and self.requests is not None:
_dict['requests'] = self.requests.to_dict()
if hasattr(self, 'limits') and self.limits is not None:
_dict['limits'] = self.limits.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ResourceObjectCouchDb object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ResourceObjectCouchDb') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ResourceObjectCouchDb') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ResourceObjectFabV1():
"""
This field requires the use of Fabric v1.4.* and **lower**.
:attr ResourceRequests requests:
:attr ResourceLimits limits: (optional)
"""
def __init__(self,
requests: 'ResourceRequests',
*,
limits: 'ResourceLimits' = None) -> None:
"""
Initialize a ResourceObjectFabV1 object.
:param ResourceRequests requests:
:param ResourceLimits limits: (optional)
"""
self.requests = requests
self.limits = limits
@classmethod
def from_dict(cls, _dict: Dict) -> 'ResourceObjectFabV1':
"""Initialize a ResourceObjectFabV1 object from a json dictionary."""
args = {}
if 'requests' in _dict:
args['requests'] = ResourceRequests.from_dict(_dict.get('requests'))
else:
raise ValueError('Required property \'requests\' not present in ResourceObjectFabV1 JSON')
if 'limits' in | |
6245691.858534137, 11.688941994097817, 0.015110828165630823, 1.670666167191741e-06],
[1050.9762404493679, 1257.606216652866, 1428.1727195332353, 2676217.667872637, 5029083.46383821,
6252781.131346029, 9.912726746719038, 0.01508497956653298, 1.6858501133596892e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6264496.097901617, 11.605501217123276, 0.015319465719948019, 1.746960716329272e-06],
[1180.7233642687456, 1290.1144240449926, 1434.8120791304352, 2854778.224454453, 5290343.222690649,
6018525.355599652, 10.111309139135487, 0.017509170428913215, 1.874732806720558e-06],
[1114.9126539360466, 1260.5869822935006, 1420.7016199249624, 2711709.7515290566, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2674527.9391689897, 5634195.507417285,
6009864.991404326, 11.605501217123276, 0.015129553230613428, 1.746960716329272e-06],
[1176.1786883446464, 1281.1513670331767, 1422.5889236131238, 2643981.980613781, 5933402.3025543075,
6009864.991404326, 11.603245339559699, 0.015319465719948019, 1.746960716329272e-06],
[1114.834792035313, 1252.4221946420748, 1431.2324989413153, 2711709.7515290566, 5177667.749296468,
6205159.784569372, 9.962874958185285, 0.014995216559648368, 1.64441131717817e-06],
[1115.3505637801043, 1259.3587163655109, 1417.21404073115, 2710386.6655561705, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1117.7420821280732, 1268.6265627123964, 1417.260144182994, 2711709.7515290566, 5151932.1545183025,
6293398.136293111, 9.899845029941671, 0.014904179979953325, 1.6416817287721877e-06],
[1100.1408530781362, 1259.20529625876, 1420.7016199249624, 2794529.276377869, 5137466.622582068,
6277042.034213948, 9.959965929147502, 0.014877788107490625, 1.6767475913984607e-06],
[1140.2618729167225, 1259.7696328062204, 1439.7518547085635, 2711709.7515290566, 5172210.454034539,
6129007.601007714, 9.459700490332747, 0.01508497956653298, 1.8717425252296465e-06],
[1114.9126539360466, 1259.1180510408708, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6277042.034213948, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1114.9126539360466, 1260.5869822935006, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1085.9977035903282, 1259.3587163655109, 1417.260144182994, 2711709.7515290566, 5151932.1545183025,
6010849.40413067, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1172.6173836553487, 1231.4016603382263, 1416.1995418912618, 2618905.5231839283, 5151840.492070999,
6245691.858534137, 11.593330796866331, 0.015109546481814558, 1.716788954519652e-06],
[1137.2060277008807, 1263.0364881672328, 1435.9138529261822, 2711709.7515290566, 5530424.3268809505,
6058545.49793099, 9.772174739908307, 0.01508497956653298, 1.6453299579647577e-06],
[1189.435265267026, 1289.634611933384, 1434.8120791304352, 2862137.984148449, 5290343.222690649,
5912724.800574016, 10.184880722099962, 0.017509170428913215, 1.928535330675258e-06],
[1114.8364994143328, 1252.420943546848, 1431.2324989413153, 2711709.7515290566, 5177667.749296468,
6204147.819576919, 9.962874958185285, 0.014995216559648368, 1.644382598706639e-06],
[1128.6500541303647, 1260.508213067299, 1420.6927251709847, 2711709.7515290566, 5196098.543422922,
6267381.396045687, 9.805607387570063, 0.01508497956653298, 1.644075174710523e-06],
[1180.010905938387, 1284.2421194708988, 1434.8120791304352, 2854778.224454453, 5290343.222690649,
5907538.352840094, 10.111309139135487, 0.017509170428913215, 1.8805380048230116e-06],
[1176.1786883446464, 1283.60271590594, 1422.5889236131238, 2643981.980613781, 5933402.3025543075,
6019806.243375341, 11.603245339559699, 0.015319465719948019, 1.746960716329272e-06],
[1110.1175336282156, 1252.4221946420748, 1431.2324989413153, 2711709.7515290566, 5177667.749296468,
6205159.784569372, 9.962874958185285, 0.014995216559648368, 1.64441131717817e-06],
[1133.9089419004854, 1259.7696328062204, 1436.8742705057894, 2711709.7515290566, 5172210.454034539,
6129007.601007714, 9.459700490332747, 0.01508497956653298, 1.8717425252296465e-06],
[1114.9126539360466, 1260.920980046961, 1427.7914453000797, 2711709.7515290566, 5154755.245931415,
6346803.504583463, 9.87071144774333, 0.014332511765226055, 1.64751e-06],
[1110.0932897166533, 1263.0364881672328, 1435.9138529261822, 2711709.7515290566, 5183912.901011421,
6382146.572286638, 10.641003542772033, 0.015398836251961747, 1.6866917168224695e-06],
[1109.4847517557757, 1260.5869822935006, 1428.1550395559061, 2749269.6046548923, 5151932.1545183025,
6374031.613940251, 10.746163282079246, 0.01508497956653298, 1.64751e-06],
[1085.9977035903282, 1287.212660761374, 1429.0126531036894, 2853237.462187207, 5151932.1545183025,
6123092.829186761, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1172.7862297639358, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 5082503.039302015,
6252960.904134171, 11.417802244505165, 0.01508497956653298, 1.6571644711735225e-06],
[1100.4582501781676, 1260.5869822935006, 1428.1550395559061, 2749269.6046548923, 4637750.6030374495,
6771063.144279609, 10.746163282079246, 0.01508497956653298, 1.64751e-06],
[1114.9126539360466, 1263.4435351891264, 1420.7016199249624, 2711709.7515290566, 5135604.332508602,
6339744.606807769, 9.869108693672011, 0.01508497956653298, 1.64751e-06],
[1170.681625634677, 1261.2561027220345, 1424.647927352167, 2643981.980613781, 5390118.746328119,
6264496.097901617, 11.605501217123276, 0.015319465719948019, 1.7448699564399412e-06],
[1102.1152991124782, 1257.5624747055258, 1310.4803905865347, 2711709.7515290566, 5167041.861010327,
6149853.921081831, 9.837636314132341, 0.01462714264058961, 1.6997971589837513e-06],
[1117.7420821280732, 1268.6265627123964, 1417.260144182994, 2711709.7515290566, 5151932.1545183025,
6293398.136293111, 9.927254088826919, 0.014904179979953325, 1.6416817287721877e-06],
[1106.8749431340277, 1252.420943546848, 1435.4274518724374, 2711709.7515290566, 5177667.749296468,
6204147.819576919, 9.607588244206696, 0.014995216559648368, 1.644382598706639e-06],
[1114.9126539360466, 1259.1180510408708, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6277042.034213948, 9.959965929147502, 0.014580502784658061, 1.675614930109933e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2674527.9391689897, 5634195.507417285,
6253461.142118783, 11.605501217123276, 0.015129553230613428, 1.646211033468178e-06],
[1114.834792035313, 1260.5869822935006, 1424.6533094655529, 2711709.7515290566, 5177667.749296468,
6058545.49793099, 9.869052328516, 0.01508497956653298, 1.64751e-06],
[1100.1063624541289, 1231.7664627414742, 1436.3027121805815, 3627678.357077309, 4986879.9039197,
2532740.0207836498, 10.181273986642962, 0.016620130673890635, 2.3929656390077357e-06],
[1118.376653988001, 1250.323551925034, 1424.6533094655529, 2711709.7515290566, 5177667.749296468,
6058545.49793099, 9.869052328516, 0.01508497956653298, 1.6395049308713066e-06],
[1117.7420821280732, 1263.6042552377323, 1417.260144182994, 2711709.7515290566, 5151932.1545183025,
6316177.007625211, 9.927254088826919, 0.014904179979953325, 1.6692606546872883e-06],
[1050.9762404493679, 1257.606216652866, 1428.1727195332353, 2676217.667872637, 5029083.46383821,
6252781.131346029, 9.912726746719038, 0.01508497956653298, 1.6858501133596892e-06],
[1172.1322901571223, 1249.0062677413969, 1424.647927352167, 2676217.667872637, 5151840.492070999,
6253461.142118783, 11.605501217123276, 0.01508497956653298, 1.670666167191741e-06],
[1179.0771063597485, 1217.4447193223818, 1413.6456109965338, 2620530.399487235, 5184803.83682678,
6245691.858534137, 11.688941994097817, 0.015110828165630823, 1.670666167191741e-06],
[1106.8629264804772, 1251.8935813168757, 1435.4274518724374, 2711709.7515290566, 5337967.192104895,
6138136.18408686, 9.607588244206696, 0.014995216559648368, 1.644382598706639e-06],
[1140.2618729167225, 1259.7696328062204, 1439.7518547085635, 2711709.7515290566, 5172210.454034539,
6129007.601007714, 9.459700490332747, 0.01508497956653298, 1.8717425252296465e-06],
[1180.010905938387, 1284.2421194708988, 1446.7483442856462, 2854778.224454453, 5290343.222690649,
5907538.352840094, 10.175809217601456, 0.017509170428913215, 1.8805380048230116e-06],
[1114.9126539360466, 1260.5869822935006, 1420.7016199249624, 2711709.7515290566, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1117.7420821280732, 1268.6265627123964, 1417.260144182994, 2711709.7515290566, 5151932.1545183025,
6293398.136293111, 9.899845029941671, 0.014904179979953325, 1.6416817287721877e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6264496.097901617, 11.426790838912128, 0.015319465719948019, 1.746960716329272e-06],
[1114.9126539360466, 1259.1180510408708, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6277042.034213948, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1183.39613591346, 1249.007116324817, 1424.9780695578336, 2685865.672715818, 5151840.492070999,
6253461.142118783, 11.688941994097817, 0.015110828165630823, 1.7287073342999962e-06],
[1176.1786883446464, 1283.60271590594, 1422.5889236131238, 2643981.980613781, 5933402.3025543075,
6019806.243375341, 11.603245339559699, 0.015319465719948019, 1.746960716329272e-06],
[1114.9126539360466, 1263.4435351891264, 1420.7016199249624, 2711709.7515290566, 5234415.800976389,
6351339.408023649, 9.599662957877094, 0.01508497956653298, 1.64751e-06],
[1115.3505637801043, 1259.3587163655109, 1417.21404073115, 2710386.6655561705, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1118.8759320363351, 1268.4658426637905, 1424.8562282482428, 2711709.7515290566, 5135604.332508602,
6339744.606807769, 9.869108693672011, 0.01508497956653298, 1.64751e-06],
[1115.400748739093, 1265.5877782356972, 1428.3010571928912, 2714079.301423972, 5048177.206187649,
6403353.83122605, 11.579324789874297, 0.015321153721496901, 1.6447615608703846e-06],
[1114.9126539360466, 1260.647368087708, 1420.7016199249624, 2711709.7515290566, 5227000.31504303,
6346803.504583463, 9.87071144774333, 0.01477210173962785, 1.64751e-06],
[1100.1408530781362, 1259.20529625876, 1420.7016199249624, 2794529.276377869, 5137466.622582068,
6291895.722338178, 9.959965929147502, 0.014877788107490625, 1.6827081430883913e-06],
[1114.834792035313, 1252.4221946420748, 1431.2324989413153, 2711709.7515290566, 5177667.749296468,
6205159.784569372, 9.962874958185285, 0.014995216559648368, 1.64441131717817e-06],
[1085.9977035903282, 1287.212660761374, 1429.0126531036894, 2853237.462187207, 5151932.1545183025,
6123092.829186761, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1114.8364994143328, 1252.420943546848, 1431.2324989413153, 2711709.7515290566, 5177667.749296468,
6204147.819576919, 9.962874958185285, 0.014995216559648368, 1.644382598706639e-06],
[1111.2176303621834, 1265.581002946954, 1420.7016199249624, 2711709.7515290566, 4972815.830228467,
6346803.504583463, 9.867608359905255, 0.014875693737049485, 1.698160937631157e-06],
[1114.8364994143328, 1259.8285287095925, 1437.3529725977721, 2711709.7515290566, 5061252.899416794,
6609284.374515722, 9.962874958185285, 0.014995216559648368, 1.644382598706639e-06],
[1172.1322901571223, 1272.8969286837373, 1424.647927352167, 2643981.980613781, 5644960.1495587025,
6343918.206439393, 11.605501217123276, 0.015319465719948019, 1.746960716329272e-06],
[1110.0932897166533, 1263.0364881672328, 1435.9138529261822, 2711709.7515290566, 5183912.901011421,
6382146.572286638, 10.641003542772033, 0.015398836251961747, 1.6866917168224695e-06],
[1114.9126539360466, 1260.48746626119, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1180.7233642687456, 1290.1144240449926, 1434.8120791304352, 2854778.224454453, 5290343.222690649,
6018525.355599652, 10.111309139135487, 0.017509170428913215, 1.874732806720558e-06],
[1114.834792035313, 1252.4221946420748, 1431.2324989413153, 2711709.7515290566, 5177667.749296468,
5999071.985613948, 9.962874958185285, 0.015031653825581753, 1.64441131717817e-06],
[1050.9762404493679, 1257.2267980758488, 1440.177953468843, 2711337.2097504647, 5029083.46383821,
6252781.131346029, 9.840366214118836, 0.01508497956653298, 1.6858501133596892e-06],
[1110.0932897166533, 1268.3282815822229, 1435.9138529261822, 2743472.8696363987, 5183912.901011421,
6382146.572286638, 10.641003542772033, 0.015398836251961747, 1.6866917168224695e-06],
[1172.6173836553487, 1231.4016603382263, 1416.1995418912618, 2618905.5231839283, 5151840.492070999,
6245691.858534137, 11.593330796866331, 0.015109546481814558, 1.716788954519652e-06],
[1137.2060277008807, 1263.0364881672328, 1435.9138529261822, 2711709.7515290566, 5530424.3268809505,
6058545.49793099, 9.772174739908307, 0.01508497956653298, 1.6453299579647577e-06],
[1114.834792035313, 1252.4221946420748, 1431.2324989413153, 2711709.7515290566, 5177667.749296468,
6205159.784569372, 9.8370396447197, 0.015156027526283776, 1.64441131717817e-06],
[1172.7862297639358, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 5082503.039302015,
6252960.904134171, 11.033638203846577, 0.01532902143707891, 1.6571644711735225e-06],
[1128.6500541303647, 1260.508213067299, 1420.6927251709847, 2711709.7515290566, 5196098.543422922,
6267381.396045687, 9.805607387570063, 0.01508497956653298, 1.644075174710523e-06],
[1133.9089419004854, 1259.7696328062204, 1436.8742705057894, 2711709.7515290566, 5172210.454034539,
6129007.601007714, 9.459700490332747, 0.01508497956653298, 1.8717425252296465e-06],
[1111.2176303621834, 1265.6413887411613, 1420.7016199249624, 2732860.0254997723, 4972815.830228467,
6433383.956931086, 9.867608359905255, 0.014875693737049485, 1.698160937631157e-06],
[1114.8364994143328, 1252.420943546848, 1431.2324989413153, 2711709.7515290566, 5177667.749296468,
6217837.80643352, 9.962874958185285, 0.014794895554939519, 1.644382598706639e-06],
[1180.7233642687456, 1290.1144240449926, 1434.8120791304352, 2854778.224454453, 5290343.222690649,
6018525.355599652, 10.111309139135487, 0.01782143850877657, 1.874993741056419e-06],
[1114.9126539360466, 1260.5869822935006, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1172.7862297639358, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 5082503.039302015,
6252960.904134171, 11.417802244505165, 0.01508497956653298, 1.6571644711735225e-06],
[1115.3505637801043, 1259.3587163655109, 1417.21404073115, 2710386.6655561705, 5151932.1545183025,
6252740.844923852, 9.839306628863088, 0.01437360244072579, 1.6406324153956573e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6264496.097901617, 11.605501217123276, 0.015319465719948019, 1.746960716329272e-06],
[1114.9126539360466, 1255.3060555816992, 1420.7016199249624, 2711709.7515290566, 5135604.332508602,
6339744.606807769, 9.869108693672011, 0.01508497956653298, 1.64751e-06],
[1172.1322901571223, 1272.8969286837373, 1421.7990273266873, 2643981.980613781, 5644960.1495587025,
6054341.979799062, 11.605501217123276, 0.01439486405547544, 1.746960716329272e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2674527.9391689897, 5634195.507417285,
6253461.142118783, 11.605501217123276, 0.015129553230613428, 1.646211033468178e-06],
[1114.834792035313, 1260.5869822935006, 1424.6533094655529, 2711709.7515290566, 5177667.749296468,
6058545.49793099, 9.869052328516, 0.01508497956653298, 1.64751e-06],
[1100.1063624541289, 1231.7664627414742, 1436.3027121805815, 3627678.357077309, 4986879.9039197,
2532740.0207836498, 10.181273986642962, 0.016620130673890635, 2.3929656390077357e-06],
[1118.8759320363351, 1268.4658426637905, 1424.8562282482428, 2711709.7515290566, 5135604.332508602,
6339744.606807769, 9.869108693672011, 0.01508497956653298, 1.64751e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6264496.097901617, 11.426790838912128, 0.015319465719948019, 1.746960716329272e-06],
[1114.9126539360466, 1263.4435351891264, 1420.7016199249624, 2711709.7515290566, 5234415.800976389,
6351339.408023649, 9.599662957877094, 0.01508497956653298, 1.64751e-06],
[1172.7862297639358, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 5082503.039302015,
6252960.904134171, 11.14125315048896, 0.01508497956653298, 1.65184419912513e-06],
[1172.7862297639358, 1260.5869822935006, 1424.6533094655529, 2756948.118884614, 5082503.039302015,
6252960.904134171, 11.417802244505165, 0.01508497956653298, 1.6571644711735225e-06],
[1114.9126539360466, 1255.3060555816992, 1420.7016199249624, 2711709.7515290566, 5135604.332508602,
6339744.606807769, 9.869108693672011, 0.01508497956653298, 1.64751e-06],
[1111.2176303621834, 1265.581002946954, 1420.7016199249624, 2711709.7515290566, 4972815.830228467,
6346803.504583463, 9.867608359905255, 0.014875693737049485, 1.698160937631157e-06],
[1179.0771063597485, 1217.4447193223818, 1413.6456109965338, 2620530.399487235, 5184803.83682678,
6245691.858534137, 11.688941994097817, 0.015110828165630823, 1.670666167191741e-06],
[1180.7233642687456, 1290.1144240449926, 1434.8120791304352, 2854778.224454453, 5290343.222690649,
6018525.355599652, 10.111309139135487, 0.017509170428913215, 1.874732806720558e-06],
[1094.20494378046, 1255.3060555816992, 1420.7016199249624, 2640666.855664094, 5135604.332508602,
6266674.486555223, 10.091325553538296, 0.01508497956653298, 1.64751e-06],
[1110.0932897166533, 1277.4127717832648, 1435.9138529261822, 2743472.8696363987, 5202115.561551428,
6382146.572286638, 10.62631839334633, 0.015398836251961747, 1.6866917168224695e-06],
[1176.1786883446464, 1283.60271590594, 1422.5889236131238, 2643981.980613781, 5933402.3025543075,
6019806.243375341, 11.603245339559699, 0.015319465719948019, 1.746960716329272e-06],
[1106.8629264804772, 1251.8935813168757, 1435.4274518724374, 2711709.7515290566, 5337967.192104895,
6138136.18408686, 9.607588244206696, 0.014995216559648368, 1.644382598706639e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2643981.980613781, 5539495.081817166,
6264496.097901617, 11.605501217123276, 0.015319465719948019, 1.746960716329272e-06],
[1172.1322901571223, 1263.903713429593, 1424.647927352167, 2627915.438331411, 5539495.081817166,
6264496.097901617, 11.21124190291663, 0.015654224593012078, 1.7492546775003008e-06],
[1114.9126539360466, 1260.5869822935006, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1114.9126539360466, 1260.48746626119, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1183.39613591346, 1249.007116324817, 1424.9780695578336, 2685865.672715818, 5151840.492070999,
6253461.142118783, 11.688941994097817, 0.015110828165630823, 1.7287073342999962e-06],
[1114.9126539360466, 1259.1180510408708, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6277042.034213948, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1172.1322901571223, 1249.0062677413969, 1424.647927352167, 2676217.667872637, 5151840.492070999,
6253461.142118783, 11.605501217123276, 0.01508497956653298, 1.670666167191741e-06],
[1115.400748739093, 1265.5877782356972, 1428.3010571928912, 2714079.301423972, 5048177.206187649,
6403353.83122605, 11.579324789874297, 0.015321153721496901, 1.6447615608703846e-06],
[1183.39613591346, 1249.007116324817, 1424.9780695578336, 2688406.096439742, 5151840.492070999,
6253461.142118783, 11.688941994097817, 0.015110828165630823, 1.7287073342999962e-06],
[1114.834792035313, 1252.4221946420748, 1431.2324989413153, 2711709.7515290566, 5177667.749296468,
6205159.784569372, 9.962874958185285, 0.014995216559648368, 1.64441131717817e-06],
[1128.7178736119527, 1260.508213067299, 1420.090845472287, 2711709.7515290566, 5196098.543422922,
6267381.396045687, 9.805607387570063, 0.01508497956653298, 1.6462533731904685e-06],
[1180.010905938387, 1284.2421194708988, 1446.7483442856462, 2854778.224454453, 5290343.222690649,
5907538.352840094, 10.175809217601456, 0.017352132407197105, 1.8805380048230116e-06],
[1115.3505637801043, 1259.3587163655109, 1417.21404073115, 2710386.6655561705, 5151932.1545183025,
6252740.844923852, 9.899845029941671, 0.01491459237885602, 1.6416646007583103e-06],
[1114.8364994143328, 1252.420943546848, 1431.2324989413153, 2711709.7515290566, 5177667.749296468,
6204147.819576919, 9.962874958185285, 0.014995216559648368, 1.644382598706639e-06],
[1114.9126539360466, 1254.7974343990907, 1420.4426978781207, 2711709.7515290566, 5135604.332508602,
6339744.606807769, 9.869108693672011, 0.01462354532140832, 1.64751e-06],
[1050.9762404493679, 1257.606216652866, 1428.1727195332353, 2676217.667872637, 5029083.46383821,
6252781.131346029, 9.912726746719038, 0.01508497956653298, 1.6858501133596892e-06],
[1172.6173836553487, 1237.9667862455688, 1416.1995418912618, 2618905.5231839283, 5079617.930562607,
6245691.858534137, 11.771103069974284, 0.015512293130991383, 1.716788954519652e-06],
[1050.9762404493679, 1257.2267980758488, 1440.177953468843, 2711337.2097504647, 5029083.46383821,
6252781.131346029, 9.840366214118836, 0.01508497956653298, 1.6858501133596892e-06],
[1140.2618729167225, 1259.7696328062204, 1439.7518547085635, 2711709.7515290566, 5172210.454034539,
6129007.601007714, 9.459700490332747, 0.01508497956653298, 1.8717425252296465e-06],
[1180.010905938387, 1284.2421194708988, 1446.7483442856462, 2854778.224454453, 5290343.222690649,
5907538.352840094, 10.175809217601456, 0.017509170428913215, 1.8805380048230116e-06],
[1141.2570102982986, 1259.7696328062204, 1439.7518547085635, 2753844.2247911957, 5172210.454034539,
6129007.601007714, 9.459700490332747, 0.015110704744109062, 1.8717425252296465e-06],
[1114.9126539360466, 1260.48746626119, 1408.3354294977653, 2794529.276377869, 5197901.329027299,
6346803.504583463, 9.87071144774333, 0.01505009711174968, 1.64751e-06],
[1110.0932897166533, 1263.0364881672328, 1435.9138529261822, 2711709.7515290566, 5183912.901011421,
6382146.572286638, 10.641003542772033, 0.015398836251961747, 1.6866917168224695e-06],
[1110.0932897166533, 1268.3282815822229, 1435.9138529261822, 2743472.8696363987, 5183912.901011421,
6382146.572286638, 10.641003542772033, 0.015398836251961747, 1.6866917168224695e-06],
[1180.7233642687456, 1290.1144240449926, 1434.8120791304352, 2854778.224454453, 5478555.735442081,
6018525.355599652, 10.111309139135487, 0.017509170428913215, 1.874732806720558e-06],
[1114.9126539360466, 1247.339086600727, 1421.8558877581204, 2794529.276377869, 5151932.1545183025,
6346803.504583463, 9.87071144774333, 0.01508497956653298, 1.64751e-06],
[1114.8364994143328, 1252.420943546848, 1431.2324989413153, 2711709.7515290566, 5177667.749296468,
6217837.80643352, 9.962874958185285, 0.014794895554939519, 1.644382598706639e-06],
[1100.1408530781362, 1259.20529625876, 1420.7016199249624, 2794529.276377869, 5137466.622582068,
6291895.722338178, 9.959965929147502, 0.014877788107490625, 1.6827081430883913e-06],
[1114.834792035313, 1252.4221946420748, 1431.2324989413153, 2711709.7515290566, 5177667.749296468,
5999071.985613948, 9.962874958185285, 0.015031653825581753, 1.64441131717817e-06],
[1115.3505637801043, 1259.3587163655109, 1417.21404073115, 2710386.6655561705, 5151932.1545183025,
6252740.844923852, 9.839306628863088, 0.01437360244072579, 1.6406324153956573e-06],
[1114.8364994143328, 1259.8285287095925, 1437.3529725977721, 2711709.7515290566, 5061252.899416794,
6609284.374515722, 9.962874958185285, 0.014995216559648368, 1.644382598706639e-06],
[1161.9308463271727, 1239.3344143550214, 1416.1995418912618, 2618905.5231839283, 5151840.492070999,
6245691.858534137, 11.593330796866331, 0.01534698148474761, 1.716788954519652e-06],
[1172.6173836553487, 1231.4016603382263, 1416.1995418912618, 2618905.5231839283, 5151840.492070999,
6245691.858534137, 11.593330796866331, 0.015109546481814558, 1.716788954519652e-06],
[1172.1322901571223, 1272.8969286837373, 1424.647927352167, 2643981.980613781, 5644960.1495587025,
6343918.206439393, 11.605501217123276, 0.015319465719948019, 1.746960716329272e-06],
[1172.1322901571223, 1261.2561027220345, 1424.647927352167, 2674527.9391689897, 5634195.507417285,
6253461.142118783, 11.605501217123276, 0.015129553230613428, 1.646211033468178e-06],
[1114.834792035313, 1260.5869822935006, 1424.6533094655529, 2711709.7515290566, 5177667.749296468,
6058545.49793099, 9.869052328516, 0.01508497956653298, 1.64751e-06],
[1100.1063624541289, 1231.7664627414742, 1436.3027121805815, 3627678.357077309, 4986879.9039197,
2532740.0207836498, 10.181273986642962, 0.016620130673890635, 2.3929656390077357e-06],
[1050.9762404493679, 1253.7997759357147, 1425.0398300804388, 2643910.964641653, 5029083.46383821,
6252781.131346029, 9.912726746719038, 0.015713623088508245, 1.6858501133596892e-06],
[1171.0989638205704, 1269.4644002253424, 1424.647927352167, 2627915.438331411, 5539495.081817166,
6264496.097901617, 11.21124190291663, 0.015912513251578327, 1.7492546775003008e-06],
[1179.6140307813284, 1284.2421194708988, 1446.7483442856462, 2854778.224454453, 5290343.222690649,
5907538.352840094, 10.175809217601456, 0.017136258349463553, 1.8805380048230116e-06],
[1114.9126539360466, 1259.1180510408708, 1420.7016199249624, 2794529.276377869, 5151932.1545183025,
6277042.034213948, 9.87071144774333, | |
<filename>statestream/interfaces/process_if_cifar10.py
# -*- coding: utf-8 -*-
# Copyright (c) 2017 - for information on the respective copyright owner
# see the NOTICE file and/or the repository https://github.com/boschresearch/statestream
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
from skimage.transform import resize
try:
import pickle as pckl
except:
try:
import cPickle as pckl
except:
pckl = None
from statestream.interfaces.process_if import ProcessIf
from statestream.interfaces.utils.temporal_confusion_matrix import TemporalConfusionMatrix
from statestream.utils.shared_memory_layout import SharedMemoryLayout as ShmL
from statestream.meta.neuron_pool import np_state_shape
def if_interfaces():
"""Returns the specific interfaces as strings of the interface.
Parameters:
-----------
net : dict
The network dictionary containing all nps, sps, plasts, ifs.
name : str
The unique string name of this interface.
"""
# Specify sub-interfaces.
return {"out": ["cf10_image", "cf10_label"],
"in": ["cf10_pred"]
}
def if_init(net, name, dat_name, dat_layout, mode=None):
"""Return value for interface parameter / variable.
Parameters:
-----------
net : dict
Complete network dictionary containing all nps, sps, plasts, ifs.
name : str
The unique string identifier for the interface.
ident : int
The unique process id for the process of this interface.
param : dict
Dictionary of core parameters.
mn : MetaNetwork
deprecated
"""
# Default return is None.
dat_value = None
# Return initialized value.
return dat_value
def if_shm_layout(name, net, param):
"""Return shared memory layout for civar10 interface.
Parameters
----------
net : dict
Complete network dictionary containing all nps, sps, plasts, ifs.
name : str
The unique string identifier for the interface.
param : dict
Dictionary of core parameters.
"""
# Get interface dictionary.
p = net["interfaces"][name]
# Begin with empty layout.
shm_layout = {}
shm_layout["parameter"] = {}
shm_layout["variables"] = {}
# Add parameter.
# -------------------------------------------------------------------------
# Add mode parameter.
shm_layout["parameter"]["mode"] \
= ShmL("np", (), np.int32, p.get("mode", 0))
# Add durations as numpy parameters.
shm_layout["parameter"]["min_duration"] = ShmL("np", (), np.int32, 12, 1, None)
shm_layout["parameter"]["max_duration"] = ShmL("np", (), np.int32, 16, 1, None)
shm_layout["parameter"]["fading"] = ShmL("np", (), np.int32, 4, 0, None)
# Add variables.
# -------------------------------------------------------------------------
# Add all outputs as variables.
for o in p["out"]:
tmp_target = o
# Consider remapping.
if "remap" in p:
if o in p["remap"]:
tmp_target = p["remap"][o]
# Set layout.
shm_layout["variables"][o] = ShmL("np",
np_state_shape(net, tmp_target),
np.float32,
0)
# We also add a simple trigger variable for new stimulus onsets.
shm_layout["variables"]["_trigger_"] \
= ShmL("np", [net["agents"],], np.float32, 0)
shm_layout["variables"]["_epoch_trigger_"] \
= ShmL("np", [3,], np.float32, 0)
# If a prediction is given as input we compute a confusion matrix.
if "cf10_pred" in p["in"]:
# Determine number of classes, for this we need the real prediction np.
tmp_target = "cf10_pred"
if "remap" in p:
if "cf10_pred" in p["remap"]:
tmp_target = p["remap"]["cf10_pred"]
no_classes = np_state_shape(net, tmp_target)[1]
# Create layout for conf-mat.
shm_layout["variables"]["conf_mat_train"] \
= ShmL("np",
[p.get("conf-mat window", 9),
no_classes,
no_classes],
np.float32,
0)
shm_layout["variables"]["conf_mat_valid"] \
= ShmL("np",
[p.get("conf-mat window", 9),
no_classes,
no_classes],
np.float32,
0)
shm_layout["variables"]["acc_train"] \
= ShmL("np",
[p.get("conf-mat window", 9),
1,
1],
np.float32,
0)
shm_layout["variables"]["acc_valid"] \
= ShmL("np",
[p.get("conf-mat window", 9),
1,
1],
np.float32,
0)
# Return layout.
return shm_layout
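# --- Added illustrative sketch (not part of the original interface) ---------
# A hedged example of the interface entry this module expects under
# net["interfaces"][name]. The keys mirror those read by if_shm_layout() above
# and by ProcessIf_cifar10.initialize() below; the exact st_graph syntax and
# the "type" tag are assumptions, not taken from the statestream documentation.
_EXAMPLE_CF10_INTERFACE_SPEC = {
    "type": "cifar10",                                  # assumed type tag
    "source_path": "/path/to/cifar-10-batches-py",      # extracted python version of cifar-10
    "in": ["cf10_pred"],
    "out": ["cf10_image", "cf10_label"],
    "min_duration": 12,
    "max_duration": 16,
    "fading": 4,
    "conf-mat window": 9,
}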
class ProcessIf_cifar10(ProcessIf):
"""Interface class providing basic cifar10 data.
    To use this interface, download the python version of the cifar-10
    dataset, extract it, and specify the extracted folder (under
    'source_path') in the st_graph file that uses this interface.
Interface parameters:
---------------------
source_path : str
This has to be the global path to the cifar10 dataset path.
    min_duration : int
        The minimum duration a cifar-10 image will be presented, in frames.
        The actual duration will be drawn uniformly between min_duration
        and max_duration.
    max_duration : int
        The maximum duration a cifar-10 image will be presented, in frames.
conf-mat window : int
As a performance measure for a potential classifier a confusion
matrix is computed over a certain window of delays. This parameter
specifies the window size, e.g. confusion matrices will be computed
for all delays up to this window size. Note: This should be larger
than the shortest path in the network from input to classification.
conf-mat mean over : int
        Confusion matrices (over the conf-mat window) will be computed as the
        mean over the last 'conf-mat mean over' frames for temporal smoothing
        and a better approximation.
Inputs:
-------
cf10_pred : np.array, shape [agents, 10, 1, 1]
The cifar10 interface provides a delayed confusion matrix as
performance measure. To compute this a classification result is
needed to be compared with the ground-truth.
Outputs:
--------
cf10_image : np.array, shape [agents, 3, 32, 32]
        The RGB cifar-10 images.
cf10_label : np.array, shape [agents, 10, 1, 1]
The one-hot encoded ground-truth label for the current image.
"""
def __init__(self, name, ident, metanet, param):
# Initialize parent ProcessIf class.
ProcessIf.__init__(self, name, ident, metanet, param)
def initialize(self):
"""Method to initialize the cifar-10 (load) interface.
"""
# Get some experimental parameters.
# ---------------------------------------------------------------------
self.max_duration = self.p.get("max_duration", 16)
self.min_duration = self.p.get("min_duration", 12)
self.min_duration = self.p.get("fading", 4)
self.augmentation = self.p.get("augmentation", {})
self.mode = self.p.get("mode", 0)
self.mode_shuffle = {}
self.mode_current = {}
# Load cifar10 dataset.
# ---------------------------------------------------------------------
self.image_shape = np.array([3, 32, 32]).astype(np.int32)
self.no_classes = 10
self.samples = {}
self.samples['train'] = 40000
self.samples['valid'] = 10000
self.samples['test'] = 10000
# Structure holding all cifar10 images.
self.dataset = {'train': {}, 'valid': {}, 'test': {}}
# Load training data.
for t in self.dataset:
self.dataset[t]["cf10_image"] = np.zeros([self.samples[t], 3, 32, 32], dtype=np.float32)
self.dataset[t]["cf10_label"] = np.zeros([self.samples[t], 1], dtype=np.float32)
for b in range(4):
if sys.version[0] == "2":
data = pckl.load(open(self.net["interfaces"][self.name]["source_path"] \
+ "/data_batch_" + str(b+1), "rb"))
elif sys.version[0] == "3":
data = pckl.load(open(self.net["interfaces"][self.name]["source_path"] \
+ "/data_batch_" + str(b+1), "rb"),
encoding="latin1")
image_data = np.swapaxes(np.reshape(data["data"], [10000, 3, 32, 32]), 2, 3)
self.dataset['train']["cf10_image"][b*10000:(b+1)*10000,:,:,:] \
= image_data[:,:,:,:] / 256.0
# get labels
self.dataset['train']["cf10_label"][b*10000:(b+1)*10000,0] = np.array(data["labels"])[:]
# Load validation dataset.
if sys.version[0] == "2":
data = pckl.load(open(self.net["interfaces"][self.name]["source_path"] \
+ "/data_batch_5", "rb"))
elif sys.version[0] == "3":
data = pckl.load(open(self.net["interfaces"][self.name]["source_path"] \
+ "/data_batch_5", "rb"),
encoding="latin1")
image_data = np.swapaxes(np.reshape(data["data"], [self.samples['valid'], 3, 32, 32]), 2, 3)
self.dataset['valid']["cf10_image"][:,:,:,:] \
= image_data[:,:,:,:] / 256.0
self.dataset['valid']["cf10_label"][:,0] = np.array(data["labels"])[:]
# Load test dataset.
if sys.version[0] == "2":
data = pckl.load(open(self.net["interfaces"][self.name]["source_path"] \
+ "/test_batch", "rb"))
elif sys.version[0] == "3":
data = pckl.load(open(self.net["interfaces"][self.name]["source_path"] \
+ "/test_batch", "rb"),
encoding="latin1")
image_data = np.swapaxes(np.reshape(data["data"], [self.samples['test'], 3, 32, 32]), 2, 3)
self.dataset['test']["cf10_image"][:,:,:,:] \
= image_data[:,:,:,:] / 256.0
self.dataset['test']["cf10_label"][:,0] = np.array(data["labels"])[:]
for t in ['train', 'valid', 'test']:
if t + ' samples' in self.p:
self.samples[t] = min(self.p[t + ' samples'], self.samples[t])
# Initialize experimental state for all agents.
# ---------------------------------------------------------------------
self.current_duration = []
self.current_elapsed = []
self.current_sample = []
self.current_augmentation = []
self.current_image = []
for a in range(self.net["agents"]):
self.current_duration += [0]
self.current_elapsed += [0]
self.current_sample += [0]
self.current_augmentation.append({})
if 'flipX' in self.augmentation:
self.current_augmentation[-1]['flipX'] = False
if 'crop' in self.augmentation:
self.current_augmentation[-1]['crop'] = [0, 0]
self.current_image.append(np.zeros(self.image_shape, dtype=np.float32))
for a in range(self.net["agents"]):
self.draw_new_sample(a)
for t in ['train', 'valid', 'test']:
self.mode_shuffle[t] = np.random.permutation(self.samples[t])
self.mode_current[t] = 0
# Instantiate temporal confusion matrix.
if "cf10_pred" in self.p["in"]:
self.TCM_train = TemporalConfusionMatrix(self.net, self.name, "cf10_pred")
self.TCM_valid = TemporalConfusionMatrix(self.net, self.name, "cf10_pred")
for a in range(self.net["agents"]):
self.TCM_train.trigger_history[a] = [-1, -1]
self.TCM_valid.trigger_history[a] = [-1, -1]
            # For cumulative performances (over the entire epoch, e.g. valid and test).
self.cumm_conf_mat = np.zeros(self.TCM_valid.conf_mat.shape, dtype=np.float32)
def draw_new_sample(self, a):
"""Draw a new sample.
"""
self.current_elapsed[a] = 0
self.current_duration[a] \
= np.random.randint(self.dat["parameter"]["min_duration"],
self.dat["parameter"]["max_duration"] + 1)
if 'flipX' in self.augmentation:
if np.random.random() > 0.5:
self.current_augmentation[a]['flipX'] = True
else:
self.current_augmentation[a]['flipX'] = False
if 'crop' in self.augmentation:
crop = [np.random.random() * (1.0 - self.augmentation['crop']),
np.random.random() * (1.0 - self.augmentation['crop'])]
self.current_augmentation[a]['crop'] \
= [int(self.image_shape[1] * crop[0]),
int(self.image_shape[2] * crop[1])]
# Dependent on presentations of this next frame,
# init its | |
<gh_stars>10-100
# ---------------------------------------------------------------
# image.py
# Set-up time: 2020/4/26 8:41 AM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: <EMAIL> [OR] <EMAIL>
# ---------------------------------------------------------------
import mmcv
import cv2
import numpy as np
from mmcv.image import imread, imwrite
from .color import color_palette, float_palette
import os.path as osp
import matplotlib.pyplot as plt
from pdf2image import convert_from_path
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
import time
from collections import defaultdict
# entity_color_pools:
# the first two are for gt: referred / not referred in the graph
# the last two are for prediction: grounded / not grounded to the ground-truth entities.
entity_color_pools = ['SandyBrown', 'PaleGreen', 'LightCoral', 'GreenYellow']
rel_color_pools = ['Violet', 'SkyBlue']
def draw_abstract_graph(name_dict, rels, predicate_names, work_dir, filename, entity_scores=None, rel_scores=None,
triplet_scores=None, entity_colors_dict=None, rel_colors_dict=None):
type = 'gt' if rel_scores is None else 'pred'
from graphviz import Digraph
u = Digraph('GT Scene Graph', format='pdf')
u.body.append('size="6, 6"')
u.body.append('rankdir="LR"')
u.node_attr.update(style='filled')
for i, name in name_dict.items():
c = entity_color_pools[entity_colors_dict[i]]
entity_label = name_dict[i]
if entity_scores is not None:
entity_label += '|{:.02f}'.format(entity_scores[i])
u.node(str(i), label=entity_label, color=c)
for i, rel in enumerate(rels):
edge_key = '%s_%s_%s' % (rel[0], rel[1], rel[2])
edge_label = predicate_names[rel[2]]
if rel_scores is not None:
edge_label += '|{:.02f}'.format(rel_scores[i])
if triplet_scores is not None:
edge_label += '|{:.02f}'.format(triplet_scores[i])
u.node(edge_key, label=edge_label, color=rel_color_pools[rel_colors_dict[i]])
u.edge(str(rel[0]), edge_key)
u.edge(edge_key, str(rel[1]))
u.render(osp.join(work_dir, filename + '_{}_sg'.format(type)))
sg_im = convert_from_path(osp.join(work_dir, filename + '_{}_sg.pdf'.format(type))) # PIL list
return sg_im[0]
def get_name_dict(class_names, labels):
name_cnt = {n: 1 for n in class_names}
name_dict = {}
for idx, l in enumerate(labels):
name = class_names[l]
suffix = name_cnt[name]
name_cnt[name] += 1
name_dict[idx] = name + '_' + str(suffix)
return name_dict
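# Illustrative note (not part of the original source): get_name_dict disambiguates
# repeated class labels by appending a per-class running suffix, e.g.
#   >>> get_name_dict(['person', 'dog'], [0, 0, 1])
#   {0: 'person_1', 1: 'person_2', 2: 'dog_1'}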
def imdraw_sg(img,
pred_bboxes,
pred_labels,
pred_rels,
gt_bboxes=None,
gt_labels=None,
gt_rels=None,
pred_scores=None,
pred_rel_scores=None,
pred_triplet_scores=None,
class_names=None,
predicate_names=None,
score_thr=0.3,
iou_thr=0.5,
work_dir=None,
filename=None,
backend='graphviz'):
"""
TODO: the networkx backend currently has some bugs; prefer the graphviz backend.
"""
img = imread(img)
h, w = img.shape[:2]
# create the figure
if gt_rels is None:
nrows, ncols = 1, 2
else:
nrows, ncols = 2, 2
figsize = [50, 20]
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
axflat = ax.flat
# for networkx
node_size = 4000
font_size = 20
line_width = 2
arrowsize = 20
if score_thr > 0 and pred_scores is not None:
inds = np.where(pred_scores >= score_thr)[0]
rel_inds = np.ones(len(pred_rels), dtype=bool)
for i, rel in enumerate(pred_rels):
if rel[0] not in inds or rel[1] not in inds:
rel_inds[i] = 0
pred_bboxes = pred_bboxes[inds, :]
pred_scores = pred_scores[inds]
pred_labels = pred_labels[inds]
pred_rels = pred_rels[rel_inds]
pred_rel_scores = pred_rel_scores[rel_inds]
pred_triplet_scores = pred_triplet_scores[rel_inds]
# adjust the box id in the pred_rels
entity_mapping_ = {ind: i for i, ind in enumerate(inds)}
tmp = []
for rel in pred_rels:
tmp.append([entity_mapping_[rel[0]], entity_mapping_[rel[1]], rel[2]])
pred_rels = np.array(tmp)
subplot_offset = 0
gt_to_pred = None
if gt_rels is not None:
subplot_offset = 2
gt_entity_colors_dict, gt_rel_colors_dict = {}, {}
# draw the gt scene graph: both on image and abstract graph
gt_name_dict = get_name_dict(class_names, gt_labels)
gt_rel_inds = gt_rels[:, :2].ravel().tolist()
axflat[0].imshow(img)
axflat[0].axis('off')
for i, (bbox, label) in enumerate(zip(gt_bboxes, gt_labels)):
bbox_int = bbox.astype(np.int32)
left_top = (bbox_int[0], bbox_int[1])
gt_entity_colors_dict[i] = 0 if i in gt_rel_inds else 1
gt_rel_colors_dict[i] = 0
axflat[0].add_patch(
plt.Rectangle(left_top, bbox_int[2] - bbox_int[0], bbox_int[3] - bbox_int[1], fill=False,
edgecolor=entity_color_pools[gt_entity_colors_dict[i]], linewidth=4.5))
axflat[0].text(bbox_int[0], bbox_int[1] + 2, gt_name_dict[i],
bbox=dict(facecolor=entity_color_pools[gt_entity_colors_dict[i]], alpha=0.5),
fontsize=15, color='black')
axflat[0].set_title('GT Object Visualization', fontsize=25)
if backend == 'graphviz':
gt_abstract_graph = draw_abstract_graph(gt_name_dict, gt_rels, predicate_names, work_dir, filename,
entity_colors_dict=gt_entity_colors_dict,
rel_colors_dict=gt_rel_colors_dict)
gt_abstract_graph = gt_abstract_graph.resize((w, h))
axflat[1].imshow(np.asarray(gt_abstract_graph))
elif backend == 'networkx':
import networkx as nx
nodes, node_colors, edges, edge_labels, edge_colors = [], [], [], {}, []
for idx in range(len(gt_name_dict)):
nodes.append(gt_name_dict[idx])
node_colors.append(entity_color_pools[gt_entity_colors_dict[idx]])
for idx, rel in enumerate(gt_rels):
edges.append([gt_name_dict[rel[0]], gt_name_dict[rel[1]]])
edge_labels[(gt_name_dict[rel[0]], gt_name_dict[rel[1]])] = predicate_names[rel[2]]
edge_colors.append(rel_color_pools[gt_rel_colors_dict[idx]])
plt.sca(axflat[1])
g = nx.DiGraph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
pos = nx.circular_layout(g)
nx.draw(g, pos, edge_color=edge_colors, width=line_width, ax=axflat[1],
linewidth=1, node_size=node_size, node_color=node_colors, font_size=font_size,
labels={node: node for node in g.nodes()}, arrowsize=arrowsize, connectionstyle='arc3, rad = 0.2')
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, font_size=font_size, font_color='black',
ax=axflat[1])
axflat[1].axis('off')
axflat[1].set_title('GT Scene Graph Visualization', fontsize=25)
"""
Step 1 (find the equivalent boxes): group the predicted boxes, because more than one
predicted box may be grounded to the same gt box.
"""
ious = bbox_overlaps(pred_bboxes, gt_bboxes)
pred_to_gt = np.zeros(len(pred_bboxes), dtype=np.int32)
pred_to_gt_iou = np.zeros(len(pred_bboxes))
pred_to_gt_iou.fill(-1)
for pred_ind, (box, label) in enumerate(zip(pred_bboxes, pred_labels)):
cand_gt_inds = np.where(gt_labels == label)[0]
if len(cand_gt_inds) == 0:
pred_to_gt[pred_ind] = -1
continue
target_ious = ious[pred_ind, cand_gt_inds]
max_gt_iou, max_gt_index = np.max(target_ious), np.argmax(target_ious)
pred_to_gt[pred_ind] = cand_gt_inds[max_gt_index]
pred_to_gt_iou[pred_ind] = max_gt_iou
# for each gt, find all the qualified predicted boxes
qualified_inds = np.where(pred_to_gt_iou > iou_thr)[0]
gt_to_pred = defaultdict(list)
for pred_ind in qualified_inds:
gt_to_pred[pred_to_gt[pred_ind]].append((pred_ind, pred_to_gt_iou[pred_ind]))
gt_to_pred = dict(gt_to_pred)
for k, v in gt_to_pred.items():
gt_to_pred[k] = sorted(v, key=lambda x: x[1], reverse=True)
"""
Step 2: For each predicted relation R, evaluate whether it is grounded to a gt relation:
1) the subject and object can each be grounded to a gt object;
2) the relation can be found in the gt relations;
3) if the gt relation associated with R has already been shown, R is not shown again.
"""
# (1) map all the predicted boxes to their equivalent boxes
_eq_inner_mapping = {}
_eq_gt_mapping = {}
if gt_to_pred is not None and len(gt_to_pred) > 0:
for k, v in gt_to_pred.items():
for _v in v:
_eq_inner_mapping[_v[0]] = v[0][0]  # the first one has the highest IoU, so use it as the representative entry
_eq_gt_mapping[v[0][0]] = k
# (2) replace the predicted relation indexes and scores:
new_rels = {}
for rel, rel_score, triplet_score in zip(pred_rels, pred_rel_scores, pred_triplet_scores):
new_rel_pair = (_eq_inner_mapping.get(rel[0], rel[0]), _eq_inner_mapping.get(rel[1], rel[1]))
if new_rel_pair in new_rels and rel[2] not in [v[0] for v in new_rels[new_rel_pair]]:
new_rels[new_rel_pair].append((rel[2], rel_score, triplet_score))
else:
new_rels[new_rel_pair] = [(rel[2], rel_score, triplet_score)]
# find the visible bbox idx, and adjust the relations, assign the entity colors and relation colors
pred_entity_colors_dict, pred_rel_colors_dict = {}, {}
vis_pred_idxes = np.ones(len(pred_bboxes), dtype=bool)
for pred_ind in range(len(vis_pred_idxes)):
if pred_ind in _eq_inner_mapping.keys() and pred_ind not in _eq_inner_mapping.values():
vis_pred_idxes[pred_ind] = 0
pred_bboxes = pred_bboxes[vis_pred_idxes, :]
pred_labels = pred_labels[vis_pred_idxes]
pred_scores = pred_scores[vis_pred_idxes] if pred_scores is not None else None
_o2n_mapping = {idx: i for i, idx in enumerate(np.where(vis_pred_idxes)[0])}
grd_idxes = [_o2n_mapping[i] for i in list(set(list(_eq_inner_mapping.values())))]
for i in range(len(pred_bboxes)):
pred_entity_colors_dict[i] = 2 if i in grd_idxes else 3
new_pred_rels = []
new_pred_rel_scores = [] if pred_rel_scores is not None else None
new_pred_triplet_scores = [] if pred_triplet_scores is not None else None
grounded_gtrel_idxes = []
gt_rel_lists = gt_rels.tolist() if gt_rels is not None else None
for rel_pair, cand_predicates in new_rels.items():
subj, obj = _o2n_mapping[rel_pair[0]], _o2n_mapping[rel_pair[1]]
if rel_pair[0] in _eq_gt_mapping and rel_pair[1] in _eq_gt_mapping:
# NOTE: one of the two may not match a gt box; check this.
for cand_predicate in cand_predicates:
cand_rel = [_eq_gt_mapping[rel_pair[0]], _eq_gt_mapping[rel_pair[1]], cand_predicate[0]]
if cand_rel in gt_rel_lists and gt_rel_lists.index(cand_rel) not in grounded_gtrel_idxes:
grounded_gtrel_idxes.append(gt_rel_lists.index(cand_rel))
pred_rel_colors_dict[len(new_pred_rels)] = 0
new_pred_rels.append([subj, obj, cand_predicate[0]])
new_pred_rel_scores.append(cand_predicate[1])
new_pred_triplet_scores.append(cand_predicate[2])
else:
for cand_predicate in cand_predicates:
pred_rel_colors_dict[len(new_pred_rels)] = 1
new_pred_rels.append([subj, obj, cand_predicate[0]])
new_pred_rel_scores.append(cand_predicate[1])
new_pred_triplet_scores.append(cand_predicate[2])
pred_name_dict = get_name_dict(class_names, pred_labels)
axflat[subplot_offset].imshow(img)
axflat[subplot_offset].axis('off')
for i, (bbox, label, score) in enumerate(zip(pred_bboxes, pred_labels, pred_scores)):
bbox_int = bbox.astype(np.int32)
left_top = (bbox_int[0], bbox_int[1])
c = entity_color_pools[pred_entity_colors_dict[i]]
axflat[subplot_offset].add_patch(
plt.Rectangle(left_top, bbox_int[2] - bbox_int[0], bbox_int[3] - bbox_int[1], fill=False,
edgecolor=c, linewidth=4.5))
axflat[subplot_offset].text(bbox_int[0], bbox_int[1] + 2, pred_name_dict[i] + '|{:.02f}'.format(score),
bbox=dict(facecolor=c, alpha=0.5),
fontsize=15, color='black')
axflat[subplot_offset].set_title('Predicted Object Visualization', fontsize=25)
# axflat[subplot_offset].savefig(osp.join(work_dir, filename + '_vis_pred_object.png'), bbox_inches='tight')
if backend == 'graphviz':
pred_abstract_graph = draw_abstract_graph(pred_name_dict, new_pred_rels, predicate_names, work_dir, filename,
entity_scores=pred_scores,
rel_scores=new_pred_rel_scores,
triplet_scores=new_pred_triplet_scores,
entity_colors_dict=pred_entity_colors_dict,
rel_colors_dict=pred_rel_colors_dict)
pred_abstract_graph = pred_abstract_graph.resize((w, h))
axflat[subplot_offset + 1].imshow(np.asarray(pred_abstract_graph))
elif backend == 'networkx':
import networkx as nx
nodes, node_colors, edges, edge_labels, edge_colors = [], [], [], {}, []
for idx in range(len(pred_name_dict)):
nodes.append(pred_name_dict[idx])
node_colors.append(entity_color_pools[pred_entity_colors_dict[idx]])
for idx, rel in enumerate(new_pred_rels):
edges.append([pred_name_dict[rel[0]], pred_name_dict[rel[1]]])
edge_labels[(pred_name_dict[rel[0]], pred_name_dict[rel[1]])] = predicate_names[rel[2]]
edge_colors.append(rel_color_pools[pred_rel_colors_dict[idx]])
plt.sca(axflat[subplot_offset + 1])
g = nx.DiGraph()
g.add_nodes_from(nodes)
g.add_edges_from(edges[:5])
pos = nx.circular_layout(g)
nx.draw(g, pos, edge_color=edge_colors, width=line_width, ax=axflat[subplot_offset + 1],
linewidth=1, node_size=node_size, node_color=node_colors, font_size=font_size,
labels={node: node for node in g.nodes()}, arrowsize=arrowsize, connectionstyle='arc3, rad = 0.05')
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, font_size=font_size, font_color='black',
ax=axflat[subplot_offset + 1], connectionstyle='arc3, rad = 0.05')
axflat[subplot_offset + 1].axis('off')
axflat[subplot_offset + 1].set_title('Predicted Scene Graph Visualization', fontsize=25)
plt.tight_layout()
fig.savefig(osp.join(work_dir, filename + '_vis_sg.png'), bbox_inches='tight')
def imshow_det_bboxes(img,
bboxes,
labels,
class_names=None,
score_thr=0,
palette=dict(palette='pastel', n_colors=7),
thickness=2,
font_scale=0.7,
show=True,
win_name='',
wait_time=0,
out_file=None):
"""Draw bboxes and class labels (with scores) on an image.
Args:
img (str or ndarray): The image to be displayed.
bboxes
and _env_is_exposed(rspec):
ret.add(rspec)
return ret
def _envs_dulwich(repo):
'''
Check the refs and return a list of the ones which can be used as salt
environments.
'''
ret = set()
for ref in _dulwich_env_refs(repo['repo'].get_refs()):
# ref will be something like 'refs/heads/master'
rtype, rspec = ref[5:].split('/', 1)
if rtype == 'heads':
if rspec == repo['base']:
rspec = 'base'
if _env_is_exposed(rspec):
ret.add(rspec)
elif rtype == 'tags' and _env_is_exposed(rspec):
ret.add(rspec)
return ret
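# Illustrative example (not from the original module): a dulwich ref string such as
# 'refs/heads/master' is split into its type and spec with
#   >>> 'refs/heads/master'[5:].split('/', 1)
#   ['heads', 'master']
# so branch refs land in rtype == 'heads' and tag refs in rtype == 'tags'.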
def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
'''
Find the first file to match the path and ref, read the file out of git
and send the path to the newly cached file
'''
fnd = {'path': '',
'rel': ''}
if os.path.isabs(path) or tgt_env not in envs():
return fnd
provider = _get_provider()
dest = os.path.join(__opts__['cachedir'], 'gitfs/refs', tgt_env, path)
hashes_glob = os.path.join(__opts__['cachedir'],
'gitfs/hash',
tgt_env,
'{0}.hash.*'.format(path))
blobshadest = os.path.join(__opts__['cachedir'],
'gitfs/hash',
tgt_env,
'{0}.hash.blob_sha1'.format(path))
lk_fn = os.path.join(__opts__['cachedir'],
'gitfs/hash',
tgt_env,
'{0}.lk'.format(path))
destdir = os.path.dirname(dest)
hashdir = os.path.dirname(blobshadest)
if not os.path.isdir(destdir):
try:
os.makedirs(destdir)
except OSError:
# Path exists and is a file, remove it and retry
os.remove(destdir)
os.makedirs(destdir)
if not os.path.isdir(hashdir):
try:
os.makedirs(hashdir)
except OSError:
# Path exists and is a file, remove it and retry
os.remove(hashdir)
os.makedirs(hashdir)
for repo in init():
if repo['mountpoint'] \
and not path.startswith(repo['mountpoint'] + os.path.sep):
continue
repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep)
if repo['root']:
repo_path = os.path.join(repo['root'], repo_path)
if provider == 'gitpython':
tree = _get_tree_gitpython(repo, tgt_env)
if not tree:
# Branch/tag/SHA not found in repo, try the next
continue
try:
blob = tree / repo_path
except KeyError:
continue
blob_hexsha = blob.hexsha
elif provider == 'pygit2':
tree = _get_tree_pygit2(repo, tgt_env)
if not tree:
# Branch/tag/SHA not found in repo, try the next
continue
try:
oid = tree[repo_path].oid
blob = repo['repo'][oid]
except KeyError:
continue
blob_hexsha = blob.hex
elif provider == 'dulwich':
prefix_dirs, _, filename = repo_path.rpartition(os.path.sep)
tree = _get_tree_dulwich(repo, tgt_env)
tree = _dulwich_walk_tree(repo['repo'], tree, prefix_dirs)
if not isinstance(tree, dulwich.objects.Tree):
# Branch/tag/SHA not found in repo, try the next
continue
try:
# Referencing the path in the tree returns a tuple, the
# second element of which is the object ID of the blob
blob = repo['repo'].get_object(tree[filename][1])
except KeyError:
continue
blob_hexsha = blob.sha().hexdigest()
salt.fileserver.wait_lock(lk_fn, dest)
if os.path.isfile(blobshadest) and os.path.isfile(dest):
with salt.utils.fopen(blobshadest, 'r') as fp_:
sha = fp_.read()
if sha == blob_hexsha:
fnd['rel'] = path
fnd['path'] = dest
return fnd
with salt.utils.fopen(lk_fn, 'w+') as fp_:
fp_.write('')
for filename in glob.glob(hashes_glob):
try:
os.remove(filename)
except Exception:
pass
with salt.utils.fopen(dest, 'w+') as fp_:
if provider == 'gitpython':
blob.stream_data(fp_)
elif provider == 'pygit2':
fp_.write(blob.data)
elif provider == 'dulwich':
fp_.write(blob.as_raw_string())
with salt.utils.fopen(blobshadest, 'w+') as fp_:
fp_.write(blob_hexsha)
try:
os.remove(lk_fn)
except (OSError, IOError):
pass
fnd['rel'] = path
fnd['path'] = dest
return fnd
return fnd
def serve_file(load, fnd):
'''
Return a chunk from a file based on the data received
'''
if 'env' in load:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Boron.'
)
load['saltenv'] = load.pop('env')
ret = {'data': '',
'dest': ''}
required_load_keys = set(['path', 'loc', 'saltenv'])
if not all(x in load for x in required_load_keys):
log.debug(
'Not all of the required keys present in payload. '
'Missing: {0}'.format(
', '.join(required_load_keys.difference(load))
)
)
return ret
if not fnd['path']:
return ret
ret['dest'] = fnd['rel']
gzip = load.get('gzip', None)
with salt.utils.fopen(fnd['path'], 'rb') as fp_:
fp_.seek(load['loc'])
data = fp_.read(__opts__['file_buffer_size'])
if gzip and data:
data = salt.utils.gzip_util.compress(data, gzip)
ret['gzip'] = gzip
ret['data'] = data
return ret
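# Illustrative sketch (not part of the original fileserver API): a caller can page
# through a file by advancing 'loc' by the number of bytes returned until an empty
# chunk comes back. The helper name below is an assumption made for this example only.
def _example_fetch_chunks(path, saltenv='base'):
    '''
    Sketch: collect the raw chunks of a cached file by repeatedly calling serve_file().
    '''
    fnd = find_file(path, tgt_env=saltenv)
    chunks = []
    if not fnd['path']:
        return chunks
    loc = 0
    while True:
        ret = serve_file({'path': path, 'loc': loc, 'saltenv': saltenv}, fnd)
        if not ret['data']:
            break
        chunks.append(ret['data'])
        loc += len(ret['data'])
    return chunks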
def file_hash(load, fnd):
'''
Return a file hash, the hash type is set in the master config file
'''
if 'env' in load:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Boron.'
)
load['saltenv'] = load.pop('env')
if not all(x in load for x in ('path', 'saltenv')):
return ''
ret = {'hash_type': __opts__['hash_type']}
relpath = fnd['rel']
path = fnd['path']
hashdest = os.path.join(__opts__['cachedir'],
'gitfs/hash',
load['saltenv'],
'{0}.hash.{1}'.format(relpath,
__opts__['hash_type']))
if not os.path.isfile(hashdest):
if not os.path.exists(os.path.dirname(hashdest)):
os.makedirs(os.path.dirname(hashdest))
ret['hsum'] = salt.utils.get_hash(path, __opts__['hash_type'])
with salt.utils.fopen(hashdest, 'w+') as fp_:
fp_.write(ret['hsum'])
return ret
else:
with salt.utils.fopen(hashdest, 'rb') as fp_:
ret['hsum'] = fp_.read()
return ret
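# Example (illustrative, not from the original source): for saltenv 'base',
# relpath 'top.sls' and hash_type 'md5', the cached hash is stored at
#   <cachedir>/gitfs/hash/base/top.sls.hash.md5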
def _file_lists(load, form):
'''
Return a dict containing the file lists for files and dirs
'''
if 'env' in load:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Boron.'
)
load['saltenv'] = load.pop('env')
list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/gitfs')
if not os.path.isdir(list_cachedir):
try:
os.makedirs(list_cachedir)
except os.error:
log.critical('Unable to make cachedir {0}'.format(list_cachedir))
return []
list_cache = os.path.join(
list_cachedir,
'{0}.p'.format(load['saltenv'].replace(os.path.sep, '_|-'))
)
w_lock = os.path.join(
list_cachedir,
'.{0}.w'.format(load['saltenv'].replace(os.path.sep, '_|-'))
)
cache_match, refresh_cache, save_cache = \
salt.fileserver.check_file_list_cache(
__opts__, form, list_cache, w_lock
)
if cache_match is not None:
return cache_match
if refresh_cache:
ret = {}
ret['files'] = _get_file_list(load)
ret['dirs'] = _get_dir_list(load)
if save_cache:
salt.fileserver.write_file_list_cache(
__opts__, ret, list_cache, w_lock
)
return ret.get(form, [])
# Shouldn't get here, but if we do, this prevents a TypeError
return []
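# Example (illustrative, assuming a POSIX path separator): for a saltenv named
# 'dev/feature' the cache files become
#   list_cache -> <cachedir>/file_lists/gitfs/dev_|-feature.p
#   w_lock     -> <cachedir>/file_lists/gitfs/.dev_|-feature.w
# since os.path.sep is replaced with '_|-' to keep environment names filesystem-safe.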
def file_list(load):
'''
Return a list of all files on the file server in a specified
environment
'''
return _file_lists(load, 'files')
def _get_file_list(load):
'''
Return a list of all files on the file server in a specified
environment
'''
if 'env' in load:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Boron.'
)
load['saltenv'] = load.pop('env')
provider = _get_provider()
if 'saltenv' not in load or load['saltenv'] not in envs():
return []
ret = set()
for repo in init():
if provider == 'gitpython':
ret.update(
_file_list_gitpython(repo, load['saltenv'])
)
elif provider == 'pygit2':
ret.update(
_file_list_pygit2(repo, load['saltenv'])
)
elif provider == 'dulwich':
ret.update(
_file_list_dulwich(repo, load['saltenv'])
)
return sorted(ret)
def _file_list_gitpython(repo, tgt_env):
'''
Get file list using GitPython
'''
ret = set()
if tgt_env == 'base':
tgt_env = repo['base']
tree = _get_tree_gitpython(repo, tgt_env)
if not tree:
return ret
if repo['root']:
try:
tree = tree / repo['root']
except KeyError:
return ret
for blob in tree.traverse():
if not isinstance(blob, git.Blob):
continue
if repo['root']:
path = os.path.relpath(blob.path, repo['root'])
else:
path = blob.path
ret.add(os.path.join(repo['mountpoint'], path))
return ret
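# Example (illustrative, not from the original source): with repo['root'] = 'states'
# and repo['mountpoint'] = 'prod', a blob stored at 'states/web/init.sls' in the git
# tree is exposed to the fileserver as 'prod/web/init.sls' (the root prefix is
# stripped and the mountpoint prepended). The same remapping is applied by the
# pygit2 and dulwich variants below.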
def _file_list_pygit2(repo, tgt_env):
'''
Get file list using pygit2
'''
def _traverse(tree, repo_obj, blobs, prefix):
'''
Traverse through a pygit2 Tree object recursively, accumulating all the
blob paths within it in the "blobs" list
'''
for entry in iter(tree):
blob = repo_obj[entry.oid]
if isinstance(blob, pygit2.Blob):
blobs.append(os.path.join(prefix, entry.name))
elif isinstance(blob, pygit2.Tree):
_traverse(blob,
repo_obj,
blobs,
os.path.join(prefix, entry.name))
ret = set()
if tgt_env == 'base':
tgt_env = repo['base']
tree = _get_tree_pygit2(repo, tgt_env)
if not tree:
return ret
if repo['root']:
try:
# This might need to be changed to account for a root that
# spans more than one directory
oid = tree[repo['root']].oid
tree = repo['repo'][oid]
except KeyError:
return ret
if not isinstance(tree, pygit2.Tree):
return ret
blobs = []
if len(tree):
_traverse(tree, repo['repo'], blobs, repo['root'])
for blob in blobs:
if repo['root']:
blob = os.path.relpath(blob, repo['root'])
ret.add(os.path.join(repo['mountpoint'], blob))
return ret
def _file_list_dulwich(repo, tgt_env):
'''
Get file list using dulwich
'''
def _traverse(tree, repo_obj, blobs, prefix):
'''
Traverse through a dulwich Tree object recursively, accumulating all the
blob paths within it in the "blobs" list
'''
for item in tree.items():
obj = repo_obj.get_object(item.sha)
if isinstance(obj, dulwich.objects.Blob):
blobs.append(os.path.join(prefix, item.path))
elif isinstance(obj, dulwich.objects.Tree):
_traverse(obj,
repo_obj,
blobs,
os.path.join(prefix, item.path))
ret = set()
if tgt_env == 'base':
tgt_env = repo['base']
tree = _get_tree_dulwich(repo, tgt_env)
tree = _dulwich_walk_tree(repo['repo'], tree, repo['root'])
if not isinstance(tree, dulwich.objects.Tree):
return ret
blobs = []
if len(tree):
_traverse(tree, repo['repo'], blobs, repo['root'])
for blob in blobs:
if repo['root']:
blob = os.path.relpath(blob, repo['root'])
ret.add(os.path.join(repo['mountpoint'], blob))
return ret
def file_list_emptydirs(load): # pylint: disable=W0613
'''
Return a list of all empty directories on the master
'''
# Cannot have empty dirs in git
return []
def dir_list(load):
'''
Return a list of all directories on the master
'''
return _file_lists(load, 'dirs')
def _get_dir_list(load):
'''
Get a list of all directories on the master
'''
if 'env' in load:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Boron.'
)
load['saltenv'] = load.pop('env')
| |
# src/emuvim/dcemulator/monitoring.py
__author__ = 'Administrator'
import urllib2
import logging
from mininet.node import OVSSwitch
import ast
import time
from prometheus_client import start_http_server, Summary, Histogram, Gauge, Counter, REGISTRY, CollectorRegistry, \
pushadd_to_gateway, push_to_gateway, delete_from_gateway
import threading
from subprocess import Popen, PIPE
import os
import paramiko
import gevent
logging.basicConfig(level=logging.INFO)
"""
Class to read OpenFlow statistics from the Ryu controller of the DCNetwork.
"""
class DCNetworkMonitor():
def __init__(self, net):
self.net = net
prometheus_ip = '127.0.0.1'
prometheus_port = '9090'
self.prometheus_REST_api = 'http://{0}:{1}'.format(prometheus_ip, prometheus_port)
# helper variables to calculate the metrics
self.pushgateway = 'localhost:9091'
# Start up the server to expose the metrics to Prometheus.
#start_http_server(8000)
# supported Prometheus metrics
self.registry = CollectorRegistry()
self.prom_tx_packet_count = Gauge('sonemu_tx_count_packets', 'Total number of packets sent',
['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
self.prom_rx_packet_count = Gauge('sonemu_rx_count_packets', 'Total number of packets received',
['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
self.prom_tx_byte_count = Gauge('sonemu_tx_count_bytes', 'Total number of bytes sent',
['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
self.prom_rx_byte_count = Gauge('sonemu_rx_count_bytes', 'Total number of bytes received',
['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
self.prom_metrics={'tx_packets':self.prom_tx_packet_count, 'rx_packets':self.prom_rx_packet_count,
'tx_bytes':self.prom_tx_byte_count,'rx_bytes':self.prom_rx_byte_count}
# list of installed metrics to monitor
# each entry can contain this data
'''
{
switch_dpid = 0
vnf_name = None
vnf_interface = None
previous_measurement = 0
previous_monitor_time = 0
metric_key = None
mon_port = None
}
'''
self.monitor_lock = threading.Lock()
self.monitor_flow_lock = threading.Lock()
self.network_metrics = []
self.flow_metrics = []
# start monitoring thread
self.start_monitoring = True
self.monitor_thread = threading.Thread(target=self.get_network_metrics)
self.monitor_thread.start()
self.monitor_flow_thread = threading.Thread(target=self.get_flow_metrics)
self.monitor_flow_thread.start()
# helper tools
#self.pushgateway_process = self.start_PushGateway()
#self.prometheus_process = self.start_Prometheus()
self.cadvisor_process = self.start_cadvisor()
# first set some parameters, before measurement can start
def setup_flow(self, vnf_name, vnf_interface=None, metric='tx_packets', cookie=0):
flow_metric = {}
# check if port is specified (vnf:port)
if vnf_interface is None:
# take first interface by default
connected_sw = self.net.DCNetwork_graph.neighbors(vnf_name)[0]
link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
vnf_interface = link_dict[0]['src_port_id']
flow_metric['vnf_name'] = vnf_name
flow_metric['vnf_interface'] = vnf_interface
vnf_switch = None
for connected_sw in self.net.DCNetwork_graph.neighbors(vnf_name):
link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
for link in link_dict:
# logging.info("{0},{1}".format(link_dict[link],vnf_interface))
if link_dict[link]['src_port_id'] == vnf_interface:
# found the right link and connected switch
# logging.info("{0},{1}".format(link_dict[link]['src_port_id'], vnf_source_interface))
vnf_switch = connected_sw
flow_metric['mon_port'] = link_dict[link]['dst_port_nr']
break
if not vnf_switch:
logging.exception("vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface))
return "vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface)
try:
# default port direction to monitor
if metric is None:
metric = 'tx_packets'
next_node = self.net.getNodeByName(vnf_switch)
if not isinstance(next_node, OVSSwitch):
logging.info("vnf: {0} is not connected to switch".format(vnf_name))
return
flow_metric['previous_measurement'] = 0
flow_metric['previous_monitor_time'] = 0
flow_metric['switch_dpid'] = int(str(next_node.dpid), 16)
flow_metric['metric_key'] = metric
flow_metric['cookie'] = cookie
self.monitor_flow_lock.acquire()
self.flow_metrics.append(flow_metric)
self.monitor_flow_lock.release()
logging.info('Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))
return 'Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)
except Exception as ex:
logging.exception("setup_metric error.")
return ex.message
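# Illustrative note (assumption, not taken from the original code): the link
# dictionaries in DCNetwork_graph that are inspected above are expected to look
# roughly like
#   {0: {'src_port_id': 'vnf1:port0', 'src_port_nr': 1,
#        'dst_port_id': 's1:port1', 'dst_port_nr': 2}}
# i.e. 'src_port_id' names the VNF-side interface and 'dst_port_nr' is the switch
# port that is actually polled for statistics.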
def stop_flow(self, vnf_name, vnf_interface=None, metric=None, cookie=0):
for flow_dict in self.flow_metrics:
if flow_dict['vnf_name'] == vnf_name and flow_dict['vnf_interface'] == vnf_interface \
and flow_dict['metric_key'] == metric and flow_dict['cookie'] == cookie:
self.monitor_flow_lock.acquire()
self.flow_metrics.remove(flow_dict)
for collector in self.registry._collectors:
if (vnf_name, vnf_interface, cookie) in collector._metrics:
#logging.info('2 name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames,
# collector._metrics))
collector.remove(vnf_name, vnf_interface, cookie)
delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')
self.monitor_flow_lock.release()
logging.info('Stopped monitoring flow {3}: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))
return 'Stopped monitoring flow {3}: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)
# first set some parameters, before measurement can start
def setup_metric(self, vnf_name, vnf_interface=None, metric='tx_packets'):
network_metric = {}
# check if port is specified (vnf:port)
if vnf_interface is None:
# take first interface by default
connected_sw = self.net.DCNetwork_graph.neighbors(vnf_name)[0]
link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
vnf_interface = link_dict[0]['src_port_id']
network_metric['vnf_name'] = vnf_name
network_metric['vnf_interface'] = vnf_interface
for connected_sw in self.net.DCNetwork_graph.neighbors(vnf_name):
link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
for link in link_dict:
# logging.info("{0},{1}".format(link_dict[link],vnf_interface))
if link_dict[link]['src_port_id'] == vnf_interface:
# found the right link and connected switch
# logging.info("{0},{1}".format(link_dict[link]['src_port_id'], vnf_source_interface))
network_metric['mon_port'] = link_dict[link]['dst_port_nr']
break
if 'mon_port' not in network_metric:
logging.exception("vnf interface {0}:{1} not found!".format(vnf_name,vnf_interface))
return "vnf interface {0}:{1} not found!".format(vnf_name,vnf_interface)
try:
# default port direction to monitor
if metric is None:
metric = 'tx_packets'
vnf_switch = self.net.DCNetwork_graph.neighbors(str(vnf_name))
if len(vnf_switch) > 1:
logging.info("vnf: {0} has multiple ports".format(vnf_name))
return
elif len(vnf_switch) == 0:
logging.info("vnf: {0} is not connected".format(vnf_name))
return
else:
vnf_switch = vnf_switch[0]
next_node = self.net.getNodeByName(vnf_switch)
if not isinstance(next_node, OVSSwitch):
logging.info("vnf: {0} is not connected to switch".format(vnf_name))
return
network_metric['previous_measurement'] = 0
network_metric['previous_monitor_time'] = 0
network_metric['switch_dpid'] = int(str(next_node.dpid), 16)
network_metric['metric_key'] = metric
self.monitor_lock.acquire()
self.network_metrics.append(network_metric)
self.monitor_lock.release()
logging.info('Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))
return 'Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)
except Exception as ex:
logging.exception("setup_metric error.")
return ex.message
def stop_metric(self, vnf_name, vnf_interface=None, metric=None):
for metric_dict in self.network_metrics:
#logging.info('start Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric_dict))
if metric_dict['vnf_name'] == vnf_name and metric_dict['vnf_interface'] == vnf_interface \
and metric_dict['metric_key'] == metric:
self.monitor_lock.acquire()
self.network_metrics.remove(metric_dict)
#this removes the complete metric, all labels...
#REGISTRY.unregister(self.prom_metrics[metric_dict['metric_key']])
#self.registry.unregister(self.prom_metrics[metric_dict['metric_key']])
for collector in self.registry._collectors :
#logging.info('name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames, collector._metrics))
"""
INFO:root:name:sonemu_rx_count_packets
labels:('vnf_name', 'vnf_interface')
metrics:{(u'tsrc', u'output'): <prometheus_client.core.Gauge object at 0x7f353447fd10>}
"""
logging.info('{0}'.format(collector._metrics.values()))
#if self.prom_metrics[metric_dict['metric_key']]
if (vnf_name, vnf_interface, 'None') in collector._metrics:
logging.info('2 name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames,
collector._metrics))
#collector._metrics = {}
collector.remove(vnf_name, vnf_interface, 'None')
# set values to NaN, prometheus api currently does not support removal of metrics
#self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))
# this removes the complete metric, all labels...
# 1 single monitor job for all metrics of the SDN controller
# we can only remove, from the pushgateway, the grouping keys (labels) that we defined for pushadd_to_gateway;
# we cannot specify labels of individual metrics to be removed.
# If we need to remove metrics separately, they need a separate grouping key, and probably a different registry as well.
delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')
self.monitor_lock.release()
logging.info('Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))
return 'Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)
# delete everything from this vnf
elif metric_dict['vnf_name'] == vnf_name and vnf_interface is None and metric is None:
self.monitor_lock.acquire()
self.network_metrics.remove(metric_dict)
for collector in self.registry._collectors:
collector_dict = collector._metrics.copy()
for name, interface, id in collector_dict:
if name == vnf_name:
logging.info('3 name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames,
collector._metrics))
collector.remove(name, interface, 'None')
delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')
self.monitor_lock.release()
logging.info('Stopped monitoring vnf: {0}'.format(vnf_name))
return 'Stopped monitoring: {0}'.format(vnf_name)
# get all metrics defined in the list and export it to Prometheus
def get_flow_metrics(self):
while self.start_monitoring:
self.monitor_flow_lock.acquire()
for flow_dict in self.flow_metrics:
data = {}
data['cookie'] = flow_dict['cookie']
if 'tx' in flow_dict['metric_key']:
data['match'] = {'in_port':flow_dict['mon_port']}
elif 'rx' in flow_dict['metric_key']:
data['out_port'] = flow_dict['mon_port']
# query Ryu
ret = self.net.ryu_REST('stats/flow', dpid=flow_dict['switch_dpid'], data=data)
flow_stat_dict = ast.literal_eval(ret)
#logging.info('received flow stat:{0} '.format(flow_stat_dict))
self.set_flow_metric(flow_dict, flow_stat_dict)
self.monitor_flow_lock.release()
time.sleep(1)
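# Illustrative note (assumption about the Ryu ofctl REST API, not taken from this
# code base): the request body built above and sent to 'stats/flow' looks like
#   {'cookie': 1, 'match': {'in_port': 2}}      # tx_* metrics
#   {'cookie': 1, 'out_port': 2}                # rx_* metrics
# which restricts the returned flow statistics to the monitored port.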
def get_network_metrics(self):
while self.start_monitoring:
self.monitor_lock.acquire()
# group metrics by dpid to optimize the rest api calls
dpid_list = [metric_dict['switch_dpid'] for metric_dict in self.network_metrics]
dpid_set = set(dpid_list)
for dpid in dpid_set:
# query Ryu
ret = self.net.ryu_REST('stats/port', dpid=dpid)
port_stat_dict = ast.literal_eval(ret)
metric_list = [metric_dict for metric_dict in self.network_metrics
if int(metric_dict['switch_dpid'])==int(dpid)]
#logging.info('1set prom packets:{0} '.format(self.network_metrics))
for metric_dict in metric_list:
self.set_network_metric(metric_dict, port_stat_dict)
self.monitor_lock.release()
time.sleep(1)
# add metric to the list to export to Prometheus, parse the Ryu port-stats reply
def set_network_metric(self, metric_dict, port_stat_dict):
# vnf tx is the datacenter switch rx and vice-versa
metric_key = self.switch_tx_rx(metric_dict['metric_key'])
switch_dpid = metric_dict['switch_dpid']
vnf_name = metric_dict['vnf_name']
vnf_interface = metric_dict['vnf_interface']
previous_measurement = metric_dict['previous_measurement']
previous_monitor_time = metric_dict['previous_monitor_time']
mon_port = metric_dict['mon_port']
for port_stat in port_stat_dict[str(switch_dpid)]:
if int(port_stat['port_no']) == int(mon_port):
port_uptime = port_stat['duration_sec'] + port_stat['duration_nsec'] * 10 ** (-9)
this_measurement = int(port_stat[metric_key])
#logging.info('set prom packets:{0} {1}:{2}'.format(this_measurement, vnf_name, vnf_interface))
# set prometheus metric
self.prom_metrics[metric_dict['metric_key']].\
labels({'vnf_name': vnf_name, 'vnf_interface': vnf_interface, 'flow_id': None}).\
set(this_measurement)
#push_to_gateway(self.pushgateway, job='SDNcontroller',
# grouping_key={'metric':metric_dict['metric_key']}, registry=self.registry)
# 1 single monitor job for all metrics of the SDN controller
pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
if previous_monitor_time <= 0 or previous_monitor_time >= port_uptime:
metric_dict['previous_measurement'] = int(port_stat[metric_key])
metric_dict['previous_monitor_time'] = port_uptime
# do first measurement
#logging.info('first measurement')
time.sleep(1)
self.monitor_lock.release()
metric_rate = self.get_network_metrics()
return metric_rate
else:
time_delta = (port_uptime - metric_dict['previous_monitor_time'])
metric_rate = (this_measurement - metric_dict['previous_measurement']) / float(time_delta)
#logging.info('metric: {0} rate:{1}'.format(metric_dict['metric_key'], metric_rate))
metric_dict['previous_measurement'] = this_measurement
metric_dict['previous_monitor_time'] = port_uptime
return metric_rate
logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))
return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)
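# Worked example (illustrative, not from the original source): if the previous sample
# read tx_packets = 1000 at port_uptime = 10.0 s and the current sample reads 1600 at
# 12.0 s, the computed rate is (1600 - 1000) / (12.0 - 10.0) = 300 packets/s, while
# the raw counter value 1600 is what gets pushed to the Prometheus gateway.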
def set_flow_metric(self, metric_dict, flow_stat_dict):
# vnf tx is the datacenter switch rx and vice-versa
#metric_key = self.switch_tx_rx(metric_dict['metric_key'])
metric_key = metric_dict['metric_key']
switch_dpid = metric_dict['switch_dpid']
vnf_name = metric_dict['vnf_name']
vnf_interface = metric_dict['vnf_interface']
previous_measurement = metric_dict['previous_measurement']
previous_monitor_time = metric_dict['previous_monitor_time']
cookie = metric_dict['cookie']
# TODO aggregate all found flow stats
flow_stat = flow_stat_dict[str(switch_dpid)][0]
if 'bytes' in metric_key:
counter = flow_stat['byte_count']
elif 'packet' in metric_key:
counter = flow_stat['packet_count']
flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar value: Required. Collection of resources.
:vartype value: list[~azure.mgmt.appcontainers.models.Certificate]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Certificate]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Certificate"],
**kwargs
):
"""
:keyword value: Required. Collection of resources.
:paramtype value: list[~azure.mgmt.appcontainers.models.Certificate]
"""
super(CertificateCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class CertificatePatch(msrest.serialization.Model):
"""A certificate to update.
:ivar tags: A set of tags. Application-specific metadata in the form of key-value pairs.
:vartype tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Application-specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
"""
super(CertificatePatch, self).__init__(**kwargs)
self.tags = tags
class CertificateProperties(msrest.serialization.Model):
"""Certificate resource specific properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: Provisioning state of the certificate. Possible values include:
"Succeeded", "Failed", "Canceled", "DeleteFailed", "Pending".
:vartype provisioning_state: str or
~azure.mgmt.appcontainers.models.CertificateProvisioningState
:ivar password: Certificate password.
:vartype password: str
:ivar subject_name: Subject name of the certificate.
:vartype subject_name: str
:ivar value: PFX or PEM blob.
:vartype value: bytearray
:ivar issuer: Certificate issuer.
:vartype issuer: str
:ivar issue_date: Certificate issue Date.
:vartype issue_date: ~datetime.datetime
:ivar expiration_date: Certificate expiration date.
:vartype expiration_date: ~datetime.datetime
:ivar thumbprint: Certificate thumbprint.
:vartype thumbprint: str
:ivar valid: Is the certificate valid?.
:vartype valid: bool
:ivar public_key_hash: Public key hash.
:vartype public_key_hash: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'subject_name': {'readonly': True},
'issuer': {'readonly': True},
'issue_date': {'readonly': True},
'expiration_date': {'readonly': True},
'thumbprint': {'readonly': True},
'valid': {'readonly': True},
'public_key_hash': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'subject_name': {'key': 'subjectName', 'type': 'str'},
'value': {'key': 'value', 'type': 'bytearray'},
'issuer': {'key': 'issuer', 'type': 'str'},
'issue_date': {'key': 'issueDate', 'type': 'iso-8601'},
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'valid': {'key': 'valid', 'type': 'bool'},
'public_key_hash': {'key': 'publicKeyHash', 'type': 'str'},
}
def __init__(
self,
*,
password: Optional[str] = None,
value: Optional[bytearray] = None,
**kwargs
):
"""
:keyword password: Certificate password.
:paramtype password: str
:keyword value: PFX or PEM blob.
:paramtype value: bytearray
"""
super(CertificateProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.password = password
self.subject_name = None
self.value = value
self.issuer = None
self.issue_date = None
self.expiration_date = None
self.thumbprint = None
self.valid = None
self.public_key_hash = None
class CheckNameAvailabilityRequest(msrest.serialization.Model):
"""The check availability request body.
:ivar name: The name of the resource for which availability needs to be checked.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
type: Optional[str] = None,
**kwargs
):
"""
:keyword name: The name of the resource for which availability needs to be checked.
:paramtype name: str
:keyword type: The resource type.
:paramtype type: str
"""
super(CheckNameAvailabilityRequest, self).__init__(**kwargs)
self.name = name
self.type = type
class CheckNameAvailabilityResponse(msrest.serialization.Model):
"""The check availability result.
:ivar name_available: Indicates if the resource name is available.
:vartype name_available: bool
:ivar reason: The reason why the given name is not available. Possible values include:
"Invalid", "AlreadyExists".
:vartype reason: str or ~azure.mgmt.appcontainers.models.CheckNameAvailabilityReason
:ivar message: Detailed reason why the given name is available.
:vartype message: str
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
name_available: Optional[bool] = None,
reason: Optional[Union[str, "CheckNameAvailabilityReason"]] = None,
message: Optional[str] = None,
**kwargs
):
"""
:keyword name_available: Indicates if the resource name is available.
:paramtype name_available: bool
:keyword reason: The reason why the given name is not available. Possible values include:
"Invalid", "AlreadyExists".
:paramtype reason: str or ~azure.mgmt.appcontainers.models.CheckNameAvailabilityReason
:keyword message: Detailed reason why the given name is available.
:paramtype message: str
"""
super(CheckNameAvailabilityResponse, self).__init__(**kwargs)
self.name_available = name_available
self.reason = reason
self.message = message
class ClientRegistration(msrest.serialization.Model):
"""The configuration settings of the app registration for providers that have client ids and client secrets.
:ivar client_id: The Client ID of the app used for login.
:vartype client_id: str
:ivar client_secret_setting_name: The app setting name that contains the client secret.
:vartype client_secret_setting_name: str
"""
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'client_secret_setting_name': {'key': 'clientSecretSettingName', 'type': 'str'},
}
def __init__(
self,
*,
client_id: Optional[str] = None,
client_secret_setting_name: Optional[str] = None,
**kwargs
):
"""
:keyword client_id: The Client ID of the app used for login.
:paramtype client_id: str
:keyword client_secret_setting_name: The app setting name that contains the client secret.
:paramtype client_secret_setting_name: str
"""
super(ClientRegistration, self).__init__(**kwargs)
self.client_id = client_id
self.client_secret_setting_name = client_secret_setting_name
class Configuration(msrest.serialization.Model):
"""Non versioned Container App configuration properties that define the mutable settings of a Container app.
:ivar secrets: Collection of secrets used by a Container app.
:vartype secrets: list[~azure.mgmt.appcontainers.models.Secret]
:ivar active_revisions_mode: ActiveRevisionsMode controls how active revisions are handled for
the Container app:
.. raw:: html
<list><item>Multiple: multiple revisions can be active.</item><item>Single: Only one
revision can be active at a time. Revision weights can not be used in this mode. If no value is
provided, this is the default.</item></list>. Possible values include: "Multiple", "Single".
:vartype active_revisions_mode: str or ~azure.mgmt.appcontainers.models.ActiveRevisionsMode
:ivar ingress: Ingress configurations.
:vartype ingress: ~azure.mgmt.appcontainers.models.Ingress
:ivar registries: Collection of private container registry credentials for containers used by
the Container app.
:vartype registries: list[~azure.mgmt.appcontainers.models.RegistryCredentials]
:ivar dapr: Dapr configuration for the Container App.
:vartype dapr: ~azure.mgmt.appcontainers.models.Dapr
"""
_attribute_map = {
'secrets': {'key': 'secrets', 'type': '[Secret]'},
'active_revisions_mode': {'key': 'activeRevisionsMode', 'type': 'str'},
'ingress': {'key': 'ingress', 'type': 'Ingress'},
'registries': {'key': 'registries', 'type': '[RegistryCredentials]'},
'dapr': {'key': 'dapr', 'type': 'Dapr'},
}
def __init__(
self,
*,
secrets: Optional[List["Secret"]] = None,
active_revisions_mode: Optional[Union[str, "ActiveRevisionsMode"]] = None,
ingress: Optional["Ingress"] = None,
registries: Optional[List["RegistryCredentials"]] = None,
dapr: Optional["Dapr"] = None,
**kwargs
):
"""
:keyword secrets: Collection of secrets used by a Container app.
:paramtype secrets: list[~azure.mgmt.appcontainers.models.Secret]
:keyword active_revisions_mode: ActiveRevisionsMode controls how active revisions are handled
for the Container app:
.. raw:: html
<list><item>Multiple: multiple revisions can be active.</item><item>Single: Only one
revision can be active at a time. Revision weights can not be used in this mode. If no value is
provided, this is the default.</item></list>. Possible values include: "Multiple", "Single".
:paramtype active_revisions_mode: str or ~azure.mgmt.appcontainers.models.ActiveRevisionsMode
:keyword ingress: Ingress configurations.
:paramtype ingress: ~azure.mgmt.appcontainers.models.Ingress
:keyword registries: Collection of private container registry credentials for containers used
by the Container app.
:paramtype registries: list[~azure.mgmt.appcontainers.models.RegistryCredentials]
:keyword dapr: Dapr configuration for the Container App.
:paramtype dapr: ~azure.mgmt.appcontainers.models.Dapr
"""
super(Configuration, self).__init__(**kwargs)
self.secrets = secrets
self.active_revisions_mode = active_revisions_mode
self.ingress = ingress
self.registries = registries
self.dapr = dapr
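# Illustrative usage sketch (not part of the generated SDK code; the Secret and
# Ingress parameter names below are assumptions and may differ in the actual models):
#
#   config = Configuration(
#       secrets=[Secret(name="registry-password", value="<placeholder>")],
#       active_revisions_mode="Single",
#       ingress=Ingress(external=True, target_port=80),
#   )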
class Container(msrest.serialization.Model):
"""Container App container definition.
:ivar image: Container image tag.
:vartype image: str
:ivar name: Custom container name.
:vartype name: str
:ivar command: Container start command.
:vartype command: list[str]
:ivar args: Container start command arguments.
:vartype args: list[str]
:ivar env: Container environment variables.
:vartype env: list[~azure.mgmt.appcontainers.models.EnvironmentVar]
:ivar resources: Container resource requirements.
:vartype resources: ~azure.mgmt.appcontainers.models.ContainerResources
:ivar probes: List of probes for the container.
:vartype probes: list[~azure.mgmt.appcontainers.models.ContainerAppProbe]
:ivar volume_mounts: Container volume mounts.
:vartype volume_mounts: list[~azure.mgmt.appcontainers.models.VolumeMount]
"""
_attribute_map = {
'image': {'key': 'image', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'command': {'key': 'command', 'type': '[str]'},
'args': {'key': 'args', 'type': '[str]'},
'env': {'key': 'env', 'type': '[EnvironmentVar]'},
'resources': {'key': 'resources', 'type': 'ContainerResources'},
'probes': {'key': 'probes', 'type': '[ContainerAppProbe]'},
'volume_mounts': {'key': 'volumeMounts', 'type': '[VolumeMount]'},
}
def __init__(
self,
*,
image: Optional[str] = None,
name: Optional[str] = None,
command: Optional[List[str]] = None,
args: Optional[List[str]] = None,
env: Optional[List["EnvironmentVar"]] = None,
resources: Optional["ContainerResources"] = None,
probes: Optional[List["ContainerAppProbe"]] = None,
volume_mounts: Optional[List["VolumeMount"]] = None,
**kwargs
):
"""
:keyword image: Container image tag.
:paramtype image: str
:keyword name: Custom container name.
:paramtype name: str
:keyword command: Container start command.
:paramtype command: list[str]
:keyword | |
import asyncio
import discord
from discord.ext import commands
from discord.utils import get
from discord import FFmpegPCMAudio
from json import load
import random
import content.tables as tab
import content.pictures as pic
from os import environ
from youtube_dl import YoutubeDL
from cogs.music import Music
# Main colors used for formatting the bot's embedded messages.
MAIN_COLOR = 0x8b54cf
ERROR_COLOR = 0xff0000
SUCCESS_COLOR = 0x16bd00
# Bot token imported from Heroku env.
BOT_TOKEN = environ.get('BOT_TOKEN')
# Footer text (version + update date) for every single command.
FOOTER_TEXT = 'Elvie v0.90 - WFRP 4ED\nOstatnia aktualizacja: 9/19/2021'
# Discord intents declarations -> can be modified at https://discord.com/developers/
intents = discord.Intents.default()
intents.members = True
client = commands.Bot(intents=intents, command_prefix='.')
client.remove_command('help')
@client.event
async def on_ready():
activity = discord.Game(name=".help")
await client.change_presence(status=discord.Status.online, activity=activity)
@client.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandNotFound):
embed=discord.Embed(title='⚠️Błąd polecenia⚠️', description='Nie znalazłem polecenia o tej nazwie. Może polecenie **.help** odpowie na Twoje pytanie?', color=ERROR_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
elif isinstance(error, commands.MissingRequiredArgument):
embed=discord.Embed(title='⚠️Brakujący argument⚠️', description='We wpisanym poleceniu brakuje jednego z argumentów. Sprawdź **.help** w celu weryfikacji składni polecenia.', color=ERROR_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
raise error
@client.command()
async def servers(ctx):
servers = list(client.guilds)
description = 'Połączony z **' + str(len(servers)) + '** serwerami\n\n'
for i, server in enumerate(servers, start=1):
description += '**' + str(i) + '.** ' + server.name + '\n'
embed=discord.Embed(title='Lista serwerów', description=description, color=MAIN_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
@client.command()
async def help(ctx):
description = \
'Poniżej znajdziesz listę obecnie dostępnych poleceń. Argumenty oznaczone `*` są opcjonalne:\n\n'\
'**.advance <c/u> <start> <cel> <t*>**\nOblicz koszt rozwoju od `start` do `cel` cechy lub umiejętności (`c` lub `u`). Argument `t` obniża koszt rozwinięcia o 5 PD. Przykładowo:\n`.advance c 12 15` albo `.advance u 5 14 t`\n\n'\
'**.advance_table <m*>**\nWyświetl tabelę *Koszt rozwoju cech i umiejętności w PD*. Argument `m` wyświetla tabelę w wersji na urządzenia mobilne. Przykładowo:\n`.advance_table` albo `.advance_table m`\n\n'\
'**.talent <nazwa>**\nWyświetl opis talentu `nazwa`. Nazwa musi zostać podana z uwzględnieniem polskich znaków oraz bez użycia nawiasów. Przykładowo:\n`.talent bardzo szybki` albo `.talent magia tajemna`\n\n'\
'**.ability <nazwa>**\nWyświetl opis umiejętności `nazwa`. Nazwa musi zostać podana z uwzględnieniem polskich znaków oraz bez użycia nawiasów. Przykładowo:\n`.ability rzemiosło` albo `.ability mocna głowa`\n\nPodziękowania dla Kazyleusz#2024.\n\n'\
'**.miscast <w*>**\nWylosuj mniejszą lub większą (parametr `w`) manifestację. Przykładowo:\n`.miscast` albo `.miscast w`\n\n'\
'**.corruption <p*>**\nWylosuj spaczenie fizyczne lub zepsucie psychiczne (parametr `p`). Przykładowo:\n`.corruption` albo `.corruption p`\n\n'\
'**.music** \nWyświetl instrukcję dotyczącą odtwarzania muzyki z portalu YouTube\n\n'\
'**.fortune**\nWylosuj 4 karty, wybierz jedną i sprawdź czy `Ranald` wysłucha Twej modlitwy.\n\n'\
'**.clear <wartość>**\nWyczyść `wartość` wiadomości. Może się przydać w trzymaniu porządku na kanale z rzutami. Użycie polecenia wymaga uprawnień administratora.\n\n'\
'**.contact <wiadomość>**\nWyślij `wiadomość` bezpośrednio do autora bota. Wszelkie wykryte błędy, zażalenia i pytania są mile widziane.\n\n'\
'**.invite**\nWygeneruj `URL`, dzięki któremu będziesz mógł zaprosić Elviego na własny serwer.\n\n'\
embed=discord.Embed(title='Krótka instrukcja bota Elvie', description=description, color=MAIN_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
@client.command()
async def music(ctx):
description = \
'Poniżej znajdziesz listę obecnie dostępnych poleceń związanych z odtwarzaniem muzyki:\n\n'\
'**.play <URL/fraza>**\nOdtwórz utwór z platformy YouTube. Możesz wpisać URL lub wyszukać daną frazę. Przykładowo:\n`.play <adres_url>` albo `.play the city must survive`\n\nAlternatywne nazwy:\n`.p` `.sing`\n\n'\
'**.pause**\nZapauzuj utwór bez czyszczenia kolejki. Bot pozostanie na kanale.\n\n'\
'**.stop**\nZatrzymaj utwór i wyczyść kolejkę. Bot odłączy się z kanału.\n\n'\
'**.skip**\nPomiń aktualny utwór. Może nastąpić mała przerwa między utworami.\n\n'\
'**.volume <w>**\nZmień głośność odtwarzanego utworu na `w` procent. Przykładowo:\n`.volume 20` albo `.volume 80`\n\nAlternatywne nazwy:\n`.vol`\n\n'\
'**.now_playing**\nWyświetl informacje o aktualnie odtwarzanym utworze.\n\nAlternatywne nazwy:\n`.np` `.current` `.currentsong` `.playing`\n\n'\
'**.queue**\nWyświetl zakolejkowane utwory. Kolejka nie uwzględnia aktualnie odtwarzanego utworu.\n\nAlternatywne nazwy:\n`.q`, `.playlist`\n\n'\
embed=discord.Embed(title='Krótka instrukcja muzycznego modułu bota Elvie', description=description, color=MAIN_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
@client.command()
async def reaction(ctx):
zus = {
pic.ZUS_PIC_NOT_AMUSED:'shocked!',
pic.ZUS_PIC_BORED:'bored!',
pic.ZUS_PIC_HUNGRY:'hungry!',
pic.ZUS_PIC_THIRSTY:'thirsty!',
pic.ZUS_PIC_FANCY:'feeling fancy!'}
zus_choice = random.choice(list(zus.items()))
embed=discord.Embed(title='Zus reaction table', description='Zus is ' + zus_choice[1], color=MAIN_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
embed.set_image(url=zus_choice[0])
await ctx.send(embed=embed)
@client.command()
async def tracks(ctx):
embed=discord.Embed(title='Tracks', description='Work in progress.', color=MAIN_COLOR)
await ctx.send(embed=embed)
@client.command()
async def clear(ctx, amount: int):
if ctx.author.guild_permissions.administrator:
deleted = await ctx.channel.purge(limit=amount)
embed=discord.Embed(title='Usunięto wiadomości', description='Usunięto **' + str(len(deleted)) + '** wiadomości.', color=MAIN_COLOR)
else:
embed=discord.Embed(title='⚠️Błąd uprawnień⚠️', description='Nie jesteś administratorem.', color=ERROR_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
@client.command()
async def invite(ctx):
embed=discord.Embed(title='Link do zaproszenia', description='https://discord.com/oauth2/authorize?client_id=864205486056669244&permissions=8&scope=bot', color=MAIN_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
@client.command()
async def advance(ctx, type: str, init: int, goal: int, talent: str=None):
ability_map = {5:10, 10:15, 15:20, 20:30, 25:40, 30:60, 35:80, 40:110, 45:140, 50:180, 55:220, 60:270, 65:320, 70:380, 9999:440}
attribute_map = {5:25, 10:30, 15:40, 20:50, 25:70, 30:90, 35:120, 40:150, 45:190, 50:230, 55:280, 60:330, 65:390, 70:450, 9999:520}
if type == 'c':
chosen_map = attribute_map
choice = 'cechy'
elif type == 'u':
chosen_map = ability_map
choice = 'umiejętności'
current = init
cost_sum = 0
dif = goal - init
for key, value in chosen_map.items():
while current < key and dif != 0:
cost_sum += value
current += 1
dif -= 1
description = \
'Twoja początkowa wartość **' + choice + '** to: **' + str(init) + '**\n'\
'Twoja docelowa wartość **' + choice + '** to: **' + str(goal) + '**\n\n'
if talent == 't':
description += 'Jeden z Twoich talentów obniża koszt o **5 PD** za każde rozwinięcie.\n\n'\
'Finalny koszt rozwinięcia to: **' + str(cost_sum - 5 * (goal - init)) + ' PD**'
else:
description += 'Koszt rozwinięcia to: **' + str(cost_sum) + ' PD**'
embed=discord.Embed(title='Rozwinięcie ' + choice, description=description, color=MAIN_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
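# Illustrative sketch (not part of the bot): the stepped-cost lookup used by
# .advance above, written as a plain function so the arithmetic can be checked
# outside Discord. The helper name and the example numbers are made up for this note.
def _advance_cost_example(cost_map, init, goal, talent_discount=False):
    current, total, remaining = init, 0, goal - init
    for threshold, step_cost in cost_map.items():
        while current < threshold and remaining > 0:
            total += step_cost
            current += 1
            remaining -= 1
    if talent_discount:
        total -= 5 * (goal - init)
    return total
# Example with the ability map above: raising a skill from 0 to 6 costs
# 5 * 10 + 1 * 15 = 65 PD (the first five advances cost 10 PD each, the sixth 15 PD).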
@client.command()
async def miscast(ctx, type: str='m'):
roll = random.randint(1,100)
if type == 'w':
table = tab.MISCAST_MAJOR
name = 'Większa'
else:
table = tab.MISCAST_MINOR
name = 'Mniejsza'
for i, r in enumerate(range(5, 101, 5)):
if roll <= r:
miscast = table[i]
break
embed=discord.Embed(title=name + ' manifestacja!', description='Wyrzuciłeś **' + str(roll) + '**...\n\n' + miscast, color=MAIN_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
@client.command()
async def corruption(ctx, type: str='f'):
roll = random.randint(1,100)
if type == 'p':
table = tab.CORRUPTION_MENTAL
name = 'zepsucie psychiczne!'
else:
table = tab.CORRUPTION_PHYSICAL
name = 'spaczenie fizyczne!'
for i, r in enumerate(range(5, 101, 5)):
if roll <= r:
corruption = table[i]
break
embed=discord.Embed(title='Wylosowano ' + name, description='Wyrzuciłeś **' + str(roll) + '**...\n\n' + corruption, color=MAIN_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
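# Note (illustrative): the 5-point bucketing used by .miscast and .corruption
# above is equivalent to a direct index computation, assuming a 20-entry table:
#     index = (roll - 1) // 5   # rolls 1-5 -> 0, 6-10 -> 1, ..., 96-100 -> 19
#     result = table[index]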
@client.command()
async def fortune(ctx):
reactions = ['1️⃣', '2️⃣', '3️⃣', '4️⃣']
author = ctx.message.author
winner = random.choice(reactions)
index = reactions.index(winner)
win_card = pic.WIN_CARDS[index]
embed=discord.Embed(title='Punkt szczęścia użyty!', description='Czyli Twoja dobra passa się skończyła i nagle chcesz, by sam **Ranald** Ci dopomógł?\n\nDobrze, wybierz kartę śmiertelniku...\n\n', color=MAIN_COLOR)
embed.set_image(url=pic.CARD_REVERSE)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
message = await ctx.send(embed=embed)
for emoji in reactions:
await message.add_reaction(emoji)
try:
reaction, user = await client.wait_for('reaction_add', timeout=45.0, check= lambda reaction, user: user == ctx.message.author and str(reaction.emoji) in reactions)
except asyncio.TimeoutError:
embed=discord.Embed(title='Za późno...', description=author.mention + ', Twój czas się skończył.', color=ERROR_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
else:
if str(reaction.emoji) == winner:
embed=discord.Embed(title='🤞 Twój wybór...', description='Świetnie ' + author.mention + ', dziś Ranald wysłuchał Twej prośby!', color=SUCCESS_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
embed.set_image(url=win_card)
else:
lose_card = pic.LOSE_CARDS[reactions.index(str(reaction.emoji))]
embed=discord.Embed(title='🤞 Twój wybór...', description=author.mention + ', to był bardzo zły wybór...\n\nSzczęśliwą kartą była karta nr ' + str(winner), color=ERROR_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
embed.set_image(url=lose_card)
await ctx.send(embed=embed)
@client.command()
async def advance_table(ctx, version: str='pc'):
if version == 'm':
image = pic.ADVANCE_TABLE_PIC
embed=discord.Embed(title='Koszt rozwoju cech i umiejętności w PD', description='', color=MAIN_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
embed.set_image(url=image)
else:
description = tab.ADV_TABLE
embed=discord.Embed(title='Koszt rozwoju cech i umiejętności w PD', description=description, color=MAIN_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
@client.command()
async def talent(ctx, *, talent_name: str):
talent_name = talent_name.replace(' ','_').lower()
with open('content/talents.json', encoding="utf8") as jf:
json_data = load(jf)
if talent_name in json_data:
talent = json_data[talent_name]
await ctx.send('Talent name found!')
embed=discord.Embed(title=talent['name'], description=talent['description'], color=MAIN_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
embed.add_field(name="Maksimum", value=talent['max'], inline=True)
embed.add_field(name="Testy", value=talent['tests'], inline=True)
else:
embed=discord.Embed(title='⚠️Nie znalazłem talentu⚠️', description='Pamiętaj o składni podanej w poleceniu **.help**.', color=ERROR_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
await ctx.send(embed=embed)
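# Expected shape of content/talents.json, inferred from the lookup above (keys
# are lowercased with spaces replaced by underscores; the entry below is a
# made-up placeholder, not a real record from the file):
# {
#     "nazwa_talentu": {
#         "name": "...",
#         "description": "...",
#         "max": "...",
#         "tests": "..."
#     }
# }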
@client.command()
async def ability(ctx, *, ability_name: str):
ability_name = ability_name.replace(' ','_').lower()
with open('content/abilities.json', encoding="utf8") as jf:
json_data = load(jf)
if ability_name in json_data:
ability = json_data[ability_name]
await ctx.send('Ability name found!')
embed=discord.Embed(title=ability['name'], description=ability['description'], color=MAIN_COLOR)
embed.set_footer(text = FOOTER_TEXT, icon_url = pic.BOT_AVATAR)
embed.add_field(name="Typ", value=ability['type'], inline=False)
embed.add_field(name="Cecha", value=ability['attribute'], | |
# Lib/test/test_random.py
import unittest
import random
import time
import pickle
import warnings
from math import log, exp, pi, fsum, sin
from functools import reduce
from test import test_support
class TestBasicOps(unittest.TestCase):
# Superclass with tests common to all generators.
# Subclasses must arrange for self.gen to retrieve the Random instance
# to be tested.
def randomlist(self, n):
"""Helper function to make a list of random numbers"""
return [self.gen.random() for i in xrange(n)]
def test_autoseed(self):
self.gen.seed()
state1 = self.gen.getstate()
time.sleep(0.1)
        self.gen.seed()   # different seeds at different times
state2 = self.gen.getstate()
self.assertNotEqual(state1, state2)
def test_saverestore(self):
N = 1000
self.gen.seed()
state = self.gen.getstate()
randseq = self.randomlist(N)
self.gen.setstate(state) # should regenerate the same sequence
self.assertEqual(randseq, self.randomlist(N))
def test_seedargs(self):
for arg in [None, 0, 0L, 1, 1L, -1, -1L, 10**20, -(10**20),
3.14, 1+2j, 'a', tuple('abc')]:
self.gen.seed(arg)
for arg in [range(3), dict(one=1)]:
self.assertRaises(TypeError, self.gen.seed, arg)
self.assertRaises(TypeError, self.gen.seed, 1, 2)
self.assertRaises(TypeError, type(self.gen), [])
def test_jumpahead(self):
self.gen.seed()
state1 = self.gen.getstate()
self.gen.jumpahead(100)
state2 = self.gen.getstate() # s/b distinct from state1
self.assertNotEqual(state1, state2)
self.gen.jumpahead(100)
state3 = self.gen.getstate() # s/b distinct from state2
self.assertNotEqual(state2, state3)
with test_support.check_py3k_warnings(quiet=True):
self.assertRaises(TypeError, self.gen.jumpahead) # needs an arg
self.assertRaises(TypeError, self.gen.jumpahead, 2, 3) # too many
def test_jumpahead_produces_valid_state(self):
# From http://bugs.python.org/issue14591.
self.gen.seed(199210368)
self.gen.jumpahead(13550674232554645900)
for i in range(500):
val = self.gen.random()
self.assertLess(val, 1.0)
def test_sample(self):
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
N = 100
population = xrange(N)
for k in xrange(N+1):
s = self.gen.sample(population, k)
self.assertEqual(len(s), k)
uniq = set(s)
self.assertEqual(len(uniq), k)
self.assertTrue(uniq <= set(population))
self.assertEqual(self.gen.sample([], 0), []) # test edge case N==k==0
def test_sample_distribution(self):
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n = 5
pop = range(n)
trials = 10000 # large num prevents false negatives without slowing normal case
def factorial(n):
return reduce(int.__mul__, xrange(1, n), 1)
for k in xrange(n):
expected = factorial(n) // factorial(n-k)
perms = {}
for i in xrange(trials):
perms[tuple(self.gen.sample(pop, k))] = None
if len(perms) == expected:
break
else:
self.fail()
def test_sample_inputs(self):
# SF bug #801342 -- population can be any iterable defining __len__()
self.gen.sample(set(range(20)), 2)
self.gen.sample(range(20), 2)
self.gen.sample(xrange(20), 2)
self.gen.sample(str('abcdefghijklmnopqrst'), 2)
self.gen.sample(tuple('abcdefghijklmnopqrst'), 2)
def test_sample_on_dicts(self):
self.gen.sample(dict.fromkeys('abcdefghijklmnopqrst'), 2)
# SF bug #1460340 -- random.sample can raise KeyError
a = dict.fromkeys(range(10)+range(10,100,2)+range(100,110))
self.gen.sample(a, 3)
# A followup to bug #1460340: sampling from a dict could return
# a subset of its keys or of its values, depending on the size of
# the subset requested.
N = 30
d = dict((i, complex(i, i)) for i in xrange(N))
for k in xrange(N+1):
samp = self.gen.sample(d, k)
# Verify that we got ints back (keys); the values are complex.
for x in samp:
self.assertTrue(type(x) is int)
samp.sort()
self.assertEqual(samp, range(N))
def test_gauss(self):
# Ensure that the seed() method initializes all the hidden state. In
# particular, through 2.2.1 it failed to reset a piece of state used
# by (and only by) the .gauss() method.
for seed in 1, 12, 123, 1234, 12345, 123456, 654321:
self.gen.seed(seed)
x1 = self.gen.random()
y1 = self.gen.gauss(0, 1)
self.gen.seed(seed)
x2 = self.gen.random()
y2 = self.gen.gauss(0, 1)
self.assertEqual(x1, x2)
self.assertEqual(y1, y2)
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
state = pickle.dumps(self.gen, proto)
origseq = [self.gen.random() for i in xrange(10)]
newgen = pickle.loads(state)
restoredseq = [newgen.random() for i in xrange(10)]
self.assertEqual(origseq, restoredseq)
def test_bug_1727780(self):
# verify that version-2-pickles can be loaded
# fine, whether they are created on 32-bit or 64-bit
# platforms, and that version-3-pickles load fine.
files = [("randv2_32.pck", 780),
("randv2_64.pck", 866),
("randv3.pck", 343)]
for file, value in files:
f = open(test_support.findfile(file),"rb")
r = pickle.load(f)
f.close()
self.assertEqual(r.randrange(1000), value)
class WichmannHill_TestBasicOps(TestBasicOps):
gen = random.WichmannHill()
def test_setstate_first_arg(self):
self.assertRaises(ValueError, self.gen.setstate, (2, None, None))
def test_strong_jumpahead(self):
# tests that jumpahead(n) semantics correspond to n calls to random()
N = 1000
s = self.gen.getstate()
self.gen.jumpahead(N)
r1 = self.gen.random()
# now do it the slow way
self.gen.setstate(s)
for i in xrange(N):
self.gen.random()
r2 = self.gen.random()
self.assertEqual(r1, r2)
def test_gauss_with_whseed(self):
# Ensure that the seed() method initializes all the hidden state. In
# particular, through 2.2.1 it failed to reset a piece of state used
# by (and only by) the .gauss() method.
for seed in 1, 12, 123, 1234, 12345, 123456, 654321:
self.gen.whseed(seed)
x1 = self.gen.random()
y1 = self.gen.gauss(0, 1)
self.gen.whseed(seed)
x2 = self.gen.random()
y2 = self.gen.gauss(0, 1)
self.assertEqual(x1, x2)
self.assertEqual(y1, y2)
def test_bigrand(self):
# Verify warnings are raised when randrange is too large for random()
with warnings.catch_warnings():
warnings.filterwarnings("error", "Underlying random")
self.assertRaises(UserWarning, self.gen.randrange, 2**60)
class SystemRandom_TestBasicOps(TestBasicOps):
gen = random.SystemRandom()
def test_autoseed(self):
# Doesn't need to do anything except not fail
self.gen.seed()
def test_saverestore(self):
self.assertRaises(NotImplementedError, self.gen.getstate)
self.assertRaises(NotImplementedError, self.gen.setstate, None)
def test_seedargs(self):
# Doesn't need to do anything except not fail
self.gen.seed(100)
def test_jumpahead(self):
# Doesn't need to do anything except not fail
self.gen.jumpahead(100)
def test_gauss(self):
self.gen.gauss_next = None
self.gen.seed(100)
self.assertEqual(self.gen.gauss_next, None)
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(NotImplementedError, pickle.dumps, self.gen, proto)
def test_53_bits_per_float(self):
# This should pass whenever a C double has 53 bit precision.
span = 2 ** 53
cum = 0
for i in xrange(100):
cum |= int(self.gen.random() * span)
self.assertEqual(cum, span-1)
def test_bigrand(self):
# The randrange routine should build-up the required number of bits
# in stages so that all bit positions are active.
span = 2 ** 500
cum = 0
for i in xrange(100):
r = self.gen.randrange(span)
self.assertTrue(0 <= r < span)
cum |= r
self.assertEqual(cum, span-1)
def test_bigrand_ranges(self):
for i in [40,80, 160, 200, 211, 250, 375, 512, 550]:
start = self.gen.randrange(2 ** (i-2))
stop = self.gen.randrange(2 ** i)
if stop <= start:
continue
self.assertTrue(start <= self.gen.randrange(start, stop) < stop)
def test_rangelimits(self):
for start, stop in [(-2,0), (-(2**60)-2,-(2**60)), (2**60,2**60+2)]:
self.assertEqual(set(range(start,stop)),
set([self.gen.randrange(start,stop) for i in xrange(100)]))
def test_genrandbits(self):
# Verify ranges
for k in xrange(1, 1000):
self.assertTrue(0 <= self.gen.getrandbits(k) < 2**k)
# Verify all bits active
getbits = self.gen.getrandbits
for span in [1, 2, 3, 4, 31, 32, 32, 52, 53, 54, 119, 127, 128, 129]:
cum = 0
for i in xrange(100):
cum |= getbits(span)
self.assertEqual(cum, 2**span-1)
# Verify argument checking
self.assertRaises(TypeError, self.gen.getrandbits)
self.assertRaises(TypeError, self.gen.getrandbits, 1, 2)
self.assertRaises(ValueError, self.gen.getrandbits, 0)
self.assertRaises(ValueError, self.gen.getrandbits, -1)
self.assertRaises(TypeError, self.gen.getrandbits, 10.1)
def test_randbelow_logic(self, _log=log, int=int):
# check bitcount transition points: 2**i and 2**(i+1)-1
# show that: k = int(1.001 + _log(n, 2))
# is equal to or one greater than the number of bits in n
for i in xrange(1, 1000):
n = 1L << i # check an exact power of two
numbits = i+1
k = int(1.00001 + _log(n, 2))
self.assertEqual(k, numbits)
self.assertTrue(n == 2**(k-1))
n += n - 1 # check 1 below the next power of two
k = int(1.00001 + _log(n, 2))
self.assertIn(k, [numbits, numbits+1])
self.assertTrue(2**k > n > 2**(k-2))
n -= n >> 15 # check a little farther below the next power of two
k = int(1.00001 + _log(n, 2))
self.assertEqual(k, numbits) # note the stronger assertion
self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion
class MersenneTwister_TestBasicOps(TestBasicOps):
gen = random.Random()
@test_support.cpython_only
def test_bug_31478(self):
# _random.Random.seed() should ignore the __abs__() method of a
# long/int subclass argument.
class BadInt(int):
def __abs__(self):
1/0.0
class BadLong(long):
def __abs__(self):
1/0.0
self.gen.seed(42)
expected_value = self.gen.random()
for seed_arg in [42L, BadInt(42), BadLong(42)]:
self.gen.seed(seed_arg)
self.assertEqual(self.gen.random(), expected_value)
def test_setstate_first_arg(self):
self.assertRaises(ValueError, self.gen.setstate, (1, None, None))
def test_setstate_middle_arg(self):
start_state = self.gen.getstate()
# Wrong type, s/b tuple
self.assertRaises(TypeError, self.gen.setstate, (2, None, None))
# Wrong length, s/b 625
self.assertRaises(ValueError, self.gen.setstate, (2, (1,2,3), None))
# Wrong type, s/b tuple of 625 ints
self.assertRaises(TypeError, self.gen.setstate, (2, ('a',)*625, None))
# Last element s/b an int also
self.assertRaises(TypeError, self.gen.setstate, (2, (0,)*624+('a',), None))
# Last element s/b between 0 and 624
with self.assertRaises((ValueError, OverflowError)):
self.gen.setstate((2, (1,)*624+(625,), None))
with self.assertRaises((ValueError, OverflowError)):
self.gen.setstate((2, (1,)*624+(-1,), None))
# Failed calls to setstate() should not have changed the state.
bits100 = self.gen.getrandbits(100)
self.gen.setstate(start_state)
self.assertEqual(self.gen.getrandbits(100), bits100)
def test_referenceImplementation(self):
# Compare the python implementation with results from the original
        # code.
#! /usr/bin/env python
#=========================================================================
# Graph.py
#=========================================================================
#
# Author : <NAME>
# Date : June 2, 2019
#
from __future__ import print_function
from .Edge import Edge
from .Step import Step
from ..utils import get_top_dir
class Graph( object ):
def __init__( s ):
s._edges_i = {}
s._edges_o = {}
s._steps = {}
#-----------------------------------------------------------------------
# API to help build the graph interactively
#-----------------------------------------------------------------------
# ADKs
def set_adk( s, adk, default=True ):
if default:
s.adk = Step( get_top_dir() + '/adks/' + adk, default=False )
else:
s.adk = Step( adk, default=False )
s.add_step( s.adk )
def get_adk_step( s ):
return s.adk
# Steps
def add_step( s, step ):
key = step.get_name()
assert key not in s._steps.keys(), \
      'add_step -- Duplicate step! ' \
      'If this is intentional, first change the step name'
s._steps[ key ] = step
def get_step( s, step_name ):
return s._steps[ step_name ]
def all_steps( s ):
return s._steps.keys()
# Edges -- incoming and outgoing adjacency lists
def get_edges_i( s, step_name ):
try:
return s._edges_i[ step_name ]
except KeyError:
return []
def get_edges_o( s, step_name ):
try:
return s._edges_o[ step_name ]
except KeyError:
return []
# Quality-of-life utility function
def dangling_inputs( s ):
dangling = []
for step_name in s.all_steps():
incoming_edges = s.get_edges_i( step_name )
incoming_edge_f_names = [ e.get_dst()[1] for e in incoming_edges ]
inputs = s.get_step( step_name ).all_inputs()
if inputs:
for x in inputs:
if x not in incoming_edge_f_names:
dangling.append( ( step_name, x ) )
if dangling:
for step_name, f_name in dangling:
msg = 'Dangling input in step "{}": {}'
msg = msg.format( step_name, f_name )
print( msg )
else:
print( 'No dangling inputs in graph' )
#-----------------------------------------------------------------------
# Connect
#-----------------------------------------------------------------------
def connect( s, l_handle, r_handle ):
# Twizzle and figure out which side is the src and which is the dst
l_step_name, l_direction, l_handle_name = l_handle
r_step_name, r_direction, r_handle_name = r_handle
if l_direction == 'inputs':
assert r_direction == 'outputs', \
'connect -- Must connect an input to an output'
src_handle = r_handle
dst_handle = l_handle
elif r_direction == 'inputs':
assert l_direction == 'outputs', \
'connect -- Must connect an input to an output'
src_handle = l_handle
dst_handle = r_handle
else:
assert False, \
'connect -- Must connect an input to an output'
# Create an edge from src to dst
src_step_name, src_direction, src_f = src_handle
dst_step_name, dst_direction, dst_f = dst_handle
if dst_step_name not in s._edges_i.keys():
s._edges_i[ dst_step_name ] = []
if src_step_name not in s._edges_o.keys():
s._edges_o[ src_step_name ] = []
src = ( src_step_name, src_f )
dst = ( dst_step_name, dst_f )
e = Edge( src, dst )
# Add this edge to tracking
s._edges_i[ dst_step_name ].append( e )
s._edges_o[ src_step_name ].append( e )
def connect_by_name( s, src, dst ):
# Get the step (in case the user provided step names instead)
if type( src ) != Step:
src_step = s.get_step( src )
else:
src_step = src
src_step_name = src_step.get_name()
assert src_step_name in s.all_steps(), \
'connect_by_name -- ' \
'Step "{}" not found in graph'.format( src_step_name )
if type( dst ) != Step:
dst_step = s.get_step( dst )
else:
dst_step = dst
dst_step_name = dst_step.get_name()
assert dst_step_name in s.all_steps(), \
'connect_by_name -- ' \
'Step "{}" not found in graph'.format( dst_step_name )
# Find same-name matches between the src output and dst input
src_outputs = src_step.all_outputs()
dst_inputs = dst_step.all_inputs()
overlap = set( src_outputs ).intersection( set( dst_inputs ) )
# For all overlaps, connect src to dst
for name in overlap:
l_handle = src_step.o( name )
r_handle = dst_step.i( name )
s.connect( l_handle, r_handle )
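  # Usage sketch (illustrative, not part of the class): building two steps and
  # connecting every same-named output/input pair. The step paths below are
  # hypothetical; Step construction mirrors set_adk above.
  #
  #   g     = Graph()
  #   rtl   = Step( '/path/to/rtl-step',   default=False )
  #   synth = Step( '/path/to/synth-step', default=False )
  #   g.add_step( rtl )
  #   g.add_step( synth )
  #   g.connect_by_name( rtl, synth )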
#-----------------------------------------------------------------------
# Parameter system
#-----------------------------------------------------------------------
def update_params( s, params ):
for step_name in s.all_steps():
s.get_step( step_name ).update_params( params )
def expand_params( s ):
for step_name in s.all_steps():
s.get_step( step_name ).expand_params()
#-----------------------------------------------------------------------
# Design-space exploration
#-----------------------------------------------------------------------
# param_space
#
# Spins out new copies of the step across the parameter space.
#
# For example, for a graph like this:
#
# +-----+ +-----------+ +-----------+
# | foo | -> | bar | -> | baz |
# | | | ( p = 1 ) | | |
# +-----+ +-----------+ +-----------+
#
# this call:
#
# s.param_space( 'bar', 'p', [ 1, 2, 3 ] )
#
# will be transformed into a graph like this:
#
# +-----------+ +-----------+
# +-> | bar-p-1 | -> | baz-p-1 |
# | | ( p = 1 ) | | |
# | +-----------+ +-----------+
# +-----+ | +-----------+ +-----------+
# | foo | --> | bar-p-2 | -> | baz-p-2 |
# | | | | ( p = 2 ) | | |
# +-----+ | +-----------+ +-----------+
# | +-----------+ +-----------+
# +-> | bar-p-3 | -> | baz-p-3 |
# | ( p = 3 ) | | |
# +-----------+ +-----------+
#
# Returns a list of (parameterized) steps (i.e., 'bar-p-1', 'bar-p-2',
# and 'bar-p-3').
#
def param_space( s, step, param_name, param_space ):
# Get the step name (in case the user provided a step object instead)
if type( step ) != str:
step_name = step.get_name()
else:
step_name = step
step = s.get_step( step_name )
assert step_name in s.all_steps(), \
'param_space -- ' \
'Step "{}" not found in graph'.format( step_name )
# Remove the step and its incoming edges from the graph
del( s._steps[ step_name ] )
elist_i = s._param_space_helper_remove_incoming_edges( step_name )
# Now spin out new copies of the step across the parameter space
new_steps = []
for p in param_space:
p_step = step.clone()
p_step.set_param( param_name, p )
p_step.set_name( step_name + '-' + param_name + '-' + str(p) )
s.add_step( p_step )
for e in elist_i:
src_step_name, src_f = e.get_src()
dst_step_name, dst_f = e.get_dst()
src_step = s.get_step( src_step_name )
s.connect( src_step.o( src_f ), p_step.i( dst_f ) )
new_steps.append( p_step )
# Get the steps that directly depended on this step
dep_steps = s._param_space_helper_get_dependent_steps( step_name )
# For each dependent step, replicate and connect to the new steps
for dep_step in dep_steps:
s._param_space_helper( step = dep_step,
old_src = step,
new_srcs = new_steps,
param_name = param_name,
param_space = param_space )
return new_steps
def _param_space_helper( s, step, old_src, new_srcs, param_name,
param_space ):
step_name = step.get_name()
# Remove the step and its incoming edges from the graph
del( s._steps[ step_name ] )
elist_i = s._param_space_helper_remove_incoming_edges( step_name )
# Now spin out new copies of the step + attach them to new srcs
new_steps = []
for i, p in enumerate( param_space ):
p_step = step.clone()
p_step.set_name( step_name + '-' + param_name + '-' + str(p) )
s.add_step( p_step )
for e in elist_i:
src_step_name, src_f = e.get_src()
dst_step_name, dst_f = e.get_dst()
if src_step_name == old_src.get_name():
src_step = new_srcs[i]
else:
src_step = s.get_step( src_step_name )
s.connect( src_step.o( src_f ), p_step.i( dst_f ) )
new_steps.append( p_step )
# Get the steps that directly depended on this step
dep_steps = s._param_space_helper_get_dependent_steps( step_name )
# For each dependent step, replicate and connect to the new steps
for dep_step in dep_steps:
s._param_space_helper( step = dep_step,
old_src = step,
new_srcs = new_steps,
param_name = param_name,
param_space = param_space )
return new_steps
def _param_space_helper_remove_incoming_edges( s, step_name ):
try:
elist_i = s._edges_i[ step_name ]
del( s._edges_i[ step_name ] ) # Delete edges in incoming edge list
for e in elist_i: # Also delete these edges in outgoing edge lists
src_step_name, src_f = e.get_src()
src_elist_o = s._edges_o[src_step_name]
del( src_elist_o[ src_elist_o.index( e ) ] )
except KeyError:
elist_i = []
return elist_i
def _param_space_helper_get_dependent_steps( s, step_name ):
dep_steps = set()
try:
elist_o = s._edges_o[ step_name ]
except KeyError:
elist_o = []
for e in elist_o:
dst_step_name, dst_f = e.get_dst()
dep_steps.add( s.get_step( dst_step_name ) )
return dep_steps
#-----------------------------------------------------------------------
# Ninja helpers
#-----------------------------------------------------------------------
def escape_dollars( s ):
for step_name in s.all_steps():
s.get_step( step_name ).escape_dollars()
#-----------------------------------------------------------------------
# Drawing
#-----------------------------------------------------------------------
# plot
#
# Dumps a graphviz dot file
def plot( s, dot_title='', dot_f='graph.dot' ):
# Templates for generating graphviz dot statements
graph_template = \
'''\
digraph {{
label="{title}";
labelloc="t";
fontsize=60;
size="8.5;11";
ratio="fill";
margin=0;
pad=1;
rankdir="TB";
concentrate=true;
splines=polyline;
center=true;
nodesep=1.2;
ranksep=0.8;
{nodes}
{edges}
}}\
'''
    node_template =
# dchud/demodocus
"""
Software License Agreement (Apache 2.0)
Copyright (c) 2020, The MITRE Corporation.
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This project was developed by The MITRE Corporation.
If this code is used in a deployment or embedded within another project,
it is requested that you send an email to <EMAIL> in order to
let us know where this software is being used.
"""
from collections import defaultdict, deque
import os
import pathlib
import itertools
import json
import re
import logging
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
from lxml import etree
from lxml.html import html5parser
from demodocusfw.analysis import BaseAnalyzer
from demodocusfw.utils import color_contrast_ratio
from demodocusfw.web.action import keyboard_actions, mouse_actions
logger = logging.getLogger('analysis.webaccessanalyzer')
"""Customized version of BaseAnalyzer that also analyzes a graph for
accessibility violations and possible outcomes if inaccessible elements are made
accessible."""
class WebAccessAnalyzer(BaseAnalyzer):
"""Analyzer for the Web/Accessibility interface/app_context. Adds
functionality to analyze inaccessible parts of the graph based on different
user models. Also provides inaccessible elements, and the result of fixing
them.
"""
# --
# Formatting sections. Title and guide to be printed for each section.
# May be overridden.
#
_build_sections = \
{
"els_states":
{"label": "Visible Border & Tab Analysis",
"guide_lines":
[
' * **valid tab order** -- all elements for a crawled state (not stub state) can be navigated to via a `TAB` key (forward and backward), and follow a logical ordering (top to bottom, left to right).',
' * **visual indication of focus** -- issues occur when an element for a crawled state (not stub state) has the same focused and unfocused style information.'
]
}
}
_crawl_user_sections = \
{
"inacc":
{"label": "Inaccessible Elements",
"guide_lines":
[
' * any reference to a state contains a hyperlink to its corresponding `HTML` dom file'
]
}
}
# --
# Constructor/initializer. Loads in graph and sets up needed class fields.
# May be overridden.
#
def __init__(self, graph_fpath, config):
# Initialize any parameters specific to this class
self._dom_path = None
# Call the super init
super().__init__(graph_fpath, config)
# Perform other methods specific to this class
self._set_dom_path()
# Variables necessary for the element map creation
self._keyboard_actions_str = {str(act) for act in keyboard_actions}
self._mouse_actions_str = {str(act) for act in mouse_actions}
self._group_id = 0
self._state_trees = dict()
# --
# Property (getter/setter) methods.
# May be overridden.
#
@property
def dom_path(self):
return self._dom_path
def state_tree(self, state_id):
# Load in the dom tree if it's not already loaded
if state_id not in self._state_trees:
path = pathlib.Path(self.output_path)
state_fpath = path / "states" / f"state-{state_id}.html"
tree = html5parser.parse(str(state_fpath.absolute()))
# Drop the pesky namespaces
tree = self._strip_ns_prefix(tree)
self._state_trees[state_id] = tree
else:
tree = self._state_trees[state_id]
return tree
# --
# Helper methods for initialization.
# May be overridden.
#
def _set_dom_path(self):
# initialize the dom file path
graph_path_obj = pathlib.Path(self.graph_fpath)
dom_path = graph_path_obj.parent / 'dom'
if dom_path.is_dir():
self._dom_path = dom_path
# --
# Methods to perform section-specific analyses.
# May be overridden.
#
def _get_user_actions_subgraph(self, user):
"""Get the graph of nodes that is accessible for a given user's actions.
Note: This graph is based on the actions that the user can perform. This
would include subsets of edge that may be in an inaccessible region of
the graph, but have the necessary actions (found by the OmniUser) to be
accessible if they were able to be reached.
Args:
user: str denoting a crawl user
Returns:
user_actions_subgraph: networkx.graph of nodes reachable with this user's actions
"""
user_model = self.users["crawl_users"][user]["user_model"]
# user_model's actions
user_actions = {str(e) for e in user_model.actions}
# getting edges of traversable user_model's actions
edges_with_user_action = [(u, v, k) for u, v, k, d in
self.full_graph.edges(data=True, keys=True)
if d['action'] in user_actions]
# getting graph of traversable user_model's actions
user_actions_subgraph = self.full_graph.edge_subgraph(
edges_with_user_action).copy()
return user_actions_subgraph
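    @staticmethod
    def _edge_subgraph_sketch():
        """Illustrative sketch, not used by the analyzer: shows the edge-filter
        pattern from _get_user_actions_subgraph on a tiny hand-built graph.
        All graph contents here are made up."""
        g = nx.MultiDiGraph()
        g.add_edge(0, 1, action="click")
        g.add_edge(1, 2, action="keypress")
        user_actions = {"keypress"}
        edges = [(u, v, k) for u, v, k, d in g.edges(data=True, keys=True)
                 if d["action"] in user_actions]
        # Only the 1 -> 2 keyboard edge (and its endpoints) survives the filter.
        return g.edge_subgraph(edges).copy()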
def _get_inaccessible_graph(self, user):
"""Get the graph of nodes that is inaccessible for a given user.
Args:
user: str denoting a crawl user
Returns:
inaccessible_graph: networkx.graph of nodes the user cannot access
"""
inaccessible_graph = self.full_graph.copy()
user_graph = self.users["crawl_users"][user]["graph"]
inaccessible_graph.remove_nodes_from(user_graph.nodes())
return inaccessible_graph
def _inaccessible_user_analysis(self, user):
"""Top-level method for the inaccessible analysis section.
Args:
user: str denoting a crawl user
Returns:
print_lines: list of lines to print to the report
"""
print_lines = list()
print_lines.append(f'### <a name="{user.lower()}-inacc"></a> Inaccessible Elements')
user_graph = self.users["crawl_users"][user]["graph"]
user_actions_graph = self._get_user_actions_subgraph(user)
user_inaccessible_graph = self._get_inaccessible_graph(user)
user_node_ids = list(user_graph.nodes())
inaccessible_node_ids = list(user_inaccessible_graph.nodes())
potential_improvements = self._find_all_accessible(user_node_ids, inaccessible_node_ids, user_actions_graph)
_, lines = self._elements_to_fix(potential_improvements, user)
print_lines += lines
# update new graph G with new states included and path scores
new_states = {k: str(sorted(v['new_states_included'])) for k, v in
potential_improvements.items()}
for i in user_node_ids:
new_states[i] = str([])
nx.set_node_attributes(self.full_graph, new_states, f"NewStates{user[0:-4]}")
return print_lines
def _find_accessible_if(self, idx, user_node_ids, user_actions_graph,
primary_connection=True):
"""Analyzes a single state for accessible-if states for a given user
Args:
idx: index of a node to analyze
user_node_ids: list of node indices that are in the user graph
user_actions_graph: networkx graph with only action edges for user
primary_connection: bool to denote if idx is one edge away from an already accessible state
Returns:
additional_user_edges: dictionary of lists "possible_edges",
"new_states_included", "new_edges_included"
"""
additional_user_edges = defaultdict(list)
# get all the edges of graph G that go into node idx
if primary_connection:
all_edges_to_idx = list(self.full_graph.in_edges([idx], keys=True))
else:
all_edges_to_idx = list(user_actions_graph.in_edges([idx], keys=True))
# get edges from all_edges_to_idx that start from a user_node
possible_edges = [e for e in all_edges_to_idx if e[0] in user_node_ids]
# if there are edges from the user graph nodes to the idx node
if len(possible_edges) > 0 and user_actions_graph.has_node(idx):
# find new edges that would be included if we add node idx to the user graph
new_edges_included = [(u, v, k) for (u, v, k) in
user_actions_graph.out_edges(idx, keys=True)
if v not in user_node_ids]
# record this in a dictionary
new_states_included = sorted(list(set([v for (u, v, k)
in new_edges_included])))
if primary_connection:
additional_user_edges["possible_edges"] += possible_edges
additional_user_edges["new_states_included"] += new_states_included
additional_user_edges["new_edges_included"] += new_edges_included
return additional_user_edges
def _find_all_accessible(self, user_node_ids, inaccessible_node_ids,
user_actions_graph):
"""Analyzes an entire graph for accessible-if states
Args:
user_node_ids: list of node indices that are in the user graph
inaccessible_node_ids: list of node indices that are not in the user graph
user_actions_graph: networkx graph with only action edges for user
Returns:
potential_improvements: dictionary of lists "possible_edges", "new_states_included", "new_edges_included"
"""
potential_improvements = dict()
# loop through all nodes not included in the user graph
for idx in inaccessible_node_ids:
# initialize looping parameters for each
primary_connection = True
user_node_ids_copy = user_node_ids.copy()
inaccessible_node_ids_copy = inaccessible_node_ids.copy()
inaccessible_node_ids_copy.insert(0, inaccessible_node_ids_copy.pop(inaccessible_node_ids_copy.index(idx)))
adtl_user_edges = defaultdict(list)
# iterate through all the nodes not in user graph (including newly accessible nodes)
while inaccessible_node_ids_copy:
idx_copy = inaccessible_node_ids_copy.pop(0)
adtl_user_edges_tmp = self._find_accessible_if(idx_copy,
user_node_ids_copy,
user_actions_graph,
primary_connection=primary_connection)
adtl_user_edges["possible_edges"] += adtl_user_edges_tmp["possible_edges"]
adtl_user_edges["new_states_included"] += adtl_user_edges_tmp["new_states_included"]
adtl_user_edges["new_edges_included"] += adtl_user_edges_tmp["new_edges_included"]
# if new states were discovered
if len(adtl_user_edges_tmp['new_states_included']) > 0:
# update the user nodes
user_node_ids_copy += [idx_copy]
user_node_ids_copy += adtl_user_edges_tmp['new_states_included']
user_node_ids_copy.sort()
# break the loop and move onto the next node to test
elif primary_connection:
break
# set flag to false after first iteration
primary_connection = False
# record all states/actions discovered for state id: idx
potential_improvements[idx] = adtl_user_edges
return potential_improvements
@staticmethod
def _print_user_path(graph, path, additional_spaces=0):
"""Print the path a user would take for a particular user graph
Args:
graph: networkx graph representing the user state graph.
path: list of state_ids
additional_spaces: integer for the additional space chars for each line
Returns:
print_lines: list of lines to print to the report
"""
elements = []
actions = []
# formatting the values for easy printing
for i in range(len(path) - 1):
actions_list = []
for j in graph[path[i]][path[i + 1]]:
actions_list.append(graph[path[i]][path[i + 1]][j]['action'])
actions.append(actions_list)
elements.append(graph[path[i]][path[i + 1]][j]['element'])
# easy print!
spaces = " " * additional_spaces
print_lines = []
        for i
import abc
import numpy as np
from scipy.signal import convolve, sosfiltfilt
from scipy.optimize import linear_sum_assignment
from detectsound.utils import get_1d_gauss_kernel, get_1d_LoG_kernel, first_true, last_true
class _BlobExtractor:
"""
A general-purpose detector for extracting blob-like features from
spectrogram-like inputs. It is an implementation of the algorithm described
in -
<NAME>, <NAME>, and <NAME>. "A general
purpose automatic detector of broadband transient signals in underwater
audio." In 2018 OCEANS-MTS/IEEE Kobe Techno-Oceans (OTO), pp. 1-6.
IEEE, 2018.
This class defines the base operations that will be used in both batch-mode
and streaming-mode applications.
Parameters
----------
num_f_bins : int
The height of spectrogram-like 2d inputs.
centroid_bounds : 2 element list or tuple of ints
Defines the range of bins (y-axis indices) in inputs within which to
look for 1d plateaus.
min_snr : float
A threshold for discarding low SNR 1d plateaus in each frame.
min_frames : int
The minimum number of frames that a traced blob must span to be
considered valid.
cost_std : 3 element list or tuple of floats
Standard deviation of the vector [center bin, height, snr] (as
applies to the leading end of a blob being traced) for use in
computing Mahalanobis distances as costs for associating candidate
extensions to blobs being traced.
first_scale : int or float
Sigma value of the first scale Laplacian of Gaussian operator. The
remaining scales will be automatically populated as a geometric
progression (first_scale * 2^n) to fit within num_f_bins.
t_blur_sigma : int or float, optional
If not None, the value will define a Gaussian kernel that will be
convolved with the inputs along the time axis to smoothen highly rapid
fluctuations.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, num_f_bins, centroid_bounds, min_snr,
min_frames, cost_std,
first_scale=2., t_blur_sigma=None):
self._threshold = min_snr
self._min_width = min_frames
self._centroid_bounds = centroid_bounds
self._cost_std = np.asarray(cost_std, dtype=np.float32)
self._t_conv_sigma = t_blur_sigma
if t_blur_sigma is not None:
kernel = get_1d_gauss_kernel(self._t_conv_sigma)
self._t_conv_kernel = np.reshape(kernel, [1, int(len(kernel))])
else:
self._t_conv_kernel = None
# Determine num scales possible and populate sigmas accordingly
n = np.arange(
np.floor(np.log2((num_f_bins - 1) / ((2 * 3) * first_scale)) + 1),
dtype=np.int)
self._f_conv_sigmas = first_scale * (2 ** n)
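        # Worked example (illustrative): with num_f_bins=257 and first_scale=2,
        # floor(log2(256 / 12) + 1) = 5, so n = [0, 1, 2, 3, 4] and the LoG
        # sigmas become [2, 4, 8, 16, 32]; the next scale (64) would violate
        # (2 * 3) * sigma <= num_f_bins - 1 and is therefore excluded.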
self._f_conv_kernels = [get_1d_LoG_kernel(sigma)
for sigma in self._f_conv_sigmas]
self._f_conv_kernels = [np.reshape(kernel, [len(kernel), 1])
for kernel in self._f_conv_kernels]
@abc.abstractmethod
def extract_blobs(self, frames):
pass
def _process_frames(self, padded_frames, active_blobs, frame_offset=0):
"""The workhorse of the class. Works with both batch- and streaming-mode
applications.
- Input spectrogram frames must be pre-padded. If batch mode, pad with 0
on both left & right for N frames. If streaming mode, concatenate only
left with previous N frames. N depends on t_conv_sigma.
- 'active_blobs' will be written to. Pass an empty list variable if
batch mode, else a list containing "active" blobs traced up to this
invocation.
- 'frame_offset' is only to be used in streaming mode to adjust for the
starting frame's index.
Returns a list of any blobs that ended before the last input frame.
"""
DISALLOWED = float(1e30)
# Temporal blurring, with a Gaussian kernel along x-axis only:
# Get only the valid points after convolution
frames = padded_frames if self._t_conv_sigma is None else \
convolve(padded_frames, self._t_conv_kernel, mode='valid')
num_f, num_t = frames.shape[0], frames.shape[1]
scale_space = self._generate_scale_space(frames)
salient_pts_mask, zc_or_valley_mask = \
_BlobExtractor._get_zero_crossing_and_salient_scale_masks(
scale_space)
# Throw away the smallest scale elements.
# Note: it's already been done so for salient_pts_mask and
# zc_or_valley_mask.
scale_space = scale_space[:, :, 1:]
# To hold blobs that were done tracing before the current frame
inactive_blobs = list()
# Process frames iteratively
for frame_idx in range(num_t):
# "cand"idate extensions
cand_centers_f, cand_scales = \
np.where(salient_pts_mask[
self._centroid_bounds[0]:self._centroid_bounds[1] + 1,
frame_idx, :])
# Adjust for clipped input
cand_centers_f += self._centroid_bounds[0]
# Gather candidate info (F edges & SNRs)
num_cands = len(cand_centers_f)
if num_cands > 0:
# 2d array of F extents for each candidate
cand_edges = np.asarray([
[
last_true(
zc_or_valley_mask[:center_f, frame_idx, scale],
0),
first_true(
zc_or_valley_mask[(center_f + 1):,
frame_idx,
scale],
num_f - 2 - center_f) + center_f + 1]
for center_f, scale in zip(cand_centers_f, cand_scales)],
dtype=np.uint32)
# Candidates' SNRs (height in scale space)
cand_snrs = np.asarray([
scale_space[f_idx, frame_idx, s_idx]
for f_idx, s_idx in zip(cand_centers_f, cand_scales)])
else:
cand_edges = np.zeros((0, 2), dtype=np.uint32)
cand_snrs = np.zeros((0,))
# Initialize mask
unused_cands_mask = np.full((num_cands,), True, dtype=np.bool)
num_active_blobs = len(active_blobs)
if num_active_blobs > 0:
# Determine "costs" of assigning a candidate to an active blob
costs = np.stack([
blob.validate_and_measure_costs(
cand_centers_f, cand_edges, cand_snrs,
self._cost_std, DISALLOWED)
for blob in active_blobs])
# Mark very-high cost assignments as DISALLOWED
# costs[costs > (3. ** 2) * 3] = DISALLOWED
# Solve the least-cost assignment problem
blob_idxs, cand_idxs = linear_sum_assignment(costs)
# Only retain valid pairings
temp = np.asarray(
[costs[blob_idx, cand_idx] < DISALLOWED
for blob_idx, cand_idx in zip(blob_idxs, cand_idxs)],
dtype=np.bool) # Get the mask
blob_idxs, cand_idxs = blob_idxs[temp], cand_idxs[temp]
# Blobs with a matched extension candidate
for blob_idx, cand_idx in zip(blob_idxs, cand_idxs):
active_blobs[blob_idx].extend(cand_centers_f[cand_idx],
cand_edges[cand_idx, :],
cand_snrs[cand_idx])
# Mark unused candidates for possibly starting new "active"
# blobs
unused_cands_mask[cand_idxs] = False
# Move blobs without matched extensions into "inactive" list if
# they are long enough
unextendable_blobs_mask = np.full((num_active_blobs,), True,
dtype=np.bool)
unextendable_blobs_mask[blob_idxs] = False
for blob_idx in np.flip(np.where(unextendable_blobs_mask)[0]):
# Popping in reverse order so that indices remain intact
finished_blob = active_blobs.pop(blob_idx)
if finished_blob.width >= self._min_width:
inactive_blobs.append(finished_blob.finalize())
# Unassigned candidates. Start new "active" blobs from them if they
# satisfy threshold criterion.
for bin_idx, edge_idxs, snr in \
zip(cand_centers_f[unused_cands_mask],
cand_edges[unused_cands_mask, :],
cand_snrs[unused_cands_mask]):
if snr >= self._threshold:
active_blobs.append(_Blob(frame_offset + frame_idx,
bin_idx, edge_idxs, snr))
return inactive_blobs
def _generate_scale_space(self, surface):
"""Apply LoG filters at chosen scales after padding 'surface'
appropriately."""
num_f, num_t = surface.shape[0], surface.shape[1]
# Preallocate
scale_space = np.zeros((num_f, num_t, len(self._f_conv_sigmas)),
dtype=np.float32)
# Process at all scales
prev_scale_padding = 0
in_surface = surface
for scale_idx, scale_kernel in enumerate(self._f_conv_kernels):
# Add padding (incrementally) prior to convolutions so that values
# at boundaries are not very unrealistic.
curr_scale_padding = len(scale_kernel) // 2
incr_padding = curr_scale_padding - prev_scale_padding
in_surface = np.pad(in_surface,
[[incr_padding, incr_padding], [0, 0]],
'symmetric')
# Apply LoG filter
scale_space[:, :, scale_idx] = \
convolve(in_surface, scale_kernel, mode='valid')
# Update for next iteration
prev_scale_padding = curr_scale_padding
return scale_space
@staticmethod
def _get_zero_crossing_and_salient_scale_masks(scale_space, min_height=0):
# TODO: the 'saliency' search can be restricted to the "valid"
# user-chosen bandwidth. If done, zero crossing (and valley) masks must
# still be done for the whole frequency range.
# Nomenclature guide for below 4 operations:
# gt = greater than
# nf = next frequency bin, pf = previous frequency bin
# ns = next scale
gt_nf = np.greater(
np.pad(scale_space, ((1, 0), (0, 0), (0, 0)), 'constant',
constant_values=-np.inf),
np.pad(scale_space, ((0, 1), (0, 0), (0, 0)), 'constant',
constant_values=-np.inf))
gt_ns = np.greater(
np.pad(scale_space, ((0, 0), (0, 0), (1, 0)), 'constant',
constant_values=-np.inf),
np.pad(scale_space, ((0, 0), (0, 0), (0, 1)), 'constant',
constant_values=-np.inf))
gt_nf_ns = np.greater(
np.pad(scale_space, ((1, 0), (0, 0), (1, 0)), 'constant',
constant_values=-np.inf),
np.pad(scale_space, ((0, 1), (0, 0), (0, 1)), 'constant',
constant_values=-np.inf))
gt_pf_ns = np.greater(
np.pad(scale_space, ((0, 1), (0, 0), (1, 0)), 'constant',
constant_values=-np.inf),
np.pad(scale_space, ((1, 0), (0, 0), (0, 1)), 'constant',
constant_values=-np.inf))
saliency_mask = np.all(
np.stack([
scale_space >= min_height,
gt_nf[1:, :, :], np.logical_not(gt_nf[:-1, :, :]),
gt_ns[:, :, 1:], np.logical_not(gt_ns[:, :, :-1]),
gt_nf_ns[1:, :, 1:], np.logical_not(gt_nf_ns[:-1, :, :-1]),
gt_pf_ns[:-1, :, 1:], np.logical_not(gt_pf_ns[1:, :, :-1])],
axis=3),
axis=3)
scale_space_signs = \
(np.sign(scale_space[:, :, 1:])).astype(dtype=np.int8)
temp = np.abs(scale_space[:, :, 1:])
lower_abs_mask = np.less(temp[:-1, :, :], temp[1:, :, :])
# Find zero-crossings or valley points along frequency axis
zcs = np.not_equal(scale_space_signs[:-1, :, :],
scale_space_signs[1:, :, :])
zcs_or_valleys_mask = np.any(np.stack([
np.pad(np.logical_and(zcs, lower_abs_mask),
((0, 1), (0, 0), (0, 0)), 'constant', constant_values=True),
np.pad(np.logical_and(zcs, np.logical_not(lower_abs_mask)),
((1, 0), (0, 0), (0, 0)), 'constant', constant_values=True),
np.logical_and(np.logical_not(gt_nf[1:, :, 1:]), gt_nf[:-1, :, 1:])
], axis=3), axis=3)
# Throw away the smallest scale elements. Note: for
# zcs_or_valleys_mask, the discarding was already done.
return saliency_mask[:, :, 1:], zcs_or_valleys_mask
class BlobExtractor(_BlobExtractor):
"""
A general-purpose detector for extracting blob-like features from
spectrogram-like inputs. It is an implementation of the algorithm described
in -
<NAME>, <NAME>, and <NAME>. "A general
purpose automatic detector of broadband transient signals in underwater
audio." In 2018 OCEANS-MTS/IEEE | |
            elif byte == 247:       # preamble
i, k = self._arg(1), self._arg(1)
x = self.file.read(k)
cs, ds = self._arg(4), self._arg(4)
self._pre(i, x, cs, ds)
elif byte == 248: # postamble (just some number of 248s)
break
else:
raise ValueError("unknown vf opcode %d" % byte)
def _init_packet(self, pl):
if self.state != _dvistate.outer:
raise ValueError("Misplaced packet in vf file")
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack, self.text, self.boxes = [], [], []
self.f = self._first_font
return self.file.tell() + pl
def _finalize_packet(self, packet_char, packet_width):
self._chars[packet_char] = Page(
text=self.text, boxes=self.boxes, width=packet_width,
height=None, descent=None)
self.state = _dvistate.outer
def _pre(self, i, x, cs, ds):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of vf file")
if i != 202:
raise ValueError("Unknown vf format %d" % i)
if len(x):
matplotlib.verbose.report('vf file comment: ' + x, 'debug')
self.state = _dvistate.outer
# cs = checksum, ds = design size
def _fix2comp(num):
"""
Convert from two's complement to negative.
"""
assert 0 <= num < 2**32
if num & 2**31:
return num - 2**32
else:
return num
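# Illustrative examples for _fix2comp (values chosen for this note only):
#   _fix2comp(5)         == 5
#   _fix2comp(2**32 - 1) == -1    # high bit set, so 0xFFFFFFFF maps to -1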
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
class Tfm(object):
"""
A TeX Font Metric file.
This implementation covers only the bare minimum needed by the Dvi class.
Parameters
----------
filename : string or bytestring
Attributes
----------
checksum : int
Used for verifying against the dvi file.
design_size : int
Design size of the font (unknown units)
width, height, depth : dict
Dimensions of each character, need to be scaled by the factor
specified in the dvi file. These are dicts because indexing may
not start from 0.
"""
__slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
def __init__(self, filename):
matplotlib.verbose.report('opening tfm file ' + filename, 'debug')
with open(filename, 'rb') as file:
header1 = file.read(24)
lh, bc, ec, nw, nh, nd = \
struct.unpack(str('!6H'), header1[2:14])
matplotlib.verbose.report(
'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % (
lh, bc, ec, nw, nh, nd), 'debug')
header2 = file.read(4*lh)
self.checksum, self.design_size = \
struct.unpack(str('!2I'), header2[:8])
# there is also encoding information etc.
char_info = file.read(4*(ec-bc+1))
widths = file.read(4*nw)
heights = file.read(4*nh)
depths = file.read(4*nd)
self.width, self.height, self.depth = {}, {}, {}
widths, heights, depths = \
[struct.unpack(str('!%dI') % (len(x)/4), x)
for x in (widths, heights, depths)]
for idx, char in enumerate(xrange(bc, ec+1)):
byte0 = ord(char_info[4*idx])
byte1 = ord(char_info[4*idx+1])
self.width[char] = _fix2comp(widths[byte0])
self.height[char] = _fix2comp(heights[byte1 >> 4])
self.depth[char] = _fix2comp(depths[byte1 & 0xf])
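# Usage sketch (illustrative; assumes 'cmr10.tfm' is on the TeX search path and
# that find_tex_file from this module resolves it):
#   tfm = Tfm(find_tex_file('cmr10.tfm'))
#   tfm.width[ord('A')]   # raw width; the dvi file supplies the scale factor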
PsFont = namedtuple('Font', 'texname psname effects encoding filename')
class PsfontsMap(object):
"""
A psfonts.map formatted file, mapping TeX fonts to PS fonts.
Usage::
>>> map = PsfontsMap(find_tex_file('pdftex.map'))
>>> entry = map[b'ptmbo8r']
>>> entry.texname
b'ptmbo8r'
>>> entry.psname
b'Times-Bold'
>>> entry.encoding
'/usr/local/texlive/2008/texmf-dist/fonts/enc/dvips/base/8r.enc'
>>> entry.effects
{'slant': 0.16700000000000001}
>>> entry.filename
Parameters
----------
filename : string or bytestring
Notes
-----
For historical reasons, TeX knows many Type-1 fonts by different
names than the outside world. (For one thing, the names have to
fit in eight characters.) Also, TeX's native fonts are not Type-1
but Metafont, which is nontrivial to convert to PostScript except
as a bitmap. While high-quality conversions to Type-1 format exist
and are shipped with modern TeX distributions, we need to know
which Type-1 fonts are the counterparts of which native fonts. For
these reasons a mapping is needed from internal font names to font
file names.
A texmf tree typically includes mapping files called e.g.
:file:`psfonts.map`, :file:`pdftex.map`, or :file:`dvipdfm.map`.
The file :file:`psfonts.map` is used by :program:`dvips`,
:file:`pdftex.map` by :program:`pdfTeX`, and :file:`dvipdfm.map`
by :program:`dvipdfm`. :file:`psfonts.map` might avoid embedding
the 35 PostScript fonts (i.e., have no filename for them, as in
the Times-Bold example above), while the pdf-related files perhaps
only avoid the "Base 14" pdf fonts. But the user may have
configured these files differently.
"""
__slots__ = ('_font', '_filename')
def __init__(self, filename):
self._font = {}
self._filename = filename
if six.PY3 and isinstance(filename, bytes):
encoding = sys.getfilesystemencoding() or 'utf-8'
self._filename = filename.decode(encoding, errors='replace')
with open(filename, 'rb') as file:
self._parse(file)
def __getitem__(self, texname):
assert isinstance(texname, bytes)
try:
result = self._font[texname]
except KeyError:
fmt = ('A PostScript file for the font whose TeX name is "{0}" '
'could not be found in the file "{1}". The dviread module '
'can only handle fonts that have an associated PostScript '
'font file. '
'This problem can often be solved by installing '
'a suitable PostScript font package in your (TeX) '
'package manager.')
msg = fmt.format(texname.decode('ascii'), self._filename)
msg = textwrap.fill(msg, break_on_hyphens=False,
break_long_words=False)
matplotlib.verbose.report(msg, 'helpful')
raise
fn, enc = result.filename, result.encoding
if fn is not None and not fn.startswith(b'/'):
fn = find_tex_file(fn)
if enc is not None and not enc.startswith(b'/'):
enc = find_tex_file(result.encoding)
return result._replace(filename=fn, encoding=enc)
def _parse(self, file):
"""
Parse the font mapping file.
The format is, AFAIK: texname fontname [effects and filenames]
Effects are PostScript snippets like ".177 SlantFont",
filenames begin with one or two less-than signs. A filename
ending in enc is an encoding file, other filenames are font
files. This can be overridden with a left bracket: <[foobar
indicates an encoding file named foobar.
There is some difference between <foo.pfb and <<bar.pfb in
subsetting, but I have no example of << in my TeX installation.
"""
# If the map file specifies multiple encodings for a font, we
# follow pdfTeX in choosing the last one specified. Such
# entries are probably mistakes but they have occurred.
# http://tex.stackexchange.com/questions/10826/
# http://article.gmane.org/gmane.comp.tex.pdftex/4914
empty_re = re.compile(br'%|\s*$')
word_re = re.compile(
br'''(?x) (?:
"<\[ (?P<enc1> [^"]+ )" | # quoted encoding marked by [
"< (?P<enc2> [^"]+.enc)" | # quoted encoding, ends in .enc
"<<? (?P<file1> [^"]+ )" | # quoted font file name
" (?P<eff1> [^"]+ )" | # quoted effects or font name
<\[ (?P<enc3> \S+ ) | # encoding marked by [
< (?P<enc4> \S+ .enc) | # encoding, ends in .enc
<<? (?P<file2> \S+ ) | # font file name
(?P<eff2> \S+ ) # effects or font name
)''')
effects_re = re.compile(
br'''(?x) (?P<slant> -?[0-9]*(?:\.[0-9]+)) \s* SlantFont
| (?P<extend>-?[0-9]*(?:\.[0-9]+)) \s* ExtendFont''')
lines = (line.strip()
for line in file
if not empty_re.match(line))
for line in lines:
effects, encoding, filename = b'', None, None
words = word_re.finditer(line)
# The named groups are mutually exclusive and are
# referenced below at an estimated order of probability of
# occurrence based on looking at my copy of pdftex.map.
# The font names are probably unquoted:
w = next(words)
texname = w.group('eff2') or w.group('eff1')
w = next(words)
psname = w.group('eff2') or w.group('eff1')
for w in words:
# Any effects are almost always quoted:
eff = w.group('eff1') or w.group('eff2')
if eff:
effects = eff
continue
# Encoding files usually have the .enc suffix
# and almost never need quoting:
enc = (w.group('enc4') or w.group('enc3') or
w.group('enc2') or w.group('enc1'))
if enc:
if encoding is not None:
matplotlib.verbose.report(
'Multiple encodings for %s = %s'
% (texname, psname),
'debug')
encoding = enc
continue
# File names are probably unquoted:
filename = w.group('file2') or w.group('file1')
effects_dict = {}
for match in effects_re.finditer(effects):
slant = match.group('slant')
if slant:
effects_dict['slant'] = float(slant)
else:
effects_dict['extend'] = float(match.group('extend'))
self._font[texname] = PsFont(
texname=texname, psname=psname, effects=effects_dict,
encoding=encoding, filename=filename)
class Encoding(object):
"""
Parses a \\*.enc file referenced from a psfonts.map style file.
The format this class understands is a very limited subset of
PostScript.
Usage (subject to change)::
for name in Encoding(filename):
whatever(name)
Parameters
----------
filename : string or bytestring
Attributes
----------
encoding : list
List of character names
"""
__slots__ = ('encoding',)
def __init__(self, filename):
with open(filename, 'rb') as file:
matplotlib.verbose.report('Parsing TeX encoding ' + filename,
'debug-annoying')
self.encoding = self._parse(file)
matplotlib.verbose.report('Result: ' + repr(self.encoding),
'debug-annoying')
def __iter__(self):
for name in self.encoding:
yield name
def _parse(self, file):
result = []
lines = (line.split(b'%', 1)[0].strip() for line in file)
data = b''.join(lines)
beginning = data.find(b'[')
if beginning < 0:
raise ValueError("Cannot locate beginning of encoding in {}"
.format(file))
data = data[beginning:]
end = data.find(b']')
        if end < 0:
            raise ValueError("Cannot locate end of encoding in {}"
                             .format(file))
            data_columns = [[column] + self.values[column] for column in self.values]
else:
k = int(max_columns / 2)
columns = [elem for elem in self.values]
values0 = [[columns[i]] + self.values[columns[i]] for i in range(k)]
values1 = [["..." for i in range(len(self.values[columns[0]]) + 1)]]
values2 = [
[columns[i]] + self.values[columns[i]]
for i in range(n - max_columns + k, n)
]
data_columns = values0 + values1 + values2
dtype["..."] = "undefined"
percent = self.percent
for elem in self.values:
if elem not in percent and (elem != "index"):
percent = {}
break
formatted_text = print_table(
data_columns,
is_finished=(self.count <= len(data_columns[0]) + self.offset),
offset=self.offset,
repeat_first_column=("index" in self.values),
return_html=True,
dtype=dtype,
percent=percent,
)
start, end = self.offset + 1, len(data_columns[0]) - 1 + self.offset
formatted_text += '<div style="margin-top:6px; font-size:1.02em">'
if (self.offset == 0) and (len(data_columns[0]) - 1 == self.count):
rows = self.count
else:
if start > end:
rows = "0{}".format(
" of {}".format(self.count) if (self.count > 0) else ""
)
else:
rows = "{}-{}{}".format(
start, end, " of {}".format(self.count) if (self.count > 0) else "",
)
if len(self.values) == 1:
column = list(self.values.keys())[0]
if self.offset > self.count:
formatted_text += "<b>Column:</b> {} | <b>Type:</b> {}".format(
column, self.dtype[column]
)
else:
formatted_text += "<b>Rows:</b> {} | <b>Column:</b> {} | <b>Type:</b> {}".format(
rows, column, self.dtype[column]
)
else:
if self.offset > self.count:
formatted_text += "<b>Columns:</b> {}".format(n)
else:
formatted_text += "<b>Rows:</b> {} | <b>Columns:</b> {}".format(rows, n)
formatted_text += "</div>"
return formatted_text
# ---#
def __repr__(self):
if len(self.values) == 0:
return ""
n = len(self.values)
dtype = self.dtype
max_columns = (
self.max_columns
if self.max_columns > 0
else verticapy.options["max_columns"]
)
if n < max_columns:
data_columns = [[column] + self.values[column] for column in self.values]
else:
k = int(max_columns / 2)
columns = [elem for elem in self.values]
values0 = [[columns[i]] + self.values[columns[i]] for i in range(k)]
values1 = [["..." for i in range(len(self.values[columns[0]]) + 1)]]
values2 = [
[columns[i]] + self.values[columns[i]]
for i in range(n - max_columns + k, n)
]
data_columns = values0 + values1 + values2
dtype["..."] = "undefined"
formatted_text = print_table(
data_columns,
is_finished=(self.count <= len(data_columns[0]) + self.offset),
offset=self.offset,
repeat_first_column=("index" in self.values),
return_html=False,
dtype=dtype,
percent=self.percent,
)
start, end = self.offset + 1, len(data_columns[0]) - 1 + self.offset
if (self.offset == 0) and (len(data_columns[0]) - 1 == self.count):
rows = self.count
else:
if start > end:
rows = "0{}".format(
" of {}".format(self.count) if (self.count > 0) else ""
)
else:
rows = "{}-{}{}".format(
start, end, " of {}".format(self.count) if (self.count > 0) else "",
)
if len(self.values) == 1:
column = list(self.values.keys())[0]
if self.offset > self.count:
formatted_text += "Column: {} | Type: {}".format(
column, self.dtype[column]
)
else:
formatted_text += "Rows: {} | Column: {} | Type: {}".format(
rows, column, self.dtype[column]
)
else:
if self.offset > self.count:
formatted_text += "Columns: {}".format(n)
else:
formatted_text += "Rows: {} | Columns: {}".format(rows, n)
return formatted_text
#
# Methods
#
# ---#
def append(self, tbs):
"""
---------------------------------------------------------------------------
Appends the input tablesample to a target tablesample.
Parameters
----------
tbs: tablesample
Tablesample to append.
Returns
-------
tablesample
self
"""
check_types([("tbs", tbs, [tablesample])])
n1, n2 = self.shape()[0], tbs.shape()[0]
assert n1 == n2, ParameterError(
"The input and target tablesamples must have the same number of columns."
f" Expected {n1}, Found {n2}."
)
cols1, cols2 = [col for col in self.values], [col for col in tbs.values]
for idx in range(n1):
self.values[cols1[idx]] += tbs.values[cols2[idx]]
return self
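# Hypothetical usage sketch (data invented for illustration): columns are
# matched by position, so both tablesamples need the same number of columns.
#
#   t1 = tablesample({"index": ["a", "b"], "col": [1, 2]})
#   t2 = tablesample({"index": ["c"], "col": [3]})
#   t1.append(t2)
#   t1.values["col"]   # -> [1, 2, 3]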
# ---#
def decimal_to_float(self):
"""
---------------------------------------------------------------------------
Converts all the tablesample's decimals to floats.
Returns
-------
tablesample
self
"""
for elem in self.values:
if elem != "index":
for i in range(len(self.values[elem])):
if isinstance(self.values[elem][i], decimal.Decimal):
self.values[elem][i] = float(self.values[elem][i])
return self
# ---#
def merge(self, tbs):
"""
---------------------------------------------------------------------------
Merges the input tablesample to a target tablesample.
Parameters
----------
tbs: tablesample
Tablesample to merge.
Returns
-------
tablesample
self
"""
check_types([("tbs", tbs, [tablesample])])
n1, n2 = self.shape()[1], tbs.shape()[1]
assert n1 == n2, ParameterError(
"The input and target tablesamples must have the same number of rows."
f" Expected {n1}, Found {n2}."
)
for col in tbs.values:
if col != "index":
if col not in self.values:
self.values[col] = []
self.values[col] += tbs.values[col]
return self
# ---#
def shape(self):
"""
---------------------------------------------------------------------------
Computes the tablesample shape.
Returns
-------
tuple
(number of columns, number of rows)
"""
cols = [col for col in self.values]
n, m = len(cols), len(self.values[cols[0]])
return (n, m)
# ---#
def sort(self, column: str, desc: bool = False):
"""
---------------------------------------------------------------------------
Sorts the tablesample using the input column.
Parameters
----------
column: str
Column used to sort the data.
desc: bool, optional
If set to True, the result is sorted in descending order.
Returns
-------
tablesample
self
"""
check_types([("column", column, [str]), ("desc", desc, [bool])])
column = column.replace('"', "").lower()
columns = [col for col in self.values]
idx = None
for i, col in enumerate(columns):
col_tmp = col.replace('"', "").lower()
if column == col_tmp:
idx = i
column = col
if idx is None:
raise MissingColumn(
"The Column '{}' doesn't exist.".format(column.lower().replace('"', ""))
)
n, sort = len(self[column]), []
for i in range(n):
tmp_list = []
for col in columns:
tmp_list += [self[col][i]]
sort += [tmp_list]
sort.sort(key=lambda tup: tup[idx], reverse=desc)
for i, col in enumerate(columns):
self.values[col] = [sort[j][i] for j in range(n)]
return self
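# Hypothetical usage sketch (data invented for illustration): sorting keeps all
# columns aligned because each row is rebuilt as a tuple before sorting.
#
#   t = tablesample({"index": ["a", "b", "c"], "val": [3, 1, 2]})
#   t.sort("val")
#   t.values["val"]     # -> [1, 2, 3]
#   t.values["index"]   # -> ["b", "c", "a"]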
# ---#
def transpose(self):
"""
---------------------------------------------------------------------------
Transposes the tablesample.
Returns
-------
tablesample
transposed tablesample
"""
index = [column for column in self.values]
first_item = list(self.values.keys())[0]
columns = [[] for i in range(len(self.values[first_item]))]
for column in self.values:
for idx, item in enumerate(self.values[column]):
try:
columns[idx] += [item]
except:
pass
columns = [index] + columns
values = {}
for item in columns:
values[item[0]] = item[1 : len(item)]
return tablesample(values, self.dtype, self.count, self.offset, self.percent)
# ---#
def to_list(self):
"""
---------------------------------------------------------------------------
Converts the tablesample to a list.
Returns
-------
list
Python list.
"""
result = []
all_cols = [elem for elem in self.values]
if all_cols == []:
return []
for i in range(len(self.values[all_cols[0]])):
result_tmp = []
for elem in self.values:
if elem != "index":
result_tmp += [self.values[elem][i]]
result += [result_tmp]
return result
# ---#
def to_numpy(self):
"""
---------------------------------------------------------------------------
Converts the tablesample to a numpy array.
Returns
-------
numpy.array
Numpy Array.
"""
import numpy as np
return np.array(self.to_list())
# ---#
def to_pandas(self):
"""
---------------------------------------------------------------------------
Converts the tablesample to a pandas DataFrame.
Returns
-------
pandas.DataFrame
pandas DataFrame of the tablesample.
See Also
--------
tablesample.to_sql : Generates the SQL query associated to the tablesample.
tablesample.to_vdf : Converts the tablesample to vDataFrame.
"""
if "index" in self.values:
df = pd.DataFrame(data=self.values, index=self.values["index"])
return df.drop(columns=["index"])
else:
return pd.DataFrame(data=self.values)
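# Hypothetical usage sketch: when an "index" column is present it becomes the
# pandas index and is dropped from the data columns.
#
#   t = tablesample({"index": ["a", "b"], "val": [1, 2]})
#   df = t.to_pandas()
#   list(df.columns)   # -> ['val']
#   list(df.index)     # -> ['a', 'b']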
# ---#
def to_sql(self):
"""
---------------------------------------------------------------------------
Generates the SQL query associated to the tablesample.
Returns
-------
str
SQL query associated to the tablesample.
See Also
--------
tablesample.to_pandas : Converts the tablesample to a pandas DataFrame.
tablesample.to_vdf : Converts the tablesample to a vDataFrame.
"""
sql = []
n = len(self.values[list(self.values.keys())[0]])
for i in range(n):
row = []
for column in self.values:
val = self.values[column][i]
if isinstance(val, str):
val = "'" + val.replace("'", "''") + "'"
elif val is None:
val = "NULL"
elif isinstance(val, bytes):
val = str(val)[2:-1]
val = "'{}'::binary({})".format(val, len(val))
elif isinstance(val, datetime.datetime):
val = "'{}'::datetime".format(val)
elif isinstance(val, datetime.date):
val = "'{}'::date".format(val)
try:
if math.isnan(val):
val = "NULL"
except:
pass
row += ["{} AS {}".format(val, '"' + column.replace('"', "") + '"')]
sql += ["(SELECT {})".format(", ".join(row))]
sql = " UNION ALL ".join(sql)
return sql
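# Hypothetical sketch of the SQL shape produced above (values invented): each
# row becomes a one-row SELECT and the rows are glued together with UNION ALL.
#
#   t = tablesample({"name": ["Badr", None], "x": [1, 2]})
#   t.to_sql()
#   # -> (SELECT 'Badr' AS "name", 1 AS "x") UNION ALL (SELECT NULL AS "name", 2 AS "x")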
# ---#
def to_vdf(self):
"""
---------------------------------------------------------------------------
Converts the tablesample to a vDataFrame.
Returns
-------
vDataFrame
vDataFrame of the tablesample.
See Also
--------
tablesample.to_pandas : Converts the tablesample to a pandas DataFrame.
tablesample.to_sql : Generates the SQL query associated to the tablesample.
"""
relation = "({}) sql_relation".format(self.to_sql())
return vDataFrameSQL(relation)
# ---#
def to_tablesample(
query: str, title: str = "", max_columns: int = -1,
):
"""
---------------------------------------------------------------------------
Returns the result of a SQL query as a tablesample object.
Parameters
----------
query: str
SQL Query.
title: str, optional
Query title when the query is displayed.
max_columns: int, optional
Maximum number of columns to display.
Returns
-------
tablesample
Result of the query.
See Also
--------
tablesample : Object in memory created for rendering purposes.
"""
check_types(
[("query", query, [str]), ("max_columns", max_columns, [int]),]
)
if verticapy.options["sql_on"]:
print_query(query, title)
start_time = time.time()
cursor = executeSQL(query, print_time_sql=False)
description, dtype = cursor.description,
It saves the alchemical rotamer library, which is the combination
of the rotamer libraries of both molecules, to the path that
is supplied.
Parameters
----------
path : str
The path where to save the rotamer library
fep_lambda : float
The value to define an FEP lambda. This lambda affects
all the parameters. It needs to be contained between
0 and 1. Default is None
coul_lambda : float
The value to define a general coulombic lambda. This lambda
only affects coulombic parameters of both molecules. It needs
to be contained between 0 and 1. It has precedence over
fep_lambda. Default is None
coul1_lambda : float
The value to define a coulombic lambda for exclusive atoms
of molecule 1. This lambda only affects coulombic parameters
of exclusive atoms of molecule 1. It needs to be contained
between 0 and 1. It has precedence over coul_lambda or
fep_lambda. Default is None
coul2_lambda : float
The value to define a coulombic lambda for exclusive atoms
of molecule 2. This lambda only affects coulombic parameters
of exclusive atoms of molecule 2. It needs to be contained
between 0 and 1. It has precedence over coul_lambda or
fep_lambda. Default is None
vdw_lambda : float
The value to define a vdw lambda. This lambda only
affects van der Waals parameters. It needs to be contained
between 0 and 1. It has precedence over fep_lambda.
Default is None
bonded_lambda : float
The value to define a bonded lambda. This lambda only
affects bonded parameters. It needs to be contained
between 0 and 1. It has precedence over fep_lambda.
Default is None
"""
at_least_one = fep_lambda is not None or \
coul_lambda is not None or coul1_lambda is not None or \
coul2_lambda is not None or vdw_lambda is not None or \
bonded_lambda is not None
# Define lambdas
fep_lambda = FEPLambda(fep_lambda)
coul_lambda = CoulombicLambda(coul_lambda)
coul1_lambda = Coulombic1Lambda(coul1_lambda)
coul2_lambda = Coulombic2Lambda(coul2_lambda)
vdw_lambda = VanDerWaalsLambda(vdw_lambda)
bonded_lambda = BondedLambda(bonded_lambda)
lambda_set = LambdaSet(fep_lambda, coul_lambda, coul1_lambda,
coul2_lambda, vdw_lambda, bonded_lambda)
if (at_least_one and
lambda_set.get_lambda_for_bonded() == 0.0 and
lambda_set.get_lambda_for_vdw() == 0.0 and
lambda_set.get_lambda_for_coulomb() == 0.0 and
lambda_set.get_lambda_for_coulomb1() == 0.0 and
lambda_set.get_lambda_for_coulomb2() == 0.0):
rotamers = self.molecule1.rotamers
mapping = False
elif (at_least_one and
lambda_set.get_lambda_for_bonded() == 1.0 and
lambda_set.get_lambda_for_vdw() == 1.0 and
lambda_set.get_lambda_for_coulomb() == 1.0 and
lambda_set.get_lambda_for_coulomb1() == 1.0 and
lambda_set.get_lambda_for_coulomb2() == 1.0):
rotamers = self.molecule2.rotamers
mapping = True
else:
rotamers = self._rotamers
mapping = False
# Initial definitions
pdb_atom_names = [atom.PDB_name.replace(' ', '_',)
for atom in self._joint_topology.atoms]
molecule_tag = self._joint_topology.molecule.tag
with open(path, 'w') as file:
file.write('rot assign res {} &\n'.format(molecule_tag))
for i, rotamer_branches in enumerate(rotamers):
if i > 0:
file.write(' newgrp &\n')
for rotamer in rotamer_branches:
index1 = rotamer.index1
index2 = rotamer.index2
if mapping:
index1 = self._mol2_to_alc_map[index1]
index2 = self._mol2_to_alc_map[index2]
atom_name1 = pdb_atom_names[index1]
atom_name2 = pdb_atom_names[index2]
file.write(' sidelib FREE{} {} {} &\n'.format(
rotamer.resolution, atom_name1, atom_name2))
def obc_parameters_to_file(self, path, fep_lambda=None,
coul_lambda=None, coul1_lambda=None,
coul2_lambda=None, vdw_lambda=None,
bonded_lambda=None):
"""
It saves the alchemical OBC parameters, which is the combination
of the OBC parameters of both molecules, to the path that
is supplied.
Parameters
----------
path : str
The path where to save the OBC parameters template
fep_lambda : float
The value to define an FEP lambda. This lambda affects
all the parameters. It needs to be contained between
0 and 1. Default is None
coul_lambda : float
The value to define a general coulombic lambda. This lambda
only affects coulombic parameters of both molecules. It needs
to be contained between 0 and 1. It has precedence over
fep_lambda. Default is None
coul1_lambda : float
The value to define a coulombic lambda for exclusive atoms
of molecule 1. This lambda only affects coulombic parameters
of exclusive atoms of molecule 1. It needs to be contained
between 0 and 1. It has precedence over coul_lambda or
fep_lambda. Default is None
coul2_lambda : float
The value to define a coulombic lambda for exclusive atoms
of molecule 2. This lambda only affects coulombic parameters
of exclusive atoms of molecule 2. It needs to be contained
between 0 and 1. It has precedence over coul_lambda or
fep_lambda. Default is None
vdw_lambda : float
The value to define a vdw lambda. This lambda only
affects van der Waals parameters. It needs to be contained
between 0 and 1. It has precedence over fep_lambda.
Default is None
bonded_lambda : float
The value to define a bonded lambda. This lambda only
affects bonded parameters. It needs to be contained
between 0 and 1. It has precedence over fep_lambda.
Default is None
Returns
-------
path : str
The path where to save the OBC parameters template
"""
# Handle peleffy Logger
from peleffy.utils import Logger
logger = Logger()
log_level = logger.get_level()
logger.set_level('ERROR')
# Define lambdas
fep_lambda = FEPLambda(fep_lambda)
coul_lambda = CoulombicLambda(coul_lambda)
coul1_lambda = Coulombic1Lambda(coul1_lambda)
coul2_lambda = Coulombic2Lambda(coul2_lambda)
vdw_lambda = VanDerWaalsLambda(vdw_lambda)
bonded_lambda = BondedLambda(bonded_lambda)
lambda_set = LambdaSet(fep_lambda, coul_lambda, coul1_lambda,
coul2_lambda, vdw_lambda, bonded_lambda)
# Define mappers
mol1_mapped_atoms = [atom_pair[0] for atom_pair in self.mapping]
mol2_mapped_atoms = [atom_pair[1] for atom_pair in self.mapping]
mol1_to_mol2_map = dict(zip(mol1_mapped_atoms, mol2_mapped_atoms))
# Generate individual OBC parameters
from copy import deepcopy
from peleffy.solvent import OBC2
mol1_obc_params = OBC2(self.topology1)
mol2_obc_params = OBC2(self.topology2)
# Generate alchemical OBC parameters object
alchemical_obc_params = deepcopy(mol1_obc_params)
alchemical_obc_params._topologies = [self._joint_topology, ]
# Get OBC parameters of molecule 1
radii1 = alchemical_obc_params._radii[0]
scales1 = alchemical_obc_params._scales[0]
radii2 = mol2_obc_params._radii[0]
scales2 = mol2_obc_params._scales[0]
for atom_idx, atom in enumerate(self._joint_topology.atoms):
if atom_idx in self._exclusive_atoms:
lambda_value = 1.0 - lambda_set.get_lambda_for_coulomb1()
radius = radii1[(atom_idx, )] * lambda_value
scale = scales1[(atom_idx, )] * lambda_value
elif atom_idx in self._non_native_atoms:
for mol2_index, alc_index in self._mol2_to_alc_map.items():
if alc_index == atom_idx:
lambda_value = lambda_set.get_lambda_for_coulomb2()
radius = radii2[(mol2_index, )] * lambda_value
scale = scales2[(mol2_index, )] * lambda_value
break
else:
logger.error(['Error: mapping for atom index ' +
f'{atom_idx} not found in the ' +
'hybrid molecule'])
radius = 0
scale = 0
elif atom_idx in mol1_mapped_atoms:
mol2_idx = mol1_to_mol2_map[atom_idx]
radius2 = mol2_obc_params._radii[0][(mol2_idx, )]
scale2 = mol2_obc_params._scales[0][(mol2_idx, )]
lambda_value = 1.0 - lambda_set.get_lambda_for_coulomb2()
radius = radii1[(atom_idx, )] * lambda_value \
+ (1.0 - lambda_value) * radius2
scale = scales1[(atom_idx, )] * lambda_value \
+ (1.0 - lambda_value) * scale2
alchemical_obc_params._radii[0][(atom_idx, )] = radius
alchemical_obc_params._scales[0][(atom_idx, )] = scale
alchemical_obc_params.to_file(path)
logger.set_level(log_level)
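# Hypothetical usage sketch (the object name below is invented for
# illustration): writing the hybrid OBC template part-way through an
# alchemical transformation.
#
#   alchemizer.obc_parameters_to_file('ligandParams.txt', fep_lambda=0.5)
#
# As implemented above, radii and scales of exclusive atoms of molecule 1 are
# scaled by (1 - coulomb1 lambda), non-native atoms of molecule 2 by the
# coulomb2 lambda, and mapped atoms are linearly interpolated between the two
# parameter sets.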
def _ipython_display_(self):
"""
It returns a representation of the alchemical mapping.
Returns
-------
mapping_representation : an IPython display object
Displayable RDKit molecules with mapping information
"""
from IPython.display import display
return display(self._mapper)
class Lambda(ABC):
"""
It defines the Lambda class.
"""
_TYPE = ""
def __init__(self, value=None):
"""
It initializes a Lambda object.
Parameters
----------
value : float
The value of this Lambda object. It needs to be
contained between 0 and 1. Default is None
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError("Invalid value for a lambda: " +
f"\'{value}\'")
if (value > 1) or (value < 0):
raise ValueError("Invalid value for a lambda: " +
f"\'{value}\'. " +
"It has to be between 0 and 1")
self._value = value
@property
def value(self):
"""
It returns the value of this Lambda object.
Returns
-------
value : float or None
The value of this Lambda object. It can be None if the
value for this Lambda object has not been set
"""
return self._value
@property
def type(self):
"""
It returns the type of this Lambda object.
Returns
-------
type : str
The type of this Lambda object
"""
return self._TYPE
@property
def is_set(self):
"""
It answers whether the value of this Lambda object has
been set or not.
Returns
-------
is_set : bool
It is true only if the value of Lambda object has been
set
"""
return self.value is not None
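# Minimal sketch (not from the original source), using the FEPLambda subclass
# defined just below:
#
#   FEPLambda(0.3).value    # -> 0.3
#   FEPLambda().is_set      # -> False
#   FEPLambda(1.5)          # raises ValueError: it has to be between 0 and 1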
class FEPLambda(Lambda):
"""
It defines the FEPLambda class. It affects all parameters.
"""
_TYPE = "fep"
class CoulombicLambda(Lambda):
"""
It defines the CoulombicLambda class. It affects only coulombic
parameters involving both molecules.
"""
_TYPE = "coulombic"
class Coulombic1Lambda(Lambda):
"""
It defines the Coulombic1Lambda class. It affects only coulombic
parameters of exclusive atoms of molecule 1.
"""
# template_flask/constants.py
REQUIREMENTS = [
'flask',
'flask_sqlalchemy',
'flask_login',
'flask_mail',
'flask_wtf',
'flask_debugtoolbar',
'gunicorn',
'pytest',
'pytest_cov'
]
INDEX_HTML = [
['<!DOCTYPE html>', False],
['<html lang="en">', False],
['<head>', False],
['\t<meta charset="UTF-8">', False],
['\t<title>NEW PROJECT: {app}</title>', True],
['</head>', False],
['<body>', False],
['\t<h1>Welcome to your new project: {app}</h1>', True],
['''\t<p><button onclick="Fetcher.fetcher('fetcher', {hello: 'world'});">Click here for asynchronous post</button></p>''', False],
['\t<p><h3>Check out the Bootstrap Templates below to add to your project</h3></p>', False],
['\t<p><label>The HTML, CSS AND JS for these are located in the TEMPLATES AND STATIC folders</label></p> ', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='album') }}"><b>Album</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='blog') }}"><b>Blog</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='carousel') }}"><b>Carousel</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='cheatsheet') }}"><b>Cheatsheet</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='checkout') }}"><b>Checkout</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='cover') }}"><b>Cover</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='dashboard') }}"><b>Dashboard</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='grid') }}"><b>Grid</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='headers') }}"><b>Headers</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='heroes') }}"><b>Heroes</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='jumbotron') }}"><b>Jumbotron</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='masonry') }}"><b>Masonry</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='navbars') }}"><b>Navbars</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='navbar_bottom') }}"><b>Navbar Bottom</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='navbar_top_fixed') }}"><b>Navbar Top Fixed</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='navbar_top_static') }}"><b>Navbar Top Static</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='pricing') }}"><b>Pricing</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='product') }}"><b>Product</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='sidebars') }}"><b>Sidebars</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='sign_in') }}"><b>Sign In</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='starter_template') }}"><b>Starter Template</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='sticky_footer') }}"><b>Sticky Footer</b></a></p>''', False],
['''\t<p><a href="{{ url_for('bootstrap', bs_page='sticky_footer_navbar') }}"><b>Sticky Footer Navbar</b></a></p>''', False],
['''\t<script src="{{ url_for('static', filename='js/fetcher.js') }}"></script>''', False],
['</body>', False],
['</html>', False]
]
CONFIG_INIT = [
["", False]
]
CONFIG_CONFIG = [
['SECRET_KEY = """{secret_key}"""', True],
["SQLALCHEMY_DATABASE_URI = 'sqlite:///{app}_db.db'", True],
["# SQLALCHEMY_DATABASE_URI = 'postgresql://<your_username>:<your_password>@<host>:<port>/<database_name>'", False],
["# SQLALCHEMY_DATABASE_URI = 'mysql://<your_username>:<your_password>@<host>:<port>/<database_name>", False],
["# SQLALCHEMY_DATABASE_URI = 'oracle://<your_username>:<your_password>@<host>:<port>/<sid_name>", False]
]
MODEL_INIT_HINT = [
['', False]
]
MODEL_INIT = [
["# Import all Flask-SQLAlchemy Models to here and then the main __init__.py will import * (all) from here", False],
["", False],
["", False],
["from {app}_app.models.db import db", True],
["from {app}_app.models.user import User", True],
["from {app}_app.models.other_model import OtherModel", True]
]
MODEL_DB = [
["from datetime import datetime", False],
["from flask_sqlalchemy import SQLAlchemy", False],
["from sqlalchemy.ext.hybrid import hybrid_property", False],
["", False],
["", False],
["# Shared Database (db) for models and tables in seperated .py files.", False],
["# models.__init__.py imports db and other Flask-SQLAlchemy Models and", False],
["# then the main __init__.py for the app will import * (all) from app.models.", False],
["", False],
["", False],
["db = SQLAlchemy()", False]
]
MODEL_DATATYPES = [
["from hashlib import sha256", False],
["from sqlalchemy.types import TypeDecorator, String", False],
["", False],
["", False],
["# Here you can create custom Column Datatypes for Flask-SQLAlchemy Models", False],
["", False],
["", False],
["class Name(TypeDecorator):", False],
["\timpl = String(128) # The implied class type from SQLAlchemy", False],
["", False],
["\tdef process_bind_param(self, value, dialect):", False],
["\t\t#This method will process the value upon creation and can transform it before inserting into database", False],
["\t\treturn value.lower().capitalize()", False],
["", False],
["\tdef process_result_value(self, value, dialect):", False],
["\t\t#This method will process the value upon loading and can transform it before the Model attribute is set", False],
["\t\treturn value", False],
["", False],
["", False],
["class Password(TypeDecorator):", False],
["\timpl = String(64) # The implied class type from SQLAlchemy", False],
["", False],
["\tdef process_bind_param(self, value, dialect):", False],
["\t\t#This method will process the value upon creation and can transform it before inserting into database", False],
["\t\treturn sha256(value.encode('utf-8')).hexdigest()", False],
["", False],
["\tdef process_result_value(self, value, dialect):", False],
["\t\t#This method will process the value upon loading and can transform it before the Model attribute is set", False],
["\t\treturn value", False]
]
MODEL_USER = [
["from {app}_app.models.db import db, datetime, hybrid_property", True],
["from {app}_app.models.datatypes import Name, Password", True],
["", False],
["", False],
["# Example User Model", False],
["# When creating more Models in separate python files, go to <app>.models.__init__.py and import the new model to there.", False],
["# The main __init__.py for the app will import * (all) from <app>.models.__init__.py ", False],
["", False],
["", False],
["class User(db.Model):", False],
["\tid = db.Column(db.Integer, primary_key=True)", False],
["\tdate_created = db.Column(db.DateTime, nullable=False, default=datetime.now())", False],
["\tfirst_name = db.Column(Name) # From datatypes.py - Custom datatype which will capitalize the name upon creation", False],
["\tlast_name = db.Column(Name) # From datatypes.py - Custom datatype which will capitalize the name upon creation", False],
["\temail = db.Column(db.String(128))", False],
["\tusername = db.Column(db.String(64), unique=True)", False],
["\tpassword = db.Column(Password) # From datatypes.py - Custom datatype which will convert password to sha256 hexdigest", False],
["", False],
["\t@hybrid_property", False],
["\tdef full_name(self):", False],
["\t\treturn f'{self.first_name} {self.last_name}'", False]
]
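# Illustrative sketch (not part of the generated template): once the rendered
# models are in place, the custom column types above behave roughly like this.
#
#   user = User(first_name='ada', last_name='lovelace', password='secret')
#   db.session.add(user); db.session.commit()
#   # 'ada' is stored as 'Ada' (Name.process_bind_param) and 'secret' is stored
#   # as its sha256 hexdigest (Password.process_bind_param).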
MODEL_OTHER_MODEL = [
["from datetime import datetime", False],
["from {app}_app.models.db import db, datetime", True],
["", False],
["", False],
["# Other Example Model", False],
["# When creating more Models in separate python files, go to <app>.models.__init__.py and import the new model to there.", False],
["# The main __init__.py for the app will import * (all) from <app>.models.__init__.py ", False],
["", False],
["", False],
["class OtherModel(db.Model):", False],
["\t__tablename__ = 'other_model'", False],
["\tid = db.Column(db.Integer, primary_key=True)", False],
["\tdate_created = db.Column(db.DateTime, nullable=False, default=datetime.now())", False]
]
ROUTE_INIT = [
["", False]
]
ROUTE_ROUTE = [
["from flask import render_template, redirect, url_for, request, session, flash, jsonify, make_response", False],
["from {app}_app import app, db, User, OtherModel", True],
["", False],
["", False],
["@app.route('/', methods=['POST', 'GET'])", False],
["@app.route('/home', methods=['POST', 'GET'])", False],
["def index():", False],
["\tif request.method == 'POST':", False],
["\t\tprint(request.form)", False],
["\t\tprint(request.files)", False],
["\treturn render_template('index.html')", False],
["", False],
["", False],
["@app.route('/bootstrap_<bs_page>')", False],
["def bootstrap(bs_page):", False],
["\treturn render_template(f'bootstrap/{bs_page}.html')", False],
["", False],
["", False],
["# Asynchronous route using the Fetcher Object in static/js/fetcher.js", False],
["# Customize this object for use in your application", False],
["@app.route('/fetcher', methods=['POST'])", False],
["def fetcher():", False],
["\tpost = request.get_json()", False],
["\tprint('fetcher post = ', post)", False],
["\tresponse = make_response(jsonify({'message': 'good response', 'data': {'hello': True, 'world': [1, 2, 3]}}), 200)", False],
["\treturn response", False]
]
RUN = [
["from {app}_app import app, DEBUG", True],
["", False],
["", False],
["if __name__ == '__main__':", False],
["\tapp.run(debug=DEBUG)", False]
]
INIT = [
["from flask import Flask", False],
["# from flask_login import ()", False],
["# from flask_mail import ()", False],
["# from flask_wft import ()", False],
["# from flask_debugtoolbar import ()", False],
["from {app}_app.config.app_config import *", True],
["from {app}_app.models import *", True],
["", False],
["", False],
["app = Flask(__name__)", False],
["app.config['SECRET_KEY'] = SECRET_KEY", False],
["", False],
["# SQLAlchemy Config", False],
["app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI", False],
["app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False", False],
["", False],
["# Flask Mail Config", False],
["# app.config['MAIL_SERVER'] = ''\t# Your Mail Server (ex. 'smtp.zoho.com')", False],
["# app.config['MAIL_PORT'] = ''\t# Your Mail Port (ex. 465)", False],
["# app.config['MAIL_USE_SSL'] = ''\t# Using SSL? (True/False)", False],
["# app.config['MAIL_USE_TLS'] = ''\t# Using TLS? (True/False)", False],
["# app.config['MAIL_USERNAME'] = ''\t# Your Mail Email Address (ex. '<EMAIL>')", False],
["# app.config['MAIL_PASSWORD'] = ''\t# Your Mail Password", False],
["# app.config['MAIL_DEFAULT_SENDER'] = ''\t# Your Mail Default Sender (ex. '<EMAIL>')", False],
["", False],
["# Other Config", False],
["# app.config[''] = ''", False],
["# app.config[''] = ''", False],
["", False],
["# Use this bool to control your app debug state, run.py will import this", False],
["DEBUG = True", False],
["", False],
["", False],
["with app.app_context():", False],
["\tdb.init_app(app)", False],
["\t# db.create_all()", False],
["", False],
["", False],
["from {app}_app.routes import routes", True]
]
JS_FETCHER = [
["// Global Instance of Fetcher", False],
["// This object can send asynchronous requests to your Flask routes", False],
["// Main use - Fetcher.fetcher(route_url_endpoint, data to be posted, method (default is POST))", False],
["", False],
["(function (global) {", False],
['\t"use strict";', False],
["\tlet lastFetch = {};", False],
["\tlet lastFetchEndpoint = null;", False],
["\tlet lastFetchUrl = null;", False],
["\tlet lastFetchBody = null;", False],
["\tlet lastFetchMethod = null;", False],
["", False],
["\tlet fetcher = function(url_endpoint, body, method='POST'){", False],
["\t\t// Update last fetch properties", False],
["\t\tlastFetchEndpoint = url_endpoint;", False],
["\t\tlastFetchUrl = `${window.origin}/${url_endpoint}`;", False],
["\t\tlastFetchBody = body;", False],
["\t\tlastFetchMethod = method;", False],
["\t\tlastFetch = {", False],
["\t\t\torigin: window.origin,", False],
["\t\t\tendpoint: lastFetchEndpoint,", False],
["\t\t\turl: lastFetchUrl,", False],
["\t\t\tbody: lastFetchBody,", False],
["\t\t\tmethod: lastFetchMethod", False],
["\t\t};", False],
["", False],
["\t\t// Start fetching process to Flask route", False],
["\t\tfetch(lastFetchUrl, {", False],
["\t\t\tmethod: lastFetchMethod,", False],
["\t\t\tcredentials: 'include',", False],
["\t\t\tbody: JSON.stringify(lastFetchBody),", False],
["\t\t\tcache: 'no-cache',", False],
["\t\t\theaders: new Headers({", False],
["\t\t\t\t'content-type': 'application/json'", False],
["\t\t\t})", False],
["\t\t})", False],
["\t\t.then(function(response) {", False],
["\t\t\t// You | |
obtain
information about the monitor in a simple way even in the
absence of quorum.
:param mon_id: the ID portion of the monitor's name (i.e., mon.<ID>)
:type mon_id: str
:returns: the string reply from the monitor
"""
self.require_state("configuring", "connected")
outstrp = pointer(pointer(c_char()))
outstrlen = c_long()
ret = run_in_thread(self.librados.rados_ping_monitor,
(self.cluster, c_char_p(mon_id),
outstrp, byref(outstrlen)))
my_outstr = outstrp.contents[:(outstrlen.value)]
if outstrlen.value:
run_in_thread(self.librados.rados_buffer_free, (outstrp.contents,))
if ret != 0:
raise make_ex(ret, "error calling ping_monitor")
return my_outstr
def connect(self, timeout=0):
"""
Connect to the cluster. Use shutdown() to release resources.
"""
self.require_state("configuring")
ret = run_in_thread(self.librados.rados_connect, (self.cluster,),
timeout)
if (ret != 0):
raise make_ex(ret, "error connecting to the cluster")
self.state = "connected"
def get_cluster_stats(self):
"""
Read usage info about the cluster
This tells you total space, space used, space available, and number
of objects. These are not updated immediately when data is written,
they are eventually consistent.
:returns: dict - contains the following keys:
- ``kb`` (int) - total space
- ``kb_used`` (int) - space used
- ``kb_avail`` (int) - free space available
- ``num_objects`` (int) - number of objects
"""
stats = rados_cluster_stat_t()
ret = run_in_thread(self.librados.rados_cluster_stat,
(self.cluster, byref(stats)))
if ret < 0:
raise make_ex(
ret, "Rados.get_cluster_stats(%s): get_stats failed" % self.rados_id)
return {'kb': stats.kb,
'kb_used': stats.kb_used,
'kb_avail': stats.kb_avail,
'num_objects': stats.num_objects}
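# Hypothetical usage sketch (the conffile path is a common default; adjust it
# for your deployment):
#
#   cluster = Rados(conffile='/etc/ceph/ceph.conf')
#   cluster.connect()
#   stats = cluster.get_cluster_stats()
#   print('%d KB used of %d KB' % (stats['kb_used'], stats['kb']))
#   cluster.shutdown()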
def pool_exists(self, pool_name):
"""
Checks if a given pool exists.
:param pool_name: name of the pool to check
:type pool_name: str
:raises: :class:`TypeError`, :class:`Error`
:returns: bool - whether the pool exists, false otherwise.
"""
self.require_state("connected")
if not isinstance(pool_name, str):
raise TypeError('pool_name must be a string')
ret = run_in_thread(self.librados.rados_pool_lookup,
(self.cluster, c_char_p(pool_name)))
if (ret >= 0):
return True
elif (ret == -errno.ENOENT):
return False
else:
raise make_ex(ret, "error looking up pool '%s'" % pool_name)
def pool_lookup(self, pool_name):
"""
Returns a pool's ID based on its name.
:param pool_name: name of the pool to look up
:type pool_name: str
:raises: :class:`TypeError`, :class:`Error`
:returns: int - pool ID, or None if it doesn't exist
"""
self.require_state("connected")
if not isinstance(pool_name, str):
raise TypeError('pool_name must be a string')
ret = run_in_thread(self.librados.rados_pool_lookup,
(self.cluster, c_char_p(pool_name)))
if (ret >= 0):
return int(ret)
elif (ret == -errno.ENOENT):
return None
else:
raise make_ex(ret, "error looking up pool '%s'" % pool_name)
def pool_reverse_lookup(self, pool_id):
"""
Returns a pool's name based on its ID.
:param pool_id: ID of the pool to look up
:type pool_id: int
:raises: :class:`TypeError`, :class:`Error`
:returns: string - pool name, or None if it doesn't exist
"""
self.require_state("connected")
if not isinstance(pool_id, int):
raise TypeError('pool_id must be an integer')
size = c_size_t(512)
while True:
c_name = create_string_buffer(size.value)
ret = run_in_thread(self.librados.rados_pool_reverse_lookup,
(self.cluster, c_int64(pool_id), byref(c_name), size))
if ret > size.value:
size = c_size_t(ret)
elif ret == -errno.ENOENT:
return None
elif ret < 0:
raise make_ex(ret, "error reverse looking up pool '%s'" % pool_id)
else:
return c_name.value
break
def create_pool(self, pool_name, auid=None, crush_rule=None):
"""
Create a pool:
- with default settings: if auid=None and crush_rule=None
- owned by a specific auid: auid given and crush_rule=None
- with a specific CRUSH rule: if auid=None and crush_rule given
- with a specific CRUSH rule and auid: if auid and crush_rule given
:param pool_name: name of the pool to create
:type pool_name: str
:param auid: the id of the owner of the new pool
:type auid: int
:param crush_rule: rule to use for placement in the new pool
:type crush_rule: str
:raises: :class:`TypeError`, :class:`Error`
"""
self.require_state("connected")
if not isinstance(pool_name, str):
raise TypeError('pool_name must be a string')
if crush_rule is not None and not isinstance(crush_rule, str):
raise TypeError('crush_rule must be a string')
if (auid == None):
if (crush_rule == None):
ret = run_in_thread(self.librados.rados_pool_create,
(self.cluster, c_char_p(pool_name)))
else:
ret = run_in_thread(self.librados.\
rados_pool_create_with_crush_rule,
(self.cluster, c_char_p(pool_name),
c_ubyte(crush_rule)))
elif (crush_rule == None):
ret = run_in_thread(self.librados.rados_pool_create_with_auid,
(self.cluster, c_char_p(pool_name),
c_uint64(auid)))
else:
ret = run_in_thread(self.librados.rados_pool_create_with_all,
(self.cluster, c_char_p(pool_name),
c_uint64(auid), c_ubyte(crush_rule)))
if ret < 0:
raise make_ex(ret, "error creating pool '%s'" % pool_name)
def get_pool_base_tier(self, pool_id):
"""
Get base pool
:returns: base pool, or pool_id if tiering is not configured for the pool
"""
self.require_state("connected")
if not isinstance(pool_id, int):
raise TypeError('pool_id must be an int')
base_tier = c_int64(0)
ret = run_in_thread(self.librados.rados_pool_get_base_tier,
(self.cluster, c_int64(pool_id), byref(base_tier)))
if ret < 0:
raise make_ex(ret, "get_pool_base_tier(%d)" % pool_id)
return base_tier.value
def delete_pool(self, pool_name):
"""
Delete a pool and all data inside it.
The pool is removed from the cluster immediately,
but the actual data is deleted in the background.
:param pool_name: name of the pool to delete
:type pool_name: str
:raises: :class:`TypeError`, :class:`Error`
"""
self.require_state("connected")
if not isinstance(pool_name, str):
raise TypeError('pool_name must be a string')
ret = run_in_thread(self.librados.rados_pool_delete,
(self.cluster, c_char_p(pool_name)))
if ret < 0:
raise make_ex(ret, "error deleting pool '%s'" % pool_name)
def list_pools(self):
"""
Gets a list of pool names.
:returns: list - of pool names.
"""
self.require_state("connected")
size = c_size_t(512)
while True:
c_names = create_string_buffer(size.value)
ret = run_in_thread(self.librados.rados_pool_list,
(self.cluster, byref(c_names), size))
if ret > size.value:
size = c_size_t(ret)
else:
break
return filter(lambda name: name != '', c_names.raw.split('\0'))
def get_fsid(self):
"""
Get the fsid of the cluster as a hexadecimal string.
:raises: :class:`Error`
:returns: str - cluster fsid
"""
self.require_state("connected")
buf_len = 37
fsid = create_string_buffer(buf_len)
ret = run_in_thread(self.librados.rados_cluster_fsid,
(self.cluster, byref(fsid), c_size_t(buf_len)))
if ret < 0:
raise make_ex(ret, "error getting cluster fsid")
return fsid.value
def open_ioctx(self, ioctx_name):
"""
Create an io context
The io context allows you to perform operations within a particular
pool.
:param ioctx_name: name of the pool
:type ioctx_name: str
:raises: :class:`TypeError`, :class:`Error`
:returns: Ioctx - Rados Ioctx object
"""
self.require_state("connected")
if not isinstance(ioctx_name, str):
raise TypeError('the name of the pool must be a string')
ioctx = c_void_p()
ret = run_in_thread(self.librados.rados_ioctx_create,
(self.cluster, c_char_p(ioctx_name), byref(ioctx)))
if ret < 0:
raise make_ex(ret, "error opening pool '%s'" % ioctx_name)
return Ioctx(ioctx_name, self.librados, ioctx)
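# Hypothetical usage sketch ('rbd' is just an example pool name):
#
#   ioctx = cluster.open_ioctx('rbd')
#   try:
#       pass  # perform object operations within the pool
#   finally:
#       ioctx.close()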
def mon_command(self, cmd, inbuf, timeout=0, target=None):
"""
mon_command[_target](cmd, inbuf, outbuf, outbuflen, outs, outslen)
returns (int ret, string outbuf, string outs)
"""
self.require_state("connected")
outbufp = pointer(pointer(c_char()))
outbuflen = c_long()
outsp = pointer(pointer(c_char()))
outslen = c_long()
cmdarr = (c_char_p * len(cmd))(*cmd)
if target:
ret = run_in_thread(self.librados.rados_mon_command_target,
(self.cluster, c_char_p(target), cmdarr,
len(cmd), c_char_p(inbuf), len(inbuf),
outbufp, byref(outbuflen), outsp,
byref(outslen)), timeout)
else:
ret = run_in_thread(self.librados.rados_mon_command,
(self.cluster, cmdarr, len(cmd),
c_char_p(inbuf), len(inbuf),
outbufp, byref(outbuflen), outsp, byref(outslen)),
timeout)
# copy returned memory (ctypes makes a copy, not a reference)
my_outbuf = outbufp.contents[:(outbuflen.value)]
my_outs = outsp.contents[:(outslen.value)]
# free callee's allocations
if outbuflen.value:
run_in_thread(self.librados.rados_buffer_free, (outbufp.contents,))
if outslen.value:
run_in_thread(self.librados.rados_buffer_free, (outsp.contents,))
return (ret, my_outbuf, my_outs)
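# Hypothetical usage sketch: monitor commands are JSON strings; the exact
# command schema depends on the Ceph release, so 'status' is only an example.
#
#   import json
#   ret, outbuf, outs = cluster.mon_command([json.dumps({'prefix': 'status'})], '')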
def osd_command(self, osdid, cmd, inbuf, timeout=0):
"""
osd_command(osdid, cmd, inbuf, outbuf, outbuflen, outs, outslen)
returns (int ret, string outbuf, string outs)
"""
self.require_state("connected")
outbufp = pointer(pointer(c_char()))
outbuflen = c_long()
outsp = pointer(pointer(c_char()))
outslen = c_long()
cmdarr = (c_char_p * len(cmd))(*cmd)
ret = run_in_thread(self.librados.rados_osd_command,
(self.cluster, osdid, cmdarr, len(cmd),
c_char_p(inbuf), len(inbuf),
outbufp, byref(outbuflen), outsp, byref(outslen)),
timeout)
# copy returned memory (ctypes makes a copy, not a reference)
my_outbuf = outbufp.contents[:(outbuflen.value)]
my_outs = outsp.contents[:(outslen.value)]
# free callee's allocations
if outbuflen.value:
run_in_thread(self.librados.rados_buffer_free, (outbufp.contents,))
if outslen.value:
run_in_thread(self.librados.rados_buffer_free, (outsp.contents,))
return (ret, my_outbuf, my_outs)
def pg_command(self, pgid, cmd, inbuf, timeout=0):
"""
pg_command(pgid, cmd, inbuf, outbuf, outbuflen, outs, outslen)
returns (int ret, string outbuf, string outs)
"""
self.require_state("connected")
outbufp = pointer(pointer(c_char()))
outbuflen = c_long()
outsp = pointer(pointer(c_char()))
outslen = c_long()
cmdarr = (c_char_p * len(cmd))(*cmd)
ret = run_in_thread(self.librados.rados_pg_command,
(self.cluster, c_char_p(pgid), cmdarr, len(cmd),
c_char_p(inbuf), len(inbuf),
outbufp, byref(outbuflen), outsp, byref(outslen)),
timeout)
# copy returned memory (ctypes makes a copy, not a reference)
my_outbuf = outbufp.contents[:(outbuflen.value)]
my_outs = outsp.contents[:(outslen.value)]
# free callee's allocations
if outbuflen.value:
run_in_thread(self.librados.rados_buffer_free, (outbufp.contents,))
if outslen.value:
run_in_thread(self.librados.rados_buffer_free, (outsp.contents,))
return (ret, my_outbuf, my_outs)
def wait_for_latest_osdmap(self):
self.require_state("connected")
return run_in_thread(self.librados.rados_wait_for_latest_osdmap, (self.cluster,))
def blacklist_add(self, client_address, expire_seconds = 0):
"""
Blacklist a client from the OSDs
:param client_address: client address
:type client_address: str
:param expire_seconds: number of seconds to blacklist
:type expire_seconds: int
:raises: :class:`Error`
"""
self.require_state("connected")
ret = run_in_thread(self.librados.rados_blacklist_add,
(self.cluster, c_char_p(client_address),
c_uint32(expire_seconds)))
if ret < 0:
raise make_ex(ret, "error blacklisting client '%s'" % client_address)
class ObjectIterator(object):
"""rados.Ioctx Object iterator"""
def __init__(self, ioctx):
self.ioctx = ioctx
self.ctx = c_void_p()
ret = run_in_thread(self.ioctx.librados.rados_nobjects_list_open,
(self.ioctx.io, byref(self.ctx)))
if ret < 0:
raise make_ex(ret, "error iterating over the objects in ioctx '%s'" | |
cmds.getAttr(material+".intior")
bsdfElement.addChild( FloatParameter('intIOR', intIOR) )
# Get exterior IOR preset or value
exteriorMaterialName = cmds.getAttr(material + ".exteriorMaterial", asString=True)
exteriorMaterialName = exteriorMaterialName.split('-')[0].strip()
if exteriorMaterialName in iorMaterialUIToPreset:
exteriorMaterialPreset = iorMaterialUIToPreset[exteriorMaterialName]
bsdfElement.addChild( StringParameter('extIOR', exteriorMaterialPreset) )
else:
extIOR = cmds.getAttr(material+".extior")
bsdfElement.addChild( FloatParameter('extIOR', extIOR) )
bsdfElement.addChild( TexturedColorAttributeElement(material, "specularReflectance") )
bsdfElement.addChild( TexturedColorAttributeElement(material, "specularTransmittance") )
return bsdfElement
def writeShaderWard(material, materialName):
bsdfElement = BSDFElement('ward', materialName)
variant = cmds.getAttr(material+".variant", asString=True)
if variant in wardVariantUIToPreset:
variantPreset = wardVariantUIToPreset[variant]
else:
variantPreset = "balanced"
bsdfElement.addChild( StringParameter('variant', variantPreset) )
bsdfElement.addChild( TexturedFloatAttributeElement(material, "alphaU") )
bsdfElement.addChild( TexturedFloatAttributeElement(material, "alphaV") )
bsdfElement.addChild( TexturedColorAttributeElement(material, "diffuseReflectance") )
bsdfElement.addChild( TexturedColorAttributeElement(material, "specularReflectance") )
return bsdfElement
def writeShaderIrawan(material, materialName):
filename = cmds.getAttr(material+".filename", asString=True)
repeatu = cmds.getAttr(material+".repeatu")
repeatv = cmds.getAttr(material+".repeatv")
warpkd = cmds.getAttr(material+".warpkd")
warpks = cmds.getAttr(material+".warpks")
weftkd = cmds.getAttr(material+".weftkd")
weftks = cmds.getAttr(material+".weftks")
bsdfElement = BSDFElement('irawan', materialName)
bsdfElement.addChild( StringParameter('filename', filename) )
bsdfElement.addChild( FloatParameter('repeatU', repeatu) )
bsdfElement.addChild( FloatParameter('repeatV', repeatv) )
bsdfElement.addChild( ColorParameter('warp_kd', warpkd[0], colorspace='rgb') )
bsdfElement.addChild( ColorParameter('warp_ks', warpks[0], colorspace='rgb') )
bsdfElement.addChild( ColorParameter('weft_kd', weftkd[0], colorspace='rgb') )
bsdfElement.addChild( ColorParameter('weft_ks', weftks[0], colorspace='rgb') )
return bsdfElement
def writeShaderTwoSided(material, materialName):
bsdfElement = BSDFElement('twosided', materialName)
frontBSDFElement = NestedBSDFElement(material, "frontBSDF")
bsdfElement.addChild( frontBSDFElement )
backBSDFElement = NestedBSDFElement(material, "backBSDF", useDefault=False)
if backBSDFElement:
bsdfElement.addChild( backBSDFElement )
return bsdfElement
def writeShaderMixture(material, materialName):
bsdfElement = BSDFElement('mixturebsdf', materialName)
weight1 = cmds.getAttr(material+".weight1")
weight2 = cmds.getAttr(material+".weight2")
weight3 = cmds.getAttr(material+".weight3")
weight4 = cmds.getAttr(material+".weight4")
weights = [weight1, weight2, weight3, weight4]
weights = [x for x in weights if x != 0]
weightString = ", ".join(map(str, weights))
if weight1 > 0.0:
bsdf1Element = NestedBSDFElement(material, "bsdf1")
bsdfElement.addChild( bsdf1Element )
if weight2 > 0.0:
bsdf2Element = NestedBSDFElement(material, "bsdf2")
bsdfElement.addChild( bsdf2Element )
if weight3 > 0.0:
bsdf3Element = NestedBSDFElement(material, "bsdf3")
bsdfElement.addChild( bsdf3Element )
if weight4 > 0.0:
bsdf4Element = NestedBSDFElement(material, "bsdf4")
bsdfElement.addChild( bsdf4Element )
bsdfElement.addChild( StringParameter('weights', weightString) )
return bsdfElement
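# Note (behaviour inferred from the code above, not from the Mitsuba manual):
# only non-zero weights are emitted, so a material with weight1=0.7 and
# weight2=0.3 produces weights="0.7, 0.3" plus two nested BSDF child elements,
# and the remaining bsdf3/bsdf4 slots are skipped entirely.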
def writeShaderBlend(material, materialName):
bsdfElement = BSDFElement('blendbsdf', materialName)
bsdfElement.addChild( TexturedFloatAttributeElement(material, "weight") )
bsdf1Element = NestedBSDFElement(material, "bsdf1")
bsdfElement.addChild( bsdf1Element )
bsdf2Element = NestedBSDFElement(material, "bsdf2")
bsdfElement.addChild( bsdf2Element )
return bsdfElement
def writeShaderMask(material, materialName):
bsdfElement = BSDFElement('mask', materialName)
bsdfElement.addChild( TexturedColorAttributeElement(material, "opacity") )
bsdf1Element = NestedBSDFElement(material, "bsdf")
bsdfElement.addChild( bsdf1Element )
return bsdfElement
def writeShaderBump(material, materialName):
bsdfElement = BSDFElement('bumpmap', materialName)
bumpScale = cmds.getAttr(material+".bumpScale")
bsdfElement.addChild( TexturedColorAttributeElement(material, "texture", scale=bumpScale) )
bsdf1Element = NestedBSDFElement(material, "bsdf")
bsdfElement.addChild( bsdf1Element )
return bsdfElement
def writeShaderHK(material, materialName):
bsdfElement = BSDFElement('hk', materialName)
useSigmaSA = cmds.getAttr(material+".useSigmaSA")
useSigmaTAlbedo = cmds.getAttr(material+".useSigmaTAlbedo")
if useSigmaSA:
bsdfElement.addChild( TexturedColorAttributeElement(material, "sigmaS") )
bsdfElement.addChild( TexturedColorAttributeElement(material, "sigmaA") )
elif useSigmaTAlbedo:
bsdfElement.addChild( TexturedColorAttributeElement(material, "sigmaT") )
bsdfElement.addChild( TexturedColorAttributeElement(material, "albedo") )
else:
materialString = cmds.getAttr(material+".material", asString=True)
bsdfElement.addChild( StringParameter('material', materialString) )
thickness = cmds.getAttr(material+".thickness")
bsdfElement.addChild( FloatParameter('thickness', thickness) )
phaseFunctionUIName = cmds.getAttr(material+".phaseFunction", asString=True)
if phaseFunctionUIName in phaseFunctionUIToPreset:
phaseFunctionName = phaseFunctionUIToPreset[phaseFunctionUIName]
phaseFunctionElement = PhaseElement(phaseFunctionName)
if phaseFunctionName == 'hg':
g = cmds.getAttr(material+".phaseFunctionHGG")
phaseFunctionElement.addChild( FloatParameter('g', g) )
elif phaseFunctionName == 'microflake':
s = cmds.getAttr(material+".phaseFunctionMFSD")
phaseFunctionElement.addChild( FloatParameter('stddev', s) )
bsdfElement.addChild( phaseFunctionElement )
return bsdfElement
def writeShaderObjectAreaLight(material, materialName):
elementDict = EmitterElement('area', materialName)
samplingWeight = cmds.getAttr(material+".samplingWeight")
elementDict.addChild( TexturedColorAttributeElement(material, "radiance") )
elementDict.addChild( FloatParameter('samplingWeight', samplingWeight) )
return elementDict
def writeShaderDipoleSSS(material, materialName):
sssElement = SubsurfaceElement('dipole', materialName)
useSigmaSA = cmds.getAttr(material+".useSigmaSA")
useSigmaTAlbedo = cmds.getAttr(material+".useSigmaTAlbedo")
if useSigmaSA:
sigmaS = cmds.getAttr(material+".sigmaS")
sigmaA = cmds.getAttr(material+".sigmaA")
sssElement.addChild( ColorParameter("sigmaS", sigmaS[0], colorspace='rgb') )
sssElement.addChild( ColorParameter("sigmaA", sigmaA[0], colorspace='rgb') )
elif useSigmaTAlbedo:
sigmaT = cmds.getAttr(material+".sigmaT")
albedo = cmds.getAttr(material+".albedo")
sssElement.addChild( ColorParameter("sigmaT", sigmaT[0], colorspace='rgb') )
sssElement.addChild( ColorParameter("albedo", albedo[0], colorspace='rgb') )
else:
materialString = cmds.getAttr(material+".material", asString=True)
sssElement.addChild( StringParameter('material', materialString) )
scale = cmds.getAttr(material+".scale")
sssElement.addChild( FloatParameter("scale", scale) )
irrSamples = cmds.getAttr(material+".irrSamples")
sssElement.addChild( IntegerParameter("irrSamples", irrSamples) )
# Get interior IOR preset or value
interiorMaterialName = cmds.getAttr(material + ".interiorMaterial", asString=True)
interiorMaterialName = interiorMaterialName.split('-')[0].strip()
if interiorMaterialName in iorMaterialUIToPreset:
interiorMaterialPreset = iorMaterialUIToPreset[interiorMaterialName]
sssElement.addChild( StringParameter('intIOR', interiorMaterialPreset) )
else:
intIOR = cmds.getAttr(material+".intior")
sssElement.addChild( FloatParameter('intIOR', intIOR) )
# Get exterior IOR preset or value
exteriorMaterialName = cmds.getAttr(material + ".exteriorMaterial", asString=True)
exteriorMaterialName = exteriorMaterialName.split('-')[0].strip()
if exteriorMaterialName in iorMaterialUIToPreset:
exteriorMaterialPreset = iorMaterialUIToPreset[exteriorMaterialName]
sssElement.addChild( StringParameter('extIOR', exteriorMaterialPreset) )
else:
extIOR = cmds.getAttr(material+".extior")
sssElement.addChild( FloatParameter('extIOR', extIOR) )
return sssElement
def addTwoSided(material, materialElement):
# Create a structure to be written
elementDict = BSDFElement('twosided', material)
# Remove the id so there's no chance of this embedded definition conflicting with another
# definition of the same BSDF
materialElement.removeAttribute('id')
elementDict.addChild( materialElement)
return elementDict
#
#Write a surface material (material) to a Mitsuba scene file (outFile)
#
def writeShader(material, materialName):
matType = cmds.nodeType(material)
mayaMaterialTypeToShaderFunction = {
"MitsubaSmoothCoatingShader" : writeShaderSmoothCoating,
"MitsubaConductorShader" : writeShaderConductor,
"MitsubaDielectricShader" : writeShaderDielectric,
"MitsubaDiffuseTransmitterShader" : writeShaderDiffuseTransmitter,
"MitsubaDiffuseShader" : writeShaderDiffuse,
"MitsubaPhongShader" : writeShaderPhong,
"MitsubaPlasticShader" : writeShaderPlastic,
"MitsubaRoughCoatingShader" : writeShaderRoughCoating,
"MitsubaRoughConductorShader" : writeShaderRoughConductor,
"MitsubaRoughDielectricShader" : writeShaderRoughDielectric,
"MitsubaRoughDiffuseShader" : writeShaderRoughDiffuse,
"MitsubaRoughPlasticShader" : writeShaderRoughPlastic,
"MitsubaThinDielectricShader" : writeShaderThinDielectric,
"MitsubaWardShader" : writeShaderWard,
"MitsubaIrawanShader" : writeShaderIrawan,
"MitsubaObjectAreaLightShader" : writeShaderObjectAreaLight,
"MitsubaTwoSidedShader" : writeShaderTwoSided,
"MitsubaMixtureShader" : writeShaderMixture,
"MitsubaBlendShader" : writeShaderBlend,
"MitsubaMaskShader" : writeShaderMask,
"MitsubaBumpShader" : writeShaderBump,
"MitsubaHKShader" : writeShaderHK,
"MitsubaHomogeneousParticipatingMedium" : writeMediumHomogeneous,
"MitsubaHeterogeneousParticipatingMedium" : writeMediumHeterogeneous,
"MitsubaSSSDipoleShader" : writeShaderDipoleSSS,
}
if matType in mayaMaterialTypeToShaderFunction:
writeShaderFunction = mayaMaterialTypeToShaderFunction[matType]
else:
print( "Skipping unsupported material : %s." % matType)
writeShaderFunction = None
shaderElement = None
if writeShaderFunction:
shaderElement = writeShaderFunction(material, materialName)
if "twosided" in cmds.listAttr(material) and cmds.getAttr(material + ".twosided"):
shaderElement = addTwoSided(material, shaderElement)
return shaderElement
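# Hypothetical usage sketch (the node name is invented for illustration):
#
#   shaderElement = writeShader('mitsubaDiffuse1', 'mitsubaDiffuse1')
#   if shaderElement is not None:
#       pass  # add it to the scene element tree before writing the Mitsuba .xml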
#
#Write the appropriate integrator
#
def writeIntegratorPathTracer(renderSettings, integratorMitsuba):
attrPrefixes = {
"path" : "",
"volpath" : "Volumetric",
"volpath_simple" : "SimpleVolumetric"
}
attrPrefix = attrPrefixes[integratorMitsuba]
# Get values from the scene
iPathTracerUseInfiniteDepth = cmds.getAttr("%s.%s" % (renderSettings, "i%sPathTracerUseInfiniteDepth" % attrPrefix))
iPathTracerMaxDepth = cmds.getAttr("%s.%s" % (renderSettings, "i%sPathTracerMaxDepth" % attrPrefix))
iPathTracerRRDepth = cmds.getAttr("%s.%s" % (renderSettings, "i%sPathTracerRRDepth" % attrPrefix))
iPathTracerStrictNormals = cmds.getAttr("%s.%s" % (renderSettings, "i%sPathTracerStrictNormals" % attrPrefix))
iPathTracerHideEmitters = cmds.getAttr("%s.%s" % (renderSettings, "i%sPathTracerHideEmitters" % attrPrefix))
iPathTracerMaxDepth = -1 if iPathTracerUseInfiniteDepth else iPathTracerMaxDepth
# Create a structure to be written
element = IntegratorElement(integratorMitsuba)
element.addChild( IntegerParameter('maxDepth', iPathTracerMaxDepth) )
element.addChild( IntegerParameter('rrDepth', iPathTracerRRDepth) )
element.addChild( BooleanParameter('strictNormals', iPathTracerStrictNormals) )
element.addChild( BooleanParameter('hideEmitters', iPathTracerHideEmitters) )
return element
def writeIntegratorBidirectionalPathTracer(renderSettings, integratorMitsuba):
# Get values from the scene
iBidrectionalPathTracerUseInfiniteDepth = cmds.getAttr("%s.%s" % (renderSettings, "iBidrectionalPathTracerUseInfiniteDepth"))
iBidrectionalPathTracerMaxDepth = cmds.getAttr("%s.%s" % (renderSettings, "iBidrectionalPathTracerMaxDepth"))
iBidrectionalPathTracerRRDepth = cmds.getAttr("%s.%s" % (renderSettings, "iBidrectionalPathTracerRRDepth"))
iBidrectionalPathTracerLightImage = cmds.getAttr("%s.%s" % (renderSettings, "iBidrectionalPathTracerLightImage"))
iBidrectionalPathTracerSampleDirect = cmds.getAttr("%s.%s" % (renderSettings, "iBidrectionalPathTracerSampleDirect"))
iBidrectionalPathTracerMaxDepth = -1 if iBidrectionalPathTracerUseInfiniteDepth else iBidrectionalPathTracerMaxDepth
# Create a structure to be written
elementDict = IntegratorElement(integratorMitsuba)
elementDict.addChild( IntegerParameter('maxDepth', iBidrectionalPathTracerMaxDepth) )
elementDict.addChild( IntegerParameter('rrDepth', iBidrectionalPathTracerRRDepth) )
elementDict.addChild( BooleanParameter('lightImage', iBidrectionalPathTracerLightImage) )
elementDict.addChild( BooleanParameter('sampleDirect', iBidrectionalPathTracerSampleDirect) )
return elementDict
def writeIntegratorAmbientOcclusion(renderSettings, integratorMitsuba):
# Get values from the scene
iAmbientOcclusionShadingSamples = cmds.getAttr("%s.%s" % (renderSettings, "iAmbientOcclusionShadingSamples"))
iAmbientOcclusionUseAutomaticRayLength = cmds.getAttr("%s.%s" % (renderSettings, "iAmbientOcclusionUseAutomaticRayLength"))
iAmbientOcclusionRayLength = cmds.getAttr("%s.%s" % (renderSettings, "iAmbientOcclusionRayLength"))
iAmbientOcclusionRayLength = -1 if iAmbientOcclusionUseAutomaticRayLength else iAmbientOcclusionRayLength
# Create a structure to be written
elementDict = IntegratorElement(integratorMitsuba)
elementDict.addChild( IntegerParameter('shadingSamples', iAmbientOcclusionShadingSamples) )
elementDict.addChild( FloatParameter('rayLength', iAmbientOcclusionRayLength) )
return elementDict
def writeIntegratorDirectIllumination(renderSettings, integratorMitsuba):
# Get values from the scene
iDirectIlluminationShadingSamples = cmds.getAttr("%s.%s" % (renderSettings, "iDirectIlluminationShadingSamples"))
iDirectIlluminationUseEmitterAndBSDFSamples = cmds.getAttr("%s.%s" % (renderSettings, "iDirectIlluminationUseEmitterAndBSDFSamples"))
iDirectIlluminationEmitterSamples = cmds.getAttr("%s.%s" % (renderSettings, "iDirectIlluminationEmitterSamples"))
iDirectIlluminationBSDFSamples = cmds.getAttr("%s.%s" % (renderSettings, "iDirectIlluminationBSDFSamples"))
iDirectIlluminationStrictNormals = cmds.getAttr("%s.%s" % (renderSettings, "iDirectIlluminationStrictNormals"))
iDirectIlluminationHideEmitters = cmds.getAttr("%s.%s" % (renderSettings, "iDirectIlluminationHideEmitters"))
# Create a structure to be written
elementDict = IntegratorElement(integratorMitsuba)
if iDirectIlluminationUseEmitterAndBSDFSamples:
elementDict.addChild( IntegerParameter('emitterSamples', iDirectIlluminationEmitterSamples) )
elementDict.addChild( IntegerParameter('bsdfSamples', iDirectIlluminationBSDFSamples) )
else:
elementDict.addChild( IntegerParameter('shadingSamples', iDirectIlluminationShadingSamples) )
elementDict.addChild( BooleanParameter('strictNormals', iDirectIlluminationStrictNormals) )
elementDict.addChild( BooleanParameter('hideEmitters', iDirectIlluminationHideEmitters) )
return elementDict
def writeIntegratorPhotonMap(renderSettings, integratorMitsuba):
# Get values from the scene
iPhotonMapDirectSamples = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapDirectSamples"))
iPhotonMapGlossySamples = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapGlossySamples"))
iPhotonMapUseInfiniteDepth = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapUseInfiniteDepth"))
iPhotonMapMaxDepth = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapMaxDepth"))
iPhotonMapGlobalPhotons = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapGlobalPhotons"))
iPhotonMapCausticPhotons = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapCausticPhotons"))
iPhotonMapVolumePhotons = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapVolumePhotons"))
iPhotonMapGlobalLookupRadius = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapGlobalLookupRadius"))
iPhotonMapCausticLookupRadius = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapCausticLookupRadius"))
iPhotonMapLookupSize = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapLookupSize"))
iPhotonMapGranularity = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapGranularity"))
iPhotonMapHideEmitters = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapHideEmitters"))
iPhotonMapRRDepth = cmds.getAttr("%s.%s" % (renderSettings, "iPhotonMapRRDepth"))
iPhotonMapMaxDepth = -1 if iPhotonMapUseInfiniteDepth else iPhotonMapMaxDepth
# Create a structure to be written
elementDict = IntegratorElement(integratorMitsuba)
elementDict.addChild( IntegerParameter('directSamples', iPhotonMapDirectSamples) )
elementDict.addChild( IntegerParameter('glossySamples', iPhotonMapGlossySamples) )
elementDict.addChild( IntegerParameter('maxDepth', iPhotonMapMaxDepth) )
elementDict.addChild( IntegerParameter('globalPhotons', iPhotonMapGlobalPhotons) )
elementDict.addChild( IntegerParameter('causticPhotons', iPhotonMapCausticPhotons) )
elementDict.addChild( IntegerParameter('volumePhotons', iPhotonMapVolumePhotons) )
elementDict.addChild( FloatParameter('globalLookupRadius', iPhotonMapGlobalLookupRadius) )
elementDict.addChild( FloatParameter('causticLookupRadius', iPhotonMapCausticLookupRadius) )
elementDict.addChild( IntegerParameter('lookupSize', iPhotonMapLookupSize) )
elementDict.addChild( IntegerParameter('granularity', iPhotonMapGranularity) )
elementDict.addChild( BooleanParameter('hideEmitters', iPhotonMapHideEmitters) )
elementDict.addChild( IntegerParameter('rrDepth', iPhotonMapRRDepth) )
return elementDict
def writeIntegratorProgressivePhotonMap(renderSettings, integratorMitsuba):
# Get values from the scene
attrPrefixes = {
"ppm" : "",
"sppm" : "Stochastic",
}
attrPrefix = attrPrefixes[integratorMitsuba]
iProgressivePhotonMapUseInfiniteDepth = cmds.getAttr("%s.%s" % (renderSettings, "i%sProgressivePhotonMapUseInfiniteDepth" % attrPrefix))
iProgressivePhotonMapMaxDepth = cmds.getAttr("%s.%s" % (renderSettings, "i%sProgressivePhotonMapMaxDepth" % attrPrefix))
iProgressivePhotonMapPhotonCount = cmds.getAttr("%s.%s" % (renderSettings, "i%sProgressivePhotonMapPhotonCount" % attrPrefix))
    iProgressivePhotonMapInitialRadius = cmds.getAttr("%s.%s" % (renderSettings, "i%sProgressivePhotonMapInitialRadius" % attrPrefix))
<filename>ensembler/visualisation/plotConveyorBelt.py
import matplotlib.patches as patches
import matplotlib.patheffects as path_effects
import matplotlib.pyplot as plt
import numpy as np
def calc_lam(CapLam, i=0, numsys=8, w=0.1):
ome = (CapLam + i * np.pi * 2.0 / numsys) % (2. * np.pi)
if ome > np.pi:
ome = 2.0 * np.pi - ome
return ome / np.pi
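# Worked example (comments only, not from the original module): for the default
# 8-replica belt at CapLam = 0.0, each replica's belt position is folded onto [0, 1]:
#
#   calc_lam(0.0, i=0, numsys=8)  ->  0.0   # replica sits at end state A (lambda = 0)
#   calc_lam(0.0, i=4, numsys=8)  ->  1.0   # replica sits at end state B (lambda = 1)
#   calc_lam(0.0, i=6, numsys=8)  ->  0.5   # replica on the return side of the belt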
def drawCirc(ax, radius, centX, centY, angle_, theta2_, lineWidth=3, color_='black'):
# ========Line
arc = patches.Arc([centX, centY], radius, radius, angle=angle_,
theta1=0, theta2=theta2_, capstyle='round', linestyle='-', lw=lineWidth, color=color_)
ax.add_patch(arc)
# ========Create the arrow head
# endX=centX+(radius/2)*np.cos((theta2_+angle_)/180*np.pi) #Do trig to determine end position
# endY=centY+(radius/2)*np.sin((theta2_+angle_)/180*np.pi)
# ax.add_patch( #Create triangle as arrow head
# patches.RegularPolygon(
# (endX, endY), # (x,y)
# 3, # number of vertices
# radius/10, # radius
# (angle_+theta2_)/180*np.pi, # orientation
# color=color_
# )
# )
# ========Create the arrow head
begX = centX + (radius / 2) * np.cos((angle_) / 180 * np.pi) # Do trig to determine end position
begY = centY + (radius / 2) * np.sin((angle_) / 180 * np.pi)
ax.add_patch( # Create triangle as arrow head
patches.RegularPolygon(
(begX, begY), # (x,y)
3, # number of vertices
radius / 20, # radius
(180 + angle_) / 180 * np.pi, # orientation
color=color_
)
)
    ax.set_xlim([centX - radius, centX + radius])
    ax.set_ylim([centY - radius, centY + radius])
def drawFunicular(x, y, CapLam=0.1, M=2, drawArrows=False):
pSize = 2.009
goldRat = 1.618
lineWidth = 1
[path_effects.SimpleLineShadow(), path_effects.Normal()]
fig = plt.figure(figsize=(pSize * goldRat, pSize))
ax = fig.gca()
fig.subplots_adjust(left=0.1, right=1.0-0.1, bottom=0.24, top=0.99)
rx=0.05
ry=rx
shifty=0.75/goldRat
cvb_bot=np.zeros((90,2))
cvb_bot[:,0]=np.linspace(calc_lam(CapLam, 1, numsys=2), 1.0-rx, 90)
cvb_bot[:,1]=np.ones(90)*shifty
cvb_top=np.zeros((90,2))
cvb_top[:,0]=np.linspace(calc_lam(CapLam, 0, numsys=2), 1.0-rx, 90)
cvb_top[:,1]=np.ones(90)*(shifty+2.0*ry)
lamVals=x-x.min()
lamVals/=lamVals.max()
gVals=y-y.min()
if gVals.max() != 0.0:
gVals/=(2.0*gVals.max()*goldRat)
else:
gVals+=1/(2.0*goldRat)
ax.plot(lamVals[2:], gVals[2:], 'k', lw=lineWidth)
l = CapLam
numsys = M
rotation = []
y = []
for i in range(M):
if calc_lam(CapLam, i, numsys=M) > rx and calc_lam(CapLam, i, numsys=M) < (1.0 - rx):
rotation.append(45)
y.append(1.0)
elif calc_lam(CapLam, i, numsys=M) < rx:
alpha = np.arcsin((rx - calc_lam(CapLam, i, numsys=M)) / rx)
rotation.append(45 - alpha / np.pi * 180.0)
y.append(np.cos(alpha))
else:
alpha = np.arcsin((rx - (1 - calc_lam(CapLam, i, numsys=M))) / rx)
rotation.append(45 - alpha / np.pi * 180.0)
y.append(np.cos(alpha))
shiftMarker = 0.02 * np.sqrt(2)
ax.plot(cvb_bot[:, 0], cvb_bot[:, 1], 'k', lw=lineWidth, zorder=1)
ax.plot(cvb_top[:, 0], cvb_top[:, 1], 'k', lw=lineWidth, zorder=1)
# ax.add_artist(patches.Arc((rx,shifty+ry), 2*rx, 2*ry, theta1=90, theta2=270, lw=lineWidth))
ax.add_artist(patches.Arc((1.0 - rx, shifty + ry), 2 * rx, 2 * ry, theta1=270, theta2=90, lw=lineWidth))
# ax.add_artist(patches.Arc((rx,shifty+ry), 1.4*rx, 1.4*ry, lw=lineWidth))
ax.add_artist(patches.Arc((1.0 - rx, shifty + ry), 1.4 * rx, 1.4 * ry, lw=lineWidth))
# ax.annotate(r'$\Lambda=0$', xy=(-0.01, shifty+ry), xytext=(-0.05, shifty+ry), va='center', ha='right', arrowprops=dict(arrowstyle='-'))
# ax.annotate(r'$\Lambda=\frac{\pi}{2}$', xy=(0.5, shifty+2*ry+0.01), xytext=(0.5, shifty+2*ry+0.05), va='bottom', ha='center', arrowprops=dict(arrowstyle='-'))
# ax.annotate(r'$\Lambda=\frac{3\pi}{2}$', xy=(0.5, shifty-0.01), xytext=(0.5, shifty-0.05), va='top', ha='center', arrowprops=dict(arrowstyle='-'))
# ax.annotate(r'$\Lambda=\pi$', xy=(1.01, shifty+ry), xytext=(1.05, shifty+ry), va='center', ha='left', arrowprops=dict(arrowstyle='-'))
# if np.fabs(rotation[0]-45)>0.0001:
# print(alpha)
# ax.annotate('Current state:\n$\Lambda={:.1f}$'.format(CapLam), xy=(calc_lam(CapLam, 0, numsys=M), shifty+ry+np.cos(alpha)*ry),
# xytext=(calc_lam(CapLam, 0, numsys=M)-np.sin(alpha)*1.5*rx, shifty+(1+np.cos(alpha)*2.5)*ry),
# arrowprops=dict(arrowstyle='<-', linewidth=3), va='center', ha='center', zorder=0)
# else:
# ax.annotate('Current state:\n$\Lambda={:.1f}$'.format(CapLam), xy=(calc_lam(CapLam, 0, numsys=M), shifty+2.0*ry+shiftMarker),
# xytext=(calc_lam(CapLam, 0, numsys=M), shifty+3.5*ry),
# arrowprops=dict(arrowstyle='<-', linewidth=3), va='center', ha='center', zorder=0)
# arrows in the conveyor belt
# drawCirc(ax,rx*0.8,rx,shifty+ry,45,270, color_='red')
drawCirc(ax, rx * 0.8, 1.0 - rx, shifty + ry, 225, 270, lineWidth=lineWidth, color_='red')
for i in range(int(M / 2)):
x = calc_lam(CapLam, i, numsys=M) - np.sqrt(1 - y[i] ** 2) * shiftMarker
ax.add_patch( # Create triangle as arrow head
patches.RegularPolygon(
(x, shifty + ry + y[i] * ry), # (x,y)
4, # number of vertices
0.02, # radius
rotation[i] / 180.0 * np.pi, # orientation
color='red',
zorder=10
)
)
ax.scatter(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker, s=30, marker='o', edgecolors='face', color='r',
zorder=10)
if drawArrows:
ax.annotate('', xy=(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker),
xytext=(x + 0.1, gVals[np.abs(lamVals - x - 0.1).argmin()] + shiftMarker),
arrowprops=dict(arrowstyle='<-', linewidth=lineWidth))
ax.plot([x, x], [gVals[np.abs(lamVals - x).argmin()], shifty + ry + y[i] * ry], color='0.8', lw=lineWidth,
zorder=0)
for i in range(int(M / 2)):
x = calc_lam(CapLam, i + int(M / 2), numsys=M) - np.sqrt(1 - y[i] ** 2) * shiftMarker
ax.add_patch( # Create triangle as arrow head
patches.RegularPolygon(
(x, shifty), # (x,y)
4, # number of vertices
0.02, # radius
rotation[i] / 180.0 * np.pi, # orientation
color='red',
zorder=10
)
)
ax.plot([x, x], [gVals[np.abs(lamVals - x).argmin()], shifty + (1.0 - y[i]) * ry], color='0.8', lw=lineWidth,
zorder=0)
ax.scatter(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker, s=30, marker='o', edgecolors='face', color='r',
zorder=10)
if drawArrows:
ax.annotate('', xy=(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker),
xytext=(x - 0.1, gVals[np.abs(lamVals - x + 0.1).argmin()] + shiftMarker),
arrowprops=dict(arrowstyle='<-', linewidth=lineWidth))
ax.set_xlim(-0.1, 1.1)
ax.set_ylim(0, 1.2 / goldRat)
ax.set_xticks([0.0, 0.5, 1.0])
ax.set_xticklabels(['0\n(A)', r'$\sfrac{1}{2}$', '1\n(B)'])
# ax.text(lamVals[-1], gVals[-1]-0.05, 'Free energy profile', ha='right', va='top')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_yticks([])
ax.spines['left'].set_color('None')
ax.spines['right'].set_color('None')
ax.spines['top'].set_color('None')
ax.annotate('', xy=(0, 0),
xytext=(0, 0.5 / goldRat), ha='center', va='bottom',
arrowprops=dict(arrowstyle='<|-', facecolor='k', linewidth=1.5))
ax.text(-0.025, 0.25 / goldRat, '$G(\lambda)$', ha='right', va='center', fontsize=14)
ax.text(1.025, 0.0, '$\lambda$', ha='left', va='center', fontsize=14)
return fig
def plotEnsembler(x, y, CapLam=0.1, M=8, drawArrows=False):
pSize = 6.027
goldRat = 1.70
lineWidth = 1
[path_effects.SimpleLineShadow(), path_effects.Normal()]
fig = plt.figure(figsize=(pSize * goldRat, pSize))
ax = fig.gca()
fig.subplots_adjust(left=0.1, right=1.0-0.1, bottom=0.25, top=0.964)
rx=0.05
ry=rx
shifty=0.75/goldRat
cvb_bot=np.zeros((90,2))
cvb_bot[:,0]=np.linspace(rx, 1.0-rx, 90)
cvb_bot[:,1]=np.ones(90)*shifty
cvb_top=np.zeros((90,2))
cvb_top[:,0]=np.linspace(rx, 1.0-rx, 90)
cvb_top[:,1]=np.ones(90)*(shifty+2.0*ry)
lamVals=x-x.min()
lamVals/=lamVals.max()
gVals=y-y.min()
if gVals.max() != 0.0:
gVals/=(2.0*gVals.max()*goldRat)
else:
gVals+=1/(2.0*goldRat)
ax.plot(lamVals[2:], gVals[2:], 'k', lw=lineWidth)
l = CapLam
numsys = M
rotation = []
y = []
# replicas boxes
for i in range(M):
if calc_lam(CapLam, i, numsys=M) > rx and calc_lam(CapLam, i, numsys=M) < (1.0 - rx):
rotation.append(45)
y.append(1.0)
elif calc_lam(CapLam, i, numsys=M) < rx:
alpha = np.arcsin((rx - calc_lam(CapLam, i, numsys=M)) / rx)
if (CapLam + i * 2 * np.pi / float(M)) % (2. * np.pi) < np.pi:
rotation.append(45 + alpha / np.pi * 180.0)
else:
rotation.append(45 - alpha / np.pi * 180.0)
y.append(np.cos(alpha))
else:
alpha = np.arcsin((rx - (1 - calc_lam(CapLam, i, numsys=M))) / rx)
if (CapLam + i * 2 * np.pi / float(M)) % (2. * np.pi) < np.pi:
rotation.append(45 - alpha / np.pi * 180.0)
else:
rotation.append(45 + alpha / np.pi * 180.0)
y.append(np.cos(alpha))
shiftMarker = 0.02 * np.sqrt(2)
# funicular
ax.plot(cvb_bot[:, 0], cvb_bot[:, 1], 'k', lw=lineWidth)
ax.plot(cvb_top[:, 0], cvb_top[:, 1], 'k', lw=lineWidth)
ax.add_artist(patches.Arc((rx, shifty + ry), 2 * rx, 2 * ry, theta1=90, theta2=270, lw=lineWidth))
ax.add_artist(patches.Arc((1.0 - rx, shifty + ry), 2 * rx, 2 * ry, theta1=270, theta2=90, lw=lineWidth))
ax.add_artist(patches.Arc((rx, shifty + ry), 1.4 * rx, 1.4 * ry, lw=lineWidth))
ax.add_artist(patches.Arc((1.0 - rx, shifty + ry), 1.4 * rx, 1.4 * ry, lw=lineWidth))
ax.annotate(r'$\Lambda=0$', xy=(0.01, shifty + ry), xytext=(-0.05, shifty + ry), va='center', ha='right',
fontsize='small', arrowprops=dict(arrowstyle='-', linewidth=lineWidth))
ax.annotate(r'$\Lambda=\frac{\pi}{2}$', xy=(0.5, shifty + 2 * ry + 0.01), xytext=(0.5, shifty + 2 * ry + 0.05),
va='bottom', ha='center', fontsize='small', arrowprops=dict(arrowstyle='-', linewidth=lineWidth))
ax.annotate(r'$\Lambda=\frac{3\pi}{2}$', xy=(0.5, shifty - 0.01), xytext=(0.5, shifty - 0.05), va='top',
ha='center', fontsize='small', arrowprops=dict(arrowstyle='-', linewidth=lineWidth))
ax.annotate(r'$\Lambda=\pi$', xy=(.99, shifty + ry), xytext=(1.05, shifty + ry), va='center', ha='left',
fontsize='small', arrowprops=dict(arrowstyle='-', linewidth=lineWidth))
if drawArrows:
if np.fabs(rotation[0] - 45) > 0.0001:
ax.annotate('Current state:\n$\Lambda={:.1f}$'.format(CapLam),
xy=(calc_lam(CapLam, 0, numsys=M), shifty + ry + np.cos(alpha) * (ry + shiftMarker)),
xytext=(
calc_lam(CapLam, 0, numsys=M) - np.sin(alpha) * 2 * rx, shifty + (1 + np.cos(alpha) * 5) * ry),
fontsize='small',
arrowprops=dict(arrowstyle='<-', linewidth=1.0, shrinkA=0.0), va='top', ha='center', zorder=0,
bbox=dict(pad=-.1, lw=0.0, color='None'))
else:
ax.annotate('Current state:\n$\Lambda={:.1f}$'.format(CapLam),
xy=(calc_lam(CapLam, 0, numsys=M), shifty + 2.0 * ry + shiftMarker),
xytext=(calc_lam(CapLam, 0, numsys=M), shifty + 6 * ry),
arrowprops=dict(arrowstyle='<-', linewidth=1.0, shrinkA=0.0), fontsize='small', va='top',
ha='center', zorder=0, bbox=dict(pad=-.1, lw=0.0, color='None'))
# arrows in the conveyor belt
drawCirc(ax, rx * 0.8, rx, shifty + ry, 45, 270, lineWidth=1.0, color_='red')
drawCirc(ax, rx * 0.8, 1.0 - rx, shifty + ry, 225, 270, lineWidth=1.0, color_='red')
# lines and markers for Epot
for i in range(M):
x = calc_lam(CapLam, i, numsys=M)
if x < rx:
rx -= np.sqrt(1 - y[i] ** 2) * shiftMarker
elif x > 1 - rx:
rx += np.sqrt(1 - y[i] ** 2) * shiftMarker
if (CapLam + i * 2 * np.pi / float(M)) % (2. * np.pi) < np.pi:
ax.add_patch( # Create triangle as arrow head
patches.RegularPolygon(
(x, shifty + ry + y[i] * ry + y[i] * shiftMarker), # (x,y)
4, # number of vertices
0.02, # radius
rotation[i] | |
<filename>draw_card/announcement.py
import aiohttp
from bs4 import BeautifulSoup
import re
from datetime import datetime, timedelta
from .config import DRAW_PATH
from pathlib import Path
from asyncio.exceptions import TimeoutError
from nonebot.log import logger
try:
import ujson as json
except ModuleNotFoundError:
import json
headers = {'User-Agent': '"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)"'}
prts_up_char = Path(DRAW_PATH + "/draw_card_up/prts_up_char.json")
genshin_up_char = Path(DRAW_PATH + "/draw_card_up/genshin_up_char.json")
pretty_up_char = Path(DRAW_PATH + "/draw_card_up/pretty_up_char.json")
guardian_up_char = Path(DRAW_PATH + "/draw_card_up/guardian_up_char.json")
prts_url = "https://ak.hypergryph.com/news.html"
genshin_url = "https://wiki.biligame.com/ys/%E7%A5%88%E6%84%BF"
pretty_url = "https://wiki.biligame.com/umamusume/%E5%85%AC%E5%91%8A"
guardian_url = "https://wiki.biligame.com/gt/%E9%A6%96%E9%A1%B5"
# Has the current UP pool announcement already expired?
def is_expired(data: dict):
times = data['time'].split('-')
for i in range(len(times)):
times[i] = str(datetime.now().year) + '-' + times[i].split('日')[0].strip().replace('月', '-')
start_date = datetime.strptime(times[0], '%Y-%m-%d').date()
end_date = datetime.strptime(times[1], '%Y-%m-%d').date()
now = datetime.now().date()
return not start_date <= now <= end_date
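# Illustrative input (an assumption about the scraped announcement format, consistent
# with the parsing above): data['time'] == '05月06日 10:00 - 06月03日 17:59' is read as
# a window from May 6 to June 3 of the current year, and is_expired() returns True only
# when today's date falls outside that window.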
# Validate the parsed UP-pool data and write it to file
def check_write(data: dict, up_char_file):
try:
if is_expired(data['char']):
for x in list(data.keys()):
data[x]['title'] = ''
else:
with open(up_char_file, 'w', encoding='utf8') as f:
json.dump(data, f, indent=4, ensure_ascii=False)
if not up_char_file.exists():
with open(up_char_file, 'w', encoding='utf8') as f:
json.dump(data, f, indent=4, ensure_ascii=False)
else:
with open(up_char_file, 'r', encoding='utf8') as f:
old_data = json.load(f)
if is_expired(old_data['char']):
return old_data
else:
with open(up_char_file, 'w', encoding='utf8') as f:
json.dump(data, f, indent=4, ensure_ascii=False)
except ValueError:
pass
return data
class PrtsAnnouncement:
def __init__(self):
self.game_name = '明日方舟'
async def _get_announcement_text(self):
async with aiohttp.ClientSession(headers=headers) as session:
async with session.get(prts_url, timeout=7) as res:
soup = BeautifulSoup(await res.text(), 'lxml')
ol = soup.find('ol', {'class': 'articleList active', 'data-category-key': 'LATEST'})
for li in ol:
itype = li.find('span', {'class': 'articleItemCate'}).text
if itype == '活动':
a = li.find('a')['href']
async with session.get(f'https://ak.hypergryph.com{a}', headers=headers, timeout=7) as res:
return await res.text()
async def update_up_char(self):
prts_up_char.parent.mkdir(parents=True, exist_ok=True)
if prts_up_char.exists():
with open(prts_up_char, 'r', encoding='utf8') as f:
data = json.load(f)
if not data.get('char'):
prts_up_char.unlink()
try:
data = {'char': {'up_char': {'6': {}, '5': {}, '4': {}}, 'title': '', 'time': '', 'pool_img': ''}}
text = await self._get_announcement_text()
soup = BeautifulSoup(text, 'lxml')
content = soup.find('div', {'class': 'article-content'})
contents = [x for x in content.contents if x.text or str(x).find('img') != -1]
start_index = -1
end_index = -1
for i in range(len(contents)):
if str(contents[i]).startswith('<p>'):
r = re.search('(.*)(寻访|复刻).*?开启', contents[i].text)
if r:
if str(contents[i+3].text).find('★') != -1:
img = contents[i-1].find('img')
if img:
data['char']['pool_img'] = img['src']
start_index = i
for j in range(i, len(contents)):
if str(contents[j]).find('注意') != -1:
end_index = j
break
break
contents = contents[start_index: end_index]
title = contents[0].text
data['char']['title'] = title[title.find('【'): title.find('】') + 1]
data['char']['time'] = str(contents[1].text).split(':', maxsplit=1)[1]
for p in contents[2:]:
p = str(p.text)
r = None
if p.find('★') != -1:
if p.find('权值') == -1:
r = re.search(r'.*?:(.*)(占(.*)★.*?的(.*)%)', p)
else:
r = re.search(r'.*?:(.*)(在(.*)★.*?以(.*)倍权值.*?)', p)
                    if r:
                        star = r.group(2)
                        chars = r.group(1)
if chars.find('/') != -1:
chars = chars.strip().split('/')
elif chars.find('\\') != -1:
chars = chars.strip().split('\\')
else:
chars = chars.split('\n')
chars = [x.replace('[限定]', '').strip() for x in chars]
probability = r.group(3)
probability = probability if int(probability) > 10 else f'权{probability}'
for char in chars:
if char.strip():
data['char']['up_char'][star][char.strip()] = probability
except TimeoutError:
logger.warning(f'更新明日方舟UP池信息超时...')
if prts_up_char.exists():
with open(prts_up_char, 'r', encoding='utf8') as f:
data = json.load(f)
except Exception as e:
logger.error(f'更新明日方舟未知错误 e:{e}')
if prts_up_char.exists():
with open(prts_up_char, 'r', encoding='utf8') as f:
data = json.load(f)
return check_write(data, prts_up_char)
class GenshinAnnouncement:
def __init__(self):
self.game_name = '原神'
async def _get_announcement_text(self):
async with aiohttp.ClientSession(headers=headers) as session:
async with session.get(genshin_url, timeout=7) as res:
return await res.text()
async def update_up_char(self):
genshin_up_char.parent.mkdir(exist_ok=True, parents=True)
data = {
'char': {'up_char': {'5': {}, '4': {}}, 'title': '', 'time': '', 'pool_img': ''},
'arms': {'up_char': {'5': {}, '4': {}}, 'title': '', 'time': '', 'pool_img': ''}
}
text = await self._get_announcement_text()
soup = BeautifulSoup(text, 'lxml')
try:
div = soup.find_all('div', {'class': 'row'})[1]
tables = div.find_all('table', {'class': 'wikitable'})
for table in tables:
trs = table.find('tbody').find_all('tr')
pool_img = trs[0].find('th').find('img')
if pool_img['title'].find('角色活动') == -1:
itype = 'arms'
else:
itype = 'char'
try:
data[itype]['pool_img'] = str(pool_img['srcset']).split(' ')[0]
except KeyError:
data[itype]['pool_img'] = pool_img['src']
data[itype]['title'] = str(pool_img['title']).split(f'期{"角色" if itype == "char" else "武器"}')[0][:-3]
data[itype]['time'] = trs[1].find('td').text
if data[itype]['time'][-1] == '\n':
data[itype]['time'] = data[itype]['time'][:-1]
if '版本更新后' in data[itype]['time']:
sp = data[itype]['time'].split('~')
end_time = datetime.strptime(sp[1].strip(), "%Y/%m/%d %H:%M")
start_time = end_time - timedelta(days=20)
data[itype]['time'] = start_time.strftime('%Y/%m/%d') + ' ~ ' + end_time.strftime('%Y/%m/%d')
tmp = ''
for tm in data[itype]['time'].split('~'):
date_time_sp = tm.split('/')
date_time_sp[2] = date_time_sp[2].strip().replace(' ', '日 ')
tmp += date_time_sp[1] + '月' + date_time_sp[2] + ' - '
data[itype]['time'] = tmp[:-2].strip()
for a in trs[2].find('td').find_all('a'):
char_name = a['title']
data[itype]['up_char']['5'][char_name] = "50"
for a in trs[3].find('td').find_all('a'):
char_name = a['title']
data[itype]['up_char']['4'][char_name] = "50"
except TimeoutError:
logger.warning(f'更新原神UP池信息超时...')
if genshin_up_char.exists():
with open(genshin_up_char, 'r', encoding='utf8') as f:
data = json.load(f)
except Exception as e:
logger.error(f'更新原神UP失败,疑似UP池已结束, e:{e}')
if genshin_up_char.exists():
with open(genshin_up_char, 'r', encoding='utf8') as f:
data = json.load(f)
data['char']['title'] = ''
data['arms']['title'] = ''
with open(genshin_up_char, 'w', encoding='utf8') as wf:
json.dump(data, wf, ensure_ascii=False, indent=4)
return data
return check_write(data, genshin_up_char)
class PrettyAnnouncement:
def __init__(self):
self.game_name = '赛马娘'
async def _get_announcement_text(self):
async with aiohttp.ClientSession(headers=headers) as session:
async with session.get(pretty_url, timeout=7) as res:
soup = BeautifulSoup(await res.text(), 'lxml')
divs = soup.find('div', {'id': 'mw-content-text'}).find('div').find_all('div')
for div in divs:
a = div.find('a')
try:
title = a['title']
except (KeyError, TypeError):
continue
if title.find('新角色追加') != -1:
url = a['href']
break
async with session.get(f'https://wiki.biligame.com/{url}', timeout=7) as res:
return await res.text(), title[:-2]
async def update_up_char(self):
data = {
'char': {'up_char': {'3': {}, '2': {}, '1': {}}, 'title': '', 'time': '', 'pool_img': ''},
'card': {'up_char': {'3': {}, '2': {}, '1': {}}, 'title': '', 'time': '', 'pool_img': ''}
}
try:
text, title = await self._get_announcement_text()
soup = BeautifulSoup(text, 'lxml')
context = soup.find('div', {'class': 'toc-sticky'})
if not context:
context = soup.find('div', {'class': 'mw-parser-output'})
data['char']['title'] = title
data['card']['title'] = title
for big in context.find_all('big'):
r = re.search(r'\d{1,2}/\d{1,2} \d{1,2}:\d{1,2}', str(big.text))
if r:
time = str(big.text)
break
else:
logger.warning('赛马娘UP无法找到活动日期....取消更新UP池子...')
return
time = time.replace('~', '-').replace('/', '月').split(' ')
time = time[0] + '日 ' + time[1] + ' - ' + time[3] + '日 ' + time[4]
data['char']['time'] = time
data['card']['time'] = time
for p in context.find_all('p'):
if str(p).find('当期UP赛马娘') != -1 and str(p).find('■') != -1:
if not data['char']['pool_img']:
try:
data['char']['pool_img'] = p.find('img')['src']
except TypeError:
for center in context.find_all('center'):
try:
img = center.find('img')
if img and str(img['alt']).find('新马娘') != -1 and str(img['alt']).find('总览') == 1:
data['char']['pool_img'] = img['src']
except (TypeError, KeyError):
pass
r = re.findall(r'.*?当期UP赛马娘([\s\S]*)<奖励内容>.*?', str(p))
if r:
for x in r:
x = str(x).split('\n')
for msg in x:
if msg.find('★') != -1:
msg = msg.replace('<br/>', '')
char_name = msg[msg.find('['):].strip()
if (star := len(msg[:msg.find('[')].strip())) == 3:
data['char']['up_char']['3'][char_name] = '70'
elif star == 2:
data['char']['up_char']['2'][char_name] = '70'
elif star == 1:
data['char']['up_char']['1'][char_name] = '70'
if str(p).find('(当期UP对象)') != -1 and str(p).find('赛马娘') == -1 and str(p).find('■') != -1:
# data['card']['pool_img'] = p.find('img')['src']
                    if not data['card']['pool_img']:
                        try:
                            data['card']['pool_img'] = p.find('img')['src']
except TypeError:
for center in context.find_all('center'):
try:
img = center.find('img')
if img and str(img['alt']).find('新卡') != -1 and str(img['alt']).find('总览') == 1:
data['card']['pool_img'] = img['src']
except (TypeError, KeyError):
pass
r = re.search(r'■全?新?支援卡(当期UP对象)([\s\S]*)</p>', str(p))
if r:
rmsg = r.group(1).strip()
rmsg = rmsg.split('<br/>')
rmsg = [x for x in rmsg if x]
for x in rmsg:
x = x.replace('\n', '').replace('・', '')
star = x[:x.find('[')].strip()
char_name = x[x.find('['):].strip()
if star == 'SSR':
data['card']['up_char']['3'][char_name] = '70'
if star == 'SR':
data['card']['up_char']['2'][char_name] = '70'
if star == 'R':
data['card']['up_char']['1'][char_name] = '70'
            # Map Japanese card names to their Chinese names
with open(DRAW_PATH + 'pretty_card.json', 'r', encoding='utf8') as f:
all_data = json.load(f)
for star in data['card']['up_char'].keys():
for name in list(data['card']['up_char'][star].keys()):
char_name = name.split(']')[1].strip()
tp_name = name[name.find('['): name.find(']') + 1].strip().replace('[', '【').replace(']', '】')
for x in all_data.keys():
if all_data[x]['名称'].find(tp_name) != -1 and all_data[x]['关联角色'] == char_name:
data['card']['up_char'][star].pop(name)
data['card']['up_char'][star][all_data[x]['中文名']] = '70'
except TimeoutError:
logger.warning(f'更新赛马娘UP池信息超时...')
if pretty_up_char.exists():
with open(pretty_up_char, 'r', encoding='utf8') as f:
data = json.load(f)
except Exception as e:
logger.error(f'赛马娘up更新未知错误 {type(e)}:{e}')
if pretty_up_char.exists():
with open(pretty_up_char, 'r', encoding='utf8') as f:
data = json.load(f)
return check_write(data, pretty_up_char)
class GuardianAnnouncement:
def __init__(self):
self.game_name = '坎公骑冠剑'
async def _get_announcement_text(self):
async with aiohttp.ClientSession(headers=headers) as session:
async with session.get(guardian_url, timeout=7) as res:
return await res.text()
async def update_up_char(self):
data = {
'char': {'up_char': {'3': {}}, 'title': '', 'time': '', 'pool_img': ''},
'arms': {'up_char': {'5': {}}, 'title': '', 'time': '', 'pool_img': ''}
}
try:
text = await self._get_announcement_text()
soup = BeautifulSoup(text, 'lxml')
context = soup.select('div.col-sm-3:nth-child(3) > div:nth-child(2) > div:nth-child(1) '
'> div:nth-child(2) > div:nth-child(3) > font:nth-child(1)')[0]
title = context.find('p').find('b').text
tmp = title.split(',')
time = ''
for msg in tmp:
r = re.search(r'[从|至](.*)(开始|结束)', msg)
if r:
time += r.group(1).strip() | |
"""Activation Layers"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import builtins
import inspect
import string
from functools import partial
from pydoc import locate
import six
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.modules.activation as af
from trident.backend.common import get_function, get_class, camel2snake,snake2camel, enforce_singleton,TensorShape
from trident.backend.pytorch_backend import Layer,Parameter
from trident.backend.pytorch_ops import *
__all__ = ['Identity', 'Sigmoid', 'Tanh', 'Relu', 'Relu6', 'LeakyRelu', 'LeakyRelu6', 'SmoothRelu','CRelu','Silu', 'PRelu', 'Swish',
'Elu', 'HardSigmoid', 'HardSwish', 'Selu', 'LecunTanh', 'SoftSign', 'SoftPlus', 'HardTanh', 'Logit',
'LogLog', 'Mish','HardMish', 'Softmax', 'Gelu', 'GptGelu','SIREN', 'LogSoftmax', 'get_activation']
class Identity(Layer):
"""
Identity activation Layer
A placeholder identity operator that is argument-insensitive.
Examples:
>>> Identity()(to_tensor([-3.0, -1.0, 0.0, 2.0]))
tensor([-3.0, -1.0, 0.0, 2.0])
"""
def __init__(self, keep_output=False,name=None):
super(Identity, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
return x
class Relu(Layer):
"""Rectified Linear Unit activation function.
With default values, it returns element-wise max(x, 0).
Otherwise, it follows:
```
f(x) = max_value if x >= max_value
f(x) = x if threshold <= x < max_value
f(x) = negative_slope * (x - threshold) otherwise
```
Examples:
>>> Relu()(to_tensor([-3.0, -1.0, 0.0, 2.0]))
"""
def __init__(self,inplace=False,keep_output=False, name=None):
super(Relu, self).__init__(keep_output=keep_output,name=name)
self._built = True
self.inplace=inplace
def forward(self, x, **kwargs):
"""
Args:
x: Input tensor.
Returns: output tensor
"""
if not hasattr(self,'inplace'):
self.inplace=False
if self.inplace and not x.is_leaf:
return torch.relu_(x)
else:
return torch.relu(x)
class Relu6(Layer):
"""Rectified Linear Unit 6 activation function.
With default values, it returns element-wise min(max(x, 0),6).
Otherwise, it follows:
```
f(x) = 6 if x >= 6
f(x) = x if threshold <= x < 6
f(x) = negative_slope * (x - threshold) otherwise
```
Examples:
>>> Relu6()(to_tensor([-3.0, -1.0, 0.0, 2.0]))
"""
def __init__(self,inplace=False, keep_output=False,name=None):
super(Relu6, self).__init__(keep_output=keep_output,name=name)
self._built = True
self.inplace=inplace
def forward(self, x, **kwargs):
if not hasattr(self,'inplace'):
self.inplace=False
if self.inplace and not x.is_leaf:
return torch.clip_(F.relu_(x),max=6)
else:
return torch.clip(F.relu(x),max=6)
class LeakyRelu(Layer):
"""Leaky version of a Rectified Linear Unit.
It allows a small gradient when the unit is not active:
```
f(x) = alpha * x if x < 0
f(x) = x if x >= 0
```
Examples:
>>> LeakyRelu()(to_tensor([-3.0, -1.0, 0.0, 2.0]))
"""
def __init__(self,inplace=False, alpha=0.2, keep_output=False,name=None):
super(LeakyRelu, self).__init__(keep_output=keep_output,name=name)
self.alpha = alpha
self._built = True
self.inplace=inplace
def forward(self, x, **kwargs):
if not hasattr(self,'inplace'):
self.inplace=False
if self.inplace and not x.is_leaf:
return F.leaky_relu(x,self.alpha,inplace=True)
else:
return F.leaky_relu(x,self.alpha,inplace=False)
def extra_repr(self):
s = 'alpha={alpha}'
return s.format(**self.__dict__)
class LeakyRelu6(Layer):
"""Leaky version of a Rectified Linear Unit.6
It allows a small gradient when the unit is not active:
```
f(x) = alpha * x if x < 0
f(x) = x if 6>=x >= 0
f(x) = 6 if x > 6
```
Examples:
>>> LeakyRelu6()(to_tensor([-3.0, -1.0, 0.0, 2.0]))
"""
def __init__(self,inplace=False, alpha=0.2, keep_output=False,name=None):
super(LeakyRelu6, self).__init__(keep_output=keep_output,name=name)
self._built = True
self.alpha = alpha
self.inplace=inplace
def forward(self, x, **kwargs):
if not hasattr(self,'inplace'):
self.inplace=False
if self.inplace and not x.is_leaf:
return torch.clip_(F.leaky_relu(x, self.alpha, inplace=True),min=-6,max=6)
else:
return torch.clip(F.leaky_relu(x, self.alpha, inplace=False),min=-6,max=6)
def extra_repr(self):
s = 'alpha={alpha}'
return s.format(**self.__dict__)
class SmoothRelu(Layer):
"""Smooth_relu activation Layer
Examples:
>>> SmoothRelu()(to_tensor([-3.0, -1.0, 0.0, 2.0]))
"""
def __init__(self, keep_output=False,name=None):
super(SmoothRelu, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
return smooth_relu(x)
class CRelu(Layer):
"""Computes Concatenated ReLU.
Concatenates a ReLU which selects only the positive part of the activation
with a ReLU which selects only the *negative* part of the activation.
Note that as a result this non-linearity doubles the depth of the activations.
Source: [Understanding and Improving Convolutional Neural Networks via
Concatenated Rectified Linear Units. <NAME>, et
al.](https://arxiv.org/abs/1603.05201)
References:
Understanding and Improving Convolutional Neural Networks via Concatenated
Rectified Linear Units:
[Shang et al., 2016](http://proceedings.mlr.press/v48/shang16)
([pdf](http://proceedings.mlr.press/v48/shang16.pdf))
"""
def __init__(self, axis=1,keep_output=False,name=None):
super(CRelu, self).__init__(keep_output=keep_output,name=name)
self._built = True
self.axis=axis
def forward(self, x, **kwargs):
return crelu(x,axis=self.axis)
class Silu(Layer):
"""Applies the silu function, element-wise.
.. math::
\text{silu}(x) = x * \sigma(x), \text{where } \sigma(x) \text{ is the logistic sigmoid.}
.. note::
See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_
where the SiLU (Sigmoid Linear Unit) was originally coined, and see
`Sigmoid-Weighted Linear Units for Neural Network Function Approximation
in Reinforcement Learning <https://arxiv.org/abs/1702.03118>`_ and `Swish:
a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941v1>`_
where the SiLU was experimented with later.
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = Silu()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, keep_output=False,name=None):
super(Silu, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
return torch.nn.functional.silu(x)
class PRelu(Layer):
"""Parametric Rectified Linear Unit.
It follows:
```
f(x) = alpha * x for x < 0
f(x) = x for x >= 0
```
    where `alpha` is a learned parameter; it is a 1-D array whose length equals 1 or input_filters.
    Args:
        num_parameters (int, optional): if None, num_parameters will equal input_filters.
        init (float): initial value of the parameter
"""
def __init__(self, num_parameters=None, init=0.25, keep_output=False,name=None):
super(PRelu, self).__init__(keep_output=keep_output,name=name)
self.num_parameters = None
if num_parameters == 1:
self.num_parameters = num_parameters
self.init = init
self.weight = None
def build(self, input_shape:TensorShape):
if self._built == False:
if self.num_parameters is None:
self.num_parameters = self.input_filters
self.weight = Parameter(ones((self.num_parameters)) * self.init)
self._built = True
def forward(self, x, **kwargs):
pos = relu(x)
reshape_shape =[1]*len(x.shape)
reshape_shape[1] =self.num_parameters
neg = self.weight.view(*reshape_shape) * (x - abs(x)) * 0.5
return pos + neg
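# Rough usage sketch (an assumption, not taken from the original file): with
# num_parameters left as None, PRelu learns one slope per input channel, e.g. for
# NCHW input:
#
#   layer = PRelu()                               # weight is built lazily on first call
#   out = layer(to_tensor(np.random.randn(2, 8, 16, 16).astype('float32')))
#   # layer.weight has shape (8,) and is broadcast over the input as (1, 8, 1, 1)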
class Sigmoid(Layer):
"""Sigmoid activation layer.
Examples:
>>> Sigmoid()(to_tensor([-3.0, -1.0, 0.0, 2.0]))
"""
def __init__(self, keep_output=False,name=None):
super(Sigmoid, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
"""
Args:
x: Input tensor.
Returns: output tensor
"""
return sigmoid(x)
class Tanh(Layer):
""" Tanh activation layer.
Examples:
>>> Tanh()(to_tensor([-3.0, -1.0, 0.0, 2.0]))
"""
def __init__(self, keep_output=False,name=None):
super(Tanh, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
return tanh(x)
class Swish(Layer):
""" Self-Gated Activation Function.
it follows:
```
f(x) = x * sigmoid(x)
```
References:
Swish: a Self-Gated Activation Function
https://arxiv.org/abs/1710.05941v1
Examples:
>>> Swish()(to_tensor([[-3.0, -1.0, 0.0, 2.0]])).cpu()
tensor([[-0.1423, -0.2689, 0.0000, 1.7616]])
"""
def __init__(self, keep_output=False,name=None):
super(Swish, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
return swish(x)
class HardSigmoid(Layer):
""" Hard sigmoid activation layer.
it follows:
```
f(x) = relu6(x + 3) / 6
```
Examples:
>>> HardSigmoid()(to_tensor([-3.0, -1.0, 0.0, 2.0]))
"""
def __init__(self, inplace=False, keep_output=False,name=None):
super(HardSigmoid, self).__init__(keep_output=keep_output,name=name)
self.inplace = inplace
self._built = True
def forward(self, x, **kwargs):
return hard_sigmoid(x)
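# Worked values for the formula above, relu6(x + 3) / 6 (comments only):
#   x = -3  ->  0.0
#   x =  0  ->  0.5
#   x =  3  ->  1.0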
class HardSwish(Layer):
"""Hard swish Activation Function.
Memory saving version of swish
it follows:
```
f(x) = x * hard_sigmoid(x)
```
References:
Searching for MobileNetV3
https://arxiv.org/abs/1905.02244
Examples:
>>> HardSwish()(to_tensor([[-3.0, -1.0, 0.0, 2.0]])).cpu()
tensor([[-0.0000, -0.3333, 0.0000, 1.6667]])
"""
def __init__(self, keep_output=False,name=None):
super(HardSwish, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
return hard_swish(x)
class HardTanh(Layer):
"""Hard tanh Activation Function.
Examples:
>>> HardTanh()(to_tensor([-3.0, -1.0, 0.0, 2.0]))
"""
def __init__(self, keep_output=False,name=None):
super(HardTanh, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
return hard_tanh(x)
class Selu(Layer):
"""Selu activation function
    Scaled exponential linear unit operation. Computes, element-wise,
    ``scale * x`` for ``x >= 0`` and ``scale * alpha * (exp(x) - 1)`` otherwise.
scale=1.0507009873554804934193349852946, alpha=1.6732632423543772848170429916717
Args:
x (tensor): input tensor
name(string, None): name of the layer.
Returns:The output tensor has the same shape as ``x``
Examples:
>>> selu(to_tensor([[-1, -0.5, 0, 1, 2]]))
tensor([[-1.1113, -0.6918, 0.0000, 1.0507, 2.1014]])
References:
paper: https://arxiv.org/abs/1706.02515
Self-Normalizing Neural Networks
<NAME>, <NAME>, <NAME>, <NAME>
"""
def __init__(self, keep_output=False,name=None):
super(Selu, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
return selu(x)
class Elu(Layer):
"""Exponential Linear Unit.
It follows:
```
f(x) = alpha * (exp(x) - 1.) for x < 0
f(x) = x for x >= 0
```
"""
def __init__(self, keep_output=False,name=None):
super(Elu, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
return elu(x)
class LecunTanh(Layer):
def __init__(self, keep_output=False,name=None):
super(LecunTanh, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
        # LeCun's scaled tanh: 1.7159 * tanh(2/3 * x)
        return 1.7159 * tanh(2.0 / 3.0 * x)
class SoftSign(Layer):
def __init__(self, keep_output=False,name=None):
super(SoftSign, self).__init__(keep_output=keep_output,name=name)
def forward(self, x, **kwargs):
return soft_sign(x)
class SoftPlus(Layer):
def __init__(self, keep_output=False,name=None):
super(SoftPlus, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
return soft_plus(x)
class Logit(Layer):
def __init__(self, keep_output=False,name=None):
super(Logit, self).__init__(keep_output=keep_output,name=name)
self._built = True
def forward(self, x, **kwargs):
return logit(x)
class LogLog(Layer):
"""LogLog Activation Function
it follows:
```
f(x) = 1 - exp(-exp(x))
```
References:
"Complementary Log-Log and Probit: Activation Functions Implemented in Artificial Neural Networks"
https://ieeexplore.ieee.org/document/4626755/
Examples:
>>> LogLog()(to_tensor([-3.0, -1.0, 0.0, 2.0]))
        tensor([0.0486, 0.3078, 0.6321, 0.9994])
"""
def __init__(self, keep_output=False,name=None):
super(LogLog, self).__init__(keep_output=keep_output,name=name)
self._built = True
def | |
<filename>plugins/modules/panos_export.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCUMENTATION = '''
---
module: panos_export
short_description: export file from PAN-OS devices
description:
- Export files from PAN-OS device
author:
- <NAME> (@mrichardson03)
version_added: "2.9"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPI U(https://pypi.python.org/pypi/pandevice)
- xmltodict
notes:
- Checkmode is NOT supported.
- Panorama is supported.
extends_documentation_fragment:
- paloaltonetworks.panos.fragments.transitional_provider
options:
category:
description:
- Element type to export.
choices:
- application-block-page
- application-pcap
- captive-portal-text
- certificate
- configuration
- credential-block-page
- credential-coach-text
- data-filter-block-page
- device-state
- file-block-continue-page
- file-block-page
- filter-pcap
- global-protect-portal-custom-help-page
- global-protect-portal-custom-home-page
- global-protect-portal-custom-login-page
- global-protect-portal-custom-welcome-page
- mfa-login-page
- safe-search-block-page
- ssl-cert-status-page
- ssl-optout-text
- stats-dump
- tech-support
- threat-pcap
- url-block-page
- url-coach-text
- virus-block-page
        default: 'configuration'
certificate_name:
description:
- Name of the certificate to export.
type: string
certificate_format:
description:
- Format for the certificate.
type: string
choices:
- pem
- pkcs10
- pkcs12
certificate_include_keys:
description:
- Whether to include the private key in the export.
default: False
type: bool
certificate_passphrase:
description:
- Passphrase used to encrypt the certificate and/or private key.
type: string
filename:
description:
- Local path to output file (if any).
type: string
application_pcap_name:
description:
- When `category` is `application-pcap`, this can be a blank string, a packet capture directory name,
or a packet capture name. If the value is either blank or a directory name, a list of directories or
packet capture files will be returned. If the value is a packet capture file name, the file will be
written to `filename`.
type: string
dlp_pcap_name:
description:
- When `category` is `dlp-pcap`, this value can be a blank string, or a packet capture name. If the value
is blank, a list of packet capture files will be returned. If the value is a packet capture file name,
the file will be written to `filename`.
type: string
dlp_password:
description:
- Password used to decrypt DLP packet capture.
type: string
filter_pcap_name:
description:
- When `category` is `filter-pcap`, this value can be a blank string, or a packet capture name. If the
value is blank, a list of packet capture files will be returned. If the value is a packet capture file
name, the file will be written to `filename`.
type: string
threat_pcap_id:
description:
- When `category` is `threat-pcap`, this value is a unique identifier for the packet capture, and can be
obtained from the **pcap_id** field in the THREAT log.
type: string
threat_pcap_search_time:
description:
        - When `category` is `threat-pcap`, this value is used to narrow the search for the **pcap_id** and is
used to set a time window in the range -5 minutes to +2 hours of the time specified. The search time is
typically set to the **receive_time** field in the THREAT log. The PAN-OS log time string format is used,
for example '2015/01/20 10:51:09'. If the value is not specified, it will be set to the threat epoch time
which is part of the **pcap_id**.
type: string
threat_pcap_serial:
description:
- When `category` is `threat-pcap`, this value is required when exporting from Panorama and is used to
specify the device to fetch the packet capture from.
type: string
timeout:
description:
        - When category is set to 'tech-support', 'stats-dump', or 'device-state', the operation can take a while
to complete. This is the maximum amount of time to wait, in seconds.
type: int
default: 600
'''
EXAMPLES = '''
- name: Export configuration
panos_export:
provider: '{{ provider }}'
category: 'configuration'
filename: 'running-config.xml'
- name: Export application block page
panos_export:
provider: '{{ provider }}'
category: 'application-block-page'
filename: 'application-block-page.html'
- name: Export tech support (module will wait until file is ready)
panos_export:
provider: '{{ provider }}'
category: 'tech-support'
filename: 'tech-support.tgz'
- name: Export threat packet capture
panos_export:
provider: '{{ provider }}'
category: 'threat-pcap'
threat_pcap_id: '1206450340254187521'
threat_pcap_search_time: '2020/07/20 18:20:19'
filename: 'threat.pcap'
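# The two tasks below are illustrative sketches, not taken from the module's original
# examples; they only use parameters defined in the argument spec, and the certificate
# name and passphrase are made-up placeholders.
- name: Export certificate with private key
  panos_export:
    provider: '{{ provider }}'
    category: 'certificate'
    certificate_name: 'my-cert'
    certificate_format: 'pkcs12'
    certificate_include_keys: true
    certificate_passphrase: 'changeme'
    filename: 'my-cert.pfx'
- name: List application packet capture directories
  panos_export:
    provider: '{{ provider }}'
    category: 'application-pcap'
  register: pcap_listing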
'''
RETURN = '''
stdout:
    description: If the export returns a directory listing, the listing as a JSON-formatted string
returned: success
type: string
sample: "{\"dir-listing\": {\"file\": [\"/capture-rx\", \"/capture-tx\", \"/capture-fw\"]}}"
stdout_xml:
    description: If the export returns a directory listing, the listing as an XML-formatted string
returned: success
type: string
sample: "<dir-listing><file>/capture-rx</file><file>/capture-tx</file><file>/capture-fw</file></dir-listing>"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.paloaltonetworks.panos.plugins.module_utils.panos import get_connection
try:
from pandevice.panorama import Panorama
from pandevice.errors import PanDeviceError
import pan.xapi
import xmltodict
HAS_LIB = True
except ImportError:
HAS_LIB = False
import json
import time
import os
import xml.etree.ElementTree as ET
def export_text(module, xapi, category, filename):
xapi.export(category=category)
f = None
try:
f = open(filename, 'w')
except IOError as msg:
module.fail_json(msg=msg)
else:
if category == 'configuration':
f.write(xapi.xml_root())
elif category in HTML_EXPORTS:
f.write(xapi.text_document)
f.close()
def export_binary(module, xapi, filename):
f = None
try:
f = open(filename, 'wb')
except IOError as msg:
module.fail_json(msg=msg)
else:
content = xapi.export_result['content']
if content is not None:
f.write(content)
f.close()
def export_async(module, xapi, category, filename, interval=60, timeout=600):
# Submit job, get resulting job id
xapi.export(category=category)
job_result = ET.fromstring(xapi.xml_root())
job_id = None
if job_result.find('.//job') is not None:
job_id = job_result.find('.//job').text
end_time = time.time() + timeout
while True:
# Check job progress
xapi.export(category=category, extra_qs={'action': 'status', 'job-id': job_id})
poll_result = ET.fromstring(xapi.xml_root())
status = poll_result.find('.//status')
if status.text == "FIN":
break
if time.time() > end_time:
module.fail_json(msg='Timeout')
time.sleep(interval)
# Get completed job
xapi.export(category=category, extra_qs={'action': 'get', 'job-id': job_id})
export_binary(module, xapi, filename)
HTML_EXPORTS = [
'application-block-page',
'captive-portal-text',
'credential-block-page',
'credential-coach-text',
'data-filter-block-page',
'file-block-continue-page',
'file-block-page',
'global-protect-portal-custom-help-page',
'global-protect-portal-custom-home-page',
'global-protect-portal-custom-login-page',
'global-protect-portal-custom-welcome-page',
'mfa-login-page',
'safe-search-block-page',
'ssl-cert-status-page',
'ssl-optout-text',
'url-block-page',
'url-coach-text',
'virus-block-page'
]
FILE_EXPORTS = [
'device-state', 'tech-support', 'stats-dump'
]
def main():
helper = get_connection(
with_classic_provider_spec=True,
argument_spec=dict(
category=dict(default='configuration', choices=(
['configuration'] + HTML_EXPORTS + FILE_EXPORTS +
['application-pcap', 'filter-pcap', 'dlp-pcap', 'threat-pcap']),
),
filename=dict(type='str'),
certificate_name=dict(type='str'),
certificate_format=dict(type='str', choices=['pem', 'pkcs10', 'pkcs12']),
certificate_include_keys=dict(type='bool', default=False),
certificate_passphrase=dict(type='str', no_log=True),
application_pcap_name=dict(type='str'),
dlp_pcap_name=dict(type='str'),
dlp_password=dict(type='str', no_log=True),
filter_pcap_name=dict(type='str'),
threat_pcap_id=dict(type='str'),
threat_pcap_search_time=dict(type='str'),
threat_pcap_serial=dict(type='str'),
timeout=dict(type='int', default=600),
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=False,
required_one_of=helper.required_one_of,
required_together=[
['certificate_name', 'certificate_format'],
['dlp_pcap_name', 'dlp_password'],
]
)
if not HAS_LIB:
module.fail_json(msg='pan-python, pandevice, and xmltodict are required for this module')
category = module.params['category']
filename = module.params['filename']
timeout = module.params['timeout']
parent = helper.get_pandevice_parent(module)
xapi = parent.xapi
if category in (['configuration'] + HTML_EXPORTS):
if filename is None:
module.fail_json(msg='filename is required for export')
export_text(module, xapi, category, filename)
elif category in FILE_EXPORTS:
if filename is None:
module.fail_json(msg='filename is required for export')
if category == 'stats-dump' and isinstance(parent, Panorama):
module.fail_json(msg='stats-dump is not supported on Panorama')
export_async(module, xapi, category, filename, timeout=timeout)
elif category == 'certificate':
if filename is None:
module.fail_json(msg='filename is required for export')
cert_name = module.params['certificate_name']
cert_format = module.params['certificate_format']
cert_include_keys = 'yes' if module.params['certificate_include_keys'] else 'no'
cert_passphrase = module.params['certificate_passphrase']
params = {
'certificate-name': cert_name,
'format': cert_format,
'include-keys': cert_include_keys
}
if cert_include_keys == 'yes' and cert_passphrase is None:
            module.fail_json(msg='certificate_passphrase is required when certificate_include_keys is yes')
if cert_passphrase is not None:
params['passphrase'] = cert_passphrase
xapi.export(category='certificate', extra_qs=params)
export_binary(module, xapi, filename)
elif category == 'application-pcap':
# When exporting an application pcap, from_name can be:
# - nothing, which gets you a list of directories
# - a directory name, which gets you a list of pcaps in that directory
# - a filename, which gets you the pcap file
from_name = module.params['application_pcap_name']
xapi.export(category='application-pcap', from_name=from_name)
if from_name is None or '.pcap' not in from_name:
xml_result = xapi.xml_result()
obj_dict = xmltodict.parse(xml_result)
json_output = json.dumps(obj_dict)
module.exit_json(changed=False, stdout=json_output, stdout_xml=xml_result)
else:
if filename is None:
module.fail_json(msg='filename is required for export')
export_binary(module, xapi, filename)
elif category == 'filter-pcap':
# When exporting a filter pcap, from_name can be:
# - nothing, which gets you a list of files
# - a filename, which gets you the pcap file
from_name = module.params['filter_pcap_name']
xapi.export(category='filter-pcap', from_name=from_name)
if from_name is None:
xml_result = xapi.xml_result()
obj_dict = xmltodict.parse(xml_result)
json_output = json.dumps(obj_dict)
module.exit_json(changed=False, stdout=json_output, stdout_xml=xml_result)
else:
if filename is None:
module.fail_json(msg='filename is required for export')
| |
255, 255); }"))
self.gb_pb_sell_volume_1_1_9.setText(_fromUtf8(""))
self.gb_pb_sell_volume_1_1_9.setObjectName(_fromUtf8("gb_pb_sell_volume_1_1_9"))
self.gb_pb_sell_volume_1_3_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_sell_volume_1_3_9.setGeometry(QtCore.QRect(350, 120, 41, 23))
self.gb_pb_sell_volume_1_3_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_1_3_9.setText(_fromUtf8(""))
self.gb_pb_sell_volume_1_3_9.setObjectName(_fromUtf8("gb_pb_sell_volume_1_3_9"))
self.gb_pb_sell_volume_1_2_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_sell_volume_1_2_9.setGeometry(QtCore.QRect(310, 120, 41, 23))
self.gb_pb_sell_volume_1_2_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_1_2_9.setText(_fromUtf8(""))
self.gb_pb_sell_volume_1_2_9.setObjectName(_fromUtf8("gb_pb_sell_volume_1_2_9"))
self.label_9 = QtGui.QLabel(self.gb_ETFOrder_9)
self.label_9.setGeometry(QtCore.QRect(10, 140, 381, 20))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gb_pb_buy_volume_2_2_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_2_2_9.setGeometry(QtCore.QRect(310, 180, 41, 23))
self.gb_pb_buy_volume_2_2_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_2_2_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_2_2_9.setObjectName(_fromUtf8("gb_pb_buy_volume_2_2_9"))
self.gb_pb_buy_volume_row_minus_2_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_row_minus_2_9.setGeometry(QtCore.QRect(180, 180, 31, 23))
self.gb_pb_buy_volume_row_minus_2_9.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_2_9"))
self.gb_pb_b5_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b5_9.setGeometry(QtCore.QRect(10, 240, 51, 20))
self.gb_pb_b5_9.setFlat(True)
self.gb_pb_b5_9.setObjectName(_fromUtf8("gb_pb_b5_9"))
self.gb_pb_b5_price_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b5_price_9.setGeometry(QtCore.QRect(60, 240, 51, 20))
self.gb_pb_b5_price_9.setStyleSheet(_fromUtf8(""))
self.gb_pb_b5_price_9.setFlat(True)
self.gb_pb_b5_price_9.setObjectName(_fromUtf8("gb_pb_b5_price_9"))
self.gb_pb_b4_1_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b4_1_9.setGeometry(QtCore.QRect(10, 220, 51, 20))
self.gb_pb_b4_1_9.setFlat(True)
self.gb_pb_b4_1_9.setObjectName(_fromUtf8("gb_pb_b4_1_9"))
self.gb_pb_buy_volume_row_minus_1_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_row_minus_1_9.setGeometry(QtCore.QRect(180, 160, 31, 23))
self.gb_pb_buy_volume_row_minus_1_9.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_1_9"))
self.gb_pb_b3_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b3_9.setGeometry(QtCore.QRect(10, 200, 51, 20))
self.gb_pb_b3_9.setFlat(True)
self.gb_pb_b3_9.setObjectName(_fromUtf8("gb_pb_b3_9"))
self.gb_pb_b2_volume_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b2_volume_9.setGeometry(QtCore.QRect(110, 180, 71, 20))
self.gb_pb_b2_volume_9.setFlat(True)
self.gb_pb_b2_volume_9.setObjectName(_fromUtf8("gb_pb_b2_volume_9"))
self.gb_pb_buy_volume_1_3_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_1_3_9.setGeometry(QtCore.QRect(350, 160, 41, 23))
self.gb_pb_buy_volume_1_3_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_1_3_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_1_3_9.setObjectName(_fromUtf8("gb_pb_buy_volume_1_3_9"))
self.gb_pb_buy_volume_row_3_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_row_3_9.setGeometry(QtCore.QRect(210, 200, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_3_9.setFont(font)
self.gb_pb_buy_volume_row_3_9.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_3_9.setFlat(True)
self.gb_pb_buy_volume_row_3_9.setObjectName(_fromUtf8("gb_pb_buy_volume_row_3_9"))
self.gb_pb_buy_volume_5_1_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_5_1_9.setGeometry(QtCore.QRect(270, 240, 41, 23))
self.gb_pb_buy_volume_5_1_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_5_1_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_5_1_9.setObjectName(_fromUtf8("gb_pb_buy_volume_5_1_9"))
self.gb_pb_buy_volume_4_1_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_4_1_9.setGeometry(QtCore.QRect(270, 220, 41, 23))
self.gb_pb_buy_volume_4_1_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_4_1_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_4_1_9.setObjectName(_fromUtf8("gb_pb_buy_volume_4_1_9"))
self.gb_pb_buy_volume_row_4_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_row_4_9.setGeometry(QtCore.QRect(210, 220, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_4_9.setFont(font)
self.gb_pb_buy_volume_row_4_9.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_4_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_4_9.setFlat(True)
self.gb_pb_buy_volume_row_4_9.setObjectName(_fromUtf8("gb_pb_buy_volume_row_4_9"))
self.gb_pb_buy_volume_3_1_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_3_1_9.setGeometry(QtCore.QRect(270, 200, 41, 23))
self.gb_pb_buy_volume_3_1_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_3_1_9.setObjectName(_fromUtf8("gb_pb_buy_volume_3_1_9"))
self.gb_pb_b1_price_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b1_price_9.setGeometry(QtCore.QRect(60, 160, 51, 20))
self.gb_pb_b1_price_9.setFlat(True)
self.gb_pb_b1_price_9.setObjectName(_fromUtf8("gb_pb_b1_price_9"))
self.gb_pb_buy_volume_3_2_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_3_2_9.setGeometry(QtCore.QRect(310, 200, 41, 23))
self.gb_pb_buy_volume_3_2_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_3_2_9.setObjectName(_fromUtf8("gb_pb_buy_volume_3_2_9"))
self.gb_pb_b3_volume_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b3_volume_9.setGeometry(QtCore.QRect(110, 200, 71, 20))
self.gb_pb_b3_volume_9.setFlat(True)
self.gb_pb_b3_volume_9.setObjectName(_fromUtf8("gb_pb_b3_volume_9"))
self.gb_pb_buy_volume_row_2_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_row_2_9.setGeometry(QtCore.QRect(210, 180, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_2_9.setFont(font)
self.gb_pb_buy_volume_row_2_9.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_2_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_2_9.setFlat(True)
self.gb_pb_buy_volume_row_2_9.setObjectName(_fromUtf8("gb_pb_buy_volume_row_2_9"))
self.gb_pb_b2_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b2_9.setGeometry(QtCore.QRect(10, 180, 51, 20))
self.gb_pb_b2_9.setFlat(True)
self.gb_pb_b2_9.setObjectName(_fromUtf8("gb_pb_b2_9"))
self.gb_pb_buy_volume_3_3_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_3_3_9.setGeometry(QtCore.QRect(350, 200, 41, 23))
self.gb_pb_buy_volume_3_3_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_3_3_9.setObjectName(_fromUtf8("gb_pb_buy_volume_3_3_9"))
self.gb_pb_b2_price_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b2_price_9.setGeometry(QtCore.QRect(60, 180, 51, 20))
self.gb_pb_b2_price_9.setFlat(True)
self.gb_pb_b2_price_9.setObjectName(_fromUtf8("gb_pb_b2_price_9"))
self.gb_pb_buy_volume_row_minus_3_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_row_minus_3_9.setGeometry(QtCore.QRect(180, 200, 31, 23))
self.gb_pb_buy_volume_row_minus_3_9.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_3_9"))
self.gb_pb_b3_price_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b3_price_9.setGeometry(QtCore.QRect(60, 200, 51, 20))
self.gb_pb_b3_price_9.setFlat(True)
self.gb_pb_b3_price_9.setObjectName(_fromUtf8("gb_pb_b3_price_9"))
self.gb_pb_b4_volume_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b4_volume_9.setGeometry(QtCore.QRect(110, 220, 71, 20))
self.gb_pb_b4_volume_9.setFlat(True)
self.gb_pb_b4_volume_9.setObjectName(_fromUtf8("gb_pb_b4_volume_9"))
self.gb_pb_buy_volume_1_1_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_1_1_9.setGeometry(QtCore.QRect(270, 160, 41, 23))
self.gb_pb_buy_volume_1_1_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_1_1_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_1_1_9.setObjectName(_fromUtf8("gb_pb_buy_volume_1_1_9"))
self.gb_pb_buy_volume_row_minus_5_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_row_minus_5_9.setGeometry(QtCore.QRect(180, 240, 31, 23))
self.gb_pb_buy_volume_row_minus_5_9.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_5_9"))
self.gb_pb_buy_volume_5_3_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_5_3_9.setGeometry(QtCore.QRect(350, 240, 41, 23))
self.gb_pb_buy_volume_5_3_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_5_3_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_5_3_9.setObjectName(_fromUtf8("gb_pb_buy_volume_5_3_9"))
self.gb_pb_buy_volume_2_1_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_2_1_9.setGeometry(QtCore.QRect(270, 180, 41, 23))
self.gb_pb_buy_volume_2_1_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_2_1_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_2_1_9.setObjectName(_fromUtf8("gb_pb_buy_volume_2_1_9"))
self.gb_pb_buy_volume_1_2_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_1_2_9.setGeometry(QtCore.QRect(310, 160, 41, 23))
self.gb_pb_buy_volume_1_2_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_1_2_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_1_2_9.setObjectName(_fromUtf8("gb_pb_buy_volume_1_2_9"))
self.gb_pb_buy_volume_row_5_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_row_5_9.setGeometry(QtCore.QRect(210, 240, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_5_9.setFont(font)
self.gb_pb_buy_volume_row_5_9.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_5_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_5_9.setFlat(True)
self.gb_pb_buy_volume_row_5_9.setObjectName(_fromUtf8("gb_pb_buy_volume_row_5_9"))
self.gb_pb_buy_volume_4_3_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_4_3_9.setGeometry(QtCore.QRect(350, 220, 41, 23))
self.gb_pb_buy_volume_4_3_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_4_3_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_4_3_9.setObjectName(_fromUtf8("gb_pb_buy_volume_4_3_9"))
self.gb_pb_b4_price_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b4_price_9.setGeometry(QtCore.QRect(60, 220, 51, 20))
self.gb_pb_b4_price_9.setStyleSheet(_fromUtf8("\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_b4_price_9.setFlat(True)
self.gb_pb_b4_price_9.setObjectName(_fromUtf8("gb_pb_b4_price_9"))
self.gb_pb_b5_volume_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b5_volume_9.setGeometry(QtCore.QRect(110, 240, 71, 20))
self.gb_pb_b5_volume_9.setFlat(True)
self.gb_pb_b5_volume_9.setObjectName(_fromUtf8("gb_pb_b5_volume_9"))
self.gb_pb_buy_volume_4_2_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_4_2_9.setGeometry(QtCore.QRect(310, 220, 41, 23))
self.gb_pb_buy_volume_4_2_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_4_2_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_4_2_9.setObjectName(_fromUtf8("gb_pb_buy_volume_4_2_9"))
self.gb_pb_b1_volume_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b1_volume_9.setGeometry(QtCore.QRect(110, 160, 71, 20))
self.gb_pb_b1_volume_9.setFlat(True)
self.gb_pb_b1_volume_9.setObjectName(_fromUtf8("gb_pb_b1_volume_9"))
self.gb_pb_buy_volume_2_3_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_2_3_9.setGeometry(QtCore.QRect(350, 180, 41, 23))
self.gb_pb_buy_volume_2_3_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_2_3_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_2_3_9.setObjectName(_fromUtf8("gb_pb_buy_volume_2_3_9"))
self.gb_pb_buy_volume_row_minus_4_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_row_minus_4_9.setGeometry(QtCore.QRect(180, 220, 31, 23))
self.gb_pb_buy_volume_row_minus_4_9.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_4_9"))
self.gb_pb_buy_volume_row_1_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_row_1_9.setGeometry(QtCore.QRect(210, 160, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_1_9.setFont(font)
self.gb_pb_buy_volume_row_1_9.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_1_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_1_9.setFlat(True)
self.gb_pb_buy_volume_row_1_9.setObjectName(_fromUtf8("gb_pb_buy_volume_row_1_9"))
self.gb_pb_b1_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_b1_9.setGeometry(QtCore.QRect(10, 160, 51, 20))
self.gb_pb_b1_9.setFlat(True)
self.gb_pb_b1_9.setObjectName(_fromUtf8("gb_pb_b1_9"))
self.gb_pb_buy_volume_5_2_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_5_2_9.setGeometry(QtCore.QRect(310, 240, 41, 23))
self.gb_pb_buy_volume_5_2_9.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_5_2_9.setText(_fromUtf8(""))
self.gb_pb_buy_volume_5_2_9.setObjectName(_fromUtf8("gb_pb_buy_volume_5_2_9"))
self.gb_pb_buy_total_money_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_total_money_9.setGeometry(QtCore.QRect(100, 260, 81, 23))
self.gb_pb_buy_total_money_9.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_total_money_9.setFlat(True)
self.gb_pb_buy_total_money_9.setObjectName(_fromUtf8("gb_pb_buy_total_money_9"))
self.gb_pb_buy_volume_total_minus_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_total_minus_9.setGeometry(QtCore.QRect(240, 260, 31, 23))
self.gb_pb_buy_volume_total_minus_9.setObjectName(_fromUtf8("gb_pb_buy_volume_total_minus_9"))
self.gb_pb_buy_volume_column_3_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_column_3_9.setGeometry(QtCore.QRect(350, 260, 41, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_column_3_9.setFont(font)
self.gb_pb_buy_volume_column_3_9.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_column_3_9.setFlat(True)
self.gb_pb_buy_volume_column_3_9.setObjectName(_fromUtf8("gb_pb_buy_volume_column_3_9"))
self.gb_pb_buy_volume_column_2_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_column_2_9.setGeometry(QtCore.QRect(310, 260, 41, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_column_2_9.setFont(font)
self.gb_pb_buy_volume_column_2_9.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_column_2_9.setFlat(True)
self.gb_pb_buy_volume_column_2_9.setObjectName(_fromUtf8("gb_pb_buy_volume_column_2_9"))
self.gb_pb_buy_volume_total_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_total_9.setGeometry(QtCore.QRect(210, 260, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_total_9.setFont(font)
self.gb_pb_buy_volume_total_9.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_total_9.setFlat(True)
self.gb_pb_buy_volume_total_9.setObjectName(_fromUtf8("gb_pb_buy_volume_total_9"))
self.gb_pb_buy_volume_column_1_9 = QtGui.QPushButton(self.gb_ETFOrder_9)
self.gb_pb_buy_volume_column_1_9.setGeometry(QtCore.QRect(270, 260, 41, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_column_1_9.setFont(font)
self.gb_pb_buy_volume_column_1_9.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_column_1_9.setFlat(True)
self.gb_pb_buy_volume_column_1_9.setObjectName(_fromUtf8("gb_pb_buy_volume_column_1_9"))
DockWidget.setWidget(self.dockWidgetContents)
self.retranslateUi(DockWidget)
QtCore.QMetaObject.connectSlotsByName(DockWidget)
def retranslateUi(self, DockWidget):
DockWidget.setWindowTitle(_translate("DockWidget", "ETF委托汇总", None))
self.gb_pb_sell_total_money_1.setText(_translate("DockWidget", "1,123万元", None))
self.gb_pb_sell_volume_total_1.setText(_translate("DockWidget", "720", None))
self.gb_pb_sell_volume_total_minus_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_sell_volume_column_1_1.setText(_translate("DockWidget", "120", None))
self.gb_pb_sell_volume_column_2_1.setText(_translate("DockWidget", "100", None))
self.gb_pb_sell_volume_column_3_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_s3_volume_1.setText(_translate("DockWidget", "卖3", None))
self.gb_pb_s2_1.setText(_translate("DockWidget", "卖2", None))
self.gb_pb_s4_price_1.setText(_translate("DockWidget", "卖4", None))
self.gb_pb_s4_1.setText(_translate("DockWidget", "卖4", None))
self.gb_pb_s3_price_1.setText(_translate("DockWidget", "卖3", None))
self.gb_pb_s5_volume_1.setText(_translate("DockWidget", "卖5", None))
self.gb_pb_s2_volume_1.setText(_translate("DockWidget", "卖2", None))
self.gb_pb_s5_price_1.setText(_translate("DockWidget", "卖5", None))
self.gb_pb_s4_volume_1.setText(_translate("DockWidget", "卖4", None))
self.gb_pb_s2_price_1.setText(_translate("DockWidget", "卖2", None))
self.gb_pb_s1_volume_1.setText(_translate("DockWidget", "卖1", None))
self.gb_pb_s3_1.setText(_translate("DockWidget", "卖3", None))
self.gb_pb_s1_price_1.setText(_translate("DockWidget", "卖1", None))
self.gb_pb_s5_1.setText(_translate("DockWidget", "卖5", None))
self.gb_pb_s1_1.setText(_translate("DockWidget", "卖1", None))
self.gb_pb_sell_volume_row_5_1.setText(_translate("DockWidget", "500", None))
self.gb_pb_sell_volume_row_minus_5_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_sell_volume_row_minus_4_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_sell_volume_row_minus_3_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_sell_volume_row_minus_2_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_sell_volume_row_minus_1_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_sell_volume_5_1_1.setText(_translate("DockWidget", "200", None))
self.gb_pb_sell_volume_5_2_1.setText(_translate("DockWidget", "300", None))
self.gb_pb_sell_volume_5_3_1.setText(_translate("DockWidget", "-", None))
self.label_1.setText(_translate("DockWidget", "---------------------------------------------------------------", None))
self.gb_pb_buy_volume_row_minus_2_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_b5_1.setText(_translate("DockWidget", "买5", None))
self.gb_pb_b5_price_1.setText(_translate("DockWidget", "买5", None))
self.gb_pb_b4_1_1.setText(_translate("DockWidget", "买4", None))
self.gb_pb_buy_volume_row_minus_1_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_b3_1.setText(_translate("DockWidget", "买3", None))
self.gb_pb_b2_volume_1.setText(_translate("DockWidget", "买2", None))
self.gb_pb_buy_volume_row_3_1.setText(_translate("DockWidget", "500", None))
self.gb_pb_buy_volume_3_1_1.setText(_translate("DockWidget", "200", None))
self.gb_pb_b1_price_1.setText(_translate("DockWidget", "买1", None))
self.gb_pb_buy_volume_3_2_1.setText(_translate("DockWidget", "300", None))
self.gb_pb_b3_volume_1.setText(_translate("DockWidget", "买3", None))
self.gb_pb_b2_1.setText(_translate("DockWidget", "买2", None))
self.gb_pb_buy_volume_3_3_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_b2_price_1.setText(_translate("DockWidget", "买2", None))
self.gb_pb_buy_volume_row_minus_3_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_b3_price_1.setText(_translate("DockWidget", "买3", None))
self.gb_pb_b4_volume_1.setText(_translate("DockWidget", "买4", None))
self.gb_pb_buy_volume_row_minus_5_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_b4_price_1.setText(_translate("DockWidget", "买4", None))
self.gb_pb_b5_volume_1.setText(_translate("DockWidget", "买5", None))
self.gb_pb_b1_volume_1.setText(_translate("DockWidget", "买1", None))
self.gb_pb_buy_volume_row_minus_4_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_b1_1.setText(_translate("DockWidget", "买1", None))
self.gb_pb_buy_total_money_1.setText(_translate("DockWidget", "1,123万元", None))
self.gb_pb_buy_volume_total_minus_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_buy_volume_column_3_1.setText(_translate("DockWidget", "-", None))
self.gb_pb_buy_volume_column_2_1.setText(_translate("DockWidget", "100", None))
self.gb_pb_buy_volume_total_1.setText(_translate("DockWidget", "720", None))
self.gb_pb_buy_volume_column_1_1.setText(_translate("DockWidget", "120", None))
self.gb_pb_sell_total_money_2.setText(_translate("DockWidget", "1,123万元", None))
self.gb_pb_sell_volume_total_2.setText(_translate("DockWidget", "720", None))
self.gb_pb_sell_volume_total_minus_2.setText(_translate("DockWidget", "-", None))
self.gb_pb_sell_volume_column_1_2.setText(_translate("DockWidget", "120", None))
self.gb_pb_sell_volume_column_2_2.setText(_translate("DockWidget", "100", None))
self.gb_pb_sell_volume_column_3_2.setText(_translate("DockWidget", "-", None))
self.gb_pb_s3_volume_2.setText(_translate("DockWidget", "卖3", None))
self.gb_pb_s2_2.setText(_translate("DockWidget", "卖2", None))
self.gb_pb_s4_price_2.setText(_translate("DockWidget", "卖4", None))
self.gb_pb_s4_2.setText(_translate("DockWidget", "卖4", None))
self.gb_pb_s3_price_2.setText(_translate("DockWidget", "卖3", None))
self.gb_pb_s5_volume_2.setText(_translate("DockWidget", "卖5", None))
self.gb_pb_s2_volume_2.setText(_translate("DockWidget", "卖2", None))
self.gb_pb_s5_price_2.setText(_translate("DockWidget", "卖5", None))
self.gb_pb_s4_volume_2.setText(_translate("DockWidget", "卖4", None))
self.gb_pb_s2_price_2.setText(_translate("DockWidget", "卖2", None))
self.gb_pb_s1_volume_2.setText(_translate("DockWidget", "卖1", None))
self.gb_pb_s3_2.setText(_translate("DockWidget", "卖3", None))
self.gb_pb_s1_price_2.setText(_translate("DockWidget", "卖1", None))
self.gb_pb_s5_2.setText(_translate("DockWidget", "卖5", None))
self.gb_pb_s1_2.setText(_translate("DockWidget", "卖1", None))
self.gb_pb_sell_volume_row_5_2.setText(_translate("DockWidget", "500", None))
self.gb_pb_sell_volume_row_minus_5_2.setText(_translate("DockWidget", "-", None))
self.gb_pb_sell_volume_row_minus_4_2.setText(_translate("DockWidget", "-", None))
self.gb_pb_sell_volume_row_minus_3_2.setText(_translate("DockWidget", "-", None))
m.c80 = Constraint(expr= m.x112 == 0)
m.c81 = Constraint(expr= m.x18 - m.x98 - m.x99 == 0)
m.c82 = Constraint(expr= m.x23 - m.x110 - m.x112 == 0)
m.c83 = Constraint(expr= m.x98 - 3.34221486003388*m.b206 <= 0)
m.c84 = Constraint(expr= m.x99 + 3.34221486003388*m.b206 <= 3.34221486003388)
m.c85 = Constraint(expr= m.x110 - 1.32154609891348*m.b206 <= 0)
m.c86 = Constraint(expr= m.x112 + 1.32154609891348*m.b206 <= 1.32154609891348)
m.c87 = Constraint(expr=(m.x114/(1e-6 + m.b207) - log(1 + m.x91/(1e-6 + m.b207)))*(1e-6 + m.b207) <= 0)
m.c88 = Constraint(expr= m.x93 == 0)
m.c89 = Constraint(expr= m.x115 == 0)
m.c90 = Constraint(expr= m.x15 - m.x91 - m.x93 == 0)
m.c91 = Constraint(expr= m.x24 - m.x114 - m.x115 == 0)
m.c92 = Constraint(expr= m.x91 - 2.54515263975353*m.b207 <= 0)
m.c93 = Constraint(expr= m.x93 + 2.54515263975353*m.b207 <= 2.54515263975353)
m.c94 = Constraint(expr= m.x114 - 1.26558121681553*m.b207 <= 0)
m.c95 = Constraint(expr= m.x115 + 1.26558121681553*m.b207 <= 1.26558121681553)
m.c96 = Constraint(expr= - 0.9*m.x100 + m.x116 == 0)
m.c97 = Constraint(expr= m.x101 == 0)
m.c98 = Constraint(expr= m.x117 == 0)
m.c99 = Constraint(expr= m.x19 - m.x100 - m.x101 == 0)
m.c100 = Constraint(expr= m.x25 - m.x116 - m.x117 == 0)
m.c101 = Constraint(expr= m.x100 - 15*m.b208 <= 0)
m.c102 = Constraint(expr= m.x101 + 15*m.b208 <= 15)
m.c103 = Constraint(expr= m.x116 - 13.5*m.b208 <= 0)
m.c104 = Constraint(expr= m.x117 + 13.5*m.b208 <= 13.5)
m.c105 = Constraint(expr= - 0.6*m.x102 + m.x118 == 0)
m.c106 = Constraint(expr= m.x103 == 0)
m.c107 = Constraint(expr= m.x119 == 0)
m.c108 = Constraint(expr= m.x20 - m.x102 - m.x103 == 0)
m.c109 = Constraint(expr= m.x26 - m.x118 - m.x119 == 0)
m.c110 = Constraint(expr= m.x102 - 15*m.b209 <= 0)
m.c111 = Constraint(expr= m.x103 + 15*m.b209 <= 15)
m.c112 = Constraint(expr= m.x118 - 9*m.b209 <= 0)
m.c113 = Constraint(expr= m.x119 + 9*m.b209 <= 9)
m.c114 = Constraint(expr=(m.x120/(1e-6 + m.b210) - 1.1*log(1 + m.x104/(1e-6 + m.b210)))*(1e-6 + m.b210) <= 0)
m.c115 = Constraint(expr= m.x105 == 0)
m.c116 = Constraint(expr= m.x121 == 0)
m.c117 = Constraint(expr= m.x21 - m.x104 - m.x105 == 0)
m.c118 = Constraint(expr= m.x27 - m.x120 - m.x121 == 0)
m.c119 = Constraint(expr= m.x104 - 15*m.b210 <= 0)
m.c120 = Constraint(expr= m.x105 + 15*m.b210 <= 15)
m.c121 = Constraint(expr= m.x120 - 3.04984759446376*m.b210 <= 0)
m.c122 = Constraint(expr= m.x121 + 3.04984759446376*m.b210 <= 3.04984759446376)
m.c123 = Constraint(expr= - 0.9*m.x107 + m.x140 == 0)
m.c124 = Constraint(expr= - m.x126 + m.x140 == 0)
m.c125 = Constraint(expr= m.x109 == 0)
m.c126 = Constraint(expr= m.x127 == 0)
m.c127 = Constraint(expr= m.x141 == 0)
m.c128 = Constraint(expr= m.x22 - m.x107 - m.x109 == 0)
m.c129 = Constraint(expr= m.x30 - m.x126 - m.x127 == 0)
m.c130 = Constraint(expr= m.x38 - m.x140 - m.x141 == 0)
m.c131 = Constraint(expr= m.x107 - 1.83548069293539*m.b211 <= 0)
m.c132 = Constraint(expr= m.x109 + 1.83548069293539*m.b211 <= 1.83548069293539)
m.c133 = Constraint(expr= m.x126 - 20*m.b211 <= 0)
m.c134 = Constraint(expr= m.x127 + 20*m.b211 <= 20)
m.c135 = Constraint(expr= m.x140 - 20*m.b211 <= 0)
m.c136 = Constraint(expr= m.x141 + 20*m.b211 <= 20)
m.c137 = Constraint(expr=(m.x142/(1e-6 + m.b212) - log(1 + m.x111/(1e-6 + m.b212)))*(1e-6 + m.b212) <= 0)
m.c138 = Constraint(expr= m.x113 == 0)
m.c139 = Constraint(expr= m.x143 == 0)
m.c140 = Constraint(expr= m.x23 - m.x111 - m.x113 == 0)
m.c141 = Constraint(expr= m.x39 - m.x142 - m.x143 == 0)
m.c142 = Constraint(expr= m.x111 - 1.32154609891348*m.b212 <= 0)
m.c143 = Constraint(expr= m.x113 + 1.32154609891348*m.b212 <= 1.32154609891348)
m.c144 = Constraint(expr= m.x142 - 0.842233385663186*m.b212 <= 0)
m.c145 = Constraint(expr= m.x143 + 0.842233385663186*m.b212 <= 0.842233385663186)
m.c146 = Constraint(expr=(m.x144/(1e-6 + m.b213) - 0.7*log(1 + m.x122/(1e-6 + m.b213)))*(1e-6 + m.b213) <= 0)
m.c147 = Constraint(expr= m.x123 == 0)
m.c148 = Constraint(expr= m.x145 == 0)
m.c149 = Constraint(expr= m.x28 - m.x122 - m.x123 == 0)
m.c150 = Constraint(expr= m.x40 - m.x144 - m.x145 == 0)
m.c151 = Constraint(expr= m.x122 - 1.26558121681553*m.b213 <= 0)
m.c152 = Constraint(expr= m.x123 + 1.26558121681553*m.b213 <= 1.26558121681553)
m.c153 = Constraint(expr= m.x144 - 0.572481933717686*m.b213 <= 0)
m.c154 = Constraint(expr= m.x145 + 0.572481933717686*m.b213 <= 0.572481933717686)
m.c155 = Constraint(expr=(m.x146/(1e-6 + m.b214) - 0.65*log(1 + m.x124/(1e-6 + m.b214)))*(1e-6 + m.b214) <= 0)
m.c156 = Constraint(expr=(m.x146/(1e-6 + m.b214) - 0.65*log(1 + m.x128/(1e-6 + m.b214)))*(1e-6 + m.b214) <= 0)
m.c157 = Constraint(expr= m.x125 == 0)
m.c158 = Constraint(expr= m.x129 == 0)
m.c159 = Constraint(expr= m.x147 == 0)
m.c160 = Constraint(expr= m.x29 - m.x124 - m.x125 == 0)
m.c161 = Constraint(expr= m.x32 - m.x128 - m.x129 == 0)
m.c162 = Constraint(expr= m.x41 - m.x146 - m.x147 == 0)
m.c163 = Constraint(expr= m.x124 - 1.26558121681553*m.b214 <= 0)
m.c164 = Constraint(expr= m.x125 + 1.26558121681553*m.b214 <= 1.26558121681553)
m.c165 = Constraint(expr= m.x128 - 33.5*m.b214 <= 0)
m.c166 = Constraint(expr= m.x129 + 33.5*m.b214 <= 33.5)
m.c167 = Constraint(expr= m.x146 - 2.30162356062425*m.b214 <= 0)
m.c168 = Constraint(expr= m.x147 + 2.30162356062425*m.b214 <= 2.30162356062425)
m.c169 = Constraint(expr= - m.x130 + m.x148 == 0)
m.c170 = Constraint(expr= m.x131 == 0)
m.c171 = Constraint(expr= m.x149 == 0)
m.c172 = Constraint(expr= m.x33 - m.x130 - m.x131 == 0)
m.c173 = Constraint(expr= m.x42 - m.x148 - m.x149 == 0)
m.c174 = Constraint(expr= m.x130 - 9*m.b215 <= 0)
m.c175 = Constraint(expr= m.x131 + 9*m.b215 <= 9)
m.c176 = Constraint(expr= m.x148 - 9*m.b215 <= 0)
m.c177 = Constraint(expr= m.x149 + 9*m.b215 <= 9)
m.c178 = Constraint(expr= - m.x132 + m.x150 == 0)
m.c179 = Constraint(expr= m.x133 == 0)
m.c180 = Constraint(expr= m.x151 == 0)
m.c181 = Constraint(expr= m.x34 - m.x132 - m.x133 == 0)
m.c182 = Constraint(expr= m.x43 - m.x150 - m.x151 == 0)
m.c183 = Constraint(expr= m.x132 - 9*m.b216 <= 0)
m.c184 = Constraint(expr= m.x133 + 9*m.b216 <= 9)
m.c185 = Constraint(expr= m.x150 - 9*m.b216 <= 0)
m.c186 = Constraint(expr= m.x151 + 9*m.b216 <= 9)
m.c187 = Constraint(expr=(m.x152/(1e-6 + m.b217) - 0.75*log(1 + m.x134/(1e-6 + m.b217)))*(1e-6 + m.b217) <= 0)
m.c188 = Constraint(expr= m.x135 == 0)
m.c189 = Constraint(expr= m.x153 == 0)
m.c190 = Constraint(expr= m.x35 - m.x134 - m.x135 == 0)
m.c191 = Constraint(expr= m.x44 - m.x152 - m.x153 == 0)
m.c192 = Constraint(expr= m.x134 - 3.04984759446376*m.b217 <= 0)
m.c193 = Constraint(expr= m.x135 + 3.04984759446376*m.b217 <= 3.04984759446376)
m.c194 = Constraint(expr= m.x152 - 1.04900943706034*m.b217 <= 0)
m.c195 = Constraint(expr= m.x153 + 1.04900943706034*m.b217 <= 1.04900943706034)
m.c196 = Constraint(expr=(m.x154/(1e-6 + m.b218) - 0.8*log(1 + m.x136/(1e-6 + m.b218)))*(1e-6 + m.b218) <= 0)
m.c197 = Constraint(expr= m.x137 == 0)
m.c198 = Constraint(expr= m.x155 == 0)
m.c199 = Constraint(expr= m.x36 - m.x136 - m.x137 == 0)
m.c200 = Constraint(expr= m.x45 - m.x154 - m.x155 == 0)
m.c201 = Constraint(expr= m.x136 - 3.04984759446376*m.b218 <= 0)
m.c202 = Constraint(expr= m.x137 + 3.04984759446376*m.b218 <= 3.04984759446376)
m.c203 = Constraint(expr= m.x154 - 1.11894339953103*m.b218 <= 0)
m.c204 = Constraint(expr= m.x155 + 1.11894339953103*m.b218 <= 1.11894339953103)
m.c205 = Constraint(expr=(m.x156/(1e-6 + m.b219) - 0.85*log(1 + m.x138/(1e-6 + m.b219)))*(1e-6 + m.b219) <= 0)
m.c206 = Constraint(expr= m.x139 == 0)
m.c207 = Constraint(expr= m.x157 == 0)
m.c208 = Constraint(expr= m.x37 - m.x138 - m.x139 == 0)
m.c209 = Constraint(expr= m.x46 - m.x156 - m.x157 == 0)
m.c210 = Constraint(expr= m.x138 - 3.04984759446376*m.b219 <= 0)
m.c211 = Constraint(expr= m.x139 + 3.04984759446376*m.b219 <= 3.04984759446376)
m.c212 = Constraint(expr= m.x156 - 1.18887736200171*m.b219 <= 0)
m.c213 = Constraint(expr= m.x157 + 1.18887736200171*m.b219 <= 1.18887736200171)
m.c214 = Constraint(expr=(m.x162/(1e-6 + m.b220) - log(1 + m.x158/(1e-6 + m.b220)))*(1e-6 + m.b220) <= 0)
m.c215 = Constraint(expr= m.x159 == 0)
m.c216 = Constraint(expr= m.x163 == 0)
m.c217 = Constraint(expr= m.x48 - m.x158 - m.x159 == 0)
m.c218 = Constraint(expr= m.x50 - m.x162 - m.x163 == 0)
m.c219 = Constraint(expr= m.x158 - 1.18887736200171*m.b220 <= 0)
m.c220 = Constraint(expr= m.x159 + 1.18887736200171*m.b220 <= 1.18887736200171)
m.c221 = Constraint(expr= m.x162 - 0.78338879230327*m.b220 <= 0)
m.c222 = Constraint(expr= m.x163 + 0.78338879230327*m.b220 <= 0.78338879230327)
m.c223 = Constraint(expr=(m.x164/(1e-6 + m.b221) - 1.2*log(1 + m.x160/(1e-6 + m.b221)))*(1e-6 + m.b221) <= 0)
m.c224 = Constraint(expr= m.x161 == 0)
m.c225 = Constraint(expr= m.x165 == 0)
m.c226 = Constraint(expr= m.x49 - m.x160 - m.x161 == 0)
m.c227 = Constraint(expr= m.x51 - m.x164 - m.x165 == 0)
m.c228 = Constraint(expr= m.x160 - 1.18887736200171*m.b221 <= 0)
m.c229 = Constraint(expr= m.x161 + 1.18887736200171*m.b221 <= 1.18887736200171)
m.c230 = Constraint(expr= m.x164 - 0.940066550763924*m.b221 <= 0)
m.c231 = Constraint(expr= m.x165 + 0.940066550763924*m.b221 <= 0.940066550763924)
m.c232 = Constraint(expr= - 0.75*m.x166 + m.x174 == 0)
m.c233 = Constraint(expr= m.x167 == 0)
m.c234 = Constraint(expr= m.x175 == 0)
m.c235 = Constraint(expr= m.x55 - m.x166 - m.x167 == 0)
m.c236 = Constraint(expr= m.x59 - m.x174 - m.x175 == 0)
m.c237 = Constraint(expr= m.x166 - 0.940066550763924*m.b222 <= 0)
m.c238 = Constraint(expr= m.x167 + 0.940066550763924*m.b222 <= 0.940066550763924)
m.c239 = Constraint(expr= m.x174 - 0.705049913072943*m.b222 <= 0)
m.c240 = Constraint(expr= m.x175 + 0.705049913072943*m.b222 <= 0.705049913072943)
m.c241 = Constraint(expr=(m.x176/(1e-6 + m.b223) - 1.5*log(1 + m.x168/(1e-6 + m.b223)))*(1e-6 + m.b223) <= 0)
m.c242 = Constraint(expr= m.x169 == 0)
m.c243 = Constraint(expr= m.x178 == 0)
m.c244 = Constraint(expr= m.x56 - m.x168 - m.x169 == 0)
m.c245 = Constraint(expr= m.x60 - m.x176 - m.x178 == 0)
m.c246 = Constraint(expr= m.x168 - 0.940066550763924*m.b223 <= 0)
m.c247 = Constraint(expr= m.x169 + 0.940066550763924*m.b223 <= 0.940066550763924)
m.c248 = Constraint(expr= m.x176 - 0.994083415506506*m.b223 <= 0)
m.c249 = Constraint(expr= m.x178 + 0.994083415506506*m.b223 <= 0.994083415506506)
import hashlib
import io
import os
import shutil
import pytz
import uuid
import boto3
from botocore.client import Config
from datetime import datetime
from functools import partial
from typing import Dict, Set
from django.db.models import Count, Prefetch, DateTimeField
from django.db.models.expressions import F, Q
from django.conf import settings
from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import transaction
from django.db import models
from django.utils import timezone
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models.organism import Organism
from data_refinery_common.utils import get_env_variable, get_s3_url, calculate_file_size, calculate_sha1, FileUtils
# We have to set the signature_version to v4 since us-east-1 buckets require
# v4 authentication.
S3 = boto3.client('s3', config=Config(signature_version='s3v4'))
logger = get_and_configure_logger(__name__)
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
# We store what salmon outputs as its version, therefore for
# comparisons or defaults we shouldn't just store the version string,
# we need something with the pattern: 'salmon X.X.X'
CURRENT_SALMON_VERSION = 'salmon ' + get_env_variable("SALMON_VERSION", "0.13.1")
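# e.g. "salmon 0.13.1" when SALMON_VERSION is left at its default above.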
CHUNK_SIZE = 1024 * 256 # chunk_size is in bytes
"""
# First Order Classes
These represent the primary data types we will be querying
and filtering against.
"""
class PublicObjectsManager(models.Manager):
"""
Only returns objects that have is_public
"""
def get_queryset(self):
return super().get_queryset().filter(is_public=True)
class ProcessedObjectsManager(models.Manager):
"""
Only returns objects that have is_processed and is_public
"""
def get_queryset(self):
return super().get_queryset().filter(is_processed=True, is_public=True)
class Sample(models.Model):
"""
An individual sample.
"""
class Meta:
db_table = "samples"
base_manager_name = "public_objects"
get_latest_by = "created_at"
indexes = [
models.Index(fields=['accession_code']),
]
def __str__(self):
return self.accession_code
# Managers
objects = models.Manager()
public_objects = PublicObjectsManager()
processed_objects = ProcessedObjectsManager()
# Identifiers
accession_code = models.CharField(max_length=255, unique=True)
title = models.CharField(max_length=255, unique=False, blank=True)
# Relations
organism = models.ForeignKey(Organism, blank=True, null=True, on_delete=models.SET_NULL)
results = models.ManyToManyField('ComputationalResult', through='SampleResultAssociation')
original_files = models.ManyToManyField('OriginalFile', through='OriginalFileSampleAssociation')
computed_files = models.ManyToManyField('ComputedFile', through='SampleComputedFileAssociation')
experiments = models.ManyToManyField('Experiment', through='ExperimentSampleAssociation')
# Historical Properties
source_database = models.CharField(max_length=255, blank=False)
source_archive_url = models.CharField(max_length=255)
source_filename = models.CharField(max_length=255, blank=False)
source_absolute_file_path = models.CharField(max_length=255)
has_raw = models.BooleanField(default=True) # Did this sample have a raw data source?
# Technological Properties
platform_accession_code = models.CharField(max_length=256, blank=True)
platform_name = models.CharField(max_length=256, blank=True)
technology = models.CharField(max_length=256, blank=True) # MICROARRAY, RNA-SEQ
manufacturer = models.CharField(max_length=256, blank=True)
protocol_info = JSONField(default=dict)
# Scientific Properties
sex = models.CharField(max_length=255, blank=True)
age = models.DecimalField(max_length=255, blank=True, max_digits=8, decimal_places=3, null=True)
specimen_part = models.CharField(max_length=255, blank=True)
genotype = models.CharField(max_length=255, blank=True)
disease = models.CharField(max_length=255, blank=True)
disease_stage = models.CharField(max_length=255, blank=True)
cell_line = models.CharField(max_length=255, blank=True)
treatment = models.CharField(max_length=255, blank=True)
race = models.CharField(max_length=255, blank=True)
subject = models.CharField(max_length=255, blank=True)
compound = models.CharField(max_length=255, blank=True)
time = models.CharField(max_length=255, blank=True)
# Crunch Properties
is_processed = models.BooleanField(default=False)
# Blacklisting
is_blacklisted = models.BooleanField(default=False)
# Common Properties
is_public = models.BooleanField(default=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
return super(Sample, self).save(*args, **kwargs)
def to_metadata_dict(self):
"""Render this Sample as a dict."""
metadata = {}
metadata['refinebio_title'] = self.title
metadata['refinebio_accession_code'] = self.accession_code
metadata['refinebio_organism'] = self.organism.name if self.organism else None
metadata['refinebio_source_database'] = self.source_database
metadata['refinebio_source_archive_url'] = self.source_archive_url
metadata['refinebio_sex'] = self.sex
metadata['refinebio_age'] = self.age or ''
metadata['refinebio_specimen_part'] = self.specimen_part
metadata['refinebio_genetic_information'] = self.genotype
metadata['refinebio_disease'] = self.disease
metadata['refinebio_disease_stage'] = self.disease_stage
metadata['refinebio_cell_line'] = self.cell_line
metadata['refinebio_treatment'] = self.treatment
metadata['refinebio_race'] = self.race
metadata['refinebio_subject'] = self.subject
metadata['refinebio_compound'] = self.compound
metadata['refinebio_time'] = self.time
metadata['refinebio_platform'] = self.pretty_platform
metadata['refinebio_annotations'] = [
data for data in self.sampleannotation_set.all().values_list('data', flat=True)
]
return metadata
# Returns a set of ProcessorJob objects but we cannot specify
# that in type hints because it hasn't been declared yet.
def get_processor_jobs(self) -> Set:
processor_jobs = set()
for original_file in self.original_files.prefetch_related("processor_jobs").all():
for processor_job in original_file.processor_jobs.all():
processor_jobs.add(processor_job)
return processor_jobs
# Returns a set of DownloaderJob objects but we cannot specify
# that in type hints because it hasn't been declared yet.
def get_downloader_jobs(self) -> Set:
downloader_jobs = set()
for original_file in self.original_files.prefetch_related("downloader_jobs").all():
for downloader_job in original_file.downloader_jobs.all():
downloader_jobs.add(downloader_job)
return downloader_jobs
def get_result_files(self):
""" Get all of the ComputedFile objects associated with this Sample """
return self.computed_files.all()
def get_most_recent_smashable_result_file(self):
""" Get the most recent of the ComputedFile objects associated with this Sample """
try:
latest_computed_file = self.computed_files.filter(
is_public=True,
is_smashable=True,
).latest()
return latest_computed_file
except ComputedFile.DoesNotExist as e:
# This sample has no smashable files yet.
return None
def get_most_recent_quant_sf_file(self):
""" Returns the latest quant.sf file that was generated for this sample.
Note: We don't associate that file with the computed_files of this sample,
which is why we have to go through the computational results. """
return ComputedFile.objects\
.filter(result__in=self.results.all(), filename='quant.sf',
s3_key__isnull=False, s3_bucket__isnull=False)\
.order_by('-created_at')\
.first()
@property
def pretty_platform(self):
""" Turns
[HT_HG-U133_Plus_PM] Affymetrix HT HG-U133+ PM Array Plate
into
Affymetrix HT HG-U133+ PM Array Plate (hthgu133pluspm)
"""
if ']' in self.platform_name:
platform_base = self.platform_name.split(']')[1].strip()
else:
platform_base = self.platform_name
return platform_base + ' (' + self.platform_accession_code + ')'
class SampleAnnotation(models.Model):
""" Semi-standard information associated with a Sample """
class Meta:
db_table = "sample_annotations"
base_manager_name = 'public_objects'
# Managers
objects = models.Manager()
public_objects = PublicObjectsManager()
# Relations
sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
# Properties
data = JSONField(default=dict)
is_ccdl = models.BooleanField(default=False)
# Common Properties
is_public = models.BooleanField(default=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
return super(SampleAnnotation, self).save(*args, **kwargs)
class ProcessedPublicObjectsManager(models.Manager):
"""
Only returns Experiments that are is_public and have related is_processed Samples.
"""
def get_queryset(self):
return super().get_queryset().filter(
is_public=True,
num_processed_samples__gt=0)
class Experiment(models.Model):
""" An Experiment or Study """
class Meta:
db_table = "experiments"
base_manager_name = 'public_objects'
def __str__(self):
return "Experiment: " + self.accession_code
# Managers
objects = models.Manager()
public_objects = PublicObjectsManager()
processed_public_objects = ProcessedPublicObjectsManager()
# Relations
samples = models.ManyToManyField('Sample', through='ExperimentSampleAssociation')
organisms = models.ManyToManyField('Organism', through='ExperimentOrganismAssociation')
# Identifiers
accession_code = models.CharField(max_length=64, unique=True)
alternate_accession_code = models.CharField(max_length=64, unique=True, null=True)
# Historical Properties
source_database = models.CharField(max_length=32) # "ArrayExpress", "SRA", "GEO"
source_url = models.TextField()
# Properties
# I was always under the impression that TextFields were slower
# than CharFields; however, the Postgres documentation disagrees:
# https://www.postgresql.org/docs/9.0/static/datatype-character.html
title = models.TextField()
description = models.TextField()
protocol_description = JSONField(default=dict)
technology = models.CharField(max_length=256, blank=True)
submitter_institution = models.CharField(max_length=256, blank=True)
has_publication = models.BooleanField(default=False)
publication_title = models.TextField(default="")
publication_doi = models.CharField(max_length=64, blank=True)
publication_authors = ArrayField(models.TextField(), default=list)
pubmed_id = models.CharField(max_length=32, blank=True)
source_first_published = models.DateTimeField(null=True)
source_last_modified = models.DateTimeField(null=True)
# Cached Computed Properties
num_total_samples = models.IntegerField(default=0)
num_processed_samples = models.IntegerField(default=0)
num_downloadable_samples = models.IntegerField(default=0)
sample_metadata_fields = ArrayField(models.TextField(), default=list)
platform_names = ArrayField(models.TextField(), default=list)
platform_accession_codes = ArrayField(models.TextField(), default=list)
# Common Properties
is_public = models.BooleanField(default=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
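# Keep GEO and ArrayExpress accessions linked, e.g. GSE12345 <-> E-GEOD-12345.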
if self.accession_code and not self.alternate_accession_code:
if self.accession_code.startswith('GSE'):
self.alternate_accession_code = 'E-GEOD-' + self.accession_code[3:]
elif self.accession_code.startswith('E-GEOD-'):
self.alternate_accession_code = 'GSE' + self.accession_code[7:]
return super(Experiment, self).save(*args, **kwargs)
def update_num_samples(self):
""" Update our cache values """
aggregates = self.samples.aggregate(
num_total_samples=Count('id'),
num_processed_samples=Count('id', filter=Q(is_processed=True)),
num_downloadable_samples=Count('id', filter=Q(is_processed=True, organism__qn_target__isnull=False))
)
self.num_total_samples = aggregates['num_total_samples']
self.num_processed_samples = aggregates['num_processed_samples']
self.num_downloadable_samples = aggregates['num_downloadable_samples']
self.save()
def to_metadata_dict(self):
""" Render this Experiment as a dict """
metadata = {}
metadata['title'] = self.title
metadata['accession_code'] = self.accession_code
metadata['organisms'] = list(self.organisms.all().values_list('name', flat=True))
metadata['sample_accession_codes'] = list(self.samples.all().values_list('accession_code', flat=True))
metadata['description'] = self.description
metadata['protocol_description'] = self.protocol_description
metadata['technology'] = self.technology
metadata['submitter_institution'] = self.submitter_institution
metadata['has_publication'] = self.has_publication
metadata['publication_title'] = self.publication_title
metadata['publication_doi'] = self.publication_doi
metadata['pubmed_id'] = self.pubmed_id
if self.source_first_published:
metadata['source_first_published'] = self.source_first_published.strftime(
'%Y-%m-%dT%H:%M:%S')
else:
metadata['source_first_published'] = ''
if self.source_last_modified:
metadata['source_last_modified'] = self.source_last_modified.strftime(
'%Y-%m-%dT%H:%M:%S')
else:
metadata['source_last_modified'] = ''
return metadata
def get_sample_metadata_fields(self):
""" Get all metadata fields that are non-empty for at least one sample in the experiment.
See https://github.com/AlexsLemonade/refinebio-frontend/issues/211 for why this is needed.
"""
fields = []
possible_fields = ['sex', 'age', 'specimen_part', 'genotype', 'disease', 'disease_stage',
'cell_line', 'treatment', 'race', 'subject', 'compound', 'time']
samples = self.samples.all()
for field in possible_fields:
for sample in samples:
if getattr(sample, field) is not None and getattr(sample, field) != '':
fields.append(field)
break
return fields
def update_sample_metadata_fields(self):
self.sample_metadata_fields = self.get_sample_metadata_fields()
def update_platform_names(self):
self.platform_names = self.get_platform_names()
self.platform_accession_codes = self.get_platform_accession_codes()
def get_sample_technologies(self):
""" Get a list of unique technologies for all of the associated samples
"""
return list(set([sample.technology for sample in self.samples.all()]))
def get_platform_names(self):
""" Get a list of unique platforms for all of the associated samples
"""
return list(set([sample.platform_name for sample in self.samples.all()]))
def get_platform_accession_codes(self):
""" Get a list of unique platforms for all of the associated samples
"""
return list(set([sample.platform_accession_code for sample in self.samples.all()]))
@property
def platforms(self):
""" Returns a list of related pipelines """
return list(set([sample.platform_name for sample in self.samples.all()]))
| |
includes the number of homicides and population.
Creating two arrays from these columns.
>>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
Creating a spatial weights instance by reading in stl.gal file.
>>> stl_w = pysal.open(pysal.examples.get_path('stl.gal'), 'r').read()
Ensuring that the elements in the spatial weights instance are ordered
by the given sequential numbers from 1 to the number of observations in stl_hom.csv
>>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
Creating an instance of Spatial_Rate class using stl_e, stl_b, and stl_w
>>> sr = Spatial_Rate(stl_e,stl_b,stl_w)
Extracting the risk values through the property r of sr
>>> sr.r[:10]
array([ 4.59326407e-05, 3.62437513e-05, 4.98677081e-05,
5.09387329e-05, 3.72735210e-05, 4.01073093e-05,
3.79372794e-05, 3.27019246e-05, 4.26204928e-05,
3.47270722e-05])
"""
def __init__(self, e, b, w):
if not w.id_order_set:
raise ValueError("w id_order must be set to align with the order of e and b")
else:
e = np.asarray(e).reshape(-1,1)
b = np.asarray(b).reshape(-1,1)
w.transform = 'b'
w_e, w_b = slag(w, e), slag(w, b)
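# With binary weights, the smoothed rate is r_i = (e_i + sum_j w_ij*e_j) / (b_i + sum_j w_ij*b_j).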
self.r = (e + w_e) / (b + w_b)
w.transform = 'o'
class Kernel_Smoother(_Spatial_Smoother):
"""Kernal smoothing
Parameters
----------
e : array (n, 1)
event variable measured across n spatial units
b : array (n, 1)
population at risk variable measured across n spatial units
w : Kernel weights instance
Attributes
----------
r : array (n, 1)
rate values from kernel smoothing
Examples
--------
Creating an array including event values for 6 regions
>>> e = np.array([10, 1, 3, 4, 2, 5])
Creating another array including population-at-risk values for the 6 regions
>>> b = np.array([100, 15, 20, 20, 80, 90])
Creating a list containing geographic coordinates of the 6 regions' centroids
>>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
Creating a kernel-based spatial weights instance by using the above points
>>> kw=Kernel(points)
Ensuring that the elements in the kernel-based weights are ordered
by the given sequential numbers from 0 to 5
>>> if not kw.id_order_set: kw.id_order = range(0,len(points))
Applying kernel smoothing to e and b
>>> kr = Kernel_Smoother(e, b, kw)
Extracting the smoothed rates through the property r of the Kernel_Smoother instance
>>> kr.r
array([ 0.10543301, 0.0858573 , 0.08256196, 0.09884584, 0.04756872,
0.04845298])
"""
def __init__(self, e, b, w):
if not isinstance(w, Kernel):
raise ValueError('w must be an instance of Kernel weights')
if not w.id_order_set:
raise ValueError("w id_order must be set to align with the order of e and b")
else:
e = np.asarray(e).reshape(-1,1)
b = np.asarray(b).reshape(-1,1)
w_e, w_b = slag(w, e), slag(w, b)
self.r = w_e / w_b
class Age_Adjusted_Smoother(_Spatial_Smoother):
"""Age-adjusted rate smoothing
Parameters
----------
e : array (n*h, 1)
event variable measured for each age group across n spatial units
b : array (n*h, 1)
population at risk variable measured for each age group across n spatial units
w : spatial weights instance
s : array (n*h, 1)
standard population for each age group across n spatial units
Attributes
----------
r : array (n, 1)
rate values from age-adjusted rate smoothing
Notes
-----
Weights used to smooth age-specific events and populations are simple binary weights
Examples
--------
Creating an array including 12 values for the 6 regions with 2 age groups
>>> e = np.array([10, 8, 1, 4, 3, 5, 4, 3, 2, 1, 5, 3])
Creating another array including 12 population-at-risk values for the 6 regions
>>> b = np.array([100, 90, 15, 30, 25, 20, 30, 20, 80, 80, 90, 60])
For age adjustment, we need another array of values containing standard population
s includes standard population data for the 6 regions
>>> s = np.array([98, 88, 15, 29, 20, 23, 33, 25, 76, 80, 89, 66])
Creating a list containing geographic coordinates of the 6 regions' centroids
>>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
Creating a kernel-based spatial weights instance by using the above points
>>> kw=Kernel(points)
Ensuring that the elements in the kernel-based weights are ordered
by the given sequential numbers from 0 to 5
>>> if not kw.id_order_set: kw.id_order = range(0,len(points))
Applying age-adjusted smoothing to e and b
>>> ar = Age_Adjusted_Smoother(e, b, kw, s)
Extracting the smoothed rates through the property r of the Age_Adjusted_Smoother instance
>>> ar.r
array([ 0.10519625, 0.08494318, 0.06440072, 0.06898604, 0.06952076,
0.05020968])
"""
def __init__(self, e, b, w, s, alpha=0.05):
e = np.asarray(e).reshape(-1, 1)
b = np.asarray(b).reshape(-1, 1)
s = np.asarray(s).flatten()
t = len(e)
h = t // w.n
w.transform = 'b'
e_n, b_n = [], []
for i in range(h):
e_n.append(slag(w, e[i::h]).tolist())
b_n.append(slag(w, b[i::h]).tolist())
e_n = np.array(e_n).reshape((1, t), order='F')[0]
b_n = np.array(b_n).reshape((1, t), order='F')[0]
e_n = e_n.reshape(s.shape)
b_n = b_n.reshape(s.shape)
r = direct_age_standardization(e_n, b_n, s, w.n, alpha=alpha)
self.r = np.array([i[0] for i in r])
w.transform = 'o'
@_requires('pandas')
@classmethod
def by_col(cls, df, e, b, w=None, s=None, **kwargs):
"""
Compute smoothing by columns in a dataframe.
Parameters
-----------
df : pandas.DataFrame
a dataframe containing the data to be smoothed
e : string or list of strings
the name or names of columns containing event variables to be
smoothed
b : string or list of strings
the name or names of columns containing the population
variables to be smoothed
w : pysal.weights.W or list of pysal.weights.W
the spatial weights object or objects to use with the
event-population pairs. If not provided and a weights object
is in the dataframe's metadata, that weights object will be
used.
s : string or list of strings
the name or names of columns to use as a standard population
variable for the events `e` and at-risk populations `b`.
inplace : bool
a flag denoting whether to output a copy of `df` with the
relevant smoothed columns appended, or to append the columns
directly to `df` itself.
**kwargs: optional keyword arguments
optional keyword options that are passed directly to the
smoother.
Returns
---------
a copy of `df` containing the columns. Or, if `inplace`, this returns
None, but implicitly adds columns to `df`.
"""
if s is None:
raise Exception('Standard population variable "s" must be supplied.')
import pandas as pd
if isinstance(e, str):
e = [e]
if isinstance(b, str):
b = [b]
if isinstance(s, str):
s = [s]
if w is None:
found = False
for k in df._metadata:
w = df.__dict__.get(k, None)
if isinstance(w, W):
found = True
break
if not found:
raise Exception('Weights not provided and no weights attached to frame!'
' Please provide a weight or attach a weight to the'
' dataframe.')
if isinstance(w, W):
w = [w] * len(e)
if not all(isinstance(wi, W) for wi in w):
raise Exception('Weights object must be an instance of '
' libpysal.weights.W!')
b = b * len(e) if len(b) == 1 and len(e) > 1 else b
s = s * len(e) if len(s) == 1 and len(e) > 1 else s
try:
assert len(e) == len(b)
assert len(e) == len(s)
assert len(e) == len(w)
except AssertionError:
raise ValueError('There is no one-to-one mapping between the event'
' variables, the population-at-risk variables,'
' the standard population variables, and the'
' spatial weights!')
rdf = []
max_len = 0
for ei, bi, wi, si in zip(e, b, w, s):
ename = ei
bname = bi
h = len(ei) // wi.n
outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
this_r = cls(df[ei], df[bi], w=wi, s=df[si], **kwargs).r
max_len = len(this_r) if len(this_r) > max_len else max_len
rdf.append((outcol, this_r.tolist()))
padded = (r[1] + [None] * (max_len - len(r[1])) for r in rdf)
rdf = zip((r[0] for r in rdf), padded)
rdf = pd.DataFrame.from_items(rdf)
return rdf
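# Illustrative sketch of by_col usage (not part of the library): the dataframe
# `df`, the kernel weights `kw`, and the column names 'e', 'b', and 's' below
# are hypothetical, assuming one event, one population-at-risk, and one
# standard-population column already present in `df`.
#   rates = Age_Adjusted_Smoother.by_col(df, 'e', 'b', w=kw, s='s')
#   # The returned frame holds one smoothed column named 'e-b_age_adjusted_smoother'.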
class Disk_Smoother(_Spatial_Smoother):
"""Locally weighted averages or disk smoothing
Parameters
----------
e : array (n, 1)
event variable measured across n spatial units
b : array (n, 1)
population at risk variable measured across n spatial units
w : spatial weights matrix
Attributes
----------
r : array (n, 1)
rate values from disk smoothing
Examples
--------
Reading data in stl_hom.csv into stl to extract values
for event and population-at-risk variables
>>> stl = pysal.open(pysal.examples.get_path('stl_hom.csv'), 'r')
|