repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 1-5 chars) | size (string, 4-7 chars) | content (string, 475-1M chars) | license (15 string classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,293,591B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
cmr/cmr_rrt | src/tree.py | 1 | 1500 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# simple tree with parent-pointers and data associated with each node and
# edge.
class Node(object):
    def __init__(self, data=None, parent=None):
        self.data = data
        self.children = []
        self.parent = None
        self.edge_data = None
        if parent is not None:
            parent.add_child(self)
def add_child(self, child, edge_data=None):
if child not in self.children:
child.parent = self
child.edge_data = edge_data
self.children.append(child)
def detach(self):
        try:
            self.parent.children.remove(self)
        except ValueError:
            print "Help!! My parent doesn't think I am its child :("
            raise
self.parent = None
self.edge_data = None
    def find_root(self):
        # Walk up the parent pointers; works from any node, including the root.
        cur = self
        while cur.parent is not None:
            cur = cur.parent
        return cur
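# Usage sketch (illustrative only; node data and edge labels are assumptions):
if __name__ == '__main__':
    root = Node(data='root')
    child = Node(data='child')
    leaf = Node(data='leaf')
    root.add_child(child, edge_data='root->child')
    child.add_child(leaf, edge_data='child->leaf')
    assert leaf.find_root() is root
    child.detach()   # child (and its subtree) is no longer reachable from root
    assert child.parent is None and child not in root.children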
| gpl-3.0 | -3,372,609,052,861,359,000 | 33.090909 | 73 | 0.650667 | false |
mattjhayes/nmeta2 | nmeta2/nmeta2.py | 1 | 29505 | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the main module of the nmeta2 suite
running on top of the Ryu SDN controller to provide network identity
and flow (traffic classification) metadata.
.
It supports OpenFlow v1.3 switches and Data Path Auxiliary Engines
(DPAE)
.
Do not use this code for production deployments - it is proof
of concept code and carries no warranty whatsoever.
.
You have been warned.
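.
A Ryu application such as this one is normally launched with ryu-manager,
for example (invocation shown is an assumption, not taken from this file):
    ryu-manager nmeta2/nmeta2.py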
"""
#*** Logging Imports:
import logging
import logging.handlers
#import coloredlogs
#*** General Imports:
import sys
import time
#*** mongodb Database Import:
from pymongo import MongoClient
#*** Ryu Imports:
from ryu import utils
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import HANDSHAKE_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4, ipv6
from ryu.lib.packet import tcp
#*** Required for api module context:
from ryu.app.wsgi import WSGIApplication
#*** nmeta imports:
import config
import switch_abstraction
import api
import main_policy
import of_error_decode
#*** JSON imports:
import json
from json import JSONEncoder
#*** Universal Unique Identifier:
from uuid import UUID
class Nmeta(app_manager.RyuApp):
"""
This is the main class of nmeta2 and is run by Ryu
"""
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
#*** Used to call api module:
_CONTEXTS = {'wsgi': WSGIApplication}
def __init__(self, *args, **kwargs):
super(Nmeta, self).__init__(*args, **kwargs)
#*** Version number for compatibility checks:
self.version = '0.3.5'
#*** Instantiate config class which imports configuration file
#*** config.yaml and provides access to keys/values:
self.config = config.Config()
#*** Get logging config values from config class:
_logging_level_s = self.config.get_value \
('nmeta_logging_level_s')
_logging_level_c = self.config.get_value \
('nmeta_logging_level_c')
_syslog_enabled = self.config.get_value('syslog_enabled')
_loghost = self.config.get_value('loghost')
_logport = self.config.get_value('logport')
_logfacility = self.config.get_value('logfacility')
_syslog_format = self.config.get_value('syslog_format')
_console_log_enabled = self.config.get_value('console_log_enabled')
_coloredlogs_enabled = self.config.get_value('coloredlogs_enabled')
_console_format = self.config.get_value('console_format')
#*** Set up Logging:
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
#*** Syslog:
if _syslog_enabled:
#*** Log to syslog on host specified in config.yaml:
self.syslog_handler = logging.handlers.SysLogHandler(address=(
_loghost, _logport),
facility=_logfacility)
syslog_formatter = logging.Formatter(_syslog_format)
self.syslog_handler.setFormatter(syslog_formatter)
self.syslog_handler.setLevel(_logging_level_s)
#*** Add syslog log handler to logger:
self.logger.addHandler(self.syslog_handler)
#*** Console logging:
if _console_log_enabled:
#*** Log to the console:
self.console_handler = logging.StreamHandler()
console_formatter = logging.Formatter(_console_format)
self.console_handler.setFormatter(console_formatter)
self.console_handler.setLevel(_logging_level_c)
#*** Add console log handler to logger:
self.logger.addHandler(self.console_handler)
#*** Set a variable to indicate if either or both levels are
#*** at debug:
if _logging_level_s == 'DEBUG' or _logging_level_c == 'DEBUG':
self.debug_on = True
else:
self.debug_on = False
#*** Set up variables:
#*** Get max bytes of new flow packets to send to controller from
#*** config file:
self.miss_send_len = self.config.get_value("miss_send_len")
if self.miss_send_len < 1500:
self.logger.info("Be aware that setting "
"miss_send_len to less than a full size packet "
"may result in errors due to truncation. "
"Configured value is %s bytes",
self.miss_send_len)
#*** Load the Flow Table ID numbers:
self.ft_iig = self.config.get_value("ft_iig")
self.ft_iim = self.config.get_value("ft_iim")
self.ft_tc = self.config.get_value("ft_tc")
self.ft_tt = self.config.get_value("ft_tt")
self.ft_fwd = self.config.get_value("ft_fwd")
#*** Context Configuration:
self.context_default = self.config.get_value("context_default")
#*** DPAE Registration Parameters:
self.dpae2ctrl_mac = self.config.get_value("dpae2ctrl_mac")
self.ctrl2dpae_mac = self.config.get_value("ctrl2dpae_mac")
self.dpae_ethertype = self.config.get_value("dpae_ethertype")
#*** Tell switch how to handle fragments (see OpenFlow spec):
self.ofpc_frag = self.config.get_value("ofpc_frag")
#*** Update JSON to support UUID encoding:
JSONEncoder_olddefault = JSONEncoder.default
def JSONEncoder_newdefault(self, o):
if isinstance(o, UUID):
return str(o)
return JSONEncoder_olddefault(self, o)
JSONEncoder.default = JSONEncoder_newdefault
#*** Instantiate Module Classes:
self.switches = switch_abstraction.Switches(self, self.config)
wsgi = kwargs['wsgi']
self.api = api.Api(self, self.config, wsgi)
self.main_policy = main_policy.MainPolicy(self.config)
#*** Start mongodb:
self.logger.info("Connecting to mongodb database...")
self.mongo_addr = self.config.get_value("mongo_addr")
self.mongo_port = self.config.get_value("mongo_port")
mongo_client = MongoClient(self.mongo_addr, self.mongo_port)
#*** Connect to specific databases and collections in mongodb:
#*** ID Service database:
db_svc = mongo_client.idsvc_database
self.dbidsvc = db_svc.idsvc
#*** ID Node database:
db_node = mongo_client.idnode_database
        self.dbidnode = db_node.idnode
#*** ID IP database:
db_ip = mongo_client.idip_database
        self.dbidip = db_ip.idip
#*** ID MAC database (with a connection test var):
db_mac = mongo_client.mac_database
self.dbidmac = db_mac.idmac
dbtest = db_mac.cxntest
#*** DPAE database:
db_dpae = mongo_client.dpae_database
self.dbdpae = db_dpae.dpae
#*** Test a Database Connection:
try:
dbtest.delete_many({})
        except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.logger.critical("Fatal. Mongodb connection failed. "
"Exception %s, %s, %s. Check that database"
" is running and nmeta config file has correct mongodb "
"connection parameters",
exc_type, exc_value, exc_traceback)
sys.exit()
test_data = {"testing": "1,2,3"}
test_id = dbtest.insert_one(test_data).inserted_id
result = dbtest.find(test_data).count()
if result == 1:
self.logger.info("Success! Connected to mongodb database")
else:
            self.logger.critical("Fatal. Mongodb test failed. "
"database addr mongo_addr=%s mongo_port=%s. Check that database"
" is running and nmeta config file has correct mongodb "
"connection parameters", self.mongo_addr, self.mongo_port)
sys.exit()
#*** ID Service database - delete all previous entries:
result = self.dbidsvc.delete_many({})
self.logger.info("Initialising ID Service database, Deleted %s "
"previous entries from dbidsvc", result.deleted_count)
#*** ID Node database - delete all previous entries:
result = self.dbidnode.delete_many({})
self.logger.info("Initialising ID Node database, Deleted %s previous "
"entries from dbidnode", result.deleted_count)
#*** ID IP database - delete all previous entries:
result = self.dbidip.delete_many({})
self.logger.info("Initialising ID IP database, Deleted %s previous "
"entries from dbidip", result.deleted_count)
#*** ID MAC database - delete all previous entries:
result = self.dbidmac.delete_many({})
self.logger.info("Initialising ID MAC database, Deleted %s previous "
"entries from dbidmac", result.deleted_count)
#*** DPAE database - delete all previous entries:
result = self.dbdpae.delete_many({})
self.logger.info("Initialising DPAE database, Deleted %s previous "
"entries from dbdpae", result.deleted_count)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_connection_handler(self, ev):
"""
A switch has connected to the SDN controller.
We need to do some tasks to set the switch up properly:
- Instantiate a class to represent the switch and flow tables
- Delete all existing flow entries
- Set config for fragment handling and table miss packet length
- Set up initial flow entries in flow tables
- Install non-DPAE TC flows from optimised policy to switch
- Request the switch send us its description
        The supported OpenFlow versions are controlled by the OFP_VERSIONS
constant set in class base.
"""
datapath = ev.msg.datapath
self.logger.info("In switch_connection_handler dpid=%s", datapath.id)
#*** Add switch to our class abstraction:
self.switches.add(datapath)
switch = self.switches[datapath.id]
#*** Delete all existing flows from the switch:
switch.flowtables.delete_all_flows()
#*** Set the configuration on the switch:
switch.set_switch_config(self.ofpc_frag, self.miss_send_len)
#*** Set up switch flow table basics:
switch.flowtables.add_fe_iig_broadcast()
switch.flowtables.add_fe_iig_miss()
switch.flowtables.add_fe_iim_miss()
switch.flowtables.add_fe_tcf_accepts()
switch.flowtables.add_fe_tcf_miss()
switch.flowtables.add_fe_tc_miss()
switch.flowtables.add_fe_amf_miss()
switch.flowtables.add_fe_tt_miss()
switch.flowtables.add_fe_fwd_miss()
#*** Set flow entry for DPAE join packets:
switch.flowtables.add_fe_iim_dpae_join()
#*** Install non-DPAE static TC flows from optimised policy to switch:
switch.flowtables.add_fe_tc_static \
(self.main_policy.optimised_rules.get_rules())
        #*** Request the switch to send us its description:
switch.request_switch_desc()
@set_ev_cls(ofp_event.EventOFPDescStatsReply, MAIN_DISPATCHER)
def desc_stats_reply_handler(self, ev):
"""
Receive a reply from a switch to a description
statistics request
"""
body = ev.msg.body
datapath = ev.msg.datapath
dpid = datapath.id
self.logger.info('event=DescStats Switch dpid=%s is mfr_desc="%s" '
'hw_desc="%s" sw_desc="%s" serial_num="%s" dp_desc="%s"',
dpid, body.mfr_desc, body.hw_desc, body.sw_desc,
body.serial_num, body.dp_desc)
@set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER)
def flow_removed_handler(self, ev):
"""
A switch has sent an event to us because it has removed
a flow from a flow table
"""
msg = ev.msg
datapath = msg.datapath
ofp = datapath.ofproto
if msg.reason == ofp.OFPRR_IDLE_TIMEOUT:
reason = 'IDLE TIMEOUT'
elif msg.reason == ofp.OFPRR_HARD_TIMEOUT:
reason = 'HARD TIMEOUT'
elif msg.reason == ofp.OFPRR_DELETE:
reason = 'DELETE'
elif msg.reason == ofp.OFPRR_GROUP_DELETE:
reason = 'GROUP DELETE'
else:
reason = 'unknown'
self.logger.info('Flow removed msg '
'cookie=%d priority=%d reason=%s table_id=%d '
'duration_sec=%d '
'idle_timeout=%d hard_timeout=%d '
'packets=%d bytes=%d match=%s',
msg.cookie, msg.priority, reason, msg.table_id,
msg.duration_sec,
msg.idle_timeout, msg.hard_timeout,
msg.packet_count, msg.byte_count, msg.match)
# Is it a MAC learning suppression FE idle timeout?
if msg.table_id == self.ft_iim and \
msg.reason == ofp.OFPRR_IDLE_TIMEOUT:
switch = self.switches[datapath.id]
#*** Extract the MAC from the match:
mac = msg.match['eth_src']
in_port = msg.match['in_port']
#*** TBD, deal with context:
context = self.context_default
#*** Call method to delete FEs:
switch.mactable.delete(mac, in_port, context)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
"""
A switch has sent us a Packet In event
"""
msg = ev.msg
datapath = msg.datapath
ofproto = msg.datapath.ofproto
dpid = datapath.id
switch = self.switches[dpid]
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
#*** TBD, deal with context:
context = self.context_default
#*** Extra debug if syslog or console logging set to DEBUG:
if self.debug_on:
self._packet_in_debug(ev, in_port)
#*** Is it a DPAE Join request? If so, call function to handle it:
if eth.src == self.ctrl2dpae_mac and eth.dst == self.dpae2ctrl_mac:
self.dpae_join(pkt, datapath, in_port)
return 1
self.logger.info("Learned mac=%s dpid=%s port=%s",
eth.src, dpid, in_port)
#*** Add to MAC/port pair to switch MAC table:
switch.mactable.add(eth.src, in_port, context)
#*** In active mode with a DPAE, we need to add an AMF flow entry:
if self.main_policy.tc_policies.mode == 'active':
#*** Look the DPID up in the database:
db_result = self.dbdpae.find_one({'dpid': dpid})
if db_result:
self.logger.info("Found DPAE for dpid=%s, adding AMF entry",
dpid)
#*** Get the dpae port for that switch:
#*** TBD, handle more than one DPAE per switch
dpae_port = db_result['switch_port']
if dpae_port:
#*** Add FE to the Active Mode Filter (ft_amf) Flow table:
self.logger.info("Adding AMF entry dpid=%s dpae_port=%s "
"mac=%s", dpid, dpae_port, eth.src)
switch.flowtables.add_fe_amf_macport_dst(dpae_port,
eth.src)
else:
self.logger.error("No DPAE switch port for dpid=%s", dpid)
else:
self.logger.debug("No DPAE found for dpid=%s", dpid)
#*** Add source MAC / in port to Forwarding table as destinations so
#*** that we don't flood them:
switch.flowtables.add_fe_fwd_macport_dst(in_port, eth.src)
#*** Add source MAC / in port to Identity Indicator (MAC) table so
#*** that we don't get further packet in events for this combo:
switch.flowtables.add_fe_iim_macport_src(in_port, eth.src)
#*** Do a packet out to avoid going through DPAE in active mode
#*** which causes bad MAC learning in adjacent switches
#*** if forwarding entry not installed:
# Send out specific port if known or flood:
out_port = switch.mactable.mac2port(eth.dst, context)
if out_port == switch_abstraction.PORT_NOT_FOUND:
out_port = ofproto.OFPP_FLOOD
#*** Packet out:
switch.packet_out(msg.data, in_port, out_port, 0, 1)
@set_ev_cls(ofp_event.EventOFPErrorMsg,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def error_msg_handler(self, ev):
"""
A switch has sent us an error event
"""
msg = ev.msg
datapath = msg.datapath
dpid = datapath.id
self.logger.error('event=OFPErrorMsg_received: dpid=%s '
'type=%s code=%s message=%s',
dpid, msg.type, msg.code, utils.hex_array(msg.data))
#*** Log human-friendly decodes for the error type and code:
type1, type2, code1, code2 = of_error_decode.decode(msg.type, msg.code)
self.logger.error('error_type=%s %s error_code=%s %s', type1, type2,
code1, code2)
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
"""
Switch Port Status event
"""
msg = ev.msg
reason = msg.reason
port = msg.desc.port_no
ofproto = msg.datapath.ofproto
if reason == ofproto.OFPPR_ADD:
self.logger.info("port added port=%s", port)
elif reason == ofproto.OFPPR_DELETE:
self.logger.info("port deleted port=%s", port)
elif reason == ofproto.OFPPR_MODIFY:
self.logger.info("port modified port=%s", port)
else:
self.logger.info("Illegal port state port=%s %s", port, reason)
def tc_start(self, datapath, dpae_port):
"""
Add a Flow Entry to switch to clone selected packets to a
DPAE so that it can perform Traffic Classification analysis
on them
"""
dpid = datapath.id
self.logger.info("Starting TC to DPAE on datapath=%s, dpae_port=%s",
dpid, dpae_port)
switch = self.switches[dpid]
#*** Check if Active or Passive TC Mode:
mode = self.main_policy.tc_policies.mode
self.logger.info("TC mode=%s", mode)
#*** TBD, deal with context:
context = self.context_default
#*** Set up group table to send to DPAE:
# NEEDS OVS 2.1 OR HIGHER SO COMMENTED OUT FOR THE MOMENT
# ALSO NEEDS CODE THAT CAN CATER FOR MULTIPLE DPAE
#switch.flowtables.add_group_dpae(out_port)
if self.main_policy.identity.lldp:
#*** Install FEs to send LLDP Identity indicators to DPAE:
switch.flowtables.add_fe_iig_lldp(dpae_port)
if self.main_policy.identity.dhcp:
#*** Install FEs to send DHCP Identity indicators to DPAE:
switch.flowtables.add_fe_iig_dhcp(dpae_port)
if self.main_policy.identity.dns:
#*** Install FEs to send DNS Identity indicators to DPAE:
switch.flowtables.add_fe_iig_dns(dpae_port)
if mode == 'active':
#*** Install AMF entries for MACs we already know dest for:
mac_list = switch.mactable.dump_macs(context)
for mac in mac_list:
self.logger.debug("Adding previously learned mac=%s dpid=%s "
"dpae_port=%s to Active Mode Filter (amf)", mac, dpid,
dpae_port)
switch.flowtables.add_fe_amf_macport_dst(dpae_port, mac)
#*** Install FE to so packets returning from DPAE in active mode
#*** bypass learning tables and go straight to treatment:
switch.flowtables.add_fe_iim_dpae_active_bypass(dpae_port)
#*** Add any general TC flows to send to DPAE if required by policy
#*** (i.e. statistical or payload):
switch.flowtables.add_fe_tc_dpae(
self.main_policy.optimised_rules.get_rules(),
dpae_port, mode)
self.logger.info("TC started to DPAE on datapath=%s, dpae_port=%s",
dpid, dpae_port)
_results = {"status": "tc_started",
"mode": mode}
return _results
def dpae_join(self, pkt, datapath, in_port):
"""
A DPAE may have sent us a join discovery packet (Phase 2)
Check the packet payload to see if it is valid
"""
_payload = str(pkt.protocols[-1])
self.logger.info("Phase 2 DPAE discovery packet received from dpid=%s "
"port=%s payload=%s",
datapath.id, in_port, _payload)
#*** Try decode of payload as JSON:
try:
dpae_discover = json.loads(_payload)
        except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.logger.error("Phase 2 DPAE API Create exception while "
"decoding JSON body=%s Exception %s, %s, %s",
_payload, exc_type, exc_value, exc_traceback)
return 0
#*** Check to see if JSON has a uuid_controller key:
if 'uuid_controller' in dpae_discover:
uuid_controller = dpae_discover['uuid_controller']
else:
self.logger.debug("No uuid_controller field in discovery "
"packet so ignoring...")
return 0
#*** Check to see if JSON has a hostname_dpae key:
if 'hostname_dpae' in dpae_discover:
hostname_dpae = dpae_discover['hostname_dpae']
else:
self.logger.debug("No hostname_dpae field in discovery "
"packet so ignoring...")
return 0
#*** Check to see if JSON has a if_name key:
if 'if_name' in dpae_discover:
if_name = dpae_discover['if_name']
else:
self.logger.debug("No if_name field in discovery "
"packet so ignoring...")
return 0
#*** Check to see if JSON has a uuid_dpae key:
if 'uuid_dpae' in dpae_discover:
uuid_dpae = dpae_discover['uuid_dpae']
else:
self.logger.debug("No uuid_dpae field in discovery "
"packet so ignoring...")
return 0
#*** Look the key up in the database:
db_result = self.dbdpae.find_one({'_id': str(uuid_controller)})
if db_result:
#*** Check all fields match:
if not hostname_dpae == str(db_result[u'hostname_dpae']):
self.logger.error("Phase 2 hostname_dpae mismatch")
return 0
if not if_name == str(db_result[u'if_name']):
self.logger.error("Phase 2 if_name mismatch")
return 0
if not uuid_dpae == str(db_result[u'uuid_dpae']):
self.logger.error("Phase 2 uuid_dpae mismatch")
return 0
self.logger.debug("Phase 2 updating DPAE record")
db_result = self.dbdpae.update_one(
{'_id': str(uuid_controller)},
{
'$set': {
'dpid': datapath.id,
'switch_port': in_port
},
}
)
self.logger.debug("Phase 2 updated %s database record(s)",
db_result.modified_count)
else:
            #*** uuid_controller key was not found in the database, so ignore:
self.logger.debug("Phase 2 discovery packet uuid_controller field "
"not found in database, so ignoring...")
return 0
def tc_advice_id(self, dpid, tc_type, tc_subtype, src_mac, detail1):
"""
Process a Traffic Classification advice message from a DPAE
that relates to an identity
"""
switch = self.switches[dpid]
#*** TBD, deal with context:
context = self.context_default
#*** Look up source mac to get a port number:
port_number = switch.mactable.mac2port(src_mac, context)
#*** TBD, handle return value for port not found...
if tc_subtype == 'lldp':
#*** Check to see if we already know this identity:
db_data = {'id_type': tc_subtype,
'src_mac': src_mac, 'node_name': detail1}
db_result = self.dbidnode.find_one(db_data)
if not db_result:
#*** LLDP identity not in database so add it:
db_data = {'last_seen': time.time(), 'id_type': tc_subtype,
'src_mac': src_mac, 'node_name': detail1}
db_result = self.dbidnode.insert_one(db_data)
self.logger.info("Created new ID Node record id_type=%s "
"node_name=%s", tc_subtype, detail1)
#*** Check to see if we need to add a flow to switch:
switch.flowtables.add_fe_tc_id(tc_subtype, detail1, src_mac,
self.main_policy.optimised_rules.get_rules())
else:
#*** Just update the last_seen field:
                db_result = self.dbidnode.update_one(
{'id_type': tc_subtype,
'src_mac': src_mac, 'node_name': detail1},
{
'$set': {
'last_seen': time.time()
},
}
)
self.logger.debug("Last seen updated for %s of %s ID Node "
"record(s) id_type=%s node_name=%s",
db_result.modified_count,
db_result.matched_count,
tc_subtype, detail1)
else:
self.logger.info("Didn't action tc_subtype=%s", tc_subtype)
def _packet_in_debug(self, ev, in_port):
"""
Generate a debug message describing the packet
in event
"""
#*** Extract parameters:
msg = ev.msg
datapath = msg.datapath
dpid = datapath.id
pkt = packet.Packet(msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
eth_src = eth.src
eth_dst = eth.dst
pkt_ip4 = pkt.get_protocol(ipv4.ipv4)
pkt_ip6 = pkt.get_protocol(ipv6.ipv6)
pkt_tcp = pkt.get_protocol(tcp.tcp)
#*** Some debug about the Packet In:
if pkt_ip4 and pkt_tcp:
self.logger.debug("event=pi_ipv4_tcp dpid=%s "
"in_port=%s ip_src=%s ip_dst=%s tcp_src=%s "
"tcp_dst=%s",
dpid, in_port, pkt_ip4.src, pkt_ip4.dst,
pkt_tcp.src_port, pkt_tcp.dst_port)
elif pkt_ip6 and pkt_tcp:
self.logger.debug("event=pi_ipv6_tcp dpid=%s "
"in_port=%s ip_src=%s ip_dst=%s tcp_src=%s "
"tcp_dst=%s",
dpid, in_port, pkt_ip6.src, pkt_ip6.dst,
pkt_tcp.src_port, pkt_tcp.dst_port)
elif pkt_ip4:
self.logger.debug("event=pi_ipv4 dpid="
"%s in_port=%s ip_src=%s ip_dst=%s proto=%s",
dpid, in_port,
pkt_ip4.src, pkt_ip4.dst, pkt_ip4.proto)
elif pkt_ip6:
self.logger.debug("event=pi_ipv6 dpid=%s "
"in_port=%s ip_src=%s ip_dst=%s",
dpid, in_port,
pkt_ip6.src, pkt_ip6.dst)
else:
self.logger.debug("event=pi_other dpid=%s "
"in_port=%s eth_src=%s eth_dst=%s eth_type=%s",
dpid, in_port, eth_src, eth_dst, eth.ethertype)
| apache-2.0 | 7,914,209,776,357,724,000 | 41.822932 | 79 | 0.557058 | false |
gabriel-stan/gestion-tfg | apps/upload_files/service.py | 1 | 9782 | __author__ = 'tonima'
from openpyxl import load_workbook
from authentication.models import Profesor, Alumno
from tfgs.models import Titulacion, Tfg_Asig, Tfg
import utils
from django.db.models import Q
class Tfgs_masivos(object):
def __init__(self, fichero=None):
if fichero:
self.wb = load_workbook(fichero)
self.ws = self.wb.active
self.errores = []
self.exitos = []
def upload_file_tfg(self, u_fila, p_fila, cabeceras, titulacion):
for i in range(p_fila, u_fila+1):
try:
data_tfg = self.read_data(cabeceras, i)
self.tfg = self.check_tfg(data_tfg, i, titulacion)
resul = Tfg.objects.simular_create_tfg(**self.tfg)
if self.tfg is not False and resul is True:
self.exitos.append(dict(fila=i, tfg=self.tfg))
else:
self.errores.append(dict(fila=i, message=resul))
except Profesor.DoesNotExist:
self.errores.append(dict(fila=i, message='El profesor no existe'))
continue
except Titulacion.DoesNotExist:
self.errores.append(dict(fila=i, message='La titulacion no existe'))
continue
except Exception as e:
self.errores.append(dict(fila=i, message=e.message))
continue
return dict(status=True, exitos=self.exitos, errores=self.errores)
def read_data(self, cabeceras, i):
resul = dict(tipo=self.ws[cabeceras['tipo'] + str(i)].value,
titulo=self.ws[cabeceras['titulo'] + str(i)].value,
n_alumnos=self.ws[cabeceras['n_alumnos'] + str(i)].value,
descripcion=self.ws[cabeceras['descripcion'] + str(i)].value,
conocimientos_previos=
self.ws[cabeceras['conocimientos_previos'] + str(i)].value,
hard_soft=self.ws[cabeceras['hard_soft'] + str(i)].value,
#titulacion=self.ws[cabeceras['titulacion'] + str(i)].value,
tutor=self.ws[cabeceras['tutor'] + str(i)].value,
cotutor=self.ws[cabeceras['cotutor'] + str(i)].value)
return resul
def check_tfg(self, tfg, i, titulacion):
if not tfg.get('titulo'):
raise Exception('El TFG no tiene titulo')
tfg['tutor'] = Profesor.objects.get(email=tfg.get('tutor'))
tfg['titulacion'] = Titulacion.objects.get(codigo=titulacion)
if tfg.get('cotutor'):
tfg['cotutor'] = Profesor.objects.get(email=str(tfg.get('cotutor')))
tfg = dict(tipo=tfg['tipo'], titulo=tfg['titulo'], n_alumnos=tfg['n_alumnos'],
descripcion=tfg['descripcion'], conocimientos_previos=tfg['conocimientos_previos'],
hard_soft=tfg['hard_soft'],
tutor=tfg['tutor'].email, cotutor=tfg['cotutor'].email, titulacion=tfg['titulacion'].codigo)
else:
tfg = dict(tipo=tfg['tipo'], titulo=tfg['titulo'], n_alumnos=tfg['n_alumnos'],
descripcion=tfg['descripcion'], conocimientos_previos=tfg['conocimientos_previos'],
hard_soft=tfg['hard_soft'],
tutor=tfg['tutor'].email, titulacion=tfg['titulacion'].codigo)
return tfg
def upload_file_confirm(self, tfgs):
errores = []
for index, data_tfg in enumerate(tfgs):
try:
tfg = data_tfg.get('tfg')
res = Tfg.objects.create(**tfg)
if not res.get('status'):
errores.append(dict(fila=index, tfg=tfg))
except Exception as e:
errores.append(dict(fila=index, message=e.message))
continue
return dict(status=True, errores=errores)
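# Usage sketch (illustrative only): 'cabeceras' maps each TFG field to a
# spreadsheet column letter, and p_fila/u_fila are the first and last data rows.
# The helper name, file name, column letters and titulacion code are assumptions.
def _ejemplo_carga_masiva():
    cabeceras = {'tipo': 'A', 'titulo': 'B', 'n_alumnos': 'C', 'descripcion': 'D',
                 'conocimientos_previos': 'E', 'hard_soft': 'F',
                 'tutor': 'G', 'cotutor': 'H'}
    cargador = Tfgs_masivos('tfgs.xlsx')
    simulacion = cargador.upload_file_tfg(u_fila=10, p_fila=2,
                                          cabeceras=cabeceras, titulacion='GII')
    # Persist only the rows that passed the simulation:
    return cargador.upload_file_confirm(simulacion['exitos'])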
class Tfgs_asig_masivos(Tfgs_masivos):
def __init__(self, fichero=None):
super(Tfgs_asig_masivos, self).__init__(fichero)
def upload_file_tfg(self, u_fila, p_fila, cabeceras, titulacion):
for i in range(p_fila, u_fila+1):
try:
data_tfg = self.read_data(cabeceras, i)
self.tfg = self.check_tfg(data_tfg, i, titulacion)
resul = Tfg.objects.simular_create_tfg(**self.tfg)
if self.tfg is not False and resul is True:
model_tfg = Tfg(**data_tfg)
self.check_tfg_asig(data_tfg, cabeceras, i)
tfg_asig = dict(tfg=model_tfg, alumno_1=data_tfg['alumno_1'], alumno_2=data_tfg['alumno_2'],
alumno_3=data_tfg['alumno_3'])
resul = Tfg_Asig.objects.simular_create_tfg_asig(**tfg_asig)
if resul is True:
self.exitos.append(dict(fila=i, tfg=self.tfg))
else:
self.errores.append(dict(fila=i, message=resul))
else:
self.errores.append(dict(fila=i, message=resul))
except Profesor.DoesNotExist:
self.errores.append(dict(fila=i, message='El profesor no existe'))
continue
except Alumno.DoesNotExist:
self.errores.append(dict(fila=i, message='El alumno no existe'))
continue
except Titulacion.DoesNotExist:
self.errores.append(dict(fila=i, message='La titulacion no existe'))
continue
except Exception as e:
self.errores.append(dict(fila=i, message=e.message))
continue
return dict(status=True, exitos=self.exitos, errores=self.errores)
def check_tfg_asig(self, data_tfg, cabeceras, i):
data_tfg['alumno_1'], self.tfg['alumno_1'] = utils.alumno_email_or_dni(
unicode(self.ws[cabeceras['alumno_1'] + str(i)].value) if cabeceras.get('alumno_1') and \
self.ws[cabeceras['alumno_1'] + str(i)].value \
else None)
self.tfg['nombre_alumno_1'] = unicode(self.ws[cabeceras['nombre_alumno_1'] + str(i)].value) if cabeceras.get('nombre_alumno_1') and \
self.ws[cabeceras['nombre_alumno_1'] + str(i)].value \
else None
data_tfg['alumno_2'], self.tfg['alumno_2'] = utils.alumno_email_or_dni(
unicode(self.ws[cabeceras['alumno_2'] + str(i)].value) if cabeceras.get('alumno_2') and \
self.ws[cabeceras['alumno_2'] + str(i)].value \
else None)
self.tfg['nombre_alumno_2'] = unicode(self.ws[cabeceras['nombre_alumno_2'] + str(i)].value) if cabeceras.get('nombre_alumno_2') and \
self.ws[cabeceras['nombre_alumno_2'] + str(i)].value \
else None
data_tfg['alumno_3'], self.tfg['alumno_3'] = utils.alumno_email_or_dni(
unicode(self.ws[cabeceras['alumno_3'] + str(i)].value) if cabeceras.get('alumno_3') and \
self.ws[cabeceras['alumno_3'] + str(i)].value \
else None)
self.tfg['nombre_alumno_3'] = unicode(self.ws[cabeceras['nombre_alumno_3'] + str(i)].value) if cabeceras.get('nombre_alumno_3') and \
self.ws[cabeceras['nombre_alumno_3'] + str(i)].value \
else None
def upload_file_confirm(self, tfgs):
errores = []
for index, data_tfg in enumerate(tfgs):
try:
self.alumno_1 = self.get_or_create_alumno(data_tfg['tfg'].get('alumno_1'), data_tfg['tfg'].get('nombre_alumno_1')) if data_tfg['tfg'] \
.get('alumno_1') else None
self.alumno_2 = self.get_or_create_alumno(data_tfg['tfg'].get('alumno_2'), data_tfg['tfg'].get('nombre_alumno_2')) if data_tfg['tfg'] \
.get('alumno_2') else None
self.alumno_3 = self.get_or_create_alumno(data_tfg['tfg'].get('alumno_3'), data_tfg['tfg'].get('nombre_alumno_3')) if data_tfg['tfg'] \
.get('alumno_3') else None
self.tfg = Tfg.objects.create(**data_tfg['tfg'])
res = Tfg_Asig.objects.create(tfg=self.tfg.get('data'), alumno_1=self.alumno_1, alumno_2=self.alumno_2,
alumno_3=self.alumno_3)
if not res.get('status'):
errores.append(dict(fila=index, tfg=data_tfg))
except Exception as e:
errores.append(dict(fila=index, message=e.message))
continue
return dict(status=True, errores=errores)
def get_or_create_alumno(self, alumno, nombre=None):
if utils.is_email_alumno(alumno):
if not Alumno.objects.filter(email=alumno if alumno else None).exists():
Alumno.objects.create_user(email=alumno, first_name=nombre)
try:
return Alumno.objects.get(email=alumno)
except Alumno.DoesNotExist:
raise NameError('Error en el alumno %s' % alumno)
elif utils.is_dni(alumno):
if not Alumno.objects.filter(dni=alumno if alumno else None).exists():
Alumno.objects.create_user(dni=alumno, first_name=nombre)
try:
return Alumno.objects.get(dni=alumno)
except Alumno.DoesNotExist:
raise NameError('Error en el alumno %s' % alumno)
else:
            raise NameError('Error en el alumno %s' % alumno)
| gpl-2.0 | 3,073,960,981,766,819,300 | 51.315508 | 151 | 0.538336 | false |
flp9001/astrology | astro/horoscope/views.py | 1 | 3623 | #coding: utf-8
import json
import urllib
import geocoder
import pytz
from datetime import datetime
from django.shortcuts import render
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
from django.utils import timezone
from .models import Ephemeris, Event, Houses, Location
from utils import PLANET_NAMES, SIGNS, dms
def home(request):
now = timezone.now()
params = {}
params['datenow'] = now.strftime("%d/%m/%Y")
params['timenow'] = now.strftime("%H:%M")
return render(request, 'horoscope/home.html', params)
def my_events(request):
return render(request, 'horoscope/event_list.html', {'events': Event.objects.all().select_related('ephemeris', 'location')})
def parse_date(date_str, time_str):
now = datetime.now()
if not date_str and not time_str:
return now
if not date_str:
date = now.date()
else:
date = datetime.strptime(date_str, "%d-%m-%Y").date()
if not time_str:
time = now.time()
else:
time = datetime.strptime(time_str, "%H:%M").time()
return datetime.combine(date, time)
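# Illustrative behaviour of parse_date (dates use day-month-year); whichever
# part is missing falls back to "now":
#   parse_date('01-02-2020', '13:30') -> datetime(2020, 2, 1, 13, 30)
#   parse_date(None, '13:30')         -> today's date at 13:30
#   parse_date(None, None)            -> datetime.now()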
def eph(request):
data = {}
date = request.GET.get('date', None)
time = request.GET.get('time', None)
date = parse_date(date, time)
location = request.GET.get('city', None)
if date and time and location:
l = Location.create(location)
date = l.timezone.localize(date)
date = date.astimezone(pytz.utc)
houses = Houses.create(date, l.lat, l.lng)
data['houses'] = [getattr(houses, i.name) for i in houses._meta.fields[1:]]
data['location'] = {'city': l.city, 'lat': dms(l.lat), 'lng': dms(l.lng)}
data['planets'] = get_planets(date)
data['date'] = str(date)
return HttpResponse(
json.dumps(data, indent=4),
content_type='application/javascript; charset=utf8'
)
def get_planets(date):
e = Ephemeris.create(date)
planets = []
for index, field in enumerate(e._meta.fields[:11]):
if field.name != 'id':
v = getattr(e, field.name)
planet = {}
planet['index'] = index
planet['name'] = field.name
planet['angle'] = v
planet['sign_index'] = int(v/30)
planets.append(planet)
return planets
def chart(request):
date_str = request.GET.get('date', None)
time_str = request.GET.get('time', None)
date = parse_date(date_str, time_str)
planets = []
for p in get_planets(date):
p['code'] = p['name']
p['angle'] = p['angle'] % 30
p['name'] = PLANET_NAMES[p['code']]
p['sign_code'], p['sign'] = SIGNS[p['sign_index']]
planets.append(p)
return render(request, 'horoscope/chart.html', {'date': date, 'planets': planets})
def save_event(request):
print 'save_event'
if request.method == 'POST':
name = request.POST.get('name')
date = request.POST.get('date')
time = request.POST.get('time')
location = request.POST.get('location')
e = Event.create(name, date, time, location)
e.save()
print request.POST
return HttpResponse(request.POST)
def geocode(request):
query = request.GET.get('query', None)
query = urllib.unquote(query).decode('utf8')
g = geocoder.google(query)
if g.ok and g.city:
data = [{'value': g.address, 'tokens': g.city.split()}]
else:
data = {}
data = json.dumps(data, cls=DjangoJSONEncoder, indent=2, separators=(',', ': '))
return HttpResponse(data, content_type="application/json")
| lgpl-3.0 | -8,005,701,420,390,494,000 | 25.639706 | 128 | 0.606404 | false |
32bitmicro/EDA | python/eda/eda/pcb.py | 1 | 45843 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014, Paweł Wodnicki
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the 32bitmicro nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL Paweł Wodnicki BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from edautils import *
from eda import *
pcb_symbols= """
Symbol(' ' 18)
(
)
Symbol('!' 12)
(
SymbolLine(0 35 0 40 8)
SymbolLine(0 0 0 25 8)
)
Symbol('"' 12)
(
SymbolLine(0 0 0 10 8)
SymbolLine(10 0 10 10 8)
)
Symbol('#' 12)
(
SymbolLine(0 25 20 25 8)
SymbolLine(0 15 20 15 8)
SymbolLine(15 10 15 30 8)
SymbolLine(5 10 5 30 8)
)
Symbol('$' 12)
(
SymbolLine(15 5 20 10 8)
SymbolLine(5 5 15 5 8)
SymbolLine(0 10 5 5 8)
SymbolLine(0 10 0 15 8)
SymbolLine(0 15 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 30 8)
SymbolLine(15 35 20 30 8)
SymbolLine(5 35 15 35 8)
SymbolLine(0 30 5 35 8)
SymbolLine(10 0 10 40 8)
)
Symbol('%' 12)
(
SymbolLine(0 5 0 10 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 10 0 8)
SymbolLine(10 0 15 5 8)
SymbolLine(15 5 15 10 8)
SymbolLine(10 15 15 10 8)
SymbolLine(5 15 10 15 8)
SymbolLine(0 10 5 15 8)
SymbolLine(0 40 40 0 8)
SymbolLine(35 40 40 35 8)
SymbolLine(40 30 40 35 8)
SymbolLine(35 25 40 30 8)
SymbolLine(30 25 35 25 8)
SymbolLine(25 30 30 25 8)
SymbolLine(25 30 25 35 8)
SymbolLine(25 35 30 40 8)
SymbolLine(30 40 35 40 8)
)
Symbol('&' 12)
(
SymbolLine(0 35 5 40 8)
SymbolLine(0 5 0 15 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 25 15 10 8)
SymbolLine(5 40 10 40 8)
SymbolLine(10 40 20 30 8)
SymbolLine(0 15 25 40 8)
SymbolLine(5 0 10 0 8)
SymbolLine(10 0 15 5 8)
SymbolLine(15 5 15 10 8)
SymbolLine(0 25 0 35 8)
)
Symbol(''' 12)
(
SymbolLine(0 10 10 0 8)
)
Symbol('(' 12)
(
SymbolLine(0 35 5 40 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 35 8)
)
Symbol(')' 12)
(
SymbolLine(0 0 5 5 8)
SymbolLine(5 5 5 35 8)
SymbolLine(0 40 5 35 8)
)
Symbol('*' 12)
(
SymbolLine(0 10 20 30 8)
SymbolLine(0 30 20 10 8)
SymbolLine(0 20 20 20 8)
SymbolLine(10 10 10 30 8)
)
Symbol('+' 12)
(
SymbolLine(0 20 20 20 8)
SymbolLine(10 10 10 30 8)
)
Symbol(',' 12)
(
SymbolLine(0 50 10 40 8)
)
Symbol('-' 12)
(
SymbolLine(0 20 20 20 8)
)
Symbol('.' 12)
(
SymbolLine(0 40 5 40 8)
)
Symbol('/' 12)
(
SymbolLine(0 35 30 5 8)
)
Symbol('0' 12)
(
SymbolLine(0 35 5 40 8)
SymbolLine(0 5 0 35 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 30 20 10 8)
)
Symbol('1' 12)
(
SymbolLine(5 40 15 40 8)
SymbolLine(10 0 10 40 8)
SymbolLine(0 10 10 0 8)
)
Symbol('2' 12)
(
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 15 8)
SymbolLine(0 40 25 15 8)
SymbolLine(0 40 25 40 8)
)
Symbol('3' 12)
(
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 20 20 20 8)
)
Symbol('4' 12)
(
SymbolLine(0 20 20 0 8)
SymbolLine(0 20 25 20 8)
SymbolLine(20 0 20 40 8)
)
Symbol('5' 12)
(
SymbolLine(0 0 20 0 8)
SymbolLine(0 0 0 20 8)
SymbolLine(0 20 5 15 8)
SymbolLine(5 15 15 15 8)
SymbolLine(15 15 20 20 8)
SymbolLine(20 20 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('6' 12)
(
SymbolLine(15 0 20 5 8)
SymbolLine(5 0 15 0 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(15 20 20 25 8)
SymbolLine(0 20 15 20 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(20 25 20 35 8)
)
Symbol('7' 12)
(
SymbolLine(0 40 25 15 8)
SymbolLine(25 0 25 15 8)
SymbolLine(0 0 25 0 8)
)
Symbol('8' 12)
(
SymbolLine(0 35 5 40 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 15 5 20 8)
SymbolLine(0 5 0 15 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 15 8)
SymbolLine(15 20 20 15 8)
)
Symbol('9' 12)
(
SymbolLine(0 40 20 20 8)
SymbolLine(20 5 20 20 8)
SymbolLine(15 0 20 5 8)
SymbolLine(5 0 15 0 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 15 8)
SymbolLine(0 15 5 20 8)
SymbolLine(5 20 20 20 8)
)
Symbol(':' 12)
(
SymbolLine(0 15 5 15 8)
SymbolLine(0 25 5 25 8)
)
Symbol(';' 12)
(
SymbolLine(0 40 10 30 8)
SymbolLine(10 15 10 20 8)
)
Symbol('<' 12)
(
SymbolLine(0 20 10 10 8)
SymbolLine(0 20 10 30 8)
)
Symbol('=' 12)
(
SymbolLine(0 15 20 15 8)
SymbolLine(0 25 20 25 8)
)
Symbol('>' 12)
(
SymbolLine(0 10 10 20 8)
SymbolLine(0 30 10 20 8)
)
Symbol('?' 12)
(
SymbolLine(10 20 10 25 8)
SymbolLine(10 35 10 40 8)
SymbolLine(0 5 0 10 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 10 8)
SymbolLine(10 20 20 10 8)
)
Symbol('A' 12)
(
SymbolLine(0 5 0 40 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 40 8)
SymbolLine(0 20 25 20 8)
)
Symbol('B' 12)
(
SymbolLine(0 40 20 40 8)
SymbolLine(20 40 25 35 8)
SymbolLine(25 25 25 35 8)
SymbolLine(20 20 25 25 8)
SymbolLine(5 20 20 20 8)
SymbolLine(5 0 5 40 8)
SymbolLine(0 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 15 8)
SymbolLine(20 20 25 15 8)
)
Symbol('C' 12)
(
SymbolLine(5 40 20 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(0 5 0 35 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 20 0 8)
)
Symbol('D' 12)
(
SymbolLine(5 0 5 40 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 35 8)
SymbolLine(20 40 25 35 8)
SymbolLine(0 40 20 40 8)
SymbolLine(0 0 20 0 8)
)
Symbol('E' 12)
(
SymbolLine(0 20 15 20 8)
SymbolLine(0 40 20 40 8)
SymbolLine(0 0 0 40 8)
SymbolLine(0 0 20 0 8)
)
Symbol('F' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 0 20 0 8)
SymbolLine(0 20 15 20 8)
)
Symbol('G' 12)
(
SymbolLine(20 0 25 5 8)
SymbolLine(5 0 20 0 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 20 40 8)
SymbolLine(20 40 25 35 8)
SymbolLine(25 25 25 35 8)
SymbolLine(20 20 25 25 8)
SymbolLine(10 20 20 20 8)
)
Symbol('H' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(25 0 25 40 8)
SymbolLine(0 20 25 20 8)
)
Symbol('I' 12)
(
SymbolLine(0 0 10 0 8)
SymbolLine(5 0 5 40 8)
SymbolLine(0 40 10 40 8)
)
Symbol('J' 12)
(
SymbolLine(0 0 15 0 8)
SymbolLine(15 0 15 35 8)
SymbolLine(10 40 15 35 8)
SymbolLine(5 40 10 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('K' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 20 20 0 8)
SymbolLine(0 20 20 40 8)
)
Symbol('L' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 40 20 40 8)
)
Symbol('M' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 0 15 15 8)
SymbolLine(15 15 30 0 8)
SymbolLine(30 0 30 40 8)
)
Symbol('N' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 0 0 5 8)
SymbolLine(0 5 25 30 8)
SymbolLine(25 0 25 40 8)
)
Symbol('O' 12)
(
SymbolLine(0 5 0 35 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('P' 12)
(
SymbolLine(5 0 5 40 8)
SymbolLine(0 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 15 8)
SymbolLine(20 20 25 15 8)
SymbolLine(5 20 20 20 8)
)
Symbol('Q' 12)
(
SymbolLine(0 5 0 35 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(10 30 20 40 8)
)
Symbol('R' 12)
(
SymbolLine(0 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 15 8)
SymbolLine(20 20 25 15 8)
SymbolLine(5 20 20 20 8)
SymbolLine(5 0 5 40 8)
SymbolLine(5 20 25 40 8)
)
Symbol('S' 12)
(
SymbolLine(20 0 25 5 8)
SymbolLine(5 0 20 0 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 15 8)
SymbolLine(0 15 5 20 8)
SymbolLine(5 20 20 20 8)
SymbolLine(20 20 25 25 8)
SymbolLine(25 25 25 35 8)
SymbolLine(20 40 25 35 8)
SymbolLine(5 40 20 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('T' 12)
(
SymbolLine(0 0 20 0 8)
SymbolLine(10 0 10 40 8)
)
Symbol('U' 12)
(
SymbolLine(0 0 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(20 0 20 35 8)
)
Symbol('V' 12)
(
SymbolLine(0 0 0 30 8)
SymbolLine(0 30 10 40 8)
SymbolLine(10 40 20 30 8)
SymbolLine(20 0 20 30 8)
)
Symbol('W' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 40 15 25 8)
SymbolLine(15 25 30 40 8)
SymbolLine(30 0 30 40 8)
)
Symbol('X' 12)
(
SymbolLine(0 0 0 5 8)
SymbolLine(0 5 25 30 8)
SymbolLine(25 30 25 40 8)
SymbolLine(0 30 0 40 8)
SymbolLine(0 30 25 5 8)
SymbolLine(25 0 25 5 8)
)
Symbol('Y' 12)
(
SymbolLine(0 0 0 5 8)
SymbolLine(0 5 10 15 8)
SymbolLine(10 15 20 5 8)
SymbolLine(20 0 20 5 8)
SymbolLine(10 15 10 40 8)
)
Symbol('Z' 12)
(
SymbolLine(0 0 25 0 8)
SymbolLine(25 0 25 5 8)
SymbolLine(0 30 25 5 8)
SymbolLine(0 30 0 40 8)
SymbolLine(0 40 25 40 8)
)
Symbol('[' 12)
(
SymbolLine(0 0 5 0 8)
SymbolLine(0 0 0 40 8)
SymbolLine(0 40 5 40 8)
)
Symbol('\' 12)
(
SymbolLine(0 5 30 35 8)
)
Symbol(']' 12)
(
SymbolLine(0 0 5 0 8)
SymbolLine(5 0 5 40 8)
SymbolLine(0 40 5 40 8)
)
Symbol('^' 12)
(
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 10 5 8)
)
Symbol('_' 12)
(
SymbolLine(0 40 20 40 8)
)
Symbol('a' 12)
(
SymbolLine(15 20 20 25 8)
SymbolLine(5 20 15 20 8)
SymbolLine(0 25 5 20 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(20 20 20 35 8)
SymbolLine(20 35 25 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
)
Symbol('b' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(20 25 20 35 8)
SymbolLine(15 20 20 25 8)
SymbolLine(5 20 15 20 8)
SymbolLine(0 25 5 20 8)
)
Symbol('c' 12)
(
SymbolLine(5 20 20 20 8)
SymbolLine(0 25 5 20 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 20 40 8)
)
Symbol('d' 12)
(
SymbolLine(20 0 20 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
)
Symbol('e' 12)
(
SymbolLine(5 40 20 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(0 30 20 30 8)
SymbolLine(20 30 20 25 8)
)
Symbol('f' 10)
(
SymbolLine(5 5 5 40 8)
SymbolLine(5 5 10 0 8)
SymbolLine(10 0 15 0 8)
SymbolLine(0 20 10 20 8)
)
Symbol('g' 12)
(
SymbolLine(15 20 20 25 8)
SymbolLine(5 20 15 20 8)
SymbolLine(0 25 5 20 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(0 50 5 55 8)
SymbolLine(5 55 15 55 8)
SymbolLine(15 55 20 50 8)
SymbolLine(20 20 20 50 8)
)
Symbol('h' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 40 8)
)
Symbol('i' 10)
(
SymbolLine(0 10 0 15 8)
SymbolLine(0 25 0 40 8)
)
Symbol('j' 10)
(
SymbolLine(5 10 5 15 8)
SymbolLine(5 25 5 50 8)
SymbolLine(0 55 5 50 8)
)
Symbol('k' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 25 15 40 8)
SymbolLine(0 25 10 15 8)
)
Symbol('l' 10)
(
SymbolLine(0 0 0 35 8)
SymbolLine(0 35 5 40 8)
)
Symbol('m' 12)
(
SymbolLine(5 25 5 40 8)
SymbolLine(5 25 10 20 8)
SymbolLine(10 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 40 8)
SymbolLine(20 25 25 20 8)
SymbolLine(25 20 30 20 8)
SymbolLine(30 20 35 25 8)
SymbolLine(35 25 35 40 8)
SymbolLine(0 20 5 25 8)
)
Symbol('n' 12)
(
SymbolLine(5 25 5 40 8)
SymbolLine(5 25 10 20 8)
SymbolLine(10 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 40 8)
SymbolLine(0 20 5 25 8)
)
Symbol('o' 12)
(
SymbolLine(0 25 0 35 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('p' 12)
(
SymbolLine(5 25 5 55 8)
SymbolLine(0 20 5 25 8)
SymbolLine(5 25 10 20 8)
SymbolLine(10 20 20 20 8)
SymbolLine(20 20 25 25 8)
SymbolLine(25 25 25 35 8)
SymbolLine(20 40 25 35 8)
SymbolLine(10 40 20 40 8)
SymbolLine(5 35 10 40 8)
)
Symbol('q' 12)
(
SymbolLine(20 25 20 55 8)
SymbolLine(15 20 20 25 8)
SymbolLine(5 20 15 20 8)
SymbolLine(0 25 5 20 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
)
Symbol('r' 12)
(
SymbolLine(5 25 5 40 8)
SymbolLine(5 25 10 20 8)
SymbolLine(10 20 20 20 8)
SymbolLine(0 20 5 25 8)
)
Symbol('s' 12)
(
SymbolLine(5 40 20 40 8)
SymbolLine(20 40 25 35 8)
SymbolLine(20 30 25 35 8)
SymbolLine(5 30 20 30 8)
SymbolLine(0 25 5 30 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 20 20 8)
SymbolLine(20 20 25 25 8)
SymbolLine(0 35 5 40 8)
)
Symbol('t' 10)
(
SymbolLine(5 0 5 35 8)
SymbolLine(5 35 10 40 8)
SymbolLine(0 15 10 15 8)
)
Symbol('u' 12)
(
SymbolLine(0 20 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(20 20 20 35 8)
)
Symbol('v' 12)
(
SymbolLine(0 20 0 30 8)
SymbolLine(0 30 10 40 8)
SymbolLine(10 40 20 30 8)
SymbolLine(20 20 20 30 8)
)
Symbol('w' 12)
(
SymbolLine(0 20 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 10 40 8)
SymbolLine(10 40 15 35 8)
SymbolLine(15 20 15 35 8)
SymbolLine(15 35 20 40 8)
SymbolLine(20 40 25 40 8)
SymbolLine(25 40 30 35 8)
SymbolLine(30 20 30 35 8)
)
Symbol('x' 12)
(
SymbolLine(0 20 20 40 8)
SymbolLine(0 40 20 20 8)
)
Symbol('y' 12)
(
SymbolLine(0 20 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(20 20 20 50 8)
SymbolLine(15 55 20 50 8)
SymbolLine(5 55 15 55 8)
SymbolLine(0 50 5 55 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
)
Symbol('z' 12)
(
SymbolLine(0 20 20 20 8)
SymbolLine(0 40 20 20 8)
SymbolLine(0 40 20 40 8)
)
Symbol('{' 12)
(
SymbolLine(5 5 10 0 8)
SymbolLine(5 5 5 15 8)
SymbolLine(0 20 5 15 8)
SymbolLine(0 20 5 25 8)
SymbolLine(5 25 5 35 8)
SymbolLine(5 35 10 40 8)
)
Symbol('|' 12)
(
SymbolLine(0 0 0 40 8)
)
Symbol('}' 12)
(
SymbolLine(0 0 5 5 8)
SymbolLine(5 5 5 15 8)
SymbolLine(5 15 10 20 8)
SymbolLine(5 25 10 20 8)
SymbolLine(5 25 5 35 8)
SymbolLine(0 40 5 35 8)
)
Symbol('~' 12)
(
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 10 20 8)
SymbolLine(10 20 15 25 8)
SymbolLine(15 25 20 25 8)
SymbolLine(20 25 25 20 8)
)
"""
pcb_layers = """
Layer(1 "solder")
(
)
Layer(2 "component")
(
)
Layer(3 "GND")
(
)
Layer(4 "power")
(
)
Layer(5 "signal1")
(
)
Layer(6 "signal2")
(
)
Layer(7 "unused")
(
)
Layer(8 "unused")
(
)
Layer(9 "silk")
(
)
Layer(10 "silk")
(
)
"""
class CPCB:
" PCB class "
def __init__(self, sch=None,brd=None):
self.name=""
self.sch=sch
self.brd=brd
self.script_path=""
def addLayers(self):
# These layers have to be added in the board
# self.brd.addLayer(CLayer("solder",1)) # bottom orientation
# self.brd.addLayer(CLayer("component",2))
# these layers are already added
## self.brd.addLayer(CLayer("GND",3))
## self.brd.addLayer(CLayer("VCC",4))
## self.brd.addLayer(CLayer("blksolder",5)) # bottom orientation
## self.brd.addLayer(CLayer("blkcomponent",6))
## self.brd.addLayer(CLayer("signal3",7))
## self.brd.addLayer(CLayer("signal4",8))
## self.brd.addLayer(CLayer("Vias",9))
## self.brd.addLayer(CLayer("silk",10))
pass
#Layer(1 "solder")
#(
# Line(1375 1075 1325 1025 40 30 0x00000020)
#)
    # gen sch layers scr
def genSchLayersScr(self):
ns = ''
CRLF = "\n"
ns = pcb_layers;
return ns;
#ADD 'C1' 'G$1' POLARISED_CASE_H@ipc-7351-capacitor R0.000 (-0.300 3.300);
#ADD 'Q1' 'G$1' -PNP-SOT23-EBC@transistor R0.000 (1.600 3.300);
#ADD 'Q5' 'G$1' MMBT2222ALT1-NPN-SOT23-BEC@transistor R0.000 (0.900 2.800);
#ADD 'V1' 'GND' GND@supply2 R0.000 (0.600 0.100);
#ADD 'V2' 'G$1' VCC@supply2 R0.000 (5.600 4.200);
    # gen sch add scr
def genSchAddScr(self):
ns = ''
CRLF = "\n"
ns += "GRID INCH 0.005" + CRLF
ns += "Layer (91 Nets;" + CRLF
ns += "Layer (92 Busses;" + CRLF
ns += "Layer (93 Pins;" + CRLF
ns += "Layer (94 Symbols;" + CRLF
ns += "Layer (95 Names;" + CRLF
ns += "Layer (96 Values;" + CRLF
ns += "Layer (250 Descript;" + CRLF
ns += "Layer (251 SMDround;" + CRLF
ns += "DISPLAY -PINS" + CRLF
ns += CRLF
ns += "EDIT .S1" + CRLF
ns += "SET WIRE_BEND 2;" + CRLF
ns += "CHANGE STYLE 'Continuous'" + CRLF
for dev in self.sch.devices:
ns += "ADD '" + str(dev.refid) + "' 'G$1' " + str(dev.name) + "@" + str(dev.libname) + " " + dev.orientation + "R%.3f"% (dev.rotation) +" (" + str(dev.position) + ");" + CRLF
ns += "GRID LAST" + CRLF
return ns
    # gen cmd sch net-connect
def genSchNetConnectScr(self):
ns = ''
CRLF = "\n"
runcmd="run " + self.script_path + "/sch-net-connect.ulp"
        for net in self.sch.nets.values():
prevdev=""
prevpin=""
l = ""
first = 1
for node in net.nodes:
if first:
first = 0
prevdev=str(node.dev.refid)
prevpin=str(node.pin)
else:
l = runcmd + " " + net.name + " " + prevdev + " " + prevpin + " " + str(node.dev.refid) + " " + str(node.pin) + ";" + CRLF
ns += l
prevdev=str(node.dev.refid)
prevpin=str(node.pin)
# string function
return ns
# gen sch netlist listing
def genSchNetlistLst(self):
ns = ''
CRLF = '\n'
for net in self.sch.nets.values():
name = net.name
ns += net.name + ' '
for node in net.nodes:
ns += str(node.dev.refid) + '-' + str(node.pin.num) + ' '
ns += CRLF
ns += CRLF #extra one
# string function
return ns
# gen sch netlist script
def genSchNetlistScr(self):
ns = ''
CRLF = "\n"
ns = "# Netlist script" + CRLF
ns += "# EAGLE Version 4.11" + CRLF
ns += "# Copyright Hobby-Robotics" + CRLF
ns += expandtab("#Net\tPart\tPad",12) + CRLF
ns += CRLF
for net in self.sch.nets.values():
ns += CRLF
ns += "Change Class 0;" + CRLF
l = "Signal " + " '" + net.name + "'"
first = 1
for node in net.nodes:
if first:
first = 0
l += "\t'"
else:
l += "\t\t"
l += str(node.dev.refid) + "'\t'" + str(node.pin) + "' \\" + CRLF
ns += expandtab(str(l),12)
ns += "\t\t\t;" + CRLF
# string function
return ns
# Select
# {"All objects" Select(ObjectByName) ActiveWhen(have_regex)}
# {"Elements" Select(ElementByName) ActiveWhen(have_regex)}
# {"Pads" Select(PadByName) ActiveWhen(have_regex)}
# {"Pins" Select(PinByName) ActiveWhen(have_regex)}
# {"Text" Select(TextByName) ActiveWhen(have_regex)}
# {"Vias" Select(ViaByName) ActiveWhen(have_regex)}
# Move
# {"Move selected elements to other side" Flip(SelectedElements) a={"Shift-B" "Shift<Key>b"}}
# {"Remove selected objects" RemoveSelected()}
# {Connects m=C
# {"Lookup connection to object" GetXY(Select the object) Connection(Find) a={"Ctrl-F" "Ctrl<Key>f"}}
# {"Reset scanned pads/pins/vias" Connection(ResetPinsViasAndPads) Display(Redraw)}
# {"Reset scanned lines/polygons" Connection(ResetLinesAndPolygons) Display(Redraw)}
# {"Reset all connections" Connection(Reset) Display(Redraw) a={"Shift-F" "Shift<Key>f"}}
# {"Optimize rats nest" Atomic(Save) DeleteRats(AllRats)
# Atomic(Restore) AddRats(AllRats) Atomic(Block) a={"O" "<Key>o"}}
# {"Erase rats nest" DeleteRats(AllRats) a={"E" "<Key>e"}}
# {"Erase selected rats" DeleteRats(SelectedRats) a={"Shift-E" "Shift<Key>e"}}
#
# {"Auto-route selected rats" AutoRoute(Selected)}
# {"Auto-route all rats" AutoRoute(AllRats)}
# {"Rip up all auto-routed tracks" RipUp(All)}
# {"Optimize routed tracks"
# {"Auto-Optimize" djopt(auto) a={"Shift-=" "Shift<Key>="}}
# {"Debumpify" djopt(debumpify) }
# {"Unjaggy" djopt(unjaggy) }
# {"Vianudge" djopt(vianudge) }
# {"Viatrim" djopt(viatrim) }
# {"Ortho pull" djopt(orthopull) }
# {"Simple optimization" djopt(simple) a={"=" "<Key>="}}
# {"Miter" djopt(miter) }
# {"Puller" a={"Y" "<Key>y"} Puller() }
#
# {"Only autorouted nets" OptAutoOnly() checked=optautoonly}
# }
# {"Design Rule Checker" DRC()}
# {"Apply vendor drill mapping" ApplyVendor()}
# }
def genBrdPlaceBottom(self):
ns = ''
#Select(ElementByName|ObjectByName|PadByName|PinByName)
for dev in self.brd.devices.values():
name = str(dev.refid)
if dev.bottom:
#Select(ElementByName) ActiveWhen(have_regex)
ns += 'Select(ElementByName) ActiveWhen( ' + name + ' )\n'
ns += 'Flip(SelectedElements)\n'
ns += 'Unselect(All)\n'
return ns
    # gen brd cmd scr
def genBrdCmdScr(self):
ns = ''
CRLF = "\n"
ns += "# Gen EDA generated" + CRLF
ns += "# date:" + CRLF # version
ns += "# user:" + CRLF # version
# LoadFrom(Layout|LayoutToBuffer|ElementToBuffer|Netlist|Revert,filename)
ns += 'LoadFrom( Layout, ' + self.script_path + '/' + self.brd.name + '.pcb )' + CRLF # layout
ns += 'LoadFrom( Netlist, ' + self.script_path + '/' + self.brd.name + '.net )' + CRLF # netlist
# Do not do that, do it in the placement
# ns += self.genBrdPlaceBottom()
# AddRats(AllRats|SelectedRats|Close)
ns += 'AddRats(AllRats)' + CRLF # add all rats
# AutoRoute(AllRats|SelectedRats)
ns += 'AutoRoute(AllRats)' + CRLF # route all rats
# Auto-Optimize djopt(auto)
ns += 'djopt(auto)' + CRLF # optimize all routes
# SaveTo(Layout|LayoutAs,filename)
ns += 'SaveTo( LayoutAs, ' + self.script_path + '/' + self.brd.name + '.brd )' + CRLF # board
ns += 'Quit( )' + CRLF # Quit
return ns
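    # For a board named "myboard" with script_path ".", the method above emits
    # roughly the following pcb action script (names and paths are illustrative):
    #   LoadFrom( Layout, ./myboard.pcb )
    #   LoadFrom( Netlist, ./myboard.net )
    #   AddRats(AllRats)
    #   AutoRoute(AllRats)
    #   djopt(auto)
    #   SaveTo( LayoutAs, ./myboard.brd )
    #   Quit( )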
#####################################
## release: pcb 1.7.1.ALPHA
## date: Sun Jul 22 15:22:22 2001
## user: tp (Terry Porter,,,)
## host: gronk.porter.net
#PCB("" 6047 4000)
#
#Grid(25 0 0 0)
#Cursor(400 0 2)
#Flags(0x000000c0)
#Groups("1,s:2,c:3:4:5:6:7:8")
#Styles("Signal,10,40,20:Power,25,60,35:Fat,40,60,35:Skinny,8,36,20")
####################################
# release: pcb 1.99v
# date: Tue May 1 07:59:48 2007
# user: pawel (pawel,U-WODNICKI\pawel,S-1-5-21-1835012242-1811546175-1750076985-1007)
# host: Wodnicki
#
#FileVersion[20070407]
#
#PCB["" 350000 330000]
#
#Grid[3937.007904 1800 100 1]
#Cursor[133000 107500 2.000000]
#PolyArea[200000000.000000]
#Thermal[0.500000]
#DRC[1000 1000 1000 1000 1500 1000]
#Flags("rubberband,nameonpcb,alldirection,uniquename,snappin")
#Groups("4,5,6,c:1,2,3,s:8:7")
#Styles["Signal,1000,4000,2000,1000:Power,2500,6000,3500,1000:Fat,4000,6000,3500,1000:Skinny,800,3600,2000,1000"]
    # gen brd board scr
def genBrdBoardScr(self):
ns = ''
CRLF = "\n"
ns += "# boostEDA generated" + CRLF
ns += "# date:" + CRLF # version
ns += "# user:" + CRLF # version
# determine board size, aka outline for rectangular ones only
self.brd.outline.calcBBox()
xsize = self.brd.outline.bbox.sizeX()
ysize = self.brd.outline.bbox.sizeY()
ns += "PCB[\"" + self.brd.name + "\" "
ns += "%d "% (xsize) # x size
ns += " %d"% (ysize) # y size
ns += "]" + CRLF
ns += "Grid(25 0 0 0)" + CRLF
ns += "Cursor(400 0 2)" + CRLF
ns += "Flags(0x000000c0)" + CRLF
ns += "Groups(\"1,s:2,c:3:4:5:6:7:8\")" + CRLF
ns += "Styles(\"Signal,10,40,20:Power,25,60,35:Fat,40,60,35:Skinny,8,36,20\")" + CRLF
return ns
#Layer(1 "solder")
#(
# Line(1375 1075 1325 1025 40 30 0x00000020)
#)
def genBrdLayerFromNet(self,layer,net):
ns = ''
# Should come from board technology
### print "out net " + net.name
### print "layer num " + str(layer.num)
for line in net.route:
#print "found line on net layer num " + str(line.layernum)
if line.layernum == layer.num:
### print "out line on net " + net.name
### print "net.route length " + str(len(net.route))
### print "line.points length " + str(len(line.points))
Thickness = line.thickness
Clearance = line.thickness * 2
first = True
prev = Point()
for pt in line.points:
#print "pt " + str(pt)
if first:
first = False
else:
X1 = int(prev._x)
Y1 = int(prev._y)
X2 = int(pt._x)
Y2 = int(pt._y)
ns += 'Line [' + " %d "% X1 + " %d "% Y1 + " %d "% X2 + " %d "% Y2
ns += " %d "% Thickness
ns += " %d "% Clearance
ns += '"auto"'
ns += ']\n'
prev = pt
return ns
def genLayerBlockages(self,layer):
ns = ''
# blockages use absolute coordinates,
for rect in layer.blockages:
# order of processing is important
X1=int(rect.ll._x)
Y1=int(rect.ll._y)
X2=int(rect.ur._x)
Y2=int(rect.ur._y)
ns += ' Polygon("clearpoly")\n'
ns += '(\n'
ns += " [%d "% X1 + " %d ]"% Y1
ns += " [%d "% X1 + " %d ]"% Y2
ns += " [%d "% X2 + " %d ]"% Y2
ns += " [%d "% X2 + " %d ]"% Y1
ns += '\n'
ns += ')\n'
return ns;
# routing
    # gen brd layers scr
def genBrdLayersScr(self):
### print "PCB! gen brd layers scr"
ns = ''
CRLF = "\n"
for l in self.brd.layers:
### print "layer " + l.name
ns += "Layer (" +str(l.num) + " \"" + l.name + "\")" + CRLF
ns += "(" + CRLF
# here go all of the layer elements
for net in self.brd.nets.values():
ns += self.genBrdLayerFromNet(l,net) # Routes
ns += self.generateNetPour(l,net) # Geometry
ns += self.genLayerBlockages(l)
ns += ")" + CRLF
return ns;
def generateRoutes(self):
return self.genBrdLayersScr()
def generateNetPour(self,layer,net):
ns = ''
CRLF = "\n"
### print " layer " + str(layer)
for geom in net.geometry:
### print " found geom in " + net.name + " type " + str(type(geom)) + " layer " + str(geom.layernum) + CRLF
if geom.layernum != layer.num :
continue
# Handle rectangle
#if type(geom) is Rectangle :
if isinstance(geom, Rectangle) :
### print " found Rectangle" + CRLF
rect = Rectangle(geom.ll._x, geom.ll._y, geom.ur._x, geom.ur._y, geom.layernum )
rect.normalize() # normalize just in case
# order of processing is important
X1=int(rect.ll._x)
Y1=int(rect.ll._y)
X2=int(rect.ur._x)
Y2=int(rect.ur._y)
ns += ' Polygon("clearpoly")\n'
ns += '(\n'
ns += " [%d "% X1 + " %d ]"% Y1
ns += " [%d "% X1 + " %d ]"% Y2
ns += " [%d "% X2 + " %d ]"% Y2
ns += " [%d "% X2 + " %d ]"% Y1
ns += '\n'
ns += ')\n'
return ns;
# Geometry on nets, aka pour
def generatePour(self):
ns = ''
CRLF = "\n"
for l in self.brd.layers:
### print "layer " + l.name
ns += "Layer (" +str(l.num) + " \"" + l.name + "\")" + CRLF
ns += "(" + CRLF
# here go through the layers
for net in self.brd.nets.values():
ns += self.generateNetPour(l,net)
ns += ")" + CRLF
return ns;
# Via[]
# Via[17000 182000 31000 3000 34000 2800 "" ""]
# Via [X Y Thickness Clearance Mask Drill "Name" SFlags]
# Via (X Y Thickness Clearance Mask Drill "Name" NFlags)
# Via (X Y Thickness Clearance Drill "Name" NFlags)
# Via (X Y Thickness Drill "Name" NFlags)
# Via (X Y Thickness "Name" NFlags)
# X Y coordinates of center
# Thickness outer diameter of copper annulus
# Clearance add to thickness to get clearance diameter
# Mask diameter of solder mask opening
# Drill diameter of drill
# Name string, name of via (vias have names?)
# SFlags symbolic or numerical flags
# NFlags numerical flags only
def generateVias(self):
ns = ''
CRLF = "\n"
### print " board vias " + str(len(self.brd.vias))
for via in self.brd.vias:
### print "via " + via.name
ns += "Via ["
ns += " %d "% int(via.pos._x) + " %d "% int(via.pos._y)
ns += ' 4000 2000 0 2000 "" "" '
ns += "]" + CRLF
return ns;
#NetList()
#(
# Net("unnamed_net1" "(unknown)")
# (
# Connect("L1-2")
# Connect("L2-1")
# Connect("C2-1")
# Connect("C1-1")
# )
#)
    # gen brd net scr
def genBrdNetlistScr(self):
ns = ''
CRLF = "\n"
ns = 'NetList()' + CRLF
ns += '(' + CRLF
for net in self.sch.nets.values():
name = net.name
ns += "Net(\"" + net.name + "\" \"(unknown)\")" + CRLF
ns += "(" + CRLF
for node in net.nodes:
ns += expandtab("\tConnect(\"") + str(node.dev.refid) + "-" + str(node.pin.num) + "\")" + CRLF
ns += ")" + CRLF
ns += ')' + CRLF
return ns
# pcb footprint file may contain any of the following commands:
# Element [element_flags, description, pcb-name, value, mark_x, mark_y, text_x, text_y, text_direction, text_scale, text_flags]
# Pad [x1 y1 x2 y2 thickness clearance mask name pad_number flags]
# Pin [x y thickness clearance mask drillholedia name number flags]
# ElementArc [x y r1 r2 startangle sweepangle thickness]
# ElementLine [x1 y1 x2 y2 thickness] > thickness != 1000 = 10 mils almost for all footprints
# Comment lines start with the #-sign
#Elements
# Element [element_flags, description, pcb-name, value, mark_x, mark_y, text_x, text_y, text_direction, text_scale, text_flags] item allowed value explanation comment
# element_flags unsigned hex value
# description string text description of footprint written by footprint author
# pcb name string refdes used on this particular pcb xxx
# value string value of component on this particular pcb layout xxx
# mark_x 1/100th mils
# mark_y 1/100th mils
# text_x 1/100th mils
# text_y 1/100th mils
# text direction decimal integer 0=horiz; 1=ccw90; 2=180; 3=cw90
# text_scale decimal integer usu. set 100
# text_flags unsigned hex
# Pads
# Pad[x1 y1 x2 y2 thickness clearance mask name pad_number flags] Item Allowed Value Explanation Comment
# x1 1/100th mils x(1st point)
# y1 1/100th mils y(1st point)
# x2 1/100th mils x(2nd point)
# y2 1/100th mils y(2nd point)
# thickness 1/100 mils width of metal surrounding line segment see Brorson .pdf
# clearance 1/100 mils distance to any other copper on any layer actually 1/2 of this number is used!
# mask 1/100th mils width of mask relief actual width of the mask centered on pad copper
# name string name of pad (arb. string) e.g. pad_1 or positive or any other string
# pad_number string pad # used for nets. it MUST be consistent with the definitions on the netlist.
# flags hex value xxx
# Pin[x y thickness clearance mask drillholedia name number flags] Item Allowed Value Explanation Comment
# x 1/100th mils pin x coord.
# y 1/100th mils pin y coord.
# thickness 1/100th mils copper diameter
# clearance 1/100th mils 2*(cu to cu clearance) if you want a 10 mil clearance, put 2000 (20 mils) here
# mask 1/100th mils diameter of mask aperture actual dia. of hole in mask
# drillholedia 1/100th mils dia. of hole
# name string arb. pin name
# number decimal integer pin number used by nets/rats
# flags hex xxx
# Via[]
# Via[17000 182000 31000 3000 34000 2800 "" ""]
# Via [X Y Thickness Clearance Mask Drill "Name" SFlags]
# Via (X Y Thickness Clearance Mask Drill "Name" NFlags)
# Via (X Y Thickness Clearance Drill "Name" NFlags)
# Via (X Y Thickness Drill "Name" NFlags)
# Via (X Y Thickness "Name" NFlags)
# X Y coordinates of center
# Thickness outer diameter of copper annulus
# Clearance add to thickness to get clearance diameter
# Mask diameter of solder mask opening
# Drill diameter of drill
# Name string, name of via (vias have names?)
# SFlags symbolic or numerical flags
# NFlags numerical flags only
# On the Layer
# Line[]
# Line[137500 107500 132500 102500 4000 3000 "clearline"]
# Text[423000 391500 2 100 "T J PORTER ELECTRONICS" "auto"]
# Polygon("clearpoly")
# (
# [2000 198000] [47000 198000] [47000 187000] [126000 187000] [126000 198000]
# [297000 198000] [297000 1000] [2000 1000]
# )
# Notes:
    # Pins - Through-hole
# Pads - SMD
# Examples for version 1.99
# TH
# Element["" "Cap" "C17" "" 215500 81500 -9000 -32900 0 150 ""]
# (
# Pin[0 0 8000 3000 11000 3500 "1" "1" ""]
# Pin[0 -20000 8000 3000 11000 3500 "2" "2" ""]
# ElementLine [-5000 5000 5000 5000 1000]
# ElementLine [5000 5000 5000 -25000 1000]
# ElementLine [5000 -25000 -5000 -25000 1000]
# ElementLine [-5000 -25000 -5000 5000 1000]
#
# )
# SMD
# Element["" "SMD 0805" "C13" "" 252500 151000 -3000 4500 0 150 ""]
# (
# Pad[0 0 0 0 6000 3000 9000 "1" "1" "square"]
# Pad[0 -9000 0 -9000 6000 3000 9000 "2" "2" "square"]
# ElementLine [-3500 -12500 -3500 3500 1000]
# ElementLine [3500 -12500 -3500 -12500 1000]
# ElementLine [3500 3500 3500 -12500 1000]
# ElementLine [-3500 3500 3500 3500 1000]
# )
#
# Original
#Element["" "SOT-23 package" "Q7" "" 66666 66666 3200 5900 0 100 ""]
#(
# Pad[0 -300 0 300 3400 3000 4000 "1" "1" "square,edge2"]
# Pad[7800 -300 7800 300 3400 3000 4000 "2" "2" "square,edge2"]
# Pad[3900 -8500 3900 -7900 3400 3000 4000 "3" "3" "square"]
# ElementLine [10300 -11000 -2500 -11000 1000]
# ElementLine [10300 2900 10300 -11000 1000]
# ElementLine [-2500 2900 10300 2900 1000]
# ElementLine [-2500 -11000 -2500 2900 1000]
#)
# Placed on the far side -> layer onsolder?
#Element["selected,onsolder" "SOT-23 package" "Q7" "" 66666 133334 3200 -5900 0 100 "selected,auto"]
#(
# Pad[0 300 0 -300 3400 3000 4000 "1" "1" "selected,onsolder,square"]
# Pad[7800 300 7800 -300 3400 3000 4000 "2" "2" "selected,onsolder,square"]
# Pad[3900 8500 3900 7900 3400 3000 4000 "3" "3" "selected,onsolder,square,edge2"]
# ElementLine [10300 11000 -2500 11000 1000]
# ElementLine [10300 -2900 10300 11000 1000]
# ElementLine [-2500 -2900 10300 -2900 1000]
# ElementLine [-2500 11000 -2500 -2900 1000]
#
# )
# VIAs
# Via[17000 182000 31000 3000 34000 2800 "" ""]
# Via[17000 17000 31000 3000 34000 2800 "" ""]
# Via[282000 17000 31000 3000 34000 2800 "" ""]
# Via[282000 182000 31000 3000 34000 2800 "" ""]
# Via[15500 382500 31000 3000 34000 2800 "" ""]
# Via[15500 217500 31000 3000 34000 2800 "" ""]
# Via[280500 217500 31000 3000 34000 2800 "" ""]
# Tracks are made of Line????
# Layer(1 "solder")
# (
# Line[137500 107500 132500 102500 4000 3000 "clearline"]
# Line[145000 107500 137500 107500 4000 3000 "clearline"]
# Line[85000 112500 85000 107500 4000 3000 "clearline"]
# Line[97500 90000 97500 147500 4000 3000 "clearline"]
#)
# Element [element_flags, description, pcb-name, value, mark_x, mark_y, text_x, text_y, text_direction, text_scale, text_flags]
def gen0805_resitor(self,refid,x,y,v):
CRLF = '\n'
s = 'Element["" "0805 chip resitor" "' + str(refid) + '" "' + str(v) + '" ' +'%i'% x + ' ' + '%i'% y + ' 3200 5900 0 100 ""]' + CRLF
s += '(' + CRLF
s += ' Pad[0 -700 0 700 4500 3000 5100 "1" "1" "square"]' + CRLF
s += ' Pad[8000 -700 8000 700 4500 3000 5100 "2" "2" "square"]' + CRLF
s += ' ElementLine [11700 -4400 -3700 -4400 800]' + CRLF
s += ' ElementLine [11700 4400 11700 -4400 800]' + CRLF
s += ' ElementLine [-3700 4400 11700 4400 800]' + CRLF
s += ' ElementLine [-3700 -4400 -3700 4400 800]' + CRLF
s += ')' + CRLF
return s
def gen0805_capacitor(self,refid,x,y,v):
CRLF = '\n'
s = 'Element["" "0805 chip cap" "' + str(refid) + '" "' + str(v) + '" ' +'%i'% x + ' ' + '%i'% y + ' 3200 5900 0 100 ""]' + CRLF
s += '(' + CRLF
s += ' Pad[0 -700 0 700 4500 3000 5100 "1" "1" "square"]' + CRLF
s += ' Pad[8000 -700 8000 700 4500 3000 5100 "2" "2" "square"]' + CRLF
s += ' ElementLine [11700 -4400 -3700 -4400 800]' + CRLF
s += ' ElementLine [11700 4400 11700 -4400 800]' + CRLF
s += ' ElementLine [-3700 4400 11700 4400 800]' + CRLF
s += ' ElementLine [-3700 -4400 -3700 4400 800]' + CRLF
s += ')' + CRLF
return s
def genSOT23(self, refid, x, y, v):
CRLF = '\n'
s = 'Element["" "SOT-23 package" "' + str(refid) + '" "' + str(v) + '" ' +'%i'% x + ' ' + '%i'% y + ' 3200 5900 0 100 ""]' + CRLF
s += '(' + CRLF
s += ' Pad[0 -300 0 300 3400 3000 4000 "1" "1" "square,edge2"]' + CRLF
s += ' Pad[7800 -300 7800 300 3400 3000 4000 "2" "2" "square,edge2"]' + CRLF
s += ' Pad[3900 -8500 3900 -7900 3400 3000 4000 "3" "3" "square"] ' + CRLF
s += ' ElementLine [10300 -11000 -2500 -11000 1000]' + CRLF
s += ' ElementLine [10300 2900 10300 -11000 1000]' + CRLF
s += ' ElementLine [-2500 2900 10300 2900 1000]' + CRLF
s += ' ElementLine [-2500 -11000 -2500 2900 1000]' + CRLF
s += ')' + CRLF
return s
def rotatePoint(self,pt,x0,y0,angle):
dX = pt._x - x0
dY = pt._y - y0
rX = pt._x
rY = pt._y
if angle == 90:
rX = x0 + dY
rY = y0 - dX
if angle == 180:
rX = x0 - dX
rY = y0 - dY
if angle == 270:
rX = x0 - dY
rY = y0 + dX
return rX,rY
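    # Worked example of the clockwise convention implemented above (the values
    # are illustrative only, not taken from a real board): rotating pt=(100, 0)
    # about (x0, y0)=(0, 0) gives dX=100, dY=0, so 90 -> (0, -100),
    # 180 -> (-100, 0), 270 -> (0, 100); any other angle returns the point
    # unchanged.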
def genElementLine(self,line,dev):
# order of processing is important
X1=int(line.points[0]._x)
Y1=int(line.points[0]._y)
X2=int(line.points[1]._x)
Y2=int(line.points[1]._y)
if dev.bottom:
Y1 = 0 - Y1
Y2 = 0 - Y2
X1,Y1 = self.rotatePoint(Point(X1,Y1),0,0,dev.rotation)
X2,Y2 = self.rotatePoint(Point(X2,Y2),0,0,dev.rotation)
# keep horizontal, vertical Point2 > Point1
if (X1 == X2):
if (Y1 > Y2):
t = Y1
Y1 = Y2
Y2 = t
else:
if (Y1 == Y2):
if (X1 > X2):
t = X1
X1 = X2
X2 = t
ns = 'ElementLine [' + " %d "% X1 + " %d "% Y1 + " %d "% X2 + " %d "% Y2
ns += " %d "% line.thickness
ns += ']\n'
return ns
# rotation is clockwise
def genElementArc(self,arc,dev):
# Thickness, Clearance, Mask, Drill, Name, Number, SFlags
rX = int(arc._x)
rY = int(arc._y)
# rY is
if dev.bottom:
rY = 0 - rY
if dev.rotation == 90:
arc.sangle += 90
if dev.rotation == 180:
arc.sangle += 180
if dev.rotation == 270:
arc.sangle += 270
rX,rY = self.rotatePoint(arc,0,0,dev.rotation)
arc.sangle = arc.sangle % 360
ns = 'ElementArc [' + " %d "% rX + " %d "% rY
ns += " %d "% arc.width
ns += " %d "% arc.height
ns += " %d "% arc.sangle
ns += " %d "% arc.dangle
ns += " %d "% arc.thickness
ns += ']\n'
return ns
def genElementPin(self,pin,dev):
# Thickness, Clearance, Mask, Drill, Name, Number, SFlags
rX=int(pin.pos._x)
rY=int(pin.pos._y)
        # Why don't we have to do it for the pins?
# rY is
#if dev.bottom:
# rY = 0 - rY
# Package has not been rotated and must match device pins
rX,rY = self.rotatePoint(Point(rX,rY),0,0,dev.rotation)
ns = 'Pin [' + " %d "% rX + " %d "% rY
ns += " %d "% pin.thickness
ns += " %d "% pin.clearance
ns += " %d "% pin.mask
ns += " %d "% pin.drill
ns += pin.name + ' '
ns += '"' + "%d"% pin.num + '" '
ns += pin.sflags
ns += ']\n'
return ns
def genElementPad(self,pin,dev):
# Thickness, Clearance, Mask, Name, Number, SFlags
# if package was parsed then these are set, if not I need to generate correct ones
rX1=int(pin.rX1)
rY1=int(pin.rY1)
rX2=int(pin.rX2)
rY2=int(pin.rY2)
        # Why don't we have to do it for the pads?
#if dev.bottom:
# rY1 = 0 - rY1
# rY2 = 0 - rY2
rX1,rY1 = self.rotatePoint(Point(rX1,rY1),0,0,dev.rotation)
rX2,rY2 = self.rotatePoint(Point(rX2,rY2),0,0,dev.rotation)
try:
sflags = pin.sflags
except:
# no PCB sflags then generate one
# square
# edge2
if pin.pad.type == "S":
sflags ='"square"'
else:
sflags ='""'
ns = 'Pad [' + " %d "% rX1 + " %d "% rY1 + " %d "% rX2 + " %d "% rY2
ns += " %d "% pin.thickness
ns += " %d "% pin.clearance
ns += " %d "% pin.mask
ns += pin.name + ' '
ns += '"' + "%d"% pin.num + '" '
ns += sflags
ns += ']\n'
return ns
def genElementBody(self,dev):
# print'name ' + dev.name
l = len(dev.pins)
# print ' len ' + str(l)
        # print 'rotation ' + str(dev.rotation)
ns = '(\n'
for num in range(1,l+1):
# print 'pin ' + str(num)
pin = dev.pins[num]
ppin = dev.package.pins[num]
#if dev.package.smt: # event smt packages can have pins aka mounting holes
if ppin.smt:
ns += self.genElementPad(ppin,dev)
else:
ns += self.genElementPin(ppin,dev)
for geo in dev.package.geometry:
if isinstance(geo, Line):
ns += self.genElementLine(geo,dev)
if isinstance(geo, Arc):
ns += self.genElementArc(geo,dev)
if isinstance(geo, Text):
ns += self.genElementText(geo,dev)
ns += ')\n'
return ns
# Device is on the bottom, coordinates of the pad are for the bottom
# Pcb defines package looking from top so mirror it in X back to top
# and add the flags
# For details see the core.py
def genBrdPlaceDevOnSolder(self,dev):
for pad in dev.package.pins.values():
pad.pos._y = 0 - pad.pos._y
try: # quick fix TBI
pad.rY1 = 0 - pad.rY1
except:
pad.rY1 = 0
try: # quick fix TBI
pad.rY2 = 0 - pad.rY2
except:
pad.rY2 = 0
try: # quick fix TBI
newsflags = pad.sflags.strip('"')
except:
newsflags = 'square' # default to square
if newsflags != '':
newsflags = ',' + newsflags
newsflags = '"onsolder' + newsflags + '"'
pad.sflags = newsflags
for pad in dev.package.geometry:
pass
# print pad.sflags
    # gen brd place scr
def genBrdPlaceScr(self):
ns = ''
CRLF = '\n'
devnum = 0
self.brd.outline.calcBBox()
for dev in self.brd.devices.values():
name = str(dev.refid) + CRLF
if dev.bottom:
self.genBrdPlaceDevOnSolder(dev)
x = (int)
#x = (self.brd.outline.bbox.ur._x - dev.position._x) # position is in mils
x = dev.position._x # position is in mils
y = (int)
#y = (self.brd.outline.bbox.ur._y - dev.position._y) # position is in mils
y = dev.position._y # position is in mils
placement = '"onsolder"'
else:
x = (int)
x = dev.position._x # position is in mils
y = (int)
y = dev.position._y # position is in mils
placement = '""'
# place the device
ns += 'Element[' + placement + ' "' + str(dev.package.description) + '" "' + str(dev.refid) + '" "' + str(dev.val) + '" ' +'%i'% x + ' ' + '%i'% y + ' 3200 5900 0 100 ""]' + CRLF
ns += self.genElementBody(dev)
# if name[0:1] == 'R':
# ns += self.gen0805_resitor(dev.refid,x,y,dev.val)
# if name[0:1] == 'C':
# ns += self.gen0805_capacitor(dev.refid,x,y,dev.val)
# if name[0:1] == 'Q':
# ns += self.genSOT23(dev.refid,x,y,dev.val)
# numpins = 0
# for pin in dev.pins:
# numpins += 1
# for k in dev.pins.keys():
# pin = dev.pins[k]
# dev.rotation ?
return ns
def Cmd(self,cmds):
gen = 0
sch = 0
brd = 0
cmd = 0
add = 0
layers = 0
net_connect = 0
netlist = 0
board = 0
place = 0
route = 0
scr = 0
lst = 0
# 0
if cmds[0:1] == ['gen']:
gen = 1
# 1
if cmds[1:2] == ['sch']:
sch = 1
if cmds[1:2] == ['brd']:
brd = 1
# 2
if cmds[2:3] == ['cmd']:
cmd = 1
if cmds[2:3] == ['add']:
add = 1
if cmds[2:3] == ['layers']:
layers = 1
if cmds[2:3] == ['netconnect']:
net_connect = 1
if cmds[2:3] == ['netlist']:
netlist = 1
if cmds[2:3] == ['board']:
board = 1
if cmds[2:3] == ['place']:
place = 1
if cmds[2:3] == ['route']:
route = 1
# 3
if cmds[3:4] == ['scr']:
scr = 1
if cmds[3:4] == ['lst']:
lst = 1
if gen:
if sch:
if add:
if scr:
s = self.genSchAddScr()
return s
if layers:
if scr:
s = self.genSchLayersScr()
return s
if net_connect:
pass
if netlist:
s = self.genSchNetlistLst()
return s
if brd:
if cmd:
if scr:
s = self.genBrdCmdScr() # commands to make the board
return s
if board:
if scr:
s = self.genBrdBoardScr()
return s
if layers:
if scr:
s = self.genBrdLayersScr()
return s
if place:
if scr:
s = self.genBrdPlaceScr()
return s
if netlist:
if scr:
s = self.genBrdNetlistScr()
return s
if route:
pass
return ""
def test(self):
ic1 = CDev("U1","","IC1")
ic1.add( CPin("GND",1) )
ic1.add( CPin("VCC",2) )
self.sch.addDev(ic1)
net1 = CNet("GND")
net1.add(CNode(ic1,"GND"))
self.sch.addNet(net1)
net2 = CNet("VCC")
net2.add(CNode(ic1,"VCC"))
self.sch.addNet(net2)
print "gen sch add scr"
s = self.genSchAddScr()
print s
print "gen sch net-connect scr"
s = self.genSchNetConnectScr()
print s
print "gen sch netlist lst"
s = self.genSchNetlistLst()
print s
print "gen sch netlist scr"
s = self.genSchNetlistScr()
print s
# Some tests
if __name__ == "__main__":
import sys
#import string
import re
schem = CSchematic()
board = CBoard(schem)
board.addFromSchematic()
mucs = CPCB(schem,board)
# open input file
if sys.argv[1:] == ['test']:
mucs.test()
| bsd-3-clause | 2,569,505,188,356,980,700 | 23.228858 | 181 | 0.616108 | false |
ecreall/nova-ideo | novaideo/connectors/yammer/views/login.py | 1 | 4288 | # -*- coding: utf8 -*-
# Copyright (c) 2017 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPForbidden
from pyramid.renderers import get_renderer
from dace.util import getSite
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.view import BasicView, ViewError
from novaideo import _
from novaideo.connectors.core import YAMMER_CONNECTOR_ID
from novaideo.connectors.yammer.content.behaviors import LogIn
from novaideo.content.novaideo_application import NovaIdeoApplication
from novaideo.utilities.util import generate_navbars
from novaideo.connectors.core import CONNECTOR_PROCESSES
@view_config(
name='yammerlogin',
context=NovaIdeoApplication,
renderer='pontus:templates/views_templates/grid.pt',
)
class LoginView(BasicView):
title = _('Log in')
name = 'login'
behaviors = [LogIn]
template = 'novaideo:views/user_management/templates/login.pt'
wrapper_template = 'pontus:templates/views_templates/simple_view_wrapper.pt'
viewid = 'login'
def update(self):
code = self.params('code')
error = self.params('error')
message = None
messages = {}
request = self.request
root = getSite()
yammer_connectors = list(root.get_connectors(YAMMER_CONNECTOR_ID))
yammer_connector = yammer_connectors[0] if yammer_connectors else None
login_url = request.resource_url(request.context, 'login')
login_url2 = request.resource_url(request.context, '@@login')
referrer = self.params('came_from')
if not referrer:
referrer = request.path_url
if '/auditstream-sse' in referrer:
return HTTPForbidden()
if login_url in referrer or login_url2 in referrer:
# never use the login form itself as came_from
referrer = request.resource_url(root)
came_from = request.session.setdefault(
'novaideo.came_from', referrer)
error_message = _("Failed login")
if yammer_connector and code:
trusted_networks = getattr(yammer_connector, 'networks', [])
source_data, user_data = yammer_connector.extract_data(code)
if not trusted_networks or \
any(n in trusted_networks for n in source_data['network_domains']):
result = self.execute({
'source_data': source_data,
'user_data': user_data,
'came_from': came_from
})
if result[0].get('logged', False):
return result[0].get('redirect')
elif trusted_networks:
error_message = _("You don't have the right to login with this account.")
error = True
if error:
error = ViewError()
error.principalmessage = error_message
message = error.render_message(request)
messages.update({error.type: [message]})
self.finished_successfully = False
# Pass this through FBO views (e.g., forbidden) which use its macros.
template = get_renderer(
'novaideo:views/user_management/templates/login.pt').implementation()
login_bodies = []
try:
login_navbars = generate_navbars(
request, request.root,
process_id=CONNECTOR_PROCESSES,
node_id='login',
descriminators=['body-action'])
login_bodies = login_navbars['body_actions']
except Exception:
pass
values = dict(
url=request.resource_url(request.virtual_root, 'login'),
came_from=came_from,
login='',
password='',
login_template=template,
logins=login_bodies
)
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
item['messages'] = messages
result = {}
result['coordinates'] = {self.coordinates: [item]}
return result
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{LogIn: LoginView})
| agpl-3.0 | 4,142,177,128,030,823,400 | 35.338983 | 89 | 0.620336 | false |
jjgomera/pychemqt | lib/mEoS/Ne.py | 1 | 5905 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
from numpy.lib.scimath import log10
from scipy.constants import pi, Avogadro
from lib import unidades
from lib.meos import MEoS
class Ne(MEoS):
"""Multiparameter equation of state for neon"""
name = "neon"
CASNumber = "7440-01-9"
formula = "Ne"
synonym = "R-720"
_refPropName = "NEON"
_coolPropName = "Neon"
rhoc = unidades.Density(481.914888)
Tc = unidades.Temperature(44.4918)
Pc = unidades.Pressure(2678.6, "kPa")
M = 20.179 # g/mol
Tt = unidades.Temperature(24.556)
Tb = unidades.Temperature(27.104)
f_acent = -0.0387
momentoDipolar = unidades.DipoleMoment(0.0, "Debye")
id = 107
CP1 = {"ao": 2.5}
katti = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for neon of Katti (1986)",
"__doi__": {"autor": "Katti, R.S., Jacobsen, R.T, Stewart, R.B., "
"Jahangiri, M.",
"title": "Thermodynamic Properties of Neon for "
"Temperatures from the Triple Point to 700 K at "
"Pressures up to 700 MPa",
"ref": "Adv. Cryo. Eng. 31 (1986) 1189-1197",
"doi": "10.1007/978-1-4613-2213-9_132"},
"R": 8.31434,
"cp": CP1,
"ref": {"Tref": 298.15, "Pref": 101.325, "ho": 6179, "so": 146.214},
"Tmin": Tt, "Tmax": 700.0, "Pmax": 700000.0, "rhomax": 90.56,
"nr1": [0.3532653449e1, -0.4513954384e1, -0.1524027959, 0.2188568609e1,
-7.44299997, 0.7755627402e1, -0.3122553128e1, 0.1014206899e1,
-0.5289214086e-1, 0.1566849239, -0.222852705, -0.1410150942e-1,
0.7036229719e-1, -0.5882048367e-1, 0.1571172741e-1,
0.1292202769e-2, 0.7902035603e-3, -0.3794403616e-3],
"d1": [1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 6, 6, 6],
"t1": [0.5, 0.75, 3.5, 0.5, 0.75, 1, 1.5, 2.5, 0.25, 0.5, 2.5, 1, 3, 4,
5, 1, 5, 6],
"nr2": [0.04652799333, 0.04524001818, -0.2383421991, 0.629359013e-2,
-0.1272313644e-2, -0.175235256e-6, 0.7188419232e-2,
-0.5403006914e-1, 0.7578222187e-1, -0.3808588254e-1,
0.6034022431e-2],
"d2": [1, 2, 2, 2, 2, 2, 4, 8, 8, 8, 8],
"t2": [4, 1, 5, 8, 12, 32, 10, 6, 7, 8, 9],
"c2": [3, 2, 2, 4, 6, 6, 2, 2, 2, 2, 2],
"gamma2": [1]*11,
"nr3": [],
"nr4": []}
eq = katti,
_surface = {"sigma": [0.012254, 0.02728, -0.025715],
"exp": [1.4136, 1.4517, 1.6567]}
_dielectric = {
"eq": 1,
"a": [0.9969, 0], "b": [-0.109, 0.0708], "c": [-2.88, -1.0],
"Au": 0, "D": 2}
_melting = {
"eq": 2,
"__doi__": {
"autor": "Santamaría-Pérez, D., Mukherjee, G.D., Schwager, B., "
"Boehler, R.",
"title": "High-pressure melting curve of helium and neon: "
"Deviations from corresponding states theory",
"ref": "Physical Review B 81 (2010) 214101",
"doi": "10.1103/PhysRevB.81.214101"},
"Tmin": 24.4, "Tmax": 700.0,
"Tref": 24.4, "Pref": 101325,
"a2": [0.17e9], "exp2": [1/0.77]}
_vapor_Pressure = {
"eq": 3,
"n": [-0.55805e1, 0.68795e-1, 0.54840e1, -0.83760e1, 0.34276e1],
"t": [1, 1.5, 2.3, 2.8, 3.4]}
_liquid_Density = {
"eq": 1,
"n": [1.0601, 120.76, -385.53, 816.55, -899.07, 354.66],
"t": [0.33, 1.4, 1.7, 2.2, 2.6, 3.0]}
_vapor_Density = {
"eq": 2,
"n": [-0.23338e1, -0.36834e1, -0.85368e2, 0.22769e3, -0.17290e3],
"t": [0.444, 0.95, 3.5, 4.1, 4.5]}
visco0 = {"__name__": "Rabinovich (1988)",
"__doi__": {
"autor": "Rabinovich, V.A., Vasserman, A.A., Nedostup, V.I.,"
" Veksler, L.S.",
"title": "Thermophysical Properties of Neon, Argon, "
"Krypton, and Xenon",
"ref": "Hemisphere Publishing Corp., 1988.",
"doi": ""},
"eq": 0,
"method": "_visco0"}
_viscosity = visco0,
def _visco0(self, rho, T, fase=None):
a = [17.67484, -2.78751, 311498.7, -48826500, 3938774000, -1.654629e11,
2.86561e12]
Tr = T/0.29944
y = 0.68321*(a[0] + a[1]*log10(Tr) + a[2]/Tr**2 + a[3]/Tr**3 +
a[4]/Tr**4 + a[5]/Tr**5 + a[6]/Tr**6)
nt = 266.93*(T*self.M)**0.5/y
om = rho/1673.0
c = [1.03010, -0.99175, 2.47127, -3.11864, 1.57066]
b = [0.48148, -1.18732, 2.80277, -5.41058, 7.04779, -3.76608]
sum1 = sum([ci*om**i for i, ci in enumerate(c)])
sum2 = sum([bi*om**i for i, bi in enumerate(b)])
sigma = 3.05e-10*(sum1-sum2*log10(T/122.1))
br = 2.0/3.0*pi*Avogadro*sigma**3
brho = rho/self.M*1000*br
d = [1, 0.27676, 0.014355, 2.6480, -1.9643, 0.89161]
nd = sum([di*brho**i for i, di in enumerate(d)])
return unidades.Viscosity(nd*nt/100, "muPas")
| gpl-3.0 | 858,815,280,121,268,900 | 37.318182 | 79 | 0.515675 | false |
intip/aldryn-bootstrap3 | aldryn_bootstrap3/fields.py | 1 | 3306 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import django.forms.fields
from django.utils.translation import ugettext_lazy as _
from . import widgets, constants
from .conf import settings
class Context(django.forms.fields.ChoiceField):
widget = widgets.Context
CHOICES = constants.CONTEXT_CHOICES
DEFAULT = constants.CONTEXT_DEFAULT
def __init__(self, *args, **kwargs):
if 'choices' not in kwargs:
kwargs['choices'] = self.CHOICES
if 'initial' not in kwargs:
kwargs['initial'] = self.DEFAULT
kwargs.pop('coerce', None)
kwargs.pop('max_length', None)
kwargs.pop('widget', None)
kwargs['widget'] = self.widget
super(Context, self).__init__(*args, **kwargs)
class Size(django.forms.fields.ChoiceField):
widget = widgets.Size
CHOICES = constants.SIZE_WIDGET_CHOICES
DEFAULT = constants.SIZE_WIDGET_DEFAULT
def __init__(self, *args, **kwargs):
if 'choices' not in kwargs:
kwargs['choices'] = self.CHOICES
if 'initial' not in kwargs:
kwargs['initial'] = self.DEFAULT
kwargs.pop('coerce', None)
kwargs.pop('max_length', None)
kwargs.pop('widget', None)
kwargs['widget'] = self.widget
super(Size, self).__init__(*args, **kwargs)
class Classes(django.forms.fields.CharField):
pass
class Icon(django.forms.fields.CharField):
widget = widgets.Icon
DEFAULT = ''
def __init__(self, *args, **kwargs):
if 'initial' not in kwargs:
kwargs['initial'] = self.DEFAULT
kwargs.pop('coerce', None)
kwargs.pop('max_length', None)
kwargs.pop('widget', None)
kwargs['widget'] = self.widget
super(Icon, self).__init__(*args, **kwargs)
class Integer(django.forms.fields.IntegerField):
widget = django.forms.NumberInput
def __init__(self, *args, **kwargs):
kwargs.pop('coerce', None)
kwargs.pop('max_length', None)
kwargs.pop('widget', None)
kwargs['widget'] = self.widget
super(Integer, self).__init__(*args, **kwargs)
class Classes(django.forms.fields.CharField):
widget = django.forms.widgets.Textarea
class MiniText(django.forms.fields.CharField):
widget = widgets.MiniTextarea
def __init__(self, *args, **kwargs):
kwargs.pop('coerce', None)
kwargs.pop('max_length', None)
kwargs.pop('widget', None)
kwargs['widget'] = self.widget
super(MiniText, self).__init__(*args, **kwargs)
class LinkOrButton(django.forms.fields.ChoiceField):
widget = widgets.LinkOrButton
CHOICES = (
('lnk', 'link'),
('btn', 'button'),
)
DEFAULT = 'lnk'
def __init__(self, *args, **kwargs):
if 'choices' not in kwargs:
kwargs['choices'] = self.CHOICES
if 'initial' not in kwargs:
kwargs['initial'] = self.DEFAULT
kwargs.pop('coerce', None)
kwargs.pop('max_length', None)
kwargs.pop('widget', None)
kwargs['widget'] = self.widget
super(LinkOrButton, self).__init__(*args, **kwargs)
class Responsive(MiniText):
widget = widgets.Responsive
class ResponsivePrint(MiniText):
widget = widgets.ResponsivePrint
| bsd-3-clause | -1,519,764,175,685,120,000 | 28.517857 | 59 | 0.613128 | false |
olinlibrary/ABE | abe/auth/access_tokens.py | 1 | 2298 | import os
import time
from binascii import hexlify
import jwt
from abe import database as db
ADMIN_EMAILS = os.environ.get('ADMIN_EMAILS', '').split(',')
OAUTH_REQUIRES_CLIENT_ID = os.environ.get('OAUTH_REQUIRES_CLIENT_ID')
ACCESS_TOKEN_SECRET = (os.environ.get('ACCESS_TOKEN_SECRET') or hexlify(os.urandom(32)))
AUTHENTICATED_USER_CLAIMS = [
'create:events', 'edit:events', 'delete:events',
'create:ics',
'read:all_events',
'read:labels',
]
ADMIN_USER_CLAIMS = AUTHENTICATED_USER_CLAIMS + [
'create:protected_events', 'edit:protected_events', 'delete:protected_events',
'create:labels', 'edit:labels', 'delete:labels',
'admin:apps',
]
def create_access_token(**params):
payload = {}
payload.update(params)
payload.update({'iat': int(time.time())})
token = jwt.encode(payload, ACCESS_TOKEN_SECRET, algorithm='HS256').decode()
return token
def get_access_token_provider(token):
if is_valid_token(token):
payload = jwt.decode(token.encode(), ACCESS_TOKEN_SECRET, algorithms='HS256')
return payload.get('provider')
return None
def get_access_token_role(token):
if is_valid_token(token):
payload = jwt.decode(token.encode(), ACCESS_TOKEN_SECRET, algorithms='HS256')
return 'admin' if payload.get('email') in ADMIN_EMAILS else 'user'
return None
def access_token_scopes(token):
# The scope is computed based on the token's role, so that tokens stay
# valid if the role -> scope map changes.
scope = []
if is_valid_token(token):
payload = jwt.decode(token.encode(), ACCESS_TOKEN_SECRET, algorithms='HS256')
app = None
if 'client_id' in payload:
app = db.App.objects(client_id=payload['client_id']).first()
if not app and OAUTH_REQUIRES_CLIENT_ID:
pass # return scope
role = get_access_token_role(token)
if app and 'admin:*' not in app.scopes:
pass # role == 'user'
scope = ADMIN_USER_CLAIMS if role == 'admin' else AUTHENTICATED_USER_CLAIMS
return scope
def is_valid_token(token):
if not token:
return False
try:
jwt.decode(token.encode(), ACCESS_TOKEN_SECRET, algorithms='HS256') # for effect
except Exception:
return False
return True
| agpl-3.0 | -8,400,357,089,113,608,000 | 29.64 | 89 | 0.654047 | false |
messersm/replay | replay/__init__.py | 1 | 1758 | r"""replay - replay the results of function calls
replay is a simple package that enables you to save the results of
time-intensive deterministic function calls or random function calls
(and by function I mean any kind of callable) that should be replayed,
to a file using a simple API.
replay will never execute any code from a replay file. It hashes the
calls and looks for the hashes in the replay file, so there's no
security risk in changing replay files (other than that you could
get other results...).
Example:
>>> import os
>>> import random
>>> import replay
>>> import tempfile
>>> fd, tmpname = tempfile.mkstemp(suffix='.replay')
>>> r1 = replay.Replay(tmpname)
>>> random_numbers1 = [r1(random.random) for i in range(10)]
>>> r1.save()
>>> del r1
>>> r2 = replay.Replay(tmpname)
>>> random_numbers2 = [r2(random.random) for i in range(10)]
>>> random_numbers1 == random_numbers2
True
>>> os.remove(tmpname)
Replay files can have different formats. Right now only a human-readable
and -editable format is implemented. This format looks mostly like python
except that the actual values are encoded with json.
Here's an example for such a file:
-----------------------------
random() = 0.3
pow(2, 3) = 8
# This is a comment.
random() = 0.2
sorted([7, 2, 3], reverse=true) = [7, 3, 2]
-----------------------------
You can freely edit such a file and future calls to these functions
will return the results you write into them:
Example:
>>> import os
>>> import random
>>> import replay
>>> import tempfile
>>> fd, tmpname = tempfile.mkstemp(suffix='.replay')
>>> with open(tmpname, 'w') as f: n = f.write('random() = 40\n')
>>> r = replay.Replay(tmpname)
>>> r(random.random)
40
>>> os.remove(tmpname)
"""
__version__ = '0.1.2'
from .replay import Replay
| agpl-3.0 | 5,085,508,090,791,216,000 | 28.3 | 73 | 0.685438 | false |
google-research/world_models | bin/eval.py | 1 | 9185 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate a world model on an offline dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Callable, Dict, Text
from absl import app
from absl import flags
from absl import logging
import gin
import numpy as np
import tensorflow.compat.v1 as tf
from world_models.loops import train_eval
from world_models.planners import planners
from world_models.simulate import simulate
from world_models.tasks import tasks
from world_models.utils import npz
FLAGS = flags.FLAGS
flags.DEFINE_string("model_dir", None, "Model checkpoint directory.")
flags.DEFINE_string("data_dir", None, "data directory.")
flags.DEFINE_string("output_dir", None, "output directory.")
flags.DEFINE_multi_string(
"config_path", None,
"Newline separated list of paths to a world models gin configs.")
flags.DEFINE_multi_string("config_param", None,
"Newline separated list of Gin parameter bindings.")
flags.DEFINE_bool("enable_eager", True, "Enable eager execution mode.")
flags.DEFINE_integer("num_virtual_gpus", -1, "If >1, enables virtual gpus.")
flags.DEFINE_boolean("train", False, "Train the model on data before eval.")
flags.DEFINE_string("train_data_dir", None, "train data path.")
def frame_error(predicted_frames, ground_truth_frames):
"""Frame prediction error as average L2 norm between pixels."""
batch, prediction_horizon = predicted_frames.shape[:2]
return np.mean(
np.linalg.norm(
np.reshape(
np.asarray(predicted_frames, dtype=np.float32),
[batch, prediction_horizon, -1, 1]) - np.reshape(
np.asarray(ground_truth_frames, dtype=np.float32),
[batch, prediction_horizon, -1, 1]),
axis=-1),
axis=-1)
def reward_error(predicted_rewards, ground_truth_rewards):
"""Reward prediction error as L2 norm."""
return np.linalg.norm(predicted_rewards - ground_truth_rewards, axis=-1)
@gin.configurable(
blacklist=["eval_dir", "train_dir", "model_dir", "result_dir"])
def offline_evaluate(
predict_fn: Callable[[np.ndarray, Any], Dict[Text, np.ndarray]],
observe_fn: Callable[[np.ndarray, np.ndarray, np.ndarray, Any], Any],
reset_fn: Callable[..., Any],
train_fn: Callable[[Text], None] = None,
train_dir: Text = None,
enable_train: bool = False,
train_eval_iterations: int = 0,
online_eval_task: tasks.Task = None,
online_eval_planner: planners.Planner = None,
online_eval_episodes: int = 0,
eval_dir: Text = None,
model_dir: Text = None,
result_dir: Text = None,
episode_length: int = None,
num_episodes: int = 100,
prediction_horizon: int = 1,
batch: int = 128):
"""offline model evaluation."""
assert eval_dir, "eval_dir is required"
assert model_dir, "model_dir is required"
assert result_dir, "result_dir is required"
assert episode_length, "episode_length is required"
if enable_train:
assert train_dir, "train_dir is required for training"
assert train_eval_iterations, ("train_eval_iterations is required for "
"training")
for i in range(train_eval_iterations):
train_fn(train_dir)
result_dir_at_step = os.path.join(result_dir, "%d" % i)
eval_once(
result_dir=result_dir_at_step,
eval_dir=eval_dir,
episode_length=episode_length,
prediction_horizon=prediction_horizon,
batch=batch,
num_episodes=num_episodes,
reset_fn=reset_fn,
observe_fn=observe_fn,
predict_fn=predict_fn)
if online_eval_episodes:
summary_dir = os.path.join(result_dir, "online_eval")
episodes, predictions, score = simulate.simulate(
online_eval_task, online_eval_planner, online_eval_episodes)
train_eval.visualize(summary_dir, i, episodes, predictions,
{"score": score})
else:
eval_once(
result_dir=result_dir,
eval_dir=eval_dir,
episode_length=episode_length,
prediction_horizon=prediction_horizon,
batch=batch,
num_episodes=num_episodes,
reset_fn=reset_fn,
observe_fn=observe_fn,
predict_fn=predict_fn)
def eval_once(result_dir, eval_dir, episode_length, prediction_horizon, batch,
num_episodes, reset_fn, observe_fn, predict_fn):
"""Run offline eval once and store the results in `result_dir`."""
dataset = npz.load_dataset_from_directory(eval_dir, episode_length, batch)
iterator = dataset.as_numpy_iterator()
state = None
reward_path = os.path.join(result_dir, "rewards")
reward_error_at_prediction_horizon = np.zeros((prediction_horizon))
frame_error_at_prediction_horizon = np.zeros((prediction_horizon))
logging.info("Staring evaluation")
predictions = {}
for b, episodes in enumerate(iterator):
if b * batch >= num_episodes:
break
if episodes["image"].dtype != np.uint8:
episodes["image"] = np.clip(episodes["image"] * 255, 0,
255).astype(np.uint8)
state = reset_fn(state=state, proposals=batch)
for i in range(episode_length - prediction_horizon):
timestep = {key: value[:, i:i + 1] for key, value in episodes.items()}
frame = timestep["image"]
reward = timestep["reward"]
action = timestep["action"]
future_actions = episodes["action"][:, i:i + prediction_horizon]
future_frames = episodes["image"][:, i:i + prediction_horizon]
future_rewards = episodes["reward"][:, i:i + prediction_horizon]
state = observe_fn(frame, action, reward, state)
predictions = predict_fn(future_actions, state)
if "reward" in predictions:
npz.save_dictionary(
{
"pred": predictions["reward"],
"true": future_rewards
}, reward_path)
reward_error_at_prediction_horizon += np.sum(
reward_error(predictions["reward"], future_rewards), axis=0)
if "image" in predictions:
frame_error_at_prediction_horizon += np.sum(
frame_error(predictions["image"], future_frames), axis=0)
logging.info("Finished evaluation on %d episodes", batch)
reward_error_at_prediction_horizon /= num_episodes * (
episode_length - prediction_horizon)
frame_error_at_prediction_horizon /= num_episodes * (
episode_length - prediction_horizon)
logging.info("Finished evaluation")
results = {}
if "reward" in predictions:
logging.info(
"Average reward L2 norm error for different prediction horizons: %s",
reward_error_at_prediction_horizon)
results["reward_error"] = reward_error_at_prediction_horizon
else:
logging.info("predict_fn does not predict rewards."
" L2 norm on reward prediction could not be calculated.")
if "image" in predictions:
logging.info(
"Average frame L2 norm error for different prediction horizons: %s",
frame_error_at_prediction_horizon)
results["image_error"] = frame_error_at_prediction_horizon
else:
logging.info("predict_fn does not predict frames."
" L2 norm on frame prediction could not be calculated.")
npz.save_dictionary(results, result_dir)
def main(argv):
del argv # Unused
if FLAGS.enable_eager:
tf.enable_eager_execution()
config_params = FLAGS.config_param or []
config_params += [
"model_dir='%s'" % FLAGS.model_dir,
"episodes_dir='%s'" % FLAGS.output_dir
]
gin.parse_config_files_and_bindings(FLAGS.config_path, config_params)
if FLAGS.num_virtual_gpus > -1:
gpus = tf.config.experimental.list_physical_devices("GPU")
total_gpu_mem_limit = 8192
per_gpu_mem_limit = total_gpu_mem_limit / FLAGS.num_virtual_gpus
virtual_gpus = [
tf.config.experimental.VirtualDeviceConfiguration(
memory_limit=per_gpu_mem_limit)
] * FLAGS.num_virtual_gpus
tf.config.experimental.set_virtual_device_configuration(
gpus[0], virtual_gpus)
logical_gpus = tf.config.experimental.list_logical_devices("GPU")
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
offline_evaluate( # pylint:disable=no-value-for-parameter
result_dir=FLAGS.output_dir,
model_dir=FLAGS.model_dir,
eval_dir=FLAGS.data_dir,
train_dir=FLAGS.train_data_dir,
enable_train=FLAGS.train)
if __name__ == "__main__":
flags.mark_flags_as_required(["output_dir"])
flags.mark_flags_as_required(["config_path"])
app.run(main)
| apache-2.0 | 3,375,387,708,586,462,000 | 37.592437 | 78 | 0.664453 | false |
simupy/simupy | simupy/block_diagram.py | 1 | 35233 | from scipy.integrate import ode
import numpy as np
import warnings
import sys as syslib
from simupy.utils import callable_from_trajectory
from scipy.optimize import brentq
DEFAULT_INTEGRATOR_CLASS = ode
DEFAULT_INTEGRATOR_OPTIONS = {
'name': 'dopri5',
'rtol': 1e-6,
'atol': 1e-12,
'nsteps': 500,
'max_step': 0.0
}
DEFAULT_EVENT_FINDER = brentq
DEFAULT_EVENT_FIND_OPTIONS = {
'xtol': 2e-12,
'rtol': 8.8817841970012523e-16,
'maxiter': 100
}
nan_warning_message = ("BlockDiagram encountered NaN outputs and quit during" +
" {}. This may have been intentional! NaN outputs at " +
"time t={}, state x={}, output y={}")
class SimulationResult(object):
"""
A simple class to collect simulation result trajectories.
Attributes
----------
t : array of times
x : array of states
y : array of outputs
e : array of events
"""
max_allocation = 2**7
def __init__(self, dim_states, dim_outputs, tspan, n_sys, initial_size=0):
if initial_size == 0:
initial_size = tspan.size
self.t = np.empty(initial_size)
self.x = np.empty((initial_size, dim_states))
self.y = np.empty((initial_size, dim_outputs))
self.e = np.empty((initial_size, n_sys))
self.res_idx = 0
self.tspan = tspan
self.t0 = tspan[0]
self.tF = tspan[-1]
def allocate_space(self, t):
more_rows = int((self.tF-t)*self.t.size/(t-self.t0))+1
more_rows = max(min(more_rows, self.max_allocation),1)
self.t = np.r_[self.t, np.empty(more_rows)]
self.x = np.r_[self.x, np.empty((more_rows, self.x.shape[1]))]
self.y = np.r_[self.y, np.empty((more_rows, self.y.shape[1]))]
self.e = np.r_[self.e, np.empty((more_rows, self.e.shape[1]))]
def new_result(self, t, x, y, e=None):
if self.res_idx >= self.t.size:
self.allocate_space(t)
self.t[self.res_idx] = t
self.x[self.res_idx, :] = x
self.y[self.res_idx, :] = y
if e is not None:
self.e[self.res_idx, :] = e
else:
self.e[self.res_idx, :] = np.zeros(self.e.shape[1])
self.res_idx += 1
def last_result(self, n=1, copy=False):
n = np.clip(n, 1, self.res_idx)
if copy:
return (np.copy(self.t[self.res_idx-n]),
np.copy(self.x[self.res_idx-n, :]),
np.copy(self.y[self.res_idx-n, :]))
else:
return (self.t[self.res_idx-n], self.x[self.res_idx-n, :],
self.y[self.res_idx-n, :])
class BlockDiagram(object):
"""
A block diagram of dynamical systems with their connections which can be
numerically simulated.
"""
def __init__(self, *systems):
"""
Initialize a BlockDiagram, with an optional list of systems to start
the diagram.
"""
self.systems = np.array([], dtype=object)
self.connections = np.array([], dtype=np.bool_).reshape((0, 0))
self.dts = np.array([], dtype=np.float_)
self.events = np.array([], dtype=np.bool_)
self.cum_inputs = np.array([0], dtype=np.int_)
self.cum_outputs = np.array([0], dtype=np.int_)
self.cum_states = np.array([0], dtype=np.int_)
self.cum_events = np.array([0], dtype=np.int_)
self.inputs = np.array([], dtype=np.bool_).reshape((0,0))
self.dim_input = 0
for sys in systems:
self.add_system(sys)
@property
def initial_condition(self):
x0 = np.zeros(self.cum_states[-1]) # TODO: pre-allocate?
for sysidx in np.where(self.systems)[0]:
sys = self.systems[sysidx]
state_start = self.cum_states[sysidx]
state_end = self.cum_states[sysidx+1]
x0[state_start:state_end] = sys.initial_condition
return x0
@property
def dim_state(self):
return self.cum_states[-1]
@property
def dim_output(self):
# TODO: allow internal outputs to be "closed"? For now, no
return self.cum_outputs[-1]
@property
def dt(self):
return self.dts.min()
def prepare_to_integrate(self):
for sys in self.systems:
sys.prepare_to_integrate()
if np.sum(self.events) > 0:
self.event_equation_function = self.event_equation_function_implementation
self.update_equation_function = self.update_equation_function_implementation
else:
self.event_equation_function = None
self.update_equation_function = None
def create_input(self, to_system_input, channels=[], inputs=[]):
"""
Create or use input channels to use block diagram as a subsystem.
Parameters
----------
channels : list-like
Selector index of the input channels to connect.
to_system_input : dynamical system
The system (already added to BlockDiagram) to which inputs will be
connected. Note that any previous input connections will be
over-written.
inputs : list-like, optional
Selector index of the inputs to connect. If not specified or of
length 0, will connect all of the inputs.
"""
channels = np.asarray(channels)
if len(channels) == 0:
raise ValueError("Cannot create input without specifying channel")
if np.min(channels) < 0:
raise ValueError("Cannot create input channel < 0")
if len(inputs) == 0:
inputs = np.arange(to_system_input.dim_input)
else:
inputs = np.asarray(inputs)
inputs = inputs + self.cum_inputs[
np.where(self.systems == to_system_input)
]
if len(channels) != len(inputs) and len(channels) != 1:
raise ValueError("Cannot broadcast channels to inputs")
if np.max(channels) > self.dim_input-1:
self.inputs = np.pad(self.inputs,
((0, np.max(channels) - self.dim_input+1),
(0, 0)),
'constant', constant_values = 0)
self.dim_input = np.max(channels)+1
self.inputs[:, inputs] = False
self.connections[:, inputs] = False
self.inputs[channels, inputs] = True
def connect(self, from_system_output, to_system_input, outputs=[],
inputs=[]):
"""
Connect systems in the block diagram.
Parameters
----------
from_system_output : dynamical system
The system (already added to BlockDiagram) from which outputs will
be connected. Note that the outputs of a system can be connected to
multiple inputs.
to_system_input : dynamical system
The system (already added to BlockDiagram) to which inputs will be
connected. Note that any previous input connections will be
over-written.
outputs : list-like, optional
Selector index of the outputs to connect. If not specified or of
length 0, will connect all of the outputs.
inputs : list-like, optional
Selector index of the inputs to connect. If not specified or of
length 0, will connect all of the inputs.
"""
if len(outputs) == 0:
outputs = np.arange(from_system_output.dim_output)
else:
outputs = np.asarray(outputs)
outputs = outputs + self.cum_outputs[
np.where(self.systems == from_system_output)
]
if len(inputs) == 0:
inputs = np.arange(to_system_input.dim_input)
else:
inputs = np.asarray(inputs)
inputs = inputs + self.cum_inputs[
np.where(self.systems == to_system_input)
]
# TODO: Check that this can be broadcast correctly
self.inputs[:, inputs] = False
self.connections[:, inputs] = False
self.connections[outputs, inputs] = True
def add_system(self, system):
"""
Add a system to the block diagram
Parameters
----------
system : dynamical system
System to add to BlockDiagram
"""
self.systems = np.append(self.systems, system)
self.cum_states = np.append(self.cum_states,
self.cum_states[-1] + system.dim_state)
self.cum_inputs = np.append(self.cum_inputs,
self.cum_inputs[-1] + system.dim_input)
self.cum_outputs = np.append(self.cum_outputs,
self.cum_outputs[-1] + system.dim_output)
self.events = np.append(self.events, np.bool_(
getattr(system, 'event_equation_function', None) and
getattr(system, 'update_equation_function', None)))
self.cum_events = np.append(self.cum_events,
self.cum_events[-1] + self.events[-1])
self.dts = np.append(self.dts, getattr(system, 'dt', 0))
self.connections = np.pad(self.connections,
((0, system.dim_output),
(0, system.dim_input)),
'constant', constant_values=0)
self.inputs = np.pad(self.inputs,
((0, 0),
(0, system.dim_input)),
'constant', constant_values=0)
def output_equation_function(self, t, state, input_=None, update_memoryless_event=False):
output = np.zeros(self.cum_outputs[-1])
input_ = input_ if input_ is not None else np.zeros(self.dim_input)
# compute outputs for full systems, y[t_k]=h(t_k,x[t_k])
for sysidx in np.where((np.diff(self.cum_states) > 0))[0]:
sys = self.systems[sysidx]
output_start = self.cum_outputs[sysidx]
output_end = self.cum_outputs[sysidx+1]
state_start = self.cum_states[sysidx]
state_end = self.cum_states[sysidx+1]
state_values = state[state_start:state_end]
output[output_start:output_end] = \
sys.output_equation_function(t, state_values).reshape(-1)
# compute outputs for memoryless systems, y[t_k]=h(t_k,u[t_k])
for sysidx in np.where((np.diff(self.cum_states) == 0))[0][::-1]:
sys = self.systems[sysidx]
output_start = self.cum_outputs[sysidx]
output_end = self.cum_outputs[sysidx+1]
input_start = self.cum_inputs[sysidx]
input_end = self.cum_inputs[sysidx+1]
input_values = np.zeros(sys.dim_input)
input_index, output_index = np.where(
self.connections[:, input_start:input_end].T
)
input_values[input_index] = output[output_index]
input_index, as_sys_input_index = np.where(
self.inputs[:, input_start:input_end].T
)
if as_sys_input_index.size:
input_values[input_index] = input_[as_sys_input_index]
if sys.dim_input:
if self.events[sysidx] and update_memoryless_event:
sys.update_equation_function(t, input_values)
output[output_start:output_end] = \
sys.output_equation_function(t, input_values).reshape(-1)
else:
if self.events[sysidx] and update_memoryless_event:
sys.update_equation_function(t)
output[output_start:output_end] = \
sys.output_equation_function(t).reshape(-1)
return output
def state_equation_function(self, t, state, input_=None, output=None):
# TODO: how to define available inputs??
dxdt = np.zeros(self.cum_states[-1])
output = output if output is not None else self.output_equation_function(t, state, input_)
for sysidx in np.where((np.diff(self.cum_states) > 0))[0]:
sys = self.systems[sysidx]
state_start = self.cum_states[sysidx]
state_end = self.cum_states[sysidx+1]
state_values = state[state_start:state_end]
input_start = self.cum_inputs[sysidx]
input_end = self.cum_inputs[sysidx+1]
input_values = np.zeros(sys.dim_input)
input_index, output_index = np.where(
self.connections[:, input_start:input_end].T
)
input_values[input_index] = output[output_index]
input_index, as_sys_input_index = np.where(
self.inputs[:, input_start:input_end].T
)
if as_sys_input_index.size:
input_values[input_index] = input_[as_sys_input_index]
if sys.dim_input:
dxdt[state_start:state_end] = \
sys.state_equation_function(
t, state_values, input_values
).reshape(-1)
else:
dxdt[state_start:state_end] = \
sys.state_equation_function(t, state_values).reshape(-1)
return dxdt
def systems_event_equation_functions(self, t, state, output):
events = np.zeros(self.systems.size)
# compute events for stateful systems
for sysidx in np.where(
(np.diff(self.cum_states) > 0) & self.events
)[0]:
sys = self.systems[sysidx]
state_start = self.cum_states[sysidx]
state_end = self.cum_states[sysidx+1]
state_values = state[state_start:state_end]
events[sysidx] = sys.event_equation_function(
t, state_values).reshape(-1)
# compute events for memoryless systems
for sysidx in np.where(
(np.diff(self.cum_states) == 0) & self.events
)[0]:
sys = self.systems[sysidx]
input_start = self.cum_inputs[sysidx]
input_end = self.cum_inputs[sysidx+1]
input_values = np.zeros(sys.dim_input)
input_index, output_index = np.where(
self.connections[:, input_start:input_end].T
)
input_values[input_index] = output[output_index]
if sys.dim_input:
events[sysidx] = sys.event_equation_function(
t, input_values).reshape(-1)
else:
events[sysidx] = sys.event_equation_function(
t).reshape(-1)
return events
def event_equation_function_implementation(self, t, state, output=None):
        output = output if output is not None else self.output_equation_function(t, state)
return np.prod(
self.systems_event_equation_functions(t, state, output))
def update_equation_function_implementation(self, t, state, input_=None, output=None):
next_state = state.copy()
        output = output if output is not None else self.output_equation_function(t, state)
        input_ = input_ if input_ is not None else np.zeros(self.dim_input)
# find which one(s) crossed
        # call the crossing system(s)' update_equation_function & fill in next_state
return next_state
def computation_step(self, t, state, output=None, selector=True, do_events=False):
"""
callable to compute system outputs and state derivatives
"""
# TODO: make sure this still works
# TODO: p sure I just had output_equation_function here
# I guess the outputs_in wasn't really necessary?
output = output if output is not None else \
self.output_equation_function(t, state)
# compute state equation for full systems,
# x[t_k']=f(t_k,x[t_k],u[t_k])
dxdt = self.state_equation_function(t, state, output=output)
if do_events:
events = self.systems_event_equation_functions(t, state, output)
return dxdt, output, events
return dxdt, output
def simulate(self, tspan, integrator_class=DEFAULT_INTEGRATOR_CLASS,
integrator_options=DEFAULT_INTEGRATOR_OPTIONS,
event_finder=DEFAULT_EVENT_FINDER,
event_find_options=DEFAULT_EVENT_FIND_OPTIONS):
"""
Simulate the block diagram
Parameters
----------
tspan : list-like or float
Argument to specify integration time-steps.
If a single time is specified, it is treated as the final time.
If two times are specified, they are treated as initial and
final times. In either of these conditions, it is assumed that
that every time step from a variable time-step integrator will
be stored in the result.
If more than two times are specified, these are the only times
where the trajectories will be stored.
integrator_class : class, optional
Class of integrator to use. Defaults to ``scipy.integrate.ode``.
Must provide the following subset of the ``scipy.integrate.ode``
API:
- ``__init__(derivative_callable(time, state))``
- ``set_integrator(**kwargs)``
- ``set_initial_value(state, time)``
- ``set_solout(successful_step_callable(time, state))``
- ``integrate(time)``
- ``successful()``
- ``y``, ``t`` properties
integrator_options : dict, optional
Dictionary of keyword arguments to pass to
``integrator_class.set_integrator``.
event_finder : callable, optional
Interval root-finder function. Defaults to
``scipy.optimize.brentq``, and must take the equivalent positional
arguments, ``f``, ``a``, and ``b``, and return ``x0``, where
``a <= x0 <= b`` and ``f(x0)`` is the zero.
event_find_options : dict, optional
Dictionary of keyword arguments to pass to ``event_finder``. It
must provide a key ``'xtol'``, and it is expected that the exact
zero lies within ``x0 +/- xtol/2``, as ``brentq`` provides.
"""
dense_output = True
if np.isscalar(tspan):
t0 = 0
tF = tspan
elif len(tspan) == 2:
t0 = tspan[0]
tF = tspan[1]
else:
dense_output = False
t0 = tspan[0]
tF = tspan[-1]
if dense_output:
tspan = np.array([t0, tF])
else:
tspan = np.array(tspan)
"""
tspan is used to indicate which times must be computed
these are end-points for continuous time simulations, meshed data
points for continuous.
"""
if ('max_step' in integrator_options) and \
(integrator_options['max_step'] == 0.0):
integrator_options = integrator_options.copy()
# TODO: find the harmonic to ensure no skipped steps?
if np.any(self.dts!=0.0):
integrator_options['max_step'] = np.min(self.dts[self.dts!=0.0])/2
# generate tresult arrays; initialize x0
results = SimulationResult(self.cum_states[-1], self.cum_outputs[-1],
tspan, self.systems.size)
def continuous_time_integration_step(t, state, for_integrator=True):
"""
function to manipulate stored states and integrator state
to pass to between computation_step and integrator
"""
comp_result = self.computation_step(
            t, state.reshape(-1), selector=True, do_events=not for_integrator)
if not for_integrator:
return (state,) + comp_result[1:]
return comp_result[0]
# store the results from each continuous integration step
def collect_integrator_results(t, state):
dxdt, output, events = \
continuous_time_integration_step(t, state,
for_integrator=False)
test_sel = results.res_idx - np.arange(3)-1
if (t in results.t[test_sel] and
state in results.x[test_sel, :] and
output in results.y[test_sel, :]):
return
# check for events here -- before saving, because it is potentially
# invalid
prev_events = results.e[results.res_idx-1, :]
if (np.any(np.sign(prev_events) != np.sign(events)) &
(results.t[results.res_idx-1] > 0)):
return -1
else:
results.new_result(t, state, output, events)
if np.any(np.isnan(output)):
warnings.warn(nan_warning_message.format(
"variable step-size collection",
t,
state,
output
))
return -1
for sys in self.systems:
sys.prepare_to_integrate()
x0 = self.initial_condition
# initial condition computation, populate initial condition in results
#
# Initial event computation
#
# compute first output for stateful systems
y0 = self.output_equation_function(t0, x0, update_memoryless_event=True)
dx_dt_0, y0, e0 = self.computation_step( # TODO: this is where logic for events needs to happen
t0, x0, y0, selector=True, do_events=True)
# initial_computation[0] is saved for the next round of selected DTs
results.new_result(t0, x0, y0, e0)
prev_event_t = t0
# setup the integrator
r = integrator_class(continuous_time_integration_step)
r.set_integrator(**integrator_options)
r.set_initial_value(x0, t0)
if dense_output:
r.set_solout(collect_integrator_results)
# main simulation loop
t_idx = 0
next_t = tspan[1]
# TODO: fix extra points being added to results
while True:
if np.any(np.isnan(results.y[:results.res_idx, :])):
warnings.warn(nan_warning_message.format(
"tspan iteration (after event or meshed time-step)",
tspan[t_idx-1],
results.x[results.res_idx-1, :],
results.y[results.res_idx-1, :]
))
break
# loop to integrate until next_t, while handling events
try:
r.integrate(next_t)
except KeyboardInterrupt as kbi:
break
"""
possible branches:
1. if dense:
a. event occured, process it
b. integration completed (to next_t), so exit
c. some other error, abort
2. if meshed:
a. event occured, process it
b. mesh point achieved, no event
i. if next_t == tF, exit
ii. otherwise, do the next one.
c. some other error, abort
1b, 2b, require adding the final point to the system (maybe not 1b)
1a and 2a are the same, except if not dense, maybe don't save the point?? mesh should have fixed output datasize
or, just don't allow meshed datapoints??
1c and 2c are the same
TODO: decide what to do about meshed data points, stiff solvers
TODO: figure out how to run tests that don't involve those solvers
"""
if dense_output:
latest_t, latest_states, latest_outputs = \
results.last_result()
if r.t == next_t or np.any(np.isnan(latest_outputs)):
break
check_states, check_outputs, check_events = \
continuous_time_integration_step(r.t, r.y, False)
if np.any(np.isnan(check_outputs)):
warnings.warn(nan_warning_message.format(
"tspan iteration after continuous integration",
r.t,
check_states,
check_outputs
))
break
if (not dense_output and
np.all(
np.sign(results.e[results.res_idx-1, :]) ==
np.sign(check_events)
)):
latest_states, latest_outputs, = \
check_states, check_outputs
break
if not r.successful():
warnings.warn("Integrator quit unsuccessfully.")
break
#
# need to handle event
#
# results index from previous event crossing
prev_event_idx = np.where(
results.t[:results.res_idx, None] == prev_event_t
)[0][-1]
prev_event_idx = max(
min(prev_event_idx, results.res_idx-3), 0
)
# find which system(s) crossed
event_cross_check = (
np.sign(results.e[results.res_idx-1, :]) !=
np.sign(check_events)
)
event_index_crossed = np.where(event_cross_check)[0]
# interpolate to find first t crossing
# holds t's where event occured
event_ts = np.zeros(self.systems.size)
# holds callable for root finding
event_searchables = np.empty(self.systems.size,
dtype=object)
event_callables = np.empty(self.systems.size,
dtype=object)
ts_to_collect = np.r_[
results.t[prev_event_idx:results.res_idx],
]
unique_ts_to_collect, unique_ts_to_collect_idx = \
np.unique(ts_to_collect, return_index=True)
#
# use vars check_states, check_outputs, check_events, r.t, (r.y?)
# in interpolatant
PRE_CROSS_MINIMUM = 3 # interpolant requires 4, I think, so 3 before the crossing
crossed_size = max(
PRE_CROSS_MINIMUM - unique_ts_to_collect.size, 0
) + 1
crossed_times = np.zeros(crossed_size)
crossed_states = np.zeros((crossed_size, self.cum_states[-1]))
crossed_outputs = np.zeros((crossed_size, self.cum_outputs[-1]))
crossed_events = np.zeros((crossed_size, self.systems.size))
# use array allow in scope of result collector; not sure if needed
crossed_idx = [0]
def collect_integrator_results_events(t, state):
dxdt, output, events = \
continuous_time_integration_step(t, state,
for_integrator=False)
test_sel = results.res_idx - np.arange(3)-1
if ((t in results.t[test_sel] and
state in results.x[test_sel, :] and
output in results.y[test_sel, :]) or
(t in crossed_times and state in crossed_states and
output in crossed_outputs)):
return
crossed_times[crossed_idx[0]] = t
crossed_states[crossed_idx[0],:] = state
crossed_outputs[crossed_idx[0],:] = output
crossed_events[crossed_idx[0],:] = events
crossed_idx[0] += 1
if (crossed_idx[0] >= crossed_size):
return -1
r.set_initial_value(r.y, r.t)
r.set_solout(collect_integrator_results_events)
r.integrate(next_t)
if dense_output:
r.set_solout(collect_integrator_results)
else:
r.set_solout(lambda *args: None)
ts_for_interpolant = np.r_[unique_ts_to_collect, crossed_times]
state_values = results.x[
prev_event_idx:results.res_idx,
]
state_values = np.r_[
state_values[unique_ts_to_collect_idx, :],
crossed_states
]
state_traj_callable = callable_from_trajectory(
ts_for_interpolant,
state_values
)
output_values = results.y[prev_event_idx:results.res_idx]
output_values = np.r_[
output_values[unique_ts_to_collect_idx, :],
crossed_outputs
]
output_traj_callable = callable_from_trajectory(
ts_for_interpolant,
output_values
)
for sysidx in event_index_crossed:
sys = self.systems[sysidx]
state_start = self.cum_states[sysidx]
state_end = self.cum_states[sysidx+1]
input_start = self.cum_inputs[sysidx]
input_end = self.cum_inputs[sysidx+1]
if sys.dim_state:
event_searchables[sysidx] = \
lambda t: sys.event_equation_function(
t, state_traj_callable(t)[...,
state_start:state_end]
)
else:
event_searchables[sysidx] = \
lambda t: sys.event_equation_function(
t, output_traj_callable(t)[...,
np.where(
self.connections[
:, input_start:input_end
].T
)[1]
]
)
if np.prod(np.sign(np.r_[
event_searchables[sysidx](results.t[prev_event_idx]),
event_searchables[sysidx](r.t)])) not in [0,-1]:
e_checks = np.r_[
results.e[
prev_event_idx:results.res_idx,
sysidx
],
check_events[sysidx]
]
left_bracket_idx = np.where(
np.sign(e_checks[:-1]) !=
np.sign(e_checks[-1])
)[0][-1]
left_bracket = ts_to_collect[left_bracket_idx]
else:
left_bracket = results.t[prev_event_idx]
event_ts[sysidx] = event_finder(
event_searchables[sysidx],
left_bracket + np.finfo(np.float_).eps,
r.t,
**event_find_options
)
next_event_t = np.min(event_ts[event_index_crossed])
left_t = next_event_t-event_find_options['xtol']/2
left_x = state_traj_callable(left_t)
new_states, new_outputs, new_events = \
continuous_time_integration_step(
left_t, left_x, False)
results.new_result(
left_t, new_states, new_outputs, new_events)
right_t = next_event_t+event_find_options['xtol']/2
right_x = state_traj_callable(right_t).reshape(-1)
right_y = output_traj_callable(right_t).reshape(-1)
            # need to update the output for any stateful system; probably do the
            # full pattern (i.e., output of system with state and event, output of
            # system with event only, etc.) from continuous_time_integration_step
            # (for_integrator=False). Or just leave like this since it works??
# TODO: when cleaning up the integration loops, clean the event update too!
update_equation_function_indexes = np.where(
event_cross_check & (event_ts == next_event_t)
)[0]
for sysidx in update_equation_function_indexes:
sys = self.systems[sysidx]
output_start = self.cum_outputs[sysidx]
output_end = self.cum_outputs[sysidx+1]
input_start = self.cum_inputs[sysidx]
input_end = self.cum_inputs[sysidx+1]
input_values = right_y[np.where(
self.connections[:, input_start:input_end].T
)[1]]
state_start = self.cum_states[sysidx]
state_end = self.cum_states[sysidx+1]
state_values = right_x[state_start:state_end]
if sys.dim_state and sys.dim_input:
update_return_value = sys.update_equation_function(
right_t,
state_values,
input_values
)
elif sys.dim_state:
update_return_value = sys.update_equation_function(
right_t,
state_values
)
elif sys.dim_input:
update_return_value = sys.update_equation_function(
right_t, input_values)
else:
update_return_value = sys.update_equation_function(
right_t)
if sys.dim_state:
right_x[state_start:state_end] = \
update_return_value.reshape(-1)
right_y[output_start:output_end] = \
sys.output_equation_function(right_t, update_return_value).squeeze()
elif sys.dim_input:
right_y[output_start:output_end] = \
sys.output_equation_function(right_t, input_values).squeeze()
else:
right_y[output_start:output_end] = \
sys.output_equation_function(right_t).squeeze()
new_states, new_outputs, new_events = \
continuous_time_integration_step(
right_t, right_x, False)
results.new_result(
right_t, new_states, new_outputs, new_events)
# set x (r.y), store in result as t+epsilon? if not dense,
# add extra 1=-0
r.set_initial_value(right_x, right_t)
prev_event_t = right_t
# TODO: THIS IS WHERE PREVIOUS EVENT HANDLING LOOP ENDED
results.t = results.t[:results.res_idx]
results.x = results.x[:results.res_idx, :]
results.y = results.y[:results.res_idx, :]
results.e = results.e[:results.res_idx, :]
return results
| bsd-2-clause | -5,954,896,690,934,290,000 | 38.322545 | 276 | 0.519854 | false |
oracal/cppstub | tests/test_header_output.py | 1 | 11523 | #path hack.
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import unittest
from cppstub import CppFile
from cppstub import CppNamespace
from cppstub import CppMethod
from cppstub import CppClass
class CppStubHeaderOutputTestSuite(unittest.TestCase):
def setUp(self):
self.cpp_file = CppFile("TestSuite")
def test_header_output_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
self.cpp_file.namespaces.append(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\n}\n\n", self.cpp_file.header())
def test_header_output_namespace_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_namespace1 = CppNamespace("test1", cpp_namespace)
cpp_namespace.namespaces.append(cpp_namespace1)
self.cpp_file.namespaces.append(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nnamespace test1\n{\n\n}\n\n}\n\n", self.cpp_file.header())
def test_header_output_multiple_namespaces(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_namespace1 = CppNamespace("test1", self.cpp_file)
self.cpp_file.namespaces.append(cpp_namespace)
self.cpp_file.namespaces.append(cpp_namespace1)
self.assertEquals("\n\nnamespace test\n{\n\n}\n\nnamespace test1\n{\n\n}\n\n", self.cpp_file.header())
def test_header_output_function_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_function = CppMethod("test1", [], "void", cpp_namespace)
cpp_namespace.methods.append(cpp_function)
self.cpp_file.namespaces.append(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nvoid test1();\n\n}\n\n", self.cpp_file.header())
def test_header_output_constructor_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("Test", [], None, cpp_class)
cpp_class.methods["public"].append(cpp_method)
self.cpp_file.namespaces.append(cpp_namespace)
cpp_namespace.classes.append(cpp_class)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\npublic:\n\n Test();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_private_access_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "void", cpp_class)
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n void test1();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_private_access_const_return_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "int", cpp_class)
cpp_method.const_return_type = True
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n const int test1();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_private_access_virtual_const_return_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "int", cpp_class)
cpp_method.const_return_type = True
cpp_method.virtual = True
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n virtual const int test1();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_static_return_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "int", cpp_class)
cpp_method.static = True
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n static int test1();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_public_access_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "int", cpp_class)
cpp_class.add_method(cpp_method, "public")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\npublic:\n\n int test1();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_protected_access_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "int", cpp_class)
cpp_class.add_method(cpp_method, "protected")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprotected:\n\n int test1();\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_method_with_return_type_and_arguments_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", ["int argument"], "int", cpp_class)
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n int test1(int argument);\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_method_with_different_return_type_and_multiple_arguments_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method = CppMethod("test1", ["int argument1", "std::string argument2"], "std::string", cpp_class)
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n std::string test1(int argument1, std::string argument2);\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_multiple_methods_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method1 = CppMethod("test1", ["int argument1"], "int", cpp_class)
cpp_method2 = CppMethod("test2", ["std::string argument2"], "std::string", cpp_class)
cpp_class.add_method(cpp_method1, "private")
cpp_class.add_method(cpp_method2, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\nprivate:\n\n int test1(int argument1);\n\n std::string test2(std::string argument2);\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_multiple_access_multiple_methods_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test", parent = cpp_namespace)
cpp_method1 = CppMethod("test1", ["int argument1"], "int", cpp_class)
cpp_method2 = CppMethod("test2", ["std::string argument2"], "std::string", cpp_class)
cpp_class.add_method(cpp_method1, "public")
cpp_class.add_method(cpp_method2, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test\n{\n\npublic:\n\n int test1(int argument1);\n\nprivate:\n\n std::string test2(std::string argument2);\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_private_access_class_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class1 = CppClass("Test1", parent = cpp_namespace)
cpp_class2 = CppClass("Test2", parent = cpp_class1)
cpp_class1.add_class(cpp_class2, "private")
cpp_namespace.add_class(cpp_class1)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test1\n{\n\nprivate:\n\n class Test2\n {\n\n };\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_public_access_class_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class1 = CppClass("Test1", parent = cpp_namespace)
cpp_class2 = CppClass("Test2", parent = cpp_class1)
cpp_class1.add_class(cpp_class2, "public")
cpp_namespace.add_class(cpp_class1)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test1\n{\n\npublic:\n\n class Test2\n {\n\n };\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_protected_access_class_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class1 = CppClass("Test1", parent = cpp_namespace)
cpp_class2 = CppClass("Test2", parent = cpp_class1)
cpp_class1.add_class(cpp_class2, "protected")
cpp_namespace.add_class(cpp_class1)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test1\n{\n\nprotected:\n\n class Test2\n {\n\n };\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_template_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test1", parent = cpp_namespace)
cpp_class.templated = True
cpp_class.template_type = "Test"
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\ntemplate <class Test>\nclass Test1\n{\n\n};\n\n}\n\n", self.cpp_file.header())
def test_header_output_template_method_in_class_in_namespace(self):
cpp_namespace = CppNamespace("test", self.cpp_file)
cpp_class = CppClass("Test1", parent = cpp_namespace)
cpp_method = CppMethod("test1", [], "Test&", cpp_class)
cpp_method.templated = True
cpp_method.template_type = "Test"
cpp_class.add_method(cpp_method, "private")
cpp_namespace.add_class(cpp_class)
self.cpp_file.add_namespace(cpp_namespace)
self.assertEquals("\n\nnamespace test\n{\n\nclass Test1\n{\n\nprivate:\n\n template <class Test>\n Test& test1();\n\n};\n\n}\n\n", self.cpp_file.header())
if __name__ == '__main__':
unittest.main()
| mit | 646,316,628,420,251,500 | 57.19697 | 211 | 0.657121 | false |
thegamer87/Pymbrum | hr/TimeManager.py | 1 | 7095 | import httplib, urllib, json
from urlparse import urlparse
import datetime
import pytz
import re
DATA_PROVIDER_URL = "/servlet/SQLDataProviderServer"
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M"
HUMAN_TIME_FORMAT = "%Hh %Mm %Ss"
ROWS_FIELD = "rows"
ROWS_VALUE = "10"
START_ROW_FIELD = "startrow"
START_ROW_VALUE = "0"
COUNT_FIELD = "count"
COUNT_VALUE = "true"
SQL_CMD_FIELD = "sqlcmd"
SQL_CMD_VALUE = "rows:ushp_fgettimbrus"
PDATE_FIELD = "pDATE"
TIMBR_DAY_FIELD = "DAYSTAMP"
TIMBR_TIME_FIELD = "TIMETIMBR"
TIMBR_DIRECTION_FIELD = "DIRTIMBR"
TIMBR_CAUSE_FIELD = "CAUSETIMBR"
TIMBR_TYPE_FIELD = "TYPETIMBR"
TIMBR_IP_FIELD = "IPTIMBR"
minExitTime = {"https://hr.cineca.it/HRPortal":datetime.timedelta(minutes=30), "https://saas.hrzucchetti.it/hrpmaxmara":datetime.timedelta(hours=30)}
dayWorkTime={"https://hr.cineca.it/HRPortal":datetime.timedelta(hours=7, minutes=12), "https://saas.hrzucchetti.it/hrpmaxmara":datetime.timedelta(hours=8)}
class Timbratura:
VERSO_FIELD = "verso"
VERSO_ENTRATA = "E"
VERSO_USCITA = "U"
def __init__(self,day, time, direction, cause=None, type=None, ip=None):
self.day = day
self.time = time
self.direction = direction
self.cause = cause
self.type = type
self.ip=ip
def switchVerso(verso):
if verso == Timbratura.VERSO_ENTRATA:
return Timbratura.VERSO_USCITA
elif verso == Timbratura.VERSO_USCITA:
return Timbratura.VERSO_ENTRATA
def getTimbrature(cookie, url, date):
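    # POSTs to the HR portal's SQLDataProviderServer servlet (sqlcmd
    # "rows:ushp_fgettimbrus") and maps the returned Fields/Data JSON rows
    # into Timbratura objects, normalising the E/U direction sequence.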
parsedUrl = urlparse(url)
host = parsedUrl.netloc
path = parsedUrl.path+DATA_PROVIDER_URL
if not date:
date = datetime.date.today().strftime(DATE_FORMAT)
params = urllib.urlencode({ROWS_FIELD:ROWS_VALUE, START_ROW_FIELD:START_ROW_VALUE, COUNT_FIELD:COUNT_VALUE, SQL_CMD_FIELD:SQL_CMD_VALUE, PDATE_FIELD:date})
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", "Cookie":cookie}
connection = httplib.HTTPSConnection(host)
connection.request("POST", path, params, headers)
response = connection.getresponse()
responseStatus = response.status
responseData = response.read()
responseDict = json.loads(responseData)
headers = responseDict["Fields"]
data = responseDict["Data"]
print "RESPONSE STATUS: ", responseStatus
timbrature = []
versoActual = Timbratura.VERSO_ENTRATA
for adata in data:
if ("t" not in adata):
day = None
time = None
dir = None
cause = None
type = None
ip = None
for index, d in enumerate(adata):
if index < len(headers):
h = headers[index]
if h == TIMBR_DAY_FIELD:
day = d
if h == TIMBR_TIME_FIELD:
time = d
if h == TIMBR_DIRECTION_FIELD:
dir = d
if h == TIMBR_CAUSE_FIELD:
cause = d
if h == TIMBR_TYPE_FIELD:
type = d
if h == TIMBR_IP_FIELD:
ip = d
timbratura = Timbratura(day, time, dir, cause, type, ip)
if (timbratura.direction != versoActual):
timbratura.direction = versoActual
versoActual = switchVerso(versoActual)
timbrature.append(timbratura)
return timbrature
def getContatori(url, timbrature):
totalWorkTime = None
totalExitTime = None
precTime = None
precDir = None
time = None
precTime = None
if (timbrature):
for timbratura in timbrature:
dir = timbratura.direction
time = datetime.datetime.strptime(timbratura.time, TIME_FORMAT)
print "DIR: ",dir," TIMBR: ",str(timbratura.time)
if not precTime:
precTime = time
if dir == Timbratura.VERSO_USCITA:
workedTime = time - precTime
print "U timbr readed ... workedTime is: ",workedTime
if (not totalWorkTime):
totalWorkTime = workedTime
else:
totalWorkTime += workedTime
print "totalWorkTime updated to ",totalWorkTime
if dir == Timbratura.VERSO_ENTRATA:
exitTime = time-precTime
print "E timbr readed ... exitTime is: ",exitTime
if (not totalExitTime):
totalExitTime = exitTime
else:
totalExitTime += exitTime
print "totalExitTime updated to ",totalExitTime
precTime = time
companyMinExitTime = minExitTime[url]
nowTime = datetime.datetime.now(pytz.timezone("Europe/Rome")).time()
nowDateTime = datetime.datetime(time.year, time.month, time.day, nowTime.hour, nowTime.minute, nowTime.second)
print "now is ",nowDateTime
workedTime = nowDateTime - time
print "worked time from last timbr to now is ",workedTime
if dir == Timbratura.VERSO_ENTRATA:
if (not totalWorkTime):
totalWorkTime = workedTime
else:
totalWorkTime += workedTime
print "last timbr readed is E ... totalWorkTime updated to ",totalWorkTime
if not totalExitTime or (totalExitTime and totalExitTime < companyMinExitTime):
if not totalExitTime:
totalWorkTime -= companyMinExitTime
else:
totalWorkTime -= (companyMinExitTime - totalExitTime)
print "exitTime < minExitTime ... totalWorkTime updated to ",totalWorkTime
print "final totalWorkTime is ",totalWorkTime
print "final totalExitTime is ",totalExitTime
companyDayWorkTime = dayWorkTime[url]
timeToExit = companyDayWorkTime - totalWorkTime
timeOfExit = nowDateTime + timeToExit
workedPercent = round(totalWorkTime.total_seconds() / companyDayWorkTime.total_seconds() * 100)
if workedPercent > 100:
workedPercent = 100
print "final work time percent is: ",workedPercent
timeOfExitString = timeOfExit.strftime(TIME_FORMAT)
if timeToExit.total_seconds() < 0:
timeOfExitString = str(timeOfExit.time())+" ... che stracacchio di uno stracacchio ci fai ancora su quella sedia !!!"
print "final timeOfExit is ",timeOfExit
h,m,s = re.split(":",str(totalWorkTime))
totalWorkTimeString = h+"h "+m+"m "+s+"s"
h,m,s = re.split(":",str(totalExitTime))
totalExitTimeString = h+"h "+m+"m "+s+"s"
workedPercentString = str(workedPercent)
else:
totalWorkTimeString = "0h 0m 0s"
totalExitTimeString = "0h 0m 0s"
timeOfExitString = ""
workedPercentString = "0"
print "no timbr readed"
return {"workedTime":totalWorkTimeString, "exitTime":totalExitTimeString, "timeOfExit":timeOfExitString, "workedPercent":workedPercentString}
| gpl-2.0 | -5,927,728,885,146,939,000 | 32.625592 | 159 | 0.60451 | false |
fastflo/emma | emmalib/Query.py | 1 | 5944 | # -*- coding: utf-8 -*-
# emma
#
# Copyright (C) 2006 Florian Schmidt ([email protected])
# 2014 Nickolay Karnaukhov ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from Constants import *
from query_regular_expression import *
def read_query(query, _start=0):
r = re.compile(r"""
(?s)
(
("(?:[^\\]|\\.)*?")| # double quoted strings
('(?:[^\\]|\\.)*?')| # single quoted strings
(`(?:[^\\]|\\.)*?`)| # backtick quoted strings
(/\*.*?\*/)| # c-style comments
(\#[^\n]*)| # shell-style comments
(\--[^\n]*)| # sql-style comments
([^;]) # everything but a semicolon
)+
""", re.VERBOSE)
rw = re.compile("[ \r\n\t]+")
m = rw.match(query, _start)
if m:
_start = m.end(0)
match = r.match(query, _start)
if not match:
return None, len(query)
return match.start(0), match.end(0)
def read_expression(query, _start=0, concat=True, update_function=None, update_offset=0, icount=0):
r = query_regular_expression
# print "read expr in", query
match = r.search(query, _start)
# if match: print match.groups()
if not match:
return None, None
for i in range(1, match.lastindex + 1):
if match.group(i):
t = match.group(i)
e = match.end(i)
current_token = t
if current_token[len(current_token) - 1] == "(":
while 1:
icount += 1
if update_function is not None and icount >= 10:
icount = 0
update_function(False, update_offset + e)
# print "at", [query[e:e+15]], "..."
exp, end = read_expression(
query, e, False, update_function, update_offset, icount)
# print "got inner exp:", [exp]
if not exp:
break
e = end
if concat:
t += " " + exp
if exp == ")":
break
return t, e
print "should not happen!"
return None, None
def get_order_from_query(query):
current_order = []
r = re.compile(re_src_query_order)
# get current order by clause
match = re.search(r, query)
if not match:
# print "no order found in", [query]
# print "re:", [re_src_query_order]
return current_order
before, order, after = match.groups()
order.lower()
_start = 0
ident = None
while 1:
item = []
while 1:
ident, end = read_expression(order[_start:])
if not ident:
break
if ident == ",":
break
if ident[0] == "`":
ident = ident[1:-1]
item.append(ident)
_start += end
l = len(item)
if l == 0:
break
elif l == 1:
item.append(True)
elif l == 2:
if item[1].lower() == "asc":
item[1] = True
else:
item[1] = False
else:
print "unknown order item:", item, "ignoring..."
item = None
if item:
current_order.append(tuple(item))
if not ident:
break
_start += 1 # comma
return current_order
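# Hedged example of the parser above (assuming re_src_query_order matches the
# trailing ORDER BY clause): for "select * from t order by `a`, b desc" it
# would return [("a", True), ("b", False)], True meaning ascending order.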
def is_query_appendable(query):
"""
@rtype: ()
@type query: str
@param query:
@return:
"""
pat = r'(?i)("(?:[^\\]|\\.)*?")|(\'(?:[^\\]|\\.)*?\')|(`(?:[^\\]|\\.)*?`)|(union)|(select[ \r\n\t]+(.*)[ \r\n\t]+from[ \r\n\t]+(.*))'
r = re.compile(pat)
_start = 0
while 1:
result = re.search(r, query[_start:])
if not result:
return False
_start += result.end()
if result.group(4):
return False # union
if result.group(5) and result.group(6) and result.group(7):
break # found select
return result
# def search_query_end(self, text, _start):
# try:
# r = self.query_end_re
# except:
# r = self.query_end_re = re.compile(r'("(?:[^\\]|\\.)*?")|(\'(?:[^\\]|\\.)*?\')|(`(?:[^\\]|\\.)*?`)|(;)')
# while 1:
# result = re.search(r, text[_start:])
# if not result:
# return None
#
# _start += result.end()
# if result.group(4):
# return _start
# def get_field_list(self, s):
# # todo USE IT!
# fields = []
# _start = 0
# ident = None
# while 1:
# item = []
# while 1:
# ident, end = self.read_expression(s[_start:])
# if not ident:
# break
# if ident == ",":
# break
# if ident[0] == "`":
# ident = ident[1:-1]
# item.append(ident)
# _start += end
# if len(item) == 1:
# fields.append(item[0])
# else:
# fields.append(item)
# if not ident:
# break
# print "found fields:", fields
# return fields
| gpl-2.0 | -3,587,987,859,716,708,400 | 29.797927 | 137 | 0.472577 | false |
rootulp/exercism | python/twelve-days/twelve_days.py | 1 | 1769 | class TwelveDays:
CARDINALS = {
1: 'first',
2: 'second',
3: 'third',
4: 'fourth',
5: 'fifth',
6: 'sixth',
7: 'seventh',
8: 'eighth',
9: 'ninth',
10: 'tenth',
11: 'eleventh',
12: 'twelfth'
}
PHRASES = {
2: 'two Turtle Doves',
3: 'three French Hens',
4: 'four Calling Birds',
5: 'five Gold Rings',
6: 'six Geese-a-Laying',
7: 'seven Swans-a-Swimming',
8: 'eight Maids-a-Milking',
9: 'nine Ladies Dancing',
10: 'ten Lords-a-Leaping',
11: 'eleven Pipers Piping',
12: 'twelve Drummers Drumming'
}
@classmethod
def verses(cls, start, stop):
return "\n".join([cls.verse(i) for i in range(start, stop + 1)]) + "\n"
@classmethod
def verse(cls, verse_num):
return ", ".join([_f for _f in [cls.head(verse_num),
cls.mid(verse_num),
cls.tail(verse_num)] if _f])
@classmethod
def head(cls, verse_num):
return ("On the %(cardinality)s day of Christmas my true love gave to "
"me" % ({"cardinality": cls.CARDINALS[verse_num]}))
@staticmethod
def tail(verse_num):
if verse_num == 1:
return "a Partridge in a Pear Tree.\n"
return "and a Partridge in a Pear Tree.\n"
@classmethod
def mid(cls, verse_num):
if verse_num != 1:
return ", ".join([cls.PHRASES[i] for i in range(verse_num, 1, -1)])
def verse(verse_num):
return TwelveDays.verse(verse_num)
def verses(start, stop):
return TwelveDays.verses(start, stop)
def sing():
return TwelveDays.verses(1, 12)
| mit | -7,615,109,493,597,396,000 | 25.402985 | 79 | 0.508762 | false |
bob-anderson-ok/py-ote | src/pyoteapp/iterative_logl_functions.py | 1 | 23172 | """
A collection of functions for fast MLE fits to light curves.
MLE: Maximum Likelihood Estimation
The 'fit' is to an array of intensities (y[]: float) that comprise the light
curve.
"""
import numpy as np
import sys
from math import log, pi, sqrt, exp
from typing import Tuple
from pyoteapp.solverUtils import calcNumCandidatesFromEventSize
from pyoteapp.solverUtils import calcNumCandidatesFromDandRlimits
from pyoteapp.solverUtils import model, logLikelihoodLine
from pyoteapp.likelihood_calculations import cum_loglikelihood, aicc
from numba import njit, jit
MIN_FLOAT: float = sys.float_info.min
@jit
def add_entry(ynew: float, s: float, s2: float, n: int, calc_var: bool):
"""Adds an entry to the metrics, s, s2, and n.
s: previous value of sum of y[]
s2: previous value of sum of y[]*y[]
n: previous number of entries in the metric
"""
n = n + 1
s = s + ynew
s2 = s2 + ynew * ynew
if calc_var:
var = (s2 - s * s / n) / n # This is sigma**2
else:
var = None
return s, s2, n, var
@jit
def sub_entry(ynew: float, s: float, s2: float, n: int, calc_var: bool):
"""Subtracts an entry from the metrics, s, s2, and n.
s: previous value of sum of y[]
s2: previous value of sum of y[]*y[]
n: previous number of entries in the metric
"""
n = n - 1
s = s - ynew
s2 = s2 - ynew * ynew
if calc_var:
var = (s2 - s * s / n) / n # This is sigma**2
else:
var = None
return s, s2, n, var
def calc_metric_iteratively(y: np.ndarray) -> Tuple[float, float, int, float]:
"""Calculates a metric iteratively (term by term) for test purposes only.
This is expected to be very slow compared to simply using numpy and on an
array of size 1000, the numpy version was 600 times faster.
y: array of floats
"""
# Set initial values for iteration
s = 0.0
s2 = 0.0
n = 0
var = None
for ynew in np.nditer(y):
s, s2, n, var = add_entry(ynew, s, s2, n, calc_var=True)
return s, s2, n, var
@jit
def calc_metric_numpy(y: np.ndarray):
"""Used for timing comparisons and initializing a metric from a large y[].
It calculates the metrics using fast numpy operations.
"""
n = y.size
s2 = np.sum(y * y)
s = y.sum()
var = (s2 - s * s / n) / n # This is sigma**2
return s, s2, n, var
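# Typical usage pattern for the helpers above: initialize the metric in bulk,
# then slide the window one sample at a time instead of recomputing the sums.
#   s, s2, n, var = calc_metric_numpy(y[left:right])       # bulk initialization
#   s, s2, n, var = add_entry(y[right], s, s2, n, True)    # grow window by one
#   s, s2, n, var = sub_entry(y[left], s, s2, n, True)     # shrink window by one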
StdAnswer = Tuple[int, int, float, float, float, float, float]
"""StdAnswer is: d, r, b, a, sigmaB, sigmaA, metric """
@njit # cache=True did not work for this function --- gave a pickling error
def find_best_event_from_min_max_size(
y: np.ndarray, left: int, right: int, min_event: int, max_event: int):
"""Finds the best size and location for an event >= min and <= max"""
# The only time this function is called with a y containing 1 element
# is during the import of this module where the call is made to force the jit
# compiler into action --- a work-around to the pickle error problem.
if y.size == 1:
yield -1.0, 0.0, 0, 0, 0.0, 0.0, 0.0, 0.0, 0.0
max_metric = 0.0
d_best = 0
r_best = 0
b_best = 0.0
a_best = 0.0
sigma_b_best = 0.0
sigma_a_best = 0.0
sigma_a = sigma_b = 0.0 # To satisfy PEP8
not_started = True
num_candidates = calcNumCandidatesFromEventSize(eventType="DandR",
left=left, right=right, minSize=min_event, maxSize=max_event)
solution_counter = 0
for event in range(min_event, max_event + 1):
d, r, b, a, sigma_b, sigma_a, metric, sol_count = \
locate_fixed_event_position(y, left, right, event)
# Initialize the 'best' values
if not_started:
max_metric = metric
d_best = d
r_best = r
b_best = b
a_best = a
sigma_b_best = sigma_b
sigma_a_best = sigma_a
not_started = False
if metric >= max_metric and b > a:
max_metric = metric
d_best = d
r_best = r
b_best = b
a_best = a
sigma_b_best = sigma_b
sigma_a_best = sigma_a
solution_counter += sol_count
# yield 'fractionDone', solution_counter / num_candidates
yield 1.0, solution_counter / num_candidates, 0, 0, 0.0, 0.0, 0.0, 0.0, 0.0
# Here we test for solution being better than straight line
if not solution_is_better_than_straight_line(
y, left, right, d_best, r_best, b_best, a_best, sigma_b_best, sigma_a_best):
# yield 'no event present', solution_counter / num_candidates
yield -1.0, solution_counter / num_candidates, 0, 0, 0.0, 0.0, 0.0, 0.0, 0.0
# yield d_best, r_best, b_best, a_best, sigma_b_best, sigma_a_best, max_metric
yield 0.0, 1.0, d_best, r_best, b_best, a_best, sigma_b_best, sigma_a_best, max_metric
def find_best_r_only_from_min_max_size(
y: np.ndarray, left: int, right: int, min_event: int, max_event: int):
"""Finds the best r-only location for r >= min_event and <= max_event"""
assert min_event >= 1
assert max_event <= right - left
def update_best_solution():
nonlocal max_metric, b_best, a_best, sigma_b, sigma_a
nonlocal r_best
max_metric = metric
b_best = b
a_best = a
sigma_b = sqrt(b_var)
sigma_a = sqrt(a_var)
r_best = r
def calc_metric():
nonlocal a_var, b_var
max_var = max(a_var, b_var, sys.float_info.min)
if a_var <= 0.0:
a_var = max_var
if b_var <= 0.0:
b_var = max_var
return -b_n * log(b_var) - a_n * log(a_var)
# These get changed by the first call to update_best_solution but
# have the be set to proper type to satisfy type checking.
metric = 0.0
max_metric = 0.0
r_best = 0
b_best = 0.0
a_best = 0.0
sigma_b = 0.0
sigma_a = 0.0
r = left + min_event
# Use numpy version of metric calculator to initialize iteration variables
b_s, b_s2, b_n, b_var = calc_metric_numpy(y[r + 1:right + 1])
a_s, a_s2, a_n, a_var = calc_metric_numpy(y[left:r])
b = b_s / b_n
a = a_s / a_n
# Calculate metric for initial position of r
metric = calc_metric()
update_best_solution()
r_final = left + max_event
while r < r_final:
# calc metric for next r position from current position
b_s, b_s2, b_n, b_var = sub_entry(y[r+1], b_s, b_s2, b_n, True)
a_s, a_s2, a_n, a_var = add_entry(y[r], a_s, a_s2, a_n, True)
r += 1
metric = calc_metric()
b = b_s / b_n
a = a_s / a_n
goodSolution = solution_is_better_than_straight_line(
y, left, right, -1, r, b, a, sqrt(b_var), sqrt(a_var), k=3)
if metric > max_metric and b > a and goodSolution:
update_best_solution()
if b_best <= a_best:
# yield 'no event present', 1.0
yield -1.0, 1.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
event_size_found = r_best - left
if event_size_found == max_event or event_size_found == min_event:
# Invalid event size --- invalid limit
yield -1.0, 1.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
# Here we test for the best solution being better than straight line
if not solution_is_better_than_straight_line(
y, left, right, -1, r_best, b, a, sigma_b, sigma_a, k=3):
# yield 'no event present', 1.0
yield -1.0, 1.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
# yield None, r_best, b_best, a_best, sigma_b, sigma_a, max_metric
yield 0.0, 1.0, -1, r_best, b_best, a_best, sigma_b, sigma_a, max_metric
def find_best_d_only_from_min_max_size(
y: np.ndarray, left: int, right: int, min_event: int, max_event: int):
"""Finds the best d-only location for max_event >= event >= min_event """
assert min_event >= 1
assert max_event <= right - left
def update_best_solution():
nonlocal max_metric, b_best, a_best, sigma_b, sigma_a
nonlocal d_best
max_metric = metric
b_best = b
a_best = a
sigma_b = sqrt(b_var)
sigma_a = sqrt(a_var)
d_best = d
def calc_metric():
nonlocal a_var, b_var
max_var = max(a_var, b_var, sys.float_info.min)
if a_var <= 0.0:
a_var = max_var
if b_var <= 0.0:
b_var = max_var
return -b_n * log(b_var) - a_n * log(a_var)
# These get changed by the first call to update_best_solution but
# have the be set to proper type to satisfy type checking.
metric = 0.0
max_metric = 0.0
d_best = 0
b_best = 0.0
a_best = 0.0
sigma_b = 0.0
sigma_a = 0.0
d = right - max_event # Initial d position
# Use numpy version of metric calculator to initialize iteration variables
b_s, b_s2, b_n, b_var = calc_metric_numpy(y[left:d])
a_s, a_s2, a_n, a_var = calc_metric_numpy(y[d+1:right+1])
b = b_s / b_n
a = a_s / a_n
# print(b, a, b_n, a_n)
# Calculate metric for initial position of d
metric = calc_metric()
update_best_solution()
d_final = right - min_event
while d < d_final:
# calc metric for next d position from current position
b_s, b_s2, b_n, b_var = add_entry(y[d], b_s, b_s2, b_n, True)
a_s, a_s2, a_n, a_var = sub_entry(y[d+1], a_s, a_s2, a_n, True)
d += 1
metric = calc_metric()
b = b_s / b_n
a = a_s / a_n
goodSolution = solution_is_better_than_straight_line(
y, left, right, d, -1, b, a, sqrt(b_var), sqrt(a_var), k=3)
if metric > max_metric and b > a and goodSolution:
update_best_solution()
if b_best <= a_best:
# yield 'no event present', 1.0
yield -1.0, 1.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
event_size_found = right - d_best
if event_size_found == max_event or event_size_found == min_event:
# Invalid event size --- invalid limit
yield -1.0, 1.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
if not solution_is_better_than_straight_line(
y, left, right, d_best, -1, b, a, sigma_b, sigma_a, k=3):
# yield 'no event present', 1.0
yield -1.0, 1.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
# yield d_best, None, b_best, a_best, sigma_b, sigma_a, max_metric
yield 0.0, 1.0, d_best, -1, b_best, a_best, sigma_b, sigma_a, max_metric
@njit(cache=True)
def locate_fixed_event_position(
y: np.ndarray, left: int, right: int,
event_size: int) -> Tuple[int, int, float, float, float, float,
float, int]:
"""Finds the best location for a fixed size event"""
d = left
r = d + event_size + 1
# assert(r < right)
# Use numpy version of metric calculator to initialize iteration variables
b_s, b_s2, b_n, b_var = calc_metric_numpy(y[r+1:right+1])
a_s, a_s2, a_n, a_var = calc_metric_numpy(y[left+1:r])
b = b_s / b_n
a = a_s / a_n
# Calculate metric for initial position of event at extreme left
# ========== calc_metric() ===========
max_var = max(a_var, b_var, MIN_FLOAT)
if a_var <= 0.0:
a_var = max_var
if b_var <= 0.0:
b_var = max_var
metric = - b_n * log(b_var) - a_n * log(a_var)
# ========== calc_metric() ===========
# ======= update_best_solution() ========
max_metric = metric
b_max = b
a_max = a
sigma_b = sqrt(b_var)
sigma_a = sqrt(a_var)
d_max = d
r_max = r
# ======= update_best_solution() ========
# The metric used is the variable part of logL(D,R), droping the constant
# part and ignoring a factor of 2. The full logL(D,R) would have been:
#
# -0.5 * (b_n*log(b_var) + a_n*log(a_var) + (b_n + a_n) * (1 + log(2*pi))
#
# We use the reduced form to speed the calculation yet achieve a MLE
# solution
# metrics = [metric] # For use during development
solution_count = 0
while r < right - 1:
# calc metric for next event position from current position
b_s, b_s2, b_n, b_var = add_entry(y[d], b_s, b_s2, b_n, False)
b_s, b_s2, b_n, b_var = sub_entry(y[r+1], b_s, b_s2, b_n, True)
a_s, a_s2, a_n, a_var = add_entry(y[r], a_s, a_s2, a_n, False)
a_s, a_s2, a_n, a_var = sub_entry(y[d + 1], a_s, a_s2, a_n, True)
b = b_s / b_n
a = a_s / a_n
# ========== calc_metric() ===========
max_var = max(a_var, b_var, MIN_FLOAT)
if a_var <= 0.0:
a_var = max_var
if b_var <= 0.0:
b_var = max_var
metric = - b_n * log(b_var) - a_n * log(a_var)
# ========== calc_metric() ===========
# Move to next position
d += 1
r += 1
goodSolution = solution_is_better_than_straight_line(
y, left, right, d, r, b, a, sqrt(b_var), sqrt(a_var), k=4)
if metric > max_metric and b > a and goodSolution:
# ======= update_best_solution() ========
max_metric = metric
b_max = b
a_max = a
sigma_b = sqrt(b_var)
sigma_a = sqrt(a_var)
d_max = d
r_max = r
# ======= update_best_solution() ========
solution_count += 1
return d_max, r_max, b_max, a_max, sigma_b, sigma_a, max_metric, solution_count
@njit # cache=True gave pickling error
def locate_event_from_d_and_r_ranges(
y: np.ndarray, left: int, right: int, d_start: int, d_end: int,
r_start: int, r_end: int):
"""Finds the best size and location for event specified by d & r ranges"""
# The only time this function is called with a y containing 1 element
# is during the import of this module where the call is made to force the jit
# compiler into action --- a work-around to the pickle error problem.
if y.size == 1:
yield -1.0, 0.0, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
num_candidates = calcNumCandidatesFromDandRlimits(
eventType='DandR',
d_start=d_start, d_end=d_end, r_start=r_start, r_end=r_end)
clump_size = np.ceil(num_candidates / 50)
solution_counter = 0
d = d_start
max_metric = 0.0
d_best = 0
r_best = 0
b_s_best = 0.0
a_s_best = 0.0
b_var_best = 0.0
a_var_best = 0.0
b_n_best = 0
a_n_best = 0
not_started = True
while d <= d_end:
# Use numpy version of metric calculator to initialize iteration
# variables for current d and initial r_start
r = r_start
if d > left:
b_sl, b_s2l, b_nl, b_varl = calc_metric_numpy(y[left:d])
# Lefthand wing
else:
b_sl = 0.0
b_s2l = 0.0
b_nl = 0
b_varl = 0.0
b_sr, b_s2r, b_nr, b_varr = calc_metric_numpy(y[r+1:right+1])
# Righthand wing
b_s = b_sl + b_sr
b_s2 = b_s2l + b_s2r
b_n = b_nl + b_nr
b_var = b_varl + b_varr
a_s, a_s2, a_n, a_var = calc_metric_numpy(y[d+1:r])
# ============== calc_metric() =================
max_var = max(a_var, b_var, MIN_FLOAT)
if a_var <= 0.0:
a_var = max_var
if b_var <= 0.0:
b_var = max_var
metric = - b_n * log(b_var) - a_n * log(a_var)
# ============== calc_metric() =================
if not_started:
# =========== update_best_solution() =======
max_metric = metric
d_best = d
r_best = r
b_s_best = b_s
a_s_best = a_s
b_var_best = b_var
a_var_best = a_var
b_n_best = b_n
a_n_best = a_n
# =========== update_best_solution() =======
not_started = False
b = b_s / b_n
a = a_s / a_n
if metric >= max_metric and b > a:
# =========== update_best_solution() =======
max_metric = metric
d_best = d
r_best = r
b_s_best = b_s
a_s_best = a_s
b_var_best = b_var
a_var_best = a_var
b_n_best = b_n
a_n_best = a_n
# =========== update_best_solution() =======
while r < r_end:
r += 1
b_s, b_s2, b_n, b_var = sub_entry(y[r], b_s, b_s2, b_n, True)
a_s, a_s2, a_n, a_var = add_entry(y[r-1], a_s, a_s2, a_n, True)
# ============== calc_metric() =================
max_var = max(a_var, b_var, MIN_FLOAT)
if a_var <= 0.0:
a_var = max_var
if b_var <= 0.0:
b_var = max_var
metric = - b_n * log(b_var) - a_n * log(a_var)
# ============== calc_metric() =================
b = b_s / b_n
a = a_s / a_n
goodSolution = solution_is_better_than_straight_line(
y, left, right, d, r, b, a, sqrt(b_var), sqrt(a_var), k=4)
if metric >= max_metric and b > a and goodSolution:
# =========== update_best_solution() =======
max_metric = metric
d_best = d
r_best = r
b_s_best = b_s
a_s_best = a_s
b_var_best = b_var
a_var_best = a_var
b_n_best = b_n
a_n_best = a_n
# =========== update_best_solution() =======
solution_counter += 1
if solution_counter % clump_size == 0:
# yield 'fractionDone', solution_counter / num_candidates
yield 1.0, solution_counter / num_candidates, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
d += 1
b = b_s_best / b_n_best
a = a_s_best / a_n_best
sigma_b = sqrt(b_var_best)
sigma_a = sqrt(a_var_best)
# Here we test for solution being better than straight line
if not solution_is_better_than_straight_line(
y, left, right, d_best, r_best, b, a, sigma_b, sigma_a, k=4):
# yield 'no event present', solution_counter / num_candidates
yield -1.0, solution_counter / num_candidates, -1, -1, 0.0, 0.0, 0.0, 0.0, 0.0
yield 0.0, 1.0, d_best, r_best, b, a, sigma_b, sigma_a, max_metric
@njit(cache=True)
def solution_is_better_than_straight_line(y, left=-1, right=-1, d=-1, r=-1, b=0.0, a=0.0, sigma_b=0.0,
sigma_a=0.0, k=4):
# The only time that the result of this routine is important is for very
# low snr signals. In that case, sigma_b and sigma_a are approximately
# equal anyway. For other cases, we want to 'score' the straight line
# against the signal in as equal a manner as possible, so we will use a
# common noise value for all points. Here we calculate that value...
big_sigma = np.float64(max(sigma_b, sigma_a))
# And here we make sure it never gets too small...
if big_sigma < (b - a) / 100.0: # 100 == max snr
big_sigma = (b - a) / 100.0
# If the current snr is greater than 3, the solution is always better
# than a straight line, so we skip the calculations.
if (b - a) / big_sigma > 3.0:
return True
# If this point is reached, a valid scoring needs to be performed.
num_pts = y.size
m, sigma = model(
B=b, A=a, D=d, R=r, sigmaB=big_sigma, sigmaA=big_sigma,
numPts=num_pts)
solution_logl = cum_loglikelihood(y, m, sigma, left, right)
# lineScore = logLikelihoodLine(y, sigmaB=big_sigma, left=left, right=right)
lineScore = logLikelihoodLine(y, sigmaB=np.sqrt(np.var(y)), left=left, right=right)
aiccSol = aicc(solution_logl, right - left + 1, k)
aiccLine = aicc(lineScore, right - left + 1, 1)
if aiccSol < aiccLine:
pLine = exp(-(aiccLine - aiccSol) / 2)
else:
pLine = 1.00
if pLine > 0.001:
return False
else:
return True
def calc_logl_from_metric(s: float, s2: float, n: int) -> Tuple[float, float]:
sigma2 = (s2 / n - (s / n) * (s / n))
# -log(sqrt(2*pi)) = -0.9189385332046727
return -n * 0.9189385332046727 - n / 2.0 - n * log(sigma2) / 2.0, sigma2
def cum_loglikelihood_raw(y, m, sigma):
""" numpy accelerated sum of loglikelihoods --- for test purposes
Args:
y (ndarray): measured values
m (ndarray): associated mean values (the 'model')
sigma (ndarray): associated stdev values
"""
n = len(y)
ans = -n * np.log(np.sqrt(2*pi))
ans -= np.sum(np.log(sigma))
ans -= (np.sum((y - m) ** 2 / sigma ** 2) / 2.0)
return ans, np.var(y)
def loglikelihood(y, m, sigma):
""" calculate ln(likelihood) of a single point from a gaussian distribution
Args:
y (float): measured value
m (float): mean (expected model value)
sigma (float): stdev of measurements
Returns:
natural logarithm of un-normalized probability based on Gaussian
distribution
"""
# log(x) is natural log (base e)
# -log(sqrt(2*pi)) = -0.9189385332046727 = -log(2*pi)/2
# t1 = -log(sqrt(2*pi))
t1 = -0.9189385332046727
t2 = -log(sigma)
t3 = -(y - m) ** 2 / (2 * sigma ** 2)
return t1 + t2 + t3
@njit
def bob():
print("Hello from Bob")
num_pts = 30
m, sigma = model(
B=4.0, A=1.0, D=19, R=29, sigmaB=1.0, sigmaA=1.0,
numPts=num_pts)
# m += np.random.normal(0.0, 0.01, 30)
# print(m.size, sigma.size)
# model.inspect_types()
# ans = solution_is_better_than_straight_line(m, 0, 29, 10, 20, 4.0, 1.0, 1.0, 1.0)
# print(ans)
# solution_is_better_than_straight_line.inspect_types()
left = 1
right = num_pts
evt = 9
noise_sigma = 0.01
noise = np.random.normal(0.0, noise_sigma, num_pts)
ans = locate_fixed_event_position(m + noise, left, right, evt)
print(ans)
noise = np.random.normal(0.0, noise_sigma, num_pts)
ans = locate_fixed_event_position(m + noise, left, right, evt)
print(ans)
noise = np.random.normal(0.0, noise_sigma, num_pts)
ans = locate_fixed_event_position(m + noise, left, right, evt)
print(ans)
print(m)
# ans = locate_fixed_event_position(m, 0, 29, 10)
# print(ans)
# ans = locate_fixed_event_position(m, 0, 29, 11)
# locate_fixed_event_position.inspect_types()
# print(ans)
# ans = calc_metric_numpy(m)
# calc_metric_numpy.inspect_types()
# print(ans)
# We perform the following calls to force the njit of the functions. This hides the
# compile time from the user (extends the load time a bit) and thus eliminates
# the slightly disconcerting 1 or 2 second delay before this functions start to
# operate if we wait until the user first invokes them after starting ppyote. We
# do this as a work-around to the pickle problem that keeps normal caching from working.
_ = find_best_event_from_min_max_size(np.zeros(1), 0, 0, 0, 0)
_ = locate_event_from_d_and_r_ranges(np.zeros(1), 0, 0, 0, 0, 0, 0)
if __name__ == "__main__":
bob()
# bob.inspect_types()
pass
| mit | -327,251,744,266,116,200 | 30.02008 | 113 | 0.541516 | false |
alexarnautu/deenux | src/components/player/trackcontrols/TrackcontrolsController.py | 1 | 1518 | from src.components.Controller import Controller
from random import shuffle
from src.deenuxapi.deezer.DeezerProvider import DeezerProvider
from src.utils.Sequence import Sequence
class TrackcontrolsController(Controller):
def __init__(self, view, context):
super(TrackcontrolsController, self).__init__(view, context)
def on_track_play_start(self, sender, content_url, is_playing, active):
pass
def on_shuffle_click(self):
self.context.deezer.jukebox.toggle_random()
self.view.shuffle_button.setText('not' if self.context.shuffle else 'sh')
self.context.shuffle = not self.context.shuffle
def on_next_clicked(self):
self.context.deezer.jukebox.start(
self.context.mix[self.context.sequence.next]
)
def on_prev_clicked(self, *args):
self.context.deezer.jukebox.start(
self.context.mix[self.context.sequence.prev]
)
def on_play_pause_click(self):
jb = self.context.deezer.jukebox
if not self.context.player.active:
jb.start(self.context.to_play)
else:
jb.toggle_play_pause()
def on_volume_change(self, val):
was_blocked = self.view.volume_slider.blockSignals(True)
self.context.deezer.jukebox.set_volume(val * 5)
self.view.volume_slider.blockSignals(was_blocked)
def on_track_stop(self):
self.view.active = False
self.view.play_pause_button.setText('▶')
self.on_next_clicked()
| gpl-3.0 | 6,504,543,133,638,958,000 | 30.583333 | 81 | 0.662929 | false |
vmonaco/rpscontest | agents/switching11.py | 1 | 2056 | # See http://overview.cc/RockPaperScissors for more information about rock, paper, scissors
# Similar to switching10 with an additional beat2 and complement function
import random
if input == "":
hist = ""
opp_played = []
beat = {'P': 'S', 'S': 'R', 'R': 'P'}
beat2 = {'PP': 'S', 'SS': 'R', 'RR':'P', 'PS': 'S', 'PR': 'P', 'RS': 'R', 'RP': 'P', 'SP': 'S', 'SR': 'R'}
complement = {'PS': 'R', 'PR': 'S', 'RS': 'P', 'RP': 'S', 'SP': 'R', 'SR': 'P'}
score = {'RR': 0, 'PP': 0, 'SS': 0, 'PR': 1, 'RS': 1, 'SP': 1, 'RP':-1, 'SR':-1, 'PS':-1, }
output = random.choice(["R", "P", "S"])
candidates1 = [output, output]
candidates2 = [output] * 5
performance1 = [0, 0]
performance2 = [(0, 0)] * 5
else:
hist += output.lower() + input
opp_played.append(input)
performance1[0] += score[candidates1[0] + input]
performance1[1] += score[candidates1[1] + input]
for i, p in enumerate(candidates2):
performance2[i] = ({1:performance2[i][0] + 1, 0: performance2[i][0], -1: 0}[score[p + input]],
performance2[i][1] + score[p + input])
index1 = performance1.index(max(performance1))
index2 = performance2.index(max(performance2, key=lambda x: x[0] ** 3 + x[1]))
candidates1[1] = beat[random.choice(opp_played)]
for length in range(min(10, len(hist) - 2), 0, -2):
search = hist[-length:]
idx = hist.rfind(search, 0, -2)
if idx != -1:
my = hist[idx + length].upper()
opp = hist[idx + length + 1]
candidates2[0] = beat[opp]
candidates2[1] = beat[beat[my]]
candidates2[2] = beat2[beat[my] + beat[beat[opp]]]
candidates2[3] = beat2[beat[opp] + beat[beat[my]]]
candidates2[4] = complement[''.join(sorted(set(candidates2[0] + candidates2[1] + candidates2[3])))]
break
else:
        candidates2 = [random.choice(['R', 'P', 'S'])] * 5  # no usable history match: fall back to a random guess
candidates1[0] = candidates2[index2]
output = candidates1[index1]
| bsd-3-clause | -7,585,800,557,904,085,000 | 39.313725 | 111 | 0.529669 | false |
joshwatson/binaryninja-api | python/examples/kaitai/bson.py | 1 | 18953 | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from .kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
import collections
from enum import Enum
if parse_version(ks_version) < parse_version('0.7'):
raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
class Bson(KaitaiStruct):
"""BSON, short for Binary JSON, is a binary-encoded serialization of JSON-like documents. Like JSON, BSON supports the embedding of documents and arrays within other documents and arrays. BSON also contains extensions that allow representation of data types that are not part of the JSON spec. For example, BSON has a Date type and a BinData type. BSON can be compared to binary interchange formats, like Protocol Buffers. BSON is more "schemaless" than Protocol Buffers, which can give it an advantage in flexibility but also a slight disadvantage in space efficiency (BSON has overhead for field names within the serialized data). BSON was designed to have the following three characteristics:
* Lightweight. Keeping spatial overhead to a minimum is important for any data representation format, especially when used over the network.
* Traversable. BSON is designed to be traversed easily. This is a vital property in its role as the primary data representation for MongoDB.
* Efficient. Encoding data to BSON and decoding from BSON can be performed very quickly in most languages due to the use of C data types.
"""
SEQ_FIELDS = ["len", "fields", "terminator"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len']['start'] = self._io.pos()
self.len = self._io.read_s4le()
self._debug['len']['end'] = self._io.pos()
self._debug['fields']['start'] = self._io.pos()
self._raw_fields = self._io.read_bytes((self.len - 5))
io = KaitaiStream(BytesIO(self._raw_fields))
self.fields = self._root.ElementsList(io, self, self._root)
self.fields._read()
self._debug['fields']['end'] = self._io.pos()
self._debug['terminator']['start'] = self._io.pos()
self.terminator = self._io.ensure_fixed_contents(b"\x00")
self._debug['terminator']['end'] = self._io.pos()
class Timestamp(KaitaiStruct):
"""Special internal type used by MongoDB replication and sharding. First 4 bytes are an increment, second 4 are a timestamp."""
SEQ_FIELDS = ["increment", "timestamp"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['increment']['start'] = self._io.pos()
self.increment = self._io.read_u4le()
self._debug['increment']['end'] = self._io.pos()
self._debug['timestamp']['start'] = self._io.pos()
self.timestamp = self._io.read_u4le()
self._debug['timestamp']['end'] = self._io.pos()
class BinData(KaitaiStruct):
"""The BSON "binary" or "BinData" datatype is used to represent arrays of bytes. It is somewhat analogous to the Java notion of a ByteArray. BSON binary values have a subtype. This is used to indicate what kind of data is in the byte array. Subtypes from zero to 127 are predefined or reserved. Subtypes from 128-255 are user-defined."""
class Subtype(Enum):
generic = 0
function = 1
byte_array_deprecated = 2
uuid_deprecated = 3
uuid = 4
md5 = 5
custom = 128
SEQ_FIELDS = ["len", "subtype", "content"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len']['start'] = self._io.pos()
self.len = self._io.read_s4le()
self._debug['len']['end'] = self._io.pos()
self._debug['subtype']['start'] = self._io.pos()
self.subtype = KaitaiStream.resolve_enum(self._root.BinData.Subtype, self._io.read_u1())
self._debug['subtype']['end'] = self._io.pos()
self._debug['content']['start'] = self._io.pos()
_on = self.subtype
if _on == self._root.BinData.Subtype.byte_array_deprecated:
self._raw_content = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_content))
self.content = self._root.BinData.ByteArrayDeprecated(io, self, self._root)
self.content._read()
else:
self.content = self._io.read_bytes(self.len)
self._debug['content']['end'] = self._io.pos()
class ByteArrayDeprecated(KaitaiStruct):
"""The BSON "binary" or "BinData" datatype is used to represent arrays of bytes. It is somewhat analogous to the Java notion of a ByteArray. BSON binary values have a subtype. This is used to indicate what kind of data is in the byte array. Subtypes from zero to 127 are predefined or reserved. Subtypes from 128-255 are user-defined."""
SEQ_FIELDS = ["len", "content"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len']['start'] = self._io.pos()
self.len = self._io.read_s4le()
self._debug['len']['end'] = self._io.pos()
self._debug['content']['start'] = self._io.pos()
self.content = self._io.read_bytes(self.len)
self._debug['content']['end'] = self._io.pos()
class ElementsList(KaitaiStruct):
SEQ_FIELDS = ["elements"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['elements']['start'] = self._io.pos()
self.elements = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['elements']:
self._debug['elements']['arr'] = []
self._debug['elements']['arr'].append({'start': self._io.pos()})
_t_elements = self._root.Element(self._io, self, self._root)
_t_elements._read()
self.elements.append(_t_elements)
self._debug['elements']['arr'][len(self.elements) - 1]['end'] = self._io.pos()
i += 1
self._debug['elements']['end'] = self._io.pos()
class Cstring(KaitaiStruct):
SEQ_FIELDS = ["str"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['str']['start'] = self._io.pos()
self.str = (self._io.read_bytes_term(0, False, True, True)).decode(u"UTF-8")
self._debug['str']['end'] = self._io.pos()
class String(KaitaiStruct):
SEQ_FIELDS = ["len", "str", "terminator"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len']['start'] = self._io.pos()
self.len = self._io.read_s4le()
self._debug['len']['end'] = self._io.pos()
self._debug['str']['start'] = self._io.pos()
self.str = (self._io.read_bytes((self.len - 1))).decode(u"UTF-8")
self._debug['str']['end'] = self._io.pos()
self._debug['terminator']['start'] = self._io.pos()
self.terminator = self._io.ensure_fixed_contents(b"\x00")
self._debug['terminator']['end'] = self._io.pos()
class Element(KaitaiStruct):
class BsonType(Enum):
min_key = -1
end_of_object = 0
number_double = 1
string = 2
document = 3
array = 4
bin_data = 5
undefined = 6
object_id = 7
boolean = 8
utc_datetime = 9
jst_null = 10
reg_ex = 11
db_pointer = 12
javascript = 13
symbol = 14
code_with_scope = 15
number_int = 16
timestamp = 17
number_long = 18
number_decimal = 19
max_key = 127
SEQ_FIELDS = ["type_byte", "name", "content"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['type_byte']['start'] = self._io.pos()
self.type_byte = KaitaiStream.resolve_enum(self._root.Element.BsonType, self._io.read_u1())
self._debug['type_byte']['end'] = self._io.pos()
self._debug['name']['start'] = self._io.pos()
self.name = self._root.Cstring(self._io, self, self._root)
self.name._read()
self._debug['name']['end'] = self._io.pos()
self._debug['content']['start'] = self._io.pos()
_on = self.type_byte
if _on == self._root.Element.BsonType.code_with_scope:
self.content = self._root.CodeWithScope(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.reg_ex:
self.content = self._root.RegEx(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.number_double:
self.content = self._io.read_f8le()
elif _on == self._root.Element.BsonType.symbol:
self.content = self._root.String(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.timestamp:
self.content = self._root.Timestamp(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.number_int:
self.content = self._io.read_s4le()
elif _on == self._root.Element.BsonType.document:
self.content = Bson(self._io)
self.content._read()
elif _on == self._root.Element.BsonType.object_id:
self.content = self._root.ObjectId(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.javascript:
self.content = self._root.String(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.utc_datetime:
self.content = self._io.read_s8le()
elif _on == self._root.Element.BsonType.boolean:
self.content = self._io.read_u1()
elif _on == self._root.Element.BsonType.number_long:
self.content = self._io.read_s8le()
elif _on == self._root.Element.BsonType.bin_data:
self.content = self._root.BinData(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.string:
self.content = self._root.String(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.db_pointer:
self.content = self._root.DbPointer(self._io, self, self._root)
self.content._read()
elif _on == self._root.Element.BsonType.array:
self.content = Bson(self._io)
self.content._read()
elif _on == self._root.Element.BsonType.number_decimal:
self.content = self._root.F16(self._io, self, self._root)
self.content._read()
self._debug['content']['end'] = self._io.pos()
class DbPointer(KaitaiStruct):
SEQ_FIELDS = ["namespace", "id"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['namespace']['start'] = self._io.pos()
self.namespace = self._root.String(self._io, self, self._root)
self.namespace._read()
self._debug['namespace']['end'] = self._io.pos()
self._debug['id']['start'] = self._io.pos()
self.id = self._root.ObjectId(self._io, self, self._root)
self.id._read()
self._debug['id']['end'] = self._io.pos()
class U3(KaitaiStruct):
"""Implements unsigned 24-bit (3 byte) integer.
"""
SEQ_FIELDS = ["b1", "b2", "b3"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['b1']['start'] = self._io.pos()
self.b1 = self._io.read_u1()
self._debug['b1']['end'] = self._io.pos()
self._debug['b2']['start'] = self._io.pos()
self.b2 = self._io.read_u1()
self._debug['b2']['end'] = self._io.pos()
self._debug['b3']['start'] = self._io.pos()
self.b3 = self._io.read_u1()
self._debug['b3']['end'] = self._io.pos()
@property
def value(self):
if hasattr(self, '_m_value'):
return self._m_value if hasattr(self, '_m_value') else None
self._m_value = ((self.b1 | (self.b2 << 8)) | (self.b3 << 16))
return self._m_value if hasattr(self, '_m_value') else None
class CodeWithScope(KaitaiStruct):
SEQ_FIELDS = ["id", "source", "scope"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['id']['start'] = self._io.pos()
self.id = self._io.read_s4le()
self._debug['id']['end'] = self._io.pos()
self._debug['source']['start'] = self._io.pos()
self.source = self._root.String(self._io, self, self._root)
self.source._read()
self._debug['source']['end'] = self._io.pos()
self._debug['scope']['start'] = self._io.pos()
self.scope = Bson(self._io)
self.scope._read()
self._debug['scope']['end'] = self._io.pos()
class F16(KaitaiStruct):
"""128-bit IEEE 754-2008 decimal floating point."""
SEQ_FIELDS = ["str", "exponent", "significand_hi", "significand_lo"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['str']['start'] = self._io.pos()
self.str = self._io.read_bits_int(1) != 0
self._debug['str']['end'] = self._io.pos()
self._debug['exponent']['start'] = self._io.pos()
self.exponent = self._io.read_bits_int(15)
self._debug['exponent']['end'] = self._io.pos()
self._debug['significand_hi']['start'] = self._io.pos()
self.significand_hi = self._io.read_bits_int(49)
self._debug['significand_hi']['end'] = self._io.pos()
self._io.align_to_byte()
self._debug['significand_lo']['start'] = self._io.pos()
self.significand_lo = self._io.read_u8le()
self._debug['significand_lo']['end'] = self._io.pos()
class ObjectId(KaitaiStruct):
"""https://docs.mongodb.com/manual/reference/method/ObjectId/."""
SEQ_FIELDS = ["epoch_time", "machine_id", "process_id", "counter"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['epoch_time']['start'] = self._io.pos()
self.epoch_time = self._io.read_u4le()
self._debug['epoch_time']['end'] = self._io.pos()
self._debug['machine_id']['start'] = self._io.pos()
self.machine_id = self._root.U3(self._io, self, self._root)
self.machine_id._read()
self._debug['machine_id']['end'] = self._io.pos()
self._debug['process_id']['start'] = self._io.pos()
self.process_id = self._io.read_u2le()
self._debug['process_id']['end'] = self._io.pos()
self._debug['counter']['start'] = self._io.pos()
self.counter = self._root.U3(self._io, self, self._root)
self.counter._read()
self._debug['counter']['end'] = self._io.pos()
class RegEx(KaitaiStruct):
SEQ_FIELDS = ["pattern", "options"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['pattern']['start'] = self._io.pos()
self.pattern = self._root.Cstring(self._io, self, self._root)
self.pattern._read()
self._debug['pattern']['end'] = self._io.pos()
self._debug['options']['start'] = self._io.pos()
self.options = self._root.Cstring(self._io, self, self._root)
self.options._read()
self._debug['options']['end'] = self._io.pos()
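# --- Usage sketch (added for illustration; not part of the generated file) ---
# The "sample.bson" path is an assumption. With this debug-style output the
# top-level _read() must be called explicitly after construction.
#
# with open("sample.bson", "rb") as f:
#     doc = Bson(KaitaiStream(BytesIO(f.read())))
#     doc._read()
#     for element in doc.fields.elements:
#         print(element.name.str, element.type_byte)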
| mit | -7,375,716,033,907,808,000 | 46.029777 | 699 | 0.542289 | false |
shopkeep/shpkpr | shpkpr/deployment/standard.py | 1 | 1338 | # stdlib imports
import logging
logger = logging.getLogger(__name__)
class StandardDeployment(object):
"""StandardDeployment implements Marathon's basic deployment workflow and
uses the primitives provided by the Marathon API to perform a standard
rolling deploy according to application settings.
This deployment strategy is best suited for non-web-facing applications or
those that can tolerate minor downtime during deployment e.g. consumer or
worker applications.
"""
def __init__(self, marathon_client, timeout, app_definitions, **kw):
self.marathon_client = marathon_client
self.timeout = timeout
self.app_definitions = app_definitions
def execute(self, force=False):
"""Execute standard Marathon deployment.
"""
app_ids = ", ".join([a["id"] for a in self.app_definitions])
logger.info("Executing standard deployment: {0}".format(app_ids))
deployment = self.marathon_client.deploy(
self.app_definitions,
force=force,
)
logger.info("Waiting for marathon deployment to complete: {0}".format(deployment.deployment_id))
result = deployment.wait(timeout=self.timeout)
logger.info("Marathon deployment complete: {0}".format(deployment.deployment_id))
return result
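# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes a `marathon_client` object exposing deploy() as used above, and app
# definitions given as plain dicts with at least an "id" key.
#
# apps = [{"id": "/myteam/worker", "instances": 3}]
# StandardDeployment(marathon_client, timeout=300, app_definitions=apps).execute()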
| mit | -2,837,895,533,692,400,000 | 35.162162 | 104 | 0.684604 | false |
andreagrandi/drf3-test | drftest/shop/tests/factories.py | 1 | 1174 | import factory
from django.contrib.auth.models import User
from django.utils.timezone import now
from shop.models import (Product, Stamp, Order, OrderDetails, Voucher)
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = User
first_name = 'DRF'
last_name = 'Test'
username = 'drftest'
password = 'drftest'
is_active = True
is_superuser = False
last_login = now()
date_joined = now()
class ProductFactory(factory.DjangoModelFactory):
FACTORY_FOR = Product
name = "Product 1"
collect_stamp = True
class StampFactory(factory.DjangoModelFactory):
FACTORY_FOR = Stamp
user = factory.SubFactory(UserFactory)
redeemed = False
class OrderFactory(factory.DjangoModelFactory):
FACTORY_FOR = Order
user = factory.SubFactory(UserFactory)
date = now()
class OrderDetailsFactory(factory.DjangoModelFactory):
FACTORY_FOR = OrderDetails
order = factory.SubFactory(OrderFactory)
product = factory.SubFactory(ProductFactory)
quantity = 4
class VoucherFactory(factory.DjangoModelFactory):
FACTORY_FOR = Voucher
user = factory.SubFactory(UserFactory)
redeemed = False
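# --- Usage sketch (added for illustration; not part of the original module) ---
# Inside a Django test, a single factory call builds the whole object graph:
#
# details = OrderDetailsFactory()      # also creates a User, Order and Product
# assert details.quantity == 4
# stamp = StampFactory(user=details.order.user, redeemed=True)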
| mit | 1,264,818,017,404,281,300 | 21.150943 | 70 | 0.719761 | false |
jinook0707/CATOS_alpha | 2016_paper/utils/Util_m_drawer.py | 1 | 10078 | '''
Util_m_drawer.py
This generates one JPG file per MP4 file.
The generated JPG file shows the movement pattern
that appeared throughout the MP4 file.
(Movement summary of the movie file)
--------------------------------------------------------------------
Copyright (C) 2013 Jinook Oh ([email protected])
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os, sys
from glob import glob
from math import sqrt
from copy import copy
import cv ###
FLAG_DISPLAY_JPG = False
#------------------------------------------------------------------------------------
class M_drawer:
def __init__(self, target_path):
self.target_path = target_path
if FLAG_DISPLAY_JPG == True: cv.NamedWindow('disp', cv.CV_WINDOW_NORMAL)
if FLAG_DISPLAY_JPG == True: cv.MoveWindow('disp', 50, 50)
self.total_nFrames = 0
#---------------------------------------------------------------------------------
def get_FG_rect_value(self, inString):
# get overall foreground rect value
if inString.strip() == '(-1/ -1/ -1/ -1)': return -1
else:
b_rect = inString.replace("(","").replace(")","").split("/")
b_rect = (int(b_rect[0])*2, int(b_rect[1])*2, int(b_rect[2])*2, int(b_rect[3])*2)
return b_rect
#---------------------------------------------------------------------------------
def get_FGB_center_pt(self, fgb_pts):
# get each foreground blob's center point
number_of_pts = len(fgb_pts)
x = 0; y = 0
for i in range(number_of_pts):
x += fgb_pts[i][0]
y += fgb_pts[i][1]
x = x/number_of_pts
y = y/number_of_pts
return [x,y]
#---------------------------------------------------------------------------------
def get_img_from_video(self, cap_vf):
frames = []
nFrames = int(cv.GetCaptureProperty(cap_vf, cv.CV_CAP_PROP_FRAME_COUNT))
fps = int(cv.GetCaptureProperty(cap_vf, cv.CV_CAP_PROP_FPS))
self.total_nFrames += nFrames
for i in xrange(nFrames):
frame = cv.QueryFrame(cap_vf)
if i == 0 or i == nFrames/2: # store first, middle frame
frames.append(cv.CloneImage(frame))
### try to store the last frame, but if it fails, ignore it.
if i == nFrames-1:
try: frames.append(cv.CloneImage(frame))
except: pass
### Add differences between the middle-first & last-first onto the first frame
diff_img0 = cv.CreateImage(cv.GetSize(frames[0]), 8, 3)
diff_img1 = cv.CreateImage(cv.GetSize(frames[0]), 8, 3)
cv.AbsDiff(frames[0], frames[1], diff_img0)
if len(frames) > 2: cv.AbsDiff(frames[0], frames[2], diff_img1)
cv.Add(diff_img0, frames[0], frames[0])
if len(frames) > 2: cv.Add(diff_img1, frames[0], frames[0])
return frames[0]
#---------------------------------------------------------------------------------
def run(self):
#first_img_path = os.path.join(self.target_path, '_first_color_img_cID00.jpg')
#first_img = cv.LoadImage(first_img_path)
d_color1 = (0, 0, 0)
d_color2 = (255, 255, 255)
cat_margin = 30 # due to dilate function, etc
font = cv.InitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 3, 8)
dr_cnt = 0 # counter for drawing
for f in glob(os.path.join(self.target_path, '*_MR.csv')): # open all the MovementRecord files
mr_f = open(f, 'r')
mr_f_lines = mr_f.readlines()
jpg_file = f.replace(".csv", ".jpg")
if not os.path.isfile(jpg_file): # jpg file doesn't exist
video_file = f.replace("_MR.csv", ".mp4")
if os.path.isfile(video_file): # video file exist
cap_vf = cv.CaptureFromFile(video_file)
img_from_video = self.get_img_from_video(cap_vf)
last_center_pt = (-1,-1)
last_center_pt_b = (-1,-1)
last_center_pt_white = (-1,-1)
last_center_pt_black = (-1,-1)
lines_cnt = len(mr_f_lines)
for i in range(2, lines_cnt):
items = mr_f_lines[i].split(",")
if len(items) > 1:
number_of_blobs = int(items[4])
#d_color_e_dec = 255-(float(i)/lines_cnt*255)
d_color_e_inc = float(i)/lines_cnt*255
d_color = (d_color_e_inc, d_color_e_inc, d_color_e_inc)
'''
### Drawing for movement rects
b_rect = items[0].replace("(","").replace(")","").split("/")
b_rect = (int(b_rect[0]), int(b_rect[1]), int(b_rect[2]), int(b_rect[3]))
d_color_e = 255-(float(i+1)/lines_cnt*255)
d_color = (d_color_e, d_color_e, d_color_e)
cv.Rectangle(img_from_video, (b_rect[0],b_rect[1]), (b_rect[0]+b_rect[2],b_rect[1]+b_rect[3]), d_color1, 1)
center_pt = items[1].split("/")
center_pt = (int(center_pt[0]), int(center_pt[1]))
cv.Circle(img_from_video, center_pt, 3, d_color, 1)
if last_center_pt != (-1,-1): cv.Line(img_from_video, last_center_pt, center_pt, d_color, 1)
last_center_pt = tuple(center_pt)
'''
'''
### rect bounding all the foreground features
b_rect = self.get_FG_rect_value(items[2])
#cv.Rectangle(img_from_video, (b_rect[0],b_rect[1]), (b_rect[0]+b_rect[2],b_rect[1]+b_rect[3]), d_color, 1)
if b_rect != -1:
### Drawing the center point of the movement whole bounding rect (and the connecting lines between the center points.)
center_pt = items[3].split("/")
center_pt = (int(center_pt[0])*2, int(center_pt[1])*2)
cv.Circle(img_from_video, center_pt, 5, d_color, 2)
if last_center_pt_b != (-1,-1): cv.Line(img_from_video, last_center_pt_b, center_pt, d_color, 1)
last_center_pt_b = tuple(center_pt)
'''
### rects for each foreground blob
fB_grouped_pts = eval(items[8].replace("/", ","))
if fB_grouped_pts != []:
for fB_idx in range(len(fB_grouped_pts)):
fgb_pts = fB_grouped_pts[fB_idx]
if fgb_pts != '[]': fgb_center_pt = self.get_FGB_center_pt(fgb_pts)
fgb_center_pt = (fgb_center_pt[0]*2, fgb_center_pt[1]*2)
cv.Circle(img_from_video, fgb_center_pt, 3, d_color, 2)
if fB_idx > 0:
cv.Line(img_from_video, last_fgb_center_pt, fgb_center_pt, d_color, 1)
last_fgb_center_pt = copy(fgb_center_pt)
### Drawing the center point of the whitish blob
if items[7].strip() == '-1/-1': wbpt = (-1,-1)
else:
wbpt = items[7].split("/") # white blob center point
wbpt = (int(wbpt[0])*2, int(wbpt[1])*2)
if wbpt != (-1, -1):
# draw a rectangle at the center of white blob(s)
cv.Rectangle(img_from_video, (wbpt[0]-2,wbpt[1]-2), (wbpt[0]+2,wbpt[1]+2), (0,0,125), 1)
if FLAG_DISPLAY_JPG == True: cv.ShowImage('disp', img_from_video)
mr_img_path = f.replace(".csv", ".jpg")
cv.SaveImage(mr_img_path, img_from_video)
dr_cnt += 1
print "An image, %s, is generated."%mr_img_path
mr_f.close()
else:
            # Video file doesn't exist (it wasn't generated because there weren't enough JPEG files).
            # Usually this is a meaningless, very short record (or no record at all).
mr_f.close()
os.remove(f) # remove the MR-csv file
for f in glob(os.path.join(self.target_path, '*.log')):
log_f = open(f, 'a')
log_f.write("\n* Total number of frames of movie files: %i\n"%(self.total_nFrames))
log_f.close()
print "Number of images generated: %i"%(dr_cnt)
print "Image drawing process is complete."
#------------------------------------------------------------------------------------
if __name__ == '__main__':
path = os.getcwd()
input_path = ''
if len(sys.argv) > 1:
for i in range(1, len(sys.argv)): input_path += sys.argv[i] + ' '
input_path = input_path.strip()
else: input_path = 'output'
path = os.path.join(path, input_path)
mDrawer = M_drawer(path)
mDrawer.run()
| gpl-3.0 | 8,860,168,671,217,057,000 | 48.64532 | 150 | 0.465668 | false |
shiehinms/vminspector | ui/ui_newconnectdlg.py | 1 | 2422 | # -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class Ui_NewConnectDlg(object):
def setupUi(self, NewConnectDlg):
NewConnectDlg.setWindowTitle(u"Connect VHD")
self.gridlayout = QGridLayout(NewConnectDlg)
self.gridlayout.setMargin(9)
self.gridlayout.setSpacing(6)
urlLabel = QLabel(u"URL*:")
self.urlLineEdit = QLineEdit()
urlLabel.setBuddy(self.urlLineEdit)
self.gridlayout.addWidget(urlLabel, 0, 0)
self.gridlayout.addWidget(self.urlLineEdit, 0, 1)
accountkeyLabel = QLabel(u"ACCOUNT_KEY:")
self.accountkeyLineEdit = QLineEdit()
accountkeyLabel.setBuddy(self.accountkeyLineEdit)
self.gridlayout.addWidget(accountkeyLabel, 1, 0)
self.gridlayout.addWidget(self.accountkeyLineEdit, 1, 1)
filenameLabel = QLabel(u"FILENAME:")
self.filenameLineEdit = QLineEdit()
filenameLabel.setBuddy(self.filenameLineEdit)
self.gridlayout.addWidget(filenameLabel, 2, 0)
self.gridlayout.addWidget(self.filenameLineEdit, 2, 1)
pathLabel = QLabel(u"PATH*:")
self.pathLineEdit = QLineEdit()
pathLabel.setBuddy(self.pathLineEdit)
self.gridlayout.addWidget(pathLabel, 3, 0)
self.gridlayout.addWidget(self.pathLineEdit, 3, 1)
extensionLabel = QLabel(u"EXTENSION:")
self.extensionLineEdit = QLineEdit()
extensionLabel.setBuddy(self.extensionLineEdit)
self.gridlayout.addWidget(extensionLabel, 4, 0)
self.gridlayout.addWidget(self.extensionLineEdit, 4, 1)
typeLabel = QLabel(u"TYPE:")
self.typeLineEdit = QLineEdit()
typeLabel.setBuddy(self.typeLineEdit)
self.gridlayout.addWidget(typeLabel, 5, 0)
self.gridlayout.addWidget(self.typeLineEdit, 5, 1)
self.buttonBox = QDialogButtonBox(NewConnectDlg)
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|
QDialogButtonBox.NoButton|QDialogButtonBox.Ok)
self.gridlayout.addWidget(self.buttonBox, 6, 1)
QObject.connect(self.buttonBox, SIGNAL("accepted()"),
NewConnectDlg.accept)
QObject.connect(self.buttonBox, SIGNAL("rejected()"),
NewConnectDlg.reject)
QMetaObject.connectSlotsByName(NewConnectDlg)
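# --- Usage sketch (added for illustration; not part of the original module) ---
# Hand-written Qt UI classes like this are normally mixed into a QDialog:
#
# class NewConnectDlg(QDialog, Ui_NewConnectDlg):
#     def __init__(self, parent=None):
#         super(NewConnectDlg, self).__init__(parent)
#         self.setupUi(self)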
| apache-2.0 | 3,554,032,290,002,449,400 | 39.366667 | 66 | 0.669282 | false |
PaddlePaddle/models | PaddleCV/tracking/ltr/models/bbreg/atom_iou_net.py | 1 | 12326 | """
the implementation of ATOM iou net
"""
import paddle
import paddle.fluid as fluid
import paddle.fluid.dygraph.nn as nn
import numpy as np
import os.path as osp
import sys
CURRENT_DIR = osp.dirname(__file__)
sys.path.append(osp.join(CURRENT_DIR, '..', '..', '..'))
def weight_init():
init = fluid.initializer.MSRAInitializer(uniform=False)
param = fluid.ParamAttr(initializer=init)
return param
def bias_init():
init = fluid.initializer.ConstantInitializer(value=0.)
param = fluid.ParamAttr(initializer=init)
return param
def norm_weight_init():
# init = fluid.initializer.ConstantInitializer(1.0)
init = fluid.initializer.Uniform(low=0., high=1.)
param = fluid.ParamAttr(initializer=init)
return param
def norm_bias_init():
init = fluid.initializer.ConstantInitializer(value=0.)
param = fluid.ParamAttr(initializer=init)
return param
class ConvBNReluLayer(fluid.dygraph.Layer):
def __init__(self,
in_channels,
out_channels,
filter_size,
stride=1,
groups=1,
padding=1,
is_test=False):
super(ConvBNReluLayer, self).__init__()
self.conv = nn.Conv2D(
num_channels=in_channels,
filter_size=filter_size,
num_filters=out_channels,
stride=stride,
padding=padding,
groups=groups,
bias_attr=bias_init(),
param_attr=weight_init())
self.bn = nn.BatchNorm(
out_channels,
param_attr=norm_weight_init(),
bias_attr=norm_bias_init(),
act=None,
momentum=0.9,
use_global_stats=is_test)
def forward(self, inputs):
res = self.conv(inputs)
self.conv_res = res
res = self.bn(res)
res = fluid.layers.relu(res)
return res
class FCBNReluLayer(fluid.dygraph.Layer):
def __init__(self,
in_channels,
out_channels,
in_size,
is_bias=True,
is_bn=True,
is_relu=True,
is_test=False):
super(FCBNReluLayer, self).__init__()
self.is_bn = is_bn
self.is_relu = is_relu
if is_bias:
bias_init = fluid.ParamAttr(
initializer=fluid.initializer.ConstantInitializer(0.))
else:
bias_init = False
self.linear = nn.Linear(
in_channels * in_size * in_size, out_channels, bias_attr=bias_init)
self.bn = nn.BatchNorm(
out_channels,
param_attr=norm_weight_init(),
bias_attr=norm_bias_init(),
act=None,
momentum=0.9,
use_global_stats=is_test)
def forward(self, x):
x = fluid.layers.reshape(x, [x.shape[0], -1])
x = self.linear(x)
if self.is_bn:
x = self.bn(x)
if self.is_relu:
x = fluid.layers.relu(x)
return x
class AtomIouNet(fluid.dygraph.Layer):
def __init__(self,
name,
input_dim=(128, 256),
pred_input_dim=(256, 256),
pred_inter_dim=(256, 256),
is_test=False):
super(AtomIouNet, self).__init__(name)
self.name = self.full_name()
self.conv3_1r = ConvBNReluLayer(
input_dim[0], 128, filter_size=3, stride=1, is_test=is_test)
self.conv3_1t = ConvBNReluLayer(
input_dim[0], 256, filter_size=3, stride=1, is_test=is_test)
self.conv3_2t = ConvBNReluLayer(
256, pred_input_dim[0], filter_size=3, stride=1, is_test=is_test)
self.fc3_1r = ConvBNReluLayer(
128, 256, filter_size=3, stride=1, padding=0, is_test=is_test)
self.conv4_1r = ConvBNReluLayer(
input_dim[1], 256, filter_size=3, stride=1, is_test=is_test)
self.conv4_1t = ConvBNReluLayer(
input_dim[1], 256, filter_size=3, stride=1, is_test=is_test)
self.conv4_2t = ConvBNReluLayer(
256, pred_input_dim[1], filter_size=3, stride=1, is_test=is_test)
self.fc34_3r = ConvBNReluLayer(
512,
pred_input_dim[0],
filter_size=1,
stride=1,
padding=0,
is_test=is_test)
self.fc34_4r = ConvBNReluLayer(
512,
pred_input_dim[1],
filter_size=1,
stride=1,
padding=0,
is_test=is_test)
self.fc3_rt = FCBNReluLayer(
pred_input_dim[0], pred_inter_dim[0], in_size=5, is_test=is_test)
self.fc4_rt = FCBNReluLayer(
pred_input_dim[1], pred_inter_dim[1], in_size=3, is_test=is_test)
bias_init = fluid.initializer.ConstantInitializer(0.)
self.iou_predictor = nn.Linear(
pred_inter_dim[0] + pred_inter_dim[1], 1, bias_attr=bias_init)
self.outs = {}
def predict_iou(self, filter, feat2, proposals):
"""
        Predicts IoU for the given proposals.
        :param filter: Modulation vectors for the targets. Dims (batch, feature_dim).
        :param feat2: IoU features (from get_iou_feat) for the test images. Dims (batch, feature_dim, H, W).
        :param proposals: Proposal boxes for which the IoU will be predicted (batch, num_proposals, 4).
        :return: Predicted IoU scores with dims (batch, num_proposals).
"""
fc34_3_r, fc34_4_r = filter
c3_t, c4_t = feat2
batch_size = c3_t.shape[0]
# Modulation
c3_t_att = c3_t * fluid.layers.reshape(fc34_3_r, [batch_size, -1, 1, 1])
c4_t_att = c4_t * fluid.layers.reshape(fc34_4_r, [batch_size, -1, 1, 1])
# add batch roi nums
num_proposals_per_batch = proposals.shape[1]
batch_roi_nums = np.array([num_proposals_per_batch] *
batch_size).astype(np.int64)
batch_roi_nums = fluid.dygraph.to_variable(batch_roi_nums)
# input proposals2 is in format xywh, convert it to x0y0x1y1 format
proposals_xyxy = fluid.layers.concat(
[
proposals[:, :, 0:2],
proposals[:, :, 0:2] + proposals[:, :, 2:4]
],
axis=2)
roi2 = fluid.layers.reshape(proposals_xyxy, [-1, 4])
roi2.stop_gradient = False
roi3t = fluid.layers.prroi_pool(
c3_t_att, roi2, 1 / 8., 5, 5, batch_roi_nums=batch_roi_nums)
roi4t = fluid.layers.prroi_pool(
c4_t_att, roi2, 1 / 16., 3, 3, batch_roi_nums=batch_roi_nums)
fc3_rt = self.fc3_rt(roi3t)
fc4_rt = self.fc4_rt(roi4t)
fc34_rt_cat = fluid.layers.concat([fc3_rt, fc4_rt], axis=1)
iou_pred = self.iou_predictor(fc34_rt_cat)
iou_pred = fluid.layers.reshape(iou_pred,
[batch_size, num_proposals_per_batch])
return iou_pred
def forward(self, feat1, feat2, bb1, proposals2):
"""Runs the ATOM IoUNet during training operation.
This forward pass is mainly used for training. Call the individual functions during tracking instead.
args:
feat1: Variable, Features from the reference frames (4 or 5 dims).
feat2: Variable, Features from the test frames (4 or 5 dims).
bb1: Target boxes (x,y,x2,y2) in image coords in the reference samples. Dims (images, sequences, 4).
proposals2: Proposal boxes for which the IoU will be predicted (images, sequences, num_proposals, 4)."""
assert len(feat1[0].shape) == 5, 'Expect 5 dimensional feat1'
num_test_images = feat2[0].shape[0]
batch_size = feat2[0].shape[1]
# Extract first train sample
feat1 = [f[0] for f in feat1]
bb1 = bb1[0]
# Get modulation vector
modulation = self.get_filter(feat1, bb1)
feat2 = [
fluid.layers.reshape(f,
(batch_size * num_test_images, *f.shape[-3:]))
for f in feat2
]
iou_feat = self.get_iou_feat(feat2)
new_modulation = []
for i in range(0, len(modulation)):
tmp = modulation[i]
tmp = fluid.layers.reshape(tmp, [1, batch_size, -1])
tmp = fluid.layers.expand(tmp, [num_test_images, 1, 1])
tmp = fluid.layers.reshape(tmp, [batch_size * num_test_images, -1])
new_modulation.append(tmp)
proposals2 = fluid.layers.reshape(
proposals2, [batch_size * num_test_images, -1, 4])
pred_iou = self.predict_iou(new_modulation, iou_feat, proposals2)
pred_iou = fluid.layers.reshape(pred_iou,
[num_test_images, batch_size, -1])
return pred_iou
def get_filter(self, feat1, bb1):
"""
get modulation feature [feature1, feature2] for the targets
:param feat1: variable, Backbone features from reference images. shapes (batch, feature_dim, H, W).
:param bb1: variable, Target boxes (x,y,w,h) in image coords in the reference samples. shapes (batch, 4).
:return:
"""
feat3_r, feat4_r = feat1
c3_r = self.conv3_1r(feat3_r)
# Add batch_index to rois
batch_size = bb1.shape[0]
batch_roi_nums = np.array([1] * batch_size).astype(np.int64)
batch_roi_nums = fluid.dygraph.to_variable(batch_roi_nums)
# input bb is in format xywh, convert it to x0y0x1y1 format
roi1 = fluid.layers.concat(
[bb1[:, 0:2], bb1[:, 0:2] + bb1[:, 2:4]], axis=1)
roi1.stop_gradient = False
roi3r = fluid.layers.prroi_pool(c3_r, roi1, 1 / 8., 3, 3,
batch_roi_nums)
c4_r = self.conv4_1r(feat4_r)
roi4r = fluid.layers.prroi_pool(c4_r, roi1, 1 / 16., 1, 1,
batch_roi_nums)
fc3_r = self.fc3_1r(roi3r)
# Concatenate
fc34_r = fluid.layers.concat([fc3_r, roi4r], axis=1)
fc34_3_r = self.fc34_3r(fc34_r)
fc34_4_r = self.fc34_4r(fc34_r)
return fc34_3_r, fc34_4_r
def get_iou_feat(self, feat2):
"""
Get IoU prediction features from a 4 or 5 dimensional backbone input.
:param feat2: variable, Backbone features from reference images. [feature1, feature2]
:return: features, variable
"""
feat3_t, feat4_t = feat2
c3_t = self.conv3_2t(self.conv3_1t(feat3_t))
c4_t = self.conv4_2t(self.conv4_1t(feat4_t))
return c3_t, c4_t
def atom_iounet(name,
input_dim=(128, 256),
pred_input_dim=(256, 256),
pred_inter_dim=(256, 256)):
return AtomIouNet(
name,
input_dim=input_dim,
pred_input_dim=pred_input_dim,
pred_inter_dim=pred_inter_dim)
def test_paddle_iounet():
a = np.random.uniform(-1, 1, [1, 1, 512, 18, 18]).astype(np.float32)
b = np.random.uniform(-1, 1, [1, 1, 1024, 9, 9]).astype(np.float32)
bbox = [[3, 4, 10, 11]]
proposal_bbox = [[4, 5, 11, 12] * 16]
bbox = np.reshape(np.array(bbox), [1, 1, 4]).astype(np.float32)
proposal_bbox = np.reshape(np.array(proposal_bbox),
[1, 16, 4]).astype(np.float32)
with fluid.dygraph.guard():
a_pd = fluid.dygraph.to_variable(a)
b_pd = fluid.dygraph.to_variable(b)
bbox_pd = fluid.dygraph.to_variable(bbox)
proposal_bbox_pd = fluid.dygraph.to_variable(proposal_bbox)
feat1 = [a_pd, b_pd]
feat2 = [a_pd, b_pd]
model = AtomIouNet('IOUNet', input_dim=(512, 1024))
res = model(feat1, feat2, bbox_pd, proposal_bbox_pd)
print(res.shape)
params = model.state_dict()
for v in params:
print(v, '\t', params[v].shape)
print(len(params))
if __name__ == '__main__':
test_paddle_iounet()
| apache-2.0 | -1,201,066,844,394,714,600 | 33.217143 | 117 | 0.536589 | false |
p3ck/fedmsg-download | fedmsg_download/download.py | 1 | 6751 | import os, os.path
import sys
from optparse import OptionParser
import logging
import ConfigParser
import tempfile
import shutil
import datetime
import time
from subprocess import Popen, PIPE
import select
log = logging.getLogger(__name__)
def _mkdir(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired " \
"dir, '%s', already exists." % newdir)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
_mkdir(head)
#print "_mkdir %s" % repr(newdir)
if tail:
os.mkdir(newdir)
def run_command(commandline):
command = commandline.split()
proc = Popen(command, stdout=PIPE, stderr=PIPE)
input = [proc.stdout, proc.stderr]
running = 1
while running:
inputready, outputready, exceptready = select.select(input,[],[],5)
for s in inputready:
for line in s:
if s == proc.stderr:
log.error("run_command: %s" % line)
else:
log.info("run_command: %s" % line)
if proc.poll() is not None:
break
return proc.returncode
class DownloadException(Exception):
def __init__(self, value, *args):
self.value = value % args
def __str__(self):
return repr(self.value)
class DX(DownloadException):
pass
class Downloader(object):
def __init__(self, rsync_url=None, branch=None,
req_compose=True, ignore_name=False,
local_dir=None, rsync_opts=None,
delete_old=False, compose_dir=None,
command=None):
self.rsync = RSync(rsync_url)
self.branch = branch
self.parser = CParser()
self.req_compose = req_compose
self.ignore_name = ignore_name
self.local_dir = local_dir
self.rsync_opts = rsync_opts
self.delete_old = delete_old
self.compose_dir = compose_dir
self.command = command
try:
tmp_file = tempfile.mktemp()
self.rsync.get(self.parser.infofile, tmp_file)
self.parser.parse(tmp_file)
except DX, e:
if self.req_compose:
raise
else:
pass
finally:
if os.path.isfile(tmp_file):
os.unlink(tmp_file)
def sync_it_down(self):
product_name = "%s-%s" % ( self.branch, str(datetime.date.today()))
if not self.ignore_name:
product_name = self.parser.get('product','name', product_name)
# Default options
opts = '--archive --verbose --delete'
# Add in rsync_opts
if self.rsync_opts:
opts = '%s %s' % (opts, self.rsync_opts)
# read 'latest' symlink from local_dir
latest = os.path.realpath('%s/latest-%s' % (self.local_dir, self.branch))
# Add in link-dest if symlink points to valid dir
if os.path.isdir(latest):
opts = opts + ' --link-dest=%s' % latest
local_path = os.path.join(self.local_dir, product_name)
# compare local and remote
if os.path.realpath(local_path) == latest:
log.info('Already have %s' % local_path)
return 0
# Create dir if needed
_mkdir(local_path)
# Get it.
log.info("Downloading: %s" % product_name)
self.rsync.get('', local_path, opts)
# update symlink to newly downloaded
if os.path.exists(os.path.join(self.local_dir,'latest-%s' % self.branch)):
os.unlink(os.path.join(self.local_dir,'latest-%s' % self.branch))
os.symlink(product_name, '%s/latest-%s' % (self.local_dir, self.branch))
# If delete_old is set remove previous tree
if os.path.isdir(latest) and self.delete_old:
# The variable latest actually holds the old dir
shutil.rmtree('%s' % latest)
# If command is not None then run it with product_name
if self.command:
rel_path = "%s/%s" % (self.compose_dir, product_name)
commandline = self.command % dict(tree = rel_path)
log.debug(commandline)
rc = run_command(commandline)
if rc != 0:
raise DX('Unable to run command %s' % commandline)
return 0
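# --- Usage sketch (added for illustration; not part of the original module) ---
# The rsync URL, branch and local paths below are assumptions:
#
# dl = Downloader(rsync_url="rsync://example.org/pub/branch/", branch="devel",
#                 local_dir="/srv/mirror", compose_dir="composes",
#                 req_compose=False)
# dl.sync_it_down()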
class RSync(object):
def __init__(self, rsync_url):
self.rsync_url = rsync_url
self.cmd = "rsync"
def get(self, remote_filename, local_filename, opts=''):
"""Use rsync to get a remote file"""
remote_path = os.path.join(self.rsync_url, remote_filename)
commandline = "%s %s %s %s" % (self.cmd,
opts,
remote_path,
local_filename)
log.debug(commandline)
        # Retry the rsync up to five times to get around horrible
        # issues on the server where files sometimes go away
for x in range(0,5):
loop = 0
while loop < 1800:
loop += 1
rc = run_command(commandline)
# rsync rc 5 : Error starting client-server protocol
# This usually is because the server is too busy
if rc != 5:
break
else:
time.sleep(2)
if rc == 0:
break
log.debug("Retry: %s" % x)
if rc != 0:
raise DX('RC:%s Unable to rsync %s -> %s' %
(rc, remote_path, local_filename))
return rc
class Parser(object):
def __init__(self, rsync_url=None):
self.parser = None
def get(self, section, key, default=None):
if self.parser:
try:
default = self.parser.get(section, key)
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError), e:
if default is None:
raise
return default
def parse(self, filename):
try:
self.parser = ConfigParser.ConfigParser()
self.parser.read(filename)
except ConfigParser.MissingSectionHeaderError, e:
raise DX('%s/%s is not parsable: %s' % (filename,
self.infofile,
e))
return True
class CParser(Parser):
infofile = '.composeinfo'
| lgpl-2.1 | 6,401,465,013,478,749,000 | 32.256158 | 82 | 0.539624 | false |
msimet/Stile | stile/data_handler.py | 1 | 4204 | """
data_handler.py: defines the classes that serve data to the various Stile systematics tests in the
default drivers.
"""
import os
import glob
class DataHandler:
"""
.. warning::
The behavior of this class is still under development; we do not suggest using or relying
on ``DataHandler`` instances at this time.
A class which contains information about the data set Stile is to be run on. This is used for
the default drivers, not necessarily the pipeline-specific drivers (such as HSC/LSST).
The class needs to be able to do two things:
* List some data given a set of requirements (:func:`listData`). The
requirements generally follow the form:
* object_types: a list of strings such as ``"star"``, ``"PSF star"``, ``"galaxy"`` or
``"galaxy random"`` describing the objects that are needed for the tests.
* epoch: whether this is a single/summary catalog, or a multiepoch time series. (Coadded
catalogs with no per-epoch information count as a single/summary catalog!)
* extent: ``"CCD"``, ``"field"``, ``"patch"`` or ``"tract"``. This can be ignored if you
don't mind running some extra inappropriate tests! ``"CCD"`` should be a CCD-type
dataset, ``"field"`` a single pointing/field-of-view, ``"patch"`` an intermediate-size
area, and ``"tract"`` a large area. (These terms have specific meanings in the LSST
pipeline, but are used for convenience here.)
* data_format: ``"image"`` or ``"catalog"``. Right now no image-level tests are
implemented but we request this kwarg for future development.
* Take each element of the data list from :func:`DataHandler.listData` and retrieve it for use
(:func:`DataHandler.getData`), optionally with bins defined. (Bins can also be defined on a
test-by-test basis, depending on which format makes the most sense for your data setup.)
Additionally, the class can define a ``.getOutputPath()`` function to place the data in a more
complex system than the default (all in one directory with long output path names).
"""
def __init__(self):
raise NotImplementedError()
def listData(self, object_types, epoch, extent, data_format, required_fields=None):
raise NotImplementedError()
def getData(self, ident, object_types=None, epoch=None, extent=None, data_format=None,
bin_list=None):
"""
Return some data matching the ``ident`` for the given kwargs. This can be a numpy array, a
tuple ``(file_name, field_schema)`` for a file already existing on the filesystem, or a list
of either of those things.
If it's a tuple ``(file_name, field_schema)``, the assumption is that it can be read by a
simple FITS or ASCII reader. The format will be determined from the file extension.
"""
raise NotImplementedError()
def getOutputPath(self, extension='.dat', multi_file=False, *args):
"""
Return a path to an output file given a list of strings that should appear in the output
filename, taking care not to clobber other files (unless requested).
:param args: A list of strings to appear in the file name
:param extension: The file extension to be used
:param multi_file: Whether multiple files with the same args will be created within a single
run of Stile. This appends a number to the file name; if clobbering is
allowed, this argument also prevents Stile from writing over outputs from
the same systematics test during the same run.
:returns: A path to an output file meeting the input specifications.
"""
#TODO: no-clobbering case
sys_test_string = '_'.join(args)
if multi_file:
            nfiles = len(glob.glob(os.path.join(self.output_path, sys_test_string)+'*'+extension))
return os.path.join(self.output_path, sys_test_string+'_'+str(nfiles)+extension)
else:
return os.path.join(self.output_path, sys_test_string+extension)
| bsd-3-clause | -2,461,164,493,397,819,000 | 52.21519 | 100 | 0.653901 | false |
mnori/foldatlas | foldatlas/controllers.py | 1 | 31387 | from sqlalchemy import and_
import json
import uuid
import settings
import os
from models import Feature, Transcript, NucleotideMeasurementSet, Structure, \
GeneLocation, NucleotideMeasurementRun, StructurePredictionRun, \
values_str_unpack_float, values_str_unpack_int, RawReactivities, RawReplicateCounts, Bppm
from utils import ensure_dir, insert_newlines, build_dot_bracket
import database
from database import db_session
# Fetches sequence annotation data from the DB and sends it to the genome
# browser front end as JSON.
class GenomeBrowser():
def get_transcripts(self, request):
chromosome_id = "Chr"+str(int(request.args.get('chr'))) # SQL-injection safe
start = int(request.args.get('start'))
end = int(request.args.get('end'))
# Retrieve features using the gene location cache table
sql = ( "SELECT feature.* "
"FROM gene_location, transcript, feature "
"WHERE gene_location.strain_id = '"+settings.reference_strain_id+"' "
"AND gene_location.chromosome_id = '"+chromosome_id+"' "
"AND gene_location.end > '"+str(start)+"' "
"AND gene_location.start < '"+str(end)+"' "
"AND gene_location.gene_id = transcript.gene_id "
"AND transcript.id = feature.transcript_id "
"AND feature.strain_id = '"+settings.reference_strain_id+"'")
results = database.engine.execute(sql)
# collect transcript data
transcripts = {}
feature_rows = []
for result in results:
if result.transcript_id not in transcripts:
transcripts[result.transcript_id] = {
"Parent": result.transcript_id,
"feature_type": "transcript", # without this, it won't draw
"direction": result.direction,
"start": None,
"end": None,
"id": result.transcript_id
}
transcript = transcripts[result.transcript_id]
# keep track of total start and end
if transcript["start"] == None or result.start < transcript["start"]:
transcript["start"] = result.start
if transcript["end"] == None or result.end > transcript["end"]:
transcript["end"] = result.end
feature_rows.append(result)
out = []
# add the transcript metadata to the output. make sure the transcripts are added
# in alphabetical order
transcript_ids = []
for transcript_id in transcripts:
transcript_ids.append(transcript_id)
transcript_ids = sorted(transcript_ids)
for transcript_id in transcript_ids:
out.append(transcripts[transcript_id])
# also add all the feature metadata to the output
for feature_row in feature_rows:
out.append({
"Parent": feature_row.transcript_id,
"feature_type": feature_row.type_id,
"direction": result.direction,
"start": feature_row.start,
"end": feature_row.end,
"id": feature_row.transcript_id+"-"+str(feature_row.id)
})
return json.dumps(out)
def get_genes(self, request):
from utils import Timeline
chromosome_id = "Chr"+str(int(request.args.get('chr'))) # SQL-injection safe
start = int(request.args.get('start'))
end = int(request.args.get('end'))
# fetch gene data from the location cache table.
sql = ( "SELECT * FROM gene_location "
"WHERE strain_id = '"+settings.reference_strain_id+"' "
"AND chromosome_id = '"+chromosome_id+"' "
"AND end > '"+str(start)+"' "
"AND start < '"+str(end)+"'")
results = database.engine.execute(sql)
out = []
for result in results:
out.append({
"feature_type": "gene", # without this, it won't draw
"direction": result.direction,
"id": result.gene_id,
"start": result.start,
"end": result.end,
})
buf = json.dumps(out)
return buf
# Fetch chromosome IDs and their lengths. Used for chromosome menu and also initialising the genome browser.
def get_chromosomes(self):
sql = ( "SELECT chromosome_id, CHAR_LENGTH(sequence) length FROM chromosome "
"WHERE strain_id = '"+settings.reference_strain_id+"' "
"ORDER BY chromosome_id ASC")
results = database.engine.execute(sql)
out = []
for result in results:
out.append({
"chromosome_id": result.chromosome_id,
"length": result.length,
"int_id": int(result.chromosome_id[3])
})
return out
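# --- Usage sketch (added for illustration; not part of the original module) ---
# Route names and app wiring here are assumptions; the controller methods are
# typically called from thin Flask view functions along these lines:
#
# @app.route("/ajax/transcripts")
# def ajax_transcripts():
#     return GenomeBrowser().get_transcripts(request)
#
# @app.route("/ajax/genes")
# def ajax_genes():
#     return GenomeBrowser().get_genes(request)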
class TranscriptView():
def __init__(self, transcript_id):
self.transcript_id = transcript_id
# Get the coords of the associated gene
data = db_session \
.query(Transcript, GeneLocation) \
.filter(
Transcript.id==transcript_id,
Transcript.gene_id==GeneLocation.gene_id,
GeneLocation.strain_id==settings.reference_strain_id
) \
.all()
self.gene_id = data[0][1].gene_id
self.transcript_data = json.dumps({
"gene_id": self.gene_id,
"transcript_id": transcript_id,
"chromosome_id": data[0][1].chromosome_id,
"start": data[0][1].start,
"end": data[0][1].end
})
self.structure_view = StructureView(self.transcript_id, settings.reference_strain_id)
self.nucleotide_measurement_view = NucleotideMeasurementView(self.transcript_id, settings.reference_strain_id)
self.empty = self.structure_view.empty and self.nucleotide_measurement_view.empty
# disable alignment view... revisit later with SNPstructure
# self.alignment_view = AlignmentView(self.transcript_id)
class NucleotideMeasurementView():
def __init__(self, transcript_id, strain_id):
self.transcript_id = transcript_id
self.strain_id = strain_id
self.build_entries([1])
def build_entries(self, experiment_ids):
from models import NucleotideMeasurementRun
# Load experiments
experiments = db_session \
.query(NucleotideMeasurementRun) \
.filter(NucleotideMeasurementRun.id.in_(experiment_ids)) \
.all()
# Load measurements
seq_str = str(Transcript(self.transcript_id).get_sequence(self.strain_id).seq)
measurements_data = db_session \
.query(NucleotideMeasurementSet) \
.filter(
NucleotideMeasurementSet.nucleotide_measurement_run_id.in_(experiment_ids),
NucleotideMeasurementSet.transcript_id==self.transcript_id
) \
.all()
data = {}
# Populate experiment rows
for experiment in experiments:
experiment_data = {
"id": experiment.id,
"description": experiment.description,
"data": []
}
for n in range(len(seq_str)): # initialise the array
experiment_data["data"].append({
"position": n,
"nuc": seq_str[n],
"measurement": None
})
data[experiment.id] = experiment_data
# Add measurements to each experiment json element
# Loop since we might be dealing with > 1 measurement set
for measurement_set in measurements_data:
experiment_id = measurement_set.nucleotide_measurement_run_id
measurements = values_str_unpack_float(measurement_set.values)
for pos in range(0, len(measurements)):
measurement = measurements[pos]
data[experiment_id]["data"][pos]["measurement"] = measurement
# For each experiment, check whether there is no data and set empty flags accordingly.
self.empty = True # all empty flag
for experiment_id in data:
entry = data[experiment_id]
empty = True
for pos in entry["data"]:
if pos["measurement"] != 0 and pos["measurement"] != None:
empty = False
self.empty = False
if empty:
del entry["data"]
entry["empty"] = True
else:
entry["empty"] = False
self.data_json = json.dumps(data)
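# --- Illustrative note (added; not part of the original module) ---
# The data_json produced above is keyed by experiment id, roughly:
# { "1": { "id": 1, "description": "...", "empty": false,
#          "data": [ {"position": 0, "nuc": "A", "measurement": 0.42}, ... ] } }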
class AlignmentView():
alignment_line_length = 80
def __init__(self, transcript_id):
self.transcript_id = transcript_id
self.build_alignment_entries()
def build_alignment_entries(self):
self.alignment_rows = []
# fetch the alignment rows from the DB, using the ORM
alignment_entries = db_session \
.query(AlignmentEntry) \
.filter(AlignmentEntry.transcript_id==self.transcript_id) \
.all()
if (len(alignment_entries) == 0):
return # not enough transcripts to align
aln_len = len(alignment_entries[0].sequence) # length of alignment, including gaps
row_n = 0
reached_end = False
seq_len_processed = 0
# initialise tot_nucs counters. these are for showing nuc counts at the ends of each alignment row.
nuc_counts = {}
for alignment_entry in alignment_entries:
nuc_counts[alignment_entry.strain_id] = 0
while(True): # Each iteration builds 1 row of alignment data
start = row_n * self.alignment_line_length
end = start + self.alignment_line_length
if aln_len < end:
reached_end = True
end = aln_len
self.alignment_rows.append({
"strain_data": {},
"diff": list("*" * (end - start))
})
# create diff - as "*" - then change to "." when a difference is encountered
# create alignment entries data structure, for showing the sequences
for alignment_entry in alignment_entries:
self.alignment_rows[row_n]["strain_data"][alignment_entry.strain_id] = {
"nuc_count": 0, # TODO fill this shiz out
"sequence": list(alignment_entry.sequence[start : end])
}
# Loop through each nucleotide in the sequence. Determine any differences between the
# strains at the position of interest. Store in "diff" variable
for n in range(start, end):
different = False
old_nuc = None
for alignment_entry in alignment_entries:
new_nuc = alignment_entry.sequence[n]
if new_nuc != "-": # keep track of nucleotide counts, for showing on the end
nuc_counts[alignment_entry.strain_id] += 1
if old_nuc != None and new_nuc != old_nuc:
self.alignment_rows[row_n]["diff"][n - start] = "."
old_nuc = new_nuc
# add nucleotide counts to the ends of the sequence alignment.
for alignment_entry in alignment_entries:
self.alignment_rows[row_n]["strain_data"][alignment_entry.strain_id]["nuc_count"] = nuc_counts[alignment_entry.strain_id]
if reached_end:
break
row_n += 1
class TranscriptSearcher():
def search(self, search_string):
from flask import abort
transcripts = db_session \
.query(Transcript) \
.filter(Transcript.id.like("%"+search_string+"%")) \
.all()
if len(transcripts) == 0: # no transcripts found
abort(404)
out = []
for transcript in transcripts:
out.append(transcript.id)
return json.dumps(out)
class CoverageSearcher():
def __init__(self):
# size of pages
self.page_size = 25
# The experiment ID to sort by. Ideally this should have a value for each
# transcript, otherwise there will be some missing transcripts...
self.nucleotide_measurement_run_id = 1
def fetch_page_count(self):
# better to do the imports closer to where they are needed
from sqlalchemy import func
from math import ceil
transcript_count = db_session \
.query(func.count('*')) \
.select_from(NucleotideMeasurementSet) \
.filter(NucleotideMeasurementSet.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id) \
.scalar()
page_count = ceil(transcript_count / self.page_size)
return page_count
def fetch_transcript_data(self, page_num):
from utils import Timeline
from sqlalchemy import func, and_
from models import Structure, GeneLocation
offset = (int(page_num) - 1) * self.page_size
limit = self.page_size
sql = (
"SELECT "
" transcript.id AS transcript_id, "
" gene_location.start AS gene_start, "
" gene_location.end AS gene_end, "
" jnms.coverage AS coverage, "
" jnms.structure_transcript_id AS structure_transcript_id "
"FROM ( "
" SELECT "
" nms.*, "
" structure.transcript_id AS structure_transcript_id "
" FROM ( "
" SELECT nucleotide_measurement_set.* "
" FROM nucleotide_measurement_set "
" ORDER BY nucleotide_measurement_set.coverage DESC "
" LIMIT "+str(limit)+" OFFSET "+str(offset)+" "
" ) AS nms LEFT OUTER JOIN structure ON "
" structure.transcript_id = nms.transcript_id AND "
" structure.structure_prediction_run_id = 2 "
") AS jnms, "
" transcript, "
" gene_location "
"WHERE "
" jnms.nucleotide_measurement_run_id = 1 AND "
" transcript.id = jnms.transcript_id AND "
" transcript.gene_id = gene_location.gene_id AND "
" gene_location.strain_id = 'Col_0' "
"GROUP BY jnms.transcript_id "
"ORDER BY coverage DESC"
)
results = database.engine.execute(sql)
out = []
for row in results:
out.append({
"transcript_id": row["transcript_id"],
"gene_length": (row["gene_end"] - row["gene_start"]) + 1,
"coverage": row["coverage"],
"has_structure": False if (row["structure_transcript_id"] == None) else True
})
return out
# q = db_session \
# .query(NucleotideMeasurementSet, Transcript, GeneLocation,) \
# .filter(
# NucleotideMeasurementSet.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id,
# Transcript.id==NucleotideMeasurementSet.transcript_id,
# Transcript.gene_id==GeneLocation.gene_id,
# GeneLocation.strain_id==settings.reference_strain_id # get this for gene len
# ) \
# .outerjoin(( # Left join to find in-vivo structures for structure indicator
# Structure,
# and_(
# Structure.transcript_id==NucleotideMeasurementSet.transcript_id,
# # this filters so it's only in vivo joined against
# Structure.structure_prediction_run_id==2
# )
# )) \
# .add_entity(Structure) \
# .group_by(Transcript.id) \
# .order_by(NucleotideMeasurementSet.coverage.desc()) \
# .offset((int(page_num) - 1) * self.page_size) \
# .limit(str(self.page_size)) \
# GROUP BY eliminates structures with the same transcript ID \
# results = q.all()
# tl.log("c")
# tl.dump()
# get the SQL so we can optimise the query
# from sqlalchemy.dialects import postgresql
# q_str = str(q.statement.compile(compile_kwargs={"literal_binds": True}))
# print(q_str)
# mandatory in vivo query - just for screenshot purposes
# results = db_session \
# .query(NucleotideMeasurementSet, Transcript, GeneLocation, Structure, ) \
# .filter(
# NucleotideMeasurementSet.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id,
# Transcript.id==NucleotideMeasurementSet.transcript_id,
# Transcript.gene_id==GeneLocation.gene_id,
# GeneLocation.strain_id==settings.reference_strain_id, # get this for gene len
# Structure.transcript_id==NucleotideMeasurementSet.transcript_id,
# # this filters so it's only in vivo considered
# Structure.structure_prediction_run_id==2
# ) \
# .add_entity(Structure) \
# .group_by(NucleotideMeasurementSet.transcript_id) \
# .order_by(NucleotideMeasurementSet.coverage.desc()) \
# .offset((int(page_num) - 1) * self.page_size) \
# .limit(str(self.page_size)) \
# .all()
class StructureView():
def __init__(self, transcript_id, strain_id):
self.transcript_id = transcript_id
self.strain_id = strain_id
self.build_entries([1, 2])
def build_entries(self, structure_prediction_run_ids):
from models import Structure, StructurePredictionRun
# Load experiments
runs = db_session \
.query(StructurePredictionRun) \
.filter(StructurePredictionRun.id.in_(structure_prediction_run_ids)) \
.all()
data = {}
for run in runs:
run_data = {
"id": run.id,
"description": run.description,
"data": []
}
# fetch all Structure objects that match the experiment ID and the transcript ID
results = db_session \
.query(Structure) \
.filter(
Structure.structure_prediction_run_id==run.id,
Structure.transcript_id==self.transcript_id
) \
.all()
# add the structures to output json
for structure in results:
run_data["data"].append({
"id": structure.id,
"energy": structure.energy,
"pc1": structure.pc1,
"pc2": structure.pc2
})
data[run.id] = run_data
self.empty = True
for experiment_id in data:
entry = data[experiment_id]
if len(entry["data"]) > 0:
self.empty = False
if not self.empty:
self.data_json = json.dumps(data)
# Plots a single RNA structure using the RNAplot program from the ViennaRNA package.
class StructureDiagramView():
def __init__(self, structure_id):
self.structure_id = structure_id
self.build_plot()
def build_plot(self):
# convert entities to dot bracket string
data = self.build_dot_bracket()
# use ViennaRNA to get 2d plot coords
data["coords"] = self.get_vienna_layout(data)
# return the results as a json string
self.data_json = json.dumps(data)
def build_dot_bracket(self):
# get all the positions
results = db_session \
.query(Structure, Transcript) \
.filter(
Structure.id==self.structure_id,
Transcript.id==Structure.transcript_id
) \
.all()
# Get position values from Structure entity
positions = results[0][0].get_values()
seq_str = results[0][1].get_sequence_str()
dot_bracket_str = build_dot_bracket(positions)
return {
"sequence": seq_str.replace("T", "U"),
"structure": dot_bracket_str
}
# Grab 2d coords from viennaRNA
    # There is a Python 2 wrapper for ViennaRNA, but it is not compatible with Python 3
def get_vienna_layout(self, data):
temp_folder = "/tmp/"+str(uuid.uuid4())
ensure_dir(temp_folder)
dot_bracket_filepath = temp_folder+"/dotbracket.txt"
f = open(dot_bracket_filepath, "w")
f.write(data["sequence"]+"\n"+data["structure"]+"\n")
f.close()
# change to tmp folder
os.chdir(temp_folder)
# use RNAplot CLI to generate the xrna tab delimited file
os.system("RNAplot -o xrna < "+dot_bracket_filepath)
# get the coords out by parsing the file
coords = []
with open(temp_folder+"/rna.ss") as f:
for line in f:
line = line.strip()
if line == "" or line[0] == "#":
continue
bits = line.split()
x = float(bits[2])
y = float(bits[3])
coords.append([x, y])
os.system("rm -rf "+temp_folder)
return coords
# return result
class StructureCirclePlotView():
def __init__(self, structure_id):
self.structure_id = structure_id
self.get_values()
def get_values(self):
# get all the positions
results = db_session \
.query(Structure) \
.filter(Structure.id==self.structure_id) \
.all()
result = results[0]
positions = result.get_values()
bpps = result.get_bpp_values()
# build the output. backward facing links are left blank
# results must be shifted back to array indexes, since they start at 1 in the DB.
out = [];
for curr_position in range(1, len(positions) + 1):
paired_to_position = positions[curr_position - 1]
if paired_to_position == 0 or \
paired_to_position < curr_position:
link = None
else:
link = paired_to_position - 1
if link != None:
link = int(link)
out.append({
"name": curr_position - 1,
"link": link,
"bpp": None if bpps == None else bpps[curr_position - 1]
})
self.data_json = json.dumps(out)
# Generates plaintext structure text files for download
class StructureDownloader():
def __init__(self, structure_prediction_run_ids, transcript_id):
self.structure_prediction_run_ids = structure_prediction_run_ids
self.transcript_id = transcript_id
def generate(self):
# Fetch the data
results = db_session \
.query(Structure, StructurePredictionRun, Transcript) \
.filter(
StructurePredictionRun.id==Structure.structure_prediction_run_id,
Structure.structure_prediction_run_id.in_(self.structure_prediction_run_ids),
Structure.transcript_id==self.transcript_id,
Transcript.id==self.transcript_id
) \
.order_by(
Structure.structure_prediction_run_id,
Structure.id
) \
.all()
return self.generate_txt(results)
# Generates text using a more compact file format
def generate_txt(self, results):
# first we must extract and display the sequence, using the transcript object. output
# in fasta-like format
transcript = results[0][2]
buf = ">"+self.transcript_id+"\n"
buf += insert_newlines(transcript.get_sequence_str())+"\n"
for result in results:
structure = result[0]
run = result[1]
transcript = result[2]
positions = structure.get_values()
# generate and add the header text for this structure
buf += (
">sid_"+str(structure.id)+"\t"+
"ENERGY:"+str(structure.energy)+" kcal/mol\t"+
run.description+"\n")
# generate and add dot bracket text
buf += insert_newlines(build_dot_bracket(positions))+"\n"
return buf
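    # Illustrative sketch of the compact format assembled above; the
    # transcript ID, energy and bracket string are invented for illustration:
    #
    #   >AT1G01010.1
    #   ATGGCTTCAGGA...                                   <- sequence, wrapped by insert_newlines()
    #   >sid_42    ENERGY:-12.3 kcal/mol    in vivo       <- tab-separated structure header
    #   ..((((....))))...                                 <- one dot-bracket block per structure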
# Generates the older and far more cluttered txt format for structures
def generate_txt_old(self, results):
# Generate tab delimited text from the data
buf = ""
for result in results:
structure = result[0]
run = result[1]
transcript = result[2]
seq_str = transcript.get_sequence_str()
positions = structure.get_values()
for curr_position in range(1, len(positions) + 1):
paired_to_position = positions[curr_position - 1]
letter = seq_str[curr_position - 1].replace("T", "U")
buf += str(structure.id)+"\t"+ \
str(run.description)+"\t"+ \
str(structure.transcript_id)+"\t"+ \
str(structure.energy)+"\t"+ \
str(structure.pc1)+"\t"+ \
str(structure.pc2)+"\t"+ \
str(letter)+"\t"+ \
str(curr_position)+"\t"+ \
str(paired_to_position)+"\n"
return buf
# Generates plain text nucleotide measurements for user download
# Includes raw and normalised
class NucleotideMeasurementDownloader():
def __init__(self, nucleotide_measurement_run_id, transcript_id):
self.nucleotide_measurement_run_id = nucleotide_measurement_run_id
self.transcript_id = transcript_id
# Retrieves raw reactivity values and outputs as text
def get_raw(self):
seq_str = Transcript(self.transcript_id).get_sequence_str()
# Use the ORM to grab compiled counts
results = db_session \
.query(RawReactivities) \
.filter(
RawReactivities.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id,
RawReactivities.transcript_id==self.transcript_id
) \
.all()
measurement_set = results[0]
# minus_unpacked =
# plus_unpacked = values_str_unpack_int(measurement_set.plus_values)
cols = [
values_str_unpack_int(measurement_set.minus_values),
values_str_unpack_int(measurement_set.plus_values)
]
# Grab the raw replicate lanes data
lanes = db_session \
.query(RawReplicateCounts) \
.filter(
RawReplicateCounts.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id,
RawReplicateCounts.transcript_id==self.transcript_id
) \
.order_by(
RawReplicateCounts.minusplus_id,
RawReplicateCounts.bio_replicate_id,
RawReplicateCounts.tech_replicate_id
) \
.all()
# gather the data
tech_rep_ids = set()
for lane in lanes:
cols.append(values_str_unpack_int(lane.values))
tech_rep_ids.add(lane.tech_replicate_id)
# make headers
headers = []
for lane in lanes:
# tech replicate notation only added for experiments with > 1 tech replicate
tech_str = "" if len(tech_rep_ids) == 1 else "_T"+str(lane.tech_replicate_id)
headers.append(str(lane.minusplus_id)+"_B"+str(lane.bio_replicate_id)+tech_str)
# Build and return the output
buf = "position\tsequence\tsum_minus\tsum_plus\t"+"\t".join(headers)+"\n"
for n in range(0, len(cols[0])):
# add position and seq letter
buf += str(n + 1)+"\t"+seq_str[n]
for col in cols: # add the dynamic columns
buf += "\t"+str(int(col[n]))
buf += "\n"
return buf
# Retrieves normalised reactivities and outputs as text
def get_normalised(self):
# Grab sequence string
seq_str = Transcript(self.transcript_id).get_sequence_str()
# Use the ORM to grab all the normalised stuff
results = db_session \
.query(NucleotideMeasurementSet) \
.filter(
NucleotideMeasurementSet.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id,
NucleotideMeasurementSet.transcript_id==self.transcript_id
) \
.all()
measurement_set = results[0]
# TODO detect whether float or int and use the correct unpacker.
# Needed for raw count values download option
unpacked = values_str_unpack_float(measurement_set.values)
# index measurements by pos
measurements = {}
for pos in range(0, len(unpacked)):
value = unpacked[pos]
measurements[pos + 1] = "NA" if value == None else value
# build the output string
buf = ""
n = 0
for n in range(0, len(seq_str)):
pos = n + 1
measurement = "NA" if pos not in measurements else measurements[pos]
buf += str(pos)+"\t"+ \
seq_str[n]+"\t"+ \
str(measurement)+"\n"
n += 1
return buf
# Retrieves the BPPM for this transcript_id
class BppmDownloader():
def fetch(self, transcript_id):
import os
sauce_filepath = settings.bppms_folder+"/"+transcript_id+".bppm"
if not os.path.isfile(sauce_filepath):
return "No BPPM data available for "+transcript_id
buf = ""
# Open the raw BPPM and convert to our simpler format
with open(sauce_filepath, "r") as f:
first = True
for line in f:
if first: # skip the first line, which shows the length
first = False
continue
# add the text for the bppm table
if "Probability" in line: # skip header lines
continue
# extract the data, this will be used for structure BPPMs
bits = line.strip().split("\t")
pos_a = int(bits[0])
pos_b = int(bits[1])
bpp = -float(bits[2])
buf += str(pos_a)+"\t"+str(pos_b)+"\t"+str(bpp)+"\n"
return buf
# OLD method - storing in the database is not a good way to do it
# import zlib, base64
# # fetch from database
# results = db_session \
# .query(Bppm) \
# .filter(Bppm.transcript_id==transcript_id) \
# .all()
# bppm = results[0]
# # decode and return the BPPM
# decoded = base64.b64decode(bppm.data)
# data_txt = zlib.decompress(decoded)
# return data_txt
| mit | 4,064,572,954,846,963,000 | 35.454123 | 137 | 0.550355 | false |
kevgliss/lemur | lemur/plugins/lemur_linuxdst/remote_host.py | 1 | 2482 | #!/usr/bin/python
from lemur.certificates import service
import paramiko
import stat
def copy_cert(cert_cn, dst_user, dst_priv, dst_priv_key, dst_host, dst_port, dst_dir, dst_file, dst_data):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# include the private key password if required
if dst_priv_key is None:
priv_key = paramiko.RSAKey.from_private_key_file(dst_priv)
else:
priv_key = paramiko.RSAKey.from_private_key_file(dst_priv, dst_priv_key)
# open the sftp connection
ssh.connect(dst_host, username=dst_user, port=dst_port, pkey=priv_key)
sftp = ssh.open_sftp()
# make the directory on the destination server
# files will be in a folder based on the cert_cn
# example:
# destination folder: /etc/nginx/certs/
# files will go in: /etc/nginx/certs/your.cn.com/cert.pem
try:
sftp.mkdir(dst_dir)
except IOError:
pass
try:
dst_dir_cn = dst_dir + '/' + cert_cn
sftp.mkdir(dst_dir_cn)
except IOError:
pass
cert_out = sftp.open(dst_dir_cn + '/' + dst_file, 'w')
cert_out.write(dst_data)
cert_out.close()
sftp.chmod(dst_dir_cn + '/' + dst_file, (stat.S_IRUSR))
ssh.close()
def create_cert(name, dst_dir, export_type, dst_user, dst_priv, dst_priv_key, dst_host, dst_host_port):
lem_cert = service.get_by_name(name)
dst_file = 'cert.pem'
chain_req = False
if export_type == 'NGINX':
# This process will result in a cert.pem file with the body and chain in a single file
if lem_cert.chain is None:
dst_data = lem_cert.body
else:
dst_data = lem_cert.body + '\n' + lem_cert.chain
chain_req = False
elif export_type == '3File':
# This process will results in three files. cert.pem, priv.key, chain.pem
dst_data = lem_cert.body
chain_req = True
else:
dst_data = lem_cert.body
copy_cert(lem_cert.cn, dst_user, dst_priv, dst_priv_key, dst_host, dst_host_port, dst_dir, dst_file, dst_data)
if chain_req is True:
dst_file = 'chain.pem'
        dst_data = lem_cert.chain
copy_cert(lem_cert.cn, dst_user, dst_priv, dst_priv_key, dst_host, dst_host_port, dst_dir, dst_file, dst_data)
dst_file = 'priv.key'
dst_data = lem_cert.private_key
copy_cert(lem_cert.cn, dst_user, dst_priv, dst_priv_key, dst_host, dst_host_port, dst_dir, dst_file, dst_data)
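# Minimal usage sketch for this helper; every value below is a placeholder
# chosen for illustration and is not defined by Lemur itself:
#
#   create_cert(
#       name='my-cert-name',                  # Lemur certificate name
#       dst_dir='/etc/nginx/certs',
#       export_type='NGINX',                  # or '3File' for cert/chain/key files
#       dst_user='deploy',
#       dst_priv='/home/deploy/.ssh/id_rsa',
#       dst_priv_key=None,                    # private key passphrase, if any
#       dst_host='web01.example.com',
#       dst_host_port=22)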
| apache-2.0 | 1,856,185,568,808,146,400 | 33 | 118 | 0.631346 | false |
ShipleyCollege/ViPteam1 | GUI/genBlank.py | 1 | 1267 | import sys
sys.path.append('../ExtractAndAnalyzeCode')
import Node
import Pin
'''
Inputs
p1 - output Folder
p2 - Build mode (compact, exploded, explodedLego)
p3 - Title
p4 - Row 1 Col 1 : pinType (exec, data)
p5 - Row 1 Col 1 : text (pin title or "")
p6 - Row 1 Col 2 : pinType
p7 - Row 1 Col 2 : text
p8 - Row 2 Col 1 : pinType
p9 - Row 2 Col 1 : text
p10- Row 2 Col 2 : pinType
p11- Row 2 Col 2 : text
etc.
'''
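# Example invocation following the argument layout above; the folder, mode,
# title and pin values are made up for illustration:
#
#   python genBlank.py ./output compact "My Node" exec "" data "Target" exec "Then" data "Result"
#
# argv[1] is the output folder, argv[2] the build mode, argv[3] the node
# title; every following (pinType, text) pair becomes one pin, and the loop
# at the bottom of this script assigns the pins to alternating sides.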
print( 'Number of arguments:', len(sys.argv), 'arguments.')
print( 'Argument List:', str(sys.argv))
print(" ")
if len(sys.argv) < 4:
print("Error, insuficient parameters. These are expected;\nP1 : Output Folder Name\nP2 : Build Mode\nP3 : Title\nP4-n : [Pin Type, Pin Name] * n")
sys.exit(0)
outputFolder = sys.argv[1]
node = Node.Node(sys.argv[3], sys.argv[2])
pins = []
for c in range(4, len(sys.argv), 2):
if c >= len(sys.argv):
continue
if sys.argv[c] == 0:
continue
if (c+1) == len(sys.argv):
continue
pin = Pin.Pin(sys.argv[c+1], sys.argv[c]) # name , type
print("Pin : " + str(pin))
pins.append(pin)
pins.reverse() # make sure the pins are the correct way round
for c in range(len(pins)):
if c % 2:
node.addPin(pins[c], "Left")
else:
node.addPin(pins[c], "Right")
print(node)
node.writeNode("0", outputFolder)
| gpl-3.0 | -3,516,996,449,689,524,700 | 20.116667 | 147 | 0.64562 | false |
OthmanEmpire/project_monies | test/system/test_int_santander.py | 1 | 1060 | import os
import unittest
import datetime
import pandas as pd
from pandas.util.testing import assert_frame_equal
import monies.monies.santander as san
# Declaring test resource path
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(THIS_DIR, "res")
class SantanderInt(unittest.TestCase):
def testParseSantanderFile(self):
header = ["DATES", "BALANCE", "AMOUNT", "DESCRIPTION"]
body = \
[
[datetime.datetime(2012, 12, 29),
3472.63,
-10.45,
"CARD PAYMENT TO WWW.JUST EAT.CO.UK,10.45 GBP, "
"RATE 1.00/GBP ON 26-12-2012"],
[datetime.datetime(2012, 12, 29),
3472.63,
-10.45,
"CARD PAYMENT TO WWW.JUST EAT.CO.UK,10.45 GBP, "
"RATE 1.00/GBP ON 26-12-2012"],
]
inp = pd.DataFrame(body, columns=header)
iPath = os.path.join(DATA_PATH, "san_input.txt")
out = san.readFile(iPath)
out = san.parse(out)
assert_frame_equal(inp, out)
| mit | -574,953,423,522,849,150 | 27.648649 | 62 | 0.579245 | false |
Tonyll/MyCode | EMDemo/tools/EMReplace.py | 1 | 9844 |
# -*- coding: utf-8 -*-
__author__ = "xyjxyf"
"替换枚举类型"
import os
import re
import sys
walk_path = sys.argv[1]
# Dictionary of replacements: key -> old value, value -> new value
replace_dic = {
#EMClient
'"EaseMob.h"': '"EMClient.h"',
'\[EaseMob sharedInstance\]': '[EMClient shareClient]',
'IChatManager': 'IEMChatManager',
'EMCommandMessageBody': 'EMCmdMessageBody',
'IChatManagerDelegate': 'EMChatManagerDelegate',
#Group
'"EMGroupStyleSetting.h"': '"EMGroupOptions.h"',
'EMGroupStyleSetting': 'EMGroupOptions',
'.groupSubject': '.subject',
'.groupDescription': '.description',
'.groupOccupantsCount': '.occupantsCount',
'.groupSetting': '.setting',
'.groupStyle': '.style',
'.groupMaxUsersCount': '.maxUsersCount',
'eGroupStyle_PrivateOnlyOwnerInvite': 'EMGroupStylePrivateOnlyOwnerInvite',
'eGroupStyle_PrivateMemberCanInvite': 'EMGroupStylePrivateMemberCanInvite',
'eGroupStyle_PublicJoinNeedApproval': 'EMGroupStylePublicJoinNeedApproval',
'eGroupStyle_PublicOpenJoin': 'EMGroupStylePublicOpenJoin',
'eGroupStyle_Default': 'EMGroupStylePrivateOnlyOwnerInvite',
'eGroupLeaveReason_BeRemoved': 'EMGroupLeaveReasonBeRemoved',
'eGroupLeaveReason_UserLeave': 'EMGroupLeaveReasonUserLeave',
'eGroupLeaveReason_Destroyed': 'EMGroupLeaveReasonDestroyed',
'fetchMyGroupsListWithError:': 'getMyGroupsFromServerWithError:',
'chatManager destroyGroup:': 'groupManager leaveGroup:',
'chatManager leaveGroup:': 'groupManager leaveGroup:',
'chatManager addOccupants:': 'groupManager addOccupants:',
'chatManager removeOccupants:': 'groupManager removeOccupants:',
'chatManager blockOccupants:': 'groupManager blockOccupants:',
'chatManager unblockOccupants:': 'groupManager unblockOccupants:',
'chatManager changeGroupSubject:': 'groupManager changeGroupSubject:',
'chatManager changeDescription:': 'groupManager changeDescription:',
'chatManager fetchGroupBansList:': 'groupManager fetchGroupBansList:',
'chatManager joinPublicGroup:': 'groupManager joinPublicGroup:',
'chatManager searchPublicGroupWithGroupId:': 'groupManager searchPublicGroupWithGroupId:',
#Contact
'didReceiveBuddyRequest:': 'didReceiveFriendInvitationFromUsername:',
'didAcceptedByBuddy:': 'didReceiveAgreedFromUsername:',
'didRejectedByBuddy:': 'didReceiveDeclinedFromUsername:',
'didRemovedByBuddy:': 'didReceiveDeletedFromUsernames:',
#Chat
'.messageBodyType': '.type',
'.attachmentDownloadStatus': '.downloadStatus',
'.chatter': '.conversationId',
'.conversationType': '.type',
'.conversationChatter': '.conversationId',
'.groupSenderName': '.from',
'.deliveryState': '.status',
'.messageType': '.chatType',
'.chatId': '.messageId',
'id<IEMMessageBody>': 'EMMessageBody',
'removeMessageWithId:': 'deleteMessageWithId:',
'removeAllMessages': 'deleteAllMessages',
'MessageBodyType': 'EMMessageBodyType',
'eMessageBodyType_Text': 'EMMessageBodyTypeText',
'eMessageBodyType_Image': 'EMMessageBodyTypeImage',
'eMessageBodyType_Video': 'EMMessageBodyTypeVideo',
'eMessageBodyType_Location': 'EMMessageBodyTypeLocation',
'eMessageBodyType_Voice': 'EMMessageBodyTypeVoice',
'eMessageBodyType_File': 'EMMessageBodyTypeFile',
'eMessageBodyType_Command': 'EMMessageBodyTypeCmd',
'EMAttachmentDownloadStatus': 'EMDownloadStatus',
'EMAttachmentDownloading': 'EMDownloadStatusDownloading',
'EMAttachmentDownloadSuccessed': 'EMDownloadStatusSuccessed',
'EMAttachmentDownloadFailure': 'EMDownloadStatusFailed',
'EMAttachmentNotStarted': 'EMDownloadStatusPending',
'eConversationTypeChat': 'EMConversationTypeChat',
'eConversationTypeGroupChat': 'EMConversationTypeGroupChat',
'eConversationTypeChatRoom': 'EMConversationTypeChatRoom',
'EMMessageType': 'EMChatType',
'eMessageTypeChat': 'EMChatTypeChat',
'eMessageTypeGroupChat': 'EMChatTypeGroupChat',
'eMessageTypeChatRoom': 'EMChatTypeChatRoom',
'MessageDeliveryState': 'EMMessageStatus',
'eMessageDeliveryState_Pending': 'EMMessageStatusPending',
'eMessageDeliveryState_Delivering': 'EMMessageStatusDelivering',
'eMessageDeliveryState_Delivered': 'EMMessageStatusSuccessed',
'eMessageDeliveryState_Failure': 'EMMessageStatusFailed',
#ChatRoom
'.chatroomSubject': '.subject',
'.chatroomDescription': '.description',
'.chatroomMaxOccupantsCount': '.maxOccupantsCount',
'eChatroomBeKickedReason_BeRemoved': 'EMChatroomBeKickedReasonBeRemoved',
'eChatroomBeKickedReason_Destroyed': 'EMChatroomBeKickedReasonDestroyed',
'beKickedOutFromChatroom:': 'didReceiveKickedFromChatroom:',
#Call
'.sessionChatter': '.remoteUsername',
'asyncAnswerCall:': 'answerCall:',
'asyncEndCall:': 'endCall:',
'eCallSessionStatusDisconnected': 'EMCallSessionStatusDisconnected',
'eCallSessionStatusRinging': 'EMCallSessionStatusRinging',
'eCallSessionStatusAnswering': 'EMCallSessionStatusConnecting',
'eCallSessionStatusPausing': 'EMCallSessionStatusConnecting',
'eCallSessionStatusConnecting': 'EMCallSessionStatusConnecting',
'eCallSessionStatusConnected': 'EMCallSessionStatusConnected',
'eCallSessionStatusAccepted': 'EMCallSessionStatusAccepted',
'eCallConnectTypeNone': 'EMCallConnectTypeNone',
'eCallConnectTypeDirect': 'EMCallConnectTypeDirect',
'eCallConnectTypeRelay': 'EMCallConnectTypeRelay',
'EMCallSessionType': 'EMCallType',
'eCallSessionTypeAudio': 'EMCallTypeVoice',
'eCallSessionTypeVideo': 'EMCallTypeVideo',
'eCallSessionTypeContent': 'EMCallTypeVoice',
'EMCallStatusChangedReason': 'EMCallEndReason',
'eCallReasonNull': 'EMCallEndReasonHangup',
'eCallReasonOffline': 'EMCallEndReasonNoResponse',
'eCallReasonNoResponse': 'EMCallEndReasonNoResponse',
'eCallReasonHangup': 'EMCallEndReasonHangup',
'eCallReasonReject': 'EMCallEndReasonDecline',
'eCallReasonBusy': 'EMCallEndReasonBusy',
'eCallReasonFailure': 'EMCallEndReasonFailed',
'eCallReason_Null': 'EMCallEndReasonHangup',
'eCallReason_Offline': 'EMCallEndReasonNoResponse',
'eCallReason_NoResponse': 'EMCallEndReasonNoResponse',
'eCallReason_Hangup': 'EMCallEndReasonHangup',
'eCallReason_Reject': 'EMCallEndReasonReject',
'eCallReason_Busy': 'EMCallEndReasonBusy',
'eCallReason_Failure': 'EMCallEndReasonFailed',
#Apns
'"EMPushNotificationOptions.h"': '"EMPushOptions.h"',
'EMPushNotificationOptions': 'EMPushOptions',
#Error
'.errorCode': '.code',
'.description': '.domain',
'EMErrorType': 'EMErrorCode',
'EMErrorNotFound': 'EMErrorNotExist',
# 'EMErrorServerMaxCountExceeded': '',
'EMErrorConfigInvalidAppKey': 'EMErrorInvalidAppkey',
'EMErrorServerAuthenticationFailure': 'EMErrorUserAuthenticationFailed',
'EMErrorServerAPNSRegistrationFailure': 'EMErrorApnsBindDeviceTokenFailed',
'EMErrorServerDuplicatedAccount': 'EMErrorUserAlreadyExist',
'EMErrorServerInsufficientPrivilege': 'EMErrorUserIllegalArgument',
'EMErrorServerTooManyOperations': 'EMErrorServerBusy',
'EMErrorAttachmentNotFound': 'EMErrorFileNotFound',
'EMErrorAttachmentUploadFailure': 'EMErrorFileUploadFailed',
'EMErrorIllegalURI': 'EMErrorInvalidURL',
'EMErrorMessageInvalid_NULL': 'EMErrorMessageInvalid',
'EMErrorMessageContainSensitiveWords': 'EMErrorMessageIncludeIllegalSpeech',
'EMErrorGroupInvalidID_NULL': 'EMErrorGroupInvalidId',
'EMErrorGroupJoined': 'EMErrorGroupAlreadyJoined',
'EMErrorGroupJoinNeedRequired': 'EMErrorGroupPermissionDenied',
# 'EMErrorGroupFetchInfoFailure': '',
# 'EMErrorGroupInvalidRequired': '',
# 'EMErrorGroupInvalidSubject_NULL': '',
# 'EMErrorGroupAddOccupantFailure': '',
'EMErrorInvalidUsername_NULL': 'EMErrorInvalidUsername',
'EMErrorInvalidUsername_Chinese': 'EMErrorInvalidUsername',
'EMErrorInvalidPassword_NULL': 'EMErrorInvalidPassword',
'EMErrorInvalidPassword_Chinese': 'EMErrorInvalidPassword',
# 'EMErrorApnsInvalidOption': '',
# 'EMErrorHasFetchedBuddyList': '',
# 'EMErrorBlockBuddyFailure': '',
# 'EMErrorUnblockBuddyFailure': '',
'EMErrorCallConnectFailure': 'EMErrorCallConnectFailed',
# 'EMErrorExisted': '',
# 'EMErrorInitFailure': '',
'EMErrorNetworkNotConnected': 'EMErrorNerworkUnavailable',
'EMErrorFailure': 'EMErrorGeneral',
# 'EMErrorFeatureNotImplemented': '',
# 'EMErrorRequestRefused': '',
'EMErrorChatroomInvalidID_NULL': 'EMErrorChatroomInvalidId',
'EMErrorChatroomJoined': 'EMErrorChatroomAlreadyJoined',
# 'EMErrorReachLimit': '',
# 'EMErrorOutOfRateLimited': '',
# 'EMErrorGroupOccupantsReachLimit': '',
# 'EMErrorTooManyLoginRequest': '',
# 'EMErrorTooManyLogoffRequest': '',
# 'EMErrorPermissionFailure': '',
# 'EMErrorIsExist': '',
# 'EMErrorPushNotificationInvalidOption': '',
# 'EMErrorCallChatterOffline': '',
}
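# Example of the intended effect, taken from the table above: a source line
# containing
#     [[EaseMob sharedInstance] chatManager]
# is rewritten in place by check_main() below to
#     [[EMClient shareClient] chatManager]
# (the dictionary keys are regular expressions, hence the escaped brackets).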
def check_main(root_path):
for root, dirs, files in os.walk(root_path):
for file_path in files:
if file_path.endswith('.m') or file_path.endswith('.h') or file_path.endswith('.pch'):
full_path = os.path.join(root, file_path)
                # do not scan third-party libraries pulled in via Pods
if 'Pods/' in full_path:
break
fr = open(full_path, 'r')
content = fr.read()
fr.close()
for key in replace_dic:
match = re.search(key, content)
if match:
                        # perform the replacement
content = re.sub(key, replace_dic[key], content);
                    # write the modified content back to the file
open(full_path,'w').write(content)
if __name__ == '__main__':
check_main(walk_path)
| mit | -8,191,097,917,055,163,000 | 44.896714 | 98 | 0.722074 | false |
gogoprog/gengine | scripts/emscripten.py | 1 | 1275 | #!/usr/bin/python3
import platform
import os
import sys
import argparse
import multiprocessing
import os.path
import common
def emcc(appDir, outputDir, includeEmptyData):
previous_dir = os.getcwd()
os.chdir(os.environ['GENGINE']+"/build")
cmd = "emcc "
cmd += "" if common.debugMode else "-O3"
cmd += " --bind gengine" + ('d' if common.debugMode else '') + ".bc"
cmd += " -o " + outputDir + "/index.html"
cmd += " --preload-file " + common.rootPath + "/res/coreData@coreData"
if includeEmptyData:
cmd += " --preload-file " + common.rootPath + "/res/data@data "
else:
cmd += " --preload-file " + appDir + "/data@data "
cmd += " --use-preload-plugins -s TOTAL_MEMORY=134217728 -s TOTAL_STACK=1048576"
cmd += " --shell-file " + common.rootPath + "/src/shell.html"
os.system(cmd)
os.chdir(previous_dir)
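# For reference, a release-mode build (the includeEmptyData=True case used by
# build() below) shells out to roughly this single command; the bracketed
# paths are placeholders for $GENGINE, common.rootPath and the arguments:
#
#   emcc -O3 --bind gengine.bc -o <outputDir>/index.html \
#        --preload-file <root>/res/coreData@coreData \
#        --preload-file <root>/res/data@data \
#        --use-preload-plugins -s TOTAL_MEMORY=134217728 -s TOTAL_STACK=1048576 \
#        --shell-file <root>/src/shell.html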
def build(appDir, outputDir):
common.log("Running emcc...")
current_dir = os.getcwd()
os.chdir(appDir)
os.system("rm -rf index.data index.html index.js index.html.mem")
emcc(current_dir, outputDir, True)
os.chdir(current_dir)
def runServer(targetDir):
os.chdir(targetDir)
common.log("Running HTTP server in '" + targetDir + "'...")
os.system("python -m http.server");
| mit | -973,317,280,959,787,900 | 31.692308 | 84 | 0.63451 | false |
bijilap/NER-Tagger | netrain.py | 1 | 2159 | import sys
import subprocess
class netrain:
feature_set={}
features_fname='ne.in'
model_fname="ne.model"
def __init__(self,mname):
self.model_fname=mname
def read_training_file(self,fname):
f=open(fname,'r')
fout=open(self.features_fname,'w')
for line in f:
#print line
pword='BOS' #previous word
ppostag='BOS' #previous POS tag
nword='EOS' #next word
npostag='EOS'
pnetag='None' #previous netag
pwprefix='None'
wprefix='None'
nwprefix='None'
words_tags=line.split()
for i in range(len(words_tags)):
#print words_tags[i]+' '+str(len(words_tags[i].split('/')))
#if len(words_tags[i].split('/'))>3:
#print 'here'
#continue
word_list=words_tags[i].split('/')
postag=word_list[len(word_list)-2]
netag=word_list[len(word_list)-1]
word=words_tags[i][:len(words_tags[i])-((len(postag)+len(netag))+2)]
#(word,postag,netag)=
wprefix=word[0]
#word=word+'/'+postag
#print word+" "+tag
if i+1>=len(words_tags):
nword='EOS'
npostag='EOS'
nwprefix='None'
else:
word_list=words_tags[i+1].split('/')
npostag=word_list[len(word_list)-2]
nword=words_tags[i+1][:len(words_tags[i+1])-((len(word_list[len(word_list)-2])+len(word_list[len(word_list)-1]))+2)]
#nwprefix=nword[0]
feature=netag+" "+"pw:"+str(pword)+" w:"+str(word)+" nw:"+str(nword)+" pnetag:"+str(pnetag)+" ppostag:"+str(ppostag)+" postag:"+str(postag)+ " npostag:"+str(npostag)+'\n'
#print feature
pnetag=netag
pword=word
ppostag=postag
#pwprefix=pword[0]
fout.write(feature)
#print feature
f.close()
        fout.close()
def learn(self):
subprocess.call('python ./perceplearn.py '+self.features_fname+' '+self.model_fname+' -i 20',shell=True)
fname=sys.argv[1]
mname=sys.argv[2]
pobj=netrain(mname)
pobj.read_training_file(fname)
pobj.learn()
pobj.read_training_file(fname)
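# The intermediate file 'ne.in' written by read_training_file() holds one
# training instance per token in the shape assembled above; the tokens and
# tags in this example are invented for illustration:
#
#   B-PER pw:said w:John nw:Smith pnetag:O ppostag:VBD postag:NNP npostag:NNP
#
# i.e. the gold NE tag first, then the previous/current/next words, the
# previous NE tag and the surrounding POS tags; learn() then trains the
# perceptron by running perceplearn.py on this file.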
| apache-2.0 | -3,337,249,659,875,602,000 | 30.75 | 186 | 0.557202 | false |
sebastian-ecki-eckstein/kreuzschiene | client/shell/tcp-switch-client.py | 1 | 4797 | #!/usr/bin/env python
import socket
class kreuz_tcp_client:
def __init__(self,ip='127.0.0.1',port=4242):
        self.TCP_IP = ip
        self.TCP_PORT = port
        self.BUFFER_SIZE = 1024
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.TCP_IP, self.TCP_PORT))
self.length = -1
self.output = []
self.outputname = []
self.inputname = []
ergebnis = self.f_get_data()
self.output = ergebnis[0]
self.outputname = ergebnis[1]
self.inputname = ergebnis[2]
def f_get_data(self):
self.sock.send("GET:DATA:".encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
datastr = data.decode(encoding='UTF-8',errors='ignore')
splitted = datastr.split(':')
output = []
outputname = []
inputname = []
if len(splitted)>3:
anzahl = int(splitted[2])
else:
return False
if self.length == -1:
self.length = anzahl
i = 0
while i < self.length:
output.append(0)
outputname.append('out_'+str(i))
inputname.append('in_'+str(i))
i = i + 1
if anzahl != self.length:
return False
if len(splitted)<((self.length*3)+3):
return False
i = 0
while i < self.length:
output[i] = splitted[3+i]
outputname[i] = splitted[(i+self.length)+3]
inputname[i] = splitted[i+(self.length*2)+3]
i = i + 1
return [output,outputname,inputname]
def f_set_output_name(self,number,name):
print("set output name")
if int(number) > self.length:
return False
sendstr = "SET:PORT:O"+str(number)+":"+name
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
datastr = data.decode(encoding='UTF-8',errors='ignore')
splitted = datastr.split(':')
if splitted[0] == "NACK":
return False
ergebnis = self.f_get_data()
self.outputname = ergebnis[1]
return True
def f_set_input_name(self,number,name):
print("set input name")
if int(number) > self.length:
return False
sendstr = "SET:PORT:I"+str(number)+":"+name
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
datastr = data.decode(encoding='UTF-8',errors='ignore')
splitted = datastr.split(':')
if splitted[0] == "NACK":
return False
ergebnis = self.f_get_data()
self.inputname = ergebnis[2]
return True
def f_set_output(self,outnum,innum):
print("set output input")
if int(outnum) > self.length or int(innum) > self.length:
return False
sendstr = "SET:PORT:O"+str(outnum)+":I"+str(innum)
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
datastr = data.decode(encoding='UTF-8',errors='ignore')
splitted = datastr.split(':')
if splitted[0] == "NACK":
return False
ergebnis = self.f_get_data()
self.output = ergebnis[0]
return True
def f_update(self):
ergebnis = self.f_get_data()
self.output = ergebnis[0]
self.outputname = ergebnis[1]
self.inputname = ergebnis[2]
return True
def f_load(self,name):
print("load config")
sendstr = "SET:LOAD:"+str(name)
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
datastr = data.decode(encoding='UTF-8',errors='ignore')
splitted = datastr.split(':')
if splitted[0] == "NACK":
return False
self.f_update()
return True
def f_save(self,name):
print("save config")
sendstr = "SET:SAVE:"+str(name)
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
datastr = data.decode(encoding='UTF-8',errors='ignore')
splitted = datastr.split(':')
if splitted[0] == "NACK":
return False
return True
def f_get_config(self):
print("get config names")
sendstr = "GET:CONFIG:"
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
return "test"
def f_lock(self,locker):
print("lock/unlock")
sendstr = "SET:LOCK:"+str(locker)
self.sock.send(sendstr.encode('UTF-8'))
data = self.sock.recv(self.BUFFER_SIZE)
return True
def end(self):
self.sock.close()
if __name__ == '__main__':
print("start client")
| apache-2.0 | -5,766,154,938,342,356,000 | 31.632653 | 69 | 0.543673 | false |
DreamSourceLab/DSView | libsigrokdecode4DSL/decoders/usb_power_delivery/pd.py | 1 | 20611 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2015 Google, Inc
## Copyright (C) 2018 davidanger <[email protected]>
## Copyright (C) 2018 Peter Hazenberg <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
import struct
import zlib # for crc32
# BMC encoding with a 600kHz datarate
UI_US = 1000000/600000.0
# Threshold to discriminate a half-1 from a 0 in Biphase Mark Coding (BMC)
THRESHOLD_US = (UI_US + 2 * UI_US) / 2
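# Worked numbers for the two constants above (a reading aid only, rounded):
#   UI_US        = 1000000 / 600000        ~ 1.67 us (one unit interval)
#   THRESHOLD_US = (UI_US + 2 * UI_US) / 2 = 1.5 UI  ~ 2.50 us
# A BMC '0' holds its level for about 2 UI between edges (~3.3 us) while a '1'
# adds a mid-bit transition, i.e. about 1 UI (~1.7 us) between edges, so in
# decode() an edge spacing longer than THRESHOLD_US is read as a '0' and a
# shorter one as half of a '1'.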
# Control Message type
CTRL_TYPES = {
0: 'reserved',
1: 'GOOD CRC',
2: 'GOTO MIN',
3: 'ACCEPT',
4: 'REJECT',
5: 'PING',
6: 'PS RDY',
7: 'GET SOURCE CAP',
8: 'GET SINK CAP',
9: 'DR SWAP',
10: 'PR SWAP',
11: 'VCONN SWAP',
12: 'WAIT',
13: 'SOFT RESET',
14: 'reserved',
15: 'reserved',
16: 'Not Supported',
17: 'Get_Source_Cap_Extended',
18: 'Get_Status',
19: 'FR_Swap',
20: 'Get_PPS_Status',
21: 'Get_Country_Codes',
}
# Data message type
DATA_TYPES = {
1: 'SOURCE CAP',
2: 'REQUEST',
3: 'BIST',
4: 'SINK CAP',
5: 'Battery_Status',
6: 'Alert',
7: 'Get_Country_Info',
15: 'VDM'
}
# 4b5b encoding of the symbols
DEC4B5B = [
0x10, # Error 00000
0x10, # Error 00001
0x10, # Error 00010
0x10, # Error 00011
0x10, # Error 00100
0x10, # Error 00101
0x13, # Sync-3 00110
0x14, # RST-1 00111
0x10, # Error 01000
0x01, # 1 = 0001 01001
0x04, # 4 = 0100 01010
0x05, # 5 = 0101 01011
0x10, # Error 01100
0x16, # EOP 01101
0x06, # 6 = 0110 01110
0x07, # 7 = 0111 01111
0x10, # Error 10000
0x12, # Sync-2 10001
0x08, # 8 = 1000 10010
0x09, # 9 = 1001 10011
0x02, # 2 = 0010 10100
0x03, # 3 = 0011 10101
0x0A, # A = 1010 10110
0x0B, # B = 1011 10111
0x11, # Sync-1 11000
0x15, # RST-2 11001
0x0C, # C = 1100 11010
0x0D, # D = 1101 11011
0x0E, # E = 1110 11100
0x0F, # F = 1111 11101
0x00, # 0 = 0000 11110
0x10, # Error 11111
]
SYM_ERR = 0x10
SYNC1 = 0x11
SYNC2 = 0x12
SYNC3 = 0x13
RST1 = 0x14
RST2 = 0x15
EOP = 0x16
SYNC_CODES = [SYNC1, SYNC2, SYNC3]
HRST_CODES = [RST1, RST1, RST1, RST2]
SOP_SEQUENCES = [
(SYNC1, SYNC1, SYNC1, SYNC2),
(SYNC1, SYNC1, SYNC3, SYNC3),
(SYNC1, SYNC3, SYNC1, SYNC3),
(SYNC1, RST2, RST2, SYNC3),
(SYNC1, RST2, SYNC3, SYNC2),
(RST1, SYNC1, RST1, SYNC3),
(RST1, RST1, RST1, RST2),
]
START_OF_PACKETS = {
SOP_SEQUENCES[0]: 'SOP',
SOP_SEQUENCES[1]: "SOP'",
SOP_SEQUENCES[2]: 'SOP"',
SOP_SEQUENCES[3]: "SOP' Debug",
SOP_SEQUENCES[4]: 'SOP" Debug',
SOP_SEQUENCES[5]: 'Cable Reset',
SOP_SEQUENCES[6]: 'Hard Reset',
}
SYM_NAME = [
['0x0', '0'],
['0x1', '1'],
['0x2', '2'],
['0x3', '3'],
['0x4', '4'],
['0x5', '5'],
['0x6', '6'],
['0x7', '7'],
['0x8', '8'],
['0x9', '9'],
['0xA', 'A'],
['0xB', 'B'],
['0xC', 'C'],
['0xD', 'D'],
['0xE', 'E'],
['0xF', 'F'],
['ERROR', 'X'],
['SYNC-1', 'S1'],
['SYNC-2', 'S2'],
['SYNC-3', 'S3'],
['RST-1', 'R1'],
['RST-2', 'R2'],
['EOP', '#'],
]
RDO_FLAGS = {
(1 << 23): 'unchunked',
(1 << 24): 'no_suspend',
(1 << 25): 'comm_cap',
(1 << 26): 'cap_mismatch',
(1 << 27): 'give_back'
}
BIST_MODES = {
0: 'Receiver',
1: 'Transmit',
2: 'Counters',
3: 'Carrier 0',
4: 'Carrier 1',
5: 'Carrier 2',
6: 'Carrier 3',
7: 'Eye',
}
VDM_CMDS = {
1: 'Disc Ident',
2: 'Disc SVID',
3: 'Disc Mode',
4: 'Enter Mode',
5: 'Exit Mode',
6: 'Attention',
# 16..31: SVID Specific Commands
# DisplayPort Commands
16: 'DP Status',
17: 'DP Configure',
}
VDM_ACK = ['REQ', 'ACK', 'NAK', 'BSY']
class SamplerateError(Exception):
pass
class Decoder(srd.Decoder):
api_version = 3
id = 'usb_power_delivery'
name = 'USB PD'
longname = 'USB Power Delivery'
desc = 'USB Power Delivery protocol.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['usb_pd']
tags = ['PC']
channels = (
{'id': 'cc1', 'name': 'CC1', 'desc': 'Configuration Channel 1'},
)
optional_channels = (
{'id': 'cc2', 'name': 'CC2', 'desc': 'Configuration Channel 2'},
)
options = (
{'id': 'fulltext', 'desc': 'Full text decoding of packets',
'default': 'no', 'values': ('yes', 'no')},
)
annotations = (
('type', 'Packet Type'),
('preamble', 'Preamble'),
('sop', 'Start of Packet'),
('header', 'Header'),
('data', 'Data'),
('crc', 'Checksum'),
('eop', 'End Of Packet'),
('sym', '4b5b symbols'),
('warnings', 'Warnings'),
('src', 'Source Message'),
('snk', 'Sink Message'),
('payload', 'Payload'),
('text', 'Plain text'),
)
annotation_rows = (
('4b5b', 'Symbols', (7,)),
('phase', 'Parts', (1, 2, 3, 4, 5, 6)),
('payload', 'Payload', (11,)),
('type', 'Type', (0, 9, 10)),
('warnings', 'Warnings', (8,)),
('text', 'Full text', (12,)),
)
binary = (
('raw-data', 'RAW binary data'),
)
stored_pdos = {}
def get_request(self, rdo):
pos = (rdo >> 28) & 7
op_ma = ((rdo >> 10) & 0x3ff) * 0.01
max_ma = (rdo & 0x3ff) * 0.01
mark = self.cap_mark[pos]
if mark == 3:
op_v = ((rdo >> 9) & 0x7ff) * 0.02
op_a = (rdo & 0x3f) * 0.05
t_settings = '%gV %gA' % (op_v, op_a)
elif mark == 2:
op_w = ((rdo >> 10) & 0x3ff) * 0.25
mp_w = (rdo & 0x3ff) * 0.25
t_settings = '%gW (operating)' % op_w
else:
op_a = ((rdo >> 10) & 0x3ff) * 0.01
max_a = (rdo & 0x3ff) * 0.01
t_settings = '%gA (operating) / %gA (max)' % (op_a, max_a)
t_flags = ''
for f in sorted(RDO_FLAGS.keys(), reverse = True):
if rdo & f:
t_flags += ' [' + RDO_FLAGS[f] + ']'
if pos in self.stored_pdos.keys():
t_pdo = '#%d: %s' % (pos, self.stored_pdos[pos])
else:
t_pdo = '#%d' % (pos)
return '(PDO %s) %s%s' % (t_pdo, t_settings, t_flags)
def get_source_sink_cap(self, pdo, idx, source):
t1 = (pdo >> 30) & 3
self.cap_mark[idx] = t1
flags = {}
if t1 == 0:
t_name = 'Fixed'
if source:
flags = {
(1 << 29): 'dual_role_power',
(1 << 28): 'suspend',
(1 << 27): 'unconstrained',
(1 << 26): 'comm_cap',
(1 << 25): 'dual_role_data',
(1 << 24): 'unchunked',
}
else: # Sink
flags = {
(1 << 29): 'dual_role_power',
(1 << 28): 'high_capability',
(1 << 27): 'unconstrained',
(1 << 26): 'comm_cap',
(1 << 25): 'dual_role_data',
(0b01 << 23): 'fr_swap default power',
(0b10 << 23): 'fr_swap 1.5 A',
(0b11 << 23): 'fr_swap 3.0 A',
}
mv = ((pdo >> 10) & 0x3ff) * 0.05
ma = ((pdo >> 0) & 0x3ff) * 0.01
p = '%gV %gA (%gW)' % (mv, ma, mv*ma)
self.stored_pdos[idx] = '%s %gV' % (t_name, mv)
elif t1 == 1:
t_name = 'Battery'
flags = {} # No flags defined for Battery PDO in PD 3.0 spec
minv = ((pdo >> 10) & 0x3ff) * 0.05
maxv = ((pdo >> 20) & 0x3ff) * 0.05
mw = ((pdo >> 0) & 0x3ff) * 0.25
p = '%g/%gV %gW' % (minv, maxv, mw)
self.stored_pdos[idx] = '%s %g/%gV' % (t_name, minv, maxv)
elif t1 == 2:
t_name = 'Variable'
flags = {} # No flags defined for Variable PDO in PD 3.0 spec
minv = ((pdo >> 10) & 0x3ff) * 0.05
maxv = ((pdo >> 20) & 0x3ff) * 0.05
ma = ((pdo >> 0) & 0x3ff) * 0.01
p = '%g/%gV %gA' % (minv, maxv, ma)
self.stored_pdos[idx] = '%s %g/%gV' % (t_name, minv, maxv)
elif t1 == 3:
t2 = (pdo >> 28) & 3
if t2 == 0:
t_name = 'Programmable|PPS'
flags = {
(1 << 29): 'power_limited',
}
minv = ((pdo >> 8) & 0xff) * 0.1
maxv = ((pdo >> 17) & 0xff) * 0.1
ma = ((pdo >> 0) & 0xff) * 0.05
p = '%g/%gV %gA' % (minv, maxv, ma)
if (pdo >> 27) & 0x1:
p += ' [limited]'
self.stored_pdos[idx] = '%s %g/%gV' % (t_name, minv, maxv)
else:
t_name = 'Reserved APDO: '+bin(t2)
p = '[raw: %s]' % (bin(pdo))
self.stored_pdos[idx] = '%s %s' % (t_name, p)
t_flags = ''
for f in sorted(flags.keys(), reverse = True):
if pdo & f:
t_flags += ' [' + flags[f] + ']'
return '[%s] %s%s' % (t_name, p, t_flags)
def get_vdm(self, idx, data):
if idx == 0: # VDM header
vid = data >> 16
struct = data & (1 << 15)
txt = 'VDM'
if struct: # Structured VDM
cmd = data & 0x1f
src = data & (1 << 5)
ack = (data >> 6) & 3
pos = (data >> 8) & 7
ver = (data >> 13) & 3
txt = VDM_ACK[ack] + ' '
txt += VDM_CMDS[cmd] if cmd in VDM_CMDS else 'cmd?'
txt += ' pos %d' % (pos) if pos else ' '
else: # Unstructured VDM
txt = 'unstruct [%04x]' % (data & 0x7fff)
txt += ' SVID:%04x' % (vid)
else: # VDM payload
txt = 'VDO:%08x' % (data)
return txt
def get_bist(self, idx, data):
mode = data >> 28
counter = data & 0xffff
mode_name = BIST_MODES[mode] if mode in BIST_MODES else 'INVALID'
if mode == 2:
mode_name = 'Counter[= %d]' % (counter)
# TODO: Check all 0 bits are 0 / emit warnings.
return 'mode %s' % (mode_name) if idx == 0 else 'invalid BRO'
def putpayload(self, s0, s1, idx):
t = self.head_type()
txt = '['+str(idx+1)+'] '
if t == 2:
txt += self.get_request(self.data[idx])
elif t == 1 or t == 4:
txt += self.get_source_sink_cap(self.data[idx], idx+1, t==1)
elif t == 15:
txt += self.get_vdm(idx, self.data[idx])
elif t == 3:
txt += self.get_bist(idx, self.data[idx])
self.putx(s0, s1, [11, [txt, txt]])
self.text += ' - ' + txt
def puthead(self):
ann_type = 9 if self.head_power_role() else 10
role = 'SRC' if self.head_power_role() else 'SNK'
if self.head_data_role() != self.head_power_role():
role += '/DFP' if self.head_data_role() else '/UFP'
t = self.head_type()
if self.head_count() == 0:
shortm = CTRL_TYPES[t]
else:
shortm = DATA_TYPES[t] if t in DATA_TYPES else 'DAT???'
longm = '(r{:d}) {:s}[{:d}]: {:s}'.format(self.head_rev(), role, self.head_id(), shortm)
self.putx(0, -1, [ann_type, [longm, shortm]])
self.text += longm
def head_id(self):
return (self.head >> 9) & 7
def head_power_role(self):
return (self.head >> 8) & 1
def head_data_role(self):
return (self.head >> 5) & 1
def head_rev(self):
return ((self.head >> 6) & 3) + 1
def head_type(self):
return self.head & 0xF
def head_count(self):
return (self.head >> 12) & 7
def putx(self, s0, s1, data):
self.put(self.edges[s0], self.edges[s1], self.out_ann, data)
def putwarn(self, longm, shortm):
self.putx(0, -1, [8, [longm, shortm]])
def compute_crc32(self):
bdata = struct.pack('<H'+'I'*len(self.data), self.head & 0xffff,
*tuple([d & 0xffffffff for d in self.data]))
return zlib.crc32(bdata)
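    # Note: the frame CRC is computed over the 16-bit header followed by each
    # 32-bit data object, packed little-endian as above; zlib.crc32() is
    # assumed here to implement the same CRC-32 polynomial that USB PD uses.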
def rec_sym(self, i, sym):
self.putx(i, i+5, [7, SYM_NAME[sym]])
def get_sym(self, i, rec=True):
v = (self.bits[i] | (self.bits[i+1] << 1) | (self.bits[i+2] << 2) |
(self.bits[i+3] << 3) | (self.bits[i+4] << 4))
sym = DEC4B5B[v]
if rec:
self.rec_sym(i, sym)
return sym
def get_short(self):
i = self.idx
# Check it's not a truncated packet.
if len(self.bits) - i <= 20:
self.putwarn('Truncated', '!')
return 0x0BAD
k = [self.get_sym(i), self.get_sym(i+5),
self.get_sym(i+10), self.get_sym(i+15)]
# TODO: Check bad symbols.
val = k[0] | (k[1] << 4) | (k[2] << 8) | (k[3] << 12)
self.idx += 20
return val
def get_word(self):
lo = self.get_short()
hi = self.get_short()
return lo | (hi << 16)
def find_corrupted_sop(self, k):
        # Start-of-packet sequences are still considered valid if at least 3
        # of their 4 symbols are correct.
for seq in SOP_SEQUENCES:
if [k[i] == seq[i] for i in range(len(k))].count(True) >= 3:
return START_OF_PACKETS[seq]
return None
def scan_eop(self):
for i in range(len(self.bits) - 19):
k = (self.get_sym(i, rec=False), self.get_sym(i+5, rec=False),
self.get_sym(i+10, rec=False), self.get_sym(i+15, rec=False))
sym = START_OF_PACKETS.get(k, None)
if not sym:
sym = self.find_corrupted_sop(k)
# We have an interesting symbol sequence.
if sym:
# Annotate the preamble.
self.putx(0, i, [1, ['Preamble', '...']])
# Annotate each symbol.
self.rec_sym(i, k[0])
self.rec_sym(i+5, k[1])
self.rec_sym(i+10, k[2])
self.rec_sym(i+15, k[3])
if sym == 'Hard Reset':
self.text += 'HRST'
return -1 # Hard reset
elif sym == 'Cable Reset':
self.text += 'CRST'
return -1 # Cable reset
else:
self.putx(i, i+20, [2, [sym, 'S']])
return i+20
self.putx(0, len(self.bits), [1, ['Junk???', 'XXX']])
self.text += 'Junk???'
self.putwarn('No start of packet found', 'XXX')
return -1 # No Start Of Packet
def __init__(self):
self.reset()
def reset(self):
self.samplerate = None
self.idx = 0
self.packet_seq = 0
self.previous = 0
self.startsample = None
self.bits = []
self.edges = []
self.bad = []
self.half_one = False
self.start_one = 0
self.stored_pdos = {}
self.cap_mark = [0, 0, 0, 0, 0, 0, 0, 0]
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
# 0 is 2 UI, space larger than 1.5x 0 is definitely wrong.
self.maxbit = self.us2samples(3 * UI_US)
# Duration threshold between half 1 and 0.
self.threshold = self.us2samples(THRESHOLD_US)
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.out_binary = self.register(srd.OUTPUT_BINARY)
self.out_bitrate = self.register(
srd.OUTPUT_META,
meta=(int, 'Bitrate', 'Bitrate during the packet')
)
def us2samples(self, us):
return int(us * self.samplerate / 1000000)
def decode_packet(self):
self.data = []
self.idx = 0
self.text = ''
if len(self.edges) < 50:
return # Not a real PD packet
self.packet_seq += 1
tstamp = float(self.startsample) / self.samplerate
self.text += '#%-4d (%8.6fms): ' % (self.packet_seq, tstamp*1000)
self.idx = self.scan_eop()
if self.idx < 0:
# Full text trace of the issue.
self.putx(0, self.idx, [12, [self.text, '...']])
return # No real packet: ABORT.
# Packet header
self.head = self.get_short()
self.putx(self.idx-20, self.idx, [3, ['H:%04x' % (self.head), 'HD']])
self.puthead()
# Decode data payload
for i in range(self.head_count()):
self.data.append(self.get_word())
self.putx(self.idx-40, self.idx,
[4, ['[%d]%08x' % (i, self.data[i]), 'D%d' % (i)]])
self.putpayload(self.idx-40, self.idx, i)
# CRC check
self.crc = self.get_word()
ccrc = self.compute_crc32()
if self.crc != ccrc:
self.putwarn('Bad CRC %08x != %08x' % (self.crc, ccrc), 'CRC!')
self.putx(self.idx-40, self.idx, [5, ['CRC:%08x' % (self.crc), 'CRC']])
# End of Packet
if len(self.bits) >= self.idx + 5 and self.get_sym(self.idx) == EOP:
self.putx(self.idx, self.idx + 5, [6, ['EOP', 'E']])
self.idx += 5
else:
self.putwarn('No EOP', 'EOP!')
# Full text trace
if self.options['fulltext'] == 'yes':
self.putx(0, self.idx, [12, [self.text, '...']])
# Meta data for bitrate
ss, es = self.edges[0], self.edges[-1]
bitrate = self.samplerate*len(self.bits) / float(es - ss)
self.put(es, ss, self.out_bitrate, int(bitrate))
# Raw binary data (BMC decoded)
self.put(es, ss, self.out_binary, [0, bytes(self.bits)])
def decode(self):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
while True:
self.wait([{0: 'e'}, {1: 'e'}, {'skip': int(self.samplerate/1e3)}])
# First sample of the packet, just record the start date.
if not self.startsample:
self.startsample = self.samplenum
self.previous = self.samplenum
continue
diff = self.samplenum - self.previous
# Large idle: use it as the end of packet.
if diff > self.maxbit:
# The last edge of the packet.
self.edges.append(self.previous)
# Export the packet.
self.decode_packet()
# Reset for next packet.
self.startsample = self.samplenum
self.bits = []
self.edges = []
self.bad = []
self.half_one = False
self.start_one = 0
else: # Add the bit to the packet.
is_zero = diff > self.threshold
if is_zero and not self.half_one:
self.bits.append(0)
self.edges.append(self.previous)
elif not is_zero and self.half_one:
self.bits.append(1)
self.edges.append(self.start_one)
self.half_one = False
elif not is_zero and not self.half_one:
self.half_one = True
self.start_one = self.previous
else: # Invalid BMC sequence
self.bad.append((self.start_one, self.previous))
# TODO: Try to recover.
self.bits.append(0)
self.edges.append(self.previous)
self.half_one = False
self.previous = self.samplenum
| gpl-3.0 | 2,815,251,685,627,898,400 | 31.255086 | 96 | 0.467857 | false |
gpoulter/python-ngram | tests/test_csvjoin.py | 1 | 1590 | #!/usr/bin/python
import os
import os.path
import subprocess
import sys
import tempfile
import textwrap
import unittest
class CsvjoinTests(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix="csvjoin_test_")
self.leftpath = os.path.join(self.tmpdir, 'left')
self.rightpath = os.path.join(self.tmpdir, 'right')
self.outpath = os.path.join(self.tmpdir, 'output')
with open(self.leftpath, 'w') as left:
left.write('''ID,NAME\n1,Joe\n2,Kin\n3,ZAS''')
with open(self.rightpath, 'w') as right:
right.write('''ID,NAME\nID,NAME\nA,Joe\nB,Jon\nC,Job\nD,Kim''')
def tearDown(self):
os.remove(self.leftpath)
os.remove(self.rightpath)
os.remove(self.outpath)
os.rmdir(self.tmpdir)
def test_csvjoin(self):
args = [
sys.executable,
'scripts/csvjoin.py', '--titles', '-j', 'outer', '--minscore=0.24',
'--count=5', '--warp=1.0',
self.leftpath, '1', self.rightpath, '1', self.outpath
]
print(args)
subprocess.call(args)
with open(self.outpath, 'r') as out:
result = '\n'.join(s.strip() for s in out.readlines())
correct = textwrap.dedent("""\
ID,NAME,Rank,Similarity,ID,NAME
1,Joe,1,1.0,A,Joe
1,Joe,2,0.25,B,Jon
1,Joe,3,0.25,C,Job
2,Kin,1,0.25,D,Kim
3,ZAS""")
self.assertEqual(result, correct)
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | -4,381,302,981,549,351,400 | 30.8 | 79 | 0.551572 | false |
MeirKriheli/statirator | statirator/core/models.py | 1 | 1691 | from __future__ import absolute_import
from django.conf import settings
from .utils import path_to_lang, LANGS_DICT
class TranslationsMixin(object):
"Helper for getting transalations"
    SLUG_FIELD_FOR_TRANSLATIONS = 'slug' # Override in models if needed
    LANG_FIELD_FOR_TRANSLATIONS = 'language' # Override in models if needed
def get_translations(self):
"Query set for the translations"
self_slug = getattr(self, self.SLUG_FIELD_FOR_TRANSLATIONS)
self_lang = getattr(self, self.LANG_FIELD_FOR_TRANSLATIONS)
slug = {self.SLUG_FIELD_FOR_TRANSLATIONS + '__exact': self_slug}
lang = {self.LANG_FIELD_FOR_TRANSLATIONS + '__exact': self_lang}
return self.__class__.objects.filter(**slug).exclude(**lang)
def get_language(self):
"Get the language display for this item's language"
attr = 'get_{0}_display'.format(self.LANG_FIELD_FOR_TRANSLATIONS)
return getattr(self, attr)()
class DummyTranslation(object):
"""Dummy translations for views to put in template context in case there's no
actual object"""
def __init__(self, request, language=None, title=None, path=None):
self.title = title
self.request = request
self.language = language or request.LANGUAGE_CODE
self.path = path or request.path
def get_translations(self):
for code, name in settings.LANGUAGES:
if code != self.language:
yield DummyTranslation(self.request, code, name, self.path)
def get_language(self):
return LANGS_DICT.get(self.language)
def get_absolute_url(self):
return path_to_lang(self.path, self.language)
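# Usage sketch (hypothetical view code, not part of this module): a view that
# has no translated model instance can still feed the language switcher by
# putting a DummyTranslation built from the current request into its context:
#
#   context['translation'] = DummyTranslation(request, title='About')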
| mit | 1,598,948,057,908,974,000 | 34.229167 | 81 | 0.66647 | false |
TheRedFireFox/AnimeSubBot | src/main.py | 1 | 10335 | #!/usr/bin/env python3.4
# -*- coding: utf-8 -*-
"""
This file (main.py) is the entry point of the program's execution.
It initialises the main classes and holds (for now) the main loop; this
will be changed as soon as multiprocessing is fully implemented.
"""
# standard modules import
import os
import sys
import time
import getpass
import platform
import multiprocessing
# msvcrt only exists on Windows, otherwise use the curses module on linux (-.-)
try:
import msvcrt
except ImportError:
try:
import curses
except ImportError:
raise
# personal imports
import sql
import installer
import gobjects
import language
import clogging
import parsers.commandline
import parsers.configuration
import worker
def RestartProgram():
"""
Restarts the current program.
Note: this function does not return. Any cleanup action (like
saving data) must be done before calling this function.
"""
python = sys.executable
os.execl(python, python, * sys.argv)
def Install(Configuration,
SConfiguration,
MasterLanguage,
MasterLogger):
import installer
Install = installer.Installer(
Configuration = Configuration,
SConfiguration = SConfiguration,
Language = MasterLanguage,
Logging = MasterLogger,
)
Install.Install()
def TestSql(Configuration, MasterLogger, MasterLanguage):
SqlObject = None
NoConnection = True
NrTry = 0
while NrTry < 3:
SqlObject = sql.Api(
User = Configuration["Security"]["DatabaseUser"],
Password = Configuration["Security"]["DatabasePassword"],
DatabaseName = Configuration["MySQL"]["DatabaseName"],
Host=Configuration["MySQL"]["DatabaseHost"],
Port=Configuration["MySQL"]["DatabasePort"],
ReconnectTimer=int(Configuration["MySQL"]
["ReconnectionTimer"]),
LoggingObject = MasterLogger,
LanguageObject = MasterLanguage
)
if SqlObject.DatabaseConnection is None:
NrTry += 1
else:
break
SqlObject.CloseConnection()
if NrTry == 3:
return False
else:
return True
def Main():
"""
    The main function that lets the application run.
    This function will initialise all the needed objects
    and see to it that there is always something to do.
"""
# this module is needed for the curses module for all unix distributions
CursesMasterObject = None
CursesObject = None
# if program is run not on a windows system:
if platform.system() != "Windows":
# init the curses screen
CursesMasterObject = curses.initscr()
# Use cbreak to not require a return key press
# The system will not be waiting so but continue to work.
curses.cbreak()
curses.noecho()
CursesMasterObject.nodelay(1)
maxy, maxx = CursesMasterObject.getmaxyx()
begin_x = 0
begin_y = 0
height = maxy
width = maxx
CursesObject = curses.newwin(height, width, begin_y, begin_x)
CursesObject.nodelay(1)
curses.setsyx(-1, -1)
CursesMasterObject.refresh()
CursesObject.refresh()
CursesObject.scrollok(True)
CursesObject.idlok(True)
CursesObject.leaveok(True)
    # This object is needed for the main process to interact with the
# subprocess (the worker).
# SecondQueue = multiprocessing.Queue(1)
    # This object is the event used to shut down all the subprocesses;
    # it starts cleared and is set at the very end to signal shutdown.
ShutdownEventObject = multiprocessing.Event()
try:
# initialising the first logger and the language master object
# this object will be recreated later on
MasterLogger = clogging.Logger()
MasterLanguage = language.Language()
Language = MasterLanguage.CreateTranslationObject()
_ = Language.gettext
# Create the configuration class and read the configuration class.
Configuration = parsers.configuration.ConfigurationParser()
SConfiguration = parsers.configuration.SecureConfigurationParser(INTERNAL_KEY)
# check if default files exist if not install them
if ((Configuration.CheckIfExists() is False) or
(SConfiguration.CheckIfExists() is False)):
import installer
installer.Installer(Configuration,
SConfiguration,
MasterLanguage,
MasterLogger).Install("A")
else:
Configuration.ReadConfigurationFile()
SConfiguration.ReadConfigurationFile()
Configuration.AddSecureConfigurationParser(SConfiguration)
# deleting the object so that it will be garbage collected
del SConfiguration
Configuration = Configuration.ReturnClean()
# Create the language processor
MasterLanguage = language.Language()
Language = MasterLanguage.CreateTranslationObject(
Configuration["Telegram"]["DefaultLanguage"].split(","))
# This is the language object that will call the translation
# function.
_ = Language.gettext
# init parser
Parser = parsers.commandline.CustomParser(ConfigurationObject=Configuration,
LanguageObject=MasterLanguage
)
Parser.RunParser()
ParserArguments = Parser.GetArguments()
if ParserArguments.Installer is True:
# checking the installation
# reseting the configurations
import installer
Configuration = parsers.configuration.ConfigurationParser()
SConfiguration = parsers.configuration.SecureConfigurationParser(INTERNAL_KEY)
installer.Installer(Configuration,
SConfiguration,
MasterLanguage,
MasterLogger).Install()
# deleting the object so that it will be garbage collected
del SConfiguration
Configuration = Configuration.ReturnClean()
# Initialise the rest of the objects.
# first the multiprocess logger
MasterLogger.CloseHandlers()
MasterLogger = clogging.LoggingProcessSender(
LogToConsole = ParserArguments.PrintToConsole,
FileName = Configuration["Logging"]["LoggingFileName"],
MaxLogs = Configuration["Logging"]["MaxLogs"],
LoggingFormat = Configuration["Logging"]["LoggingFormat"],
Dateformat = Configuration["Logging"]["DateFormat"],
LoggingLevel = "debug",
CursesObject = CursesObject,
ShutdownEvent = ShutdownEventObject
)
MasterLogger.info(_("{AppName} has been started.").format(
AppName=gobjects.__AppName__
))
# test if there is a MySql connection
if TestSql(Configuration, MasterLogger, MasterLanguage) is False:
MasterLogger.critical(
_("{AppName} has been stopped, because you didn't "
"input the correct user name or password.").format(
AppName=gobjects.__AppName__)
)
time.sleep(0.5)
raise SystemExit
# starting the Worker
MainWorker = worker.MainWorker(
MaxWorker = Configuration["Telegram"]["MaxWorker"],
ShutDownEvent = ShutdownEventObject,
Configuration = Configuration,
Logging = MasterLogger,
Language = MasterLanguage,
BotName = None)
MainWorker.start()
        # Initialise the main loop (it's an endless loop, it breaks when a
# key is pressed.)
MasterLogger.info(_("Exit loop by pressing <Esc>, <q> or <Space>"))
MasterLogger.info(_("Getting updates from the telegram api."))
# Add a comment number to the telegram request, so that the old
# messages will be sorted out.
while True:
# check if a key is pressed by user and stop if pressed.
# if windows use msvcrt
if platform.system() == "Windows":
if msvcrt.kbhit():
PressedKey = ord(msvcrt.getch())
if PressedKey == 27 or PressedKey == 113 or \
PressedKey == 32:
MasterLogger.info(_("A user shutdown was requested "
"will now shutdown."))
break
# use curses
else:
PressedKey = CursesObject.getch()
if (PressedKey == 27 or PressedKey == 113 or
PressedKey == 32):
MasterLogger.info(_("A user shutdown was requested will "
"now shutdown.")
)
break
else:
pass
time.sleep(0.5)
MasterLogger.info(_("The system is shutting down, please be patient"
" until all the workload has been cleared."))
finally:
ShutdownEventObject.set()
try:
MainWorker.join()
except UnboundLocalError:
pass
except:
raise
MasterLogger.join()
if platform.system() != "Windows":
# clean after the curses module
time.sleep(1)
curses.nocbreak()
curses.echo()
curses.endwin()
# Raise the terror of the curses module for a second time.
# (It's correctly formatted now)
try:
raise
except RuntimeError:
pass
if __name__ == "__main__":
INTERNAL_KEY = r"2#<&Sd8!upX.jm(n"
multiprocessing.freeze_support()
Main()
| gpl-2.0 | 6,433,128,542,332,228,000 | 33.915541 | 90 | 0.575907 | false |
eriksonJAguiar/TCC-UENP-Codigos | My_codes/coleta/twitter_extract_tweets.py | 1 | 3251 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from TwitterAPI import *
from datetime import *
from pymongo import MongoClient
import sys
import json
import os.path
import time
#timeout
timeout = 60*3
timeout_start = time.time()
# Twitter App access credentials
consumer_key = "NBL0CtVrn2ajbpaGEWC1GBY2c"
consumer_secret = "2F5Uz5VYg0ONu4xTYYZsWkAGfc3TYXCkXLCsXMJ1eCKOfhBTfS"
access_token = "2345718031-we2K2PETQXkz7NCexjdGuvE2L2rnd5KfouzN3Up"
access_token_secret = "aEQPKGifu1y29Wbh3u6Z0YIcjAsBC8VeD4Y75CDL2r12o"
# access via OAuth
# API reference: https://dev.twitter.com/rest/reference
twitter = TwitterAPI(consumer_key, consumer_secret,auth_type='oAuth2')
##DataBase
client = MongoClient()
db = client.baseTweetsTCC
def saveTrends(tag,date):
try:
db.trends.insert_one(
{
'tag':tag,
'date':date
}
)
except Exception as inst:
pass
result_max = 10000
result_cont = 0
dh = datetime.now()
#tags = ['hiv','aids','viagra','tinder','menopausa','dst','ist','sifilis','usecamisinha','hpv','camisinha']
tags = []
#param = sys.argv[1:]
#print(param[0])
try:
trends_br = twitter.request('trends/place', {'id': 23424768})
trends_eua = twitter.request('trends/place', {'id': 23424977})
trends_eng = twitter.request('trends/place', {'id': 24554868})
#trends_esp = twitter.request('trends/place', {'id': 23424950})
#trends_ger = twitter.request('trends/place', {'id': 23424829})
n_trends = 10
i = 0
for br in trends_br.get_iterator():
tags.append(br['name'])
saveTrends(br['name'],dh.now())
i += 1
if i > n_trends: break
i = 0
for eua in trends_eua.get_iterator():
tags.append(eua['name'])
saveTrends(eua['name'],dh.now())
if i > n_trends: break
i += 1
i = 0
	for eng in trends_eng.get_iterator():
tags.append(eng['name'])
saveTrends(eng['name'],dh.now())
if i > n_trends: break
i += 1
i = 0
#for esp in trends_esp.get_iterator():
# tags.append(esp['name'])
# saveTrends(esp['name'],dh.now())
# if i > n_trends: break
# i += 1
#i = 0
#for ger in trends_ger.get_iterator():
# tags.append(ger['name'])
# saveTrends(ger['name'],dh.now())
# if i > n_trends: break
# i += 1
except Exception as inst:
pass
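# main collection loop: keep searching the collected trend tags and storing the
# returned tweets in MongoDB until result_max tweets are saved or the timeout below expires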
while result_cont < result_max:
#print('Buscando...\n')
#print('Isso Pode Demorar Um Pouco..\n')
tag_cont = 0
while tag_cont < len(tags):
r = twitter.request('search/tweets', {'q': tags[tag_cont]})
for item in r.get_iterator():
#tweet = 'ID: %d, Usuario: %s, texto: %s, Horario: %s, Criado: %s \n'%(item['id'],item['user']['screen_name'],item['text'],dh.now(),item['created_at'])
#print(item['text'])
try:
db.tweets.insert_one(
{
'_id':item['id'],
'id_user':item['user']['id'],
'name':item['user']['screen_name'],
'text':item['text'],
'hourGet':dh.now(),
'created_at':item['created_at'],
'location':item['user']['location'],
'retweets_count':item['retweet_count']
}
)
result_cont += 1
except Exception as inst:
#print(type(inst))
pass
tag_cont += 1
#print("%d tweets capturados"%result_cont)
if time.time() >= timeout_start + timeout:
break
#print('Resultados = %d \n'%(result_cont))
#print('Coleta Relalizada com Sucesso! \n')
| gpl-3.0 | 1,860,964,976,097,334,500 | 21.115646 | 154 | 0.637958 | false |
EmanueleLM/SAAP-Sequencial-Attacks-in-Adversarial-Patrolling | solvesaap.py | 1 | 19333 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 14:33:27 2017
@author: Emanuele
Solve SAAP() script:
takes as input a series of graphs encoded in a XML file filled up with an adjacency matrix A, and a description of each node
encoded as type of node (simple vertex or target), its value (0 if it's not a target, between 0 and 1
if it's a target) and a deadline (0 if it's a vertex, greater than zero and integer if it's a target)
"""
import pathfinder as pf
import numpy as np
import xml.etree.ElementTree as et
from xml.dom import minidom
import graph as gr
import os.path
import time
graphs_input_path = "C:\\Users\\Ga\\Desktop\\15_5_025\\"; # path to find the graphs' description
output_path = "C:\\Users\\Ga\\Desktop\\15_5_025\\results\\"; # path to store the output in pseudo-xml format
graphs = list(); # list that contains the name of each graph file in txt format
k = 1; # number of resources we want the solver to solve for each instance of the graphs specified in graphs
# complete description of each tag in our xml file (intermediate aggregate file)
#
# .. TO_DO
#
graph_tags = list(['G', 'A', 'VERTICES', 'V0', 'T0', 'NUM_V', 'NUM_T', 'DENSITY', 'TOPOLOGY']);
other_tags = list(['K', 'PATH', 'COVERED', 'LOST', 'HISTORY', 'UTILITY', 'EXEC_TIME', 'ROUTES']);
aggregate_filepath = "C:\\Users\\Ga\\Desktop\\"; # filepath to the aggregate (.dat) file
aggregate_output = "aggregate15_5_density013.dat"; # name of the aggregate file
aggregate_prefix = ['NAME', 'TOPOLOGY', 'NUM_V', 'NUM_T', 'K', 'EXEC_TIME', 'UTILITY', 'V0', 'DENSITY']; # prefix for the aggregate file: specifies each entry on that file
#==============================================================================
# function that invokes pathfinder for a given specification of the SAAP game
# and solves it for a given number of resources k
# creates a "dom"-like structure that is used to store all the salient elements of the saap solution
# takes as input
# the file, filepath, where there's the graph specification
# number of resources available to A
# returns a list of the files that contains the results of the various saap instances
#==============================================================================
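# illustrative usage (hypothetical file name): result_files = solveSAAP(graphs_input_path + "graph0.xml", k)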
def solveSAAP(filepath, k):
files = list();
G, vertices, density, topology = createGraphFromFile(filepath);
equilibrium_route = list();
equilibrium_utility = -np.inf;
equilibrium_history = list();
vertex_at_equilibrium = 0;
    start_time = time.time(); # start measuring the time of execution (we don't care if we have a small overhead since we don't start measuring it in the true function, that's because every instance will have the same (little) overhead)
for v in range(len(G.getVertices())):
partial_utility = 0;
partial_history = list();
partial_route = list();
for t in G.getTargets():
partial_time = time.time();
u, route, hist = pf.PathFinder(G, v, t, k); # solve the game for a specific instance with a given number of resources 'k' for the Attacker
print(u, route, hist);
print("Partial time for a (v,t) processing: ", (time.time() - partial_time));
            if u < partial_utility: # if a given instance is worse (for the Defender), A chooses that instance
partial_history = hist;
partial_route = route;
partial_utility = u;
if partial_utility > equilibrium_utility: # among all the worst attacks, D chooses the best placement and the best response
equilibrium_history = partial_history;
equilibrium_route = partial_route;
equilibrium_utility = partial_utility;
vertex_at_equilibrium = v;
exec_time = (time.time() - start_time); # calculate execution time (little overhead introduced by returning of the function, still not important since we are facing an exponential problem)
print("Equilibrium vertex: ", vertex_at_equilibrium);
print("Equilibrium Path: ", equilibrium_route, "\nEquilibrium utility ", equilibrium_utility, "\nEquilibrium history ", equilibrium_history);
print("Execution time: ", exec_time); # write all the stuff to a file in a xml pseudo-format
g_tags = list();
o_tags = list();
root = et.Element("ROOT");
g_tags.append(et.SubElement(root, graph_tags[0])); # G (graph) is the first child node of ROOT
for j in range(1,len(graph_tags)):
g_tags.append(et.SubElement(g_tags[0], graph_tags[j])); # every element of the graph is a subelement of the graph itself
for j in range(len(other_tags)):
o_tags.append(et.SubElement(root, other_tags[j]));
# follow the order in graph_tags to see what's the content of each of the following element
g_tags[1].text = str(list(G.getAdjacencyMatrix())); # adjacency matrix
g_tags[2].text = str(vertices); # specification of each vertex
g_tags[3].text = str(vertex_at_equilibrium); # vertex at the equilibrium
g_tags[4].text = str(t); # initial target
g_tags[5].text = str(len(vertices)); # number of vertices on the graph
g_tags[6].text = str(len(G.getTargets())); # number of targets on the graph
g_tags[7].text = str(G.getDensity()); # edge density
g_tags[8].text = topology; # topology of the graph
# follow the order in other_tags to see what's the content of each of the following element
o_tags[0].text = str(k+1); # number of resources
# fill this section up with the other o_tags
# o_tags[1].text =
# o_tags[2].text =
# o_tags[3].text =
# ...
o_tags[1].text = str(equilibrium_route);
o_tags[4].text = str(equilibrium_history);
o_tags[5].text = str(equilibrium_utility); # execution time
o_tags[6].text = str(exec_time); # execution time
#o_tags[7].text = str(routes); # list of all the routes generated by the saap instance
tree = et.ElementTree(root);
files.append(output_path+"topology_"+topology+"_vertices_"+str(len(G.getVertices()))+"_density_"+str(G.getDensity())+"_resources_"+str(k+1)+"_salt_"+filepath[-5:]);
tree.write(files[-1]); # write on file
return files;
#==============================================================================
# function that invokes pathfinder for a given specification of the SAAP game
# and solves it for a given number of resources k
#
# This is the fast version of the solveSAAP function since it solves the games for just each vertex
# on a random initial target under attack: we will OBVIOUSLY have a pessimistic estimate for the Attacker's utility, so please note that before using this function
# The way it extract the utility and equilibrium path is the same: it changes the execution time that is an
# estimate of the real time (it just multiplies the average of the execution time of each instance to obtain the total time)
#
# creates a "dom"-like structure that is used to store all the salient elements of the saap solution
# takes as input
# the file, filepath, where there's the graph specification
# number of resources available to A
# returns a list of the files that contains the results of the various saap instances
#==============================================================================
def fastSolveSAAP(filepath, k):
files = list();
G, vertices, density, topology = createGraphFromFile(filepath);
equilibrium_route = list();
equilibrium_utility = -np.inf;
equilibrium_history = list();
vertex_at_equilibrium = 0;
    start_time = time.time(); # start measuring the time of execution (we don't care if we have a small overhead since we don't start measuring it in the true function, that's because every instance will have the same (little) overhead)
for v in range(len(G.getVertices())):
partial_utility = 0;
partial_history = list();
partial_route = list();
partial_time = time.time();
t = G.getTargets()[np.random.randint(len(G.getTargets()))]; # random target
u, route, hist = pf.PathFinder(G, v, t, k); # solve the game for a specific instance with a given number of resources 'k' for the Attacker
print(u, route, hist);
print("Partial time for a (v,t) processing: ", (time.time() - partial_time));
        if u < partial_utility: # if a given instance is worse (for the Defender), A chooses that instance
partial_history = hist;
partial_route = route;
partial_utility = u;
if partial_utility > equilibrium_utility: # among all the worst attacks, D chooses the best placement and the best response
equilibrium_history = partial_history;
equilibrium_route = partial_route;
equilibrium_utility = partial_utility;
vertex_at_equilibrium = v;
exec_time = len(G.getTargets())*(time.time() - start_time); # ESTIMATE the execution time by multiplying what we've spent so far to the number of targets
print("Equilibrium vertex: ", vertex_at_equilibrium);
print("Equilibrium Path: ", equilibrium_route, "\nEquilibrium utility ", equilibrium_utility, "\nEquilibrium history ", equilibrium_history);
print("Execution time: ", exec_time); # write all the stuff to a file in a xml pseudo-format
g_tags = list();
o_tags = list();
root = et.Element("ROOT");
g_tags.append(et.SubElement(root, graph_tags[0])); # G (graph) is the first child node of ROOT
for j in range(1,len(graph_tags)):
g_tags.append(et.SubElement(g_tags[0], graph_tags[j])); # every element of the graph is a subelement of the graph itself
for j in range(len(other_tags)):
o_tags.append(et.SubElement(root, other_tags[j]));
# follow the order in graph_tags to see what's the content of each of the following element
g_tags[1].text = str(list(G.getAdjacencyMatrix())); # adjacency matrix
g_tags[2].text = str(vertices); # specification of each vertex
g_tags[3].text = str(vertex_at_equilibrium); # vertex at the equilibrium
g_tags[4].text = str(t); # initial target
g_tags[5].text = str(len(vertices)); # number of vertices on the graph
g_tags[6].text = str(len(G.getTargets())); # number of targets on the graph
g_tags[7].text = str(G.getDensity()); # edge density
g_tags[8].text = topology; # topology of the graph
# follow the order in other_tags to see what's the content of each of the following element
o_tags[0].text = str(k+1); # number of resources
# fill this section up with the other o_tags
# o_tags[1].text =
# o_tags[2].text =
# o_tags[3].text =
# ...
o_tags[1].text = str(equilibrium_route);
o_tags[4].text = str(equilibrium_history);
o_tags[5].text = str(equilibrium_utility); # execution time
o_tags[6].text = str(exec_time); # execution time
#o_tags[7].text = str(routes); # list of all the routes generated by the saap instance
tree = et.ElementTree(root);
files.append(output_path+"topology_"+topology+"_vertices_"+str(len(G.getVertices()))+"_density_"+str(G.getDensity())+"_resources_"+str(k+1)+"_salt_"+filepath[-5:]);
tree.write(files[-1]); # write on file
return files;
#==============================================================================
# function that create a graph G from a file that specifies the adjacency matrix at first
# the initial vertex v, the first target under attack t and how the graph is (vertices, targets, their values and deadlines..)
# the format of the file is the following and in this order:
# adjacency matrix A specified as [[1,0],[0,1]]
# a list of each vertex characteristic as [vertex/target, value, deadline] where vertex=0, target=1
# , value is a real number in [0,1](0 is for vertices)
# , deadline is a natural number (0 for vertices, any other for targets)
# , e.g. [0,0,0] --> vertex, [1, 0.5, 10] --> target with 0.5 as value, 10 as deadline
# , an example of a 3*3 vertices' specification is [[0,0,0],[1,1,4],[1,0.3,5]]
# edge density defined as density = 2|E|/|V|(|V|-1)
## topology of the graph, the possible choices are {'graph', 'line', 'star', 'crique', ..}
# all this stuff must be encoded in a pseudo-xml format (just to be a little more polite and clean)
# although you can find an example of a pseudo-xml graph in the repo on github, here's one:
# <G>
# <A>[[1,1,1],[1,1,1],[1,1,1]]</A>
# <V>[[1,0.3,3],[0,0,0],[1,0.8,12]]</V>
# <DENSITY>0.3</DENSITY>
# <TOPOLOGY>graph</TOPOLOGY>
# </G>
# the previous example specifies a fully connected graph with 3 vertices, 2 targets (index 0 and 2) and a vertex (index 1)
# the density is set to 0.3
# the topology of the graph ('graph' if it's not a specific topology, 'crique', 'line', 'star' etc. otherwise)
# the function returns
# a graph G,
# the vertices that compose the graph (each one specify if it's a vertex or a target, its value and its deadline)
# the density of the graph
# the topology of the graph
#==============================================================================
def createGraphFromFile(filepath):
# elements_check = ["A", "V", "DENSITY", "TOPOLOGY"]; # elements to check if all the graph's elements are present in the file
tree = et.parse(filepath);
root = tree.getroot();
# create the empty Graph and the adjacency matrix by parsing the file (using the eval function :P bad bad bad)
adj_matrix = np.array(eval(root[0].text));
vertices = np.array(eval(root[1].text));
V = list();
# for each vertex create the graph G
for v in vertices:
V = np.append(V, gr.Vertex(int(v[0]), float(v[1]), int(v[2])));
G = gr.Graph(np.array(V));
n = 0;
for v in vertices:
G.setAdjacents(V[n], np.array(adj_matrix[n]));
n += 1;
return [G, vertices, float(root[2].text), root[3].text]; # return the graph, the vertices, the density, the topology
#==============================================================================
# function that given a xml result coming from a saap solution, prints on screen all the xml file
# takes as input the filepath of the xml file
# returns none
# please note that if verbose is set to True it will print out all the routes generated (usually a lot)
# otherwise it does not print them
#==============================================================================
def printSaapDOM(filepath, verbose):
root = et.parse(filepath).getroot();
for j in root[0]:
print(j.tag);
print(j.text, "\n");
if verbose:
nop = len(root);
else:
nop = -1;
for i in root[1:nop]:
print(i.tag);
print(i.text, "\n");
#==============================================================================
# function that "prettifies" the output
# takes as input the element in ElementTree to be prettyfied
# returns the string prettified
#==============================================================================
def prettify(elem):
rough_string = et.tostring(elem, 'utf-8');
reparsed = minidom.parseString(rough_string);
return reparsed.toprettyxml(indent="\t");
#==============================================================================
# function that returns the root of the xml file, given the path of the xml file
# it takes as input the xml file
# it returns the root element of the file
#==============================================================================
def getRootElement(filepath):
return et.parse(filepath).getroot();
#==============================================================================
# function that turns a xml file into aggregate data, useful to plot the data
# takes as input the result of a saap instance as filepath + filename
# returns a new line in the aggregate.dat file file that is composed in this way:
# filename num_nodes num_targets resources exec_time utility length_eq_path average_length_path density
#==============================================================================
def fromXml2Aggregate(filepath, filename):
data_to_find = ['TOPOLOGY', 'NUM_V', 'NUM_T', 'K', 'EXEC_TIME', 'UTILITY', 'V0', 'DENSITY'];
result = list([filename]);
root = et.parse(filepath+filename).getroot();
for i in data_to_find:
if root[0].find(str(i)) != None:
result.append(root[0].find(i).text);
else:
if root.find(i) != None:
result.append(root.find(i).text);
else:
result.append('None');
return result;
#==============================================================================
# function that creates from a graph specification a string that is used to feed the
# function that create the aggregate file from the various xml instances of saaps
# takes as input
# file, which is the filename (filepath+filename)
# returns
# the filename of the xml file to be used to feed the aggregate file
# the salt used to distinguish between graphs with same features but different topologies
#==============================================================================
def fromGraphToXmlName(file):
G, vertices, density, topology = createGraphFromFile(file);
filename = "topology_"+topology+"_vertices_"+str(len(G.getVertices()))+"_density_"+str(G.getDensity())+"_resources_"+str(2)+"_salt_"+file[-5:];
return filename;
"""
Little testing to see if the algorithms work as expected
"""
verbose = True; # this variable controls whether the output is printed
if verbose:
# extract elements from the graph file
for inputgraph in os.listdir(graphs_input_path):
if inputgraph=="results": # skipt the folder with the results
continue;
[printSaapDOM(i, True) for i in solveSAAP(graphs_input_path+inputgraph, 1)]; # solve all the SAAP instances in a given directory for a specified number of resources
if not(os.path.isfile(aggregate_filepath + aggregate_output)): # if the file does not exists, create it with the prefix
prefix = str();
for i in aggregate_prefix:
prefix += str(i)+'\t';
f = open(aggregate_filepath + aggregate_output, "w"); # create the file with the prefix
f.write(prefix + '\n');
else:
f = open(aggregate_filepath + aggregate_output, "a"); # open in appendix mode
# write all the results row by row, using the fromGraphToXmlName function as "feeder" to the fromXml2Aggregate function, plus the number of resources of a given instance
aggregatefilename = fromGraphToXmlName(graphs_input_path+inputgraph);
line = fromXml2Aggregate(output_path, aggregatefilename);
f.write(str(line)+'\n');
f.close(); # close the file | gpl-3.0 | 7,431,354,720,745,216,000 | 55.373887 | 240 | 0.605597 | false |
Moguri/odin | src/combat/terrain.py | 1 | 3190 | import random
from panda3d.core import *
MAP_SIZE = 32
CELL_SIZE = 1
SEL_NONE = 0
SEL_CURS = 1 << 0
SEL_MOVE = 1 << 1
SEL_ATTK = 1 << 2
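# the SEL_* flags are written into the per-tile selection map; _display_range adds
# them per tile, so overlapping move/attack ranges accumulate their flag values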
class Terrain(object):
# UNTESTED
# @classmethod
# def world_to_grid(cls, x, y, z):
# position = [x, y, z]
# half_size = MAP_SIZE / 2
# position[0] = int(position[0] * half_size + half_size) / CELL_SIZE
# position[1] = int(position[1] * half_size + half_size) / CELL_SIZE
#
# return position
@classmethod
def grid_to_world(cls, x, y, z):
position = [x, y, z]
position[0] = position[0] - MAP_SIZE/2 + CELL_SIZE / 2.0
position[1] = position[1] - MAP_SIZE/2 + CELL_SIZE / 2.0
return position
@classmethod
def get_random_tile(cls):
x = random.randint(0, MAP_SIZE-1)
y = random.randint(0, MAP_SIZE-1)
return [x, y, 0]
@classmethod
def _iterate_circle(cls, center, radius):
for y in range(center[1]-radius, center[1]+radius+1):
for x in range(center[0]-radius, center[0]+radius+1):
if Terrain.check_distance(radius, (x, y), center):
yield x, y
@classmethod
def check_distance(cls, range, p0, p1):
if abs(p1[0] - p0[0]) + abs(p1[1] - p0[1]) <= range:
return True
return False
@classmethod
def get_distance(cls, p0, p1):
return abs(p1[0] - p0[0]) + abs(p1[1] - p0[1])
@classmethod
def find_closest_in_range(cls, center, radius, target_pos):
closest = None
for x, y in Terrain._iterate_circle(center, radius):
if not closest:
closest = [x, y]
else:
cur_dist = Terrain.get_distance(closest, target_pos)
new_dist = Terrain.get_distance((x, y), target_pos)
if new_dist < cur_dist:
closest = [x, y]
return closest + [0]
def __init__(self):
# Load the environment model.
self.model = base.loader.loadModel("terrain")
# Reparent the model to render.
self.model.reparentTo(base.render)
# Load and set terrain shader
terrain_shader = Shader.load(Shader.SLGLSL, "shaders/basic.vs", "shaders/terrain.fs", "")
self.model.setShader(terrain_shader)
# Setup selection map
self.selection_texture = Texture()
self.selection_texture.set_compression(Texture.CMOff)
self.selection_texture.set_component_type(Texture.TUnsignedByte)
self.selection_texture.set_format(Texture.FRed)
self.model.setShaderInput("selection_map", self.selection_texture)
# Setup selection data
self.selection_image = PNMImage(MAP_SIZE, MAP_SIZE, 1)
def clear_selection(self):
self.selection_image.fill(SEL_NONE)
def set_cursor_selection(self, x, y):
self.selection_image.setXelVal(x, y, SEL_CURS)
def _display_range(self, center, radius, value):
for x, y in Terrain._iterate_circle(center, radius):
if x < 0 or x >= MAP_SIZE or y < 0 or y >= MAP_SIZE:
continue
old = self.selection_image.getGrayVal(x, y)
self.selection_image.setXelVal(x, y, old+value)
def display_move_range(self, player):
center = player.grid_position
radius = player.remaining_movement
self._display_range(center, radius, SEL_MOVE)
def display_attack_range(self, player):
center = player.grid_position
radius = player.range
self._display_range(center, radius, SEL_ATTK)
def update_selection(self):
self.selection_texture.load(self.selection_image) | apache-2.0 | 3,266,489,896,179,958,000 | 26.747826 | 91 | 0.676803 | false |
stromnet/pyowmaster | pyowmaster/device/pio.py | 1 | 14846 | # vim: set expandtab sw=4 softtabstop=4 fileencoding=utf8 :
#
# Copyright 2014-2015 Johan Ström
#
# This python package is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from pyowmaster.device.base import OwChannel, OwDevice
from pyowmaster.event.events import OwPIOEvent
from pyowmaster.exception import ConfigurationError, InvalidChannelError
import logging, time
# Modes of operation, per channel
PIO_MODE_OUTPUT = 0b00001
PIO_MODE_INPUT = 0b00010
PIO_MODE_INPUT_MOMENTARY = 0b00100 | PIO_MODE_INPUT
PIO_MODE_INPUT_TOGGLE = 0b01000 | PIO_MODE_INPUT
PIO_MODE_ACTIVE_LOW = 0b00000
PIO_MODE_ACTIVE_HIGH = 0b10000
def test_bits(value, mask):
return (value & mask) == mask
class OwPIOBase(object):
"""A shared base class for basic PIO pin channels"""
def pio_base_init(self, cfg):
"""Init a new OwPIOBase, with a "mode" configuration parsed
from the cfg dict key 'mode'
To be called from __init__ in sub class.
The mode string should be a combination of the following strings:
input momentary (default)
input toggle
or
output
combined with
active low (default)
active high
"""
modestr = cfg.get('mode', 'input momentary')
self.mode = self.parse_pio_mode(modestr)
# Updated in OwPIODevice.on_alarm, or similar
self.value = None
self.state = None
def parse_pio_mode(self, mode):
cfg = 0
if mode.find('output') != -1:
cfg |= PIO_MODE_OUTPUT
else:
# Default input
cfg |= PIO_MODE_INPUT
if mode.find('toggle') != -1:
cfg |= PIO_MODE_INPUT_TOGGLE
else:
# Default momentary
cfg |= PIO_MODE_INPUT_MOMENTARY
if mode.find('active low') != -1:
cfg |= PIO_MODE_ACTIVE_LOW
elif mode.find('active high') != -1:
cfg |= PIO_MODE_ACTIVE_HIGH
else:
# For input, "ON" means connected to GND (only option in parsite-powered nets)
# For outputs, "ON" means PIO transistor is active and the sensed output is LOW.
cfg |= PIO_MODE_ACTIVE_LOW
return cfg
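    # example: parse_pio_mode('input toggle active high') returns
    # PIO_MODE_INPUT_TOGGLE | PIO_MODE_ACTIVE_HIGH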
def modestr(self):
if self.is_output:
s = "output "
elif self.is_input:
s = "input "
if self.is_input_toggle:
s += "toggle "
else:
raise ConfigurationError("Unknown mode %d" % self.mode)
if self.is_active_low:
s += "active low"
elif self.is_active_high:
s += "active high"
return s
def get_event_types(self):
"""pywomaster.event.actionhandler uses this to determine which PIO event types this channel may dispatch"""
if self.is_input_momentary:
return ('trigged',)
else:
return ('on', 'off')
@property
def is_output(self):
return test_bits(self.mode, PIO_MODE_OUTPUT)
@property
def is_input(self):
return test_bits(self.mode, PIO_MODE_INPUT)
@property
def is_input_momentary(self):
return test_bits(self.mode, PIO_MODE_INPUT_MOMENTARY)
@property
def is_input_toggle(self):
return test_bits(self.mode, PIO_MODE_INPUT_TOGGLE)
@property
def is_active_high(self):
return test_bits(self.mode, PIO_MODE_ACTIVE_HIGH)
@property
def is_active_low(self):
return not test_bits(self.mode, PIO_MODE_ACTIVE_HIGH)
class OwPIOChannel(OwPIOBase, OwChannel):
"""A OwChannel for devices with PIO"""
def __init__(self, num, name, cfg):
"""Create a new OwPIOChannel, a OwChannel with an OwPIOBase"""
super(OwPIOChannel, self).__init__(num, name, cfg)
self.pio_base_init(cfg)
def is_set(self, value):
"""Given a bitmask value, return this channels bit position value as a True(1)/False(0)"""
return (value & (1 << self.num)) != 0
def __str__(self):
return "%s %s (alias %s), mode=%s [%s,%s]" % (self.__class__.__name__, self.name, self.alias, self.modestr(), self.value, self.state)
class OwPIODevice(OwDevice):
"""Abstract base class for use with DS2406, DS2408 and similar PIO devices.
Subclass must implement:
- A property named "num_channels" must exist, which tells how
many channels this device has.
- Method _calculate_alarm_setting
- Method _on_alarm_handled
"""
def __init__(self, _alarm_supported, ow, owid):
"""Subclass should set the _alarm_supported flag acordingly"""
super(OwPIODevice, self).__init__(ow, owid)
self.alarm_supported = _alarm_supported
self.inital_setup_done = False
self._last_sensed = None
def custom_config(self, config, is_initial):
self.channels = []
# For each channel on the device, create a OwPIOChannel object and put in channels list
for chnum in range(self.num_channels):
chname = str(self._ch_translate(chnum))
# Primarily read section with <device-id>:<ch.X>,
# fall back on <device-type>:<ch.X>
# The value should be a mode-string, or a dict which is passed to OwPIOChannel
cfgval = config.get(('devices', (self.id, self.type), 'ch.' + chname), {})
if isinstance(cfgval, str):
cfgval = {'mode': cfgval}
sw = OwPIOChannel(chnum, chname, cfgval)
self.log.debug("Ch %d configured as %s", chnum, sw)
self.channels.append(sw)
if self.alarm_supported:
self._calculate_alarm_setting()
# Apply alarm config directly
self.check_alarm_config()
else:
for ch in self.channels:
if ch.is_input:
self.log.warning("Channel configured as Input, but this device does not have alarm support. No polling implemented!")
break
def _calculate_alarm_setting(self):
"""Override this and set self.wanted_alarm, this will be feed to set_alarm"""
self.wanted_alarm = None # silence pylint
raise NotImplementedError("_calculate_alarm_setting property must be implemented by sub class")
def on_seen(self, timestamp):
# We have nothing to do here
if not self.alarm_supported:
return
# But we are using alarm, ensure proper config..
self.check_alarm_config()
if self._last_sensed is not None:
# xXX: If already read, skip re-read... When is this
# required? On re-start?
return
# refresh sensed; mainly for startup
sensed = int(self.ow_read_str('sensed.BYTE', uncached=True))
# if self._last_sensed != None and self._last_sensed != sensed:
# # XXX: Racey with alarm
# self.log.warning("%s: Sensed altered without on_alarm being notified. Last=%d, now=%d",\
# self, self._last_sensed, sensed)
#
# elif self._last_sensed == None:
# self.log.debug("last_sensed inited %d", sensed)
self._last_sensed = sensed
def _emit_init_state(self, sensed):
"""During alarm reconfigure (due to startup, or device reset), emit special events
        for all Toggle inputs and outputs, to let the global system know they may have changed"""
timestamp = time.time()
for ch in self.channels:
ch_sensed = ch.is_set(sensed)
ch.value = ch_sensed
if not ch.is_input_toggle and not ch.is_output:
continue
ch_active_level = ch.is_active_high
if ch_sensed == ch_active_level:
event_type = OwPIOEvent.ON
else:
event_type = OwPIOEvent.OFF
event = OwPIOEvent(timestamp, ch.name, event_type, True)
self.log.debug("%s: ch %s event: %s",
self, ch.name, event_type)
self.emit_event(event)
ch.state = event_type
def on_alarm(self, timestamp):
if not self.alarm_supported:
self.log.error("%s: Ignoring alarm, device should not get alarms!", self)
return
if self.check_alarm_config():
self.log.warning("%s: Ignoring alarm, device was not ready", self)
return
# Read latch + sensed
# XXX: in owlib DS2406 code we read register,
# and could then read the uncached sensed.byte to get
# the truely same sensed.
# For DS2408 however, these are separate reads operations,
# even if all data is read at both times
latch = int(self.ow_read_str('latch.BYTE', uncached=True))
sensed = int(self.ow_read_str('sensed.BYTE', uncached=True))
# And clear the alarm
self.ow_write('latch.BYTE', '1')
last_sensed = self._last_sensed
self.log.debug("%s: alarmed, latch=%d, sensed=%d, last_sensed=%s",
self, latch, sensed, last_sensed)
self._handle_alarm(timestamp, latch, sensed, last_sensed)
self._last_sensed = sensed
def _ch_translate(self, ch):
"""Optional overridable channel name function; return channel identifier based on 0-based index"""
return ch
def _ch_translate_rev(self, ch):
"""Optional overridable channel resolve function; return 0-baesd index based on channel identifier"""
return int(ch)
def _handle_alarm(self, timestamp, latch, sensed, last_sensed):
for ch in self.channels:
chnum = ch.num
mode = ch.mode
is_input = ch.is_input
is_output = ch.is_output
# 1 = True
# 0 = False
ch_latch = ch.is_set(latch)
if not ch_latch:
# Our latch was not triggered
continue
ch_sensed = ch.is_set(sensed)
ch_active_level = ch.is_active_high
ch_last_sensed = ch.is_set(last_sensed) if last_sensed is not None else None
ch_has_changed = ch_last_sensed != ch_sensed if ch_last_sensed is not None else None
ch.value = ch_sensed
event_type = None
if is_output or (is_input and ch.is_input_toggle):
if ch_has_changed != False:
if ch_sensed == ch_active_level:
event_type = OwPIOEvent.ON
else:
event_type = OwPIOEvent.OFF
ch.state = event_type
elif ch.is_input_momentary:
# Two scenarios we must handle (active_level=1):
# 1. Button is pressed [latch triggers]
# 2. Button is released [latch already triggered, no change]
# 3. We get the alarm, clear latch, sensed=0 (ch_sensed != ch_active_level)
# or
# 1. Button is pressed [latch triggers]
# 2. We get alarm, clear latch, sensed=1 (ch_sensed == ch_active_level)
# 3. Button is released [latch triggers]
# 4. We get alarm, clear latch, sensed=0 (ch_last_sensed == ch_active_level)
#
# In the second scenario, we want to avoid trig on the second latch
if ch_sensed == ch_active_level or ch_last_sensed != ch_active_level:
event_type = OwPIOEvent.TRIGGED
else:
raise RuntimeError("Invalid input mode %d for channel %s" % (mode, ch))
if event_type:
event = OwPIOEvent(timestamp, ch.name, event_type)
self.log.debug("%s: ch %s event: %s",
self, ch.name, event_type)
self.emit_event(event)
else:
self.log.debug("%s: channel %s latch change ignored", self, ch)
def check_alarm_config(self):
"""Ensure the alarm property is configured as intended.
Returns True if change was applied, False if it was correct"""
alarm = self.ow_read_str('set_alarm', uncached=True)
reconfigured = False
if alarm != self.wanted_alarm:
self.log.log((logging.WARNING if self.inital_setup_done else logging.INFO),
"%s: reconfiguring alarm from %s to %s", self, alarm, self.wanted_alarm)
self.ow_write('set_alarm', self.wanted_alarm)
# And clear any alarm if already set
self.ow_write('latch.BYTE', '1')
reconfigured = True
if reconfigured or not self.inital_setup_done:
# Emit current state of all devices
sensed = int(self.ow_read_str('sensed.BYTE', uncached=True))
self._emit_init_state(sensed)
self.inital_setup_done = True
return reconfigured
def set_output(self, channel, value):
"""Control a channel configured as output, setting the new value to ON or OFF.
The actual PIO state is controlled by the output "active high" or "active low" configuration
mode.
value should be True or False. If set to true, we set the output "active".
If channel is not configured as output, an exception is thrown.
Note that "active low" refers to the actual logic level, i.e this will
write PIO.xx 1, to enable the transistor, pulling down the line, and activating
something by grounding the pin (low).
"""
if isinstance(channel, OwPIOChannel):
ch = channel
else:
ch_num = self._ch_translate_rev(channel)
ch = self.channels[ch_num]
if not ch.is_output:
raise InvalidChannelError("Channel not configured as output")
active_high = ch.is_active_high
if (value and active_high) or (not value and not active_high):
# PIO off => external pull-up possible => "high"
out_value = 0
else:
# PIO on => pulled to ground => "low"
out_value = 1
self.log.info("%s: Writing PIO.%s = %d", self, ch.name, out_value)
self.ow_write('PIO.%s' % ch.name, out_value)
| gpl-3.0 | 376,626,504,327,405,400 | 35.654321 | 141 | 0.583766 | false |
nodakai/watchman | build/fbcode_builder/getdeps/buildopts.py | 1 | 17188 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import errno
import glob
import ntpath
import os
import subprocess
import sys
import tempfile
from .copytree import containing_repo_type
from .envfuncs import Env, add_path_entry
from .fetcher import get_fbsource_repo_data
from .manifest import ContextGenerator
from .platform import HostType, is_windows
try:
import typing # noqa: F401
except ImportError:
pass
def detect_project(path):
repo_type, repo_root = containing_repo_type(path)
if repo_type is None:
return None, None
# Look for a .projectid file. If it exists, read the project name from it.
project_id_path = os.path.join(repo_root, ".projectid")
try:
with open(project_id_path, "r") as f:
project_name = f.read().strip()
return repo_root, project_name
except EnvironmentError as ex:
if ex.errno != errno.ENOENT:
raise
return repo_root, None
class BuildOptions(object):
def __init__(
self,
fbcode_builder_dir,
scratch_dir,
host_type,
install_dir=None,
num_jobs=0,
use_shipit=False,
vcvars_path=None,
allow_system_packages=False,
):
""" fbcode_builder_dir - the path to either the in-fbsource fbcode_builder dir,
or for shipit-transformed repos, the build dir that
has been mapped into that dir.
scratch_dir - a place where we can store repos and build bits.
This path should be stable across runs and ideally
should not be in the repo of the project being built,
but that is ultimately where we generally fall back
for builds outside of FB
install_dir - where the project will ultimately be installed
num_jobs - the level of concurrency to use while building
use_shipit - use real shipit instead of the simple shipit transformer
vcvars_path - Path to external VS toolchain's vsvarsall.bat
"""
if not num_jobs:
import multiprocessing
num_jobs = multiprocessing.cpu_count()
if is_windows():
# On Windows the cpu count tends to be the HT count.
# Running with that level of concurrency tends to
# swamp the system and make hard to perform other
# light work. Let's halve the number of cores here
# to win that back. The user can still specify a
# larger number if desired.
num_jobs = int(num_jobs / 2)
if not install_dir:
install_dir = os.path.join(scratch_dir, "installed")
self.project_hashes = None
for p in ["../deps/github_hashes", "../project_hashes"]:
hashes = os.path.join(fbcode_builder_dir, p)
if os.path.exists(hashes):
self.project_hashes = hashes
break
# Detect what repository and project we are being run from.
self.repo_root, self.repo_project = detect_project(os.getcwd())
# If we are running from an fbsource repository, set self.fbsource_dir
# to allow the ShipIt-based fetchers to use it.
if self.repo_project == "fbsource":
self.fbsource_dir = self.repo_root
else:
self.fbsource_dir = None
self.num_jobs = num_jobs
self.scratch_dir = scratch_dir
self.install_dir = install_dir
self.fbcode_builder_dir = fbcode_builder_dir
self.host_type = host_type
self.use_shipit = use_shipit
self.allow_system_packages = allow_system_packages
if vcvars_path is None and is_windows():
# On Windows, the compiler is not available in the PATH by
# default so we need to run the vcvarsall script to populate the
# environment. We use a glob to find some version of this script
# as deployed with Visual Studio 2017. This logic can also
# locate Visual Studio 2019 but note that at the time of writing
# the version of boost in our manifest cannot be built with
# VS 2019, so we're effectively tied to VS 2017 until we upgrade
# the boost dependency.
vcvarsall = []
for year in ["2017", "2019"]:
vcvarsall += glob.glob(
os.path.join(
os.environ["ProgramFiles(x86)"],
"Microsoft Visual Studio",
year,
"*",
"VC",
"Auxiliary",
"Build",
"vcvarsall.bat",
)
)
vcvars_path = vcvarsall[0]
self.vcvars_path = vcvars_path
@property
def manifests_dir(self):
return os.path.join(self.fbcode_builder_dir, "manifests")
def is_darwin(self):
return self.host_type.is_darwin()
def is_windows(self):
return self.host_type.is_windows()
def get_vcvars_path(self):
return self.vcvars_path
def is_linux(self):
return self.host_type.is_linux()
def get_context_generator(self, host_tuple=None, facebook_internal=None):
""" Create a manifest ContextGenerator for the specified target platform. """
if host_tuple is None:
host_type = self.host_type
elif isinstance(host_tuple, HostType):
host_type = host_tuple
else:
host_type = HostType.from_tuple_string(host_tuple)
# facebook_internal is an Optional[bool]
# If it is None, default to assuming this is a Facebook-internal build if
# we are running in an fbsource repository.
if facebook_internal is None:
facebook_internal = self.fbsource_dir is not None
return ContextGenerator(
{
"os": host_type.ostype,
"distro": host_type.distro,
"distro_vers": host_type.distrovers,
"fb": "on" if facebook_internal else "off",
"test": "off",
}
)
def compute_env_for_install_dirs(self, install_dirs, env=None, manifest=None):
if env is not None:
env = env.copy()
else:
env = Env()
env["GETDEPS_BUILD_DIR"] = os.path.join(self.scratch_dir, "build")
env["GETDEPS_INSTALL_DIR"] = self.install_dir
# On macOS we need to set `SDKROOT` when we use clang for system
# header files.
if self.is_darwin() and "SDKROOT" not in env:
sdkroot = subprocess.check_output(["xcrun", "--show-sdk-path"])
env["SDKROOT"] = sdkroot.decode().strip()
if self.fbsource_dir:
env["YARN_YARN_OFFLINE_MIRROR"] = os.path.join(
self.fbsource_dir, "xplat/third-party/yarn/offline-mirror"
)
yarn_exe = "yarn.bat" if self.is_windows() else "yarn"
env["YARN_PATH"] = os.path.join(
self.fbsource_dir, "xplat/third-party/yarn/", yarn_exe
)
node_exe = "node-win-x64.exe" if self.is_windows() else "node"
env["NODE_BIN"] = os.path.join(
self.fbsource_dir, "xplat/third-party/node/bin/", node_exe
)
env["RUST_VENDORED_CRATES_DIR"] = os.path.join(
self.fbsource_dir, "third-party/rust/vendor"
)
hash_data = get_fbsource_repo_data(self)
env["FBSOURCE_HASH"] = hash_data.hash
env["FBSOURCE_DATE"] = hash_data.date
lib_path = None
if self.is_darwin():
lib_path = "DYLD_LIBRARY_PATH"
elif self.is_linux():
lib_path = "LD_LIBRARY_PATH"
elif self.is_windows():
lib_path = "PATH"
else:
lib_path = None
for d in install_dirs:
bindir = os.path.join(d, "bin")
if not (
manifest and manifest.get("build", "disable_env_override_pkgconfig")
):
pkgconfig = os.path.join(d, "lib/pkgconfig")
if os.path.exists(pkgconfig):
add_path_entry(env, "PKG_CONFIG_PATH", pkgconfig)
pkgconfig = os.path.join(d, "lib64/pkgconfig")
if os.path.exists(pkgconfig):
add_path_entry(env, "PKG_CONFIG_PATH", pkgconfig)
if not (manifest and manifest.get("build", "disable_env_override_path")):
add_path_entry(env, "CMAKE_PREFIX_PATH", d)
# Allow resolving shared objects built earlier (eg: zstd
# doesn't include the full path to the dylib in its linkage
# so we need to give it an assist)
if lib_path:
for lib in ["lib", "lib64"]:
libdir = os.path.join(d, lib)
if os.path.exists(libdir):
add_path_entry(env, lib_path, libdir)
# Allow resolving binaries (eg: cmake, ninja) and dlls
# built by earlier steps
if os.path.exists(bindir):
add_path_entry(env, "PATH", bindir, append=False)
# If rustc is present in the `bin` directory, set RUSTC to prevent
# cargo uses the rustc installed in the system.
if self.is_windows():
cargo_path = os.path.join(bindir, "cargo.exe")
rustc_path = os.path.join(bindir, "rustc.exe")
rustdoc_path = os.path.join(bindir, "rustdoc.exe")
else:
cargo_path = os.path.join(bindir, "cargo")
rustc_path = os.path.join(bindir, "rustc")
rustdoc_path = os.path.join(bindir, "rustdoc")
if os.path.isfile(rustc_path):
env["CARGO_BIN"] = cargo_path
env["RUSTC"] = rustc_path
env["RUSTDOC"] = rustdoc_path
if self.is_windows():
libcrypto = os.path.join(d, "lib/libcrypto.lib")
else:
libcrypto = os.path.join(d, "lib/libcrypto.so")
openssl_include = os.path.join(d, "include/openssl")
if os.path.isfile(libcrypto) and os.path.isdir(openssl_include):
# This must be the openssl library, let Rust know about it
env["OPENSSL_DIR"] = d
return env
def list_win32_subst_letters():
output = subprocess.check_output(["subst"]).decode("utf-8")
# The output is a set of lines like: `F:\: => C:\open\some\where`
lines = output.strip().split("\r\n")
mapping = {}
for line in lines:
fields = line.split(": => ")
if len(fields) != 2:
continue
letter = fields[0]
path = fields[1]
mapping[letter] = path
return mapping
def find_existing_win32_subst_for_path(
path, # type: str
subst_mapping, # type: typing.Mapping[str, str]
):
# type: (...) -> typing.Optional[str]
path = ntpath.normcase(ntpath.normpath(path))
for letter, target in subst_mapping.items():
if ntpath.normcase(target) == path:
return letter
return None
def find_unused_drive_letter():
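    """Return a currently unused Windows drive letter (preferring later letters), or None if all are taken."""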
import ctypes
buffer_len = 256
blen = ctypes.c_uint(buffer_len)
rv = ctypes.c_uint()
bufs = ctypes.create_string_buffer(buffer_len)
rv = ctypes.windll.kernel32.GetLogicalDriveStringsA(blen, bufs)
if rv > buffer_len:
raise Exception("GetLogicalDriveStringsA result too large for buffer")
nul = "\x00".encode("ascii")
used = [drive.decode("ascii")[0] for drive in bufs.raw.strip(nul).split(nul)]
possible = [c for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
available = sorted(list(set(possible) - set(used)))
if len(available) == 0:
return None
# Prefer to assign later letters rather than earlier letters
return available[-1]
def create_subst_path(path):
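    """Map the given path to a short subst drive, reusing an existing mapping when
    possible; returns the mapped drive root to use in place of the long path."""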
for _attempt in range(0, 24):
drive = find_existing_win32_subst_for_path(
path, subst_mapping=list_win32_subst_letters()
)
if drive:
return drive
available = find_unused_drive_letter()
if available is None:
raise Exception(
(
"unable to make shorter subst mapping for %s; "
"no available drive letters"
)
% path
)
# Try to set up a subst mapping; note that we may be racing with
# other processes on the same host, so this may not succeed.
try:
subprocess.check_call(["subst", "%s:" % available, path])
return "%s:\\" % available
except Exception:
print("Failed to map %s -> %s" % (available, path))
raise Exception("failed to set up a subst path for %s" % path)
def _check_host_type(args, host_type):
if host_type is None:
host_tuple_string = getattr(args, "host_type", None)
if host_tuple_string:
host_type = HostType.from_tuple_string(host_tuple_string)
else:
host_type = HostType()
assert isinstance(host_type, HostType)
return host_type
def setup_build_options(args, host_type=None):
""" Create a BuildOptions object based on the arguments """
fbcode_builder_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
scratch_dir = args.scratch_path
if not scratch_dir:
# TODO: `mkscratch` doesn't currently know how best to place things on
# sandcastle, so whip up something reasonable-ish
if "SANDCASTLE" in os.environ:
if "DISK_TEMP" not in os.environ:
raise Exception(
(
"I need DISK_TEMP to be set in the sandcastle environment "
"so that I can store build products somewhere sane"
)
)
scratch_dir = os.path.join(
os.environ["DISK_TEMP"], "fbcode_builder_getdeps"
)
if not scratch_dir:
try:
scratch_dir = (
subprocess.check_output(
["mkscratch", "path", "--subdir", "fbcode_builder_getdeps"]
)
.strip()
.decode("utf-8")
)
except OSError as exc:
if exc.errno != errno.ENOENT:
# A legit failure; don't fall back, surface the error
raise
# This system doesn't have mkscratch so we fall back to
# something local.
munged = fbcode_builder_dir.replace("Z", "zZ")
for s in ["/", "\\", ":"]:
munged = munged.replace(s, "Z")
if is_windows() and os.path.isdir("c:/open"):
temp = "c:/open/scratch"
else:
temp = tempfile.gettempdir()
scratch_dir = os.path.join(temp, "fbcode_builder_getdeps-%s" % munged)
if not is_windows() and os.geteuid() == 0:
# Running as root; in the case where someone runs
# sudo getdeps.py install-system-deps
# and then runs as build without privs, we want to avoid creating
# a scratch dir that the second stage cannot write to.
# So we generate a different path if we are root.
scratch_dir += "-root"
if not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
if is_windows():
subst = create_subst_path(scratch_dir)
print(
"Mapping scratch dir %s -> %s" % (scratch_dir, subst), file=sys.stderr
)
scratch_dir = subst
else:
if not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
# Make sure we normalize the scratch path. This path is used as part of the hash
# computation for detecting if projects have been updated, so we need to always
# use the exact same string to refer to a given directory.
# But! realpath in some combinations of Windows/Python3 versions can expand the
# drive substitutions on Windows, so avoid that!
if not is_windows():
scratch_dir = os.path.realpath(scratch_dir)
host_type = _check_host_type(args, host_type)
return BuildOptions(
fbcode_builder_dir,
scratch_dir,
host_type,
install_dir=args.install_prefix,
num_jobs=args.num_jobs,
use_shipit=args.use_shipit,
vcvars_path=args.vcvars_path,
allow_system_packages=args.allow_system_packages,
)
| apache-2.0 | -3,480,103,009,987,581,000 | 36.610503 | 87 | 0.556726 | false |
shirkey/macaroons-kopdar | create_the_token.py | 1 | 1563 | #!/usr/bin/env python
# encoding: utf-8
# START 1 OMIT
import macaroons
# a basic macaroon consists of three elements
# 1) the secret key known only to the credential authority (a web service or software)
secret = 'kopdar_python_rocks' # // HL
# 2) some interesting metadata about this macaroon (can be anything)
public = 'kopdar_members_only' # // HL
# 3) a URI/URL, possibly referencing a targeted web service (again, can be anything)
location = 'http://www.python.or.id/' # // HL
# END 1 OMIT
def get_macaroon():
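    """Build a macaroon from the module-level location/secret/public values and return it serialized."""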
servis_kopdar = macaroons.create(location, secret, public)
return servis_kopdar.serialize()
def get_secret():
return secret
if __name__ == "__main__":
# START 2 OMIT
# with these three arguments, we can now create the macaroon
servis_kopdar = macaroons.create(location, secret, public) # // HL
# we now hold a reference to our newly instantiated macaroon object
print(servis_kopdar)
# we can inspect the HMAC signature of this message
print('.signature: %s' % servis_kopdar.signature) # // HL
# or the other public metadata, like identifier or location
print('.identifier: %s' % servis_kopdar.identifier) # // HL
print('.location: %s' % servis_kopdar.location) # // HL
# or all the metadata + signature in a single call
print('.inspect():')
    print(servis_kopdar.inspect()) # // HL
# finally, we can convert the macaroon object to a serialized form for transport
    print('.serialize(): %s' % servis_kopdar.serialize()) # // HL
# END 2 OMIT
get_macaroon()
| mit | -5,930,907,426,016,740,000 | 29.057692 | 86 | 0.678823 | false |
husk00/pantaliQa | libs/pyata/src/basic_classes/connection.py | 1 | 3674 | ##########################################################
##########################################################
# description: abstract class that represents any Connection between boxes
#
# author: jeraman
# date: 15/04/2010
##########################################################
##########################################################
from box import *
from time import *
memory_connections = []
#connects two generic boxes
def connect (b1, outlet, b2, inlet):
c = Connection(b1, outlet, b2, inlet)
return c.status
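# illustrative usage (hypothetical boxes): connect(box_a, 0, box_b, 0) patches outlet 0
# of box_a into inlet 0 of box_b and returns True if the connection was created in Pd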
#disconnect a connection
def disconnect(b1, outlet, b2, inlet):
    #search for the connection
i = search_connection(b1, outlet, b2, inlet)
    #if it really exists
if i>-1:
return memory_connections[i].delete()
else:
return False
#searchs a generic connection
def search_connection (b1, outlet, b2, inlet):
i=0
    #searching for a specific connection in memory
for c in memory_connections:
if (b1==c.box_orig) & (outlet==c.outlet) & (b2==c.box_dest) & (inlet==c.inlet):
return i
i+=1
    #return -1 if not found
if i==len(memory_connections):
return -1
class Connection:
canvas = " "
snd = ""
#constructor
def __init__(self, box_orig, outlet, box_dest, inlet):
self.box_orig = box_orig
self.outlet = outlet
self.box_dest = box_dest
self.inlet = inlet
self.status = self.create()
#creates a connection in Pd
def create(self):
b1 = search_box(self.box_orig)
b2 = search_box(self.box_dest)
if (b1 > -1) & (b2 > -1):
#get the state before inserting the connection
Connection.snd.save_state(Connection.canvas)
t1 = self.snd.get_file()
#try to build the connection
command = Connection.canvas + "connect " + str(b1) + " " + str(self.outlet) + " " + str(b2) + " " + str(self.inlet) + " ; "
Connection.snd.send_pd(command)
            #get the state after inserting the connection
Connection.snd.save_state(Connection.canvas)
t2 = self.snd.get_file()
#verifies if changed
if t1 != t2:
memory_connections.append(self)
return True
else:
return False
    #deletes a connection in Pd
def delete(self):
b1 = search_box(self.box_orig)
b2 = search_box(self.box_dest)
if (b1 > -1) & (b2 > -1):
#get the state before removing the connection
Connection.snd.save_state(Connection.canvas)
t1 = self.snd.get_file()
#try to remove the connection
command = Connection.canvas + "disconnect " + str(b1) + " " + str(self.outlet) + " " + str(b2) + " " + str(self.inlet) + " ; "
Connection.snd.send_pd(command)
#get the state after removing the connection
Connection.snd.save_state(Connection.canvas)
t2 = self.snd.get_file()
#verifies if changed
if t1 != t2:
i=search_connection(self.box_orig, self.outlet, self.box_dest, self.inlet)
memory_connections.pop(i)
return True
else:
return False
#method that sets the canvas
@staticmethod
def set_canvas(nc):
Connection.canvas = nc
#method that sets the sender
@staticmethod
def set_sender(s):
Connection.snd = s
| gpl-2.0 | 8,921,326,554,972,775,000 | 27.929134 | 138 | 0.504899 | false |
krishauser/pyOptimalMotionPlanning | main.py | 1 | 6097 | from __future__ import print_function,division
from six import iteritems
from pomp.planners import allplanners
from pomp.planners import test
from pomp.example_problems import *
from pomp.spaces.objectives import *
import time
import copy
import sys
import os,errno
numTrials = 10
def mkdir_p(path):
"""Quiet path making"""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def testPlannerDefault(problem,problemName,maxTime,plannerType,**plannerParams):
global numTrials
print("Planning with",plannerType,'on problem',problemName)
planner = problem.planner(plannerType,**plannerParams)
folder = os.path.join("data",problemName)
mkdir_p(folder)
test.testPlanner(planner,numTrials,maxTime,os.path.join(folder,allplanners.filename[plannerType]+'.csv'))
all_planners = ['ao-est','ao-rrt','r-est','r-est-prune','r-rrt','r-rrt-prune','rrt*','anytime-rrt','stable-sparse-rrt']
rrt_planners = ['ao-rrt','anytime-rrt','r-rrt','r-rrt-prune','stable-sparse-rrt']
est_planners = ['ao-est','r-est','r-est-prune']
all_problems = {'Kink':geometric.kinkTest(),
'Bugtrap':geometric.bugtrapTest(),
'Dubins':dubins.dubinsCarTest(),
'Dubins2':dubins.dubinsTest2(),
'Flappy':flappy.flappyTest(),
'DoubleIntegrator':doubleintegrator.doubleIntegratorTest(),
'Pendulum':pendulum.pendulumTest(),
'LQR':lqr.lqrTest()}
defaultParameters = {'maxTime':30}
customParameters = {'Kink':{'maxTime':40,'nextStateSamplingRange':0.15},
'Bugtrap':{'maxTime':40,'nextStateSamplingRange':0.15},
'Pendulum':{'maxTime':120,'edgeCheckTolerance':0.1,'selectionRadius':.3,'witnessRadius':0.16},
'Flappy':{'maxTime':120,'edgeCheckTolerance':4,'selectionRadius':70,'witnessRadius':35},
'DoubleIntegrator':{'maxTime':60,'selectionRadius':0.3,'witnessRadius':0.3},
'Dubins':{'selectionRadius':0.25,'witnessRadius':0.2},
'Dubins2':{'selectionRadius':0.25,'witnessRadius':0.2}
}
def parseParameters(problem,planner):
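    """Parse a planner spec such as "ao-rrt(selectionRadius=0.3)" into a
    (name, params) pair, starting from the default / per-problem parameters."""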
global defaultParameters,customParameters
params = copy.deepcopy(defaultParameters)
if problem in customParameters:
params.update(customParameters[problem])
if '(' in planner:
#parse out key=value,... string
name,args = planner.split('(',1)
if args[-1] != ')':
raise ValueError("Planner string expression must have balanced parenthesis, i.e.: func ( arglist )")
args = args[:-1]
args = args.split(',')
for arg in args:
kv = arg.split("=")
if len(kv) != 2:
raise ValueError("Unable to parse argument "+arg)
try:
params[kv[0]] = int(kv[1])
except ValueError:
try:
params[kv[0]] = float(kv[1])
except ValueError:
params[kv[0]] = kv[1]
planner = name
return planner,params
def runTests(problems = None,planners = None):
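    """Run the benchmark for every selected (problem, planner) pair; None or
    'all' selects all known problems / planners."""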
global all_planners,all_problems
    if planners is None or planners == 'all' or planners[0] == 'all':
planners = all_planners
    if problems is None or problems == 'all' or problems[0] == 'all':
problems = all_problems.keys()
for prname in problems:
pr = all_problems[prname]
for p in planners:
p,params = parseParameters(prname,p)
maxTime = params['maxTime']
del params['maxTime']
if pr.differentiallyConstrained() and p in allplanners.kinematicPlanners:
#p does not support differentially constrained problems
continue
testPlannerDefault(pr,prname,maxTime,p,**params)
print("Finished test on problem",prname,"with planner",p)
print("Parameters:")
for (k,v) in iteritems(params):
print(" ",k,":",v)
return
def runViz(problem,planner):
#runVisualizer(rrtChallengeTest(),type=planner,nextStateSamplingRange=0.15,edgeCheckTolerance = 0.005)
planner,params = parseParameters(problem,planner)
if 'maxTime' in params:
del params['maxTime']
print("Planning on problem",problem,"with planner",planner)
print("Parameters:")
for (k,v) in iteritems(params):
print(" ",k,":",v)
runVisualizer(all_problems[problem],type=planner,**params)
if __name__=="__main__":
#HACK: uncomment one of these to test manually
#runViz('Kink','rrt*')
#test KD-tree in noneuclidean spaces
#runViz('Pendulum','ao-rrt(numControlSamples=10,nearestNeighborMethod=bruteforce)')
#runViz('Pendulum','ao-rrt')
#runViz('Dubins','stable-sparse-rrt(selectionRadius=0.25,witnessRadius=0.2)')
#runViz('DoubleIntegrator','stable-sparse-rrt(selectionRadius=0.3,witnessRadius=0.3)')
#runViz('Pendulum','stable-sparse-rrt(selectionRadius=0.3,witnessRadius=0.16)')
#runViz('Flappy','stable-sparse-rrt(selectionRadius=70,witnessRadius=35)')
if len(sys.argv) < 3:
print("Usage: main.py [-v] Problem Planner1 ... Plannerk")
print()
print(" Problem can be one of:")
print(" ",",\n ".join(sorted(all_problems)))
print(" or 'all' to test all problems.")
print()
print(" Planner can be one of:")
print(" ",",\n ".join(sorted(all_planners)))
print(" or 'all' to test all planners.")
print()
print(" If -v is provided, runs an OpenGL visualization of planning")
exit(0)
if sys.argv[1] == '-v':
from pomp.visualizer import runVisualizer
#visualization mode
print("Testing visualization with problem",sys.argv[2],"and planner",sys.argv[3])
runViz(sys.argv[2],sys.argv[3])
else:
print()
print("Testing problems",sys.argv[1],"with planners",sys.argv[2:])
runTests(problems=[sys.argv[1]],planners=sys.argv[2:])
| apache-2.0 | 4,687,380,318,268,090,000 | 39.919463 | 119 | 0.61358 | false |
atria-soft/zeus | lutinMacro_zeus.py | 1 | 40436 | #!/usr/bin/python
import realog.debug as debug
import lutin.tools as tools
import os
import copy
list_of_known_type = [
["void", "void"],
["bool", "bool"],
["string", "etk::String"],
["uri", "etk::Uri"],
["path", "etk::Path"],
["int8", "int8_t"],
["int16", "int16_t"],
["int32", "int32_t"],
["int64", "int64_t"],
["uint8", "uint8_t"],
["uint16", "uint16_t"],
["uint32", "uint32_t"],
["uint64", "uint64_t"],
["float32", "float"],
["float64", "double"],
["vector:bool", "etk::Vector<bool>"],
["vector:string", "etk::Vector<etk::String>"],
["vector:int8", "etk::Vector<int8_t>"],
["vector:int16", "etk::Vector<int16_t>"],
["vector:int32", "etk::Vector<int32_t>"],
["vector:int64", "etk::Vector<int64_t>"],
["vector:uint8", "etk::Vector<uint8_t>"],
["vector:uint16", "etk::Vector<uint16_t>"],
["vector:uint32", "etk::Vector<uint32_t>"],
["vector:uint64", "etk::Vector<uint64_t>"],
["vector:float32", "etk::Vector<float>"],
["vector:float64", "etk::Vector<double>"],
["duration", "echrono::Duration"],
["time", "echrono::Time"],
["file", "zeus::File"],
["stream", "zeus::Stream"],
["json", "ejson::Object"],
["raw", "zeus::Raw"],
["ActionNotif", "zeus::ActionNotification<etk::String>"],
]
def get_list_type():
out = []
for elem in list_of_known_type:
out.append(elem[0])
return out
def validate_type(data):
if data in get_list_type():
return True
val = data.split(":")
if val[0] == "obj":
return True
return False
def zeus_object_to_dictionary(name):
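	"""Derive the generated class / proxy / register names and output file
	paths from a dash-separated IDL object name (all parts but the last form
	the C++ namespace, the last one becomes the class name)."""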
out = {}
if type(name) == str:
name = name.split("-")
debug.debug("transform: " + str(name))
# set first capital of the class name
if len(name) != 0:
name[-1] = capital_first(name[-1])
out["namespace"] = ""
for elem in name[:-1]:
out["namespace"] += elem + "::"
out["name_class"] = out["namespace"] + name[-1]
out["name_class_short"] = name[-1]
out["name_class_proxy"] = out["namespace"] + "Proxy" + name[-1]
out["name_class_proxy_short"] = "Proxy" + name[-1]
out["name_class_register"] = out["namespace"] + "register" + name[-1]
out["name_class_register_short"] = "register" + name[-1]
out["name_class_macro"] = ""
for elem in name:
out["name_class_macro"] += elem.upper() + "_"
base_path = ""
for elem in name[:-1]:
base_path += elem + "/"
out["file_name_class_src"] = base_path + name[-1] + ".cpp";
out["file_name_class_header"] = base_path + name[-1] + ".hpp"
out["file_name_class_proxy_src"] = base_path + "Proxy" + name[-1] + ".cpp";
out["file_name_class_proxy_header"] = base_path + "Proxy" + name[-1] + ".hpp"
out["file_name_class_register_src"] = base_path + "register" + name[-1] + ".cpp";
out["file_name_class_register_header"] = base_path + "register" + name[-1] + ".hpp"
debug.debug(" class name : " + out["name_class"])
debug.debug(" class Proxy name : " + out["name_class_proxy"])
debug.debug(" path class name src : " + out["file_name_class_src"])
debug.debug(" path class name header : " + out["file_name_class_header"])
debug.debug(" path class Proxy name src : " + out["file_name_class_proxy_src"])
debug.debug(" path class Proxy name header : " + out["file_name_class_proxy_header"])
debug.debug(" path class Proxy name src : " + out["file_name_class_register_src"])
debug.debug(" path class Proxy name header : " + out["file_name_class_register_header"])
return out
def convert_type_in_cpp(data, proxy=False, argument=False):
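	"""Map an IDL type name to its C++ type; "obj:" types resolve either to
	the generated Proxy class or to an ememory::SharedPtr, depending on the
	proxy/argument direction."""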
for elem in list_of_known_type:
if data == elem[0]:
return elem[1]
val = data.split(":")
if val[0] == "obj":
prop = zeus_object_to_dictionary(val[1])
if proxy == True:
if argument == False:
return prop["name_class_proxy"]
else:
return "ememory::SharedPtr<" + prop["name_class"] + ">"
else:
if argument == True:
return prop["name_class_proxy"]
else:
return "ememory::SharedPtr<" + prop["name_class"] + ">"
debug.error(" can not find type in IDL : '" + data + "'")
def remove_start_stop_spacer(data):
dataout = copy.deepcopy(data)
while len(dataout) >= 1 \
and ( dataout[0] == " " \
or dataout[0] == "\t"):
dataout = dataout[1:]
while len(dataout) >= 1 \
and ( dataout[-1] == " " \
or dataout[-1] == "\t"):
dataout = dataout[:-1]
return dataout
def capital_first(data):
return data[0].upper() + data[1:]
class AttributeDefinition:
def __init__(self):
self.name = "";
self.brief = "";
self.type = "";
def set_name(self, name):
self.name = remove_start_stop_spacer(name);
def set_brief(self, desc):
self.name = "";
self.brief = remove_start_stop_spacer(desc).replace("\"", "\\\"")
self.type = "";
def set_type(self, type):
self.type = remove_start_stop_spacer(type);
def display(self):
debug.info(" BRIEF: " + self.brief)
debug.info(" " + self.type + " " + self.name + ";")
def generate_cpp(self, space):
out = "";
out += space + "eproperty::Value<" + convert_type_in_cpp(self.type) + "> " + self.name + "; //!<" + self.brief + "\n"
out += space + "//! Internal interface to call property\n"
out += space + "virtual " + convert_type_in_cpp(self.type) + " _internalWrapperProperty_get" + capital_first(self.name) + "() {\n"
out += space + " return " + self.name + ".get();\n"
out += space + "}\n"
out += space + "//! Internal interface to call property\n"
out += space + "virtual void _internalWrapperProperty_set" + capital_first(self.name) + "(" + convert_type_in_cpp(self.type) + " _value) {\n"
out += space + " " + self.name + ".set(_value);\n"
out += space + "}\n"
return out;
def generate_hpp_proxy(self, space):
out = "";
out += space + "zeus::RemoteProperty<" + convert_type_in_cpp(self.type) + "> " + self.name + "; //!<" + self.brief + "\n"
return out;
def generate_cpp_proxy(self, space, class_name):
out = "";
return out;
class FunctionDefinition:
def __init__(self):
self.name = ""
self.brief = ""
self.action_type = "void_tmp"
self.return_type = ""
self.return_brief = ""
self.parameters = []
self.is_action = False
def set_action(self, type):
self.action_type = remove_start_stop_spacer(type)
self.is_action = True
def set_function_name(self, name):
self.name = remove_start_stop_spacer(name);
def set_brief(self, desc):
self.name = "";
self.brief = remove_start_stop_spacer(desc).replace("\"", "\\\"")
self.return_type = "";
self.return_brief = "";
self.parameters = []
def add_param_comment(self, name, desc):
for elem in self.parameters:
if elem["name"] == "" \
and elem["brief"] == "":
elem["name"] = remove_start_stop_spacer(name)
elem["brief"] = remove_start_stop_spacer(desc).replace("\"", "\\\"")
return;
self.parameters.append({
"type":"",
"name":remove_start_stop_spacer(name),
"brief":remove_start_stop_spacer(desc).replace("\"", "\\\"")
})
def set_return_comment(self, desc):
self.return_brief = remove_start_stop_spacer(desc)
def set_return_type(self, type):
self.return_type = remove_start_stop_spacer(type)
def add_parameter_type(self, type):
for elem in self.parameters:
if elem["type"] == "":
elem["type"] = remove_start_stop_spacer(type)
return;
self.parameters.append({
"type":remove_start_stop_spacer(type),
"name":"",
"brief":""
})
def display(self):
debug.info(" BRIEF: " + self.brief)
debug.info(" BRIEF-return: " + self.return_brief)
debug.info(" " + self.return_type + " " + self.name + "(")
for elem in self.parameters:
debug.info(" " + elem["type"] + " " + elem["name"] + ", # " + elem["brief"])
		if self.is_action == False:
			debug.info("	)")
		else:
			debug.info("	) action/event type = '" + self.action_type + "'")
def generate_doxy(self, space):
# generate doxygen comment:
out = space + "/**\n"
if self.brief != "":
out += space + " * @brief " + self.brief + "\n"
for elem in self.parameters:
if elem["name"] == "" \
and elem["brief"] == "":
continue
out += space + " * @param[in] "
if elem["name"] != "":
out += elem["name"] + " "
if elem["brief"] != "":
out += elem["brief"] + " "
out += "\n"
if self.is_action == True:
out += space + " * @note: This is an action ==> it can notify of the progression of the call\n"
if self.return_brief != "":
out += space + " * @return " + self.return_brief + "\n"
out += space + " */\n"
return out
def generate_cpp(self, space, class_name="", virtual=True, action=False):
out = "";
out += self.generate_doxy(space)
out += space
if self.return_type != "":
if virtual == True:
out += "virtual "
out += convert_type_in_cpp(self.return_type, False, False) + " "
else:
out += "static ememory::SharedPtr<" + class_name + "> "
out += self.name + "("
param_data = ""
id_parameter = 0
if self.is_action == True:
param_data += "zeus::ActionNotification<" + convert_type_in_cpp(self.action_type, False, True) + ">& _notifs"
id_parameter += 1
for elem in self.parameters:
id_parameter += 1
if len(param_data) != 0:
param_data += ", "
param_data += convert_type_in_cpp(elem["type"], False, True) + " _"
if elem["name"] == "":
param_data += "no_name_param_" + str(id_parameter)
else:
param_data += elem["name"]
out += param_data
out += ")"
if self.return_type != "" \
and virtual == True:
out += " = 0"
out += ";\n"
return out;
def generate_hpp_proxy(self, space):
out = "";
out += self.generate_doxy(space)
out += space + "virtual zeus::Future<" + convert_type_in_cpp(self.return_type, True, False)
if self.action_type != "void_tmp":
out += "," + convert_type_in_cpp(self.action_type, True, False)
out += "> " + self.name + "("
param_data = ""
id_parameter = 0
for elem in self.parameters:
id_parameter += 1
if len(param_data) != 0:
param_data += ", "
param_data += "const " + convert_type_in_cpp(elem["type"], True, True) + "& _"
if elem["name"] == "":
param_data += "no_name_param_" + str(id_parameter)
else:
param_data += elem["name"]
out += param_data
out += ");\n"
return out;
def generate_cpp_proxy(self, space, class_name):
out = "";
out += space + "zeus::Future<" + convert_type_in_cpp(self.return_type, True, False)
if self.action_type != "void_tmp":
out += "," + convert_type_in_cpp(self.action_type, True, False)
out += "> " + class_name + "::" + self.name + "("
param_data = ""
id_parameter = 0
for elem in self.parameters:
id_parameter += 1
if len(param_data) != 0:
param_data += ", "
param_data += "const " + convert_type_in_cpp(elem["type"], True, True) + "& _"
if elem["name"] == "":
param_data += "no_name_param_" + str(id_parameter)
else:
param_data += elem["name"]
out += param_data
out += ") {\n"
space += " "
if self.is_action == True:
out += space + 'return m_obj.callAction("' + self.name + '"'
else:
out += space + 'return m_obj.call("' + self.name + '"'
id_parameter = 0
for elem in self.parameters:
id_parameter += 1
out += ", "
out += "_"
if elem["name"] == "":
out += "no_name_param_" + str(id_parameter)
else:
out += elem["name"]
out += ');\n'
out += "}\n"
space = space[:-1]
return out;
class ServiceDefinition:
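	"""Parsed representation of one .zeus.idl file; the generate_* methods
	emit the corresponding C++ class, register and proxy headers/sources."""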
def __init__(self):
self.name = [""];
self.name_prop = {}
self.brief = "";
self.version = "";
self.api = "";
self.authors = []
self.attributes = []
self.functions = []
self.factories = []
self.tools = []
self.imports = []
self.licence_header = "/** @file\n"
self.licence_header += " * @note Generated file !!! Do not modify !!!\n"
self.licence_header += " * @license MPL-2\n"
self.licence_header += " * @copyright none\n"
self.licence_header += " */\n"
def set_name(self, value):
self.name = value
# TODO : Check range ...
self.prop = zeus_object_to_dictionary(self.name)
def set_brief(self, value):
self.brief = remove_start_stop_spacer(value).replace("\"", "\\\"")
def set_version(self, value):
self.version = remove_start_stop_spacer(value).replace("\"", "\\\"")
def set_api(self, value):
self.api = remove_start_stop_spacer(value).replace("\"", "\\\"")
def add_author(self, value):
self.authors.append(remove_start_stop_spacer(value).replace("\"", "\\\""))
def add_factory(self, value):
# TODO : Check if function already exist
self.factories.append(value)
def add_tool(self, value):
# TODO : Check if function already exist
self.tools.append(value)
def add_function(self, value):
# TODO : Check if function already exist
self.functions.append(value)
def add_attribute(self, value):
# TODO : Check if attribute already exist
self.attributes.append(value)
def add_import(self, value):
self.imports.append(value)
def display(self):
debug.info("Display service definition : ")
debug.info(" name: " + str(self.name))
debug.info(" brief: '" + str(self.brief) + "'")
debug.info(" version: '" + str(self.version) + "'")
debug.info(" api: '" + str(self.api) + "'")
debug.info(" authors: '" + str(self.authors) + "'")
debug.info(" functions: ")
for elem in self.functions:
elem.display();
##
## CLASS.hpp
##
def generate_header(self):
out = ""
# TODO: add global header:
out += self.licence_header
out += "#pragma once\n"
out += "\n"
out += "#include <etk/types.hpp>\n"
out += "#include <eproperty/Value.hpp>\n"
out += "#include <zeus/Raw.hpp>\n"
out += "#include <etk/uri/uri.hpp>\n"
out += "#include <etk/String.hpp>\n"
out += "#include <etk/Vector.hpp>\n"
out += "#include <ememory/memory.hpp>\n"
out += "#include <zeus/ActionNotification.hpp>\n"
for elem in self.imports:
prop = zeus_object_to_dictionary(elem)
out += "#include <" + prop["file_name_class_header"] + ">\n"
out += "#include <" + prop["file_name_class_proxy_header"] + ">\n"
out += "\n"
space = ""
for elem in self.name[:-1]:
out += space + "namespace " + elem + " {\n"
space += " "
out += space + "class " + self.prop["name_class_proxy_short"] + ";\n"
out += space + " /**\n"
if self.brief != "":
out += space + " * @brief " + self.brief + " \n"
if self.version != "":
out += space + " * version:" + self.version + "\n"
if self.api != "":
out += space + " * api:" + self.api + "\n"
for elem in self.authors:
out += space + " * authors:" + elem + "\n"
out += space + " */\n"
out += space + "class " + self.prop["name_class_short"] + " {\n"
space += " "
out += space + "public:\n"
space += " "
if len(self.factories) == 0:
out += space + "/**\n"
out += space + " * @brief generic factory, pay attention when set arguments...\n"
out += space + " */\n"
out += space + "template<typename ... ZEUS_OBJECT_CREATE>\n"
out += space + "static ememory::SharedPtr<" + self.prop["name_class"] + "> create(ZEUS_OBJECT_CREATE ...);\n"
else:
for elem in self.factories:
out += elem.generate_cpp(space, self.prop["name_class"])
out += space + "/**\n"
out += space + " * @brief Generic virtual destructor\n"
out += space + " */\n"
out += space + "virtual ~" + self.prop["name_class_short"] + "() = default;\n"
for elem in self.attributes:
out += elem.generate_cpp(space)
for elem in self.functions:
out += elem.generate_cpp(space)
space = space[:-2]
out += space + "};\n"
# now we simply add tools provided:
for elem in self.tools:
out += elem.generate_cpp(space, virtual=False)
for elem in self.name[:-1]:
space = space[:-1]
out += space + "}\n"
return [self.prop["file_name_class_header"], out]
##
## CLASS.cpp
##
def generate_source(self):
out = ""
out += self.licence_header
out += "\n"
out += "#include <" + self.prop["file_name_class_register_header"] + ">\n"
out += "#include <" + self.prop["file_name_class_header"] + ">\n"
out += "#include <" + self.prop["file_name_class_proxy_header"] + ">\n"
out += "#include <etk/types.hpp>\n"
out += "#include <zeus/debug.hpp>\n"
out += "#include <zeus/message/Message.hpp>\n"
out += "#include <zeus/message/Data.hpp>\n"
out += "#include <zeus/message/ParamType.hpp>\n"
out += "#include <zeus/message/Parameter.hpp>\n"
out += "#include <zeus/Future.hpp>\n"
out += "#include <etk/stdTools.hpp>\n"
out += "#include <zeus/AbstractFunction.hpp>\n"
out += "#include <climits>\n"
out += "#include <etk/path/fileSystem.hpp>\n"
out += "#include <zeus/WebServer.hpp>\n"
out += "#include <zeus/Object.hpp>\n"
out += "\n"
		# now generate the get and set parameter object ...
out += "namespace zeus {\n"
out += " namespace message {\n"
out += " template<> const zeus::message::ParamType& createType<ememory::SharedPtr<" + self.prop["name_class"] + ">>() {\n"
out += " static zeus::message::ParamType type(\"obj:" + self.prop["name_class"] + "\", zeus::message::paramTypeObject, false, false);\n"
out += " return type;\n"
out += " }\n"
out += " \n"
out += " template<> const zeus::message::ParamType& createType<" + self.prop["name_class_proxy"] + ">() {\n"
out += " static zeus::message::ParamType type(\"obj:" + self.prop["name_class"] + "\", zeus::message::paramTypeObject, false, false);\n"
out += " return type;\n"
out += " }\n"
out += " \n"
out += " template<>\n"
out += " void Parameter::addParameter<ememory::SharedPtr<" + self.prop["name_class"] + ">>(uint16_t _paramId, const ememory::SharedPtr<" + self.prop["name_class"] + ">& _value) {\n"
out += " etk::Vector<uint8_t> data;\n"
"""
out += " addType(data, createType<" + class_name + ">());\n"
"""
out += " addTypeObject(data, \"obj:" + self.prop["name_class"] + "\");\n"
out += " int32_t currentOffset = data.size();\n"
out += " int32_t startOffset = data.size();\n"
out += " data.resize(data.size()+4);\n"
out += " uint32_t fullId = 0;\n"
		# convert the object into a real System Object ...
out += " if (m_iface != null) {\n"
out += " uint16_t id = m_iface->getAddress();\n"
out += " uint16_t idObj = m_iface->getNewObjectId();\n"
out += " ememory::SharedPtr<zeus::ObjectType<" + self.prop["name_class"] + ">> obj = ememory::makeShared<zeus::ObjectType<" + self.prop["name_class"] + ">>(m_iface, idObj, _value);\n"
out += " " + self.prop["name_class_register"] + "(*obj);\n"
out += " obj->addRemote(getDestination());\n"
out += " m_iface->addWebObj(obj);\n"
out += " ZEUS_DEBUG(\"Create object ID : \" << idObj);\n"
out += " fullId = (uint32_t(id)<<16)+idObj;\n"
out += " }\n"
		# return the Object ID and the interface address
out += " memcpy(&data[currentOffset], &fullId, 4);\n"
out += " m_parameter.pushBack(etk::makePair(startOffset,data));\n"
out += " }\n"
out += " \n"
out += " template<>\n"
out += " " + self.prop["name_class_proxy"] + " Parameter::getParameter<" + self.prop["name_class_proxy"] + ">(int32_t _id) const {\n"
out += " ememory::SharedPtr<zeus::ObjectRemoteBase> out;\n"
out += " out = zeus::message::Parameter::getParameter<ememory::SharedPtr<zeus::ObjectRemoteBase>>(_id);\n"
out += " return zeus::ObjectRemote(out);\n"
out += " }\n"
out += " }\n"
out += " \n"
out += " template<> " + self.prop["name_class_proxy"] + " futureGetValue<" + self.prop["name_class_proxy"] + ">(ememory::SharedPtr<zeus::Promise>& _promise) {\n"
out += " ememory::SharedPtr<zeus::ObjectRemoteBase> out;\n"
out += " if ( _promise == null\n"
out += " || _promise->getRaw() == null) {\n"
out += " return zeus::ObjectRemote(out);\n"
out += " }\n"
out += " if (_promise->getRaw()->getType() != zeus::message::type::answer) {\n"
out += " ZEUS_WARNING(\"No Return value ...\");\n"
out += " return zeus::ObjectRemote(out);\n"
out += " }\n"
out += " out = static_cast<zeus::message::Answer*>(_promise->getRaw().get())->getAnswer<ememory::SharedPtr<zeus::ObjectRemoteBase>>();\n"
out += " \n"
out += " return zeus::ObjectRemote(out);\n"
out += " }\n"
out += " \n"
out += "}\n"
return [self.prop["file_name_class_src"], out]
##
## registerClass.hpp
##
def generate_register_header(self):
out = self.licence_header
out += "#pragma once\n"
out += "\n"
out += "#include <etk/types.hpp>\n"
out += "#include <zeus/Object.hpp>\n"
out += "#include <zeus/Client.hpp>\n"
out += "#include <" + self.prop["file_name_class_header"] + ">\n"
out += "#include <etk/String.hpp>\n"
out += "#include <etk/Vector.hpp>\n"
out += "\n"
space = ""
for elem in self.name[:-1]:
out += space + "namespace " + elem + " {\n"
space += " "
out += space + "\n"
out += space + "void " + self.prop["name_class_register_short"] + "(zeus::ObjectType<" + self.prop["name_class"] + ">& _interface);\n"
out += space + "\n"
for elem in self.name[:-1]:
space = space[:-1]
out += space + "}\n"
out += space + "\n"
out += space + "#define " + self.prop["name_class_macro"] + "DECLARE(type) \\\n"
out += space + " ETK_EXPORT_API void SERVICE_IO_instanciate(uint32_t _transactionId, ememory::SharedPtr<zeus::WebServer>& _iface, uint32_t _destination) { \\\n"
out += space + " ememory::SharedPtr<type> tmp; \\\n"
out += space + " tmp = ememory::makeShared<type>(_destination>>16); \\\n"
out += space + " ememory::SharedPtr<" + self.prop["name_class"] + "> tmp2 = tmp; \\\n"
out += space + " _iface->answerValue(_transactionId, uint32_t(_iface->getAddress())<<16, _destination, tmp2); \\\n"
out += space + " }\n"
out += space + "\n"
return [self.prop["file_name_class_register_header"], out]
##
## registerClass.cpp
##
def generate_register_code(self):
out = self.licence_header
for elem in self.imports:
prop = zeus_object_to_dictionary(elem)
out += "#include <" + prop["file_name_class_header"] + ">\n"
out += "#include <" + prop["file_name_class_proxy_header"] + ">\n"
out += "#include <" + self.prop["file_name_class_register_header"] + ">\n"
out += "#include <zeus/debug.hpp>\n"
out += "\n"
space = ""
out += space + "void " + self.prop["name_class_register"] + "(zeus::ObjectType<" + self.prop["name_class"] + ">& _interface) {\n"
space += " "
out += space + 'ZEUS_VERBOSE("===========================================================");\n';
out += space + 'ZEUS_VERBOSE("== Instanciate service: ' + self.prop["name_class"] + '");\n';
out += space + 'ZEUS_VERBOSE("===========================================================");\n';
#out += space + '_serviceInterface.propertyNameService.set("' + self.name[-1].lower() + '");\n'
if self.brief != "":
out += space + '_interface.setDescription("' + self.brief + '");\n';
if self.version != "":
out += space + '_interface.setVersion("' + self.version + '");\n';
if self.api != "":
out += space + '_interface.setType("' + self.api + '");\n';
for elem in self.authors:
out += space + '_interface.addAuthor("' + elem.split("<")[0] + '", "' + elem.split("<")[1].replace(">","") + '");\n';
if len(self.functions) != 0 \
or len(self.attributes) != 0:
out += space + "zeus::AbstractFunction* func = null;\n"
for elem in self.attributes:
out += space + 'func = _interface.advertise("' + elem.name + '.set", &' + self.prop["name_class"] + '::_internalWrapperProperty_set' + capital_first(elem.name) + ');\n'
out += space + 'if (func != null) {\n'
if elem.brief != "":
out += space + ' func->setDescription("Set parameter ' + elem.brief + '");\n'
out += space + '}\n'
out += space + 'func = _interface.advertise("' + elem.name + '.get", &' + self.prop["name_class"] + '::_internalWrapperProperty_get' + capital_first(elem.name) + ');\n'
out += space + 'if (func != null) {\n'
if elem.brief != "":
out += space + ' func->setDescription("Get parameter ' + elem.brief + '");\n'
out += space + '}\n'
for elem in self.functions:
out += space + 'func = _interface.advertise("' + elem.name + '", &' + self.prop["name_class"] + '::' + elem.name + ');\n'
out += space + 'if (func != null) {\n'
space += " "
if elem.brief != "":
out += space + 'func->setDescription("' + elem.brief + '");\n'
for elem_p in elem.parameters:
if elem_p["name"] == "" \
and elem_p["brief"] == "":
continue
out += space + 'func->addParam("'
if elem_p["name"] != "":
out += elem_p["name"]
out += '", "'
if elem_p["brief"] != "":
out += elem_p["brief"]
out += '");\n'
if elem.return_brief != "":
out += space + 'func->setReturn("' + elem.return_brief + '");\n'
space = space[:-1]
out += space + '}\n'
out += space + 'ZEUS_VERBOSE("===========================================================");\n';
out += space + 'ZEUS_VERBOSE("== Instanciate service: ' + self.prop["name_class"] + ' [DONE]");\n';
out += space + 'ZEUS_VERBOSE("===========================================================");\n';
out += "}\n"
out += "\n"
return [self.prop["file_name_class_register_src"], out]
##
## ProxyClass.hpp
##
def generate_proxy_header(self):
out = ""
out += self.licence_header
out += "#pragma once\n"
out += "\n"
out += "#include <zeus/ObjectRemote.hpp>\n"
out += "#include <zeus/Proxy.hpp>\n"
out += "#include <zeus/RemoteProperty.hpp>\n"
out += "#include <etk/String.hpp>\n"
out += "#include <etk/Vector.hpp>\n"
out += "#include <" + self.prop["file_name_class_header"] + ">\n"
for elem in self.imports:
prop = zeus_object_to_dictionary(elem)
#out += "#include <" + prop["file_name_class_header"] + ">\n"
out += "#include <" + prop["file_name_class_proxy_header"] + ">\n"
out += "\n"
space = ""
for elem in self.name[:-1]:
out += space + "namespace " + elem + " {\n"
space += " "
out += space + " /**\n"
if self.brief != "":
out += space + " * @brief " + self.brief + " \n"
if self.version != "":
out += space + " * version:" + self.version + "\n"
if self.api != "":
out += space + " * api:" + self.api + "\n"
for elem in self.authors:
out += space + " * authors:" + elem + "\n"
out += space + " */\n"
out += space + "class " + self.prop["name_class_proxy_short"] + " :public zeus::Proxy {\n"
space += " "
out += space + "public:\n"
out += space + " const " + self.prop["name_class_proxy_short"] + "& operator= (const zeus::ObjectRemote& _srv) {\n"
out += space + " m_obj = _srv;\n"
out += space + " return *this;\n"
out += space + " }\n"
out += space + " const " + self.prop["name_class_proxy_short"] + "& operator= (const " + self.prop["name_class_proxy_short"] + "& _obj) {\n"
out += space + " m_obj = _obj.m_obj;\n"
out += space + " return *this;\n"
out += space + " }\n"
out += space + " ~" + self.prop["name_class_proxy_short"] + "() = default;\n"
out += space + " " + self.prop["name_class_proxy_short"] + "()"
if len(self.attributes) != 0:
out += ": \n"
first = True
for elem in self.attributes:
if first == False:
out += ",\n"
out += space + " " + elem.name + "(m_obj, \"" + elem.name + "\")"
first = False
out += " {}\n"
out += space + " " + self.prop["name_class_proxy_short"] + "(const zeus::ObjectRemote& _srv) :\n"
out += space + " zeus::Proxy(_srv)"
for elem in self.attributes:
out += ",\n"
out += space + " " + elem.name + "(m_obj, \"" + elem.name + "\")"
first = False
out += " {\n"
out += space + " \n"
out += space + " }\n"
"""
out += space + " bool exist() const {\n"
out += space + " return m_obj.exist();\n"
out += space + " }\n"
"""
out += space + "public:\n"
space += " "
"""
out += space + "/**\n"
out += space + " * @brief Generic virtual destructor\n"
out += space + " */\n"
out += space + "virtual ~" + self.name[-1] + "() = default;\n"
"""
for elem in self.attributes:
out += elem.generate_hpp_proxy(space)
for elem in self.functions:
out += elem.generate_hpp_proxy(space)
space = space[:-2]
out += space + "};\n"
for elem in self.name[:-1]:
space = space[:-1]
out += space + "}\n"
return [self.prop["file_name_class_proxy_header"], out]
##
## ProxyClass.cpp
##
def generate_proxy_code(self):
out = ""
out += self.licence_header
out += "\n"
out += "#include <" + self.prop["file_name_class_proxy_header"] + ">\n"
out += "\n"
for elem in self.attributes:
out += elem.generate_cpp_proxy("", self.prop["name_class_proxy"])
for elem in self.functions:
out += elem.generate_cpp_proxy("", self.prop["name_class_proxy"])
return [self.prop["file_name_class_proxy_src"], out]
def tool_generate_idl(target, module, data_option):
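	"""lutin action callback: parse one .zeus.idl file and attach the
	generated class, register and proxy sources/headers to the module."""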
data_path = data_option["path"]
debug.debug("Parsing .zeus.idl [start] " + str(data_path))
name_file = os.path.basename(data_path)
	if len(name_file) < 9 \
	   or name_file[-9:] != ".zeus.idl":
		debug.error("IDL must have an extension ending with '.zeus.idl' and not with '" + name_file[-9:] + "'")
elem_name = ""
type_of_object = "unknow"
if len(name_file) >= 13 \
and name_file[-13:] == ".srv.zeus.idl":
elem_name = name_file[:-13]
type_of_object = "srv"
elif len(name_file) >= 16 \
and name_file[-16:] == ".struct.zeus.idl":
elem_name = name_file[:-16]
type_of_object = "struct"
elif len(name_file) >= 13 \
and name_file[-13:] == ".obj.zeus.idl":
elem_name = name_file[:-13]
type_of_object = "obj"
else:
		debug.error("IDL must have an extension ending with '(struct|obj|srv).zeus.idl' and not with '" + name_file + "'")
service_def = ServiceDefinition()
service_def.set_name(elem_name.split("-"))
data = tools.file_read_data(os.path.join(module.get_origin_path(), data_path))
if len(data) == 0:
debug.error("Can not parse zeus.idl ==> no data in the file, or no file : " + os.path.join(module.get_origin_path(), data_path))
return;
# standardise windows/Mac file in Linux file.
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
id_line = 0
multi_comment = False
current_def = FunctionDefinition()
current_attr = AttributeDefinition()
for line in data.split("\n"):
id_line += 1;
if len(line) == 0:
# empty line
debug.extreme_verbose("find line " + str(id_line) + " ==> empty line")
continue
if multi_comment == False:
if len(line) >= 2 \
and line[:2] == "/*":
# Comment multi-line
debug.extreme_verbose("find line " + str(id_line) + " ==> comment multi-line [START]")
if len(line) > 2:
debug.error("line " + str(id_line) + " ==> /* must be alone in the line (no text after)")
multi_comment = True
continue
if len(line) >= 2 \
and line[:2] == "*/":
debug.error("line " + str(id_line) + " ==> find '*/' Without a start multiline-comment '/*'")
else:
if len(line) >= 2 \
and line[:2] == "*/":
# Comment multi-line
debug.extreme_verbose("find line " + str(id_line) + " ==> comment multi-line [STOP]")
multi_comment = False
if len(line) > 2:
debug.error("line " + str(id_line) + " ==> find '/*' must be alone in the line (no text after)")
continue
continue
if len(line) >= 2 \
and line[:2] == "//":
# Comment line
debug.extreme_verbose("find line " + str(id_line) + " ==> comment line")
continue
if len(line) >= 1 \
and line[0] == "#":
# Documentation line
debug.extreme_verbose("find line " + str(id_line) + " ==> documentation line")
#get keyword:
list_elems = line.split(":")
if len(list_elems) < 1:
debug.error("line " + str(id_line) + " ==> Missing Keyword ... ");
doc_keyword = list_elems[0] + ":"
doc_data = line[len(doc_keyword):]
if doc_keyword == "#brief:":
debug.extreme_verbose(" BRIEF: '" + doc_data + "'")
current_def = FunctionDefinition()
current_def.set_brief(doc_data)
current_attr.set_brief(doc_data)
elif doc_keyword == "#param:":
debug.extreme_verbose(" PARAMETER: '" + doc_data + "'")
# TODO : Do it better ...
current_def.add_param_comment(doc_data.split(":")[0], doc_data.split(":")[1])
elif doc_keyword == "#return:":
debug.extreme_verbose(" RETURN: '" + doc_data + "'")
current_def.set_return_comment(doc_data)
elif doc_keyword == "#elem-brief:":
debug.extreme_verbose(" SRV-BRIEF: '" + doc_data + "'")
service_def.set_brief(doc_data)
elif doc_keyword == "#elem-version:":
debug.extreme_verbose(" SRV-VERSION: '" + doc_data + "'")
service_def.set_version(doc_data)
elif doc_keyword == "#elem-type:":
debug.extreme_verbose(" SRV-TYPE: '" + doc_data + "'")
service_def.set_api(doc_data)
elif doc_keyword == "#elem-author:":
debug.extreme_verbose(" SRV-AUTHOR: '" + doc_data + "'")
service_def.add_author(doc_data)
else:
				debug.warning("line " + str(id_line) + " ==> Unknown keyword: '" + doc_keyword + "'")
debug.error(" support only: '#brief:' '#param:' '#return:' '#elem-brief:' '#elem-version:' '#elem-type:' '#elem-author:'")
continue
debug.extreme_verbose("Need to parse the function/attribute line:")
debug.extreme_verbose(" '" + line + "'")
if line[:7] == "import ":
debug.debug("find import : " + line)
# TODO : Add check ...
service_def.add_import(line.split(" ")[1])
elif line[-1] == ")":
# Find a function ==> parse it
#debug.error("line " + str(id_line) + " Can not parse function the line dos not ended by a ')'")
#get first part (befor '('):
# get type of the function (factory, tool, action, function(default))
type_function = "function"
if line[0] == "[":
if line[:13] == "[tool-remote]":
type_function = "tool-remote"
line = line[13:]
if line[:9] == "[factory]":
type_function = "factory"
line = line[9:]
if line[:10] == "[function]":
type_function = "function"
line = line[10:]
if line[:8] == "[action ":
type_function = "action"
line = line[8:]
type_event = "";
for elem in line:
if elem == "]":
break
type_event += elem
line = line[len(type_event)+1:]
if validate_type(type_event) == False:
						debug.error("line " + str(id_line) + " action type unknown : '" + type_event + "' not in " + str(get_list_type()))
			# remove white space
while len(line)>0 \
and line[0] == " ":
line = line[1:]
if type_function == "factory":
line = " " + line
# parse the fuction
list_elems = line.split("(")
if len(list_elems) <= 1:
debug.error("line " + str(id_line) + " function parsing error missing the '(' element")
fist_part = list_elems[0].replace(" ", " ").replace(" ", " ").replace(" ", " ")
argument_list = list_elems[1].replace(" ", "").replace(" ", "").replace(" ", "")[:-1]
if len(argument_list) != 0:
argument_list = argument_list.split(",")
else:
argument_list = []
			# separate the return type from the function name
list_elems = fist_part.split(" ")
if len(list_elems) <= 1:
debug.error("line " + str(id_line) + " function return and name is not parsable")
return_value = list_elems[0]
function_name = list_elems[1]
# check types:
debug.extreme_verbose(" Parse of function done :")
current_def.set_function_name(function_name)
if type_function == "tool":
current_def.set_return_type(return_value)
debug.extreme_verbose(" return:" + return_value)
if validate_type(return_value) == False:
					debug.error("line " + str(id_line) + " function return type unknown : '" + return_value + "' not in " + str(get_list_type()))
elif type_function == "factory":
if function_name != "create":
debug.error("line " + str(id_line) + " factory function name must be 'create' not '" + function_name + "'")
debug.extreme_verbose(" return: --- ")
elif validate_type(return_value) == False:
			debug.error("line " + str(id_line) + " function return type unknown : '" + return_value + "' not in " + str(get_list_type()))
else:
current_def.set_return_type(return_value)
debug.extreme_verbose(" return:" + return_value)
for elem in argument_list:
if validate_type(elem) == False:
				debug.error("line " + str(id_line) + " function argument type unknown : '" + elem + "' not in " + str(get_list_type()))
debug.extreme_verbose(" name:" + function_name)
debug.extreme_verbose(" arguments:" + str(argument_list))
for elem in argument_list:
current_def.add_parameter_type(elem)
if type_function == "function":
service_def.add_function(current_def)
elif type_function == "action":
current_def.set_action(type_event)
service_def.add_function(current_def)
elif type_function == "factory":
service_def.add_factory(current_def)
elif type_function == "tool-remote":
service_def.add_tool(current_def)
else:
			debug.error("line " + str(id_line) + " Unknown type : " + str(type_function))
else:
			# remove optional "property " at the start
if line[:9] == "property ":
line = line[9:]
# attribute parsing ==> parameters
			# it must be a single "type name" pair separated by a space
if len(line.split("(")) != 1:
				debug.error("line " + str(id_line) + " Can not parse attribute: unexpected '(' in the line")
elem = line.split(" ")
if len(elem) != 2:
				debug.error("line " + str(id_line) + " Can not parse attribute: it must be a type and a name separated by a space")
if validate_type(elem[0]) == False:
				debug.error("line " + str(id_line) + " Attribute type unknown : '" + elem[0] + "' not in " + str(get_list_type()))
current_attr.set_type(elem[0]);
current_attr.set_name(elem[1]);
service_def.add_attribute(current_attr)
# reset it ...
current_def = FunctionDefinition()
current_attr = AttributeDefinition()
if multi_comment == True:
debug.error("reach end of file and missing end of multi-line comment */")
debug.verbose("Parsing idl Done (no error ...)")
#service_def.display()
service_header = service_def.generate_header()
service_source = service_def.generate_source()
register_header = service_def.generate_register_header()
register_code = service_def.generate_register_code()
proxy_header = service_def.generate_proxy_header()
proxy_code = service_def.generate_proxy_code()
debug.verbose("----------------- " + service_header[0] + " -----------------")
debug.verbose("\n" + service_header[1])
debug.verbose("----------------- " + service_source[0] + " -----------------")
debug.verbose("\n" + service_source[1])
debug.verbose("----------------- " + register_header[0] + " -----------------")
debug.verbose("\n" + register_header[1])
debug.verbose("----------------- " + register_code[0] + " -----------------")
debug.verbose("\n" + register_code[1])
debug.verbose("----------------- " + proxy_header[0] + " -----------------")
debug.verbose("\n" + proxy_header[1])
debug.verbose("----------------- " + proxy_code[0] + " -----------------")
debug.verbose("\n" + proxy_code[1])
tmp_path = os.path.join(target.get_build_path_temporary_generate(module.get_name()), "idl_src")
module.add_generated_header_file(service_header[1], service_header[0], install_element=True)
module.add_generated_src_file(service_source[1], service_source[0])
module.add_generated_header_file(register_header[1], register_header[0], install_element=True)
module.add_generated_src_file(register_code[1], register_code[0])
module.add_generated_header_file(proxy_header[1], proxy_header[0], install_element=True)
module.add_generated_src_file(proxy_code[1], proxy_code[0])
	# if it is a service, we need to install a simple empty file to register the service as available ...
if type_of_object == "srv":
module.add_generated_data_file("", "zeus/" + elem_name + ".srv", install_element=True)
debug.debug("Parsing .zeus.idl [DONE]")
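# public entry points: schedule the IDL code generation as a lutin module action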
def parse_object_idl(module, idl_path):
module.add_action(tool_generate_idl, data={"path":idl_path, "type":"object"})
def parse_struct_idl(module, idl_path):
module.add_action(tool_generate_idl, data={"path":idl_path, "type":"struct"})
| apache-2.0 | -5,072,047,862,792,280,000 | 36.233886 | 188 | 0.571694 | false |
zonca/petsc4py | conf/baseconf.py | 1 | 27657 | # --------------------------------------------------------------------
__all__ = ['PetscConfig',
'setup', 'Extension',
'config', 'build', 'build_src', 'build_ext',
'clean', 'test', 'sdist',
'log',
]
# --------------------------------------------------------------------
import sys, os
try:
import setuptools
except ImportError:
setuptools = None
def import_command(cmd):
try:
from importlib import import_module
except ImportError:
import_module = lambda n: __import__(n, fromlist=[None])
try:
if not setuptools: raise ImportError
mod = import_module('setuptools.command.' + cmd)
return getattr(mod, cmd)
except ImportError:
mod = import_module('distutils.command.' + cmd)
return getattr(mod, cmd)
if setuptools:
from setuptools import setup
from setuptools import Extension as _Extension
from setuptools import Command
else:
from distutils.core import setup
from distutils.core import Extension as _Extension
from distutils.core import Command
_config = import_command('config')
_build = import_command('build')
_build_ext = import_command('build_ext')
_install = import_command('install')
_clean = import_command('clean')
_sdist = import_command('sdist')
from distutils import sysconfig
from distutils import log
from distutils.util import split_quoted, execute
from distutils.errors import DistutilsError
# --------------------------------------------------------------------
def fix_config_vars(names, values):
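    """Fix distutils config vars on macOS: honor ARCHFLAGS/SDKROOT from the
    environment instead of the -arch/-isysroot flags Python was built with."""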
import os, re
values = list(values)
if sys.platform == 'darwin':
if 'ARCHFLAGS' in os.environ:
ARCHFLAGS = os.environ['ARCHFLAGS']
for i, flag in enumerate(list(values)):
                flag, count = re.subn(r'-arch\s+\w+', ' ', flag)
if count and ARCHFLAGS:
flag = flag + ' ' + ARCHFLAGS
values[i] = flag
if 'SDKROOT' in os.environ:
SDKROOT = os.environ['SDKROOT']
for i, flag in enumerate(list(values)):
flag, count = re.subn('-isysroot [^ \t]*', ' ', flag)
if count and SDKROOT:
flag = flag + ' ' + '-isysroot ' + SDKROOT
values[i] = flag
return values
def get_config_vars(*names):
# Core Python configuration
values = sysconfig.get_config_vars(*names)
# Do any distutils flags fixup right now
values = fix_config_vars(names, values)
return values
from distutils.unixccompiler import UnixCCompiler
rpath_option_orig = UnixCCompiler.runtime_library_dir_option
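# rewrite runtime library dir options (-R) as -Wl,-rpath on Linux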
def rpath_option(compiler, dir):
option = rpath_option_orig(compiler, dir)
if sys.platform[:5] == 'linux':
if option.startswith('-R'):
option = option.replace('-R', '-Wl,-rpath,', 1)
elif option.startswith('-Wl,-R'):
option = option.replace('-Wl,-R', '-Wl,-rpath,', 1)
return option
UnixCCompiler.runtime_library_dir_option = rpath_option
# --------------------------------------------------------------------
class PetscConfig:
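    """Reads the PETSc configuration (petscvariables) for a given PETSC_DIR/
    PETSC_ARCH and applies it to distutils extensions and compilers."""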
def __init__(self, petsc_dir, petsc_arch):
self.configdict = { }
if not petsc_dir:
raise DistutilsError("PETSc not found")
if not os.path.isdir(petsc_dir):
raise DistutilsError("invalid PETSC_DIR: %s" % petsc_dir)
self.version = self._get_petsc_version(petsc_dir)
self.configdict = self._get_petsc_config(petsc_dir, petsc_arch)
self.PETSC_DIR = self['PETSC_DIR']
self.PETSC_ARCH = self['PETSC_ARCH']
language_map = {'CONLY':'c', 'CXXONLY':'c++'}
self.language = language_map[self['PETSC_LANGUAGE']]
def __getitem__(self, item):
return self.configdict[item]
def configure(self, extension, compiler=None):
self.configure_extension(extension)
if compiler is not None:
self.configure_compiler(compiler)
def _get_petsc_version(self, petsc_dir):
import re
version_re = {
'major' : re.compile(r"#define\s+PETSC_VERSION_MAJOR\s+(\d+)"),
'minor' : re.compile(r"#define\s+PETSC_VERSION_MINOR\s+(\d+)"),
'micro' : re.compile(r"#define\s+PETSC_VERSION_SUBMINOR\s+(\d+)"),
'patch' : re.compile(r"#define\s+PETSC_VERSION_PATCH\s+(\d+)"),
'release': re.compile(r"#define\s+PETSC_VERSION_RELEASE\s+(\d+)"),
}
petscversion_h = os.path.join(petsc_dir, 'include', 'petscversion.h')
with open(petscversion_h, 'rt') as f: data = f.read()
major = int(version_re['major'].search(data).groups()[0])
minor = int(version_re['minor'].search(data).groups()[0])
micro = int(version_re['micro'].search(data).groups()[0])
release = int(version_re['release'].search(data).groups()[0])
return (major, minor, micro), bool(release)
def _get_petsc_config(self, petsc_dir, petsc_arch):
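        # read lib/petsc/conf/variables and petscvariables (makefile syntax)
        # and return the merged variable dictionary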
from os.path import join, isdir, exists
PETSC_DIR = petsc_dir
PETSC_ARCH = petsc_arch
#
confdir = join('lib', 'petsc', 'conf')
if not (PETSC_ARCH and isdir(join(PETSC_DIR, PETSC_ARCH))):
petscvars = join(PETSC_DIR, confdir, 'petscvariables')
PETSC_ARCH = makefile(open(petscvars, 'rt')).get('PETSC_ARCH')
if not (PETSC_ARCH and isdir(join(PETSC_DIR, PETSC_ARCH))):
PETSC_ARCH = ''
#
variables = join(PETSC_DIR, confdir, 'variables')
if not exists(variables):
variables = join(PETSC_DIR, PETSC_ARCH, confdir, 'variables')
petscvariables = join(PETSC_DIR, PETSC_ARCH, confdir, 'petscvariables')
#
with open(variables) as f:
contents = f.read()
with open(petscvariables) as f:
contents += f.read()
#
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
confstr = 'PETSC_DIR = %s\n' % PETSC_DIR
confstr += 'PETSC_ARCH = %s\n' % PETSC_ARCH
confstr += contents
confdict = makefile(StringIO(confstr))
return confdict
def _configure_ext(self, ext, dct, preppend=False):
extdict = ext.__dict__
for key, values in dct.items():
if key in extdict:
for value in values:
if value not in extdict[key]:
if preppend:
extdict[key].insert(0, value)
else:
extdict[key].append(value)
def configure_extension(self, extension):
# define macros
macros = [('PETSC_DIR', self['PETSC_DIR'])]
extension.define_macros.extend(macros)
# includes and libraries
petsc_inc = flaglist(self['PETSC_CC_INCLUDES'])
petsc_lib = flaglist(
'-L%s %s' % (self['PETSC_LIB_DIR'], self['PETSC_LIB_BASIC']))
petsc_lib['runtime_library_dirs'].append(self['PETSC_LIB_DIR'])
# Link in extra libraries on static builds
if self['BUILDSHAREDLIB'] != 'yes':
petsc_ext_lib = split_quoted(self['PETSC_EXTERNAL_LIB_BASIC'])
petsc_lib['extra_link_args'].extend(petsc_ext_lib)
self._configure_ext(extension, petsc_inc, preppend=True)
self._configure_ext(extension, petsc_lib)
def configure_compiler(self, compiler):
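        # replace the distutils compiler/linker commands with PETSc's PCC and
        # PCC_LINKER while keeping Python's shared-library flags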
if compiler.compiler_type != 'unix': return
(cc, cxx, cflags, ccshared,
ldflags, ldshared, so_ext) = get_config_vars(
'CC', 'CXX', 'CFLAGS', 'CCSHARED',
'LDFLAGS', 'LDSHARED', 'SO')
cflags = cflags or ''
ldflags = ldflags or ''
cflags = cflags.replace('-Wstrict-prototypes', '')
ld = cc
ldshared = ldshared.replace(ld, '', 1).strip()
ldshared = [flg for flg in split_quoted(ldshared)
if flg not in split_quoted(ldflags)]
ldshared = str.join(' ', ldshared)
#
getenv = os.environ.get
def get_flags(cmd):
try: return ' '.join(split_quoted(cmd)[1:])
            except Exception: return ''
# C compiler
PCC = self['PCC']
PCC_FLAGS = get_flags(cc) + ' ' + self['PCC_FLAGS']
PCC_FLAGS = PCC_FLAGS.replace('-fvisibility=hidden', '')
if sys.version_info[:2] < (2, 5):
PCC_FLAGS = PCC_FLAGS.replace('-Wwrite-strings', '')
PCC = getenv('PCC', PCC) + ' ' + getenv('PCCFLAGS', PCC_FLAGS)
ccshared = getenv('CCSHARED', ccshared)
cflags = getenv('CFLAGS', cflags)
PCC_SHARED = str.join(' ', (PCC, ccshared, cflags))
# C++ compiler
if self.language == 'c++':
PCXX = PCC
else:
try:
PCXX = self['CXX']
except KeyError:
PCXX = cxx
# linker
PLD = self['PCC_LINKER']
PLD_FLAGS = get_flags(ld) + ' ' + self['PCC_LINKER_FLAGS']
PLD_FLAGS = PLD_FLAGS.replace('-fvisibility=hidden', '')
PLD = getenv('PLD', PLD) + ' ' + getenv('PLDFLAGS', PLD_FLAGS)
ldshared = getenv('LDSHARED', ldshared)
ldflags = getenv('LDFLAGS', cflags + ' ' + ldflags)
PLD_SHARED = str.join(' ', (PLD, ldshared, ldflags))
#
compiler.set_executables(
compiler = PCC,
compiler_cxx = PCXX,
linker_exe = PLD,
compiler_so = PCC_SHARED,
linker_so = PLD_SHARED,
)
compiler.shared_lib_extension = so_ext
#
if sys.platform == 'darwin':
for attr in ('preprocessor',
'compiler', 'compiler_cxx', 'compiler_so',
'linker_so', 'linker_exe'):
compiler_cmd = getattr(compiler, attr, [])
while '-mno-fused-madd' in compiler_cmd:
compiler_cmd.remove('-mno-fused-madd')
def log_info(self):
PETSC_DIR = self['PETSC_DIR']
PETSC_ARCH = self['PETSC_ARCH']
version = ".".join([str(i) for i in self.version[0]])
release = ("development", "release")[self.version[1]]
version_info = version + ' ' + release
scalar_type = self['PETSC_SCALAR']
precision = self['PETSC_PRECISION']
language = self['PETSC_LANGUAGE']
compiler = self['PCC']
linker = self['PCC_LINKER']
log.info('PETSC_DIR: %s' % PETSC_DIR )
log.info('PETSC_ARCH: %s' % PETSC_ARCH )
log.info('version: %s' % version_info)
log.info('scalar-type: %s' % scalar_type)
log.info('precision: %s' % precision)
log.info('language: %s' % language)
log.info('compiler: %s' % compiler)
log.info('linker: %s' % linker)
# --------------------------------------------------------------------
class Extension(_Extension):
pass
# --------------------------------------------------------------------
cmd_petsc_opts = [
('petsc-dir=', None,
"define PETSC_DIR, overriding environmental variables"),
('petsc-arch=', None,
"define PETSC_ARCH, overriding environmental variables"),
]
class config(_config):
Configure = PetscConfig
user_options = _config.user_options + cmd_petsc_opts
def initialize_options(self):
_config.initialize_options(self)
self.petsc_dir = None
self.petsc_arch = None
def get_config_arch(self, arch):
return config.Configure(self.petsc_dir, arch)
def run(self):
_config.run(self)
self.petsc_dir = config.get_petsc_dir(self.petsc_dir)
if self.petsc_dir is None: return
petsc_arch = config.get_petsc_arch(self.petsc_dir, self.petsc_arch)
log.info('-' * 70)
log.info('PETSC_DIR: %s' % self.petsc_dir)
arch_list = petsc_arch
if not arch_list :
arch_list = [ None ]
for arch in arch_list:
conf = self.get_config_arch(arch)
archname = conf.PETSC_ARCH or conf['PETSC_ARCH']
scalar_type = conf['PETSC_SCALAR']
precision = conf['PETSC_PRECISION']
language = conf['PETSC_LANGUAGE']
compiler = conf['PCC']
linker = conf['PCC_LINKER']
log.info('-'*70)
log.info('PETSC_ARCH: %s' % archname)
log.info(' * scalar-type: %s' % scalar_type)
log.info(' * precision: %s' % precision)
log.info(' * language: %s' % language)
log.info(' * compiler: %s' % compiler)
log.info(' * linker: %s' % linker)
log.info('-' * 70)
#@staticmethod
def get_petsc_dir(petsc_dir):
if not petsc_dir: return None
petsc_dir = os.path.expandvars(petsc_dir)
if not petsc_dir or '$PETSC_DIR' in petsc_dir:
try:
import petsc
petsc_dir = petsc.get_petsc_dir()
except ImportError:
log.warn("PETSC_DIR not specified")
return None
petsc_dir = os.path.expanduser(petsc_dir)
petsc_dir = os.path.abspath(petsc_dir)
return config.chk_petsc_dir(petsc_dir)
get_petsc_dir = staticmethod(get_petsc_dir)
#@staticmethod
def chk_petsc_dir(petsc_dir):
if not os.path.isdir(petsc_dir):
log.error('invalid PETSC_DIR: %s (ignored)' % petsc_dir)
return None
return petsc_dir
chk_petsc_dir = staticmethod(chk_petsc_dir)
#@staticmethod
def get_petsc_arch(petsc_dir, petsc_arch):
if not petsc_dir: return None
petsc_arch = os.path.expandvars(petsc_arch)
if (not petsc_arch or '$PETSC_ARCH' in petsc_arch):
petsc_arch = ''
petsc_conf = os.path.join(petsc_dir, 'lib', 'petsc', 'conf')
if os.path.isdir(petsc_conf):
petscvariables = os.path.join(petsc_conf, 'petscvariables')
if os.path.exists(petscvariables):
conf = makefile(open(petscvariables, 'rt'))
petsc_arch = conf.get('PETSC_ARCH', '')
petsc_arch = petsc_arch.split(os.pathsep)
petsc_arch = unique(petsc_arch)
petsc_arch = [arch for arch in petsc_arch if arch]
return config.chk_petsc_arch(petsc_dir, petsc_arch)
get_petsc_arch = staticmethod(get_petsc_arch)
#@staticmethod
def chk_petsc_arch(petsc_dir, petsc_arch):
valid_archs = []
for arch in petsc_arch:
arch_path = os.path.join(petsc_dir, arch)
if os.path.isdir(arch_path):
valid_archs.append(arch)
else:
log.warn("invalid PETSC_ARCH: %s (ignored)" % arch)
return valid_archs
chk_petsc_arch = staticmethod(chk_petsc_arch)
class build(_build):
user_options = _build.user_options + cmd_petsc_opts
def initialize_options(self):
_build.initialize_options(self)
self.petsc_dir = None
self.petsc_arch = None
def finalize_options(self):
_build.finalize_options(self)
self.set_undefined_options('config',
('petsc_dir', 'petsc_dir'),
('petsc_arch', 'petsc_arch'))
self.petsc_dir = config.get_petsc_dir(self.petsc_dir)
self.petsc_arch = config.get_petsc_arch(self.petsc_dir,
self.petsc_arch)
sub_commands = \
[('build_src', lambda *args: True)] + \
_build.sub_commands
class build_src(Command):
description = "build C sources from Cython files"
user_options = [
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
]
boolean_options = ['force']
def initialize_options(self):
self.force = False
def finalize_options(self):
self.set_undefined_options('build',
('force', 'force'),
)
def run(self):
pass
class build_ext(_build_ext):
user_options = _build_ext.user_options + cmd_petsc_opts
def initialize_options(self):
_build_ext.initialize_options(self)
self.petsc_dir = None
self.petsc_arch = None
self._outputs = []
def finalize_options(self):
_build_ext.finalize_options(self)
self.set_undefined_options('build',
('petsc_dir', 'petsc_dir'),
('petsc_arch', 'petsc_arch'))
if ((sys.platform.startswith('linux') or
sys.platform.startswith('gnu') or
sys.platform.startswith('sunos')) and
sysconfig.get_config_var('Py_ENABLE_SHARED')):
py_version = sysconfig.get_python_version()
bad_pylib_dir = os.path.join(sys.prefix, "lib",
"python" + py_version,
"config")
try:
self.library_dirs.remove(bad_pylib_dir)
except ValueError:
pass
pylib_dir = sysconfig.get_config_var("LIBDIR")
if pylib_dir not in self.library_dirs:
self.library_dirs.append(pylib_dir)
if pylib_dir not in self.rpath:
self.rpath.append(pylib_dir)
if sys.exec_prefix == '/usr':
self.library_dirs.remove(pylib_dir)
self.rpath.remove(pylib_dir)
def _copy_ext(self, ext):
from copy import deepcopy
extclass = ext.__class__
fullname = self.get_ext_fullname(ext.name)
modpath = str.split(fullname, '.')
pkgpath = os.path.join('', *modpath[0:-1])
name = modpath[-1]
sources = list(ext.sources)
newext = extclass(name, sources)
newext.__dict__.update(deepcopy(ext.__dict__))
newext.name = name
return pkgpath, newext
def _build_ext_arch(self, ext, pkgpath, arch):
build_temp = self.build_temp
build_lib = self.build_lib
try:
self.build_temp = os.path.join(build_temp, arch)
self.build_lib = os.path.join(build_lib, pkgpath, arch)
_build_ext.build_extension(self, ext)
finally:
self.build_temp = build_temp
self.build_lib = build_lib
def get_config_arch(self, arch):
return config.Configure(self.petsc_dir, arch)
def build_extension(self, ext):
if not isinstance(ext, Extension):
return _build_ext.build_extension(self, ext)
petsc_arch = self.petsc_arch
if not petsc_arch:
petsc_arch = [ None ]
for arch in petsc_arch:
config = self.get_config_arch(arch)
ARCH = arch or config['PETSC_ARCH']
if ARCH not in self.PETSC_ARCH_LIST:
self.PETSC_ARCH_LIST.append(ARCH)
ext.language = config.language
config.log_info()
pkgpath, newext = self._copy_ext(ext)
config.configure(newext, self.compiler)
name = self.distribution.get_name()
version = self.distribution.get_version()
distdir = "%s-%s/" % (name, version)
self._build_ext_arch(newext, pkgpath, ARCH)
def build_extensions(self, *args, **kargs):
self.PETSC_ARCH_LIST = []
_build_ext.build_extensions(self, *args,**kargs)
if not self.PETSC_ARCH_LIST: return
self.build_configuration(self.PETSC_ARCH_LIST)
def build_configuration(self, arch_list):
#
template, variables = self.get_config_data(arch_list)
config_data = template % variables
#
build_lib = self.build_lib
dist_name = self.distribution.get_name()
config_file = os.path.join(build_lib, dist_name, 'lib',
dist_name.replace('4py', '') + '.cfg')
#
def write_file(filename, data):
with open(filename, 'w') as fh:
                fh.write(data)
execute(write_file, (config_file, config_data),
msg='writing %s' % config_file,
verbose=self.verbose, dry_run=self.dry_run)
def get_config_data(self, arch_list):
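        # The rendered text is written by build_configuration() to
        # <build_lib>/<dist>/lib/*.cfg (e.g. petsc4py/lib/petsc.cfg for a
        # distribution assumed to be named 'petsc4py'). Hypothetical result:
        #   PETSC_DIR  = /opt/petsc
        #   PETSC_ARCH = arch-linux2-c-opt:arch-linux2-c-debug
        # (arch entries are joined with os.path.pathsep, ':' on POSIX)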
template = """\
PETSC_DIR = %(PETSC_DIR)s
PETSC_ARCH = %(PETSC_ARCH)s
"""
variables = {'PETSC_DIR' : self.petsc_dir,
'PETSC_ARCH' : os.path.pathsep.join(arch_list)}
return template, variables
def get_outputs(self):
self.check_extensions_list(self.extensions)
outputs = []
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
if isinstance(ext, Extension):
head, tail = os.path.split(filename)
for arch in self.petsc_arch:
outfile = os.path.join(self.build_lib,
head, arch, tail)
outputs.append(outfile)
else:
outfile = os.path.join(self.build_lib, filename)
outputs.append(outfile)
outputs = list(set(outputs))
return outputs
class install(_install):
def run(self):
_install.run(self)
class clean(_clean):
def run(self):
_clean.run(self)
from distutils.dir_util import remove_tree
if self.all:
# remove the <package>.egg_info directory
try:
egg_info = self.get_finalized_command('egg_info').egg_info
if os.path.exists(egg_info):
remove_tree(egg_info, dry_run=self.dry_run)
else:
log.debug("'%s' does not exist -- can't clean it",
egg_info)
except DistutilsError:
pass
class test(Command):
description = "run the test suite"
user_options = [('args=', None, "options")]
def initialize_options(self):
self.args = None
def finalize_options(self):
if self.args:
self.args = split_quoted(self.args)
else:
self.args = []
def run(self):
pass
class sdist(_sdist):
def run(self):
build_src = self.get_finalized_command('build_src')
build_src.run()
_sdist.run(self)
# --------------------------------------------------------------------
def append(seq, item):
if item not in seq:
seq.append(item)
def append_dict(conf, dct):
for key, values in dct.items():
if key in conf:
for value in values:
if value not in conf[key]:
conf[key].append(value)
def unique(seq):
res = []
for item in seq:
if item not in res:
res.append(item)
return res
def flaglist(flags):
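    # Translate a string (or list) of compiler/linker flags into a dict of
    # distutils Extension keyword arguments. Illustrative example (made-up flags):
    #   flaglist("-I/usr/include -DUSE_FOO -lm -L/usr/lib -Wl,-rpath,/opt/lib")
    #   -> include_dirs=['/usr/include'], define_macros=[('USE_FOO', None)],
    #      libraries=['m'], library_dirs=['/usr/lib'],
    #      runtime_library_dirs=['/opt/lib']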
conf = {
'define_macros' : [],
'undef_macros' : [],
'include_dirs' : [],
'libraries' : [],
'library_dirs' : [],
'runtime_library_dirs': [],
'extra_compile_args' : [],
'extra_link_args' : [],
}
if type(flags) is str:
flags = flags.split()
switch = '-Wl,'
newflags = []
linkopts = []
for f in flags:
if f.startswith(switch):
if len(f) > 4:
append(linkopts, f[4:])
else:
append(newflags, f)
if linkopts:
newflags.append(switch + ','.join(linkopts))
flags = newflags
append_next_word = None
for word in flags:
if append_next_word is not None:
append(append_next_word, word)
append_next_word = None
continue
switch, value = word[0:2], word[2:]
if switch == "-I":
append(conf['include_dirs'], value)
elif switch == "-D":
try:
idx = value.index("=")
macro = (value[:idx], value[idx+1:])
except ValueError:
macro = (value, None)
append(conf['define_macros'], macro)
elif switch == "-U":
append(conf['undef_macros'], value)
elif switch == "-l":
append(conf['libraries'], value)
elif switch == "-L":
append(conf['library_dirs'], value)
elif switch == "-R":
append(conf['runtime_library_dirs'], value)
elif word.startswith("-Wl"):
linkopts = word.split(',')
append_dict(conf, flaglist(linkopts[1:]))
elif word == "-rpath":
append_next_word = conf['runtime_library_dirs']
elif word == "-Xlinker":
append_next_word = conf['extra_link_args']
else:
#log.warn("unrecognized flag '%s'" % word)
pass
return conf
# --------------------------------------------------------------------
from distutils.text_file import TextFile
# Regexes needed for parsing Makefile-like syntaxes
import re as _re
_variable_rx = _re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = _re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = _re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
def makefile(fileobj, dct=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
fp = TextFile(file=fileobj,
strip_comments=1,
skip_blanks=1,
join_lines=1)
if dct is None:
dct = {}
done = {}
notdone = {}
while 1:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = str.strip(v)
if "$" in v:
notdone[n] = v
else:
try: v = int(v)
except ValueError: pass
done[n] = v
try: del notdone[n]
except KeyError: pass
fp.close()
# do variable interpolation here
while notdone:
for name in list(notdone.keys()):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = str.strip(value)
else:
done[name] = value
del notdone[name]
else:
# bogus variable reference;
# just drop it since we can't deal
del notdone[name]
# save the results in the global dictionary
dct.update(done)
return dct
# --------------------------------------------------------------------
| bsd-2-clause | 4,960,099,820,873,872,000 | 34.918182 | 79 | 0.523086 | false |
rohitwaghchaure/erpnext_smart | erpnext/manufacturing/doctype/work_management/work_management.py | 1 | 3038 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import add_days, cint, cstr, date_diff, flt, getdate, nowdate, \
get_first_day, get_last_day
from frappe.model.document import Document
class WorkManagement(Document):
def get_invoice_details(self, invoice_no=None):
self.set('production_details', [])
sales_invoices = self.get_invoice(invoice_no)
if sales_invoices:
for si_no in sales_invoices:
branch = frappe.db.get_value('User',frappe.session.user,'branch')
if frappe.db.get_value('Process Log',{'branch':branch,'parent':si_no.name},'name'):
si = self.append('production_details', {})
self.create_invoice_bundle(si_no, si)
return "Done"
def get_invoice(self, invoice_no=None):
cond = "1=1"
if invoice_no and not self.services:
cond = "sales_invoice_no='%s'"%(invoice_no)
elif self.services and not invoice_no:
cond = "tailoring_service='%s'"%(self.services)
elif self.services and invoice_no:
cond = "sales_invoice_no='%s' and tailoring_service='%s'"%(invoice_no, self.services)
return frappe.db.sql("select * from `tabProduction Dashboard Details` where %s order by sales_invoice_no desc"%(cond),as_dict=1, debug=1)
def create_invoice_bundle(self, invoice_detail, si):
color = {'Completed':'green','Pending':'red', 'Trial':'#1F8C83'}
value = '<h style="color:red">Pending</h>'
si.sales_invoice = invoice_detail.sales_invoice_no
si.article_code = invoice_detail.article_code
si.article_qty = invoice_detail.article_qty
si.work_order = invoice_detail.work_order
si.stock_entry = invoice_detail.stock_entry
si.process_allotment = invoice_detail.name
si.actual_qty = invoice_detail.fabric_qty
si.fabric_code = invoice_detail.fabric_code
si.serial_no = invoice_detail.serial_no
si.size = invoice_detail.size
if invoice_detail.status == 'Completed':
value = '<h style="color:%s">%s</h>'%(color.get(invoice_detail.status), invoice_detail.status)
elif cint(invoice_detail.trial_no) > 0:
value = '<h style="color:%s">Ready For %s %s</h>'%(color.get(invoice_detail.status), invoice_detail.status, invoice_detail.trial_no)
si.process_status = value
si.cut_order_status ='<h style="color:%s">%s</h>'%(color.get(invoice_detail.cut_order_status), invoice_detail.cut_order_status)
def save_data(self, args):
for d in self.get('production_details'):
if cint(args.get('select')) ==1 and cint(d.idx)==cint(args.get('idx')):
self.save(ignore_permissions=True)
elif cint(args.get('select')) ==0 and cint(d.idx)==cint(args.get('idx')):
self.clear_data(args.get('sales_invoice'), args.get('article_code'))
def clear_data(self, inv_no=None, item_code=None):
self.get_invoice_details()
cond = "1=1"
if inv_no and item_code:
cond = "sales_invoice= '%s' and article_code='%s'"%(inv_no, item_code)
frappe.db.sql("delete from `tabProduction Details` where %s"%(cond),debug=1) | agpl-3.0 | 7,046,307,035,014,875,000 | 45.753846 | 139 | 0.703423 | false |
bl4de/security-tools | redir_gen/redirgen.py | 1 | 1060 | #!/usr/bin/env python3
# Forked from https://gist.github.com/zPrototype/b211ae91e2b082420c350c28b6674170
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--target", "-t", action="store", help="Enter the target address", required=True)
parser.add_argument("--dest", "-d", action="store", help="Enter the address where you want to redirect to",
required=True)
parser.add_argument("--output", "-o", action="store", help="Enter output file name")
args = parser.parse_args()
payloads = []
# Remove protocol from url
junk = re.compile(r"https?://")
target = junk.sub("", args.target)
dest = junk.sub("", args.dest)
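# payloads.txt (not included here) is assumed to hold one redirect template per
# line, using the literal placeholders TARGET and DEST, e.g.:
#   https://TARGET/%2F%2FDEST/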
with open("payloads.txt", "r") as handle:
templates = handle.readlines()
for payload in templates:
payload = payload.rstrip()
payload = re.sub("TARGET", target, payload)
payload = re.sub("DEST", dest, payload)
print(payload)
payloads.append(payload)
if args.output:
    with open(args.output, "w") as handle:
[handle.write(f"{x.rstrip()}\n") for x in payloads] | mit | -8,893,788,218,060,954,000 | 31.151515 | 107 | 0.679245 | false |
suitmyself/Physika | Documentation/Cuda_Scons_Tool/cuda.py | 1 | 5959 | """
SCons.Tool.cuda
@author: WeiChen, 07/02/2016
@brief: SCons tool adding support for compiling CUDA code
@usage:
    1. this file is used in an SConstruct script with code like:
        env.Tool('cuda', toolpath = 'documentation/Cuda_Scons_Tool/')
    2. you can also copy this file to PYTHON_HOME/Lib/site-packages/scons-x.x.x/SCons/Tool
@reference:
https://bitbucket.org/scons/scons/wiki/CudaTool
https://github.com/bryancatanzaro/cuda-scons/blob/master/nvcc.py
"""
import SCons.Tool
import SCons.Scanner.C
import SCons.Defaults
import os
import sys
import platform
#cuda suffix
cuda_suffix = '.cu'
# make a CUDAScanner for finding #includes
# cuda uses the c preprocessor, so we can use the CScanner
cuda_scanner = SCons.Scanner.C.CScanner()
def generate(env):
os_name = platform.system()
os_architecture = platform.architecture()[0]
#cuda path
cuda_bin_path = ''
cuda_inc_path = ''
cuda_lib_path = ''
cuda_dll_path = ''
cuda_path = None
if 'CUDA_PATH' in os.environ:
cuda_path = os.environ['CUDA_PATH']
elif 'CUDA_PATH' in env:
cuda_path = env['CUDA_PATH']
else:
guess_path = [ '/usr/local/NVIDIA_CUDA_TOOLKIT',
'/usr/local/CUDA_TOOLKIT',
'/usr/local/cuda_toolkit',
'/usr/local/CUDA',
'/usr/local/cuda'
]
for path in guess_path:
if os.path.isdir(path):
cuda_path = path
break
    if cuda_path is None:
        sys.exit("Cannot find the CUDA_PATH. Please install CUDA OR add CUDA_PATH in your environment variables OR explicitly specify env['CUDA_PATH']!")
cuda_inc_path = cuda_path+'/include/'
cuda_bin_path = cuda_path+'/bin/'
cuda_version_str = os.path.basename(cuda_path)
cuda_version_id = filter(str.isdigit, cuda_version_str)
if os_name == 'Windows':
if os_architecture == '32bit':
cuda_lib_path = cuda_path+'/lib/Win32/'
cuda_dll_path = cuda_path+'/bin/cudart32_'+cuda_version_id+'.dll'
else:
cuda_lib_path = cuda_path+'/lib/X64/'
cuda_dll_path = cuda_path+'/bin/cudart64_'+cuda_version_id+'.dll'
elif os_name == 'Linux':
if os_architecture == '32bit':
cuda_lib_path = cuda_path+'/lib/'
else:
cuda_lib_path = cuda_path+'/lib64/'
elif os_name == 'Darwin':
cuda_lib_path = cuda_path+'/lib/'
#add include path
env.Append(CPPPATH = cuda_inc_path)
#add cuda runtime libpath and lib
env.Append(LIBPATH = cuda_lib_path)
env.Append(LIBS = 'cudart')
env.Append(LIBS = 'cudadevrt')
env.Append(LIBS = 'curand')
env['CUDA_DLL_PATH'] = cuda_dll_path
# "NVCC common command line"
if not env.has_key('_NVCCCOMCOM'):
# nvcc needs '-I' prepended before each include path, regardless of platform
env['_NVCCWRAPCPPPATH'] = '${_concat("-I ", CPPPATH, "", __env__)}'
# prepend -Xcompiler before each flag
env['_NVCCWRAPCFLAGS'] = '${_concat("-Xcompiler ", CFLAGS, "", __env__)}'
env['_NVCCWRAPSHCFLAGS'] = '${_concat("-Xcompiler ", SHCFLAGS, "", __env__)}'
#special treatment for Darwin(Mac)
#since clang could report an error if '-Xcompiler -std-gnu++11' is used
#while g++ just report a warning
if os_name == 'Darwin':
DARWIN_CCFLAGS = env['CCFLAGS'][:] #copy
if '-std=gnu++11' in DARWIN_CCFLAGS:
DARWIN_CCFLAGS.remove('-std=gnu++11')
env['DARWIN_CCFLAGS'] = DARWIN_CCFLAGS
DARWIN_SHCCFLAGS = env['SHCCFLAGS'][:] #copy
if '-std=gnu++11' in DARWIN_SHCCFLAGS:
DARWIN_SHCCFLAGS.remove('-std=gnu++11')
env['DARWIN_SHCCFLAGS'] = DARWIN_SHCCFLAGS
env['_NVCCWRAPCCFLAGS'] = '${_concat("-Xcompiler ", DARWIN_CCFLAGS, "", __env__)}'
env['_NVCCWRAPSHCCFLAGS'] = '${_concat("-Xcompiler ", DARWIN_SHCCFLAGS, "", __env__)}'
else:
env['_NVCCWRAPCCFLAGS'] = '${_concat("-Xcompiler ", CCFLAGS, "", __env__)}'
env['_NVCCWRAPSHCCFLAGS'] = '${_concat("-Xcompiler ", SHCCFLAGS, "", __env__)}'
# assemble the common command line
env['_NVCCCOMCOM'] = '${_concat("-Xcompiler ", CPPFLAGS, "", __env__)} $_CPPDEFFLAGS $_NVCCWRAPCPPPATH'
# set the include path, and pass both c compiler flags and c++ compiler flags
env['NVCCFLAGS'] = SCons.Util.CLVar('')
env['SHNVCCFLAGS'] = SCons.Util.CLVar('') + ' -shared'
# set cuda complier
env['NVCC'] = 'nvcc'
env['SHNVCC'] = 'nvcc'
# set cuda compute arch
env['CUDA_ARCH'] = '-arch=compute_52'
# 'NVCC Command'
env['NVCCCOM'] = '$NVCC -o $TARGET $CUDA_ARCH -dlink -c -dc -std=c++11 $NVCCFLAGS $_NVCCWRAPCFLAGS $_NVCCWRAPCCFLAGS $_NVCCCOMCOM $SOURCES'
env['SHNVCCCOM'] = '$SHNVCC -o $TARGET $CUDA_ARCH -dlink -c -dc -std=c++11 $SHNVCCFLAGS $_NVCCWRAPSHCFLAGS $_NVCCWRAPSHCCFLAGS $_NVCCCOMCOM $SOURCES'
# create builders that make static & shared objects from .cu files
static_obj_builder, shared_obj_builder = SCons.Tool.createObjBuilders(env)
# Add this suffix to the list of things buildable by Object
static_obj_builder.add_action(cuda_suffix, '$NVCCCOM')
shared_obj_builder.add_action(cuda_suffix, '$SHNVCCCOM')
static_obj_builder.add_emitter(cuda_suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj_builder.add_emitter(cuda_suffix, SCons.Defaults.SharedObjectEmitter)
# Add this suffix to the list of things scannable
SCons.Tool.SourceFileScanner.add_scanner(cuda_suffix, cuda_scanner)
# Prepend cuda_bin_path
env.PrependENVPath('PATH', cuda_bin_path)
def exists(env):
return env.Detect('nvcc')
| gpl-2.0 | -7,234,352,249,998,130,000 | 37.947712 | 153 | 0.594563 | false |
josuemontano/python_intro | tema_6/Leccion_3.py | 1 | 1116 | # -*- coding: utf-8 -*-
# Database connections
# Lesson 3
# SQLAlchemy: Queries
from Leccion_1 import Libro
from Leccion_2 import session
def todos_libros():
return session.query(Libro).all()
def libros_recientes():
return session.query(Libro).filter(Libro.anio_publicacion >= 2010).all()
def libros_por_titulo(titulo):
return session.query(Libro).filter(Libro.titulo.startswith(titulo)).all()
def libros_recientes_por_titulo(titulo):
return session.query(Libro).filter(Libro.anio_publicacion >= 2010, Libro.titulo.startswith(titulo)).all()
def cantidad_libros_editorial(editorial):
return session.query(Libro).filter(Libro.editorial == editorial).count()
def main():
print("Libros registrados:")
for libro in todos_libros():
print(libro.titulo)
print("\nLibros recientes:")
for libro in libros_recientes():
print(libro.anio_publicacion, libro.titulo)
print("\nLibros cuyo titulo empieza con 'Math':")
for libro in libros_por_titulo('Math'):
print(libro.anio_publicacion, libro.titulo)
if __name__ == '__main__':
main()
| gpl-3.0 | 690,082,055,112,073,500 | 24.318182 | 109 | 0.692998 | false |
ColumbiaCMB/kid_readout | apps/data_taking_scripts/2016-06-jpl-hex-271/heterodyne_scan_with_source.py | 1 | 2650 | import time
import numpy as np
from kid_readout.interactive import *
from kid_readout.measurement import acquire
from kid_readout.roach import r2heterodyne, attenuator, hardware_tools
from equipment.custom import mmwave_source
from equipment.hittite import signal_generator
from equipment.srs import lockin
logger.setLevel(logging.DEBUG)
hittite = signal_generator.Hittite(ipaddr='192.168.0.200')
hittite.set_power(0)
hittite.on()
hittite.set_freq(148e9/12.)
lockin = lockin.Lockin(LOCKIN_SERIAL_PORT)
tic = time.time()
print lockin.identification
print time.time()-tic
tic = time.time()
print lockin.fast_state
print time.time()-tic
source = mmwave_source.MMWaveSource()
source.set_attenuator_turns(3.0,3.0)
source.multiplier_input = 'hittite'
source.waveguide_twist_angle = 45
source.ttl_modulation_source = 'roach'
setup = hardware.Hardware(hittite, source,lockin)
ri = hardware_tools.r2_with_mk1(1000.)
ri.iq_delay=-1
ri.set_dac_atten(20)
ri.set_fft_gain(6)
nsamp = 2**15
step = 1
nstep = 32
#f0binned = np.round(f0s * nsamp / 512.0) * 512.0 / nsamp
offset_bins = np.arange(-(nstep), (nstep)) * step
offsets = offset_bins * 512.0 / nsamp
ri.set_modulation_output('low')
ri.set_lo(1250.)
#legacy.load_heterodyne_sweep_tones(ri,(np.arange(1,129)[None,:]*7/4.+ri.lo_frequency + offsets[:,None]),
# num_tone_samples=nsamp)
state = dict(field_canceling_magnet=False,magnetic_shield=True,cryostat='starcryo')
state.update(**setup.state())
for hittite_power in np.arange(-3.0,1,.4):
logger.info("Measuring at %.1f dBm" % hittite_power)
hittite.set_power(hittite_power)
tic = time.time()
for lo in 830.+190*np.arange(0,4):
logger.info("Measuring at LO %.1f" % lo)
ri.set_lo(lo)
df = acquire.new_nc_file(suffix='scan_lo_%.1f_MHz' % lo)
ri.set_modulation_output(7)
logger.info("autogain lockin")
time.sleep(1)
lockin.auto_gain(wait_until_done=True)
time.sleep(3)
logger.info("new sensitivity: %d values %s" % (lockin.sensitivity,str(lockin.fast_state)))
state.update(**setup.state())
ri.set_modulation_output('low')
swa = acquire.run_sweep(ri, (np.arange(1, 257)[None, :] * 7 / 8. + ri.lo_frequency + offsets[:, None]),
num_tone_samples=nsamp, length_seconds=0.1, state=state, verbose=True)
df.write(swa)
df.close()
print "elapsed:", (time.time()-tic)/60.0,'minutes'
#time.sleep(60.)
# while time.time() - tic < 5*60:
# print "waiting... %.1f min remaining" % ((5*60 - (time.time() - tic))/60)
# time.sleep(60)
| bsd-2-clause | 6,223,402,488,871,364,000 | 30.176471 | 111 | 0.660377 | false |
eoss-cloud/madxxx_catalog_api | catalog/client/services/catalog_status.py | 1 | 2495 | #-*- coding: utf-8 -*-
""" EOSS catalog system
functionality for the catalog status endpoint
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "[email protected]"
__status__ = "Production"
import logging
import falcon
import ujson
from api import max_body
from client.services.db_calls import Persistance
from client.services.static_maps import j2_env
from client.services.tools import can_zip_response, compress_body, make_GeoJson
from api_logging import logger
class CatalogStatus(object):
"""
EOSS catalog class from web API
"""
def __init__(self):
self.logger = logging.getLogger('eoss.' + __name__)
@falcon.before(max_body(64 * 1024)) # max 64kB request size
def on_get(self, req, resp, sensor):
logger.info('[GET] /catalog/status/count/%s' % (sensor))
results = dict()
        minx, maxx, miny, maxy = -180, 180, -90, 90
if 'last_days' in req.params:
last_days = int(req.params['last_days'])
else:
last_days = 4
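        # Bounds in Leaflet order: [[south, west], [north, east]]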
global_extent = [[miny, minx], [maxy, maxx]]
res = Persistance().get_observation_coverage(int(sensor), last_days=last_days)
results['geojson'] = make_GeoJson(res['geojson'], res['attr'])
content_type = 'text/html'
results = j2_env.get_template('leaflet_map.html').render(title='Reference object: %s' % sensor, center='[%f, %f]' % (21.5, -102),
zoomlevel=5, geojson=ujson.dumps(results['geojson']),
label_attribute=None,
extent=ujson.dumps(global_extent))
if can_zip_response(req.headers):
resp.set_header('Content-Type', content_type)
resp.set_header('Content-Encoding', 'gzip')
if content_type == 'application/json':
resp.body = compress_body(ujson.dumps(results))
else:
resp.body = compress_body(results)
else:
resp.set_header('Content-Type', content_type)
if content_type == 'application/json':
resp.body = ujson.dumps(results)
else:
resp.body = results
resp.status = falcon.HTTP_200 | mit | 4,771,464,230,048,616,000 | 35.173913 | 137 | 0.569539 | false |
patois/IDACyber | cyber/prototype.py | 1 | 2866 | from PyQt5.QtGui import qRgb
from PyQt5.QtCore import Qt
from idacyber import ColorFilter
from ida_kernwin import ask_text, warning
from types import FunctionType
class Prototype(ColorFilter):
name = "Prototype"
help = "Right click: edit current filter function"
highlight_cursor = False
def __init__(self, pw):
self.pw = pw
self.func_call = None
self.func_def=(
"""
def process(base, offs, b, size, width, moffs):
# print("%x+%x: %02x (total pxls %d, width %d, mouse pos %d)" % (base, offs, b, size, width, moffs))
# return (b,b,b)
if (b == 0x70 or b == 0x47):
# detect potential thumb-mode pattern
color = (0x59, 0x7c, 0x92)
elif (b & 0xf0 == 0xe0):
# detect potential ARM pattern
color = (0x00, 0x40, 0x67)
else:
# default color
color = (0x00, 0x10, 0x1b)
# cross-hair
if offs%width == moffs%width or int(offs/width) == int(moffs/width):
color = (min(color[0]+0x00,0xff),
min(color[1]+0x04,0xff),
min(color[2]+0x04,0xff))
return color""")
self._compile(self.func_def)
def _compile(self, text):
self.func_def = text
try:
self.func_code = compile(text, "", "exec")
self.func_call = FunctionType(self.func_code.co_consts[0], globals(), "")
return (True, "")
except Exception as e:
return (False, e)
return (False, "")
def _set_user_func(self):
while True:
            func_def = ask_text(0, self.func_def, "Please define function (must return a (RR,GG,BB) tuple)")
if func_def is None:
break
res, s = self._compile(func_def)
if res:
break
warning("%s" % s)
def on_mb_click(self, event, addr, size, mouse_offs):
if event.button() == Qt.RightButton:
self._set_user_func()
def on_process_buffer(self, buffers, addr, size, mouse_offs):
colors = []
width = self.pw.get_pixel_qty_per_line()
for mapped, buf in buffers:
if mapped:
for offs in range(len(buf)):
try:
r, g, b = self.func_call(
addr,
offs,
buf[offs]&0xff,
size,
width,
mouse_offs)
colors.append((True, qRgb(r&0xFF, g&0xFF, b&0xFF)))
except:
colors.append((False, None))
else:
colors += [(False, None)]*len(buf)
return colors
def FILTER_INIT(pw):
return Prototype(pw)
def FILTER_EXIT():
return | mit | -5,844,695,650,592,457,000 | 29.866667 | 111 | 0.495115 | false |
craws/OpenAtlas | openatlas/database/gis.py | 1 | 4367 | import ast
from typing import Any, Dict, List
from flask import g
class Gis:
@staticmethod
def add_example_geom(id_: int) -> None:
sql = """INSERT INTO gis.point (entity_id, name, description, type, geom) VALUES (
(%(location_id)s),
'',
'',
'centerpoint',
public.ST_SetSRID(public.ST_GeomFromGeoJSON('{"type":"Point","coordinates":[9,17]}'),4326));
"""
g.cursor.execute(sql, {'location_id': id_})
@staticmethod
def get_by_id(id_: int) -> List[Dict[str, Any]]:
geometries = []
for shape in ['point', 'polygon', 'linestring']:
sql = f"""
SELECT
{shape}.id,
{shape}.name,
{shape}.description,
{shape}.type,
public.ST_AsGeoJSON({shape}.geom) AS geojson
FROM model.entity place
JOIN gis.{shape} {shape} ON place.id = {shape}.entity_id
WHERE place.id = %(id_)s;"""
g.cursor.execute(sql, {'id_': id_})
for row in g.cursor.fetchall():
geometry = ast.literal_eval(row['geojson'])
geometry['title'] = row['name'].replace('"', '\"') if row['name'] else ''
geometry['description'] = \
row['description'].replace('"', '\"') if row['description'] else ''
geometries.append(geometry)
return geometries
@staticmethod
def get_by_shape(shape: str, extra_ids: List[int]) -> List[Dict[str, Any]]:
polygon_sql = '' if shape != 'polygon' else \
'public.ST_AsGeoJSON(public.ST_PointOnSurface(polygon.geom)) AS polygon_point, '
sql = f"""
SELECT
object.id AS object_id,
{shape}.id,
{shape}.name,
{shape}.description,
{shape}.type,
public.ST_AsGeoJSON({shape}.geom) AS geojson, {polygon_sql}
object.name AS object_name,
object.description AS object_desc,
string_agg(CAST(t.range_id AS text), ',') AS types
FROM model.entity place
JOIN model.link l ON place.id = l.range_id
JOIN model.entity object ON l.domain_id = object.id
JOIN gis.{shape} {shape} ON place.id = {shape}.entity_id
LEFT JOIN model.link t ON object.id = t.domain_id AND t.property_code = 'P2'
WHERE place.class_code = 'E53'
AND l.property_code = 'P53'
AND (object.system_class = 'place' OR object.id IN %(extra_ids)s)
GROUP BY object.id, {shape}.id;"""
g.cursor.execute(sql, {'extra_ids': tuple(extra_ids)})
return [dict(row) for row in g.cursor.fetchall()]
@staticmethod
def test_geom(geometry: str) -> None:
from openatlas.models.gis import InvalidGeomException
sql = "SELECT st_isvalid(public.ST_SetSRID(public.ST_GeomFromGeoJSON(%(geojson)s),4326));"
g.cursor.execute(sql, {'geojson': geometry})
if not g.cursor.fetchone()['st_isvalid']:
raise InvalidGeomException
return
@staticmethod
def insert(data: Dict[str, Any], shape: str) -> None:
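        # Example data dict (hypothetical values, mirroring add_example_geom above):
        #   {'entity_id': 42, 'name': '', 'description': '', 'type': 'centerpoint',
        #    'geojson': '{"type":"Point","coordinates":[9,17]}'}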
sql = f"""
INSERT INTO gis.{shape} (entity_id, name, description, type, geom) VALUES (
%(entity_id)s,
%(name)s,
%(description)s,
%(type)s,
public.ST_SetSRID(public.ST_GeomFromGeoJSON(%(geojson)s),4326));"""
g.cursor.execute(sql, data)
@staticmethod
def insert_import(data: Dict[str, Any]) -> None:
sql = """
INSERT INTO gis.point (entity_id, name, description, type, geom) VALUES (
%(entity_id)s,
'',
%(description)s,
'centerpoint',
public.ST_SetSRID(public.ST_GeomFromGeoJSON(%(geojson)s),4326));"""
g.cursor.execute(sql, data)
@staticmethod
def delete_by_entity_id(id_: int) -> None:
g.cursor.execute('DELETE FROM gis.point WHERE entity_id = %(id)s;', {'id': id_})
g.cursor.execute('DELETE FROM gis.linestring WHERE entity_id = %(id)s;', {'id': id_})
g.cursor.execute('DELETE FROM gis.polygon WHERE entity_id = %(id)s;', {'id': id_})
| gpl-2.0 | -4,000,327,028,606,807,000 | 40.590476 | 100 | 0.530112 | false |
cjrd/TMA | src/backend/aux/create_wiki_cooccurence.py | 1 | 5270 | #!/usr/bin/env python
import pdb
import os
import re
import cPickle as pickle
from src.backend.tma_utils import TextCleaner, ids_to_key
from lib.porter2 import stem
import sqlite3 as sqlite
from time import time
import bsddb
import random
def db_transfer(termterm_dict, termterm_db):
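    # Flush the in-memory co-occurrence counts into the Berkeley DB store.
    # termterm_dict maps term_id -> {term_id: count}; DB keys are "id1,id2"
    # strings and values are stringified counts (bsddb stores strings only).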
for t1 in termterm_dict:
for t2 in termterm_dict[t1]:
ikey = '%i,%i' % (t1,t2)
if termterm_db.has_key(ikey):
termterm_db[ikey] = str(int(termterm_db[ikey]) + termterm_dict[t1][t2])
else:
termterm_db[ikey] = str(termterm_dict[t1][t2])
if __name__ == '__main__':
# DATA
outdata_dir = '/Users/cradreed/Research/TMBrowse/develarea/'
wikfile = outdata_dir + 'enwiki_abstracts-20120307.dat' #'/Users/cradreed/Research/TMBrowse/develarea/enwiki-latest-abstract18.xml'#
# use bsd to create to cooccurence file then write to sqlite to maintain database consistency and reduce dependencies
# set up the dbase
#dbfile = '/Users/cradreed/Research/TMBrowse/develarea/wiki-terms.sqlite'
# wikivocab_file = outdata_dir + 'wikivocab_full.bdb'
# wikivocab_ct_file = outdata_dir + 'wikivocab_ct_full.bdb'
wiki_termterm_file = outdata_dir + 'wiki_termterm_full_100percent.bdb'
# os.remove(dbfile) # TESTING
# if os.path.exists(wikivocab_file):
# os.remove(wikivocab_file)
# if os.path.exists(wikivocab_ct_file):
# os.remove(wikivocab_ct_file)
# if os.path.exists(wiki_termterm_file):
# os.remove(wiki_termterm_file)
vocab_dict = pickle.load(open(outdata_dir + 'wiki_vocab_dic_full_100percent.obj','rb'))#{}#bsddb.hashopen(wikivocab_file)
vocab_ct_dict = pickle.load(open(outdata_dir + 'wiki_vocab_ct_full_100percent.obj','rb'))#{}#bsddb.hashopen(wikivocab_ct_file)
termterm_db = bsddb.btopen(wiki_termterm_file)
termterm_dict = {}
text_cleaner = TextCleaner(stopword_file='/Users/cradreed/Research/TMBrowse/trunk/src/backend/aux/stop_words.txt')
# add the cooccurence information to the table
st_time = time()
num_ab = 0
term_ct = 0
tot_ab_len = 0
dep_no = 0
print_no = 50000
transfer_no = 10*print_no
ltime = time()
with open(wikfile,'r') as wikxml:
for i, line in enumerate(wikxml):
# only sample % 20
# if random.random() > 0.20:
# continue
num_ab += 1
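            # Skip the first 3.5M abstracts, presumably to resume a previously
            # interrupted run (cf. the commented-out sampling code above).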
if num_ab <= 3500000:
continue
if num_ab % print_no == 0 and not num_ab == 0:
print 'Parsed {0:8d} of 3925809 abstracts; last {1:5d} abstracts took {2:0.1f} seconds. Average {3:4d} terms per doc.'.format(num_ab, print_no,time()-ltime, int(tot_ab_len/print_no))
ltime = time()
tot_ab_len = 0
if num_ab % transfer_no == 0 and not num_ab == 0:
print '---- Transfering %i abstracts to db -----' % transfer_no
db_transfer(termterm_dict, termterm_db)
dep_no += 1
del(termterm_dict)
termterm_dict = {}
print '---- %i transfer complete, took %0.1f seconds ----' % (dep_no, (time() - ltime))
ltime = time()
text = line.strip() # remove the abstract tags
text = text_cleaner.parse_text(text)
text = list(set(text))
tot_ab_len += len(text)
for nt1, term1 in enumerate(text):
if not vocab_dict.has_key(term1):
t1_id = term_ct
vocab_dict[term1] = t1_id
vocab_ct_dict[t1_id] = 1
term_ct += 1
else:
t1_id = vocab_dict[term1]
vocab_ct_dict[t1_id] += 1
for nt2 in xrange(nt1+1, len(text)): # 173.271281 vs 185s TODO make sure the counting is correct
term2 = text[nt2]
if not vocab_dict.has_key(term2):
t2_id = term_ct
vocab_dict[term2] = t2_id
vocab_ct_dict[t2_id] = 0 # avoid overcounting
term_ct += 1
else:
t2_id = vocab_dict[term2]
t_keys = ids_to_key(t1_id, t2_id)
if not termterm_dict.has_key(t_keys[0]):
termterm_dict[t_keys[0]] = {t_keys[1]:1}
elif termterm_dict[t_keys[0]].has_key(t_keys[1]):
termterm_dict[t_keys[0]][t_keys[1]] += 1
else:
termterm_dict[t_keys[0]][t_keys[1]] = 1
db_transfer(termterm_dict, termterm_db)
print 'Added %i terms to dic' % len(vocab_dict)
# vocab_dict.close()
# vocab_ct_dict.close()
# print termterm_db
# print vocab_dict
# print vocab_ct_dict
termterm_db.close()
pickle.dump(vocab_dict, open(outdata_dir + 'wiki_vocab_dic_full_100percent2.obj','wb'))
pickle.dump(vocab_ct_dict, open(outdata_dir + 'wiki_vocab_ct_full_100percent2.obj','wb'))
time_parse = time() - st_time
print 'Parsing %i abstracts took %f seconds' % (num_ab, time_parse) | gpl-3.0 | 1,383,182,657,101,620,200 | 40.503937 | 198 | 0.551803 | false |
LABETE/TestYourProject | casedetails/migrations/0001_initial.py | 1 | 1298 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CaseDetail',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('status', models.SmallIntegerField(choices=[(1, 'Not Started'), (2, 'In Progress'), (3, 'Passed'), (4, 'Failed'), (5, 'Not Applicable')], default=1)),
('step', models.IntegerField()),
('description', models.TextField()),
('expected', models.TextField(blank=True)),
('actual', models.TextField(blank=True)),
('input_data', models.TextField(blank=True)),
('output_data', models.TextField(blank=True)),
('defect_id', models.IntegerField(blank=True, null=True)),
('defect_id_displayed', models.IntegerField(blank=True, null=True)),
('case_id', models.IntegerField()),
],
options={
'verbose_name': 'CaseDetailModel',
'verbose_name_plural': 'CaseDetailModels',
},
),
]
| bsd-3-clause | 3,029,143,579,787,670,500 | 38.333333 | 167 | 0.539291 | false |
stormi/tsunami | src/primaires/scripting/fonctions/est_de_type.py | 1 | 2937 | # -*-coding:Utf-8 -*
# Copyright (c) 2015 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction est_de_type."""
from primaires.scripting.fonction import Fonction
class ClasseFonction(Fonction):
"""Retourne vrai si l'objet ou prototype est de type indiqué."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.est_de_type_objet, "Objet", "str")
cls.ajouter_types(cls.est_de_type_objet, "PrototypeObjet", "str")
@staticmethod
def est_de_type_objet(objet, nom_type):
"""Retourne vrai si l'objet est du type indiqué.
Retourne vrai également si le nom de type est un parent du
type de l'objet. Par exemple, si l'objet est un fruit
mais que l'on test si c'est une nourriture.
Paramètres à entrer :
* objet : l'objet à tester
* nom_type : le nom du type
"""
return objet.est_de_type(nom_type)
@staticmethod
def est_de_type_prototype(prototype, nom_type):
"""Retourne vrai si le prototype d'objet est du type indiqué.
Retourne vrai également si le nom de type est un parent du
type du prototype. Par exemple, si le prototype est un fruit
mais que l'on test si c'est une nourriture.
Paramètres à entrer :
* prototype : le prototype d'objet à tester
* nom_type : le nom du type
"""
return prototype.est_de_type(nom_type)
| bsd-3-clause | 855,048,807,130,685,200 | 38.540541 | 79 | 0.713602 | false |
jolyonb/edx-platform | lms/tests.py | 1 | 2451 | """Tests for the lms module itself."""
import logging
import mimetypes
from django.conf import settings
from django.test import TestCase
from django.urls import reverse
from mock import patch
from six import text_type
from edxmako import LOOKUP, add_lookup
from microsite_configuration import microsite
from openedx.features.course_experience import course_home_url_name
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
log = logging.getLogger(__name__)
class LmsModuleTests(TestCase):
"""
Tests for lms module itself.
"""
def test_new_mimetypes(self):
extensions = ['eot', 'otf', 'ttf', 'woff']
for extension in extensions:
mimetype, _ = mimetypes.guess_type('test.' + extension)
self.assertIsNotNone(mimetype)
def test_api_docs(self):
"""
Tests that requests to the `/api-docs/` endpoint do not raise an exception.
"""
assert settings.FEATURES['ENABLE_API_DOCS']
response = self.client.get('/api-docs/')
self.assertEqual(200, response.status_code)
class TemplateLookupTests(TestCase):
"""
Tests for TemplateLookup.
"""
def test_add_lookup_to_main(self):
"""Test that any template directories added are not cleared when microsites are enabled."""
add_lookup('main', 'external_module', __name__)
directories = LOOKUP['main'].directories
self.assertEqual(len([directory for directory in directories if 'external_module' in directory]), 1)
# This should not clear the directories list
microsite.enable_microsites(log)
directories = LOOKUP['main'].directories
self.assertEqual(len([directory for directory in directories if 'external_module' in directory]), 1)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_FEEDBACK_SUBMISSION': True})
class HelpModalTests(ModuleStoreTestCase):
"""Tests for the help modal"""
def setUp(self):
super(HelpModalTests, self).setUp()
self.course = CourseFactory.create()
def test_simple_test(self):
"""
Simple test to make sure that you don't get a 500 error when the modal
is enabled.
"""
url = reverse(course_home_url_name(self.course.id), args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
| agpl-3.0 | 190,935,438,932,161,340 | 32.121622 | 108 | 0.681763 | false |
chemelnucfin/tensorflow | tensorflow/python/keras/mixed_precision/experimental/keras_test.py | 1 | 43595 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests mixed precision works correctly with Keras layers and models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import saving
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.mixed_precision.experimental import test_util as mp_test_util
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
from tensorflow.python.training.tracking import util as trackable_utils
from tensorflow.python.util import nest
class AssertTypeLayer(base_layer.Layer):
"""A layer which asserts it's inputs are a certain type."""
def __init__(self, assert_type=None, **kwargs):
self._assert_type = (dtypes.as_dtype(assert_type).name if assert_type
else None)
super(AssertTypeLayer, self).__init__(**kwargs)
def assert_input_types(self, inputs):
"""Asserts `inputs` are of the correct type. Should be called in call()."""
if self._assert_type:
inputs_flattened = nest.flatten(inputs)
for inp in inputs_flattened:
assert inp.dtype.base_dtype == self._assert_type, (
'Input tensor has type %s which does not match assert type %s' %
            (inp.dtype.name, self._assert_type))
class AddLayer(AssertTypeLayer):
"""A layer which adds it's input to a scalar variable."""
def __init__(self,
regularizer=None,
use_operator=False,
var_name='v',
**kwargs):
"""Initializes the AddLayer.
Args:
regularizer: The regularizer on the scalar variable.
use_operator: If True, add using the + operator. If False, add using
tf.add.
var_name: The name of the variable. It can be useful to pass a name other
than 'v', to test having the attribute name (self.v) being different
from the variable name.
**kwargs: Passed to AssertTypeLayer constructor.
"""
self._regularizer = regularizer
self._use_operator = use_operator
self._var_name = var_name
super(AddLayer, self).__init__(**kwargs)
def build(self, _):
self.v = self.add_weight(
self._var_name, (), initializer='ones', regularizer=self._regularizer)
self.built = True
def call(self, inputs):
self.assert_input_types(inputs)
assert inputs.dtype == self.v.dtype
return self._add(inputs, self.v)
def _add(self, x, y):
if self._use_operator:
return x + y
else:
return math_ops.add(x, y)
def get_config(self):
config = super(AddLayer, self).get_config()
assert self._regularizer is None, (
'regularizer must be None to get config for AddLayer')
config['use_operator'] = self._use_operator
config['var_name'] = self._var_name
config['assert_type'] = self._assert_type
return config
class AddLayerWithoutAutoCast(AddLayer):
"""Same as AddLayer, but does not use AutoCastVariables."""
def build(self, _):
dtype = self.dtype
if dtype in ('float16', 'bfloat16'):
dtype = 'float32'
self.v = self.add_weight(
'v', (),
initializer='ones',
dtype=dtype,
experimental_autocast=False,
regularizer=self._regularizer)
self.built = True
def call(self, inputs):
self.assert_input_types(inputs)
assert self.v.dtype in (dtypes.float32, dtypes.float64)
return self._add(inputs, math_ops.cast(self.v, inputs.dtype))
class AddLayerWithFunction(AddLayer):
"""Same as AddLayer, but _add is decorated with a tf.function."""
@def_function.function
def _add(self, x, y):
return super(AddLayerWithFunction, self)._add(x, y)
class IdentityRegularizer(regularizers.Regularizer):
def __call__(self, x):
assert x.dtype == dtypes.float32
return array_ops.identity(x)
# If called outside any strategy.scope() calls, this will return the default
# strategy.
default_strategy_fn = distribution_strategy_context.get_strategy
def create_mirrored_strategy():
if context.num_gpus() >= 1:
return mirrored_strategy.MirroredStrategy(['cpu:0', 'gpu:0'])
else:
return mirrored_strategy.MirroredStrategy(['cpu:0'])
TESTCASES = ({
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy
})
class KerasLayerTest(keras_parameterized.TestCase):
"""Test mixed precision with Keras layers."""
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_infer_with_float32_vars(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope(), policy.policy_scope('infer_float32_vars'):
layer = AddLayer(assert_type=dtypes.float16)
self.assertEqual(layer.dtype, dtypes.float32)
y = layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
self.assertEqual(y.dtype, dtypes.float16)
self.assertEqual(layer.dtype, dtypes.float32)
self.assertEqual(layer._dtype_policy._name, 'float16_with_float32_vars')
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(y), 2.)
if base_layer_utils.v2_dtype_behavior_enabled():
# Layer should now cast inputs to float16
x = constant_op.constant([1.], dtype=dtypes.float32)
y = layer(x)
self.assertEqual(y.dtype, dtypes.float16)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
@testing_utils.enable_v2_dtype_behavior
def test_floating_point_policies_with_float32_vars(self, strategy_fn):
for dtype in 'bfloat16', 'float16', 'float64':
x = constant_op.constant([1.])
policy_name = dtype + '_with_float32_vars'
with strategy_fn().scope(), policy.policy_scope(policy_name):
layer = AddLayer(assert_type=dtype)
self.assertEqual(layer.dtype, dtypes.float32)
self.assertEqual(layer._dtype_policy._name, policy_name)
y = layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
self.assertEqual(y.dtype, dtype)
self.assertEqual(layer.dtype, dtypes.float32)
self.assertEqual(layer._dtype_policy._name, policy_name)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(y), 2.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
@testing_utils.enable_v2_dtype_behavior
def test_int32_with_float32_vars(self, strategy_fn):
# The policy int32_with_float32_vars is not useful at all (nor is any other
# non-float policy with float32 variables), but we have it for consistency,
# and so we test it.
class IdentityLayerWithVar(base_layer.Layer):
def build(self, _):
self.v = self.add_weight('v', ())
def call(self, inputs):
# Variables are only casted to other floats, not ints
assert array_ops.identity(self.v).dtype == 'float32'
return array_ops.identity(inputs)
x = constant_op.constant([1])
with strategy_fn().scope(), policy.policy_scope('int32_with_float32_vars'):
layer = IdentityLayerWithVar()
self.assertEqual(layer.dtype, dtypes.float32)
self.assertEqual(layer._dtype_policy._name, 'int32_with_float32_vars')
y = layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
self.assertEqual(y.dtype, dtypes.int32)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_layer_with_int_variable(self, strategy_fn):
class LayerWithIntVar(base_layer.Layer):
def build(self, _):
self.v = self.add_weight('v', dtype='int32', trainable=False)
def call(self, inputs):
# Only float variables should be autocasted. This will fail if self.v is
# autocasted to float32
return math_ops.cast(inputs, 'int32') + self.v
x = constant_op.constant([1.])
layer = LayerWithIntVar(dtype=policy.Policy('mixed_float16'))
self.assertEqual(layer(x).dtype, 'int32')
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_layer_with_non_autocast_variable(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
layer = AddLayerWithoutAutoCast(assert_type=dtypes.float16)
y = layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
self.assertEqual(y.dtype, dtypes.float16)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(y), 2.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_layer_calling_tf_function(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
layer = AddLayerWithFunction(assert_type=dtypes.float16)
y = layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
self.assertEqual(y.dtype, dtypes.float16)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(y), 2.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_layer_regularizer_runs_in_var_dtype(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
# Test on AddLayer
layer = AddLayer(
assert_type=dtypes.float16, regularizer=IdentityRegularizer())
layer(x)
(regularizer_loss,) = layer.losses
self.assertEqual(regularizer_loss.dtype, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(regularizer_loss), 1.)
# Test on AddLayerWithoutAutoCast
layer = AddLayerWithoutAutoCast(
assert_type=dtypes.float16, regularizer=IdentityRegularizer())
layer(x)
(regularizer_loss,) = layer.losses
self.assertEqual(regularizer_loss.dtype, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(regularizer_loss), 1.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_passing_policy_to_layer(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope():
# Passing a Policy to 'dtype' sets the policy for that layer.
layer = AddLayer(
assert_type=dtypes.float16, dtype=policy.Policy('infer_float32_vars'))
# layer.dtype refers to the variable dtype
self.assertEqual(layer.dtype, dtypes.float32)
layer(x)
self.assertEqual(layer.v.dtype, dtypes.float32)
with policy.policy_scope('infer_float32_vars'):
# Passing a Policy to dtype overrides the global Policy
layer = AddLayer(
assert_type=dtypes.float16, dtype=policy.Policy('infer'))
# layer dtype is not yet known
self.assertEqual(layer.dtype, None)
layer(x)
self.assertEqual(layer.v.dtype, dtypes.float16)
self.assertEqual(layer.dtype, dtypes.float16)
@test_util.run_in_graph_and_eager_modes
def test_error_passing_policy_string_to_layer(self):
with self.assertRaisesRegexp(
TypeError, "Cannot convert value 'float16_with_float32_vars' to a "
"TensorFlow DType"):
# This is not allowed, as otherwise a "float16_with_float32_vars" policy
# could be created without an API call that has the name "experimental" in
# it.
AddLayer(dtype='float16_with_float32_vars')
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_gradient(self, strategy_fn):
x = constant_op.constant([1.], dtype=dtypes.float16)
with strategy_fn().scope() as strategy:
with policy.policy_scope('infer_float32_vars'):
layer = AddLayer(assert_type=dtypes.float16)
def run_fn():
with backprop.GradientTape() as tape:
y = layer(x)
# Divide by num_replicas_in_sync, as the effective total loss is the
# sum of each of the replica's losses.
y /= strategy.num_replicas_in_sync
# Learning rate is small enough that if applied to a float16 variable,
# the variable will not change. So this tests the learning rate is not
# applied to a float16 value, but instead the float32 variable.
opt = gradient_descent.SGD(2**-14)
grad = tape.gradient(y, layer.v)
return opt.apply_gradients([(grad, layer.v)])
op = strategy.experimental_run(run_fn)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(op)
# The gradient with respective to the variable is 1. Since the
# variable is initialized with 1 and the learning rate is 2**-14, the
# new variable value should be: init_val - gradient * learning_rate,
# which is 1 - 1 * 2**-14
self.assertEqual(self.evaluate(layer.v), 1 - 2**-14)
def _test_checkpointing_layer_weights(self, strategy_fn,
mixed_prec_when_saving,
mixed_prec_when_loading):
# In this test, we potentially save with mixed precision enabled and load
# with mixed precision disabled, or vice versa. This is possible because
# variables are float32 regardless of whether mixed precision is enabled.
save_policy = 'infer_float32_vars' if mixed_prec_when_saving else 'infer'
load_policy = 'infer_float32_vars' if mixed_prec_when_loading else 'infer'
save_input_dtype = 'float16' if mixed_prec_when_saving else 'float32'
load_input_dtype = 'float16' if mixed_prec_when_loading else 'float32'
# Create a layer and save a checkpoint.
x = constant_op.constant([1.], dtype=save_input_dtype)
with strategy_fn().scope():
with policy.policy_scope(save_policy):
layer = AddLayer(assert_type=save_input_dtype)
layer(x) # Build layer
layer.set_weights([np.array(100.)])
self.assertEqual(self.evaluate(layer(x)), 101.)
checkpoint = trackable_utils.Checkpoint(layer=layer)
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
save_path = checkpoint.save(prefix)
# Create a new layer and restore the checkpoint.
x = constant_op.constant([1.], dtype=load_input_dtype)
with strategy_fn().scope():
with policy.policy_scope(load_policy):
layer = AddLayer(assert_type=load_input_dtype)
layer(x) # Build layer
layer.set_weights([np.array(200.)])
self.assertEqual(self.evaluate(layer(x)), 201.)
checkpoint = trackable_utils.Checkpoint(layer=layer)
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertEqual(layer.get_weights(), [100.])
self.assertEqual(self.evaluate(layer(x)), 101.)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_checkpointing_layer_weights(self, strategy_fn):
self._test_checkpointing_layer_weights(
strategy_fn, mixed_prec_when_saving=True, mixed_prec_when_loading=True)
self._test_checkpointing_layer_weights(
strategy_fn, mixed_prec_when_saving=True, mixed_prec_when_loading=False)
self._test_checkpointing_layer_weights(
strategy_fn, mixed_prec_when_saving=False, mixed_prec_when_loading=True)
@test_util.run_in_graph_and_eager_modes
def test_delete_variable(self):
layer = base_layer.Layer(dtype=policy.Policy('mixed_float16'))
layer.x = layer.add_weight('x')
self.assertEqual(layer.trainable_weights, [layer.x])
del layer.x
self.assertEqual(layer.trainable_weights, [])
class KerasModelTest(keras_parameterized.TestCase):
"""Test mixed precision with Keras models."""
def _is_strategy_supported(self, strategy_fn, check_model_type=False):
if (strategy_fn != default_strategy_fn and
(testing_utils.should_run_eagerly() or
(check_model_type and testing_utils.get_model_type() == 'subclass'))):
# Distribution strategies do not support subclassed models or running with
# `run_eagerly=True`.
return False
else:
return True
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'operator',
'strategy_fn': create_mirrored_strategy,
'use_operator': True
}, {
'testcase_name': 'regularizer',
'strategy_fn': create_mirrored_strategy,
'use_regularizer': True
}, {
'testcase_name': 'infer',
'strategy_fn': create_mirrored_strategy,
'policy_name': 'mixed_float16'
}, {
'testcase_name': 'norun_distributed',
'strategy_fn': create_mirrored_strategy,
'experimental_run_tf_function': False
})
@testing_utils.enable_v2_dtype_behavior
def test_model(self,
strategy_fn,
use_operator=False,
use_regularizer=False,
policy_name='mixed_float16',
experimental_run_tf_function=True):
if not self._is_strategy_supported(strategy_fn, check_model_type=True):
return
regularizer = IdentityRegularizer() if use_regularizer else None
with strategy_fn().scope():
# Pass loss_scale=None, as this test will fail if the DynamicLossScale
# skips applying gradients for a step
with policy.policy_scope(policy.Policy(policy_name, loss_scale=None)):
layer_list = []
if testing_utils.get_model_type() == 'subclass':
# Subclassed models do not have an Input layer, so the model does not
# cast inputs to the Input layer's dtype. Therefore, we need to
# manually insert a float16 cast.
cast_f16_layer = layers.Lambda(
lambda x: math_ops.cast(x, 'float16'), input_shape=(1,))
layer_list.append(cast_f16_layer)
layer = AddLayer(
assert_type=dtypes.float16,
use_operator=use_operator,
regularizer=regularizer,
input_shape=(1,))
cast_f32_layer = layers.Lambda(lambda x: math_ops.cast(x, 'float32'))
layer_list += [layer, cast_f32_layer]
model = testing_utils.get_model_from_layers(
layer_list, input_shape=(1,), input_dtype=dtypes.float16)
def loss_fn(y_true, y_pred):
del y_true
return math_ops.reduce_mean(y_pred)
# Learning rate is small enough that if applied to a float16 variable,
# the variable will not change. So this tests the learning rate not
# applied to a float16 value, but instead the float32 variable.
opt = gradient_descent.SGD(2**-14)
model.compile(
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((2, 1))
y = np.ones((2, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2)
model.fit(dataset)
# Variable starts at 1, and should have gradient of 2 ** -14 subtracted
# from it.
expected = 1 - 2**-14
if use_regularizer:
# Regularizer adds another 2 ** -14 to the gradient.
expected -= 2**-14
self.assertEqual(backend.eval(layer.v), expected)
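    # A worked note on the arithmetic above (illustrative, values inferred
    # from the test): the float16 grid spacing just below 1.0 is 2 ** -11, so
    # an update of 2 ** -14 applied to a float16 copy of the variable would
    # round back to exactly 1.0. Observing v == 1 - 2 ** -14 therefore shows
    # the update was applied to the float32 variable.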
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'norun_distributed',
'strategy_fn': create_mirrored_strategy,
'experimental_run_tf_function': False,
})
def test_fixed_loss_scaling(self,
strategy_fn,
experimental_run_tf_function=True):
# Note: We do not test mixed precision in this method, only loss scaling.
if not self._is_strategy_supported(strategy_fn):
return
loss_scale = 8.
batch_size = 4
with strategy_fn().scope():
x = layers.Input(shape=(1,), batch_size=batch_size)
layer = AddLayer()
y = layer(x)
# The gradient of 'y' at this point is 1. With loss scaling, the gradient
# is 'loss_scale'. We divide by the batch size since the loss is averaged
# across batch elements.
expected_gradient = loss_scale / batch_size
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn([expected_gradient]))
y = core.Lambda(identity_with_grad_check_fn)(y)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
del y_true
return math_ops.reduce_mean(y_pred)
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
model.compile(
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((batch_size, 1))
y = np.ones((batch_size, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size)
model.fit(dataset)
# Variable starts at 1, and should have gradient of 1 subtracted from it.
expected = 0
self.assertEqual(backend.eval(layer.v), expected)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'loss_scaling',
'strategy_fn': create_mirrored_strategy,
'use_loss_scaling': True
})
@testing_utils.enable_v2_dtype_behavior
def test_advanced_model(self, strategy_fn, use_loss_scaling=False):
# The advanced model tests mixed-precision-related features that would occur
# in a resnet50 model. It tests a model that has:
# * Multiple layers, some which use auto-cast variables and some which do
# not
# * Regularization on some variables and not others.
# * A fixed loss scale (if use_loss_scaling is True)
if not self._is_strategy_supported(strategy_fn):
return
strategy = strategy_fn()
if use_loss_scaling:
loss_scale = 8.
else:
loss_scale = None
learning_rate = 2**-14
with strategy.scope():
with policy.policy_scope(policy.Policy('mixed_float16',
loss_scale=loss_scale)):
x = layers.Input(shape=(1,), batch_size=2)
layer1 = AddLayer(
assert_type=dtypes.float16,
regularizer=IdentityRegularizer(),
use_operator=True)
layer2 = AddLayerWithoutAutoCast(
assert_type=dtypes.float16, use_operator=True)
layer3 = AddLayer(assert_type=dtypes.float16, use_operator=False)
layer4 = AddLayerWithoutAutoCast(
assert_type=dtypes.float16,
regularizer=IdentityRegularizer(),
use_operator=False)
y = layer1(x)
y = layer2(y)
y = layer3(y)
y = layer4(y)
if use_loss_scaling:
# The gradient of 'y' at this point is 1. With loss scaling, the
# gradient is 'loss_scale'. We divide by the batch size of 2 since the
# loss is averaged across batch elements.
expected_gradient = loss_scale / 2
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn(
expected_dtype=dtypes.float16,
expected_gradient=[expected_gradient]))
y = core.Lambda(identity_with_grad_check_fn)(y)
y = math_ops.cast(y, dtypes.float32)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
self.assertEqual(y_true.dtype, dtypes.float32)
self.assertEqual(y_pred.dtype, dtypes.float32)
return math_ops.reduce_mean(y_pred)
opt = gradient_descent.SGD(learning_rate)
model.compile(
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((2, 1))
y = np.ones((2, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2)
model.fit(dataset)
for layer in (layer1, layer2, layer3, layer4):
if layer.losses:
# Layer has weight regularizer
self.assertEqual(backend.eval(layer.v), 1 - 2 * learning_rate)
else:
# Layer does not have weight regularizer
self.assertEqual(backend.eval(layer.v), 1 - learning_rate)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'pass_loss_scale_to_policy',
'strategy_fn': create_mirrored_strategy,
'pass_loss_scale_to_policy': True,
}, {
'testcase_name': 'norun_distributed',
'strategy_fn': create_mirrored_strategy,
'experimental_run_tf_function': False,
})
def test_dynamic_loss_scaling(self,
strategy_fn,
pass_loss_scale_to_policy=False,
experimental_run_tf_function=True):
if not self._is_strategy_supported(strategy_fn):
return
strategy = strategy_fn()
initial_loss_scale = 2.
batch_size = 4
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=initial_loss_scale, increment_period=2)
expected_gradient = backend.variable([initial_loss_scale / batch_size],
dtype=dtypes.float16)
# If this variable is set to True, the model below will have NaN gradients
have_nan_gradients = backend.variable(False, dtype=dtypes.bool)
with strategy.scope():
opt = gradient_descent.SGD(1.)
if pass_loss_scale_to_policy:
p = policy.Policy('infer_float32_vars', loss_scale=loss_scale)
else:
p = policy.Policy('infer_float32_vars')
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
with policy.policy_scope(p):
x = layers.Input(
shape=(1,), batch_size=batch_size, dtype=dtypes.float16)
layer = AddLayer(assert_type=dtypes.float16)
y = layer(x)
identity_with_nan_grads = (
mp_test_util.create_identity_with_nan_gradients_fn(
have_nan_gradients))
y = core.Lambda(identity_with_nan_grads)(y)
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn(
expected_dtype=dtypes.float16,
expected_gradient=expected_gradient))
y = core.Lambda(identity_with_grad_check_fn)(y)
y = math_ops.cast(y, dtypes.float32)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
del y_true
return math_ops.reduce_mean(y_pred)
model.compile(
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((batch_size, 1))
y = np.ones((batch_size, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size)
model.fit(dataset)
    # The variable starts at 1 and has a gradient of 1, so it will go down by 1
    # each step.
self.assertEqual(backend.eval(layer.v), 0)
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -1)
# There have been two steps without NaNs, so the loss scale will double
backend.set_value(expected_gradient,
backend.get_value(expected_gradient * 2))
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -2)
# Next test with NaN gradients.
backend.set_value(have_nan_gradients, True)
model.fit(dataset)
# Variable should not be updated
self.assertEqual(backend.eval(layer.v), -2)
# Test with finite gradients again
backend.set_value(have_nan_gradients, False)
# The loss scale will be halved due to the NaNs, so the gradient will also
# be halved
backend.set_value(expected_gradient,
backend.get_value(expected_gradient / 2))
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -3)
@test_util.run_in_graph_and_eager_modes
@testing_utils.enable_v2_dtype_behavior
def test_loss_scale_optimizer_overrides_policy_loss_scale(self):
with policy.policy_scope(policy.Policy('float32', loss_scale=10.)):
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=5.)
x = layers.Input(shape=(1,))
y = AddLayer()(x)
model = models.Model(x, y)
model.compile(opt, loss='mse')
self.assertEqual(self.evaluate(model.optimizer.loss_scale()), 5.)
@test_util.run_in_graph_and_eager_modes
@testing_utils.enable_v2_dtype_behavior
def test_pass_invalid_optimizer_with_loss_scaling(self):
with policy.policy_scope(policy.Policy('float32', loss_scale=10.)):
x = layers.Input(shape=(1,))
y = AddLayer()(x)
model = models.Model(x, y)
with self.assertRaisesRegexp(ValueError,
'optimizer" must be an instance of '):
model.compile(optimizers.SGD(1.), 'mse')
@test_util.run_in_graph_and_eager_modes
@testing_utils.enable_v2_dtype_behavior
def test_functional_model_loss_dtype(self):
with policy.policy_scope('float16'):
x = layers.Input(shape=(1,))
y = AddLayer()(x)
model = models.Model(x, y)
model.add_loss(math_ops.cast(y, 'float32'))
      # The loss should not be cast to the policy's dtype.
self.assertEqual(model.losses[0].dtype, 'float32')
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn,
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'base_h5',
'strategy_fn': default_strategy_fn,
'h5': True,
}, {
'testcase_name': 'distribute_h5',
'strategy_fn': create_mirrored_strategy,
'h5': True,
})
@test_util.run_in_graph_and_eager_modes
def test_save_weights_with_autocast_vars(self, strategy_fn, h5=False):
with strategy_fn().scope():
with policy.policy_scope('infer_float32_vars'):
x = layers.Input(shape=(1,), batch_size=2, dtype=dtypes.float16)
layer = AddLayer(assert_type=dtypes.float16)
y = layer(x)
y = math_ops.cast(y, dtypes.float32)
model = models.Model(inputs=x, outputs=y)
model.set_weights([np.array(100.)])
x = np.ones((2, 1), dtype=np.float16)
self.assertAllClose(backend.get_value(model(x)), x + 100.)
suffix = '.h5' if h5 else ''
weights_file = os.path.join(self.get_temp_dir(), 'weights' + suffix)
model.save_weights(weights_file)
model.set_weights([np.array(200.)])
self.assertAllClose(backend.get_value(model(x)), x + 200.)
model.load_weights(weights_file)
self.assertAllClose(backend.get_value(model(x)), x + 100.)
self.assertEqual(model.get_weights(), [np.array(100.)])
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn,
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'different_var_name',
'strategy_fn': default_strategy_fn,
'var_name': 'w'
}, {
'testcase_name': 'different_var_name_distribute',
'strategy_fn': create_mirrored_strategy,
'var_name': 'w'
})
def test_save_slot_variables_with_autocast_vars(self,
strategy_fn,
var_name='v'):
if not self._is_strategy_supported(strategy_fn):
return
with strategy_fn().scope(), policy.policy_scope('infer_float32_vars'):
x = layers.Input(shape=(2,), batch_size=2, dtype=dtypes.float16)
# Having a var_name other than 'v' tests that a fixed bug (b/134713714)
# does not reoccur. The bug was that a crash would occur when saving a
# checkpoint where an AutoCastVariable with a slot variable would have a
# different name than the layer attribute's name (layer.v in this case).
layer = AddLayer(assert_type=dtypes.float16, var_name=var_name)
y = layer(x)
y = math_ops.cast(y, dtypes.float32)
model = models.Model(inputs=x, outputs=y)
opt = gradient_descent.SGD(1., 1.)
model.compile(
optimizer=opt,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
weights_file = os.path.join(self.get_temp_dir(), 'weights')
model.save_weights(weights_file)
saved_slot = backend.get_value(opt.get_slot(layer.v, 'momentum'))
model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
new_slot = backend.get_value(opt.get_slot(layer.v, 'momentum'))
self.assertNotEqual(new_slot, saved_slot)
model.load_weights(weights_file)
restored_slot = backend.get_value(opt.get_slot(layer.v, 'momentum'))
self.assertEqual(restored_slot, saved_slot)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(*TESTCASES)
def test_save_weights_with_dynamic_loss_scaling(self, strategy_fn):
if not self._is_strategy_supported(strategy_fn):
return
strategy = strategy_fn()
if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and
not context.executing_eagerly()):
# TODO(b/121381184): Enable running the test in this case.
return
# Create and run model.
with strategy.scope():
x = layers.Input(shape=(2,), batch_size=2, dtype=dtypes.float32)
y = AddLayer(assert_type=dtypes.float32)(x)
model = models.Model(inputs=x, outputs=y)
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=1., increment_period=2., multiplier=2.)
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
model.compile(
optimizer=opt,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
# Run for 3 steps (6 examples with a batch size of 2)
model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2)
self.assertEqual(backend.get_value(loss_scale()), 2)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)
# Save model weights.
save_prefix = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(save_prefix)
# Run model again for 1 step (2 examples with a batch size of 2)
model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
self.assertEqual(backend.get_value(loss_scale()), 4)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 0)
# Load model weights and ensure loss scale weights are restored.
model.load_weights(save_prefix)
self.assertEqual(backend.get_value(loss_scale()), 2)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
'testcase_name': 'base',
'strategy_fn': default_strategy_fn,
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy,
}, {
'testcase_name': 'base_h5',
'strategy_fn': default_strategy_fn,
'h5': True,
}, {
'testcase_name': 'distribute_h5',
'strategy_fn': create_mirrored_strategy,
'h5': True,
})
def test_save_model_with_dynamic_loss_scaling(self, strategy_fn, h5=False):
if not self._is_strategy_supported(strategy_fn):
return
strategy = strategy_fn()
if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and
not context.executing_eagerly()):
# TODO(b/121381184): Enable running the test in this case.
return
# Create and run model.
with strategy.scope():
x = layers.Input(shape=(2,), batch_size=2, dtype=dtypes.float32)
y = AddLayer()(x)
model = models.Model(inputs=x, outputs=y)
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=1., increment_period=2., multiplier=2.)
opt = gradient_descent.SGD(1.)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
model.compile(
optimizer=opt,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
# Run for 3 steps (6 examples with a batch size of 2)
model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2)
self.assertEqual(backend.get_value(loss_scale()), 2)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)
(weight,) = model.trainable_weights
orig_weight = backend.get_value(weight)
# Save model weights.
save_path = os.path.join(self.get_temp_dir(), 'model')
model.save(save_path, save_format='h5' if h5 else 'tf')
# Run model again for 1 step (2 examples with a batch size of 2)
model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
new_weight = backend.get_value(weight)
self.assertNotEqual(new_weight, orig_weight)
self.assertEqual(backend.get_value(loss_scale()), 4)
self.assertEqual(backend.get_value(loss_scale._num_good_steps), 0)
# Load model weights and ensure loss scale weights are restored.
model = saving.load_model(save_path, custom_objects={'AddLayer': AddLayer})
loss_scale = model.optimizer.loss_scale
(weight,) = model.trainable_weights
loaded_weight = backend.get_value(weight)
self.assertEqual(loaded_weight, orig_weight)
# Currently the loss scale isn't always saved when the model is saved with
# Model.save(). So we assert the loss scale either has the value when it was
# saved, or the value it was initialized with.
# TODO(reedwm): Always save/restore the loss scale with Model.save().
self.assertIn(backend.get_value(loss_scale()), (1, 2))
self.assertIn(backend.get_value(loss_scale._num_good_steps), (0, 1))
class RnnTest(keras_parameterized.TestCase):
"""Test mixed precision with RNNs."""
# TODO(b/136512020): Support and test recurrent_v2.GRU.
@parameterized.named_parameters(
{
'testcase_name': 'base_simple',
'strategy_fn': default_strategy_fn,
'rnn_class': recurrent.SimpleRNN,
}, {
'testcase_name': 'distribute_simple',
'strategy_fn': create_mirrored_strategy,
'rnn_class': recurrent.SimpleRNN,
}, {
'testcase_name': 'base_gru',
'strategy_fn': default_strategy_fn,
'rnn_class': recurrent.GRU,
}, {
'testcase_name': 'distribute_gru',
'strategy_fn': create_mirrored_strategy,
'rnn_class': recurrent.GRU,
})
@test_util.run_in_graph_and_eager_modes
# RNNs do not work properly with GradientTape in graph mode when V1 control
# flow is used.
@test_util.enable_control_flow_v2
def test_rnn(self, strategy_fn, rnn_class):
x = array_ops.ones((2, 3, 4), dtype=dtypes.float16)
strategy = strategy_fn()
with strategy.scope(), policy.policy_scope('infer_float32_vars'):
layer = rnn_class(units=4)
def run_fn():
with backprop.GradientTape() as tape:
y = layer(x)
self.assertEqual(y.dtype, dtypes.float16)
opt = gradient_descent.SGD(1.)
grads = tape.gradient(y, layer.trainable_weights)
return opt.apply_gradients(zip(grads, layer.trainable_weights))
op = strategy.experimental_run(run_fn)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(op)
for v in layer.weights:
self.assertEqual(v.dtype, dtypes.float32)
if __name__ == '__main__':
test.main()
| apache-2.0 | 3,689,826,003,535,400,000 | 39.629077 | 90 | 0.64957 | false |
clone1612/appstore | nextcloudappstore/core/api/v1/serializers.py | 1 | 5704 | from nextcloudappstore.core.models import PhpExtensionDependency, \
DatabaseDependency, Category, AppAuthor, AppRelease, Screenshot, \
AppRating, App
from nextcloudappstore.core.validators import HttpsUrlValidator
from parler_rest.fields import TranslatedFieldsField
from parler_rest.serializers import TranslatableModelSerializer
from rest_framework import serializers
from rest_framework.fields import SerializerMethodField, DateTimeField
from django.contrib.auth import get_user_model
class PhpExtensionDependencySerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField(source='php_extension.id')
version_spec = SerializerMethodField()
raw_version_spec = SerializerMethodField()
class Meta:
model = PhpExtensionDependency
fields = ('id', 'version_spec', 'raw_version_spec')
def get_version_spec(self, obj):
return obj.version_spec.replace(',', ' ')
def get_raw_version_spec(self, obj):
return obj.raw_version_spec.replace(',', ' ')
class DatabaseDependencySerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField(source='database.id')
version_spec = SerializerMethodField()
raw_version_spec = SerializerMethodField()
class Meta:
model = DatabaseDependency
fields = ('id', 'version_spec', 'raw_version_spec')
def get_version_spec(self, obj):
return obj.version_spec.replace(',', ' ')
def get_raw_version_spec(self, obj):
return obj.raw_version_spec.replace(',', ' ')
class CategorySerializer(TranslatableModelSerializer):
translations = TranslatedFieldsField(shared_model=Category)
class Meta:
model = Category
fields = ('id', 'translations')
class AuthorSerializer(serializers.ModelSerializer):
class Meta:
model = AppAuthor
fields = ('name', 'mail', 'homepage')
class AppReleaseSerializer(serializers.ModelSerializer):
databases = DatabaseDependencySerializer(many=True, read_only=True,
source='databasedependencies')
php_extensions = \
PhpExtensionDependencySerializer(many=True, read_only=True,
source='phpextensiondependencies')
php_version_spec = SerializerMethodField()
platform_version_spec = SerializerMethodField()
raw_php_version_spec = SerializerMethodField()
raw_platform_version_spec = SerializerMethodField()
translations = TranslatedFieldsField(shared_model=AppRelease)
class Meta:
model = AppRelease
fields = (
'version', 'php_extensions', 'databases', 'shell_commands',
'php_version_spec', 'platform_version_spec', 'min_int_size',
'download', 'created', 'licenses', 'last_modified', 'is_nightly',
'raw_php_version_spec', 'raw_platform_version_spec', 'signature',
'translations',
)
def get_platform_version_spec(self, obj):
return obj.platform_version_spec.replace(',', ' ')
def get_php_version_spec(self, obj):
return obj.php_version_spec.replace(',', ' ')
def get_raw_platform_version_spec(self, obj):
return obj.raw_platform_version_spec.replace(',', ' ')
def get_raw_php_version_spec(self, obj):
return obj.raw_php_version_spec.replace(',', ' ')
class ScreenshotSerializer(serializers.ModelSerializer):
class Meta:
model = Screenshot
fields = ('url', 'small_thumbnail')
class AppSerializer(serializers.ModelSerializer):
releases = SerializerMethodField()
screenshots = ScreenshotSerializer(many=True, read_only=True)
authors = AuthorSerializer(many=True, read_only=True)
translations = TranslatedFieldsField(shared_model=App)
last_modified = DateTimeField(source='last_release')
def __init__(self, *args, **kwargs):
self.version = kwargs.pop('version')
super().__init__(*args, **kwargs)
class Meta:
model = App
fields = (
'id', 'categories', 'user_docs', 'admin_docs', 'developer_docs',
'issue_tracker', 'website', 'created', 'last_modified', 'releases',
'screenshots', 'translations', 'is_featured', 'authors',
'rating_recent', 'rating_overall', 'rating_num_recent',
'rating_num_overall', 'certificate',
)
def get_releases(self, obj):
releases = obj.releases.prefetch_related(
'translations',
'databases',
'licenses',
'phpextensiondependencies__php_extension',
'databasedependencies__database',
'shell_commands',
).all()
if self.version:
data = [r for r in releases if r.is_compatible(self.version)]
else:
data = releases
return AppReleaseSerializer(data, many=True, read_only=True).data
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('id', 'first_name', 'last_name')
class AppRatingSerializer(serializers.ModelSerializer):
user = UserSerializer(many=False, read_only=True)
translations = TranslatedFieldsField(shared_model=AppRating)
class Meta:
model = AppRating
fields = ('rating', 'rated_at', 'translations', 'user', 'app')
class AppReleaseDownloadSerializer(serializers.Serializer):
download = serializers.URLField(validators=[HttpsUrlValidator()])
signature = serializers.CharField()
nightly = serializers.BooleanField(required=False, default=False)
class AppRegisterSerializer(serializers.Serializer):
certificate = serializers.CharField()
signature = serializers.CharField()
| agpl-3.0 | 219,983,444,380,250,140 | 34.874214 | 79 | 0.66655 | false |
SgtHotshot/forge-cortex | cortex/core/views.py | 1 | 1448 | import django.shortcuts
import django.conf
import django.contrib.auth
import django.contrib.auth.forms
import django.core.urlresolvers
import django.views.generic
# pylint: disable=too-few-public-methods, too-many-ancestors, unused-argument
class RootView(django.views.generic.TemplateView):
template_name = 'root.html'
class LoginView(django.views.generic.FormView):
form_class = django.contrib.auth.forms.AuthenticationForm
redirect_arg = 'next'
template_name = 'login.html'
def get(self, *args, **kwargs):
if self.request.user.is_authenticated():
return django.shortcuts.redirect(self.get_success_url())
return super(LoginView, self).get(*args, **kwargs)
def get_success_url(self):
request_params = getattr(self.request, self.request.method)
if self.redirect_arg in request_params:
return request_params[self.redirect_arg]
elif self.redirect_arg in self.kwargs:
return self.kwargs[self.redirect_arg]
else:
return django.conf.settings.LOGIN_REDIRECT_URL
def form_valid(self, form):
django.contrib.auth.login(self.request, form.get_user())
return super(LoginView, self).form_valid(form)
class LogoutView(django.views.generic.View):
redirect_url = django.core.urlresolvers.reverse_lazy('root')
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
def post(self, *args, **kwargs):
django.contrib.auth.logout(self.request)
return django.shortcuts.redirect(self.redirect_url)
| mit | 7,560,867,296,937,909,000 | 28.55102 | 77 | 0.755525 | false |
gbanegas/Red_with_mult | main_thread.py | 1 | 1607 | '''
Created on 10 Sep 2014
@author: gustavo
'''
from reduction import Reduction
from polynomial import Polynomial
from threadc import ThreadCount
import os
import threading
import threading
def recoverfile(saved, readed):
if not os.path.exists(saved):
return True, []
f = open(saved,'r')
if(not os.stat(saved).st_size==0):
pols = []
pols_done = []
for line in readed:
pol = Polynomial(line)
pols.append(pol)
for line in f:
line = line.replace("[","")
line = line.replace("]","")
spl = line.split(',')
p = ""
for i in xrange(0,len(spl)-1):
p = p + " + x^" + str(spl[i].replace(" ",""))
p = p + " + 1"
p = p.replace("+","",1)
#print p
pol_ = Polynomial(p)
pols_done.append(pol_)
pols_set = set(pols)
pols_set_done = set(pols_done)
result = pols_set - pols_set_done
return False, list(result)
else:
return True, []
if __name__ == '__main__':
lock = threading.Lock()
lockScreen = threading.Lock()
files = ["pol_163_.txt"]
#degrees = [21, 97, 139, 163, 233, 283, 571, 1021, 1163]
#degree = 571
for fileName in files:
save = 'result_pol_'+fileName
f = open(fileName,'r')
read, pols = recoverfile(save, f)
if read:
for line in f:
pol = Polynomial(line)
pols.append(pol)
print len(pols)
threads = []
i = 0
j = 30
print "starting...."
for temp in range(0, len(pols)):
if (j > len(pols)):
j = len(pols)
thread = ThreadCount(temp,lockScreen, lock, pols[i:j], save)
i = j+1
j += 1
threads.append(thread)
for thread in threads:
thread.start()
for current in threads:
current.join()
| apache-2.0 | 2,271,769,503,056,894,200 | 20.144737 | 63 | 0.605476 | false |
opencog/ros-behavior-scripting | sensors/face_track.py | 1 | 6042 | #
# face_track.py - Registry and tracking of visible human faces
# Copyright (C) 2014,2015,2016 Hanson Robotics
# Copyright (C) 2015,2016 Linas Vepstas
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import rospy
import logging
from std_msgs.msg import Int32
# FIXME: In developer role of hrtool hr_msgs and pi_face_tracker conflict, why?
# from hr_msgs.msg import FaceEvent, Faces
from pi_face_tracker.msg import FaceEvent, Faces
from atomic_msgs import AtomicMsgs
logger = logging.getLogger('hr.eva_behavior.face_track')
# Thin python wrapper, to subscribe to face-tracking ROS messages,
# (face ID's, 3D face locations) and then re-wrap these as OpenCog
# atoms, via AtomicMsgs, and forward them on into the OpenCog
# space-time server.
#
class FaceTrack:
# Control flags. Ideally, FaceTrack should publish targets using
# ros_commo EvaControl class.
C_EYES = 16
C_FACE = 32
# Face tracking will be disabled if neither of these flags are set.
# (this allows for a manual over-ride of face-tracking by other
# control processes.)
C_FACE_TRACKING = C_FACE | C_EYES
def __init__(self):
# The OpenCog API. This is used to send face data to OpenCog.
self.atomo = AtomicMsgs()
self.atomo.create_face_octomap()
# List of currently visible faces
self.visible_faces = []
# Subscribed pi_vision topics and events
self.TOPIC_FACE_EVENT = "/camera/face_event"
self.EVENT_NEW_FACE = "new_face"
self.EVENT_LOST_FACE = "lost_face"
self.EVENT_RECOGNIZED_FACE = "recognized_face"
# Overrides current face being tracked by WebUI
self.EVENT_TRACK_FACE = "track_face"
self.TOPIC_FACE_LOCATIONS = "/camera/face_locations"
# Face appearance/disappearance from pi_vision
rospy.Subscriber(self.TOPIC_FACE_EVENT, FaceEvent, self.face_event_cb)
# Face location information from pi_vision
rospy.Subscriber(self.TOPIC_FACE_LOCATIONS, Faces, self.face_loc_cb)
rospy.Subscriber("/behavior_control", Int32, self.behavior_control_cb)
# Control Eyes and face by default
self.control_mode = 255
# ----------------------------------------------------------
# Start tracking a face
def add_face(self, faceid):
if faceid in self.visible_faces:
return
self.visible_faces.append(faceid)
logger.info("New face added to visibile faces: " +
str(self.visible_faces))
self.atomo.add_face_to_atomspace(faceid)
# Stop tracking a face
def remove_face(self, faceid):
self.atomo.remove_face_from_atomspace(faceid)
if faceid in self.visible_faces:
self.visible_faces.remove(faceid)
logger.info("Lost face; visibile faces now: " + str(self.visible_faces))
# Force the robot to turn its attention to the given
# face (to interact with, talk with) that face.
def track_face(self, faceid):
if faceid in self.visible_faces:
logger.info("Face requested interaction: " + str(faceid))
self.atomo.add_tracked_face_to_atomspace(faceid)
# ----------------------------------------------------------
# pi_vision ROS callbacks
# pi_vision ROS callback, called when a new face is detected,
# or a face is lost. Also called for recognized faces.
#
# This callback handles recognized faces using a special message
# format, published on the `/camera/face_locations`. Note that
# there is also a different topic for recognized faces, called
# `/camera/face_recognition`. See the `face-recog.py` file for
# details. I am not sure what subsystem published which message
# type. XXX FIXME - figure out why there are two different
# face recognition subsystems, and standardize one which we
# should use.
def face_event_cb(self, data):
if not self.control_mode & self.C_FACE_TRACKING:
return
if data.face_event == self.EVENT_NEW_FACE:
self.add_face(data.face_id)
elif data.face_event == self.EVENT_LOST_FACE:
self.remove_face(data.face_id)
elif data.face_event == self.EVENT_TRACK_FACE:
self.track_face(data.face_id)
elif data.face_event == self.EVENT_RECOGNIZED_FACE:
self.atomo.face_recognition(data.face_id, data.recognized_id)
# pi_vision ROS callback, called when pi_vision has new face
# location data for us. This happens frequently (about 10x/second)
def face_loc_cb(self, data):
if not self.control_mode & self.C_FACE_TRACKING:
return
for face in data.faces:
# Update location of a face. The location is stored in the
# OpenCog space server (octomap).
if face.id in self.visible_faces:
self.atomo.update_face_octomap(face.id,
face.point.x, face.point.y, face.point.z)
# Enable/disable Opencog face-tracking. This is driven by the
# master control GUI. XXX FIXME -- why should this ever be disabled?
	# OpenCog should always know about faces; perhaps it is control of
# head and eye movements that should be disabled?
def behavior_control_cb(self, data):
# Is facetracking currently enabled?
facetracking = self.control_mode & self.C_FACE_TRACKING
self.control_mode = data.data
print("New Control mode %i" % self.control_mode )
# If face-tracking was enabled, and is now disabled ...
if facetracking > 0 and self.control_mode & self.C_FACE_TRACKING == 0:
self.atomo.update_ft_state_to_atomspace(False)
# Need to clear faces:
for face in self.visible_faces[:]:
self.remove_face(face)
elif self.control_mode & self.C_FACE_TRACKING > 0:
self.atomo.update_ft_state_to_atomspace(True)
| agpl-3.0 | -381,146,016,300,767,550 | 34.751479 | 80 | 0.719298 | false |
abinit/abinit | tests/pymods/yaml_tools/structures/commons.py | 1 | 2155 | """
Define basic structures.
"""
from __future__ import print_function, division, unicode_literals
from ..register_tag import yaml_auto_map, yaml_implicit_scalar
@yaml_auto_map
class GenericMap(object):
"""A generic tag definition for test and example."""
@yaml_implicit_scalar
class YAMLComplex(complex):
# > [1] <
yaml_pattern = (r'[+-]?(\d+(\.\d*)?|\.\d+)([eEdD][+-]?\d+)?'
r' *[+-] *[+-]?(\d+(\.\d*)?|\.\d+)([eEdD][+-]?\d+)?i')
# > [2] <> [3] <
    # [1] and [3]: floats with optional sign and exponential notation; these
    # will also match integers and forms like ".1" (Fortran does not produce
    # those, though)
# [2] + or - with optional blanks around
@staticmethod
def __new__(*args, **kwargs):
return complex.__new__(*args, **kwargs)
@classmethod
def from_scalar(cls, scal):
return cls(scal
# python always uses double and only recognise E and e
.replace('d', 'e')
.replace('D', 'e')
# python uses j instead of i (as in electro magnetism)
.replace('i', 'j')
# spaces have to be stripped out around the central + or -
.replace(' ', '')
# python expects only one + or - in string form
.replace('+-', '-')
.replace('-+', '-'))
def to_scalar(self):
return repr(self)[1:-1] # remove parentheses
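# Illustrative round-trip for the conversions above (a sketch, not part of
# the original API): a Fortran-style scalar such as "1.0d0 + 2.5E-1i" is
# rewritten to "1.0e0+2.5E-1j" and parsed by the complex constructor, e.g.
#
#   YAMLComplex.from_scalar('1.0d0 + 2.5E-1i')   # -> (1+0.25j)
#   YAMLComplex(1 + 0.25j).to_scalar()           # -> '1+0.25j'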
class AbinitMessage(object):
_is_abinit_message = True
@yaml_auto_map
class AbinitError(AbinitMessage):
"""Base class for Abinit messages."""
__yaml_tag = 'ERROR'
@yaml_auto_map
class AbinitWarning(AbinitMessage):
__yaml_tag = 'WARNING'
# MG: Is this used somewhere?
@yaml_auto_map
class AbinitInfo(AbinitMessage):
__yaml_tag = 'INFO'
@yaml_auto_map
class AbinitComment(AbinitMessage):
__yaml_tag = 'COMMENT'
@yaml_auto_map
class DatasetInfo(object):
__yaml_tag = 'DatasetInfo'
@yaml_auto_map
class BeginCycle(object):
__yaml_tag = 'BeginCycle'
| gpl-3.0 | -7,643,572,895,698,716,000 | 26.628205 | 77 | 0.538747 | false |
cloudtrends/env_sh_py_scripts | comm_funcs.py | 1 | 4217 | # -*- coding: utf-8 –*-
import sys
reload(sys)
import os
sys.path.append(os.getcwd())
#sys.setdefaultencoding('utf8')
### ### ### ### ### ### ### ### ### ### ### ###
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
#following from Python cookbook, #475186
def has_colours(stream):
if not hasattr(stream, "isatty"):
return False
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
# guess false in case of error
return False
has_colours = has_colours(sys.stdout)
def print_color(text, colour=WHITE):
if has_colours:
seq = "\x1b[1;%dm" % (30+colour) + text + "\x1b[0m"
sys.stdout.write(seq)
else:
sys.stdout.write(text)
def print_ok( text ):
print_color(text + "\n", GREEN)
def print_error(text):
print_color(text + "\n", RED)
def helloworld():
print "helloworld"
def listdir_fullpath(d):
if not os.path.exists( d ):
print_error("listdir_full path: ERROR dir not exit :" + d )
sys.exit(1)
return [os.path.join(d, f) for f in os.listdir(d)]
def add_to_exist_file( file_name , line ):
with open( file_name , "r+U") as f:
try:
f.seek(-1, 2)
while f.read(1) == "\n":
f.seek(-2, 1) # go back two characters, since
# reading advances by one character
except IOError: # seek failed, so the file consists
f.seek(0) # exclusively of newline characters
#else:
# f.write("\n") # Add exactly one newline character
if not line.endswith("\n"):
line = line + "\n"
f.write( line ) # Add a new line
def get_file_content_as_list( file_name ):
f = open( file_name )
lines = f.readlines()
f.close()
return lines
def get_gopath():
return os.environ['GOPATH']
def get_projects_str_by_app_type(app_type=""):
#if "gobbs" == app_type:
# return get_projects_str()
gopath = get_gopath()
if 0 == len(gopath):
print_error( "ERROR GOPATH not set." )
sys.exit(1)
proj_file = gopath + "/" + app_type + "_projects.txt"
if not os.path.exists( proj_file ):
print_error("ERROR proj_file not exit :" + proj_file )
sys.exit(1)
contents = get_file_content_as_list(proj_file)
cs = ""
for one in contents:
cs = cs + " " + one
print cs
return cs
def get_projects_str():
gopath = get_gopath()
if 0 == len(gopath):
print_error( "ERROR GOPATH not set." )
sys.exit(1)
proj_file = gopath + "/gobbs_projects.txt"
if not os.path.exists( proj_file ):
print_error("ERROR proj_file not exit :" + proj_file )
sys.exit(1)
contents = get_file_content_as_list(proj_file)
cs = ""
for one in contents:
cs = cs + " " + one
return cs
def check_app_type_and_instance_name( app_type , instance_name ):
print "app type:", app_type
if app_type not in get_projects_str_by_app_type(app_type):
print_error("ERROR\n")
print_error( "not find app type:"+ app_type )
return False
print "instance name:", instance_name
# how to check instance name?
mydir = os.getcwd()
src_dir = mydir + "/src"
app_dir = src_dir + "/" + app_type
app_views_dir = src_dir + "/" + app_type +"_views"
files = listdir_fullpath( app_views_dir )
all_instance_name = ""
find_instance = False
for one in files:
if not os.path.isdir( one ):
continue
one = one.replace( app_views_dir , "" )[1:]
if one == instance_name:
find_instance = True
all_instance_name = all_instance_name + " " + one
if not find_instance :
print_error( "instance :" + instance_name + " not in instance_name list :" )
print_error( " check your app_views_dir: " + app_views_dir )
print all_instance_name
return False
return True
if __name__ == "__main__":
print os.environ['HOME']
print os.environ['GOPATH']
| gpl-2.0 | 7,682,343,216,183,510,000 | 28.068966 | 89 | 0.559668 | false |
potatolondon/djangoappengine-1-4 | utils.py | 1 | 2473 | import os
from google.appengine.api import apiproxy_stub_map
from google.appengine.api.app_identity import get_application_id
from google.appengine.api.datastore import Entity, Put
have_appserver = bool(apiproxy_stub_map.apiproxy.GetStub('datastore_v3'))
if not have_appserver:
from .boot import PROJECT_DIR
from google.appengine.tools import old_dev_appserver as dev_appserver
appconfig = dev_appserver.LoadAppConfig(PROJECT_DIR, {},
default_partition='dev')[0]
def appid():
if have_appserver:
return get_application_id()
else:
try:
return appconfig.application.split('~', 1)[-1]
except ImportError, e:
raise Exception("Could not get appid. Is your app.yaml file missing? "
"Error was: %s" % e)
on_production_server = 'SERVER_SOFTWARE' in os.environ and not os.environ['SERVER_SOFTWARE'].startswith("Development")
def bulk_create(instances, connection=None):
"""
    Uses AppEngine's bulk Put() call on a number of instances.
    This will NOT call save(), but it will return the instances
    with their primary_key populated (unlike Django's bulk_create).
"""
if connection is None:
from django.db import connection
from .fields import AncestorKey
def prepare_entity(instance):
if isinstance(instance.pk, AncestorKey):
parent = instance.pk._parent_key
else:
parent = None
result = Entity(instance._meta.db_table, parent=parent)
for field in instance._meta.fields:
if field.name == "id":
continue
value = field.pre_save(instance, True)
setattr(instance, field.name, value)
value = field.get_db_prep_save(getattr(instance, field.attname), connection)
if isinstance(value, (list, set)):
value = list(value)
if not value:
value = None
result[field.column] = value
return result
entities = [ prepare_entity(x) for x in instances ]
keys = Put(entities)
assert(len(keys) == len(entities))
for i, key in enumerate(keys):
assert(key)
if key.parent():
instances[i]._parent_key = key.parent()
instances[i].pk.key_id = key.id_or_name()
else:
instances[i].id = key.id_or_name()
return instances
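# Hypothetical usage sketch (the `Guestbook` model is an assumed example, not
# part of this module): unsaved instances are written in a single datastore
# round-trip and returned with their keys populated, e.g.
#
#   entries = [Guestbook(message="hi"), Guestbook(message="bye")]
#   entries = bulk_create(entries)
#   assert all(entry.pk is not None for entry in entries)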
| bsd-3-clause | 3,023,978,416,118,690,000 | 31.116883 | 118 | 0.606146 | false |
frigg/frigg-worker | tests/test_deployments.py | 1 | 5223 | # -*- coding: utf8 -*-
import unittest
from unittest import mock
from docker.manager import Docker
from frigg_settings import FriggSettings
from frigg_worker.deployments import Deployment
from frigg_worker.errors import GitCloneError
DATA = {
'id': 1,
'branch': 'master',
'sha': 'superbhash',
'clone_url': 'https://github.com/frigg/test-repo.git',
'owner': 'frigg',
'name': 'test-repo',
}
BUILD_SETTINGS_WITH_NO_SERVICES = FriggSettings({
'setup_tasks': [],
'tasks': ['tox'],
'services': [],
'preview': {'tasks': ['pip install -r requirements.txt', 'gunicorn']},
'coverage': {'path': 'coverage.xml', 'parser': 'python'}
})
BUILD_SETTINGS_WITH_PRESET = FriggSettings({
'setup_tasks': [],
'tasks': ['tox'],
'services': [],
'preview': {'tasks': ['load_data'], 'preset': 'django-py3'},
'coverage': {'path': 'coverage.xml', 'parser': 'python'}
})
WORKER_OPTIONS = {
'dispatcher_url': 'http://example.com/dispatch',
'dispatcher_token': 'tokened',
'hq_url': 'http://example.com/hq',
'hq_token': 'tokened',
}
GIT_ERROR = GitCloneError('UNKNOWN', '', '', True)
class DeploymentTests(unittest.TestCase):
def setUp(self):
self.docker = Docker()
self.deployment = Deployment(1, DATA, self.docker, WORKER_OPTIONS)
@mock.patch('docker.manager.Docker.start')
@mock.patch('docker.manager.Docker.stop')
@mock.patch('frigg_worker.deployments.Deployment.clone_repo')
@mock.patch('frigg_worker.deployments.Deployment.run_task')
@mock.patch('frigg_worker.deployments.Deployment.report_run', lambda *x: None)
@mock.patch('frigg_worker.jobs.build_settings', lambda *x: BUILD_SETTINGS_WITH_NO_SERVICES)
def test_run_deploy(self, mock_run_task, mock_clone_repo, mock_docker_stop, mock_docker_start):
self.deployment.run_deploy()
mock_run_task.assert_has_calls([
mock.call('pip install -r requirements.txt'),
mock.call('gunicorn')
])
self.assertTrue(mock_clone_repo.called)
self.assertTrue(self.deployment.succeeded)
self.assertTrue(self.deployment.finished)
@mock.patch('frigg_worker.deployments.Deployment.clone_repo')
@mock.patch('frigg_worker.deployments.Deployment.run_task', side_effect=OSError())
@mock.patch('frigg_worker.deployments.Deployment.report_run', lambda *x: None)
@mock.patch('frigg_worker.jobs.build_settings', lambda *x: BUILD_SETTINGS_WITH_NO_SERVICES)
def test_run_deploy_fail_task(self, mock_run_task, mock_clone_repo):
self.deployment.run_deploy()
self.assertTrue(mock_clone_repo.called)
mock_run_task.assert_has_calls([
mock.call('pip install -r requirements.txt'),
])
self.assertFalse(self.deployment.succeeded)
self.assertTrue(self.deployment.finished)
@mock.patch('frigg_worker.deployments.Deployment.run_task')
@mock.patch('frigg_worker.deployments.Deployment.clone_repo', side_effect=GIT_ERROR)
def test_run_deploy_fail_clone(self, mock_clone, mock_run_task):
self.deployment.run_deploy()
self.assertFalse(mock_run_task.called)
self.assertFalse(self.deployment.succeeded)
@mock.patch('frigg_worker.jobs.build_settings', lambda *x: BUILD_SETTINGS_WITH_PRESET)
@mock.patch('frigg_worker.deployments.Deployment.clone_repo', lambda x: True)
@mock.patch('frigg_worker.deployments.Deployment.report_run', lambda *x: None)
@mock.patch('frigg_worker.deployments.Deployment.load_preset')
@mock.patch('frigg_worker.deployments.Deployment.run_task')
def test_run_deploy_should_load_preset(self, mock_run_task, mock_load_preset):
self.deployment.run_deploy()
self.assertTrue(mock_load_preset.called)
@mock.patch('frigg_worker.api.APIWrapper.report_run')
@mock.patch('frigg_worker.deployments.Deployment.serializer', lambda *x: {})
@mock.patch('frigg_worker.jobs.build_settings', lambda *x: {})
def test_report_run(self, mock_report_run):
self.deployment.report_run()
mock_report_run.assert_called_once_with('Deployment', 1, '{}')
@mock.patch('frigg_worker.jobs.build_settings', lambda *x: BUILD_SETTINGS_WITH_NO_SERVICES)
def test_create_pending_tasks_splitted_into_setup_tasks_and_tasks(self):
self.assertEqual([], self.deployment.tasks)
self.assertEqual([], self.deployment.setup_tasks)
self.deployment.create_pending_tasks()
self.assertEqual(['pip install -r requirements.txt', 'gunicorn'], self.deployment.tasks)
@mock.patch('frigg_worker.jobs.build_settings', lambda *x: BUILD_SETTINGS_WITH_PRESET)
def test_load_preset(self):
self.deployment.load_preset()
self.assertIn('daemon_task', self.deployment.preset)
self.assertIn('tasks', self.deployment.preset)
self.assertEqual(
self.deployment.preset['daemon_task'],
'nohup python3 manage.py runserver 0.0.0.0:8000 &'
)
self.assertEqual(
self.deployment.preset['tasks'],
['pip3 install -U gunicorn -r requirements.txt',
'python3 manage.py migrate',
'python3 manage.py collectstatic --noinput']
)
| mit | -1,519,599,867,766,518,800 | 41.120968 | 99 | 0.668964 | false |
ray-project/ray | python/ray/tune/trial.py | 1 | 25468 | from typing import Callable, Dict, Sequence, Union
import json
import ray
import ray.cloudpickle as cloudpickle
from collections import deque
import copy
import logging
import platform
import shutil
import uuid
import time
import os
from numbers import Number
from ray.tune import TuneError
from ray.tune.checkpoint_manager import Checkpoint, CheckpointManager
# NOTE(rkn): We import ray.tune.registry here instead of importing the names we
# need because there are cyclic imports that may cause specific names to not
# have been defined yet. See https://github.com/ray-project/ray/issues/1716.
from ray.tune.registry import get_trainable_cls, validate_trainable
from ray.tune.result import DEFAULT_RESULTS_DIR, DONE, TRAINING_ITERATION
from ray.tune.resources import Resources, \
json_to_resources, resources_to_json
from ray.tune.utils.placement_groups import PlacementGroupFactory, \
resource_dict_to_pg_factory
from ray.tune.utils.serialization import TuneFunctionEncoder
from ray.tune.utils.trainable import TrainableUtil
from ray.tune.utils import date_str, flatten_dict
from ray.util import log_once
from ray._private.utils import binary_to_hex, hex_to_binary
DEBUG_PRINT_INTERVAL = 5
logger = logging.getLogger(__name__)
class Location:
"""Describes the location at which Trial is placed to run."""
def __init__(self, hostname=None, pid=None):
self.hostname = hostname
self.pid = pid
def __str__(self):
if not self.pid:
return ""
elif self.hostname == platform.node():
return "pid={}".format(self.pid)
else:
return "{}:{}".format(self.hostname, self.pid)
class ExportFormat:
"""Describes the format to import/export the trial Trainable.
This may correspond to different file formats based on the
Trainable implementation.
"""
CHECKPOINT = "checkpoint"
MODEL = "model"
H5 = "h5"
@staticmethod
def validate(formats):
"""Validates formats.
Raises:
ValueError if the format is unknown.
"""
for i in range(len(formats)):
formats[i] = formats[i].strip().lower()
if formats[i] not in [
ExportFormat.CHECKPOINT, ExportFormat.MODEL,
ExportFormat.H5
]:
raise TuneError("Unsupported import/export format: " +
formats[i])
def checkpoint_deleter(trial_id, runner):
"""Returns a checkpoint deleter callback for a runner."""
if not runner:
return lambda checkpoint: None
def delete(checkpoint):
"""Requests checkpoint deletion asynchronously.
Args:
checkpoint (Checkpoint): Checkpoint to delete.
"""
if checkpoint.storage == Checkpoint.PERSISTENT and checkpoint.value:
logger.debug("Trial %s: Deleting checkpoint %s", trial_id,
checkpoint.value)
checkpoint_path = checkpoint.value
# Delete local copy, if any exists.
if os.path.exists(checkpoint_path):
try:
checkpoint_dir = TrainableUtil.find_checkpoint_dir(
checkpoint_path)
shutil.rmtree(checkpoint_dir)
except FileNotFoundError:
logger.warning("Checkpoint dir not found during deletion.")
# TODO(ujvl): Batch remote deletes.
runner.delete_checkpoint.remote(checkpoint.value)
return delete
class TrialInfo:
"""Serializable struct for holding information for a Trial.
Attributes:
trial_name (str): String name of the current trial.
trial_id (str): trial_id of the trial
"""
def __init__(self, trial):
self._trial_name = str(trial)
self._trial_id = trial.trial_id
@property
def trial_name(self):
return self._trial_name
@property
def trial_id(self):
return self._trial_id
def create_logdir(dirname, local_dir):
local_dir = os.path.expanduser(local_dir)
logdir = os.path.join(local_dir, dirname)
if os.path.exists(logdir):
old_dirname = dirname
dirname += "_" + uuid.uuid4().hex[:4]
logger.info(f"Creating a new dirname {dirname} because "
f"trial dirname '{old_dirname}' already exists.")
logdir = os.path.join(local_dir, dirname)
os.makedirs(logdir, exist_ok=True)
return logdir
class Trial:
"""A trial object holds the state for one model training run.
Trials are themselves managed by the TrialRunner class, which implements
the event loop for submitting trial runs to a Ray cluster.
Trials start in the PENDING state, and transition to RUNNING once started.
On error it transitions to ERROR, otherwise TERMINATED on success.
Attributes:
trainable_name (str): Name of the trainable object to be executed.
config (dict): Provided configuration dictionary with evaluated params.
trial_id (str): Unique identifier for the trial.
local_dir (str): Local_dir as passed to tune.run.
logdir (str): Directory where the trial logs are saved.
        evaluated_params (dict): Parameters evaluated by the search algorithm.
experiment_tag (str): Identifying trial name to show in the console.
resources (Resources): Amount of resources that this trial will use.
        status (str): One of PENDING, RUNNING, PAUSED, TERMINATED, ERROR.
error_file (str): Path to the errors that this trial has raised.
"""
_nonjson_fields = [
"results",
"best_result",
"param_config",
"extra_arg",
]
PENDING = "PENDING"
RUNNING = "RUNNING"
PAUSED = "PAUSED"
TERMINATED = "TERMINATED"
ERROR = "ERROR"
def __init__(self,
trainable_name,
config=None,
trial_id=None,
local_dir=DEFAULT_RESULTS_DIR,
evaluated_params=None,
experiment_tag="",
resources=None,
placement_group_factory=None,
stopping_criterion=None,
remote_checkpoint_dir=None,
sync_to_cloud=None,
checkpoint_freq=0,
checkpoint_at_end=False,
sync_on_checkpoint=True,
keep_checkpoints_num=None,
checkpoint_score_attr=TRAINING_ITERATION,
export_formats=None,
restore_path=None,
trial_name_creator=None,
trial_dirname_creator=None,
log_to_file=None,
max_failures=0):
"""Initialize a new trial.
The args here take the same meaning as the command line flags defined
in ray.tune.config_parser.
"""
validate_trainable(trainable_name)
# Trial config
self.trainable_name = trainable_name
self.trial_id = Trial.generate_id() if trial_id is None else trial_id
self.config = config or {}
self.local_dir = local_dir # This remains unexpanded for syncing.
#: Parameters that Tune varies across searches.
self.evaluated_params = evaluated_params or {}
self.experiment_tag = experiment_tag
trainable_cls = self.get_trainable_cls()
if trainable_cls:
default_resources = trainable_cls.default_resource_request(
self.config)
# If Trainable returns resources, do not allow manual override via
# `resources_per_trial` by the user.
if default_resources:
if resources or placement_group_factory:
raise ValueError(
"Resources for {} have been automatically set to {} "
"by its `default_resource_request()` method. Please "
"clear the `resources_per_trial` option.".format(
trainable_cls, default_resources))
# New way: Trainable returns a PlacementGroupFactory object.
if isinstance(default_resources, PlacementGroupFactory):
placement_group_factory = default_resources
resources = None
# Set placement group factory to None for backwards
# compatibility.
else:
placement_group_factory = None
resources = default_resources
self.location = Location()
self.resources = resources or Resources(cpu=1, gpu=0)
self.placement_group_factory = placement_group_factory
self._setup_resources()
self.stopping_criterion = stopping_criterion or {}
self.log_to_file = log_to_file
# Make sure `stdout_file, stderr_file = Trial.log_to_file` works
if not self.log_to_file or not isinstance(self.log_to_file, Sequence) \
or not len(self.log_to_file) == 2:
self.log_to_file = (None, None)
self.max_failures = max_failures
# Local trial state that is updated during the run
self.last_result = {}
self.last_update_time = -float("inf")
# stores in memory max/min/avg/last-n-avg/last result for each
# metric by trial
self.metric_analysis = {}
# keep a moving average over these last n steps
self.n_steps = [5, 10]
self.metric_n_steps = {}
self.export_formats = export_formats
self.status = Trial.PENDING
self.start_time = None
self.logdir = None
self.runner = None
self.last_debug = 0
self.error_file = None
self.error_msg = None
self.trial_name_creator = trial_name_creator
self.custom_trial_name = None
self.custom_dirname = None
# Checkpointing fields
self.saving_to = None
if remote_checkpoint_dir:
self.remote_checkpoint_dir_prefix = remote_checkpoint_dir
else:
self.remote_checkpoint_dir_prefix = None
self.sync_to_cloud = sync_to_cloud
self.checkpoint_freq = checkpoint_freq
self.checkpoint_at_end = checkpoint_at_end
self.keep_checkpoints_num = keep_checkpoints_num
self.checkpoint_score_attr = checkpoint_score_attr
self.sync_on_checkpoint = sync_on_checkpoint
self.checkpoint_manager = CheckpointManager(
keep_checkpoints_num, checkpoint_score_attr,
checkpoint_deleter(self._trainable_name(), self.runner))
# Restoration fields
self.restore_path = restore_path
self.restoring_from = None
self.num_failures = 0
# AutoML fields
self.results = None
self.best_result = None
self.param_config = None
self.extra_arg = None
if trial_name_creator:
self.custom_trial_name = trial_name_creator(self)
if trial_dirname_creator:
self.custom_dirname = trial_dirname_creator(self)
if os.path.sep in self.custom_dirname:
raise ValueError("Trial dirname must not contain '/'. "
"Got {self.custom_dirname}")
self._state_json = None
self._state_valid = False
def _setup_resources(self, log_always: bool = False):
"""Set up resource and placement group requirements.
This will try to convert the resource request in ``self.resources``
to a placement group factory object. If this is unsuccessful,
placement groups will not be used.
Args:
log_always (bool): If True, this will always log a warning if
conversion from a resource dict to a placement group
definition was unsuccessful (e.g. when passing ``extra_``
requests).
"""
if not self.placement_group_factory and \
not int(os.getenv("TUNE_PLACEMENT_GROUP_AUTO_DISABLED", "0")):
try:
self.placement_group_factory = resource_dict_to_pg_factory(
self.resources)
except ValueError as exc:
if log_always or log_once("tune_pg_extra_resources"):
logger.warning(exc)
self.placement_group_factory = None
# Set placement group factory flag to True in Resources object.
if self.placement_group_factory:
resource_kwargs = self.resources._asdict()
resource_kwargs["has_placement_group"] = True
self.resources = Resources(**resource_kwargs)
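    # Illustrative sketch of the conversion above (assumed values): a request
    # such as Resources(cpu=1, gpu=0) becomes a PlacementGroupFactory via
    # resource_dict_to_pg_factory, while a request the helper cannot express
    # (e.g. extra_* resources) raises ValueError, logs the warning once, and
    # leaves placement groups disabled for this trial.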
@property
def node_ip(self):
return self.location.hostname
@property
def checkpoint(self):
"""Returns the most recent checkpoint.
If the trial is in ERROR state, the most recent PERSISTENT checkpoint
is returned.
"""
if self.status == Trial.ERROR:
checkpoint = self.checkpoint_manager.newest_persistent_checkpoint
else:
checkpoint = self.checkpoint_manager.newest_checkpoint
if checkpoint.value is None:
checkpoint = Checkpoint(Checkpoint.PERSISTENT, self.restore_path)
return checkpoint
@classmethod
def generate_id(cls):
return str(uuid.uuid1().hex)[:8]
@property
def remote_checkpoint_dir(self):
assert self.logdir, "Trial {}: logdir not initialized.".format(self)
if not self.remote_checkpoint_dir_prefix:
return None
logdir_name = os.path.basename(self.logdir)
return os.path.join(self.remote_checkpoint_dir_prefix, logdir_name)
@property
def uses_placement_groups(self):
return bool(self.placement_group_factory)
def reset(self):
return Trial(
self.trainable_name,
config=self.config,
trial_id=None,
local_dir=self.local_dir,
evaluated_params=self.evaluated_params,
experiment_tag=self.experiment_tag,
resources=self.resources,
placement_group_factory=self.placement_group_factory,
stopping_criterion=self.stopping_criterion,
remote_checkpoint_dir=self.remote_checkpoint_dir,
checkpoint_freq=self.checkpoint_freq,
checkpoint_at_end=self.checkpoint_at_end,
sync_on_checkpoint=self.sync_on_checkpoint,
keep_checkpoints_num=self.keep_checkpoints_num,
checkpoint_score_attr=self.checkpoint_score_attr,
export_formats=self.export_formats,
restore_path=self.restore_path,
trial_name_creator=self.trial_name_creator,
log_to_file=self.log_to_file,
max_failures=self.max_failures,
)
def init_logdir(self):
"""Init logdir."""
if not self.logdir:
self.logdir = create_logdir(self._generate_dirname(),
self.local_dir)
else:
os.makedirs(self.logdir, exist_ok=True)
self.invalidate_json_state()
def update_resources(
self, resources: Union[Dict, Callable, PlacementGroupFactory]):
"""EXPERIMENTAL: Updates the resource requirements.
Should only be called when the trial is not running.
Raises:
ValueError if trial status is running.
"""
if self.status is Trial.RUNNING:
raise ValueError("Cannot update resources while Trial is running.")
if isinstance(resources, PlacementGroupFactory):
self.placement_group_factory = resources
else:
self.resources = Resources(**resources)
self._setup_resources()
self.invalidate_json_state()
def set_runner(self, runner):
self.runner = runner
self.checkpoint_manager.delete = checkpoint_deleter(
self._trainable_name(), runner)
# No need to invalidate state cache: runner is not stored in json
# self.invalidate_json_state()
def set_location(self, location):
"""Sets the location of the trial."""
self.location = location
# No need to invalidate state cache: location is not stored in json
# self.invalidate_json_state()
def set_status(self, status):
"""Sets the status of the trial."""
self.status = status
if status == Trial.RUNNING:
if self.start_time is None:
self.start_time = time.time()
self.invalidate_json_state()
def set_config(self, config):
self.config = config
self.invalidate_json_state()
def set_experiment_tag(self, experiment_tag):
self.experiment_tag = experiment_tag
self.invalidate_json_state()
def write_error_log(self, error_msg):
if error_msg and self.logdir:
self.num_failures += 1
self.error_file = os.path.join(self.logdir, "error.txt")
with open(self.error_file, "a+") as f:
f.write("Failure # {} (occurred at {})\n".format(
self.num_failures, date_str()))
f.write(error_msg + "\n")
self.error_msg = error_msg
self.invalidate_json_state()
def should_stop(self, result):
"""Whether the given result meets this trial's stopping criteria."""
if result.get(DONE):
return True
for criteria, stop_value in self.stopping_criterion.items():
if criteria not in result:
raise TuneError(
"Stopping criteria {} not provided in result {}.".format(
criteria, result))
elif isinstance(criteria, dict):
raise ValueError(
"Stopping criteria is now flattened by default. "
"Use forward slashes to nest values `key1/key2/key3`.")
elif result[criteria] >= stop_value:
return True
return False
def should_checkpoint(self):
"""Whether this trial is due for checkpointing."""
result = self.last_result or {}
if result.get(DONE) and self.checkpoint_at_end:
return True
return (self.checkpoint_freq and
result.get(TRAINING_ITERATION, 0) % self.checkpoint_freq == 0)
def has_checkpoint(self):
return self.checkpoint.value is not None
def clear_checkpoint(self):
self.checkpoint.value = None
self.restoring_from = None
self.invalidate_json_state()
def on_checkpoint(self, checkpoint):
"""Hook for handling checkpoints taken by the Trainable.
Args:
checkpoint (Checkpoint): Checkpoint taken.
"""
self.checkpoint_manager.on_checkpoint(checkpoint)
self.invalidate_json_state()
def on_restore(self):
"""Handles restoration completion."""
assert self.is_restoring
self.last_result = self.restoring_from.result
self.restoring_from = None
self.invalidate_json_state()
def should_recover(self):
"""Returns whether the trial qualifies for retrying.
This is if the trial has not failed more than max_failures. Note this
may return true even when there is no checkpoint, either because
`self.checkpoint_freq` is `0` or because the trial failed before
a checkpoint has been made.
"""
return self.num_failures < self.max_failures or self.max_failures < 0
def update_last_result(self, result, terminate=False):
if self.experiment_tag:
result.update(experiment_tag=self.experiment_tag)
self.set_location(Location(result.get("node_ip"), result.get("pid")))
self.last_result = result
self.last_update_time = time.time()
for metric, value in flatten_dict(result).items():
if isinstance(value, Number):
if metric not in self.metric_analysis:
self.metric_analysis[metric] = {
"max": value,
"min": value,
"avg": value,
"last": value
}
self.metric_n_steps[metric] = {}
for n in self.n_steps:
key = "last-{:d}-avg".format(n)
self.metric_analysis[metric][key] = value
# Store n as string for correct restore.
self.metric_n_steps[metric][str(n)] = deque(
[value], maxlen=n)
else:
step = result["training_iteration"] or 1
self.metric_analysis[metric]["max"] = max(
value, self.metric_analysis[metric]["max"])
self.metric_analysis[metric]["min"] = min(
value, self.metric_analysis[metric]["min"])
self.metric_analysis[metric]["avg"] = 1 / step * (
value +
(step - 1) * self.metric_analysis[metric]["avg"])
self.metric_analysis[metric]["last"] = value
for n in self.n_steps:
key = "last-{:d}-avg".format(n)
self.metric_n_steps[metric][str(n)].append(value)
self.metric_analysis[metric][key] = sum(
self.metric_n_steps[metric][str(n)]) / len(
self.metric_n_steps[metric][str(n)])
self.invalidate_json_state()
def get_trainable_cls(self):
return get_trainable_cls(self.trainable_name)
def is_finished(self):
return self.status in [Trial.ERROR, Trial.TERMINATED]
@property
def is_restoring(self):
return self.restoring_from is not None
@property
def is_saving(self):
return self.saving_to is not None
def __repr__(self):
return self._trainable_name(include_trial_id=True)
def __str__(self):
return self._trainable_name(include_trial_id=True)
def _trainable_name(self, include_trial_id=False):
"""Combines ``env`` with ``trainable_name`` and ``trial_id``.
Can be overridden with a custom string creator.
"""
if self.custom_trial_name:
return self.custom_trial_name
if "env" in self.config:
env = self.config["env"]
if isinstance(env, type):
env = env.__name__
identifier = "{}_{}".format(self.trainable_name, env)
else:
identifier = self.trainable_name
if include_trial_id:
identifier += "_" + self.trial_id
return identifier.replace("/", "_")
def _generate_dirname(self):
if self.custom_dirname:
generated_dirname = self.custom_dirname
else:
if "MAX_LEN_IDENTIFIER" in os.environ:
logger.error("The MAX_LEN_IDENTIFIER environment variable is "
"deprecated and will be removed in the future. "
"Use TUNE_MAX_LEN_IDENTIFIER instead.")
MAX_LEN_IDENTIFIER = int(
os.environ.get("TUNE_MAX_LEN_IDENTIFIER",
os.environ.get("MAX_LEN_IDENTIFIER", 130)))
generated_dirname = f"{str(self)}_{self.experiment_tag}"
generated_dirname = generated_dirname[:MAX_LEN_IDENTIFIER]
generated_dirname += f"_{date_str()}"
return generated_dirname.replace("/", "_")
def invalidate_json_state(self):
self._state_valid = False
def get_json_state(self) -> str:
if not self._state_json or not self._state_valid:
json_state = json.dumps(
self.__getstate__(), indent=2, cls=TuneFunctionEncoder)
self._state_json = json_state
self._state_valid = True
return self._state_json
def __getstate__(self):
"""Memento generator for Trial.
Sets RUNNING trials to PENDING.
Note this can only occur if the trial holds a PERSISTENT checkpoint.
"""
state = self.__dict__.copy()
state["resources"] = resources_to_json(self.resources)
for key in self._nonjson_fields:
state[key] = binary_to_hex(cloudpickle.dumps(state.get(key)))
state["runner"] = None
state["location"] = Location()
# Avoid waiting for events that will never occur on resume.
state["restoring_from"] = None
state["saving_to"] = None
state["_state_json"] = None
state["_state_valid"] = False
return copy.deepcopy(state)
def __setstate__(self, state):
state["resources"] = json_to_resources(state["resources"])
if state["status"] == Trial.RUNNING:
state["status"] = Trial.PENDING
for key in self._nonjson_fields:
state[key] = cloudpickle.loads(hex_to_binary(state[key]))
self.__dict__.update(state)
validate_trainable(self.trainable_name)
# Avoid creating logdir in client mode for returned trial results,
# since the dir might not be creatable locally. TODO(ekl) thsi is kind
# of a hack.
if not ray.util.client.ray.is_connected():
self.init_logdir() # Create logdir if it does not exist
| apache-2.0 | 8,356,033,517,362,972,000 | 36.071325 | 79 | 0.590427 | false |
seanjh/DSRecommendationSystems | ml_evaluate_models.py | 1 | 1142 | #!/usr/bin/env python
import math
import config
import configspark
import ml_parse
import evaluate
sc = configspark.SPARK_CONTEXT
def clean():
config.clean_path(config.ML_MODEL)
def main():
clean()
ratings_train_text = sc.textFile(config.ML_RATINGS_TRAIN)
ratings_train = (
ratings_train_text
.map(ml_parse.parse_line)
.map(ml_parse.rating_convert))
ratings_validation_text = sc.textFile(config.ML_RATINGS_VALIDATION)
ratings_validation = (
ratings_validation_text
.map(ml_parse.parse_line)
.map(ml_parse.rating_convert))
ratings_train.cache()
ratings_validation.cache()
best_result = evaluate.evaluate(ratings_train, ratings_validation,
config.ML_RESULTS_FILE)
with open(config.ML_BEST_PARAMS_FILE, "w") as outfile:
outfile.write("%s,%s\n" % ("rank", "lambda"))
outfile.write("%s,%s" % (
best_result.get("rank"), best_result.get("lambda")))
best_model = best_result.get("model")
best_model.save(sc, config.ML_MODEL)
sc.stop()
if __name__ == "__main__":
main()
| apache-2.0 | 8,312,251,004,684,721,000 | 23.297872 | 71 | 0.620841 | false |
robertsj/poropy | examples/large_core_ex_1.py | 1 | 1787 | # examples/large_core_ex_1.py
#
# In this example, we investigate "by hand" the
# large reactor example.
import large_core
import time
import numpy as np
# Get the reactor from the premade script.
reactor = large_core.make_large_core()
# View all the diagnostics down the chain.
reactor.display()
# Evaluate the default pattern. We can grab the eigenvalue
# and peaking as return values.
k, p = reactor.evaluate()
print "k = ",k," p = ",p
# Alternatively, we can use print_params to display current
# values of all optimization parameters. Currently only
# keff and the max peaking are retained.
reactor.print_params()
## We can also print the power peaking.
reactor.print_peaking()
# With this, we can try optimizing by hand a bit. Peaking
# occurs at (0, 1). Printing the pattern helps visualize this.
reactor.print_pattern()
# We can also see what fuel type is where by looking, for
# example, at the burnup
reactor.print_map('burnup')
# or the enrichment
reactor.print_map('enrichment')
# Now, something that tends to work is to swap a peaking
# bundle with lower burnup with a lower peaking bundle
# with higher burnup. Let's switch the peaker with
# its 15 GWd/MTU neighbor at [0,2]. Then print and
# and evaluate.
reactor.swap([0,1],[0,2])
reactor.print_pattern()
reactor.evaluate()
reactor.print_params()
reactor.print_peaking()
# That's a slight peaking reduction with ja slight increase
# in keff. However, there is a better pattern. Try the
# "ring of fire":
pattern = np.array([48,36,5,6,19,23,17,40,3,10,15,25,32,1,44,7,9,18,33,31,8,43,11,20,26,\
24,21,16,35,27,28,29,30,12,41,34,22,13,2,45,37,14,0,4,42,47,46,38,39])
reactor.shuffle(pattern)
reactor.evaluate()
reactor.print_params()
reactor.print_peaking()
reactor.plot_peaking()
| mit | -5,657,274,151,031,554,000 | 27.822581 | 90 | 0.722999 | false |
anapophenic/knb | expt_primal.py | 1 | 15339 | import moments_cons as mc
import data_generator as dg
import binom_hmm as bh
import feature_map as fm
import numpy as np
import data_import as di
import matplotlib.pyplot as plt
import hmm_inference as hi
import visualize as vis
import os
import sys
import itertools
import td_tpm
import td_als
import em_bmm as eb
import baum_welch as bw
import postprocess as pp
import utils as ut
class model:
pass
def print_basic_info(mod):
mod.r = len(mod.ctxt_group) * len(mod.ce_group)
print ('ch = \n', mod.ch,
'\nce_group = \n', mod.ce_group,
'\ns = \n', mod.s,
'\nctxt_group = \n', mod.ctxt_group,
'\nr = \n', str(mod.r), '\n')
def print_data_info(mod):
print ('#rows of coverage (should be r)', np.shape(mod.coverage)[0],
'coverage = \n', mod.coverage,
'\nmethylated = \n', mod.methylated,
'\n') #+ 'x_zipped = ' + x_zipped
def print_data_selected_info(mod):
print ('l = \n', mod.l,
'\nN = \n', mod.N,
'\na = \n', mod.a,
'\nl_test = \n', mod.l_test,
'\n')
def data_select(mod):
print 'Preparing training data..'
coverage_train = mod.coverage[:,:mod.l].astype(float)
methylated_train = mod.methylated[:,:mod.l].astype(float)
x_importance_weighted = di.importance_weightify(mod.x_zipped, mod.l)
print 'Preparing test data..'
if mod.l_test is None:
mod.l_test = np.shape(mod.coverage)[1]
coverage_test = mod.coverage[:,:mod.l_test].astype(float)
methylated_test = mod.methylated[:,:mod.l_test].astype(float)
return coverage_train, methylated_train, coverage_test, methylated_test, x_importance_weighted
def print_header(mod):
mod.sec_title = vis.get_sec_title(mod)
vis.print_expt_setting(mod)
vis.print_table_header(mod)
def rand_init_params(m, r, n):
pi_h = ut.normalize_v(np.random.rand(m))
T_h = ut.normalize_m(np.random.rand(m,m))
p_ch_h = np.random.rand(r,m)
C_h = ut.normalize_m(np.random.rand(n,m))
return p_ch_h, C_h, T_h, pi_h
def estimate_observation(mod):
print 'Estimating..'
p_ch_h, C_h, T_h, pi_h = rand_init_params(mod.m_h, mod.r, mod.n)
if mod.td_alg == 'als':
C_h = td_als.als(mod.P_123, mod.m_h)
elif mod.td_alg == 'tpm':
C_h = td_tpm.tpm(mod.P_21, mod.P_31, mod.P_23, mod.P_13, mod.P_123, mod.m_h)
elif mod.td_alg == 'em_bmm':
p_c = mod.p_c[:,:100]
p_ch_h, pi_h = eb.em_bmm_group(mod.coverage_train, mod.methylated_train, p_ch_h, pi_h)
C_h = fm.expected_fm_p_c_group(mod.phi, mod.n, p_c, p_ch_h)
elif mod.td_alg == 'baum_welch':
p_c = mod.p_c[:,:100]
p_ch_h, T_h, pi_h = bw.baum_welch(mod.coverage_train, mod.methylated_train, p_ch_h, T_h, pi_h, 10)
C_h = fm.expected_fm_p_c_group(mod.phi, mod.n, p_c, p_ch_h)
return p_ch_h, C_h, T_h, pi_h
def postprocess(mod):
mod.lims = fm.phi_lims(mod.n, mod.r);
if mod.pp_alg == 'no':
return mod.p_ch_h, mod.C_h, mod.T_h, mod.pi_h
else:
C_h = pp.postprocess_m(mod.C_h)
if mod.pp_alg == 'pos':
C_h, T_h, pi_h = pp.refine_positify(C_h, mod.P_21, mod.P_31, mod.P_23, mod.P_13, mod.P_123)
elif mod.pp_alg == 'pos_ls':
T_h, pi_h = pp.refine_nmf(mod.P_21, C_h)
elif mod.pp_alg == 'pos_als_iter':
C_h, T_h, pi_h = pp.refine_als_p21(mod.P_21, C_h)
p_ch_h = fm.get_pc(mod.phi, mod.N, C_h, mod.a, mod.lims)
return p_ch_h, C_h, T_h, pi_h
def print_params(mod):
print 'm_h = \n' + str(mod.m_h)
mod.color_scheme = vis.default_color_scheme(mod.m_h)
#print '\nC_h = \n' + str(C_h)
#print '\nT_h = \n' + str(mod.T_h) + '\npi_h = \n' + str(mod.pi_h) + '\np_ch = \n' + str(mod.p_ch_h) + '\n'
print 'Printing matrices..'
print mod.T_h
T_title = mod.fig_title + 'T_h.pdf'
vis.show_m(mod.T_h, T_title, mod.path_name, mod.state_name_h, True)
print mod.C_h
C_title = mod.fig_title + 'C_h.pdf'
vis.show_m(mod.C_h, C_title, mod.path_name, mod.state_name_h, False)
print mod.p_ch_h
p_title = mod.fig_title + 'p_h.pdf'
vis.show_m(mod.p_ch_h, p_title, mod.path_name, mod.state_name_h, False)
print mod.pi_h
pi_title = mod.fig_title + 'pi_h.pdf'
vis.show_v(mod.pi_h, pi_title, mod.path_name, mod.state_name_h)
print 'generating feature map graph...'
mod.feature_map_title = mod.fig_title + '_feature_map.pdf'
vis.print_feature_map(mod)
def print_decoding(coverage, methylated, h_dec_p, mod, option_str):
mod.posterior_title = mod.fig_title + 'l_test = ' + str(mod.l_test) + option_str + '_posterior.pdf'
mod.bed_title = vis.get_bed_title_header(mod) + 'l_test = ' + str(mod.l_test) + option_str + '_bed'
bed_list = vis.print_bed(h_dec_p, mod)
vis.plot_meth_and_bed(coverage, methylated, bed_list, mod)
def decode(mod):
p_x_h = lambda i: bh.p_x_ch_binom(mod.p_ch_h, mod.coverage_test, mod.methylated_test, i)
print 'posterior decoding...'
mod.h_dec_p = hi.posterior_decode(mod.l_test, mod.pi_h, mod.T_h, p_x_h);
print_decoding(mod.coverage_test, mod.methylated_test, mod.h_dec_p, mod, '')
mod.coverage_test_reduced, mod.methylated_test_reduced = ut.reduce_nonzero(mod.coverage_test, mod.methylated_test)
mod.h_dec_p_reduced = mod.h_dec_p[mod.nz_idx]
print_decoding(mod.coverage_test_reduced, mod.methylated_test_reduced, mod.h_dec_p_reduced, mod, '_reduced_')
#vis.browse_states(h_dec_p, path_name, posterior_title, color_scheme)
#print 'viterbi decoding...'
#h_dec_v = hi.viterbi_decode(l_test, pi_h, T_h, p_x_h);
#viterbi_title = fig_title + 'l_test = ' + str(l_test) + '_viterbi.pdf'
#vis.browse_states(h_dec_v, viterbi_title, color_scheme)
def real_expt(mod):
#sys.stdout = open(path_name+'/parameters.txt', 'w+');
vis.directory_setup(mod);
vis.print_doc_header(mod);
for mod.ch, mod.ce_group, mod.s, mod.ctxt_group in itertools.product(mod.chrs, mod.cell_groups, mod.segments, mod.ctxt_groups):
print_basic_info(mod)
mod.coverage, mod.methylated, mod.N, mod.x_zipped, mod.a, mod.p_c = di.data_prep_ctxt_ce(mod);
print_data_info(mod)
for mod.l, mod.l_test in itertools.product(mod.lengths, mod.lengths_test):
print_data_selected_info(mod)
mod.coverage_train, mod.methylated_train, mod.coverage_test, mod.methylated_test, mod.x_importance_weighted = data_select(mod)
for mod.phi in mod.phis:
#mod.P_21, mod.P_31, mod.P_23, mod.P_13, mod.P_123 = mc.moments_cons_importance_weighted(mod);
#vis.save_moments(P_21, P_31, P_23, P_13, P_123, ch, ce_group, s, ctxt_group, l, path_name)
print_header(mod)
for mod.m_h, (mod.td_alg, mod.pp_alg) in itertools.product(mod.ms, mod.selections):
mod.p_ch_h, mod.C_h, mod.T_h, mod.pi_h = estimate_observation(mod)
mod.p_ch_h, mod.C_h, mod.T_h, mod.pi_h = postprocess(mod)
mod.fig_title = vis.get_fig_title(mod)
mod.state_name_h = vis.state_name(mod.p_ch_h)
decode(mod)
print_params(mod)
vis.print_fig_and(mod)
vis.print_fig_bs(mod)
vis.print_table_aheader(mod)
vis.print_doc_aheader(mod)
# in synthetic data, we can have additional rows indicating ground truth states
# bed_name_gtd = 'gt_decoder'
def get_bed(mod, option_str, p_ch_h, h_seq):
mod.m_h = np.shape(p_ch_h)[1]
mod.bed_title = vis.get_bed_title_header(mod) + 'l_test = ' + str(mod.l_test) + option_str + '_bed'
bed_list = vis.print_bed(h_seq, mod)
state_name = vis.state_name(p_ch_h)
return bed_list, state_name
def synthetic_print_decoding(mod, p_ch_h, T_h, pi_h, option_str):
p_x_h = lambda i: bh.p_x_ch_binom(p_ch_h, mod.coverage_test, mod.methylated_test, i)
h_dec_p = hi.posterior_decode(mod.l_test, pi_h, T_h, p_x_h);
mod.bed_list_h, mod.state_name_h = get_bed(mod, option_str, p_ch_h, h_dec_p)
vis.plot_meth_and_twobeds(mod.coverage_test, mod.methylated_test, mod)
def synthetic_matching(mod):
print 'Matching..'
col_ind = ut.find_match(mod.p_ch, mod.p_ch_h)
pi_h = mod.pi_h[col_ind]
p_ch_h = mod.p_ch_h[:,col_ind]
T_h = mod.T_h[:, col_ind][col_ind, :]
C_h = mod.C_h[:, col_ind]
#print 'm_hat = \n' + str(mod.m_h)
#print '\nC_h = \n' + str(C_h)
#print '\nT_h = \n' + str(T_h) + '\npi_h = \n' + str(pi_h) + '\np_ch_h = \n' + str(p_ch_h) + '\n'
print '|p_ch_h - p_ch| = ', np.linalg.norm(p_ch_h - mod.p_ch)
print '|T_h - T| = ', np.linalg.norm(T_h - mod.T)
print '|pi_h - pi| = ', np.linalg.norm(pi_h - mod.pi)
err_p = np.linalg.norm(p_ch_h - mod.p_ch)
return p_ch_h, C_h, T_h, pi_h, err_p
def synthetic_expt(mod):
vis.directory_setup(mod);
#sys.stdout = open(path_name+'/parameters.txt', 'w+');
mod.p_c, mod.p_ch, mod.T, mod.pi = generate_params(mod.N, mod.m, mod.r,mod.min_sigma_t, mod.mu)
print 'Generating Data..'
mod.size_to_try = len(mod.ls)
mod.errs_spectral = np.zeros((mod.reps, mod.size_to_try));
mod.errs_em = np.zeros((mod.reps, mod.size_to_try));
for i in range(mod.reps):
for j in range(mod.size_to_try):
mod.l = mod.ls[j]
mod.coverage_test, mod.methylated_test, mod.h_test = dg.generate_seq_bin_c(mod, mod.l_test);
mod.coverage_train, mod.methylated_train, mod.h_train = dg.generate_seq_bin_c(mod, mod.l);
mod.N, mod.x_zipped = di.triples_from_seq(mod.coverage_train, mod.methylated_train, 'explicit')
mod.a, mod.p_c_h = di.stats_from_seq(mod.coverage_train, mod.methylated_train)
mod.x_importance_weighted = di.importance_weightify(mod.x_zipped, mod.l);
mod.P_21, mod.P_31, mod.P_23, mod.P_13, mod.P_123 = mc.moments_cons_importance_weighted(mod);
mod.fig_title = 'synthetic'
mod.td_alg = ''
mod.pp_alg = ''
mod.bed_list_gt, mod.state_name_gt = get_bed(mod, 'gt_state', mod.p_ch, mod.h_test)
synthetic_print_decoding(mod, mod.p_ch, mod.T, mod.pi, 'gt_decoder')
#R_21, R_31, R_23, R_13, R_123, C, S_1, S_3 = mc.moments_gt(O, phi, N, n, T, pi)
#check_conc(P_21, R_21, P_31, R_31, P_23, P_13, P_123, R_123)
#print 'C = '
#print C
for mod.m_h, (mod.td_alg, mod.pp_alg) in itertools.product(mod.ms, mod.selections):
mod.p_ch_h, mod.C_h, mod.T_h, mod.pi_h = estimate_observation(mod)
mod.p_ch_h, mod.C_h, mod.T_h, mod.pi_h = postprocess(mod)
mod.p_ch_h, mod.C_h, mod.T_h, mod.pi_h, err_p = synthetic_matching(mod)
if mod.td_alg == 'tpm':
mod.errs_spectral[i,j] = err_p
elif mod.td_alg == 'baum_welch':
mod.errs_em[i,j] = err_p
print 'posterior decoding...'
synthetic_print_decoding(mod, mod.p_ch_h, mod.T_h, mod.pi_h, 'estimated_decoder')
print_params(mod)
mod.error_bar_spectral = error_bar(mod.errs_spectral)
mod.error_bar_em = error_bar(mod.errs_em)
plot_error(mod)
def error_bar(mat):
return (np.mean(mat, axis=0), np.std(mat, axis=0))
def plot_error(mod):
fig, ax = plt.subplots(1, 1)
plt.hold(True)
line_spectral = plt.errorbar(mod.ls, mod.error_bar_spectral[0], yerr=mod.error_bar_spectral[1], color='r', label='Spectral')
line_em = plt.errorbar(mod.ls, mod.error_bar_em[0], yerr=mod.error_bar_em[1], color='b', label='EM')
plt.yscale('log')
plt.legend([line_spectral, line_em], ['Spectral', 'EM'])
ax.grid(True)
ax.set_title('Reconstruction Error, m = ' + str(mod.m_h))
plt.show(block=False)
fig.savefig('Synthetic_m=' + str(mod.m_h))
plt.close('all')
def generate_params(N, m, r, min_sigma_t, mu):
p_c = dg.generate_p_c(N, r, mu);
print 'p_c = '
print p_c
p_ch = dg.generate_p_ch_random(m,r);
#p_ch = dg.generate_p_ch_cartesian(ms);
#p_ch = dg.generate_p_ch_monotone(m,r);
print 'p_ch = '
print p_ch
T = dg.generate_T(m, min_sigma_t);
print 'T = '
print T
pi = dg.generate_pi(m);
print 'pi = '
print pi
return p_c, p_ch, T, pi
if __name__ == '__main__':
np.random.seed(0);
#phi = phi_onehot;
#phi = phi_beta;
#phi = mc.phi_beta_shifted_cached;
#phi = mc.phi_binning_cached;
#if phi == mc.phi_onehot:
# n = N + 1
#for phi in [fm.phi_beta_shifted_cached, fm.phi_binning_cached]:
# for m in range(2,10,1):
# synthetic_expt(phi, m)
#path_name = 'synthetic/1026'
#synthetic_expt(fm.phi_beta_shifted_cached, 3, path_name);
'''
Expt 1: Compare Binning Feature vs. Beta Feature
'''
'''
mod = model()
#chrs = [str(a) for a in range(1,20,1)]
#chrs.append('X')
#chrs.append('Y')
#chrs = ['1', '2']
mod.chrs = ['1']
#cells = ['E1', 'E2', 'V8', 'V9', 'P13P14', 'P15P16']
#cells = ['E2', 'E1', 'E', 'V8', 'V9', 'V', 'P13P14', 'P15P16', 'P']
#cells = ['E', 'V', 'P']
#cell_groups = [['E', 'V']]
mod.cell_groups = [['E', 'V']]
# n should be divisible by cell_groups * ctxt_groups
mod.n = 50
mod.ms = range(6, 7)
#order: CC, CT, CA, CG
#ctxt_groups = [[range(0,4)], [range(4,8)], [range(8,12)], [range(12,16)], [range(0,4), range(4,8), range(8,12), range(12, 16)]]
#ctxt_groups = [[range(8,12), range(12,16)]]
#ctxt_groups = [[range(0,4)]]
#ctxt_groups = [[range(0,4), range(4,8), range(8,12), range(12, 16)]]
mod.ctxt_groups = [[range(12,16)]]
#'als', 'tpm',
#td_algs = ['em_bmm', 'baum_welch']
#pp_algs = ['pos', 'pos_als', 'pos_als_iter','no']
mod.path_name = '0527/'
mod.tex_name = 'result.tex'
#segments = range(1, 6)
#segments = range(1,5)
mod.segments = [1]
mod.lengths = [10000]
#, 20000, 40000, 80000, 160000, 320000
mod.lengths_test = [10000]
#phis = [mc.phi_beta_shifted_cached, mc.phi_binning_cached]
mod.phis = [fm.phi_beta_shifted_cached_listify]
#mod.phis = [fm.phi_binning_cached_listify]
#fm.phi_beta_shifted_cached_listify
#mod.selections = [('baum_welch', 'no'), ('em_bmm', 'pos_ls')]
mod.selections = [('tpm', 'pos'), ('als', 'pos')]
real_expt(mod)
#real_expt(phis, chrs, cell_groups, segments, lengths, lengths_test, n, ms, ctxt_groups, 0, path_name, tex_name)
'''
'''
Expt 2: Vary the number of Segments
'''
'''
path_name = 'vary_s'
segments = range(1,6)
lengths = [320000]
phis = [mc.phi_beta_shifted_cached]
real_expt(phis, chrs, cells, segments, lengths, n, ms, ctxt, path_name)
'''
'''
Expt 3: Synthetic dataset
'''
mod = model()
mod.ch = '1'
mod.s = 1
mod.ce_group = ['E','V']
mod.ctxt_group = [range(12,16)]
mod.phi = fm.phi_binning_cached_listify;
mod.path_name = 'synthetic/'
mod.n = 30
mod.N = 100
mod.ls = [pow(2,i) for i in range(8, 16)]
mod.l_test = 500;
mod.min_sigma_t = 0.8
mod.r = 2
mod.m = 4
mod.ms = range(4,5,1)
mod.selections = [('tpm', 'pos'), ('baum_welch', 'no')]
mod.reps = 200
mod.mu = 25
synthetic_expt(mod)
| cc0-1.0 | -183,416,328,529,724,450 | 35.007042 | 138 | 0.575722 | false |
Manouchehri/pychdk | ptp2/camera.py | 1 | 18415 | import logging
import usb
import struct
import binascii
import time
from os import path
import ptp2.util
from ptp2.typedefs import *
from ptp2.chdk_ptp_values import *
from ptp2.ptp_values import StandardResponses
__all__ = ['PTPCamera', 'CHDKCamera']
class _CameraBase(object):
def __init__(self, usb_device=None, log_level=logging.WARNING):
self._intf = None
self._handle = None
self._ep_in = None
self._ep_out = None
self._ep_intr = None
self.logger = logging.getLogger('_CameraBase')
self.logger.setLevel(log_level)
self._transaction_id = 0
if usb_device is not None:
self.open(usb_device)
def __del__(self):
self.close()
def open(self, usb_device):
intf = ptp2.util.get_ptp_interface(usb_device)
if intf is None:
raise TypeError('USB Device %s not a PTP Camera' % (usb_device))
self._intf = intf
self._handle = usb_device
# Grab endpoints
for ep in self._intf:
ep_type = usb.util.endpoint_type(ep.bmAttributes)
ep_dir = usb.util.endpoint_direction(ep.bEndpointAddress)
if ep_type == usb.util.ENDPOINT_TYPE_BULK:
if ep_dir == usb.util.ENDPOINT_IN:
self._ep_in = ep.bEndpointAddress
elif ep_dir == usb.util.ENDPOINT_OUT:
self._ep_out = ep.bEndpointAddress
elif ep_type == usb.util.ENDPOINT_TYPE_INTR:
self._ep_intr = ep.bEndpointAddress
def close(self):
# Excplicity release usb device
if self._handle is not None:
usb.util.dispose_resources(self._handle)
# _, self._handle = self._handle, None
_, self._intf = self._intf, None
self._ep_in = None
self._ep_out = None
self._ep_intr = None
def reopen(self):
if self._handle is None:
raise ValueError('No USB Device assigned. (Did you open it first?)')
if self._intf is not None:
raise ValueError('Already open')
self.open(self._handle)
def _bulk_write(self, bytestr, timeout=0):
return self._handle.write(self._ep_out, bytestr, timeout=timeout)
def _bulk_read(self, size, timeout=0):
return self._handle.read(self._ep_in, size, timeout=timeout).tostring()
def check_event(self, size=512, timeout=5000):
buf = self._handle.read(self._ep_intr, size=size, timeout=timeout).tostring()
p = ParamContainer(buf)
self.logger.debug('Received Event ' + buf.encode('hex'))
self.logger.debug(repr(p))
if p.type != PTP_CONTAINER_TYPE.EVENT:
raise ValueError('Received non-event container of type {t} on interrupt endpoint!'.format(t=p.type))
return p
def send_ptp_message(self, bytestr, timeout=0):
self.logger.debug('Sending ' + binascii.hexlify(bytestr).decode('utf-8')) # .encode('hex'))
return self._bulk_write(bytestr, timeout)
def recv_ptp_message(self, timeout=0):
buf = self._bulk_read(size=512, timeout=timeout)
self.logger.debug('Received ' + binascii.hexlify(buf).decode('utf-8'))
msg_len = struct.unpack('<I', buf[:4])[0]
bytes_left = msg_len - 512
if bytes_left > 0:
buf += self._bulk_read(size=bytes_left, timeout=timeout)
return buf
def new_ptp_command(self, op_code, params=[]):
ptp_command = ParamContainer()
ptp_command.type = PTP_CONTAINER_TYPE.COMMAND
ptp_command.code = op_code
ptp_command.transaction_id = self._transaction_id
ptp_command.params = params
self._transaction_id += 1
return ptp_command
def ptp_transaction(self, command, params=[], tx_data=None, receiving=True, timeout=0):
recvd_data = None
recvd_response = None
ptp_request = self.new_ptp_command(command, params)
ptp_request_data = None
if tx_data is not None:
assert isinstance(tx_data, str)
ptp_request_data = DataContainer()
ptp_request_data.code = ptp_request.code
ptp_request_data.transaction_id = ptp_request.transaction_id
ptp_request_data.data = tx_data
# Send request
bytes_xfrered = self.send_ptp_message(ptp_request.pack(), timeout)
# Send data
if ptp_request_data is not None:
bytes_xfered = self.send_ptp_message(ptp_request_data.pack(), timeout)
if receiving:
# read first 512 bytes to grab total data length
buf = self.recv_ptp_message(timeout)
_, type_ = struct.unpack('<IH', buf[:6])
if type_ == PTP_CONTAINER_TYPE.DATA:
recvd_data = DataContainer(buf)
elif type_ == PTP_CONTAINER_TYPE.RESPONSE:
recvd_response = ParamContainer(buf)
elif type_ in [PTP_CONTAINER_TYPE.COMMAND, PTP_CONTAINER_TYPE.EVENT]:
recvd_data = ParamContainer(buf)
else:
raise TypeError('Unknown PTP USB container type: %d' % (type_))
# If we haven't got the response yet, try again
if recvd_response is None:
buf = self.recv_ptp_message(timeout=timeout)
_, type_ = struct.unpack('<IH', buf[:6])
if type_ == PTP_CONTAINER_TYPE.RESPONSE:
recvd_response = ParamContainer(buf)
else:
raise TypeError('Expected response container, received type: %d' % (type_))
if recvd_response is not None:
self.logger.debug('Response: ' + repr(recvd_response))
self.logger.debug('ptp_transaction end')
return recvd_response, recvd_data
class PTPCamera(_CameraBase):
"""
If the PTPCamera class is not initialized with a usb_device handle, the first
PTP device found will be used.
"""
def __init__(self, usb_device=None, log_level=logging.WARNING):
self.logger = logging.getLogger('PTPCamera')
self.logger.setLevel(log_level)
if usb_device is None:
cams = ptp2.util.list_ptp_cameras()
if not cams:
raise IOError('No PTP Devices Found')
usb_device = cams[0]
self.logger.debug('Init with PTP device ' + usb_device.product)
self.session_id = 0x1
_CameraBase.__init__(self, usb_device=usb_device, log_level=log_level)
def open_session(self):
response, data = self.ptp_transaction(PTP_OPCODE.OPEN_SESSION, params=[self.session_id])
if (response.code != PTP_RESPONSE_CODE.OK) and (response.code != PTP_RESPONSE_CODE.SESSION_ALREADY_OPENED):
raise ValueError('Could not open PTP session (got 0x{:x})'.format(response.code))
return True
def close_session(self):
response, data = self.ptp_transaction(PTP_OPCODE.CLOSE_SESSION)
return self.check_response(response)
def initiate_capture(self):
response, data = self.ptp_transaction(PTP_OPCODE.INITIATE_CAPTURE, params=[0x0, 0x0])
self.check_response(response)
return response, data
def capture(self):
self.open_session()
response, data = self.initiate_capture()
self.check_response(response)
# We should now receive an ObjectAdded event followed by a CaptureComplete event
# However, the Nikon J3 often (but not always) sends these two events out of order.
# TODO: sometimes we receive DevicePropChanged instead of ObjectAdded from the Nikon J3
obj_added_event = None
capture_complete_event = None
event1 = self.check_event()
event2 = self.check_event()
for event in [event1, event2]:
if event.code == PTP_EVENT_CODE.OBJECT_ADDED:
obj_added_event = event
elif event.code == PTP_EVENT_CODE.CAPTURE_COMPLETE:
capture_complete_event = event
if obj_added_event is None:
raise IOError('ObjectAdded event was not received')
if capture_complete_event is None:
raise IOError('CaptureComplete event was not received')
# self.close_session()
object_handle = obj_added_event.params[0]
return object_handle
def capture_and_download(self):
start_time = time.time()
object_handle = self.capture()
response, data = self.ptp_transaction(PTP_OPCODE.GET_OBJECT, params=[object_handle])
total_time = time.time() - start_time
self.logger.info('total time to capture and download: {s:0.4f} seconds'.format(s=total_time))
img_size = data.length
self.logger.debug('image size ' + str(img_size - 12))
# f = open('/tmp/foo.jpg', 'w')
# f.write(data.data)
# self.logger.debug('wrote tmp file')
def check_response(self, response):
if response.code != PTP_RESPONSE_CODE.OK:
raise ValueError('PTP response code was not OK (got 0x{:x})'.format(response.code))
return True
class CHDKCamera(_CameraBase):
"""
For use with Canon cameras using the CHDK firmware.
Available functions (see docstrings for info):
get_chdk_version
upload_file
download_file
get_live_view_data
execute_lua
read_script_message
write_script_message
"""
def __init__(self, usb_device=None):
_CameraBase.__init__(self, usb_device)
def get_chdk_version(self):
"""
Retrieves the PTP-core (MAJOR,MINOR) version tuple from the
camera.
Note: This is different than the (MAJOR,MINOR) version tuple
for the live_view PTP extensions.
"""
recvd_response, _ = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.Version],
tx_data=None, receiving=False, timeout=0)
major, minor = recvd_response.params
return major, minor
def check_script_status(self):
"""
:returns: CHDKScriptStatus
Check status of running scripts on camera
"""
recvd_response, _ = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.ScriptStatus],
tx_data=None, receiving=False, timeout=0)
status = recvd_response.params[0]
return status
def execute_lua(self, script, block=False):
"""
:param script: LUA script to execute on camera
:type script: str
:param block: Wait for script to return before continuing
:type block: bool
:returns: (script_id, script_error, [msgs])
Execute a script on the camera.
Values returned by the LUA script are passed in individual
messages.
"""
# NULL terminate script if necessary
if not script.endswith('\0'):
script += '\0'
recvd_response, _ = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.ExecuteScript, CHDKScriptLanguage.LUA],
tx_data=script, receiving=False, timeout=0)
script_id, script_error = recvd_response.params
if not block:
return script_id, script_error, []
else:
msgs = self._wait_for_script_return()
return script_id, script_error, msgs
def read_script_message(self):
"""
Checks camera for messages created by running scripts.
"""
recvd_response, recvd_data = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.ReadScriptMsg, CHDKScriptLanguage.LUA],
tx_data=None, receiving=True, timeout=0)
return recvd_response, recvd_data
def write_script_message(self, message, script_id=0):
"""
:param message: Message to send
:type message: str
:param script_id: ID of script to deliver message to.
:type script_id: int
Passes a message to a running script.
"""
recvd_response, _ = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.WriteScriptMsg, script_id],
tx_data=message, receiving=False, timeout=0)
msg_status = recvd_response.params[0]
return msg_status
@classmethod
def __pack_file_for_upload(cls, local_filename, remote_filename=None):
"""
Private method to create a buffer holding
filename's contents for uploading to the camera.
called in `CHDKCamera.upload_file'
"""
if remote_filename is None:
remote_filename = path.basename(remote_filename)
if not remote_filename.endswith('\0'):
remote_filename += '\0'
filename_len = len(remote_filename)
fmt = '<I%dc' % (filename_len)
filebuf = struct.pack(fmt, filename_len, remote_filename)
with open(local_filename, 'rb') as fid:
contents = fid.read(-1)
fmt = '<%dB' % (len(contents))
filebuf += struct.pack(fmt, *contents)
return filebuf
def upload_file(self, local_filename, remote_filename=None, timeout=0):
"""
:param local_filename: Name of file on computer
:type local_filename: str
:param remote_filename: Name of file on camera
:type remote_filename: str
Upload a file to the camera. If remote_filename is None, the
file is uploaded to the root folder on the SD card.
"""
filestr = self.__pack_file_for_upload(local_filename, remote_filename)
dlfile_response, dlfile_data = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.UploadFile],
tx_data=filestr, receiving=False, timeout=timeout)
if ret_code != CHDKResponses.OK:
raise PTPError(tempdata_response.params[0], CHDKResponses.message[ret_code])
def download_file(self, filename, timeout=0):
"""
:param filename: Full path of file to download
:type filename: str
Download a file from the camera
"""
# CHDK Download process:
# - Store desried filename on camera w/ TempData
# - Send DownloadFile command
if not filename.endswith('\0'):
filename += '\0'
tempdata_response, _ = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.TempData, 0],
tx_data=filename, receiving=False, timeout=timeout)
ret_code = tempdata_response.params[0]
# check response for problems
if ret_code != CHDKResponses.OK:
raise PTPError(tempdata_response.params[0], CHDKResponses.message[ret_code])
dlfile_response, dlfile_data = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.DownloadFile],
tx_data=None, receiving=True, timeout=timeout)
ret_code = tempdata_response.params[0]
# check response for problems
if ret_code != CHDKResponses.OK:
raise PTPError(tempdata_response.params[0], CHDKResponses.message[ret_code])
# Clear tempdata field
clear_response, _ = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.TempData, CHDKTempData.CLEAR],
tx_data=None, receiving=False, timeout=timeout)
# Return the raw string buffer
return dlfile_data.data
def get_live_view_data(self, liveview=True, overlay=False, palette=False):
"""
:param liveview: Return the liveview image
:type liveview: bool
:param overlay: Return the overlay image
:type overlay: bool
:param palette: Return the overlay palette
:type palette: bool
:returns: :class:`typdefs.CHDK_LV_Data`
Grabs a live view image from the camera.
"""
flags = 0
if liveview:
flags |= CHDKLVTransfer.VIEWPORT
if overlay:
flags |= CHDKLVTransfer.BITMAP
if palette:
flags |= CHDKLVTransfer.PALETTE
recvd_response, recvd_data = self.ptp_transaction(command=PTP_OC_CHDK,
params=[CHDKOperations.GetDisplayData, flags],
tx_data=None, receiving=True, timeout=0)
if recvd_data.type == PTP_CONTAINER_TYPE.DATA:
lv_data = CHDK_LV_Data(recvd_data.data)
else:
lv_data = None
return recvd_response, lv_data
def _wait_for_script_return(self, timeout=0):
"""
Polls the camera every 50ms.
Reads queued messages if present, sleeps again if
a script is currently running.
Returns read messages when no scripts are running.
"""
msg_count = 1
msgs = []
t_start = time.time()
while True:
STATUS = self.check_script_status()
if STATUS & CHDKScriptStatus.RUN:
# log.debug('Script running, sleeping 50ms')
time.sleep(50e-3)
if timeout > 0 and timeout > (time.time() - t_start):
raise PTPError(StandardResponses.TRANSACTION_CANCELLED, "Timeout waiting for script to return")
elif STATUS & CHDKScriptStatus.MSG:
msg, msg_buf = self.read_script_message()
msg_count += 1
msgs.append((msg, msg_buf))
elif STATUS == CHDKScriptStatus.NONE:
break
else:
raise PTPError(StandardResponses.UNDEFINED, "Invalid response for script status: 0x%X" % (STATUS))
return msgs
| gpl-3.0 | -3,705,542,958,739,789,000 | 34.07619 | 120 | 0.576595 | false |
allmyservos/allmyservos | __bootstrap.py | 1 | 6408 | #!/usr/bin/python
#######################################################################
# AllMyServos - Fun with PWM
# Copyright (C) 2015 Donate BTC:14rVTppdYQzLrqay5fp2FwP3AXvn3VSZxQ
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#######################################################################
import sys, traceback, logging, os, re, time
from subprocess import Popen, PIPE
from StringIO import StringIO
## The AmsEnvironment object collects required information from the host pi
class AmsEnvironment:
patterns = {
'pid': re.compile(r'(?P<pid>\d+)')
}
info = {}
@staticmethod
def Now():
return int(round(time.time() * 1000))
@staticmethod
def AppInfo():
""" Returns environment info
"""
if (not any(AmsEnvironment.info)):
a = AmsEnvironment.info
a['app_path'] = os.path.dirname(__file__)
a['contrib_path'] = os.path.join(a['app_path'],'contrib')
a['file_path'] = os.path.join(a['app_path'],'files')
a['command_script'] = sys.argv[0]
a['command_args'] = sys.argv[1:]
try:
a['terminal'] = os.ttyname(sys.stdout.fileno())
except:
a['terminal'] = '';
return AmsEnvironment.info
@staticmethod
def AppPath():
""" Returns app path
"""
try:
AmsEnvironment.info['app_path']
except:
AmsEnvironment.AppInfo()
return AmsEnvironment.info['app_path']
@staticmethod
def ContribPath():
""" Returns contrib path
"""
try:
AmsEnvironment.info['contrib_path']
except:
AmsEnvironment.AppInfo()
return AmsEnvironment.info['contrib_path']
@staticmethod
def FilePath():
""" Returns file path
"""
try:
AmsEnvironment.info['file_path']
except:
AmsEnvironment.AppInfo()
return AmsEnvironment.info['file_path']
@staticmethod
def Terminal():
""" Returns the current terminal
"""
try:
AmsEnvironment.info['terminal']
except:
AmsEnvironment.AppInfo()
return AmsEnvironment.info['terminal']
@staticmethod
def Vendors():
""" Returns list of vendor names
"""
try:
AmsEnvironment.__vendors
except:
AmsEnvironment.__vendors = os.listdir(AmsEnvironment.ContribPath())
AmsEnvironment.__vendors = [ x for x in AmsEnvironment.__vendors if os.path.isdir(os.path.join(AmsEnvironment.ContribPath(), x)) ]
return AmsEnvironment.__vendors
@staticmethod
def IsLxdeRunning():
""" Returns whether lxde is running
"""
try:
AmsEnvironment.__lxdeRunning
except:
AmsEnvironment.__lxdeRunning = AmsEnvironment.__isLxdeRunning()
return AmsEnvironment.__lxdeRunning
@staticmethod
def Scan():
""" Adds system paths required to import modules in the contrib folder
"""
try:
AmsEnvironment.__scanned
except:
AmsEnvironment.__scanned = True
vendors = AmsEnvironment.Vendors()
if (any(vendors)):
for v in vendors:
vpath = os.path.join(AmsEnvironment.ContribPath(), v)
mods = os.listdir(vpath)
mods = [ x for x in mods if os.path.isdir(os.path.join(vpath, x)) ]
for m in mods:
sys.path.append(os.path.join(vpath, m))
@staticmethod
def EnableErrorLogging():
logpath = os.path.join(AmsEnvironment.FilePath(),'logs')
if (not os.path.exists(logpath)):
os.makedirs(logpath)
logging.basicConfig(filename=os.path.join(AmsEnvironment.FilePath(),'logs','exception.log'),filemode='a',level=logging.DEBUG, format= '%(asctime)s - %(levelname)s - %(message)s')
AmsEnvironment.logger = logging.getLogger('amslogger')
sys.excepthook = AmsEnvironment.errorHandler
@staticmethod
def EnableOutputLogging():
AmsEnvironment._old_stdout = sys.stdout
AmsEnvironment._old_stderr = sys.stderr
AmsEnvironment.outlogger = OutLogger(AmsEnvironment._old_stdout, AmsEnvironment._old_stderr, os.path.join(AmsEnvironment.FilePath(),'logs'))
sys.stdout = AmsEnvironment.outlogger
sys.stderr = AmsEnvironment.outlogger
@staticmethod
def outputHandler(value):
AmsEnvironment.logger.debug(value)
@staticmethod
def errorHandler(type, value, tb):
AmsEnvironment.logger.exception("Uncaught exception: {0}".format(str(value)))
@staticmethod
def __extract_function_name():
tb = sys.exc_info()[-1]
stk = traceback.extract_tb(tb, 1)
fname = stk[0][3]
return fname
def LogException(e):
logging.error(
"Function {function_name} raised {exception_class} ({exception_docstring}): {exception_message}".format(
function_name = AmsEnvironment.__extract_function_name(), #this is optional
exception_class = e.__class__,
exception_docstring = e.__doc__,
exception_message = e.message))
@staticmethod
def __isLxdeRunning():
""" Utility
"""
if(not 'console' in AmsEnvironment.Terminal()):
#not running from rc.local
for l in AmsEnvironment.__pgrepX().split('\n'):
match = AmsEnvironment.patterns['pid'].match(l)
if(match):
return True
return False
@staticmethod
def __pgrepX():
""" Utility
"""
p = Popen(['pgrep', 'X'], stdout=PIPE)
o = p.communicate()[0]
if(p.returncode == 0):
return o
return ''
## Custom StdOut handler to copy ouput to a log file.
class OutLogger(StringIO):
def __init__(self, old_stdout, old_stderr, logpath, useold = True):
""" Initializes the Logger object
Extends StringIO in order to capture stdout and stderr
@param parent
@param gui
@param options
"""
StringIO.__init__(self) #overriding object must implement StringIO
self.logpath = logpath
if (not os.path.exists(self.logpath)):
os.makedirs(self.logpath)
self.logfile = os.path.join(self.logpath, 'output.log')
self.useold = useold
self.old_stdout = old_stdout
self.old_stderr = old_stderr
def write(self, value):
''' capture and reverse console output
'''
try:
StringIO.write(self,value)
f = open(self.logfile, 'a')
f.write(value)
f.close()
except Exception as e:
pass
if(self.useold):
self.old_stdout.write(value) #repeat to command line
AmsEnvironment.Scan() | gpl-2.0 | 8,564,679,286,356,368,000 | 30.571429 | 180 | 0.686954 | false |
dhess/lobbyists | lobbyists/tests/test_parse_govt_entities.py | 1 | 4633 | # -*- coding: utf-8 -*-
#
# test_parse_govt_entities.py - Tests for lobbyists govt entity parsing.
# Copyright (C) 2008 by Drew Hess <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests for lobbyists govt entity parsing."""
import unittest
import lobbyists
import util
class TestParseGovtEntities(unittest.TestCase):
def test_name(self):
"""Parse government entity name"""
filings = list(lobbyists.parse_filings(util.testpath('government_entity_name.xml')))
x = filings.pop()
f = x['filing']
self.failUnlessEqual(f['id'], '2627E811-33AB-43F4-B8E0-5B979A10FBF9')
entities = x['govt_entities']
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'UNDETERMINED')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'UNDETERMINED')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'UNDETERMINED')
self.failUnlessEqual(len(entities), 0)
x = filings.pop()
f = x['filing']
self.failUnlessEqual(f['id'], 'A55002C7-78C4-41BA-A6CA-01FCF7650116')
entities = x['govt_entities']
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'Treasury, Dept of')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'Federal Reserve System')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'HOUSE OF REPRESENTATIVES')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'Vice President of the U.S.')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'Office of Policy Development')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'],
'Executive Office of the President (EOP)')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'SENATE')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'White House Office')
self.failUnlessEqual(len(entities), 0)
x = filings.pop()
f = x['filing']
self.failUnlessEqual(f['id'], '106C2C6E-F0E1-46E3-9409-294E0BD27878')
entities = x['govt_entities']
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'],
'Federal Communications Commission (FCC)')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'Environmental Protection Agency (EPA)')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'Energy, Dept of')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'HOUSE OF REPRESENTATIVES')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'],
'Federal Energy Regulatory Commission (FERC)')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'SENATE')
self.failUnlessEqual(len(entities), 0)
x = filings.pop()
f = x['filing']
self.failUnlessEqual(f['id'], 'FFF29969-FDEC-4125-809E-0D8D2D8E73FC')
entities = x['govt_entities']
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'],
'Health & Human Services, Dept of (HHS)')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'SENATE')
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'HOUSE OF REPRESENTATIVES')
self.failUnlessEqual(len(entities), 0)
x = filings.pop()
f = x['filing']
self.failUnlessEqual(f['id'], 'FD29F4AF-763B-42A6-A27E-0AE115CD6D51')
entities = x['govt_entities']
e = entities.pop()['govt_entity']
self.failUnlessEqual(e['name'], 'NONE')
self.failUnlessEqual(len(entities), 0)
self.failUnlessEqual(len(filings), 0)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -3,323,991,959,407,665,700 | 41.118182 | 92 | 0.615152 | false |
mozilla/FlightDeck | apps/search/views.py | 1 | 5465 | import commonware.log
from django.core.paginator import Paginator, EmptyPage
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from jetpack.models import Package
from .helpers import package_search, get_activity_scale
from .forms import SearchForm
from pyes.urllib3.connectionpool import TimeoutError
log = commonware.log.getLogger('f.search')
SORT_MAPPING = {
'score':'_score',
'activity':'-activity',
'forked':'-copies_count',
'used':'-times_depended',
'new':'-created_at',
'size':'-size',
}
REVERSE_SORT_MAPPING = dict((v, k) for k, v in SORT_MAPPING.items())
def search(request):
form = SearchForm(request.GET)
form.is_valid()
query = form.cleaned_data
q = query.get('q').lower()
type_ = query.get('type') or None
types = {'a': 'addon', 'l': 'library'}
page = query.get('page') or 1
limit = 20
activity_map = get_activity_scale()
if q and query.get('sort') == '':
sort = '_score'
elif query.get('sort') == '':
sort = '-activity'
else:
sort = SORT_MAPPING.get(query.get('sort'), '_score')
query['sort'] = REVERSE_SORT_MAPPING.get(sort)
filters = {}
filters['user'] = request.user
author = query.get('author')
if author:
filters['author'] = author.id
if query.get('copies'):
filters['copies_count__gte'] = query['copies']
else:
query['copies'] = 0
if query.get('used') and type_ != 'a':
# Add-ons can't be depended upon, so this query would filter out
# every single Add-on
filters['times_depended__gte'] = query['used']
else:
query['used'] = 0
if query.get('example'):
filters['example'] = 'true'
if query.get('featured'):
filters['featured'] = 'true'
if query.get('activity'):
filters['activity__gte'] = activity_map.get(str(query['activity']), 0)
copies_facet = {'terms': {'field': 'copies_count'}}
times_depended_facet = {'terms': {'field': 'times_depended'}}
examples_facet = {'query': {'term': {'example': 'true' }}}
featured_facet = {'query': {'term': {'featured': 'true' }}}
facets_ = {
'copies': copies_facet,
'times_depended': times_depended_facet,
'example': examples_facet,
'featured': featured_facet
}
template = ''
results={}
facets={}
if type_:
filters['type'] = type_
qs = package_search(q, **filters).order_by(sort).facet(**facets_)
try:
results['pager'] = Paginator(qs, per_page=limit).page(page)
except EmptyPage:
results['pager'] = Paginator(qs, per_page=limit).page(1)
facets = _facets(results['pager'].object_list.facets)
facets['everyone_total'] = len(qs)
template = 'results.html'
else:
# combined view
results['addons'] = package_search(q, type='a', **filters) \
.order_by(sort)[:5]
results['libraries'] = package_search(q, type='l', **filters) \
.order_by(sort)[:5]
results['all'] = package_search(q, **filters).facet(**facets_)[:0]
facets = _facets(results['all'].facets)
facets['everyone_total'] = facets['combined_total']
template = 'aggregate.html'
ctx = {
'q': q,
'page': 'search',
'form': form,
'query': query,
'type': types.get(type_, None)
}
ctx.update(results)
ctx.update(facets)
if request.is_ajax():
template = 'ajax/' + template
return _render(request, template, ctx)
def rss_redirect(request, type_):
from base.helpers import urlparams
form = SearchForm(request.GET)
form.is_valid()
query = dict(form.cleaned_data)
if type_ != 'combined':
query['type'] = type_[0]
return redirect(urlparams(reverse('search.rss'), **query), permanent=True)
def _render(request, template, data={}):
return render_to_response(template, data, RequestContext(request))
def _facets(facets):
type_totals = dict((t['term'], t['count']) for t in facets['types'])
my_total = 0
if 'author' in facets and len(facets['author']):
my_total = facets['author'][0]['count']
max_copies = 0
if 'copies' in facets:
copies_steps = [t['term'] for t in facets['copies']]
if copies_steps:
copies_steps.sort()
max_ = copies_steps.pop()
max_copies = max(max_copies, max_)
max_times_depended = 0
if 'times_depended' in facets:
depended_steps = [t['term'] for t in facets['times_depended']]
if depended_steps:
depended_steps.sort()
max_ = depended_steps.pop()
max_times_depended = max(max_times_depended, max_)
example_count = 0
if 'example' in facets:
example_count = facets['example']
featured_count = 0
if 'featured' in facets:
featured_count = facets['featured']
return {
'addon_total': type_totals.get('a', 0),
'library_total': type_totals.get('l', 0),
'my_total': my_total,
'combined_total': type_totals.get('a', 0) + type_totals.get('l', 0),
'max_copies': max_copies,
'max_times_depended': max_times_depended,
'examples_total': example_count,
'featured_total': featured_count
}
| bsd-3-clause | 6,986,333,226,037,713,000 | 29.19337 | 78 | 0.58097 | false |
JoseHerradez/Django_Patrones | carrusel/breadcrumbs/tests.py | 1 | 1821 | from django.test import TestCase
from breadcrumbs.models import *
from breadcrumbs.views import *
class ArmarBreadcrumbslHTMLTestCase(TestCase):
def setUp(self):
BreadcrumbsLevels.objects.create(niveles=4)
def test_armarBreadcrumbsHTML(self):
levels = BreadcrumbsLevels.objects.get(niveles=4)
breadcrumbs = BreadcrumbsContent.objects.filter(breadcrumb=levels.pk).values()
data = {
'elements': BreadcrumbsContent.objects.filter(breadcrumb=levels.pk).values()
}
dic = armarBreadcrumbsHTML(data)
self.assertNotEqual(dic,None)
def test_armarBreadcrumbsHTML_2(self):
levels = BreadcrumbsLevels.objects.get(niveles=4)
data = {
'elements': BreadcrumbsContent.objects.filter(breadcrumb=levels.pk).values()
}
dic = armarBreadcrumbsHTML(data)
self.assertNotEqual(dic['file'], None)
def test_armarBreadcrumbsHTML_3(self):
levels = BreadcrumbsLevels.objects.get(niveles=4)
data = {
'elements': BreadcrumbsContent.objects.filter(breadcrumb=levels.pk).values()
}
dic = armarBreadcrumbsHTML(data)
self.assertNotEqual(dic['code'], None)
def test_armarBreadcrumbsHTML_4(self):
levels = BreadcrumbsLevels.objects.get(niveles=4)
data = {
'elements': BreadcrumbsContent.objects.filter(breadcrumb=levels.pk).values()
}
dic = armarBreadcrumbsHTML(data)
self.assertNotEqual(dic['file'], "")
def test_armarBreadcrumbsHTML_5(self):
levels = BreadcrumbsLevels.objects.get(niveles=4)
data = {
'elements': BreadcrumbsContent.objects.filter(breadcrumb=levels.pk).values()
}
dic = armarBreadcrumbsHTML(data)
self.assertNotEqual(dic['code'], "") | gpl-3.0 | 1,260,581,154,931,359,000 | 36.183673 | 88 | 0.662273 | false |
llvm/llvm-zorg | llvmbisect/llvmlab/shell.py | 1 | 1123 | """
shell like utilities
"""
import os
def execute(args):
import subprocess
"""execute(command) - Run the given command (or argv list) in a shell and
return the exit code."""
return subprocess.Popen(args).wait()
def capture(args, include_stderr=False):
import subprocess
"""capture(command) - Run the given command (or argv list) in a shell and
return the standard output."""
stderr = subprocess.PIPE
if include_stderr:
stderr = subprocess.STDOUT
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=stderr)
out, _ = p.communicate()
return p.wait(), out
def mkdir_p(path):
"""mkdir_p(path) - Make the "path" directory, if it does not exist; this
will also make directories for any missing parent directories."""
import errno
if not path or os.path.exists(path):
return
parent = os.path.dirname(path)
if parent != path:
mkdir_p(parent)
try:
os.mkdir(path)
except OSError as e:
# Ignore EEXIST, which may occur during a race condition.
if e.errno != errno.EEXIST:
raise
| apache-2.0 | 3,373,758,457,936,435,000 | 24.522727 | 77 | 0.643811 | false |
cqcarrow/tarrow | Client/Core/controller.py | 1 | 8459 | """ Create an set of stocks (input by symbol) and open communications
between them and the server via the Gateway.
Copyright (c) Cambridge Quantum Computing ltd. All rights reserved.
Licensed under the MIT License. See LICENSE file
in the project root for full license information.
"""
import sys
import datetime
import smtplib
import copy
import importlib
from .gateway import Gateway
from .pricebar import PriceBar
class Controller:
""" Responsible for managing a set of stocks in one process.
"""
def __init__(self, version, environment, global_settings, client_id, symbols):
self.version = version
self.environment = environment
self.gateway = Gateway()
self.account = self.gateway.getAccounts()[0]
self.stocks = {}
self.new_bars = {}
self.global_settings = global_settings
self.loadStocks(symbols)
if "reporter" not in global_settings:
global_settings["reporter"] = NullReporter()
self.reporter = global_settings["reporter"]
self.reporter.initiate(version, environment, client_id)
def loadStock(self, symbol, exchange, currency):
self.report("Getting stock {:s} from gateway".format(symbol))
return self.gateway.getStock(
self.account,
symbol,
exchange,
currency
)
def loadStocks(self, symbols):
for symbol in symbols:
stock_settings = self.getStockSettings(symbol)
stock = self.loadStock(
symbol,
stock_settings['exchange'],
stock_settings['currency']
)
for option_name in stock_settings:
if not hasattr(stock, option_name):
self.report(
"Warning: attribute {:s} hasn't got a default value".format(
option_name
)
)
setattr(stock, option_name, stock_settings[option_name])
self.report("Initiating signallers")
stock.signaller.initialise()
# now we store the stock object in self.stocks, referenced by its
# symbol.
self.stocks[symbol] = stock
def getStockSettings(self, symbol):
self.report("getting settings for {:s}".format(symbol))
# Create a shallow copy of settings so that settings can be overwritten by each stock.
stock_settings = copy.copy(self.global_settings)
self.report("after copy")
self.report("copied settings for {:s}".format(symbol))
self.report("loading version file for {:s}".format(symbol))
try:
# try to load the generic index file for the stock
version_file = importlib.import_module(
"Versions.{:s}.Stocks.{:s}.index".format(
self.version,
symbol
)
)
self.report("loaded version file for {:s}".format(symbol))
stock_settings.update(version_file.settings)
except Exception as error:
self.report("Exception type: ", error)
self.report("Stock file {:s} has no index.py file, continuing anyway".format(
symbol
))
self.report("loading version file for {:s}".format(symbol))
try:
# try to load the environment-specific file for the stock
environment_file = importlib.import_module(
"Versions.{:s}.Stocks.{:s}.{:s}".format(
self.version,
symbol,
self.environment
)
)
self.report("loaded environment file for {:s}".format(symbol))
stock_settings.update(environment_file.settings)
except Exception as error:
self.report("Exception type: ", error)
self.report("Stock file {:s} has no {:s}.py file, continuing anyway".format(
symbol,
self.environment
))
# verify that we have the exchange and currency. If not, then
# we don't have enough information to launch the client.
if "exchange" not in stock_settings:
raise ValueError(
"Stock {:s}'s exchange should be in it's index file or {:s} file".format(
symbol,
self.environment
)
)
elif "currency" not in stock_settings:
raise ValueError(
"Stock {:s}'s currency should be in it's index file or {:s} file".format(
symbol,
self.environment
)
)
self.report("loaded all settings for {:s}".format(symbol))
return stock_settings
def goLive(self):
# these are done in blocks rather than all-in-one, to allow separate stocks
# to get to the same stage before moving on
for symbol in self.stocks:
# load the stock data from 2012 to the current year
# (this also performs detection, classification, learning)
self.report("Loading Stock Data", symbol)
self.stocks[symbol].load()
for symbol in self.stocks:
self.stocks[symbol].analyse()
for symbol in self.stocks:
# Grab historical data from the stock. This is just in case
# this client starts up after the market opens or misses the previous
# day, etc.
self.report(
"Requesting historical data for stock '" + symbol + "'")
self.stocks[symbol].addHistoricalPriceBars(
self.gateway.getHistory(self.stocks[symbol])
)
for symbol in self.stocks:
# Subscribe to live market data
self.report("Requesting live data for stock '" + symbol + "'")
self.gateway.subscribeToMarketData(self.stocks[symbol])
self.gateway.finalise()
# Run the listening loop.
self.listen()
def getLogTag(self):
return "Controller"
def listen(self):
""" Run the main listening loop, handling responses from the gateway. """
while True:
# wait or the gateway to send us something
listen_input = self.gateway.listen()
if listen_input["Type"] == "Prepare for Live Bars":
# PriceBars are going to come in - store them all and process in bulk afterwards,
# so that the message queue isn't blocked
self.new_bars = {}
# We have received a live bar, pass it to the stock.
elif listen_input["Type"] == "Live Bar":
# Find the stock based on the returned live data request's ID
symbol = self.gateway.request_to_stock[listen_input['RequestID']]
stock = self.stocks[symbol]
# Pass the new bar to the stock
self.new_bars[symbol] = PriceBar(listen_input['Bar'])
elif listen_input["Type"] == "End of Live Bars":
self.report("All bars are in. Adding them to the stock...")
for symbol in self.new_bars:
self.stocks[symbol].addLivePriceBar(self.new_bars[symbol])
self.report("Ready to process!")
for symbol in self.new_bars:
self.stocks[symbol].processNewBar()
self.current_time = self.stocks[symbol].current_time
self.report("Done. Flushing signallers.")
for symbol in self.stocks:
self.stocks[symbol].signaller.flush()
self.reporter.newBars(self, self.current_time)
# now tell the Arrow Server that we are done processing, for bookkeeping purposes.
self.gateway.finalise()
elif listen_input["Type"] == "Server Exit":
self.report("Server has closed.")
self.report("Generating complete report.")
self.report("Trades:", sum(len(self.stocks[symbol].closed_trades) for symbol in self.stocks))
self.reporter.endOfDay(self)
sys.exit(0) | mit | 902,031,004,339,659,300 | 42.067708 | 109 | 0.554084 | false |
rickerbh/tictactoe_py | tests/ai_strategy_factory_tests.py | 1 | 1186 | from nose.tools import *
from tictactoe.ai_strategy_factory import AIStrategyFactory
from tictactoe.ai_strategies.hard import Hard
from tictactoe.ai_strategies.easy import Easy
def factory_returns_hard_strategy_test():
factory = AIStrategyFactory()
strategy = factory.strategy("Hard", "X", "O")
assert isinstance(strategy, Hard)
def factory_returns_easy_strategy_test():
factory = AIStrategyFactory()
strategy = factory.strategy("Easy", "X", "O")
assert isinstance(strategy, Easy)
def factory_returns_easy_strategy_case_insensitive_test():
factory = AIStrategyFactory()
strategy = factory.strategy("eAsY", "X", "O")
assert isinstance(strategy, Easy)
def factory_handles_bad_strategy_test():
factory = AIStrategyFactory()
ex = None
try:
strategy = factory.strategy("NoStrategyHereSorry", None, None)
except Exception as e:
ex = e
assert isinstance(ex, ValueError)
def factory_handles_no_strategy_test():
factory = AIStrategyFactory()
ex = None
try:
strategy = factory.strategy(None, None, None)
except Exception as e:
ex = e
assert isinstance(ex, ValueError)
| mit | 1,108,111,440,951,364,200 | 27.926829 | 70 | 0.689713 | false |
stevearc/python-pike | pike/env.py | 1 | 15498 | """ Environments for running groups of graphs. """
import os
import time
from datetime import datetime
import copy
import logging
import six
import tempfile
import threading
from six.moves import cPickle as pickle # pylint: disable=F0401
from .exceptions import StopProcessing
from .items import FileMeta
from .nodes import (ChangeListenerNode, ChangeEnforcerNode, CacheNode, Edge,
NoopNode)
from .sqlitedict import SqliteDict
from .util import resource_spec
LOG = logging.getLogger(__name__)
def commit(cache):
""" Commit if SqliteDict, else do nothing. """
try:
cache.commit()
except AttributeError:
pass
def watch_graph(graph, partial=False, cache=None, fingerprint='md5'):
"""
Construct a copy of a graph that will watch source nodes for changes.
Parameters
----------
graph : :class:`~pike.Graph`
partial : bool, optional
If True, the :class:`~pike.ChangeListenerNode` will only propagate
changed files and the graph will rely on a :class:`~pike.CacheNode` to
produce the total output.
cache : str, optional
If present, cache the file fingerprints and other data in this file.
fingerprint : str or callable, optional
The method to use for fingerprinting files when ``watch=True``. See
:class:`~pike.nodes.watch.ChangeListenerNode` for details. (default
'md5')
"""
new_graph = copy.deepcopy(graph)
with new_graph:
# If we only pass through the changed files, we'll need a CacheNode at
# the end
if partial:
sink = CacheNode(cache, new_graph.name + '_cache')
new_graph.sink.connect(sink, '*', '*')
enforcer = ChangeEnforcerNode()
for i, node in enumerate(new_graph.source_nodes()):
# Find the outbound edge of the source
if node.eout:
edge = node.eout[0]
edge_index = edge.n2.ein.index(edge)
edge.remove()
else:
# If source has no outbound edge, make one.
edge = Edge(n2=NoopNode())
edge_index = 0
# Funnel files through a change listener
key = new_graph.name + '_listen_' + str(i)
listener = ChangeListenerNode(stop=False, cache=cache, key=key,
fingerprint=fingerprint)
node.connect(listener)
# Create a fan-in, fan-out with the changed files that goes through
# a ChangeEnforcer. That way processing will continue even if only
# one of the sources has changed files.
listener.connect(enforcer, input_name=str(i))
if not partial:
listener.connect(enforcer, output_name='all', input_name=str(i)
+ '_all')
if edge.input_name == '*':
edge.input_name = None
input_name = edge.input_name
new_edge = enforcer.connect(edge.n2, str(i), input_name)
# Preserve edge ordering to preserve argument ordering
edge.n2.ein.remove(new_edge)
edge.n2.ein.insert(edge_index, new_edge)
return new_graph
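# Example (assumption: illustrative sketch; 'my_graph' is a hypothetical pike.Graph):
#   watched = watch_graph(my_graph, partial=True, cache='pike-cache.db')
#   watched.run()  # only sources whose fingerprints changed propagate downstream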
class IExceptionHandler(object):
"""
Interface for exception handlers.
This class can intercept exceptions raised while running a graph in an
environment and perform some processing.
"""
def handle_exception(self, graph, exc, node):
"""
Handle an exception.
Parameters
----------
graph : :class:`~pike.graph.Graph`
exc : :class:`Exception`
node : :class:`~pike.nodes.base.Node`
Returns
-------
handled : bool
If True, the Environment will not raise the exception
"""
raise NotImplementedError
def apply_error_style(node, error_style):
"""
Apply error styles to a graph.
Parameters
----------
node : :class:`~pike.nodes.base.Node`
The node that threw the exception
error_style : dict
The styles to apply to nodes and edges involved in the traceback.
Returns
-------
style : dict
Style dict for passing to :meth:`pike.graph.Graph.dot`.
"""
styles = {}
for node in node.walk_up(True):
styles[node] = error_style
for edge in node.ein:
styles[edge] = error_style
return styles
class RenderException(IExceptionHandler):
"""
Render traceback as a png in a directory.
Parameters
----------
output_dir : str, optional
Directory to render exception into (defaults to temporary directory)
error_style : dict, optional
Dict of attributes to apply to nodes and edges involved in the
traceback (default {'color': 'red'}).
"""
def __init__(self, output_dir=None, error_style=None):
super(RenderException, self).__init__()
self.error_style = error_style or {'color': 'red'}
if output_dir is None:
self.output_dir = tempfile.gettempdir()
else:
self.output_dir = output_dir
def handle_exception(self, graph, exc, node):
filename = 'exc_%s.png' % datetime.now().isoformat()
fullpath = os.path.join(self.output_dir, filename)
styles = apply_error_style(node, self.error_style)
graph.render(fullpath, style=styles)
LOG.error("Exception rendered as %s", fullpath)
class ShowException(IExceptionHandler):
"""
When an exception occurs, this will auto-open the visual traceback.
Parameters
----------
error_style : dict, optional
Dict of attributes to apply to nodes and edges involved in the
traceback (default {'color': 'red'}).
**kwargs : dict, optional
These will be passed to :meth:`~pike.graph.Graph.show`
"""
def __init__(self, error_style=None, show_kwargs=None):
super(ShowException, self).__init__()
self.error_style = error_style or {'color': 'red'}
self.show_kwargs = show_kwargs or {}
def handle_exception(self, graph, exc, node):
styles = apply_error_style(node, self.error_style)
graph.show(style=styles, **self.show_kwargs)
class Environment(object):
"""
Environment for running multiple Graphs and caching the results.
Parameters
----------
watch : bool, optional
If True, watch all graphs for changes in the source files and rerun
them if changes are detected (default False)
cache : str, optional
The sqlite file to use as a persistent cache (defaults to in-memory
dict)
fingerprint : str or callable, optional
The method to use for fingerprinting files when ``watch=True``. See
:class:`~pike.nodes.watch.ChangeListenerNode` for details. (default
'md5')
exception_handler : :class:`~.IExceptionHandler`, optional
When running a graph throws an exception, this handler will do
something useful, like rendering a graph that visually shows you where
the error happened.
Notes
-----
"""
def __init__(self,
watch=False,
cache=None,
fingerprint='md5',
exception_handler=None,
):
self._fingerprint = fingerprint
self._graphs = {}
self._cache_file = cache
if cache is not None:
self._cache = SqliteDict(cache, 'processed', autocommit=False,
synchronous=0)
self._gen_files = SqliteDict(cache, 'file_paths', autocommit=False,
synchronous=0)
else:
self._cache = {}
self._gen_files = {}
self.default_output = None
self.watch = watch
self._exc_handler = exception_handler
def add(self, graph, ignore_default_output=False, partial=False):
"""
Add a graph to the Environment.
Parameters
----------
graph : :class:`~pike.Graph`
The graph to add
ignore_default_output : bool, optional
If True, will *not* run the ``default_output`` graph on the output
of this graph (default False)
partial : bool, optional
This argument will be passed to :meth:`~.watch_graph`
"""
name = graph.name
if name in self._graphs:
raise KeyError("Graph '%s' already exists in environment!" %
graph.name)
if self.default_output is not None and not ignore_default_output:
wrapper = copy.deepcopy(graph)
wrapper.name += '-wrapper'
with wrapper:
edge = wrapper.sink.connect(self.default_output, '*', '*')
graph = wrapper
if self.watch:
graph = watch_graph(graph, partial, self._cache_file,
self._fingerprint)
self._graphs[name] = graph
def set_default_output(self, graph):
"""
Set a default operation to be run after every graph.
By default, every time you :meth:`~.add` a Graph, that Graph will have
this process tacked on to the end. This can be used to do common
operations, such as writing files or generating urls.
Parameters
----------
graph : :class:`~pike.Graph` or :class:`~pike.Node`
The graph to run after other graphs.
"""
self.default_output = graph
def get(self, name):
""" Get the cached results of a graph. """
return self._cache.get(name)
def save(self, filename):
""" Saved the cached asset metadata to a file """
self.run_all(True)
with open(filename, 'wb') as ofile:
pickle.dump(dict(self._cache), ofile)
def load(self, filename):
""" Load cached asset metadata from a file """
with open(filename, 'rb') as ifile:
self._cache = pickle.load(ifile)
def run(self, name, bust=False):
"""
Run a graph and cache the result.
Returns the cached result if one exists.
Parameters
----------
name : str
Name of the graph to run
bust : bool, optional
If True, will ignore the cache and rerun (default False)
Returns
-------
results : dict
Same output as the graph
"""
if bust or self.watch or name not in self._cache:
LOG.debug("Running %s", name)
try:
start = time.time() * 1000
results = self._graphs[name].run()
elapsed = int(time.time() * 1000 - start)
LOG.info("Ran %s in %d ms", name, elapsed)
for items in six.itervalues(results):
for item in items:
if isinstance(item, FileMeta):
# Remove data to save memory
if hasattr(item, 'data'):
del item.data
self._gen_files[item.filename] = item.fullpath
commit(self._gen_files)
self._cache[name] = results
commit(self._cache)
except StopProcessing:
LOG.debug("No changes for %s", name)
except Exception as e:
if hasattr(e, 'node') and self._exc_handler is not None:
LOG.error("Exception at node %s", e.node)
graph = self._graphs[name]
ret = False
try:
ret = self._exc_handler.handle_exception(graph, e,
e.node)
except Exception:
LOG.exception("Error while handling exception")
if not ret:
raise e
else:
raise
return self._cache.get(name)
def run_all(self, bust=False):
""" Run all graphs. """
for name in self._graphs:
self.run(name, bust)
def clean(self, directory, dry_run=False):
"""
Remove all files in a directory that were not generated by the env
Parameters
----------
directory : str
The location to look for unnecessary files
dry_run : bool, optional
If True, will not actually delete the files (default False)
Returns
-------
removed : list
List of file paths that were deleted by the operation
Raises
------
exc : :class:`~ValueError`
If there are no known generated files. That would delete all files
in the directory, which is probably not the intended behavior.
"""
if not self._gen_files:
raise ValueError("No generated files found. Have you run "
"`run_all()`?")
directory = resource_spec(directory)
all_files = set()
for fullpath in six.itervalues(self._gen_files):
all_files.add(os.path.abspath(fullpath))
removed = []
for root, _, files in os.walk(directory):
for filename in files:
fullpath = os.path.abspath(os.path.join(root, filename))
if fullpath not in all_files:
removed.append(fullpath)
if not dry_run:
LOG.info("Removing %s", fullpath)
os.remove(fullpath)
return removed
def run_forever(self, sleep=2, daemon=False, daemon_proc=False):
"""
Rerun graphs forever, busting the env cache each time.
This is generally only useful if ``watch=True``.
Parameters
----------
sleep : int, optional
How long to sleep between runs. Default 2 seconds.
daemon : bool, optional
If True, will run in a background thread (default False)
daemon_proc : bool, optional
If True, will run in a child process (default False)
"""
if daemon and daemon_proc:
raise TypeError("daemon and daemon_proc cannot both be True")
if daemon:
thread = threading.Thread(target=self.run_forever,
kwargs={'sleep': sleep})
thread.daemon = True
thread.start()
return thread
elif daemon_proc:
pid = os.fork()
if pid != 0:
return pid
while True:
try:
self.run_all(bust=True)
except KeyboardInterrupt:
break
except Exception:
LOG.exception("Error while running forever!")
time.sleep(sleep)
def lookup(self, path):
"""
Get a generated asset path
Parameters
----------
path : str
Relative path of the asset
Returns
-------
path : str or None
Absolute path of the generated asset (if it exists). If the path is
known to be invalid, this value will be None.
"""
if path not in self._gen_files:
return None
fullpath = self._gen_files[path]
return fullpath
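# Example usage (assumption: illustrative sketch, not part of the original module):
#   env = Environment(watch=True, cache='pike-cache.db')
#   env.add(my_graph)              # 'my_graph' is a hypothetical pike.Graph
#   env.run_all()                  # run every registered graph once
#   env.run_forever(daemon=True)   # keep rerunning in a background thread on changes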
| mit | -6,384,054,855,440,314,000 | 32.115385 | 79 | 0.552975 | false |
HRODEV/Frequency | Frequency/Board/ActionPanel/ActionPanel.py | 1 | 16633 | import Game
from Board.ActionPanel.ArrowItem import *
from Board.ActionPanel.BuyUnitItems import *
from GameLogic.Map import Tile
from GameLogic.Unit import Soldier
from GameLogic.UnitFactory import getUnitPrice
from Helpers import Colors
from Helpers.EventHelpers import EventExist
from Vector2 import Vector2
class ActionPanel:
def __init__(self, game: Game, tile: Tile = None, endturnButtonRect=None, newSelection=None):
self.Size = Vector2((game.Settings.Resolution.X - game.Settings.GetMapSize().X) // 2,
game.Settings.Resolution.Y)
self.Position = Vector2(0, 0)
self.Tile = tile
self.EndturnButtonRect = endturnButtonRect
self.NewSelection = newSelection
self.Map = None
self.EndTurnButtonImage = pygame.transform.scale(
pygame.image.load('images/buttons/endturnButton.png').convert_alpha(), [150, 25])
game.Settings.SetMenuLeftSize(self.Size)
def Update(self, game: Game) -> 'ActionPanel':
# End turn
if self.EndturnButtonIsClickedByMouse(game):
game.Logic.EndTurn()
return DefaultActionPanel(game)
return ActionPanel(game, self.Tile, self.EndturnButtonRect)
def Draw(self, game: Game):
font = pygame.font.Font(None, 30)
font.set_bold(True)
# Draw the background
pygame.draw.rect(game.Settings.GetScreen(), Colors.WHITE,
(self.Position.X, self.Position.Y, self.Size.X, self.Size.Y))
game.Settings.GetScreen().blit(font.render("Action panel", True, Colors.BLACK), (10, 10))
# Draw end turn button
self.EndturnButtonRect = game.Settings.GetScreen().blit(self.EndTurnButtonImage,
(10, game.Settings.Resolution.Y - 50))
def EndturnButtonIsHoverdByMouse(self):
return self.EndturnButtonRect is not None and self.EndturnButtonRect.collidepoint(pygame.mouse.get_pos())
def EndturnButtonIsClickedByMouse(self, game):
return self.EndturnButtonIsHoverdByMouse() and EventExist(game.Events, pygame.MOUSEBUTTONUP)
class DefaultActionPanel(ActionPanel):
def Update(self, game: Game):
nself = super().Update(game)
return DefaultActionPanel(game, self.Tile, nself.EndturnButtonRect)
def Draw(self, game: Game):
super().Draw(game)
font = pygame.font.Font(None, 20)
game.Settings.GetScreen().blit(font.render("Default", True, Colors.BLACK), (10, 35))
game.Settings.GetScreen().blit(font.render("Choose an tile or end the turn",
True, Colors.BLACK), (10, 55))
class SimpleTextButton:
def __init__(self, text, position):
self._text = text
self._position = position
self.clicked = False
self.rect = None
def Draw(self, screen):
font = pygame.font.Font(None, 20)
textColor = Colors.RED if self.clicked else Colors.BLACK
if self.IsHoverdByMouse():
self.rect = screen.blit(font.render(self._text, True, textColor, Colors.DIMGREY), self._position)
else:
self.rect = screen.blit(font.render(self._text, True, textColor), self._position)
def IsHoverdByMouse(self):
return self.rect is not None and self.rect.collidepoint(pygame.mouse.get_pos())
def IsClickedByMouse(self, game):
return self.IsHoverdByMouse() and EventExist(game.Events, pygame.MOUSEBUTTONUP)
class UnitActionPanel(ActionPanel):
def __init__(self, game: Game, tile: Tile = None, endturnButtonRect=None, buttons=None, newSelection=None,
_barackButton=None, _moveButton=None, _moveUnitFromBoatButton=None):
super().__init__(game, tile, endturnButtonRect, newSelection)
self._barrackButton = _barackButton if _barackButton is not None else SimpleTextButton("Buy Barrack", (10, 100))
if _moveButton is not None:
self._moveButton = _moveButton
else:
self._moveButton = SimpleTextButton("Move Unit", (10, 130))
self._moveButton.clicked = True
self._moveUnitFromBoatButton = _moveUnitFromBoatButton if _moveUnitFromBoatButton is not None \
else SimpleTextButton("move Unit to land", (10, 160))
if buttons is not None:
self.Buttons = buttons
else:
import GameLogic.MapHelpers
self.Buttons = []
for pos in GameLogic.MapHelpers.getAroundingTiles(tile, game.Logic.Map):
if pos.Position.X == tile.Position.X + 1 and pos.Position.Y == tile.Position.Y:
self.Buttons.append(ArrowButtonRight(Vector2(40, 0)))
elif pos.Position.X == tile.Position.X + 1 and pos.Position.Y == tile.Position.Y + 1:
self.Buttons.append(ArrowButtonDownRight(Vector2(40, 40)))
elif pos.Position.X == tile.Position.X and pos.Position.Y == tile.Position.Y + 1:
self.Buttons.append(ArrowButtonDown(Vector2(0, 40)))
elif pos.Position.X == tile.Position.X - 1 and pos.Position.Y == tile.Position.Y + 1:
self.Buttons.append(ArrowButtonDownLeft(Vector2(-40, 40)))
elif pos.Position.X == tile.Position.X - 1 and pos.Position.Y == tile.Position.Y:
self.Buttons.append(ArrowButtonLeft(Vector2(-40, 0)))
elif pos.Position.X == tile.Position.X - 1 and pos.Position.Y == tile.Position.Y - 1:
self.Buttons.append(ArrowButtonUpLeft(Vector2(-40, -40)))
elif pos.Position.X == tile.Position.X and pos.Position.Y == tile.Position.Y - 1:
self.Buttons.append(ArrowButtonUp(Vector2(0, -40)))
elif pos.Position.X == tile.Position.X + 1 and pos.Position.Y == tile.Position.Y - 1:
self.Buttons.append(ArrowButtonUpRight(Vector2(40, -40)))
def Update(self, game: Game):
nself = super().Update(game)
if type(nself) is DefaultActionPanel:
return nself
if self._barrackButton.IsClickedByMouse(game) or self._moveButton.IsClickedByMouse(game) \
or self._moveUnitFromBoatButton.IsClickedByMouse(game):
if self._moveButton.IsClickedByMouse(game):
self._moveButton.clicked = True
self._barrackButton.clicked = False
self._moveUnitFromBoatButton.clicked = False
elif self._barrackButton.IsClickedByMouse(game):
self._barrackButton.clicked = True
self._moveButton.clicked = False
self._moveUnitFromBoatButton.clicked = False
else:
self._moveUnitFromBoatButton.clicked = True
self._barrackButton.clicked = False
self._moveButton.clicked = False
clickedButton = next((btn for btn in self.Buttons if btn.IsClickedByMouse(game)), None)
if self._moveButton.clicked:
if clickedButton is not None:
self.Tile.Unit.MoveTo(game.Logic.Map.GetTile(clickedButton.GetDestinationPosition(self.Tile.Position)))
return UnitActionPanel(game, self.Tile, nself.EndturnButtonRect, None,
clickedButton.GetDestinationPosition(self.Tile.Position))
elif self._barrackButton.clicked:
if clickedButton is not None:
barrack = game.Logic.BuyBarrack(
game.Logic.Map.GetTile(clickedButton.GetDestinationPosition(self.Tile.Position)))
if barrack is not None:
return BarrackActionPanel(game, game.Logic.Map.GetTile(
clickedButton.GetDestinationPosition(self.Tile.Position)))
else:
if clickedButton is not None:
self.Tile.Unit.Unit.MoveTo(
game.Logic.Map.GetTile(clickedButton.GetDestinationPosition(self.Tile.Position)))
self.Tile.Unit.Unit = None
return UnitActionPanel(game, self.Tile, nself.EndturnButtonRect, None,
clickedButton.GetDestinationPosition(self.Tile.Position))
return UnitActionPanel(game, self.Tile, nself.EndturnButtonRect, self.Buttons, None, self._barrackButton,
self._moveButton, self._moveUnitFromBoatButton)
def Draw(self, game: Game):
super().Draw(game)
screen = game.Settings.GetScreen()
font = pygame.font.Font(None, 20)
game.Settings.GetScreen().blit(font.render("Unit actions", True, Colors.BLACK), (10, 35))
screen.blit(font.render("Choose you actions with the unit",
True, Colors.BLACK), (10, 55))
screen.blit(font.render("Attack points: %i" % self.Tile.Unit.AttackPoints, True, Colors.BLACK), (10, 190))
screen.blit(font.render("Defense points: %i" % self.Tile.Unit.DefencePoints, True, Colors.BLACK), (10, 210))
# choose between buy a barrack or move the unit
self._barrackButton.Draw(screen)
self._moveButton.Draw(screen)
if type(self.Tile.Unit) is Boat:
self._moveUnitFromBoatButton.Draw(screen)
# Draw the Arrow Buttons
for arrowButton in self.Buttons:
arrowButton.Draw(game)
class BarrackActionPanel(ActionPanel):
def __init__(self, game: Game, tile: Tile = None, endturnButtonRect=None, buttons=None, buyUnits=None):
super().__init__(game, tile, endturnButtonRect)
if buttons is not None:
self.Buttons = buttons
else:
import GameLogic.MapHelpers
self.Buttons = []
for pos in GameLogic.MapHelpers.getAroundingTiles(tile, game.Logic.Map):
if pos.Position.X == tile.Position.X + 1 and pos.Position.Y == tile.Position.Y:
self.Buttons.append(ArrowButtonRight(Vector2(40, 0)))
elif pos.Position.X == tile.Position.X + 1 and pos.Position.Y == tile.Position.Y + 1:
self.Buttons.append(ArrowButtonDownRight(Vector2(40, 40)))
elif pos.Position.X == tile.Position.X and pos.Position.Y == tile.Position.Y + 1:
self.Buttons.append(ArrowButtonDown(Vector2(0, 40)))
elif pos.Position.X == tile.Position.X - 1 and pos.Position.Y == tile.Position.Y + 1:
self.Buttons.append(ArrowButtonDownLeft(Vector2(-40, 40)))
elif pos.Position.X == tile.Position.X - 1 and pos.Position.Y == tile.Position.Y:
self.Buttons.append(ArrowButtonLeft(Vector2(-40, 0)))
elif pos.Position.X == tile.Position.X - 1 and pos.Position.Y == tile.Position.Y - 1:
self.Buttons.append(ArrowButtonUpLeft(Vector2(-40, -40)))
elif pos.Position.X == tile.Position.X and pos.Position.Y == tile.Position.Y - 1:
self.Buttons.append(ArrowButtonUp(Vector2(0, -40)))
elif pos.Position.X == tile.Position.X + 1 and pos.Position.Y == tile.Position.Y - 1:
self.Buttons.append(ArrowButtonUpRight(Vector2(40, -40)))
if buyUnits is not None:
self.BuyUnits = buyUnits
else:
self.BuyUnits = []
self.BuyUnits.append(SoldierButton(Vector2(0, 100), game.Logic.PlayingPlayer.Character.Id))
self.BuyUnits.append(RobotButton(Vector2(1, 100), game.Logic.PlayingPlayer.Character.Id))
self.BuyUnits.append(TankButton(Vector2(2, 100), game.Logic.PlayingPlayer.Character.Id))
self.BuyUnits.append(BoatButton(Vector2(3, 100), game.Logic.PlayingPlayer.Character.Id))
def Update(self, game: Game):
nself = super().Update(game)
if type(nself) is DefaultActionPanel:
return nself
clickedArrowButton = next((btn for btn in self.Buttons if btn.IsClickedByMouse(game)), None)
clickedUnitButton = next((btn for btn in self.BuyUnits if btn.IsClickedByMouse(game)), None)
if clickedUnitButton is not None:
for btn in self.BuyUnits:
btn.clicked = False
clickedUnitButton.clicked = True
clickedUnitButton = next((btn for btn in self.BuyUnits if btn.clicked), None)
if clickedUnitButton is not None and clickedArrowButton is not None:
game.Logic.BuyUnit(
clickedUnitButton.GetUnitType(),
game.Logic.Map.GetTile(clickedArrowButton.GetDestinationPosition(self.Tile.Position))
)
return BarrackActionPanel(game, self.Tile, nself.EndturnButtonRect, self.Buttons, self.BuyUnits)
def Draw(self, game: Game):
super().Draw(game)
screen = game.Settings.GetScreen()
font = pygame.font.Font(None, 20)
screen.blit(font.render("Barrack actions", True, Colors.BLACK), (10, 35))
screen.blit(font.render("Choose you actions with the Barrack",
True, Colors.BLACK), (10, 55))
screen.blit(font.render("Defence Points: %i" % self.Tile.Building.DefencePoints, True, Colors.BLACK), (10, 75))
# Draw the Arrow Buttons
for arrowButton in self.Buttons:
arrowButton.Draw(game)
# Draw the Buy Unit Buttons
for unitBuyButton in self.BuyUnits:
unitBuyButton.Draw(game)
current_money = game.Logic.PlayingPlayer.Money
# Draw the price of the units and check if the user can buy the unit
if current_money >= getUnitPrice(Soldier, self.Tile.Building.Owner.Character):
# Soldier
screen.blit(font.render('ƒ ' + str(getUnitPrice(Soldier, self.Tile.Building.Owner.Character)),
True, Colors.BLACK), (15, 150))
else:
screen.blit(font.render('ƒ ' + str(getUnitPrice(Soldier, self.Tile.Building.Owner.Character)),
True, Colors.RED), (15, 150))
if current_money >= getUnitPrice(Robot, self.Tile.Building.Owner.Character):
# Robot
screen.blit(font.render('ƒ ' + str(getUnitPrice(Robot, self.Tile.Building.Owner.Character)),
True, Colors.BLACK), (73, 150))
else:
screen.blit(font.render('ƒ ' + str(getUnitPrice(Robot, self.Tile.Building.Owner.Character)),
True, Colors.RED), (73, 150))
if current_money >= getUnitPrice(Tank, self.Tile.Building.Owner.Character):
# Tank
screen.blit(font.render('ƒ ' + str(getUnitPrice(Tank, self.Tile.Building.Owner.Character)),
True, Colors.BLACK), (131, 150))
else:
screen.blit(font.render('ƒ ' + str(getUnitPrice(Tank, self.Tile.Building.Owner.Character)),
True, Colors.RED), (131, 150))
if current_money >= getUnitPrice(Boat, self.Tile.Building.Owner.Character):
# Boat
screen.blit(font.render('ƒ ' + str(getUnitPrice(Boat, self.Tile.Building.Owner.Character)),
True, Colors.BLACK), (189, 150))
else:
screen.blit(font.render('ƒ ' + str(getUnitPrice(Boat, self.Tile.Building.Owner.Character)),
True, Colors.RED), (189, 150))
class InfoActionTile(ActionPanel):
def Update(self, game: Game):
nself = super().Update(game)
if type(nself) is DefaultActionPanel:
return nself
return InfoActionTile(game, self.Tile, nself.EndturnButtonRect)
def Draw(self, game: Game):
super().Draw(game)
font = pygame.font.Font(None, 20)
game.Settings.GetScreen().blit(font.render("Tile Info", True, Colors.BLACK), (10, 35))
game.Settings.GetScreen().blit(font.render("Here you can find info about the tile",
True, Colors.BLACK), (10, 55))
if self.Tile.Building is not None:
game.Settings.GetScreen().blit(
font.render("Defence Points: %i" % self.Tile.Building.DefencePoints, True, Colors.BLACK), (10, 75))
if self.Tile.Unit is not None:
game.Settings.GetScreen().blit(
font.render("Attack points: %i" % self.Tile.Unit.AttackPoints, True, Colors.BLACK), (10, 190))
game.Settings.GetScreen().blit(
font.render("Defense points: %i" % self.Tile.Unit.DefencePoints, True, Colors.BLACK), (10, 210)) | mit | 4,173,592,840,113,008,000 | 48.183432 | 120 | 0.613307 | false |
aabdulwahed/cloudpots | agent/client/container/services.py | 1 | 1693 | import simplejson as json
from docker import Client
_DOCKER_BASEURL_= 'unix://var/run/docker.sock'
_PORTS_ = [2200]
class ContainerEngine():
def __init__(self):
self.ports = [22]
def newClient(self,base_url=_DOCKER_BASEURL_):
return Client(base_url)
def createContainer(self,client,
image_id,
command=None,
mem_limit=None,
cpu_shares=None,
private_ports = []):
"""Initiate and Create Container"""
return client.create_container(image_id,
command,
detach=True,
ports = private_ports ,
mem_limit=mem_limit,
cpu_shares=cpu_shares)
def startContainer(self,client, container_id, container_endpoints={}):
"""Start Container"""
return client.start(container_id, port_bindings = container_endpoints)
def searchImage(self, client, image_id):
"""Search Public Image Repo -- docker hub"""
try:
return client.search(image_id)[0]
except:
return {'ERROR':'Image is not found'}
def pullImage(self, client, image_id):
"""Pull Image from Public Repo"""
try:
for line in client.pull(image_id, stream=True):
print(json.dumps(json.loads(line), indent=4))
return self.list_image(client,image=image_id)
except:
return {'ERROR':'Unable to pull image with a record id "%s" from docker hub!!'%(image_id)}
def list_images(self, client, image=None):
"""list local repo images"""
if image != None:
return client.images(image=image)
return client.images()
def removeContainer(self, client, container):
"""stop container then remove the stopped one"""
try:
client.stop(container)
except:
pass
try:
client.remove_container(container)
except:
return False
return True
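# Example usage (assumption: illustrative sketch; image name and command are hypothetical):
#   engine = ContainerEngine()
#   client = engine.newClient()
#   engine.pullImage(client, 'ubuntu')
#   container = engine.createContainer(client, 'ubuntu', command='sleep 60')
#   engine.startContainer(client, container['Id'])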
| apache-2.0 | -1,505,548,805,681,197,800 | 22.513889 | 93 | 0.677496 | false |
MuckRock/muckrock | muckrock/foiamachine/urls.py | 1 | 3617 | """
FOIA Machine urls
"""
# Django
from django.conf import settings
from django.conf.urls import include, url
from django.views.defaults import page_not_found, server_error
from django.views.generic import RedirectView, TemplateView
from django.views.static import serve
# Third Party
import debug_toolbar
# MuckRock
from muckrock.accounts import views as account_views
from muckrock.agency.urls import agency_url
from muckrock.agency.views import AgencyAutocomplete
from muckrock.foiamachine import views
from muckrock.jurisdiction.urls import jur_url
from muckrock.jurisdiction.views import JurisdictionAutocomplete
def handler404(request, exception):
"""404 handler"""
return page_not_found(request, exception, template_name="foiamachine/404.html")
def handler500(request):
"""500 handler"""
return server_error(request, template_name="foiamachine/500.html")
urlpatterns = [
url(r"^$", views.Homepage.as_view(), name="index"),
url(
r"^accounts/signup/$",
RedirectView.as_view(
url=settings.SQUARELET_URL + "/accounts/signup/?intent=foiamachine"
),
name="signup",
),
url(r"^accounts/login/$", views.LoginView.as_view(), name="login"),
url(r"^accounts/logout/$", views.account_logout, name="acct-logout"),
url(r"^accounts/profile/$", views.Profile.as_view(), name="profile"),
url(
r"^foi/create/$",
views.FoiaMachineRequestCreateView.as_view(),
name="foi-create",
),
url(
r"^foi/(?P<slug>[\w-]+)-(?P<pk>\d+)/$",
views.FoiaMachineRequestDetailView.as_view(),
name="foi-detail",
),
url(
r"^foi/(?P<slug>[\w-]+)-(?P<pk>\d+)/update/$",
views.FoiaMachineRequestUpdateView.as_view(),
name="foi-update",
),
url(
r"^foi/(?P<slug>[\w-]+)-(?P<pk>\d+)/delete/$",
views.FoiaMachineRequestDeleteView.as_view(),
name="foi-delete",
),
url(
r"^foi/(?P<slug>[\w-]+)-(?P<pk>\d+)/share/$",
views.FoiaMachineRequestShareView.as_view(),
name="foi-share",
),
url(
r"^foi/(?P<foi_slug>[\w-]+)-(?P<foi_pk>\d+)/comms/create/$",
views.FoiaMachineCommunicationCreateView.as_view(),
name="comm-create",
),
url(
r"^foi/(?P<foi_slug>[\w-]+)-(?P<foi_pk>\d+)/comms/(?P<pk>\d+)/update/$",
views.FoiaMachineCommunicationUpdateView.as_view(),
name="comm-update",
),
url(
r"^foi/(?P<foi_slug>[\w-]+)-(?P<foi_pk>\d+)/comms/(?P<pk>\d+)/delete/$",
views.FoiaMachineCommunicationDeleteView.as_view(),
name="comm-delete",
),
url(r"^agency/%s/$" % agency_url, views.agency_detail, name="agency-detail"),
url(
r"^jurisdiction/%s/$" % jur_url,
views.jurisdiction_detail,
name="jurisdiction-detail",
),
url(
r"^agency-autocomplete/$",
AgencyAutocomplete.as_view(),
name="agency-autocomplete",
),
url(
r"^jurisdiction-autocomplete/$",
JurisdictionAutocomplete.as_view(),
name="jurisdiction-autocomplete",
),
url(r"^__debug__/", include(debug_toolbar.urls)),
url(r"^accounts/", include("social_django.urls", namespace="social")),
url(r"^rp_iframe/$", account_views.rp_iframe, name="acct-rp-iframe"),
]
if settings.DEBUG:
urlpatterns += [
url(r"^media/(?P<path>.*)$", serve, {"document_root": settings.MEDIA_ROOT}),
url(r"^500/$", TemplateView.as_view(template_name="foiamachine/500.html")),
url(r"^404/$", TemplateView.as_view(template_name="foiamachine/404.html")),
]
| agpl-3.0 | -3,732,254,894,878,999,000 | 31.294643 | 84 | 0.614598 | false |
epeios-q37/epeios | other/exercises/Hangman/fr/k.py | 1 | 1586 | # coding: utf-8
import sys
sys.path.append(".")
from workshop.fr.k import *
DIVULGUER_MOT_SECRET = VRAI
def choisirMot(*args):
return workshop.rfPickWord(*args)
def lettreEstDansMot(*args):
return workshop.rfIsLetterInWord(*args)
def donnerMasque(*args):
return workshop.rfGetMask(*args)
def majCorps(*args):
return workshop.rfUpdateBody(*args)
"""
Ajout de la gestion du mot à deviner ('motSecret').
"""
class Pendu:
def raz(self,suggestion,motAuHasard):
self.motSecret = choisirMot(suggestion,motAuHasard)
self.bonnesPioches = ""
self.nombreErreurs = 0
def __init__(self):
self.motSecret = ""
self.bonnesPioches = ""
self.nombreErreurs = 0
def traiterEtTesterPioche(self,pioche):
if lettreEstDansMot(pioche,self.motSecret):
if not lettreEstDansMot(pioche,self.bonnesPioches):
self.bonnesPioches += pioche
return VRAI
else:
self.nombreErreurs += 1
return FAUX
"""
Utilisation du mot à deviner stocké dans 'pendu' ('pendu.motSecret').
Divulgation ou non du mot à deviner selon configuration.
"""
def raz(pendu,suggestion,motAuHasard):
pendu.raz(suggestion,motAuHasard)
print(pendu.motSecret)
afficher(donnerMasque(pendu.motSecret,""))
if DIVULGUER_MOT_SECRET:
divulguerMotSecret( pendu.motSecret )
def traiterPioche(pendu,pioche):
if pendu.traiterEtTesterPioche(pioche):
afficher(donnerMasque(pendu.motSecret,pendu.bonnesPioches))
else:
majCorps(pendu.nombreErreurs)
go(globals())
| agpl-3.0 | 6,059,636,515,371,799,000 | 20.927536 | 69 | 0.684576 | false |
gorantornqvist/nagios-plugins | check_freenas.py | 1 | 5941 | #!/usr/bin/env python
# The MIT License (MIT)
# Copyright (c) 2015 Goran Tornqvist
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ******************************************************************************
# check_freenas.py - Simple script for monitoring freenas status and replication
# PS: you may want to check out this script as well:
# https://github.com/Doctor-love/check_freenas_api/blob/master/check_freenas_api.py
#
#
# Tip: To ignore capacity warnings which are set quite low, change these rows in check_alerts():
# if alert['level'] != 'OK':
# if alert['message'].find('capacity for the volume') == -1:
# errors = errors + 1
#
# Troubleshooting: If you receive an error from the script, make sure you can access the api of your freenas using a web browser.
# Example: http://freenas/api/v1.0/system/alert/?format=json (login: root)
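#
# Example invocation (assumption: host name and password are placeholders):
#   ./check_freenas.py -H freenas.local -u root -p secret -t alerts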
import argparse
import json
import sys
import string
import requests
class Startup(object):
def __init__(self, hostname, user, secret):
self._hostname = hostname
self._user = user
self._secret = secret
self._ep = 'http://%s/api/v1.0' % hostname
def request(self, resource, method='GET', data=None):
if data is None:
data = ''
try:
r = requests.request(
method,
'%s/%s/' % (self._ep, resource),
data=json.dumps(data),
headers={'Content-Type': "application/json"},
auth=(self._user, self._secret),
)
except:
print 'UNKNOWN - Error when contacting freenas server: ' + str(sys.exc_info())
sys.exit(3)
if r.ok:
try:
return r.json()
except:
print 'UNKNOWN - Error when contacting freenas server: ' + str(sys.exc_info())
sys.exit(3)
def check_repl(self):
repls = self.request('storage/replication')
errors=0
msg=''
try:
for repl in repls:
if repl['repl_status'] != 'Succeeded' and repl['repl_status'] != None and repl['repl_status'][:7] != 'Sending' and repl['repl_status'] != 'Up to date':
errors = errors + 1
msg = msg + repl['repl_zfs'] + ' [' + repl['repl_status'] + '] ' ;
except:
print 'UNKNOWN - Error when contacting freenas server: ' + str(sys.exc_info())
sys.exit(3)
if errors > 0:
print 'WARNING - ' + msg.strip() + '. Go to Storage > Replication Tasks > View Replication Tasks in FreeNAS for more details.'
sys.exit(1)
else:
print 'OK - No replication errors'
sys.exit(0)
def check_alerts(self):
alerts = self.request('system/alert')
warn=0
crit=0
msg=''
try:
for alert in alerts:
if alert['dismissed'] == False:
if alert['level'] == 'CRIT':
crit = crit + 1
msg = msg + '- (C) ' + string.replace(alert['message'], '\n', '. ') + ' '
elif alert['level'] == 'WARN':
warn = warn + 1
msg = msg + '- (W) ' + string.replace(alert['message'], '\n', '. ') + ' '
except:
print 'UNKNOWN - Error when contacting freenas server: ' + str(sys.exc_info())
sys.exit(3)
if crit > 0:
print 'CRITICAL ' + msg
sys.exit(2)
elif warn > 0:
print 'WARNING ' + msg
sys.exit(1)
else:
print 'OK - No problem alerts'
sys.exit(0)
def check_updates(self):
updates = self.request('system/update/check')
if not updates:
print 'OK - No pending updates.'
sys.exit(0)
else:
print 'WARNING - There are pending updates. Go to System > Update to apply pending updates.'
sys.exit(1)
def main():
parser = argparse.ArgumentParser(description='Checks a freenas server using the API')
parser.add_argument('-H', '--hostname', required=True, type=str, help='Hostname or IP address')
parser.add_argument('-u', '--user', required=True, type=str, help='Normally only root works')
parser.add_argument('-p', '--passwd', required=True, type=str, help='Password')
parser.add_argument('-t', '--type', required=True, type=str, help='Type of check, either repl, alerts or updates')
args = parser.parse_args(sys.argv[1:])
startup = Startup(args.hostname, args.user, args.passwd)
if args.type == 'alerts':
startup.check_alerts()
elif args.type == 'repl':
startup.check_repl()
elif args.type == 'updates':
startup.check_updates()
else:
print "Unknown type: " + args.type
sys.exit(3)
if __name__ == '__main__':
main()
| mit | 2,421,966,631,558,977,000 | 37.083333 | 167 | 0.577176 | false |
CityGrid/arsenal | server/arsenalweb/models/nodes.py | 1 | 7921 | '''Arsenal nodes DB Model'''
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from sqlalchemy import (
Column,
ForeignKey,
Integer,
TIMESTAMP,
Text,
)
from sqlalchemy.orm import relationship
from sqlalchemy.orm import backref
from arsenalweb.models.common import (
Base,
BaseAudit,
check_null_dict,
check_null_string,
get_name_id_dict,
get_name_id_list,
hypervisor_vm_assignments,
jsonify,
)
LOG = logging.getLogger(__name__)
class Node(Base):
'''Arsenal Node object.'''
__tablename__ = 'nodes'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Text, nullable=False)
unique_id = Column(Text, nullable=False)
status_id = Column(Integer, ForeignKey('statuses.id'), nullable=False)
hardware_profile_id = Column(Integer, ForeignKey('hardware_profiles.id'), nullable=False)
operating_system_id = Column(Integer, ForeignKey('operating_systems.id'), nullable=False)
ec2_id = Column(Integer, ForeignKey('ec2_instances.id'))
data_center_id = Column(Integer, ForeignKey('data_centers.id'))
uptime = Column(Text, nullable=False)
serial_number = Column(Text, ForeignKey('physical_devices.serial_number'))
processor_count = Column(Integer)
last_registered = Column(TIMESTAMP)
created = Column(TIMESTAMP, nullable=False)
updated = Column(TIMESTAMP, nullable=False)
updated_by = Column(Text, nullable=False)
status = relationship('Status', backref='nodes', lazy='joined')
hardware_profile = relationship('HardwareProfile', backref=backref('nodes'), lazy='joined')
operating_system = relationship('OperatingSystem', backref=backref('nodes'), lazy='joined')
ec2_instance = relationship('Ec2Instance', backref=backref('nodes'), lazy='joined')
data_center = relationship('DataCenter', backref=backref('nodes'), lazy='joined')
physical_device = relationship('PhysicalDevice',
backref=backref('nodes'),
lazy='joined',
foreign_keys=[serial_number])
node_groups = relationship('NodeGroup',
secondary='node_group_assignments',
backref='nodes',
lazy='dynamic')
tags = relationship('Tag',
secondary='tag_node_assignments',
backref='nodes',
lazy='dynamic')
network_interfaces = relationship('NetworkInterface',
secondary='network_interface_assignments',
backref='nodes',
lazy='dynamic')
hypervisor = relationship('Node',
secondary='hypervisor_vm_assignments',
primaryjoin=hypervisor_vm_assignments.c.hypervisor_id == id,
secondaryjoin=hypervisor_vm_assignments.c.guest_vm_id == id,
backref='guest_vms',
lazy='dynamic')
def __json__(self, request):
try:
fields = request.params['fields']
if fields == 'all':
# Everything.
all_fields = dict(
id=self.id,
name=self.name,
unique_id=self.unique_id,
status=get_name_id_dict([self.status]),
hardware_profile=get_name_id_dict([self.hardware_profile]),
operating_system=get_name_id_dict([self.operating_system]),
ec2_instance=check_null_dict(self.ec2_instance),
data_center=get_name_id_dict([self.data_center]),
uptime=check_null_string(self.uptime),
serial_number=check_null_string(self.serial_number),
processor_count=check_null_string(self.processor_count),
node_groups=get_name_id_list(self.node_groups),
tags=get_name_id_list(self.tags, extra_keys=['value']),
network_interfaces=get_name_id_list(self.network_interfaces,
extra_keys=[
'unique_id',
]),
guest_vms=get_name_id_list(self.guest_vms),
hypervisor=get_name_id_list(self.hypervisor),
physical_device=self.physical_device,
last_registered=self.last_registered,
created=self.created,
updated=self.updated,
updated_by=self.updated_by,
)
return jsonify(all_fields)
else:
# Always return name id and unique_id, then return whatever additional fields
# are asked for.
resp = get_name_id_dict([self], extra_keys=['unique_id'])
my_fields = fields.split(',')
# Backrefs are not in the instance dict, so we handle them here.
if 'node_groups' in my_fields:
resp['node_groups'] = get_name_id_list(self.node_groups)
if 'hypervisor' in my_fields:
resp['hypervisor'] = get_name_id_list(self.hypervisor)
if 'guest_vms' in my_fields:
my_guest_vms = get_name_id_list(self.guest_vms)
if my_guest_vms:
resp['guest_vms'] = my_guest_vms
# Need this so we don't return an empty list of guest_vms
# for each guest vm.
else:
del resp['guest_vms']
if 'tags' in my_fields:
resp['tags'] = get_name_id_list(self.tags,
extra_keys=['value'])
if 'network_interfaces' in my_fields:
resp['network_interfaces'] = get_name_id_list(self.network_interfaces,
extra_keys=[
'unique_id',
'ip_address',
'bond_master',
'port_description',
'port_number',
'port_switch',
'port_vlan',
])
resp.update((key, getattr(self, key)) for key in my_fields if
key in self.__dict__)
return jsonify(resp)
# Default to returning only name, id, and unique_id.
except KeyError:
resp = get_name_id_dict([self], extra_keys=['unique_id'])
return resp
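    # Example (assumption: illustrative): a request such as
    #   GET /api/nodes/1?fields=tags,node_groups
    # returns name/id/unique_id plus the requested related fields, while
    # ?fields=all serializes every related object handled above.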
class NodeAudit(BaseAudit):
'''Arsenal NodeAudit object.'''
__tablename__ = 'nodes_audit'
| apache-2.0 | -1,277,032,278,955,777,500 | 45.052326 | 95 | 0.505239 | false |
shashankasharma/commons-csv | analysis/sec_analysis.py | 1 | 2470 | from os import walk
import sys, os, fnmatch, re
mypath = ''
if len(sys.argv)==2 and os.path.exists(sys.argv[1]):
mypath = sys.argv[1]
else:
mypath = os.getcwd()
filelist = []
keyfilelist = []
opbufferinit = '\nRunning security analysis:'
opbuffer = ''
for (dirpath, dirnames, filenames) in walk(mypath):
for filename in fnmatch.filter(filenames, '*.c'):
filelist.append(os.path.join(dirpath,filename))
for filename in fnmatch.filter(filenames, '*.cpp'):
filelist.append(os.path.join(dirpath,filename))
for filename in fnmatch.filter(filenames, '*.java'):
filelist.append(os.path.join(dirpath,filename))
for filename in fnmatch.filter(filenames, '*.json'):
filelist.append(os.path.join(dirpath,filename))
for filename in fnmatch.filter(filenames, '*.key'):
keyfilelist.append(os.path.join(dirpath,filename))
for filename in fnmatch.filter(filenames, '*.pem'):
keyfilelist.append(os.path.join(dirpath,filename))
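# doregex: a 64-character upper-case alphanumeric token at the end of a line;
# awsregex: a base64-like string, filtered below to length 40 (the length of an
# AWS secret access key).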
doregex = re.compile('([A-Z0-9]{64})[\s\'\"\;\)\]\}]*$')
awsregex = re.compile('([A-Za-z\/]*[0-9][a-zA-Z0-9\/]+)[\s\'\"\;\)\]\}]*$')
for filename in filelist:
filetype = filename.split('.')[-1]
linenum = 0
with open(filename) as f:
for eachline in f:
linenum+=1
eachline = eachline.lstrip().rstrip()
if len(doregex.findall(eachline)):
opbuffer+='\n\n' + 'Filename: {}\nLine number: {}'.format(filename, linenum)
break
elif len(awsregex.findall(eachline)):
flag = False
for eachtoken in awsregex.findall(eachline):
if len(eachtoken) == 40:
opbuffer+='\n\n' + 'Filename: {}\nLine number: {}'.format(filename, linenum)
flag = True
break
if flag:
break
if len(keyfilelist):
opbuffer+="\n\nFound files with security keys."
for eachfile in keyfilelist:
opbuffer+='\n' + 'Filename: {}'.format(eachfile)
opbuffer+="\n\nPlease remove these files before pushing changes."
with open(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),'secanalysis.result'),'w') as opfile:
opfile.write(opbufferinit)
if len(opbuffer) or len(keyfilelist):
opbuffer+='\n\nSTATUS: FAILURE'
else:
opbuffer+='\n\nSTATUS: SUCCESS'
opfile.write(opbuffer)
print opbufferinit + opbuffer + '\n'
| apache-2.0 | -3,758,016,491,021,055,500 | 38.83871 | 106 | 0.601215 | false |
miing/mci_migo | api/v20/tests/test_registration.py | 1 | 10245 | from mock import (
MagicMock,
NonCallableMock,
patch,
call as mock_call,
)
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from gargoyle.testutils import switches
from u1testutils.django import patch_settings
from identityprovider.models import (
Account,
EmailAddress,
)
from identityprovider.tests.utils import (
SSOBaseUnittestTestCase,
)
from api.v20 import (
handlers,
registration,
)
from api.v20.tests.utils import call
MODULE = 'api.v20.registration'
class RegistrationTestCase(SSOBaseUnittestTestCase):
def setUp(self):
super(RegistrationTestCase, self).setUp()
self.email = self.factory.make_email_address()
self.MockEmailAddress = self._apply_patch(MODULE, 'EmailAddress')
self.mock_account = MagicMock(spec=Account())
self.mock_emails = MagicMock()
self.mock_emails.count.return_value = 0
self.MockEmailAddress.objects.filter.return_value = self.mock_emails
@patch(MODULE + ".send_impersonation_email")
def test_register_already_registered_and_inactive(self, mock_send):
self.mock_emails.count.return_value = 1
email = MagicMock()
email.account.is_active = False
self.mock_emails.__getitem__.return_value = email
with self.assertRaises(registration.EmailAlreadyRegistered):
registration.register(self.email, "", "")
self.assertFalse(mock_send.called)
@patch(MODULE + ".send_impersonation_email")
def test_register_already_registered_and_active(self, mock_send):
self.mock_emails.count.return_value = 1
email = MagicMock()
email.account.is_active = True
self.mock_emails.__getitem__.return_value = email
with self.assertRaises(registration.EmailAlreadyRegistered):
registration.register(self.email, "", "")
self.assertTrue(mock_send.called)
@patch(MODULE + ".send_new_user_email")
@patch(MODULE + ".Account.objects.create_account")
def test_register_success(self, mock_create_account, mock_send):
mock_create_account.return_value = self.mock_account
registration.register(
self.email,
'MySecretPassword1',
'displayname'
)
mock_create_account.assert_called_once_with(
'displayname',
self.email,
'MySecretPassword1',
email_validated=False
)
mock_send.assert_called_once_with(
account=self.mock_account, email=self.email)
def test_invalid_email_and_pw(self):
with self.assertRaises(ValidationError) as e:
registration.register("", '', 'displayname')
        self.assertEqual(
            e.exception.message_dict['email'][0],
            'This field cannot be blank.'
        )
        self.assertEqual(
            e.exception.message_dict['password'][0],
            'Password must be at least 8 characters long.'
        )
class RegistrationHandlerTestCase(SSOBaseUnittestTestCase):
handler = handlers.AccountRegistrationHandler()
url = reverse('api-registration')
def setUp(self):
super(RegistrationHandlerTestCase, self).setUp()
self.data = {
'email': self.factory.make_email_address(),
'password': 'asdfASDF1',
'displayname': 'Ricardo the Magnificent',
'captcha_id': 'ignored',
'captcha_solution': 'ignored',
}
self.mock_account = MagicMock(
spec=Account,
openid_identifier='abcdefg',
preferredemail=MagicMock(email=self.data['email']),
displayname=self.data['displayname'],
status=20
)
self.mock_account.emailaddress_set.all.return_value = [
NonCallableMock(spec=EmailAddress, email=self.data['email'])
]
self.mock_register = self._apply_patch(
'api.v20.handlers.registration.register'
)
self.mock_register.return_value = self.mock_account
def test_registration_handler_invalid_data(self):
data = {
'email': 'x',
'password': 'y',
'captcha_id': 'ignored',
'captcha_solution': 'ignored',
}
self.mock_register.side_effect = ValidationError({'email': 'Invalid'})
response, json_body = call(self.handler.create, self.url, data)
self.assertEqual(response.status_code, 400)
self.assertEqual(json_body['code'], "INVALID_DATA")
self.assertIn("Invalid request data", json_body['message'])
self.assertIn('email', json_body['extra'])
self.assertNotIn('password', json_body['extra'])
self.assertNotIn('displayname', json_body['extra'])
def test_registration_already_registered(self):
self.mock_register.side_effect = registration.EmailAlreadyRegistered
response, json_body = call(self.handler.create, self.url, self.data)
self.assertEqual(response.status_code, 409)
self.assertEqual(json_body['code'], "ALREADY_REGISTERED")
self.assertIn("already registered", json_body['message'])
self.assertIn('email', json_body['extra'])
self.assertIn(self.data['email'], json_body['extra']['email'])
def test_registration_success(self):
response, json_body = call(self.handler.create, self.url, self.data)
self.assertEqual(response.status_code, 201)
self.assertIn('openid', json_body)
self.assertIn('href', json_body)
self.assertEqual(json_body['email'], self.data['email'])
self.assertEqual(json_body['displayname'], self.data['displayname'])
self.assertEqual(json_body['status'], 'Active')
self.assertEqual(len(json_body['emails']), 1)
self.assertIn(self.data['email'], json_body['emails'][0]['href'])
@patch('api.v20.handlers.Captcha')
def test_register_captcha_required(self, mock_captcha):
captcha_data = {'captcha_id': 999, 'image_url': 'somewhere'}
del self.data['captcha_id']
del self.data['captcha_solution']
mock_captcha.new.return_value.serialize.return_value = captcha_data
with switches(CAPTCHA=True):
response, json_body = call(
self.handler.create, self.url, self.data)
self.assertEqual(response.status_code, 401)
self.assertEqual(json_body['code'], "CAPTCHA_REQUIRED")
self.assertIn('A captcha challenge is required', json_body['message'])
self.assertFalse(self.mock_register.called)
self.assertEqual(json_body['extra'], captcha_data)
@patch('api.v20.handlers.Captcha')
def test_register_captcha_success(self, mock_captcha):
mock_captcha.return_value.verify.return_value = True
self.data['captcha_id'] = 999
self.data['captcha_solution'] = 'foo bar'
response, json_body = call(self.handler.create, self.url, self.data)
self.assertEqual(response.status_code, 201)
expected_calls = mock_call(999).verify(
'foo bar', '127.0.0.1', self.data['email']).call_list()
self.assertEqual(mock_captcha.mock_calls, expected_calls)
self.assertIn('openid', json_body)
self.assertIn('href', json_body)
self.assertEqual(json_body['email'], self.data['email'])
self.assertEqual(json_body['displayname'], self.data['displayname'])
self.assertEqual(json_body['status'], 'Active')
self.assertEqual(len(json_body['emails']), 1)
self.assertIn(self.data['email'], json_body['emails'][0]['href'])
@patch('api.v20.handlers.Captcha')
def test_register_captcha_failure(self, mock_captcha):
mock_captcha.return_value.verify.return_value = False
self.data['captcha_id'] = 999
self.data['captcha_solution'] = 'foo bar'
response, json_body = call(self.handler.create, self.url, self.data)
self.assertEqual(response.status_code, 403)
expected_calls = mock_call(999).verify(
'foo bar', '127.0.0.1', self.data['email']).call_list()
self.assertEqual(mock_captcha.mock_calls, expected_calls)
self.assertEqual(json_body['code'], "CAPTCHA_FAILURE")
self.assertIn('Failed response to captcha challenge.',
json_body['message'])
self.assertFalse(self.mock_register.called)
OVERIDES = dict(
DISABLE_CAPTCHA_VERIFICATION=False,
EMAIL_WHITELIST_REGEXP_LIST=['^canonicaltest(?:\+.+)?@gmail\.com$']
)
@patch('identityprovider.models.captcha.Captcha._open')
def test_register_captcha_whitelist(self, mock_open):
self.data['email'] = '[email protected]'
self.data['captcha_id'] = '999'
self.data['captcha_solution'] = 'foo bar'
with patch_settings(**self.OVERIDES):
response, json_body = call(
self.handler.create, self.url, self.data
)
self.assertTrue(self.mock_register.called)
self.assertFalse(mock_open.called)
@patch('identityprovider.models.captcha.Captcha._open')
def test_register_captcha_whitelist_with_uuid(self, mock_open):
self.data['email'] = '[email protected]'
self.data['captcha_id'] = '999'
self.data['captcha_solution'] = 'foo bar'
with patch_settings(**self.OVERIDES):
response, json_body = call(
self.handler.create, self.url, self.data
)
self.assertTrue(self.mock_register.called)
self.assertFalse(mock_open.called)
@patch('identityprovider.models.captcha.Captcha._open')
def test_register_captcha_whitelist_fail(self, mock_open):
self.data['captcha_id'] = '999'
self.data['captcha_solution'] = 'foo bar'
self.data['email'] = '[email protected]'
mock_open.return_value.is_error = False
mock_open.return_value.data.return_value = 'false\nmessage'
with patch_settings(**self.OVERIDES):
response, json_body = call(
self.handler.create, self.url, self.data
)
self.assertFalse(self.mock_register.called)
self.assertTrue(mock_open.called)
| agpl-3.0 | -4,786,676,302,009,785,000 | 39.334646 | 78 | 0.632992 | false |
IQSS/gentb-site | R/Neural_Network/program/Load_Data_EMB.py | 2 | 1128 | # -*- coding: utf-8 -*-
# Read raw data
# Author: Jimmy Royer
# [email protected]
# June 20, 2016
import pandas as pd
# Training Sample -- All the Mutations
data = pd.read_csv("./input/emb.csv")
# Create target variable
data['y'] = (data['dr'] == "r") * 1
data.drop('dr', axis=1, inplace=True)
# List of Features to Keep in the Analysis
features = [var for var in data.columns if var != "y"]
# List subset of Features
features_small = ["SNP_CN_4247429_A916G_M306V_embB", "SNP_CN_4247431_G918A_M306I_embB", "SNP_CN_4247431_G918C_M306I_embB", "SNP_CN_4247730_G1217C_G406A_embB",
"SNP_CN_4248003_A1490G_Q497R_embB", "SNP_CN_4249518_A3005G_H1002R_embB", "SNP_CN_409569_G208A_A70T_iniB", "SNP_CN_4247729_G1216A_G406S_embB",
"SNP_CN_4247431_G918T_M306I_embB", "SNP_CN_4247429_A916C_M306L_embB", "SNP_P_4243222_C11A_promoter_embA.embB", "SNP_CN_4247574_A1061C_D354A_embB",
"SNP_CN_4247495_G982T_D328Y_embB", "SNP_CN_4249583_G3070A_D1024N_embB", "SNP_CN_4243392_A160G_N54D_embA", "SNP_P_4243225_C8T_promoter_embA.embB",
"SNP_CN_4242182_G2320T_A774S_embC", "SNP_CN_4247729_G1216T_G406C_embB"] | agpl-3.0 | -4,881,976,249,242,843,000 | 46.041667 | 159 | 0.697695 | false |
radiocosmology/alpenhorn | alpenhorn/auto_import.py | 1 | 12008 | """Routines for the importing of new files on a node."""
import logging
import os
import time
import peewee as pw
from watchdog.events import FileSystemEventHandler
from watchdog.observers.polling import PollingObserver
from . import acquisition as ac
from . import archive as ar
from . import config, db, util
log = logging.getLogger(__name__)
def import_file(node, file_path):
done = False
while not done:
try:
_import_file(node, file_path)
done = True
except pw.OperationalError as e:
log.exception(e)
log.error(
"MySQL connexion dropped. Will attempt to reconnect in " "five seconds."
)
time.sleep(5)
# TODO: handle reconnection
db.database_proxy.connect()
def in_directory(file, directory):
"""Test if file is contained within the directory. Does not check existence."""
directory = os.path.join(directory, "")
# return true, if the common prefix of both is equal to directory
# e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b
return os.path.commonprefix([file, directory]) == directory
def _import_file(node, file_path):
"""Import a file into the DB.
This routine adds the following to the database, if they do not already exist
(or might be corrupted).
- The acquisition that the file is a part of.
- Information on the acquisition, if it is of type "corr".
- The file.
- Information on the file, if it is of type "corr".
- Indicates that the file exists on this node.
Parameters
----------
node : storage.StorageNode
The node we are processing.
file_path : string
Path of file on the node to import. If is is an absolute path it must
be within the node root, otherwise is is assumed to be relative to
the node root.
"""
log.debug('Considering "%s" for import.', file_path)
# Occasionally the watchdog sends events on the node root directory itself. Skip these.
if file_path == node.root:
log.debug('Skipping import request on the node root itself "%s"', node.root)
return
# Ensure the path is an absolute path within the node
if os.path.isabs(file_path):
if not in_directory(file_path, node.root):
log.error(
'File "%s" was not an absolute path within the node "%s"',
file_path,
node.root,
)
return
else:
file_path = os.path.join(node.root, file_path)
abspath = os.path.normpath(file_path)
# Skip requests to import a directory. Again these are occasionally sent by the watchdog
if os.path.isdir(file_path):
log.debug('Path to import "%s" is a directory. Skipping...', file_path)
return
relpath = os.path.relpath(abspath, node.root)
# Skip the file if there is still a lock on it.
dir_name, base_name = os.path.split(abspath)
if os.path.isfile(os.path.join(dir_name, ".%s.lock" % base_name)):
log.debug('Skipping "%s", which is locked by ch_master.py.', file_path)
return
# Check if we can handle this acquisition, and skip if we can't
acq_type_name = ac.AcqType.detect(relpath, node)
if acq_type_name is None:
log.info('Skipping non-acquisition path "%s".', file_path)
return
# Figure out which acquisition this is; add if necessary.
acq_type, acq_name = acq_type_name
try:
acq = ac.ArchiveAcq.get(ac.ArchiveAcq.name == acq_name)
log.debug('Acquisition "%s" already in DB. Skipping.', acq_name)
except pw.DoesNotExist:
acq = add_acq(acq_type, acq_name, node)
log.info('Acquisition "%s" added to DB.', acq_name)
# What kind of file do we have?
file_name = os.path.relpath(relpath, acq_name)
ftype = ac.FileType.detect(file_name, acq, node)
if ftype is None:
log.info('Skipping unrecognised file "%s/%s".', acq_name, file_name)
return
# Add the file, if necessary.
try:
file_ = ac.ArchiveFile.get(
ac.ArchiveFile.name == file_name, ac.ArchiveFile.acq == acq
)
log.debug('File "%s/%s" already in DB. Skipping.', acq_name, file_name)
except pw.DoesNotExist:
log.debug('Computing md5sum of "%s".', file_name)
md5sum = util.md5sum_file(abspath, cmd_line=False)
size_b = os.path.getsize(abspath)
done = False
while not done:
try:
with db.database_proxy.atomic():
file_ = ac.ArchiveFile.create(
acq=acq,
type=ftype,
name=file_name,
size_b=size_b,
md5sum=md5sum,
)
ftype.file_info.new(file_, node)
done = True
except pw.OperationalError as e:
log.exception(e)
log.error(
"MySQL connexion dropped. Will attempt to reconnect in "
"five seconds."
)
time.sleep(5)
# TODO: re-implement
# di.connect_database(True)
log.info('File "%s/%s" added to DB.', acq_name, file_name)
# Register the copy of the file here on the collection server, if (1) it
# does not exist, or (2) if there has previously been a copy here ensure it
    # is checksummed to ensure the archive's integrity.
if not file_.copies.where(ar.ArchiveFileCopy.node == node).count():
copy_size_b = os.stat(abspath).st_blocks * 512
copy = ar.ArchiveFileCopy.create(
file=file_, node=node, has_file="Y", wants_file="Y", size_b=copy_size_b
)
log.info('Registered file copy "%s/%s" to DB.', acq_name, file_name)
else:
# Mark any previous copies as not being present...
query = ar.ArchiveFileCopy.update(has_file="N").where(
ar.ArchiveFileCopy.file == file_, ar.ArchiveFileCopy.node == node
)
query.execute()
# ... then take the latest and mark it with has_file=M to force it to be
# checked.
copy = (
ar.ArchiveFileCopy.select()
.where(ar.ArchiveFileCopy.file == file_, ar.ArchiveFileCopy.node == node)
.order_by(ar.ArchiveFileCopy.id)
.get()
)
copy.has_file = "M"
copy.wants_file = "Y"
copy.save()
# TODO: imported files caching
# if import_done is not None:
# bisect.insort_left(import_done, file_path)
# with open(LOCAL_IMPORT_RECORD, "w") as fp:
# fp.write("\n".join(import_done))
# Routines for registering files, acquisitions, copies and info in the DB.
# ========================================================================
def add_acq(acq_type, name, node, comment=""):
"""Add an aquisition to the database.
This looks for an appropriate acquisition type, and if successful creates
the ArchiveAcq and AcqInfo entries for the acquisition.
Parameters
----------
acq_type : AcqType
Type of the acquisition
name : string
Name of the acquisition directory.
node : StorageNode
Node that the acquisition is on.
comment : string, optional
An optional comment.
Returns
-------
acq : ArchiveAcq
The ArchiveAcq entry.
acqinfo : AcqInfoBase
The AcqInfo entry.
"""
# Is the acquisition already in the database?
if ac.ArchiveAcq.select(ac.ArchiveAcq.id).where(ac.ArchiveAcq.name == name).count():
raise AlreadyExists('Acquisition "%s" already exists in DB.' % name)
# Create the ArchiveAcq entry and the AcqInfo entry for the acquisition. Run
# in a transaction so we don't end up with inconsistency.
with db.database_proxy.atomic():
# Insert the archive record
acq = ac.ArchiveAcq.create(name=name, type=acq_type, comment=comment)
# Generate the metadata table
acq_type.acq_info.new(acq, node)
return acq
# Exceptions
# ==========
class Validation(Exception):
"""Raise when validation of a name or field fails."""
class DataBaseError(Exception):
"""Raise when there is some internal inconsistency with the database."""
class AlreadyExists(Exception):
"""Raise when a record already exists in the database."""
class DataFlagged(Exception):
"""Raised when data is affected by a global flag."""
# Watchdog stuff
# ==============
class RegisterFile(FileSystemEventHandler):
def __init__(self, node):
log.info('Registering node "%s" for auto_import watchdog.', node.name)
self.node = node
self.root = node.root
if self.root[-1] == "/":
self.root = self.root[0:-1]
super(RegisterFile, self).__init__()
def on_created(self, event):
import_file(self.node, event.src_path)
return
def on_modified(self, event):
import_file(self.node, event.src_path)
return
def on_moved(self, event):
import_file(self.node, event.src_path)
return
def on_deleted(self, event):
# For lockfiles: ensure that the file that was locked is added: it is
# possible that the watchdog notices that a file has been closed before the
# lockfile is deleted.
dirname, basename = os.path.split(event.src_path)
if basename[0] == "." and basename[-5:] == ".lock":
basename = basename[1:-5]
import_file(self.node, os.path.join(dirname, basename))
# Routines to control the filesystem watchdogs.
# =============================================
obs_list = None
def setup_observers(node_list):
"""Setup the watchdogs to look for new files in the nodes."""
global obs_list
# If any node has auto_import set, look for new files and add them to the
# DB. Then set up a watchdog for it.
obs_list = []
for node in node_list:
if node.auto_import:
# TODO: Normal observers don't work via NFS so we use the polling
# observer, however, we could try and detect this and switch back
obs_list.append(
PollingObserver(
timeout=config.config["service"]["auto_import_interval"]
)
)
obs_list[-1].schedule(RegisterFile(node), node.root, recursive=True)
else:
obs_list.append(None)
# Start up the watchdog threads
for obs in obs_list:
if obs:
obs.start()
def catchup(node_list):
"""Traverse the node directory for new files and importem"""
for node in node_list:
if node.auto_import:
# Get list of all files that exist on the node
q = (
ar.ArchiveFileCopy.select(ac.ArchiveFile.name, ac.ArchiveAcq.name)
.where(
ar.ArchiveFileCopy.node == node, ar.ArchiveFileCopy.has_file == "Y"
)
.join(ac.ArchiveFile)
.join(ac.ArchiveAcq)
)
already_imported_files = [os.path.join(a, f) for a, f in q.tuples()]
log.info('Crawling base directory "%s" for new files.', node.root)
for dirpath, d, f_list in os.walk(node.root):
log.info('Crawling "%s".', dirpath)
for file_name in sorted(f_list):
if file_name in already_imported_files:
log.debug('Skipping already-registered file "%s".', file_name)
else:
import_file(node, os.path.join(dirpath, file_name))
def stop_observers():
"""Stop watchidog threads."""
for obs in obs_list:
if obs:
obs.stop()
def join_observers():
"""Wait for watchdog threads to terminate."""
for obs in obs_list:
if obs:
obs.join()
| mit | 535,321,243,543,484,740 | 31.719346 | 92 | 0.587692 | false |
yukezhu/visual7w-qa-models | prepare_dataset.py | 1 | 15804 | import os
import sys
import json
import argparse
from random import shuffle, seed
import string
# non-standard dependencies:
import h5py
import numpy as np
import skimage.io
def prepro_question_answer(imgs):
'''
tokenize all questions, answers and multiple choices
  in the dataset. all punctuation is removed.
'''
# preprocess all the questions and answers
print 'example processed tokens:'
for i,img in enumerate(imgs):
img['processed_question_tokens'] = []
img['processed_answer_tokens'] = []
img['processed_mc_tokens'] = []
for j, qa_pair in enumerate(img['qa_pairs']):
question_txt = str(qa_pair['question']).lower().translate(None, string.punctuation).strip().split()
img['processed_question_tokens'].append(question_txt)
answer_txt = str(qa_pair['answer']).lower().translate(None, string.punctuation).strip().split()
img['processed_answer_tokens'].append(answer_txt)
processed_mc_tokens = []
if 'multiple_choices' in qa_pair:
for mc in qa_pair['multiple_choices']:
mc_txt = str(mc).lower().translate(None, string.punctuation).strip().split()
processed_mc_tokens.append(mc_txt)
img['processed_mc_tokens'].append(processed_mc_tokens)
if i < 10 and j == 0: print question_txt, answer_txt
def build_vocab(imgs, params):
'''
we build a word vocabulary from the questions and answers.
rare words with frequency lower than a threshold are replaced
by a special token UNK (last token in the vocabulary).
'''
count_thr = params['word_count_threshold']
# count up the number of words
counts = {}
for img in imgs:
if img['split'] in ['train', 'val']: # test set shouldn't be used for building vocab
for txt in img['processed_question_tokens']:
for w in txt: counts[w] = counts.get(w, 0) + 1
for txt in img['processed_answer_tokens']:
for w in txt: counts[w] = counts.get(w, 0) + 1
cw = sorted([(count,w) for w,count in counts.iteritems()], reverse=True)
print 'top words and their counts:'
print '\n'.join(map(str,cw[:20]))
# print some stats
total_words = sum(counts.itervalues())
print 'total words:', total_words
bad_words = [w for w,n in counts.iteritems() if n <= count_thr]
vocab = [w for w,n in counts.iteritems() if n > count_thr]
bad_count = sum(counts[w] for w in bad_words)
print 'number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), len(bad_words)*100.0/len(counts))
print 'number of words in vocab would be %d' % (len(vocab), )
print 'number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, bad_count*100.0/total_words)
# lets look at the distribution of lengths as well
sent_lengths = {}
for img in imgs:
for txt in img['processed_question_tokens']:
nw = len(txt)
sent_lengths[nw] = sent_lengths.get(nw, 0) + 1
for txt in img['processed_answer_tokens']:
nw = len(txt)
sent_lengths[nw] = sent_lengths.get(nw, 0) + 1
max_len = max(sent_lengths.keys())
print 'max length sentence in raw data: ', max_len
print 'sentence length distribution (count, number of words):'
sum_len = sum(sent_lengths.values())
for i in xrange(max_len+1):
print '%2d: %10d %f%%' % (i, sent_lengths.get(i,0), sent_lengths.get(i,0)*100.0/sum_len)
# lets now produce the final annotations
# additional special UNK token we will use below to map infrequent words to
print 'inserting the special UNK token'
vocab.append('UNK')
for img in imgs:
img['final_questions'] = []
for txt in img['processed_question_tokens']:
question = [w if counts.get(w,0) > count_thr else 'UNK' for w in txt]
img['final_questions'].append(question)
img['final_answers'] = []
for txt in img['processed_answer_tokens']:
answer = [w if counts.get(w,0) > count_thr else 'UNK' for w in txt]
img['final_answers'].append(answer)
img['final_mcs'] = []
for mc in img['processed_mc_tokens']:
mcs = []
for txt in mc:
mc = [w if counts.get(w,0) > count_thr else 'UNK' for w in txt]
mcs.append(mc)
img['final_mcs'].append(mcs)
return vocab
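# Illustrative note (not from the original script): with the default
# word_count_threshold of 5, a token seen 5 times or fewer across the train/val
# questions and answers is replaced by 'UNK' in final_questions/final_answers,
# e.g. a hypothetical token with counts['cat'] == 3 becomes 'UNK' after build_vocab().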
def encode_question_answer(imgs, params, wtoi):
'''
encode all questions and answers into one large array, which will be 1-indexed.
also produces label_start_ix and label_end_ix which store 1-indexed
and inclusive (Lua-style) pointers to the first and last caption for
each image in the dataset.
'''
max_question_length = params['max_question_length']
max_answer_length = params['max_answer_length']
MC = params['num_multiple_choice']
N = len(imgs) # total number of images
M = sum(len(img['final_answers']) for img in imgs) # total number of QA pairs
assert M == sum(len(img['final_questions']) for img in imgs), \
'error: total numbers of questions and answers don\'t match'
question_label_arrays = []
answer_label_arrays = []
mc_label_arrays = []
question_label_length = np.zeros(M, dtype='uint32')
answer_label_length = np.zeros(M, dtype='uint32')
mc_label_length = np.zeros([M, MC], dtype='uint32')
label_start_ix = np.zeros(N, dtype='uint32') # note: these will be one-indexed
label_end_ix = np.zeros(N, dtype='uint32')
label_id = np.zeros(M, dtype='uint32') # id of the QA pair
question_counter = 0
answer_counter = 0
mc_counter = 0
counter = 1
for i,img in enumerate(imgs):
n = len(img['final_questions'])
assert n > 0, 'error: some image has no QA pairs'
# getting the labels for questions
Li = np.zeros((n, max_question_length), dtype='uint32')
for j,s in enumerate(img['final_questions']):
question_label_length[question_counter] = min(max_question_length, len(s)) # record the length of this sequence
label_id[question_counter] = img['qa_pairs'][j]['qa_id']
question_counter += 1
for k,w in enumerate(s):
if k < max_question_length:
Li[j,k] = wtoi[w]
# note: word indices are 1-indexed, and captions are padded with zeros
question_label_arrays.append(Li)
# getting the labels for answers
Li = np.zeros((n, max_answer_length), dtype='uint32')
for j,s in enumerate(img['final_answers']):
answer_label_length[answer_counter] = min(max_answer_length, len(s)) # record the length of this sequence
assert label_id[answer_counter] == img['qa_pairs'][j]['qa_id'], 'order of answers doesn\'t match order of questions'
answer_counter += 1
for k,w in enumerate(s):
if k < max_answer_length:
Li[j,k] = wtoi[w]
# note: word indices are 1-indexed, and QAs are padded with zeros
answer_label_arrays.append(Li)
# getting the labels for multiple choices
Li = np.zeros((n, MC, max_answer_length), dtype='uint32')
for h,m in enumerate(img['final_mcs']):
# assert len(m) == MC, 'question has %d multiple choices (expected %d)' % (len(m), MC)
for j,s in enumerate(m):
mc_label_length[mc_counter,j] = min(max_answer_length, len(s)) # record the length of this sequence
for k,w in enumerate(s):
if k < max_answer_length:
Li[h,j,k] = wtoi[w]
mc_counter += 1
# note: word indices are 1-indexed, and QAs are padded with zeros
mc_label_arrays.append(Li)
label_start_ix[i] = counter
label_end_ix[i] = counter + n - 1
counter += n
Lq = np.concatenate(question_label_arrays, axis=0)
La = np.concatenate(answer_label_arrays, axis=0)
Lmc = np.concatenate(mc_label_arrays, axis=0) # put all the labels together
assert La.shape[0] == M, 'error: La dimension not matched.'
assert Lq.shape[0] == M, 'error: Lq dimension not matched.'
assert Lmc.shape[0] == M, 'error: Lmc dimension not matched.'
#assert np.all(question_label_length > 0), 'error: some question had no words?'
#assert np.all(answer_label_length > 0), 'error: some answer had no words?'
#assert np.all(mc_label_length > 0), 'error: some multiple choices had no words?'
print 'encoded questions to array of size ', `Lq.shape`
print 'encoded answers to array of size ', `La.shape`
print 'encoded multiple choices to array of size ', `Lmc.shape`
return Lq, La, Lmc, label_start_ix, label_end_ix, question_label_length, answer_label_length, label_id
def load_image(filename, color=True):
'''
Load image from file into a numpy array
-color is the flag for whether to load rgb or grayscale image
return img as a 3d tensor (HxWx3)
'''
img_data = skimage.io.imread(filename, as_grey=not color)
img = skimage.img_as_float(img_data).astype(np.float32)
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color: img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def reduce_along_dim(img, dim, weights, indicies):
'''
    Perform interpolation along the image dimension dim using the given kernel weights
    -weights are the kernel weights
    -indicies are the corresponding index locations
    return img resized along dimension dim
'''
other_dim = abs(dim-1)
if other_dim == 0: #resizing image width
weights = np.tile(weights[np.newaxis,:,:,np.newaxis],(img.shape[other_dim],1,1,3))
out_img = img[:,indicies,:]*weights
out_img = np.sum(out_img,axis=2)
else: # resize image height
weights = np.tile(weights[:,:,np.newaxis,np.newaxis],(1,1,img.shape[other_dim],3))
out_img = img[indicies,:,:]*weights
out_img = np.sum(out_img,axis=1)
return out_img
def cubic_spline(x):
'''
Compute the kernel weights
See Keys, "Cubic Convolution Interpolation for Digital Image
Processing," IEEE Transactions on Acoustics, Speech, and Signal
Processing, Vol. ASSP-29, No. 6, December 1981, p. 1155.
'''
absx = np.abs(x)
absx2 = absx**2
absx3 = absx**3
kernel_weight = (1.5*absx3 - 2.5*absx2 + 1) * (absx<=1) + (-0.5*absx3 + 2.5* absx2 - 4*absx + 2) * ((1<absx) & (absx<=2))
return kernel_weight
def contribution(in_dim_len , out_dim_len, scale):
'''
Compute the weights and indicies of the pixels involved in the cubic interpolation along each dimension.
output:
weights a list of size 2 (one set of weights for each dimension). Each item is of size OUT_DIM_LEN*Kernel_Width
indicies a list of size 2(one set of pixel indicies for each dimension) Each item is of size OUT_DIM_LEN*kernel_width
note that if the entire column weights is zero, it gets deleted since those pixels don't contribute to anything
'''
kernel_width = 4
if scale < 1:
kernel_width = 4 / scale
x_out = np.array(range(1,out_dim_len+1))
#project to the input space dimension
u = x_out/scale + 0.5*(1-1/scale)
#position of the left most pixel in each calculation
l = np.floor( u - kernel_width/2)
#maxium number of pixels in each computation
p = int(np.ceil(kernel_width) + 2)
indicies = np.zeros((l.shape[0],p) , dtype = int)
indicies[:,0] = l
for i in range(1,p):
indicies[:,i] = indicies[:,i-1]+1
#compute the weights of the vectors
u = u.reshape((u.shape[0],1))
u = np.repeat(u,p,axis=1)
if scale < 1:
weights = scale*cubic_spline(scale*(indicies-u ))
else:
weights = cubic_spline((indicies-u))
weights_sums = np.sum(weights,1)
weights = weights/ weights_sums[:, np.newaxis]
indicies = indicies - 1
indicies[indicies<0] = 0
indicies[indicies>in_dim_len-1] = in_dim_len-1 #clamping the indicies at the ends
valid_cols = np.all( weights==0 , axis = 0 ) == False #find columns that are not all zeros
indicies = indicies[:,valid_cols]
weights = weights[:,valid_cols]
return weights , indicies
def imresize(img , cropped_width , cropped_height):
'''
Function implementing matlab's imresize functionality default behaviour
Cubic spline interpolation with antialiasing correction when scaling down the image.
'''
width_scale = float(cropped_width) / img.shape[1]
height_scale = float(cropped_height) / img.shape[0]
order = np.argsort([height_scale , width_scale])
scale = [height_scale , width_scale]
out_dim = [cropped_height , cropped_width]
weights = [0,0]
indicies = [0,0]
for i in range(0, 2):
weights[i] , indicies[i] = contribution(img.shape[ i ],out_dim[i], scale[i])
for i in range(0, len(order)):
img = reduce_along_dim(img , order[i] , weights[order[i]] , indicies[order[i]])
return img
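# Usage sketch (hypothetical path, mirroring the __main__ block below):
#   img = imresize(load_image('images/v7w_1.jpg'), 224, 224)
# resizes the loaded HxWx3 float image to 224x224 for VGGNet-16 input.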
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# default arguments work fine with Visual7W
parser.add_argument('--dataset_json', default='visual7w-toolkit/datasets/visual7w-telling/dataset.json', help='input dataset json file')
parser.add_argument('--output_json', default='data/qa_data.json', help='output json file')
parser.add_argument('--output_h5', default='data/qa_data.h5', help='output h5 file')
parser.add_argument('--num_multiple_choice', default=3, type=int, help='number of multiple choices of each question.')
parser.add_argument('--max_question_length', default=15, type=int, help='max length of a question, in number of words. questions longer than this get clipped.')
parser.add_argument('--max_answer_length', default=5, type=int, help='max length of an answer, in number of words. answers longer than this get clipped.')
parser.add_argument('--word_count_threshold', default=5, type=int, help='only words that occur more than this number of times will be put in vocab')
parser.add_argument('--image_dim', default=224, type=int, help='dimension of image after rescale (224 is the input image dimension for VGGNet-16)')
parser.add_argument('--image_path', default='images/v7w_%s.jpg', help='path template based on image id')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print 'parsed input parameters:'
print json.dumps(params, indent=2)
dataset = json.load(open(params['dataset_json'], 'r'))
prepro_question_answer(dataset['images'])
# create the vocab
vocab = build_vocab(dataset['images'], params)
itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table
wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table
image_id = list(set([x['image_id'] for x in dataset['images']]))
# create output json file
out = {}
out['ix_to_word'] = itow # encode the (1-indexed) vocab
out['word_to_ix'] = wtoi
json.dump(out, open(params['output_json'], 'w'))
print 'wrote ', params['output_json']
# encode answers in large arrays, ready to ship to hdf5 file
Lq, La, Lmc, label_start_ix, label_end_ix, question_label_length, answer_label_length, label_id = encode_question_answer(dataset['images'], params, wtoi)
# create output h5 file
f = h5py.File(params['output_h5'], "w")
f.create_dataset("question_label", dtype='uint32', data=Lq)
f.create_dataset("answer_label", dtype='uint32', data=La)
f.create_dataset("mc_label", dtype='uint32', data=Lmc)
f.create_dataset("qa_start_ix", dtype='uint32', data=label_start_ix)
f.create_dataset("qa_end_ix", dtype='uint32', data=label_end_ix)
f.create_dataset("question_label_length", dtype='uint32', data=question_label_length)
f.create_dataset("answer_label_length", dtype='uint32', data=answer_label_length)
f.create_dataset("qa_id", dtype='uint32', data=label_id)
# loading image dataset
print 'start to process images into hdf5'
f.create_dataset("image_id", dtype='uint32', data=image_id)
img_num = len(image_id)
img_dim = params['image_dim']
img_data = f.create_dataset("image_data", (img_num, 3, img_dim, img_dim))
for k, img_id in enumerate(image_id):
img_path = params['image_path'] % str(img_id)
img = load_image(img_path)
img = imresize(img, img_dim, img_dim)
img_data[k] = img.transpose(2, 0, 1)
if k % 500 == 0:
print 'processed %d / %d images' % (k, img_num)
f.close()
print 'wrote ', params['output_h5']
| mit | 2,191,950,445,973,915,600 | 40.699208 | 162 | 0.666477 | false |
torshid/foodnow | jinja.py | 1 | 4189 | def foodnow():
return "Food — Now !"
def isMobile():
from flask import request
from common import phones
agent = request.headers.get('User-Agent')
return any(phone in agent.lower() for phone in phones)
def fileExists(name):
import os
if name[:1] == '/':
name = name[1:]
return os.path.isfile(os.path.dirname(os.path.abspath(__file__)) + '/' + name)
def checkSessions():
from flask import request, session
if 'mail' in request.cookies and 'password' in request.cookies:
session['mail'] = request.cookies['mail']
session['password'] = request.cookies['password']
if 'mail' in session and 'password' in session:
from tables import users
user = users.getUser(session['mail'], session['password'])
if user:
session['user'] = user
else :
if 'mail' in session: del session['mail']
if 'password' in session: del session['password']
if 'user' in session: del session['user']
return
def dishImageExists(dishid):
from config import dishesthumbspath
return fileExists(dishesthumbspath + str(dishid) + '.png')
def nl2br(value):
import re
from jinja2 import evalcontextfilter, Markup, escape
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
result = u'\n\n'.join(u'<p>%s</p>' % p.replace('\n', '<br/>\n') \
for p in _paragraph_re.split(escape(value)))
return Markup(result)
def random(min, max):
from random import randint
return randint(min, max)
def istrue(s):
return s == '1' or s == 1
def isfalse(s):
return not istrue(s)
def isLogged():
from flask import session
return 'user' in session
def getUser():
from flask import session
return session['user']
def getUserEmployments():
from tables import employees
return employees.getUserEmployments(getUser()[0])
def isManager(employee):
from tables import employees
return employees.isManager(employee)
def isWorker(employee):
from tables import employees
return employees.isWorker(employee)
def isDriver(employee):
from tables import employees
return employees.isDriver(employee)
def getRoles():
from common import roles
return roles
def getThumbWidth():
from common import dishthumbsize
return dishthumbsize[0]
def getMenuDishes(menuid):
from tables import dishes
return dishes.getMenuDishes(menuid)
def getRoleTitle(role):
from common import roles
for rol in roles:
if rol[0] == role:
return rol[1]
return 'Unknown'
def panel_for(entity, **data):
from flask.helpers import url_for
return '/'.join(url_for(entity, **data).split('/')[3:]).replace('/', '-')
def getResto(id = None, pseudo = None):
from tables import restos
if id:
return restos.getRestoFromId(id)
else:
return restos.getResto(pseudo)
def getLikedRestos(userId):
from tables import restolikes
return restolikes.getLikedRestos(userId)
def updateProfile(userId, name = None, email = None, password = None):
from entities import user
user.updateProfile(name, email, password)
return
def getMostLikedRestos():
from tables import restos
return restos.getMostLikedRestos()
def getMostLikedDishes():
from tables import dishes
return dishes.getMostLikedDishes()
def getUserFromId(id):
from tables import users
return users.getUserFromId(id)
def getLikedDishes(userId):
from tables import dishlikes
return dishlikes.getLikedDishes(userId)
def getUserLikedDishesId(userId):
from tables import dishlikes
return dishlikes.getUserLikedDishesId(userId)
def getLikedRestos(userId):
from tables import restolikes
return restolikes.getLikedRestos(userId)
def getUserLikedRestosId(userId):
from tables import restolikes
return restolikes.getUserLikedRestosId(userId)
def addReview(userId, restoId, dishId, content):
from entities import reviews
inserted = reviews.addReview(userId, restoId, dishId, content)
return inserted
def getAllReviews(userId):
from tables import reviews
list = reviews.getAllReviews(userId)
return list
| gpl-3.0 | 4,369,039,640,209,388,000 | 26.366013 | 82 | 0.685455 | false |
nmc-probe/emulab-nome | tbsetup/plab/libdslice/dslice/fileutil.py | 1 | 2574 | """
Copyright (c) 2002 Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
EXPORT LAWS: THIS LICENSE ADDS NO RESTRICTIONS TO THE EXPORT LAWS OF
YOUR JURISDICTION. It is licensee's responsibility to comply with any
export regulations applicable in licensee's jurisdiction. Under
CURRENT (May 2000) U.S. export regulations this software is eligible
for export from the U.S. and can be downloaded by or otherwise
exported or reexported worldwide EXCEPT to U.S. embargoed destinations
which include Cuba, Iraq, Libya, North Korea, Iran, Syria, Sudan,
Afghanistan and any other country to which the U.S. has embargoed
goods and services.
DESCRIPTION: Various file utilties
AUTHOR: Brent Chun ([email protected])
$Id: fileutil.py,v 1.1 2003-08-19 17:17:20 aclement Exp $
"""
import os
def rename(oldfile, newfile):
"""Rename file, potentially going across filesystems"""
try:
os.rename(oldfile, newfile)
except:
f = open(oldfile)
data = f.read()
f.close()
g = open(newfile, "w")
g.write(data)
g.close()
os.unlink(oldfile)
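# Usage sketch (hypothetical paths): rename("/tmp/stage/file.dat", "/nfs/data/file.dat")
# behaves like os.rename() on the same filesystem and falls back to a read/write
# copy followed by unlinking the original when the rename fails (e.g. across filesystems).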
| agpl-3.0 | 3,412,019,477,320,608,300 | 39.857143 | 70 | 0.752137 | false |
objectrocket/python-client | tests/test_bases.py | 1 | 6264 | """Tests for the objectrocket.bases module."""
import pytest
from objectrocket import errors
from objectrocket.bases import BaseInstance
from objectrocket.bases import BaseOperationsLayer
REQUIRED_INSTANCE_FIELDS = [
'created',
'name',
'plan',
'service',
'type',
'version'
]
class OpsLayerPrototype(BaseOperationsLayer):
"""A class for testing the :py:class:`objectrocket.bases.BaseOperationsLayer`."""
@property
def _default_request_kwargs(self):
"""Base requires this to be implemented."""
return super(OpsLayerPrototype, self)._default_request_kwargs
@property
def _url(self):
"""Base requires this to be implemented."""
pass
class InstancePrototype(BaseInstance):
"""A class for testing the :py:class:`objectrocket.bases.BaseOperationsLayer`."""
@property
def get_connection(self):
"""Base requires this to be implemented."""
return super(BaseInstance, self).get_connection()
###########################
# Tests for BaseInstance. #
###########################
def test_client_is_properly_embedded_in_base_instance(client, mongodb_sharded_doc):
inst = InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert inst._client is client
def test_instance_doc_is_properly_embedded_in_base_instance(client, mongodb_sharded_doc):
inst = InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert inst._instance_document is mongodb_sharded_doc
@pytest.mark.parametrize('needed_field', REQUIRED_INSTANCE_FIELDS)
def test_instance_creation_fails_with_missing_field(client, mongodb_sharded_doc, needed_field):
mongodb_sharded_doc.pop(needed_field, None)
with pytest.raises(KeyError) as exinfo:
InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert exinfo.value.args == (needed_field,)
def test_instance_creation_fails_with_missing_connect_string(client, mongodb_sharded_doc):
mongodb_sharded_doc.pop("connect_string", None)
with pytest.raises(errors.InstancesException) as exinfo:
InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert str(exinfo.value) == 'No connection string found.'
def test_instance_repr_is_as_expected(client, mongodb_sharded_doc):
inst = InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
inst_id = hex(id(inst))
expected_repr = (
'<{!s} name={!s} id={!s} at {!s}>'
.format(inst.__class__.__name__, inst.name, inst.id, inst_id)
)
assert repr(inst) == expected_repr
def test_inst_connect_string_attribute_is_as_expected(client, mongodb_sharded_doc):
inst = InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert inst._connect_string == mongodb_sharded_doc['connect_string']
def test_inst_created_attribute_is_as_expected(client, mongodb_sharded_doc):
inst = InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert inst._created == mongodb_sharded_doc['created']
def test_inst_instance_document_attribute_is_as_expected(client, mongodb_sharded_doc):
inst = InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert inst._instance_document is mongodb_sharded_doc
def test_inst_name_attribute_is_as_expected(client, mongodb_sharded_doc):
inst = InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert inst._name == mongodb_sharded_doc['name']
def test_inst_plan_attribute_is_as_expected(client, mongodb_sharded_doc):
inst = InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert inst._plan == mongodb_sharded_doc['plan']
def test_inst_service_attribute_is_as_expected(client, mongodb_sharded_doc):
inst = InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert inst._service == mongodb_sharded_doc['service']
def test_inst_type_attribute_is_as_expected(client, mongodb_sharded_doc):
inst = InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert inst._type == mongodb_sharded_doc['type']
def test_inst_version_attribute_is_as_expected(client, mongodb_sharded_doc):
inst = InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert inst._version == mongodb_sharded_doc['version']
def test_inst_to_dict_returns_expected_output(client, mongodb_sharded_doc):
inst = InstancePrototype(instance_document=mongodb_sharded_doc, instances=client.instances)
assert inst.to_dict() == mongodb_sharded_doc
##################################
# Tests for BaseOperationsLayer. #
##################################
def test_client_is_properly_embedded_in_base_ops(client):
inst = OpsLayerPrototype(base_client=client)
assert client is inst._client
def test_default_request_kwargs(client):
inst = OpsLayerPrototype(base_client=client)
assert inst._default_request_kwargs == {
'headers': {
'Content-Type': 'application/json'
},
'hooks': {
'response': inst._verify_auth
}
}
def test_url(client):
inst = OpsLayerPrototype(base_client=client)
assert inst._url is None
def test_verify_auth_returns_none_with_status_code_200(client, mocked_response, obj):
mocked_response.status_code = 200
mocked_response.request = obj
mocked_response.request.method = 'TEST'
mocked_response.request.path_url = '/TEST/PATH/'
inst = OpsLayerPrototype(base_client=client)
assert inst._verify_auth(mocked_response) is None
def test_verify_auth_raises_with_status_code_401(client, mocked_response, obj):
mocked_response.status_code = 401
mocked_response.request = obj
mocked_response.request.method = 'TEST'
mocked_response.request.path_url = '/TEST/PATH/'
inst = OpsLayerPrototype(base_client=client)
with pytest.raises(errors.AuthFailure) as exinfo:
inst._verify_auth(mocked_response)
assert exinfo.value.args[0] == 'Received response code 401 from TEST /TEST/PATH/.'
| mit | -6,753,140,476,015,602,000 | 35.208092 | 95 | 0.716475 | false |
joshwalawender/KeckUtilities | telescopeSchedule/queryTelSched.py | 1 | 5013 | #!/usr/env/python
'''
Name: queryTelSched.py
Purpose:
Query the telescope database and return the value of `field` for the given
`date` and `tel`. Try to replicate functionality of the old queryTelSched
which was located at: ~kics/instr/bin/queryTelSched (on a summit machine).
This program tries to be backward compatible with the old telescope
schedule database and programs which called it. Some field names have
changed with the new database, so a translation step is included in the
queryTelSched function below. To add additional translations, just add to
the translations dictionary
Example Use:
python queryTelSched.py 2018-12-18 1 Instrument
Arguments:
date: The date for the query in a string with YYYY-MM-DD format.
tel: An int (1 or 2) indicating the telescope.
field: A string with the field to return. For more information on the API
        and on what fields are returnable, see the web links below.
Additional Information on the Telescope Schedule API can be found here:
https://www.keck.hawaii.edu/software/db_api/telSchedule.php
Details on the getSchedule command and what it returns can be found here:
https://www.keck.hawaii.edu/software/db_api/telSchedule.php?cmd=getSchedule
Modification history:
2018-12-18 jwalawender Original version (adapted from old version for
old database API).
'''
## Import General Tools
import argparse
import logging
import requests
import json
##-------------------------------------------------------------------------
## Parse Command Line Arguments
##-------------------------------------------------------------------------
## create a parser object for understanding command-line arguments
p = argparse.ArgumentParser(description='''
''')
## add arguments
p.add_argument('date', type=str,
help="Date (HST) in YYYY-MM-DD format.")
p.add_argument('tel', type=int,
help="Telescope number as int (i.e. 1 or 2).")
p.add_argument('field', type=str,
help="Field to query (e.g. Instrument).")
## add flags
p.add_argument("-v", "--verbose", dest="verbose",
default=False, action="store_true",
help="Be verbose! (default = False)")
args = p.parse_args()
##-------------------------------------------------------------------------
## Create logger object
##-------------------------------------------------------------------------
log = logging.getLogger('queryTelSched')
log.setLevel(logging.DEBUG)
LogConsoleHandler = logging.StreamHandler()
if args.verbose is True:
LogConsoleHandler.setLevel(logging.DEBUG)
else:
LogConsoleHandler.setLevel(logging.INFO)
LogFormat = logging.Formatter('%(levelname)9s: %(message)s')
LogConsoleHandler.setFormatter(LogFormat)
log.addHandler(LogConsoleHandler)
##-------------------------------------------------------------------------
## Define some useful functions
##-------------------------------------------------------------------------
def querydb(req):
'''A simple wrapper to form a generic API level query to the telescope
schedule web API. Returns a JSON object with the result of the query.
'''
log.debug('Querying telescope schedule')
url = f"https://www.keck.hawaii.edu/software/db_api/telSchedule.php?{req}"
r = requests.get(url)
return json.loads(r.text)
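# Example (values taken from the module docstring): querydb("cmd=getSchedule&date=2018-12-18&telnr=1")
# issues a GET to .../telSchedule.php?cmd=getSchedule&date=2018-12-18&telnr=1 and
# returns the parsed JSON describing that night's scheduled programs.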
def get_schedule(date, tel):
'''Use the querydb function and getSchedule of the telescope schedule web
API with arguments for date and telescope number. Returns a JSON object
with the schedule result.
'''
if tel not in [1,2]:
log.error("Telescope number must be 1 or 2.")
return
req = f"cmd=getSchedule&date={date}&telnr={tel}"
result = querydb(req)
log.debug('Got result from schedule database')
return result
##-------------------------------------------------------------------------
## Main Program: queryTelSched
##-------------------------------------------------------------------------
def queryTelSched(date, tel, field):
result = get_schedule(date, tel)
log.debug(f"Found {len(result)} programs")
translations = {'InstrAcc': 'Account',
}
output_list = []
for i,entry in enumerate(sorted(result, key=lambda x: x['StartTime'])):
log.debug(f"Entry {i+1}:")
for key in entry.keys():
log.debug(f" {key:>15s}: {entry[key]}")
try:
output_list.append(entry[field])
except KeyError:
log.error(f'Field "{field}" not found')
if field in translations.keys():
log.debug(f'Trying tranlated key "{translations[field]}"')
output_list.append(entry[translations[field]])
log.warning(f'Please update the script calling for "{field}" '
f'to use "{translations[field]}" instead.')
print('/'.join(output_list))
return output_list
if __name__ == '__main__':
queryTelSched(args.date, args.tel, args.field)
| bsd-2-clause | 2,792,296,178,803,762,000 | 35.591241 | 78 | 0.595053 | false |
Yvtou/Marriage-Spider | Match.py | 1 | 1711 | # -*- coding: utf-8 -*-
__author__ = 'Owen'
import urllib2
import re
from openpyxl import Workbook
# Create the workbook and worksheet
wb = Workbook()
ws = wb.active
ws.title = "test"
# Set the range of pages to scrape
for pageIndex in range(3, 10):
    print u'Scraping info for person #' + str(pageIndex) + u'...'
    # URL of the page to scrape
url = 'http://www.i520.org.tw/products-' + str(pageIndex) + '.html'
request = urllib2.Request(url)
    # Handle HTTP and URL errors
try:
response = urllib2.urlopen(request)
    # If there is an error, show its type
    except urllib2.URLError, e:
        if hasattr(e, 'code'):
            print u'The server could not complete the request'
            print u'Error code:', e.code
        elif hasattr(e, 'reason'):
            print u'Could not connect to the server'
            print u'Reason: ', e.reason
    # If there is no error, start scraping
    else:
        # Regex matching; mind the Chinese character encoding
content = response.read().decode('utf-8')
pattern = re.compile('<div class="girlInfo">.*?<h2>(.*?)</h2>.*?<ul>.*?<li>(.*?)</li>.*?<li>(.*?)</li>.*?<li>(.*?)</li>.*?<li>(.*?)</li>.*?<li>(.*?)</li>.*?<li>(.*?)</li>.*?<li>(.*?)</li>.*?</ul>',re.S)
items = re.findall(pattern,content)
        # Print the results
for item in items:
print item[0],item[1],item[2],item[3],item[4],item[5],item[6],item[7]
            # Write to the worksheet
for c in range(0,8):
d = ws.cell(row = pageIndex+1, column = c+1)
d.value = item[c]
            # Save the workbook
            wb.save('temp.xlsx')
            # Note: save this sheet's data before the next scrape, otherwise it will be overwritten!
else:
    print u'Scraping finished'
| gpl-2.0 | -8,156,034,206,582,531,000 | 28 | 210 | 0.518649 | false |
baixuexue123/note | python/basics/internet/select/select_test.py | 1 | 2304 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import select
import socket
import Queue
"""
Normally, in non-blocking mode, if the socket is not ready when trying to send or
receive data, the call to send() or recv() raises a socket.error exception.
"""
sock_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_server.setblocking(0)
sock_server.bind(('', 5555))
sock_server.listen(5)
inputs = [sock_server]
outputs = []
message_queues = {}
while True:
print "waiting for next event"
readable, writable, exceptional = select.select(inputs, outputs, [], 1.0)
    # when the timeout is reached, select returns three empty lists
if not (readable or writable or exceptional):
print "Time out !"
for s in readable:
if s is sock_server:
# a readable socket is ready to accept a connection
conn, addr = s.accept()
print " connection from", addr
conn.setblocking(0)
inputs.append(conn)
message_queues[conn] = Queue.Queue()
else:
data = s.recv(1024)
if data:
print "received: ", data, "from ", s.getpeername()
message_queues[s].put(data)
if s not in outputs:
outputs.append(s)
else:
# Interpret empty result as closed connection
print " closing"
if s in outputs:
outputs.remove(s)
inputs.remove(s)
s.close()
# remove message queue
del message_queues[s]
for s in writable:
try:
next_msg = message_queues[s].get_nowait()
except Queue.Empty:
print " ", s.getpeername(), 'queue empty'
outputs.remove(s)
else:
print " sending ", next_msg, " to ", s.getpeername()
s.send(time.asctime() + ' ' + next_msg)
for s in exceptional:
print " exception condition on ", s.getpeername()
# stop listening for input on the connection
inputs.remove(s)
if s in outputs:
outputs.remove(s)
s.close()
# Remove message queue
del message_queues[s]
| bsd-2-clause | -3,976,293,605,853,064,000 | 28.813333 | 77 | 0.562165 | false |