#!/usr/bin/env python3
import warnings
import os
import subprocess
import multiprocessing
import yaml
import sys
import csv
import glob
import numpy as np
import matplotlib.pyplot as plt
from matplotlib_venn import venn2
import pandas as pd
import seaborn as sns
from tqdm.auto import tqdm
import gseapy as gp
import hashlib
def checkMd5(work_dir):
md5sum_file = os.path.join(work_dir,"raw-data", "md5sums.csv")
def md5(file):
work_dir = os.getcwd()
file = os.path.join(work_dir, "raw-data", file)
hash_md5 = hashlib.md5()
with open(file, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return(hash_md5.hexdigest())
if not os.path.exists(os.path.join(work_dir, ".md5summscorrect")):
if os.path.exists(md5sum_file):
print("Checking MD5 checksums")
df = pd.read_csv(os.path.join(work_dir,"raw-data","md5sums.csv"))
df["md5sum_new"] = df["file"].apply(md5)
#compare original checksums with calculated ones
df["md5sumCorrect"] = df["md5sum"] == df["md5sum_new"]
check_list = df[~df["md5sumCorrect"]]
if len(check_list) > 0:
print("Calculated MD5 checksums do not match originals:")
print(check_list["file"])
sys.exit(1)
else:
print("MD5 checksums correct")
open(".md5summscorrect", 'a').close()
def write2log(work_dir,command,name):
with open(os.path.join(work_dir,"commands.log"), "a") as file:
file.write(name)
print(*command, sep="",file=file)
def set_threads(args):
max_threads=str(multiprocessing.cpu_count())
threads=args["threads"]
if threads == "max":
threads=max_threads
threads=str(threads)
return threads
def rename(work_dir):
file=open(os.path.join(work_dir,"rename.config"), "r")
lines=file.readlines()
count=0
for line in lines: #removes newline characters
lines[count]=line.replace("\n","")
count+=1
for line in lines:#rename files
old_name,new_name=line.split(";")
os.rename(os.path.join(work_dir,
"raw-data",
old_name),os.path.join(work_dir,
"raw-data",
new_name))
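# Illustrative rename.config contents (hypothetical file names, not from this repo);
# each line maps an original file in raw-data/ to its new name, separated by ";":
#   SRR0000001.fastq.gz;control_rep1.fastq.gz
#   SRR0000002.fastq.gz;treated_rep1.fastq.gz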
def get_extension(work_dir):
file_list=glob.glob(os.path.join(work_dir,"raw-data","*.gz"))
test_file=file_list[0]
extension_index=test_file.index(".",0)
file_extension=test_file[extension_index:]
return file_extension
def file_exists(file): #check if file exists/is not size zero
if os.path.exists(file):
if os.path.getsize(file) > 0:
print("Skipping "+file+" (already exists/analysed)")
return(True)
else:
return(False)
def csv2fasta(csv,script_dir):
df_CSV=pd.read_csv(csv)
line_number_fasta=len(df_CSV) * 2
df=pd.DataFrame(columns=["column"],
index=np.arange(line_number_fasta))
#create fasta df
df["column"]=df_CSV.stack().reset_index(drop=True)
df.iloc[0::2, :]=">"+df.iloc[0::2, :]
library_name=os.path.basename(csv)
library_name=library_name.replace(".csv","")
fasta_base=library_name + ".fasta"
fasta_file=os.path.join(script_dir,"index",library_name,fasta_base)
os.makedirs(os.path.join(script_dir,"index",library_name),
exist_ok=True)
df.to_csv(fasta_file,index=False, header=False)
#add new CRISPR library and fasta file location to library.yaml
yaml_list=["clip_seq","fasta","index_path","read_mod","sg_length","species"]
with open(os.path.join(script_dir,"library.yaml")) as f:
doc=yaml.safe_load(f)
doc[library_name]={}
for i in yaml_list:
doc[library_name][i]=""
doc[library_name]["fasta"]=fasta_file
with open(os.path.join(script_dir,"library.yaml"), "w") as f:
yaml.dump(doc,f)
#exit message
sys.exit("Fasta file created and added to library.yaml\nPlease provid more CRISPR library information in this file before first run.")
def fastqc(work_dir,threads,file_extension,exe_dict):
fastqc_exe=os.path.join(exe_dict["fastqc"],"fastqc")
if not os.path.isdir(os.path.join(work_dir,"fastqc")) or len(os.listdir(os.path.join(work_dir,"fastqc"))) == 0:
os.makedirs(os.path.join(work_dir,"fastqc"),exist_ok=True)
fastqc_command=fastqc_exe+" --threads "+str(threads)+" --quiet -o fastqc/ raw-data/*"+file_extension
multiqc_command=["multiqc","-o","fastqc/","fastqc/"]
#log commands
with open(os.path.join(work_dir,"commands.log"),"w") as file:
file.write("FastQC: ")
print(fastqc_command, file=file)
file.write("MultiQC: ")
print(*multiqc_command, sep=" ", file=file)
try:
print("Running FastQC on raw data")
subprocess.run(fastqc_command, shell=True)
except:
sys.exit("ERROR: FastQC failed, check logs")
print("Running MultiQC")
subprocess.run(multiqc_command)
else:
print("Skipping FastQC/MultiQC (already performed)")
def check_index(library,crispr_library,script_dir,exe_dict,work_dir):
bowtie2_dir=exe_dict["bowtie2"]
try:
index_path=library[crispr_library]["index_path"]
fasta=library[crispr_library]["fasta"]
print(crispr_library+" library selected")
if index_path in ["",None]:
print("No index file found for "+crispr_library)
if fasta == "":
sys.exit("ERROR:No fasta file found for "+crispr_library)
else:
index_base=os.path.join(script_dir,
"index",
crispr_library,
crispr_library+"-index")
index_dir=os.path.join(script_dir,"index",crispr_library)
os.makedirs(index_dir,exist_ok=True)
bowtie2_build_command=os.path.join(bowtie2_dir,
"bowtie2-build ")+fasta+" "+index_base
write2log(work_dir,bowtie2_build_command,"Bowtie2-build: ")
print("Building Bowtie2 index for "+crispr_library+" library")
try:
subprocess.run(bowtie2_build_command, shell=True) #build index
#Write bowtie2 index file location to library.yaml
with open(os.path.join(script_dir,"library.yaml")) as f:
doc=yaml.safe_load(f)
doc[crispr_library]["index_path"]=index_base
with open(os.path.join(script_dir,"library.yaml"), "w") as f:
yaml.dump(doc,f)
except:
sys.exit("ERROR: bowtie2-build failed, check logs")
except KeyError:
sys.exit("ERROR: CRISPR library not specified in command line")
def guide_names(library,crispr_library):
try:
fasta=library[crispr_library]["fasta"]
except KeyError:
sys.exit("ERROR: CRISPR library not specified in command line")
output_name=fasta.replace(".fasta","-guide_names.csv")
if not os.path.exists(output_name):
library = pd.read_csv(fasta, names = ['guide'])
#creates new dataframe with only guide names:
library = library[library['guide'].str.contains('>')]
#removes '>' character from each row:
library = library['guide'].str.strip('>')
#saves guide names to a .csv file:
library.to_csv(output_name, index=False, header=False)
def count(library,crispr_library,mismatch,threads,script_dir,work_dir,exe_dict):
#reload library yaml
with open(os.path.join(script_dir,"library.yaml")) as file:
library=yaml.full_load(file)
os.makedirs(os.path.join(work_dir,"count"),exist_ok=True)
try:
read_mod=library[crispr_library]["read_mod"]
sg_length=library[crispr_library]["sg_length"]
sg_length=str(sg_length)
index_path=library[crispr_library]["index_path"]
clip_seq=library[crispr_library]["clip_seq"]
mismatch=str(mismatch)
except KeyError:
sys.exit("ERROR: CRISPR library not specified in command line")
file_extension=get_extension(work_dir)
print("Aligning reads to reference (mismatches allowed: "+mismatch+")")
#bowtie2 and bash commands (common to both trim and clip)
bowtie2_dir=exe_dict["bowtie2"]
bowtie2= os.path.join(bowtie2_dir,
"bowtie2")+" --no-hd -p "+threads+" -t -N "+mismatch+" -x "+index_path+" - 2>> crispr.log | "
bash="sed '/XS:/d' | cut -f3 | sort | uniq -c > "
#trim, align and count
if read_mod == "trim":
file_list=glob.glob(os.path.join(work_dir,"raw-data","*"+file_extension))
for file in tqdm(file_list, position=0, leave=True):
base_file=os.path.basename(file)
out_file=os.path.join(work_dir,"count",base_file.replace(file_extension,
".guidecounts.txt"))
if not file_exists(out_file):
tqdm.write("Aligning "+base_file)
print(base_file+":", file=open("crispr.log", "a"))
cutadapt="cutadapt -j "+threads+" --quality-base 33 -l "+sg_length+" -o - "+file+" 2>> crispr.log | "
cutadapt=str(cutadapt)
bowtie2=str(bowtie2)
count_command=cutadapt+bowtie2+bash+out_file
write2log(work_dir,count_command,"Count: ")
try:
subprocess.run(count_command,shell=True)
except:
sys.exit("ERROR: read count failed, check logs")
elif read_mod == "clip":
file_list=glob.glob(os.path.join(work_dir,"raw-data","*"+file_extension))
for file in tqdm(file_list, position=0, leave=True):
base_file=os.path.basename(file)
out_file=os.path.join(work_dir,"count",base_file.replace(file_extension,".guidecounts.txt"))
if not file_exists(out_file):
print("Aligning "+base_file)
print(base_file+":", file=open("crispr.log", "a"))
cutadapt="cutadapt -j "+threads+" --quality-base 33 -a "+clip_seq+" -o - "+file+" 2>> crispr.log | "
cutadapt=str(cutadapt)
bowtie2=str(bowtie2)
count_command=cutadapt+bowtie2+bash+out_file
write2log(work_dir,count_command,"Count: ")
try:
subprocess.run(count_command, shell=True)
except:
sys.exit("ERROR: read count failed, check logs")
#remove first line from guide count text files (bowtie2 artefact)
count_list=glob.glob(os.path.join(work_dir,"count","*guidecounts.txt"))
for file in count_list:
command="sed '1d' "+file+" > "+file+".temp "+"&& mv "+file+".temp "+file
try:
subprocess.run(command, shell=True)
except:
sys.exit("ERROR: removal of first line of count file failed")
def plot(df,y_label,save_file):
sns.set_style("white")
sns.set_style("ticks")
sns.barplot(x=list(df.keys())[0],
y=list(df.keys())[1],
data=df,
color="royalblue",
edgecolor="black",
linewidth=1)
plt.ylabel(y_label)
plt.xticks(rotation = 'vertical')
plt.xlabel("")
plt.tight_layout()
sns.despine()
plt.savefig(save_file)
plt.close()
def plot_alignment_rate(work_dir):
plot_file=os.path.join(work_dir,"count","alignment-rate.pdf")
if not file_exists(plot_file):
open(os.path.join(work_dir,"files.txt"),"w").writelines([ line for line in open(os.path.join(work_dir,"crispr.log")) if ".gz:" in line ])
open(os.path.join(work_dir,"alignment-rate.txt"),"w").writelines([ line for line in open(os.path.join(work_dir,"crispr.log")) if "overall alignment rate" in line ])
line_number=len(open(os.path.join(work_dir,"files.txt")).readlines())
df=pd.DataFrame(columns=["file","alignment_rate"],index=np.arange(line_number))
counter=0
for line in open(os.path.join(work_dir,"files.txt")):
line=line.replace(":","")
line=line.replace("\n","")
df.iloc[counter,0]=line
counter+=1
counter=0
for line in open(os.path.join(work_dir,"alignment-rate.txt")):
line=line.replace("% overall alignment rate","")
line=line.replace("\n","")
df.iloc[counter,1]=line
counter+=1
df["alignment_rate"]=pd.to_numeric(df["alignment_rate"])
os.remove(os.path.join(work_dir,"files.txt"))
os.remove(os.path.join(work_dir,"alignment-rate.txt"))
#plot alignment rate
plot(df,"Alignment rate (%)",plot_file)
def plot_coverage(work_dir,library,crispr_library): #plots coverage per sample after alignment
plot_file=os.path.join(work_dir,"count","coverage.pdf")
if not file_exists(plot_file):
#get number of sgRNAs in CRISPR library
fasta=library[crispr_library]["fasta"]
fasta=pd.read_table(fasta, header=None)
lib_size=len(fasta) / 2
#extract number of single mapped aligned reads from crispr.log
open(os.path.join(work_dir,"files.txt"),"w").writelines([ line for line in open(os.path.join(work_dir,"crispr.log")) if ".gz:" in line ])
open(os.path.join(work_dir,"read-count.txt"),"w").writelines([ line for line in open(os.path.join(work_dir,"crispr.log")) if "aligned exactly 1 time" in line ])
line_number=len(open(os.path.join(work_dir,"files.txt")).readlines())
df=pd.DataFrame(columns=["sample","coverage"],index=np.arange(line_number))
counter=0
for line in open(os.path.join(work_dir,"files.txt")):
line=line.replace(":","")
line=line.replace("\n","")
df.iloc[counter,0]=line
counter+=1
counter=0
for line in open(os.path.join(work_dir,"read-count.txt")):
line=line.split("(")[0]
line=line.replace(" ","")
line=int(line)
df.iloc[counter,1]=line
counter+=1
#calculate coverage per sample
df["coverage"]=df["coverage"] / lib_size
os.remove(os.path.join(work_dir,"files.txt"))
os.remove(os.path.join(work_dir,"read-count.txt"))
#plot coverage per sample
plot(df,"Fold sequence coverage per sample",plot_file)
def normalise(work_dir):
df=pd.read_table(os.path.join(work_dir,"count","counts-aggregated.tsv"))
column_range=range(2,len(df.columns))
for i in column_range:
column_sum=df.iloc[:,i].sum()
df.iloc[:,i]=df.iloc[:,i] / column_sum * 1E8
df.iloc[:,i]=df.iloc[:,i].astype(int)
df.to_csv(os.path.join(work_dir,"count","counts-aggregated-normalised.csv"),index=False,header=True)
def join_counts(work_dir,library,crispr_library):
#load sgRNA names, used for merging data2
fasta=library[crispr_library]["fasta"]
guide_name_file=fasta.replace(".fasta","-guide_names.csv")
sgrnas_list00 = list(csv.reader(open(guide_name_file)))
sgrnas_list0 = []
for x in sgrnas_list00: #Flattens the list
for y in x:
sgrnas_list0.append(y)
#Generates sgRNA and gene columns for final output
sgRNA_output = []
gene_output = []
for n in sgrnas_list0:
#print(n)
s,g = n.split("_", 1)
sgRNA_output.append(g)
gene_output.append(s)
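# Illustrative split of a guide name on the first "_" (name hypothetical):
#   "GENE1_sgRNA_3" -> gene column "GENE1", sgRNA column "sgRNA_3";
#   sgRNA2 keeps the full name and is only used to join the count files.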
#Generates reference Pandas data frame from sgRNA list library file
d0 = {'sgRNA':pd.Series(sgRNA_output),'gene':pd.Series(gene_output),'sgRNA2':pd.Series(sgrnas_list0)}
dfjoin1 = pd.DataFrame(d0) #sgRNA/gene column required for MAGeCK, sgRNA2 is needed for join operation (deleted later)
#Generates a list of all count .txt files
file_list = glob.glob(os.path.join(work_dir,"count",'*.guidecounts.txt'))
file_list.sort()
file_list2 = [w.replace('.guidecounts.txt','') for w in file_list] #this list will generate the column headers for the output file (removes .txt)
#Counts number of .txt files in script folder
txtnumber = len(file_list)
#Generates list of lists for join function output
cycle = 1
master_count_list0 = []
while cycle <= txtnumber:
master_count_list0.append("count_list"+ str(cycle))
cycle +=1
master_count_list1 = []
for i in master_count_list0:
master_count_list1.append([i])
cycle = 1
master_sgrna_list0 = []
while cycle <= txtnumber:
master_sgrna_list0.append("sgrna_list"+ str(cycle))
cycle +=1
master_sgrna_list1 = []
for i in master_sgrna_list0:
master_sgrna_list1.append([i])
#Generates Pandas data frame and adds each of the count files in the folder to it after joining
counter = 0
while counter < txtnumber:
#Opens count files and extract counts and sgRNA names
file = list(csv.reader(open(file_list [counter])))
for x in file:
a = str(x)
if a.count(' ') > 1:
z,b,c = a.split()
bint = int(b)
cmod = c.replace("']","")
master_count_list1 [counter].append(bint)
master_sgrna_list1 [counter].append(cmod)
else:
b,c = a.split()
bint = b.replace("['","")
bint = int(bint)
cmod = c.replace("']","")
master_count_list1 [counter].append(bint)
master_sgrna_list1 [counter].append(cmod)
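# Each guidecounts.txt line is `uniq -c` output such as "    523 GENE1_sgRNA_3"
# (count and guide name hypothetical); the replace() calls above strip the "['"
# and "']" characters introduced by wrapping the csv.reader row in str().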
#Generates Pandas data frame for the data
d1 = {'sgRNA2':pd.Series(master_sgrna_list1 [counter]),
file_list2 [counter]:pd.Series(master_count_list1 [counter])}
df1 = pd.DataFrame(d1)
#Performs left join to merge Pandas data frames sets:
dfjoin1 = pd.merge(dfjoin1, df1, on='sgRNA2', how='left')
dfjoin1 = dfjoin1.fillna(0) #Replaces nan with zero
counter +=1
#Deletes sgRNA2 column from dataframe (only needed for joining, not for MAGeCK)
dfjoin2 = dfjoin1.drop(columns='sgRNA2')
#only keep base name as column names
column_number=len(dfjoin2.columns)
column_range=range(2,column_number)
for i in column_range:
old_name=dfjoin2.columns[i]
new_name=os.path.basename(old_name)
dfjoin2.rename(columns={list(dfjoin2)[i]:new_name},inplace=True)
#Writes all data to a single .tsv file, ready for MAGeCK
dfjoin2.to_csv(os.path.join(work_dir,"count",'counts-aggregated.tsv'), sep='\t',index=False)
def mageck(work_dir,script_dir,cnv,fdr):
if fdr > 0.25:
print("WARNING: MAGeCK FDR cut off set higher than default 0.25")
#determine number of samples in count table
header=subprocess.check_output(["head", "-1",os.path.join(work_dir,"count","counts-aggregated.tsv")])
header=header.decode("utf-8")
sample_count=header.count("\t") - 1
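# Illustrative header (sample names hypothetical): "sgRNA\tgene\tpre\tpost" has
# three tabs, so sample_count == 2, i.e. the two non-annotation columns.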
if sample_count == 2:
if "pre" and "post" in header:
print("Skipping MAGeCK (only CRISPR library samples present)")
return(None)
#check for stats.config
stats_config=os.path.join(work_dir,"stats.config")
if not os.path.exists(stats_config):
print("ERROR: stats.config not found (MAGeCK comparisons)")
return(None)
#create MAGeCK dir
os.makedirs(os.path.join(work_dir,"mageck"),exist_ok=True)
#load MAGeCK comparisons and run MAGeCK
df=pd.read_csv(os.path.join(work_dir,"stats.config"),sep=";")
sample_number=len(df)
sample_range=range(sample_number)
def cnv_com(script_dir,cnv,ccle_ref): #generate MAGeCK command for CNV correction
#check if specified cell line is in CCLE data list
cell_line_list=subprocess.check_output(["head",
"-1",os.path.join(script_dir,
"CCLE","CCLE_copynumber_byGene_2013-12-03.txt")])
cell_line=cnv
cnv_command=" --cnv-norm "+ccle_ref+" --cell-line "+cell_line
return(cnv_command,cell_line_list)
def CCLE_cell_line_exists(script_dir,cnv,ccle_ref):
cell_line_list=cnv_com(script_dir,cnv,ccle_ref)[1]
cell_line_list=cell_line_list.decode("utf-8")
cell_line=cnv
if not cell_line in cell_line_list:
print("ERROR: specified cell line not found in CCLE reference file")
print("Skipping CNV correction for MAGeCK")
return(False)
else:
return(True)
for i in sample_range:
test_sample=df.loc[i]["t"]
control_sample=df.loc[i]["c"]
mageck_output=test_sample+"_vs_"+control_sample
if not file_exists(os.path.join(work_dir,"mageck",mageck_output)):
os.makedirs(os.path.join(work_dir,"mageck",mageck_output),exist_ok=True)
prefix=os.path.join(work_dir,"mageck",mageck_output,mageck_output)
input=os.path.join(work_dir,"count","counts-aggregated.tsv")
log=" 2>> "+os.path.join(work_dir,"crispr.log")
mageck_command="mageck test -k "+input+" -t "+test_sample+" -c "+control_sample+" -n "+prefix+log
write2log(work_dir,mageck_command,"MAGeCK: ")
try:
print("Running MAGeCK without CNV correction")
subprocess.run(mageck_command, shell=True)
except:
print("MAGeCK failed. Check log")
return(None)
#check if CNV correction is requested and perform checks
if cnv != None:
ccle_ref=os.path.join(script_dir,"CCLE","CCLE_copynumber_byGene_2013-12-03.txt")
if not os.path.exists(ccle_ref):
print("WARNING: no CCLE copy number file found")
print("Downloading CCLE copy number file from https://data.broadinstitute.org")
url=" https://data.broadinstitute.org/ccle_legacy_data/dna_copy_number/CCLE_copynumber_byGene_2013-12-03.txt"
download_command="wget --directory-prefix="+os.path.join(script_dir,"CCLE")+url
write2log(work_dir,download_command,"Download CCLE file: ")
try:
subprocess.run(download_command, shell=True)
except:
sys.exit("ERROR: download failed, check log and url")
if not CCLE_cell_line_exists(script_dir,cnv,ccle_ref):
return
else:
cnv_command=cnv_com(script_dir,cnv,ccle_ref)[0]
else:
if not CCLE_cell_line_exists(script_dir,cnv,ccle_ref):
return
else:
cnv_command=cnv_com(script_dir,cnv,ccle_ref)[0]
if cnv != None:
cnv_dir=os.path.join(work_dir,"mageck-cnv",mageck_output)
if not file_exists(cnv_dir):
os.makedirs(cnv_dir, exist_ok=True)
prefix=os.path.join(work_dir,"mageck-cnv",mageck_output,mageck_output)
input=os.path.join(work_dir,"count","counts-aggregated.tsv")
log=" 2>> "+os.path.join(work_dir,"crispr.log")
mageck_command="mageck test -k "+input+" -t "+test_sample+" -c "+control_sample+" -n "+prefix+log
mageck_command=mageck_command+cnv_command
write2log(work_dir,mageck_command,"MAGeCK: ")
subprocess.run(mageck_command, shell=True)
#plot MAGeCK hits
file_list=glob.glob(os.path.join(work_dir,"mageck","*","*gene_summary.txt"))
for file in file_list:
save_path=os.path.dirname(file)
out_put_file=os.path.join(save_path,"logFC-plot.pdf")
if not file_exists(out_put_file):
plot_command="Rscript "+os.path.join(script_dir,"R","plot-hits.R ")+ \
work_dir+" "+file+" mageck "+save_path+" "+mageck_output+ \
" "+script_dir+" "+str(fdr)
write2log(work_dir,plot_command,"Plot hits MAGeCK: ")
try:
subprocess.run(plot_command, shell=True)
except:
sys.exit("ERROR: plotting hits failed, check log")
def remove_duplicates(work_dir):
df=pd.read_table(os.path.join(work_dir,"count","counts-aggregated.tsv"))
df.drop_duplicates(subset="sgRNA", keep="first", inplace=True)
df.to_csv(os.path.join(work_dir,"count","counts-aggregated.tsv"),index=False,header=True,sep="\t")
def convert4bagel(work_dir,library,crispr_library): #convert MAGeCK formatted count table to BAGEL2 format
count_table_bagel2=os.path.join(work_dir,"bagel",'counts-aggregated-bagel2.tsv')
if not file_exists(count_table_bagel2):
#obtain sequences of each guide
try:
fasta=library[crispr_library]["fasta"]
except KeyError:
sys.exit("ERROR: CRISPR library not specified in command line")
df_fasta=pd.read_csv(fasta, header=None)
#place sgRNA name and sequence in separate columns
df_name=df_fasta[df_fasta[0].str.contains(">")]
names=df_name.squeeze()#convert to series
names=names.reset_index(drop=True)#reset index
names=names.str.replace(">","")#remove >
names.name="sgRNA"
df_seq=df_fasta[~df_fasta[0].str.contains(">")]
seq=df_seq.squeeze()#convert to series
seq=seq.reset_index(drop=True)#reset index
seq.name="SEQUENCE"
df_join=pd.concat([names,seq],axis=1)#create df with names and sequences
#create gene column
df_join["gene"]=df_join["sgRNA"].str.split(pat="_").str[0]
#df_join.rename(columns = {"sgRNA":"gene"}, inplace = True)
#open MAGeCK formatted count table
count_file=os.path.join(work_dir,"count","counts-aggregated.tsv")
df_master = pd.read_csv(count_file, sep="\t")
# summarizeLib.py
# <NAME>
# 3.28.19
#
# module of functions that allow you to create per-cell / per-sample summary tables
import pandas as pd
import numpy as np
import math
def get_laud_db(database_):
""" returns the COSMIC database after lung and fathmm filter """
pSiteList = database_.index[database_['Primary site'] == 'lung'].tolist()
database_filter = database_.iloc[pSiteList]
keepRows = database_filter['FATHMM score'] >= 0.7
db_fathmm_filter = database_filter[keepRows]
db_fathmm_filter = db_fathmm_filter.reset_index(drop=True)
return db_fathmm_filter
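# Hypothetical usage sketch (the file name is an assumption, not from this module);
# the input only needs the 'Primary site' and 'FATHMM score' columns used above:
#   cosmic_df = pd.read_csv('CosmicMutantExport.tsv', sep='\t')
#   lung_db = get_laud_db(cosmic_df)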
# mutationsDF__fillIn()
# goal is to construct a cell-wise dataframe with mutations to each
# of EGFR, KRAS and BRAF. The challenge is getting the cells to line
# up, hence the for loop
#
# GOI needs to be lowercase
#
def mutationsDF_fillIn(GOI, GOI_df, mutationsDF_, all_cosmic_muts_):
mutName = GOI + '_mut'
for i in range(0,len(mutationsDF_.index)):
currCell = mutationsDF_['cell'][i]
rightIndex = GOI_df['cell'] == currCell
rightRow = GOI_df[rightIndex]
rightCell = rightRow['cell']
rightCell = str(rightCell).split()[1]
rightMut = rightRow['mutations']
rightMut = str(rightMut).split()[1]
currMut = ''.join(rightMut)
currMut = currMut.replace("'", "")
currMut = currMut.replace("]", "")
currMut = currMut.replace("[", "")
currMut = currMut.replace(" ", "")
mutStr = GOI + ' ' + currMut
if mutStr in all_cosmic_muts_:
mutationsDF_[mutName][i] = currMut
else:
mutationsDF_[mutName][i] = ''
# removeExtraCharacters_mutationsDF_()
# essentially converting mutationsDF_ mutation cols from lists to
# strings. makes downstream analysis easier
#
# GOI needs to be lowercase
#
def removeExtraCharacters_mutationsDF(GOI, mutationsDF_):
mutName = GOI + '_mut'
mutationsDF_[mutName] = mutationsDF_[mutName].str.replace("'", "") # remove quotes
mutationsDF_[mutName] = mutationsDF_[mutName].str.replace("[", "") # remove brackets
mutationsDF_[mutName] = mutationsDF_[mutName].str.replace("]", "") # remove brackets
mutationsDF_[mutName] = mutationsDF_[mutName].str.replace(" ", "") # remove whitespace?
# genericSummaryTableFillIn()
# fills in a given (metadata) field in summaryTable_. pulls from
# patientMetadata_ and goes cell-by-cell through
# summaryTable_, filling in fields like patientID/driver_gene
#
def genericSummaryTableFillIn(metaField, summaryField, summaryTable_, patientMetadata_):
for i in range(0,len(summaryTable_.index)):
currCell = summaryTable_['cell'].iloc[i]
currPlate = currCell.split('_')[1]
index_to_keep = patientMetadata_['plate'] == currPlate
keepRow = patientMetadata_[index_to_keep]
try:
currField = list(keepRow[metaField])[0]
summaryTable_[summaryField][i] = currField
except IndexError:
continue
#print('ERROR: plate not found') # these are just the plates we're NOT
# including in the analysis
# fusionsFillIn()
# Takes the existing fusionsDF (which is just a list of the five fusions
# we looked for, and what cells they're found in) and populates
# summaryTable_ with the fusion calls
#
# this works, but could be written much more cleanly
#
def fusionsFillIn(fusionsDF_, summaryTable_):
""" takes the existing fusionsDF and populates summaryTable_ with this shit """
for i in range(0, len(summaryTable_.index)):
currCell = summaryTable_['cell'].iloc[i]
for col in fusionsDF_.columns:
if currCell in list(fusionsDF_[col]):
summaryTable_['fusions_found'][i] = col
# translatedMutsFillIn_EGFR()
# need to make a 'mutations_found_translated' field that converts our
# 'raw' mutation calls to something that more resembles those reported
# in our clinical cols. Need a separate func for EGFR, bc there are
# so many potential variants to account for
#
def translatedMutsFillIn_EGFR(summaryTable_):
for i in range(0,len(summaryTable_.index)):
translatedList = []
currCell = summaryTable_['cell'].iloc[i]
currMuts_egfr = summaryTable_['mutations_found_EGFR'].iloc[i]
currMuts_egfr_split = currMuts_egfr.split(',')
for item in currMuts_egfr_split:
if 'delELR' in item:
translatedList.append('EGFR del19')
elif '745_' in item:
translatedList.append('EGFR del19')
elif '746_' in item:
translatedList.append('EGFR del19')
elif 'ins' in item:
translatedList.append('EGFR ins20')
elif item != '':
translatedList.append('EGFR ' + item)
summaryTable_['mutations_found_translated'][i] = translatedList
# translatedMutsFillIn_nonEGFR()
# need to make a 'mutations_found_translated' field that converts our
# 'raw' mutation calls to something that more resembles those reported
# in our clinical cols. This func handles BRAF and KRAS, bc there are
# only like 2 possible clinically reported muts for them, so we might
# as well keep everything
#
# want GOI to be capitalized here
def translatedMutsFillIn_nonEGFR(GOI, summaryTable_):
colName = 'mutations_found_' + GOI
for i in range(0,len(summaryTable_.index)):
translatedList = []
currCell = summaryTable_['cell'].iloc[i]
currMuts = summaryTable_[colName].iloc[i]
currMuts_split = currMuts.split(',')
for item in currMuts_split:
if item != '' and '?' not in item:
translatedList.append(GOI + ' ' + item)
summaryTable_['mutations_found_translated'][i] = summaryTable_['mutations_found_translated'][i] + translatedList
# translatedMutsFillIn_fusions()
# need to make a 'mutations_found_translated' field that converts our
# 'raw' mutation calls to something that more resembles those reported
# in our clinical cols. for fusions this time
#
def translatedMutsFillIn_fusions(summaryTable_):
""" converts 'raw' mutation calls to something that more resembles
those reported in our clinical cols. for fusions """
for i in range(0,len(summaryTable_.index)):
currCell = summaryTable_['cell'].iloc[i]
currFus = summaryTable_['fusions_found'].iloc[i]
if not pd.isnull(currFus):
"""
Routines for analysing output data.
:Author:
<NAME>
"""
import warnings
from typing import Tuple
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
def fit_function(x_data, *params):
p, d = x_data
p_th, nu, A, B, C = params
x = (p - p_th)*d**(1/nu)
return A + B*x + C*x**2
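# fit_function implements a quadratic finite-size-scaling ansatz:
#   f(p, d) ~ A + B*x + C*x**2, with x = (p - p_th) * d**(1/nu),
# so at p == p_th the curves for every d collapse onto the constant A.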
def get_fit_params(p_list, d_list, f_list, params_0=None) -> np.ndarray:
"""Get fitting params."""
# Curve fitting inputs.
x_data = np.array([p_list,d_list])
# Target outputs.
y_data = f_list
# Curve fit.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
params_opt, _ = curve_fit(fit_function, x_data, y_data, p0=params_0)
return params_opt
def fit_fss_params(df_filt: pd.DataFrame,p_left_val: float,p_right_val: float,p_nearest: float,n_bs: int = 100,) -> Tuple[np.ndarray, np.ndarray, pd.DataFrame]:
"""Get optimized parameters and data table."""
# Truncate error probability between values.
df_trunc = df_filt[(p_left_val <= df_filt['probability']) & (df_filt['probability'] <= p_right_val)].copy()
df_trunc = df_trunc.dropna(subset=['p_est'])
d_list = df_trunc['d'].values
p_list = df_trunc['probability'].values
f_list = df_trunc['p_est'].values
# Initial parameters to optimize.
f_0 = df_trunc[df_trunc['probability'] == p_nearest]['p_est'].mean()
if pd.isna(f_0):
'''
Run this to get html files
This file contains code to obtain html data from oslo bors and yahoo finance
'''
import argparse
import re
import threading
import time
from pprint import pprint
from typing import List
import sys
import pathlib
import os
import numpy as np
import pandas as pd
import pypatconsole as ppc
from bs4 import BeautifulSoup as bs
from pandas import DataFrame, to_numeric
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from tqdm import tqdm
import config as cng
import yfinance_hotfix as yf
import utils
def dump_assert(file: str):
assert file is not None, 'File parameter must be specified when dump=True'
def get_osebx_htmlfile(url: str, timeout: int=cng.DEFAULT_TIMEOUT, wait_target_class: str=None,
verbose: int=1, dump: bool=True, file: str=None) -> str:
'''Load OSEBX html files using selenium'''
if verbose >= 1: print(f'Gathering data from {url}')
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--disable-gpu")
driver = webdriver.Chrome(options=chrome_options)
if verbose >= 2: print('Initialized chromedriver')
driver.get(url)
if verbose >= 2: print('Waiting for target HTML class to appear')
# If the webpage dynamically loads the table with the stock information. This code will force the webdriver
# wait until the wanted element is loaded.
if not wait_target_class is None:
try:
WebDriverWait(driver, timeout).until(
EC.presence_of_element_located((By.CLASS_NAME, wait_target_class))
)
except:
print(f'Timeout: Could not load class {wait_target_class} from {url}')
driver.quit()
exit()
if verbose >= 2: print('Element located')
page_src = driver.page_source
driver.quit()
if dump:
if verbose >= 1: print(f'Dumping HTML file: {file}')
dump_assert(file)
with open(file, 'w+') as file:
file.write(page_src)
return page_src
def get_osebx_htmlfiles():
'''Get OSEBX HTML files'''
get_osebx_htmlfile(url=cng.BORS_QUOTES_URL,
wait_target_class=cng.QUOTES_WAIT_TARGET_CLASS,
dump=True,
file=cng.QUOTES_HTML_DATE_FILE,
verbose=2)
get_osebx_htmlfile(url=cng.BORS_RETURNS_URL,
wait_target_class=cng.RETURNS_WAIT_TARGET_CLASS,
dump=True,
file=cng.RETURNS_HTML_DATE_FILE,
verbose=2)
def scrape_osebx_html(quotes: str=None, returns: str=None, verbose: int=0, dump: bool=True,
file: str=None) -> pd.DataFrame:
'''
Scrape stocks from oslo bors HTML files.
HTML of websites of quotes and returns
should be located in same folder this file.
quotes: https://www.oslobors.no/ob_eng/markedsaktivitet/#/list/shares/quotelist/ob/all/all/false
returns: https://www.oslobors.no/ob_eng/markedsaktivitet/#/list/shares/return/ob/all/all/false
'''
if quotes is None:
quotes = cng.QUOTES_HTML_FILE
if returns is None:
returns = cng.RETURNS_HTML_FILE
with open(quotes) as html_source:
soup_quotes = bs(html_source, 'html.parser')
with open(returns) as html_source:
soup_return = bs(html_source, 'html.parser')
# Filter out the stock tables
html_quotes = soup_quotes.find('div', class_="ng-scope").find('ui-view').find('ui-view').find('tbody').find_all('tr')
html_return = soup_return.find('div', class_="ng-scope").find('ui-view').find('ui-view').find('tbody').find_all('tr')
tickers = []
names = []
lasts = []
buys = []
sells = []
tradecounts = []
marketcaps = []
sectors = []
infos = []
profits_today = []
profits_1wk = []
profits_1month = []
profits_ytd = []
profits_1yr = []
# Create lists with features. Only preprocessing for strings are done (values are all strings).
# Further preprocessing will be done later when the values are in a pandas DataFrame.
for quotesrow, returnrow in tqdm(zip(html_quotes, html_return), total=len(html_quotes), disable=verbose):
# Scrape ticker, name, marketcap, sector and info.
tickers.append(quotesrow.a.text)
names.append(quotesrow.find('td', {'data-header':'Navn'}).text)
lasts.append(quotesrow.find('td', {'data-header':'Last'}).text.replace(',', ''))
buys.append(quotesrow.find('td', {'data-header':'Buy'}).text.replace(',', ''))
sells.append(quotesrow.find('td', {'data-header':'Sell'}).text.replace(',', ''))
tradecounts.append(quotesrow.find('td', {'data-header':'No. of trades'}).text.replace(',', ''))
marketcaps.append(quotesrow.find('td', {'data-header':'Market cap (MNOK)'}).text.replace(',', ''))
# Marketcap unit is in millions, multiply by 1e6 to get absolute values
sectors.append(quotesrow.find('td', class_='icons').get('title'))
# Info is whether instrument is a Liquidity provider or not
infos.append('LP' if 'fa-bolt' in quotesrow.find('td', class_='infoIcon').i.get('class') else np.nan)
# Scrape return values
# Values are percentages, and are currently in text form. Divide by 100 to get normal values
profits_today.append(returnrow.find('td', class_='CHANGE_PCT_SLACK').text.replace('%', ''))
profits_1wk.append(returnrow.find('td', class_='CHANGE_1WEEK_PCT_SLACK').text.replace('%', ''))
profits_1month.append(returnrow.find('td', class_='CHANGE_1MONTH_PCT_SLACK').text.replace('%', ''))
profits_ytd.append(returnrow.find('td', class_='CHANGE_YEAR_PCT_SLACK').text.replace('%', ''))
profits_1yr.append(returnrow.find('td', class_='CHANGE_1YEAR_PCT_SLACK').text.replace('%', ''))
if verbose >= 1:
print(f'Ticker: {tickers[-1]}')
print(f'Name: {names[-1]}')
print(f'Last: {lasts[-1]}')
print(f'Buy: {buys[-1]}')
print(f'Sell: {sells[-1]}')
print(f'Cap: {marketcaps[-1]}')
print(f'Sector: {sectors[-1]}')
print(f'Info: {infos[-1]}')
print(f'Profit today: {profits_today[-1]}')
print(f'Profit 1 week: {profits_1wk[-1]}')
print(f'Profit 1 month: {profits_1month[-1]}')
print(f'Profit YTD: {profits_ytd[-1]}')
print(f'Profit 1 year: {profits_1yr[-1]}')
print()
df = DataFrame(dict(
ticker=tickers,
name=names,
sector=sectors,
last_=lasts, # DataFrame.last is a method, hence the underscore
buy=buys,
sell=sells,
tradecount=tradecounts,
info=infos,
marketcap=marketcaps,
profit_today=profits_today,
profit_1wk=profits_1wk,
profit_1month=profits_1month,
profit_ytd=profits_ytd,
profit_1yr=profits_1yr
))
# Turn returns to floats then divide by 100 to convert from percentages to "numbers"
columns_to_num = ['profit_today', 'profit_1wk', 'profit_1month', 'profit_ytd', 'profit_1yr']
df[columns_to_num] = df[columns_to_num].apply(to_numeric, errors='coerce') / 100
# Turn other things to numeric as well
# coerce turns missing or invalid values to nan
df.last_ = to_numeric(df.last_, errors='coerce')
df.buy = to_numeric(df.buy, errors='coerce')
df.sell = to_numeric(df.sell, errors='coerce')
df.tradecount = to_numeric(df.tradecount, errors='coerce')
if dump:
dump_assert(file)
df.to_csv(file, index=False)
return df
def yahoo_querier_(ticker: str, featdict: dict) -> None:
'''
Adds ticker information to dictionary inplace
At the time of writing this code, Yahoo Finance is behaving unreliably:
for some reason MOWI, NEL and several other tickers are not properly indexed on Yahoo Finance.
The Python scraper should work fine.
'''
ticker_string = ticker.strip()+'.OL'
ticker_string = re.sub(r'\s+', '-', ticker_string)
t = yf.Ticker(ticker_string)
featdict[ticker] = t.info
sys.stdout.write(f'{ticker_string} ')
sys.stdout.flush()
return
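# Illustrative ticker normalisation (tickers hypothetical): " NOD " -> "NOD.OL",
# and embedded whitespace becomes "-", e.g. "SAS AB" -> "SAS-AB.OL".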
def get_yahoo_stats(tickers=None, verbose: int=1, dump: bool=True, file: str=None) -> pd.DataFrame:
'''
Get Yahoo stuff
'''
if tickers is None:
tickers = pd.read_csv(cng.BORS_CSV_DATE_FILE).ticker
featdict = dict()
threads = [threading.Thread(target=yahoo_querier_, args=(ticker, featdict)) for ticker in tickers]
if verbose >= 2: print('Starting threads\n')
utils.run_threads(
threads=threads,
chunksize=20,
start_interval=0.01,
chunk_interval=1)
if verbose >= 2: print('Creating dataframe')
df = pd.DataFrame(featdict).T
df.index.name = 'ticker'
df.reset_index(inplace=True)
if dump:
if verbose >= 2: print('Dumping DataFrame')
dump_assert(file)
df.to_csv(file, index=False)
if verbose >= 2: print('Returning dataframe')
return df
def combine_osebx_yahoo(df_osebx: pd.DataFrame=None, df_yahoo: pd.DataFrame=None):
'''
Combine OSEBX and Yahoo datasets
'''
if df_osebx is None:
df_osebx = pd.read_csv(cng.BORS_CSV_DATE_FILE)
if df_yahoo is None:
df_yahoo = pd.read_csv(cng.YAHOO_CSV_DATE_FILE)
df_combined = pd.merge(df_osebx, df_yahoo, on=cng.MERGE_DFS_ON, suffixes=('_osebx', '_yahoo'))
import pandas as pd
if __name__ == '__main__':
tennet_delta_df = pd.read_csv('../data/tennet_balans_delta/tennet_balans_delta_okt_2020_nov_2021.csv')
tennet_delta_df.index = pd.to_datetime(tennet_delta_df['time'], errors='coerce')
import pandas as pd
import numpy as np
import time
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
import io
import base64
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from flask import render_template
from lol_online.db import get_db
from . import champion_dictionary
def oldest_game(df_games):
'''returns time of oldest stored game'''
ts = df_games.creation.min() // 1000
return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(ts))
def newest_game(df_games):
'''returns time of most recent stored game'''
ts = df_games.creation.max() // 1000
return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(ts))
def played_unplayed_champions(df_p):
'''returns lists of champions that the given play has played previously and has never played'''
played = set(df_p.champion_id.apply(champion_dictionary.id_to_champion))
unplayed = list(set(champion_dictionary.champion_to_id_dict.keys()) - played)
played = sorted(list(played))
return played, unplayed
def get_player_games(account_id, df_players):
'''extracts desired player's rows from all players'''
return df_players[df_players.player_id == account_id]
def players_by_team(account_id, df_p, df_other_players):
'''groups players dataframe into allies and enemies then returns the seperated dataframes'''
# df_other_players holds non-player rows, df_a are ally, df_e are enemy
df_a = pd.merge(
df_other_players,
df_p,
how='inner',
left_on=['game_id','win'],
right_on=['game_id','win'],
suffixes=[None,'_player']
)
# created inverted_win in order to inner join as pandas cant join on inequalities
df_other_players['inverted_win'] = np.where(df_other_players.win, 0, 1)
df_e = pd.merge(
df_other_players,
df_p,
how='inner',
left_on=['game_id','inverted_win'],
right_on=['game_id','win'],
suffixes=[None,'_player']
)
df_a.drop(['player_id_player', 'champion_id_player'], axis=1, inplace=True)
# in df_e, win state is flipped in order to align with current player's perspective
df_e.drop(['player_id_player', 'champion_id_player', 'win_player', 'win'], axis=1, inplace=True)
df_e.rename({'inverted_win': 'win'}, axis=1, inplace=True)
return df_a, df_e
def join_player_games(df_p, df_games):
'''merges player and games dataframes on game_id and returns the resulting dataframe'''
df_pg = pd.merge(df_games, df_p, how='inner', left_index=True, right_on='game_id')
df_pg.set_index('game_id', inplace=True)
df_pg.drop(['queue','creation','player_id'], axis=1, inplace=True)
df_pg['player_team'] = np.where(df_pg.win, df_pg.winner, np.where(df_pg.winner==100, 200, 100))
return df_pg
def winrate_by_champ(df):
'''calculates winrate per champion and returns the resulting dataframe'''
grouped = df.groupby('champion_id').win
df_g = pd.DataFrame({'games': grouped.count(), 'wins': grouped.sum()})
df_g['losses'] = df_g.games - df_g.wins
df_g['winrate'] = df_g.wins / df_g.games
p_value = lambda champion: stats.binom_test(champion.wins, champion.games) # p = 0.05
df_g['p_value'] = df_g.apply(p_value, axis=1)
df_g.index = pd.Series(df_g.index).apply(champion_dictionary.id_to_champion)
return df_g
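# The two-sided binomial test above asks how surprising each champion's win count
# is under a 50% win-rate null; e.g. (hypothetical record) 7 wins in 10 games gives
# p ~= 0.34, which would not be flagged at the 0.05 level.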
def blue_red_winrate(df_pg):
'''calculates player winrate depending on side of map and returns a dataframe containing stats'''
grouped = df_pg.groupby(['win', 'winner']).game_id.count()
blue = [grouped[1, 100], grouped[0, 200]]
red = [grouped[1, 200], grouped[0, 100]]
df_brwr = pd.DataFrame([blue, red], index=['blue', 'red'], columns=['wins', 'losses'])
df_brwr['games'] = df_brwr.wins + df_brwr.losses
df_brwr['winrate'] = df_brwr.wins / df_brwr.games
df_brwr['p_value'] = df_brwr.apply(
lambda x: stats.binom_test(x.wins, x.games), axis=1
)
return df_brwr
def their_yasuo_vs_your_yasuo(df_awr, df_ewr):
'''is each champion win more when on your team or on enemy team?'''
df_yas = pd.DataFrame({'games_with': df_awr.games, 'winrate_with': df_awr.winrate,
'games_against': df_ewr.games, 'winrate_against': df_ewr.winrate})
df_yas['delta_winrate'] = df_yas.winrate_with - (1 - df_yas.winrate_against)
return df_yas.sort_values(by='delta_winrate')
def average_game_durations(df_pg):
'''
returns multiindexed dataframe of average game durations grouped by win and forfeit
also has overall/overall for overall average duration
'''
df = pd.DataFrame()
df['duration'] = df_pg.duration.copy()
df['win'] = np.where(df_pg.win, 'win', 'loss')
df['forfeit'] = np.where(df_pg.forfeit, 'forfeit', 'non-forfeit')
format_duration = lambda x: '{}:{:02d}'.format(int(x/60), int(x%60))
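# format_duration renders seconds as M:SS, e.g. a hypothetical 1865 s game -> '31:05'.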
df_duration = df.groupby(['forfeit','win']).mean().loc[:,['duration']]
df_duration['duration'] = df_duration.duration.apply(format_duration)
df_duration.rename({'duration':'average_duration'}, axis=1, inplace=True)
df_duration.loc[('overall','overall'),:] = format_duration(df.duration.mean())
return df_duration
def game_durations_plot(df_pg, string=False):
'''
generates figure of game durations with three subplots for all, forfeit and non-forfeit games
converts figure to an html-renderable image and returns it
reference for this conversion:
https://gitlab.com/snippets/1924163
https://stackoverflow.com/questions/50728328/python-how-to-show-matplotlib-in-flask
'''
mpl.use('agg')
plt.style.use('ggplot')
fig, ax = plt.subplots(3, sharex=True)
ax[0].set_title('all')
ax[1].set_title('non-forfeits')
ax[2].set_title('forfeits')
low_min = df_pg.duration.min() // 60
low_bin = low_min * 60
high_min = df_pg.duration.max() // 60
high_bin = (high_min + 1) * 60
nbins = (high_min - low_min) + 2
bins = np.linspace(low_bin, high_bin, nbins)
# populate the subplots
game_durations_subplot(df_pg, ax[0], bins, None)
game_durations_subplot(df_pg, ax[1], bins, False)
game_durations_subplot(df_pg, ax[2], bins, True)
low_label = (low_min + 4) // 5 * 5
high_label = high_min // 5 * 5
low_tick = -low_min % 5
high_tick = low_tick + high_label - low_label
plt.xticks(
ticks=range(low_tick, high_tick + 5, 5),
labels=range(low_label, high_label + 5, 5)
)
plt.xlabel('game duration (min)')
plt.legend()
png_image = io.BytesIO()
FigureCanvas(fig).print_png(png_image)
png_image_b64_string = 'data:image/png;base64,'
png_image_b64_string += base64.b64encode(png_image.getvalue()).decode('utf8')
if string:
return png_image_b64_string
else:
return render_template('test_img.html', image=png_image_b64_string)
def game_durations_subplot(df_pg, axis, bins, forfeit=None):
'''fills subplots of figure generated in game_durations_plot'''
if forfeit:
df = df_pg[df_pg.forfeit == 1]
elif forfeit == False:
df = df_pg[df_pg.forfeit == 0]
else:
df = df_pg
win = df[df.win == 1]
loss = df[df.win == 0]
all_cut = pd.cut(df_pg.duration, bins=bins, right=False)
win_cut = pd.cut(win.duration, bins=bins, right=False)
import os
import csv
import pandas as pd
from os.path import join, basename
import pathlib
import sys
import seaborn as sns
import numpy as np
sns.set_theme()
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
#pd.set_option("display.max_rows", None, "display.max_columns", None)
MO_FACTOR=1.4
ERR_FACTOR=2
def readData(frames, filename):
df = pd.read_csv(filename)
frames.append(df)
return
def dateEarlierThan(date1, date2):
# date is in the following form: Tue_Sep_29_22:53:21_2020
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
weekday1,month1,day1,time1,year1,tz = date1.split("_")
weekday2,month2,day2,time2,year2,tz = date2.split("_")
if int(year1) < int(year2):
return True
elif int(year1) > int(year2):
return False
elif month.index(month1) < month.index(month2):
return True
elif month.index(month1) > month.index(month2):
return False
elif int(day1) < int(day2):
return True
elif int(day1) > int(day2):
return False
return False
curDir=str(pathlib.Path(__file__).parent.absolute())
#dataDir = join(curDir, "../data_test/")
dataDir = join(curDir, "../data/")
experiments = []
solvers = []
solverToDate = {} # solver to the latest commit date
solverIdToExperiments = {}
for filename in os.listdir(dataDir):
if filename == "benchmarks.csv":
benchmarks = pd.read_csv(join(dataDir, filename))
else:
experiments.append(join(dataDir, filename))
solver,date,commit,args,timeout=filename.split("%%")
solverId = solver + " " + " ".join(args.split("+"))
if solverId not in solvers:
solvers.append(solverId)
if solverId not in solverIdToExperiments:
solverIdToExperiments[solverId] = []
solverIdToExperiments[solverId].append(join(dataDir, filename))
if solver not in solverToDate:
solverToDate[solver] = date
elif dateEarlierThan(solverToDate[solver], date):
solverToDate[solver] = date
families = benchmarks.family.unique().tolist()
families = ['all'] + families
valueToCompare = ["wall time"]
results = ["all", "sat", "unsat"]
benchmark_to_family = {}
for row in benchmarks.values.tolist():
fam = row[0]
benchmark = row[1]
if benchmark not in benchmark_to_family:
benchmark_to_family[benchmark] = []
benchmark_to_family[benchmark].append(fam)
def getTime(time, result, limit):
if time > limit or result == 'to':
return limit
elif result == 'mo':
return limit * MO_FACTOR
elif result == 'err':
return limit * ERR_FACTOR
else:
return time
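# Summary of getTime with a hypothetical limit of 3600 s:
#   timeouts ('to' or time > limit) -> 3600
#   memory-outs ('mo')              -> 3600 * MO_FACTOR (1.4)
#   errors ('err')                  -> 3600 * ERR_FACTOR (2)
#   otherwise                       -> the measured time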
def getDataForSolver(solverId, metric, limit):
experimentsForSolverId = solverIdToExperiments[solverId]
date = solverToDate[solverId.split()[0]]
for experiment in experimentsForSolverId:
if basename(experiment).split("%%")[1] == date:
df = pd.read_csv(experiment)
# pylint: disable=E1101
from datetime import datetime, timedelta
from pandas.compat import range, lrange, zip, product
import numpy as np
from pandas import Series, TimeSeries, DataFrame, Panel, isnull, notnull, Timestamp
from pandas.tseries.index import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
import pandas.tseries.offsets as offsets
import pandas as pd
import unittest
import nose
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
bday = BDay()
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest
class TestResample(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
self.assertEquals(g.ngroups, 2593)
self.assert_(notnull(g.mean()).all())
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
# GH2763 - return in put dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
self.assertEquals(len(r.columns), 10)
self.assertEquals(len(r.index), 2593)
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range('1/1/2000', periods=4, freq='5min'))
assert_series_equal(result, expected)
self.assert_(result.index.name == 'index')
result = s.resample('5min', how='mean', closed='left', label='right')
expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range('1/1/2000 00:05', periods=3,
freq='5min'))
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min', how='last')
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun', how='last')
self.assertEquals(len(result), 3)
self.assert_((result.index.dayofweek == [6, 6, 6]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/9/2005'])
self.assertEquals(result.irow(2), s.irow(-1))
result = s.resample('W-MON', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [0, 0]).all())
self.assertEquals(result.irow(0), s['1/3/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-TUE', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [1, 1]).all())
self.assertEquals(result.irow(0), s['1/4/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-WED', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [2, 2]).all())
self.assertEquals(result.irow(0), s['1/5/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-THU', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [3, 3]).all())
self.assertEquals(result.irow(0), s['1/6/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-FRI', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [4, 4]).all())
self.assertEquals(result.irow(0), s['1/7/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
# to biz day
result = s.resample('B', how='last')
self.assertEquals(len(result), 7)
self.assert_((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/3/2005'])
self.assertEquals(result.irow(5), s['1/9/2005'])
self.assert_(result.index.name == 'index')
def test_resample_frame_basic(self):
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A')
assert_series_equal(result['A'], df['A'].resample('A'))
result = df.resample('M')
assert_series_equal(result['A'], df['A'].resample('M'))
df.resample('M', kind='period')
df.resample('W-WED', kind='period')
def test_resample_loffset(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right',
loffset=timedelta(minutes=1))
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset='1min')
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset=Minute(1))
assert_series_equal(result, expected)
self.assert_(result.index.freq == Minute(5))
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun', how='last')
expected = ser.resample('w-sun', how='last', loffset=-bday)
self.assertEqual(result.index[0] - bday, expected.index[0])
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min', fill_method='pad')
self.assertEquals(len(result), 12961)
self.assertEquals(result[0], s[0])
self.assertEquals(result[-1], s[-1])
self.assert_(result.index.name == 'index')
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t', fill_method='ffill', limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(self):
s = self.series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min', how='ohlc')
self.assertEquals(len(result), len(expect))
self.assertEquals(len(result.columns), 4)
xs = result.irow(-2)
self.assertEquals(xs['open'], s[-6])
self.assertEquals(xs['high'], s[-6:-1].max())
self.assertEquals(xs['low'], s[-6:-1].min())
self.assertEquals(xs['close'], s[-2])
xs = result.irow(0)
self.assertEquals(xs['open'], s[0])
self.assertEquals(xs['high'], s[:5].max())
self.assertEquals(xs['low'], s[:5].min())
self.assertEquals(xs['close'], s[4])
def test_resample_ohlc_dataframe(self):
df = (pd.DataFrame({'PRICE': {Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex_axis(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H', how='ohlc')
exp = pd.concat([df['VOLUME'].resample('H', how='ohlc'),
df['PRICE'].resample('H', how='ohlc')],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H', how='ohlc')
exp.columns = pd.MultiIndex.from_tuples([('a', 'c', 'open'), ('a', 'c', 'high'),
('a', 'c', 'low'), ('a', 'c', 'close'), ('b', 'd', 'open'),
('b', 'd', 'high'), ('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4,12),index=[2000,2000,2000,2000],columns=[ Period(year=2000,month=i+1,freq='M') for i in range(12) ])
df.iloc[3,:] = np.nan
result = df.resample('Q',axis=1)
expected = df.groupby(lambda x: int((x.month-1)/3),axis=1).mean()
expected.columns = [ Period(year=2000,quarter=i+1,freq='Q') for i in range(4) ]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right')
result = bs.resample('8H')
self.assertEquals(len(result), 22)
tm.assert_isinstance(result.index.freq, offsets.DateOffset)
self.assert_(result.index.freq == offsets.Hour(8))
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period')
expected = ts.resample('A-DEC')
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period')
expected = ts.resample('A-JUN')
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min(self):
def _ohlc(group):
if isnull(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50',
freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', how='ohlc', closed='right',
label='right')
self.assert_((resampled.ix['1/1/2000 00:00'] == ts[0]).all())
exp = _ohlc(ts[1:31])
self.assert_((resampled.ix['1/1/2000 00:05'] == exp).all())
exp = _ohlc(ts['1/1/2000 5:55:01':])
self.assert_((resampled.ix['1/1/2000 6:00:00'] == exp).all())
def test_downsample_non_unique(self):
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample('M', how='mean')
expected = ts.groupby(lambda x: x.month).mean()
self.assertEquals(len(result), 2)
assert_almost_equal(result[0], expected[1])
assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique(self):
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
self.assertRaises(Exception, ts.asfreq, 'B')
def test_resample_axis1(self):
rng = date_range('1/1/2000', '2/29/2000')
df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
index=['a', 'b', 'c'])
result = df.resample('M', axis=1)
expected = df.T.resample('M').T
tm.assert_frame_equal(result, expected)
def test_resample_panel(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1)
def p_apply(panel, f):
result = {}
for item in panel.items:
result[item] = f(panel[item])
return Panel(result, items=panel.items)
expected = p_apply(panel, lambda x: x.resample('M'))
tm.assert_panel_equal(result, expected)
panel2 = panel.swapaxes(1, 2)
result = panel2.resample('M', axis=2)
expected = p_apply(panel2, lambda x: x.resample('M', axis=1))
tm.assert_panel_equal(result, expected)
def test_resample_panel_numpy(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', how=lambda x: x.mean(1), axis=1)
expected = panel.resample('M', how='mean', axis=1)
tm.assert_panel_equal(result, expected)
panel = panel.swapaxes(1, 2)
result = panel.resample('M', how=lambda x: x.mean(2), axis=2)
expected = panel.resample('M', how='mean', axis=2)
tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks(self):
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the middle
# of a desired interval
rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ['t', '5t', '15t', '30t', '4h', '12h']
for freq in freqs:
result = ts[2:].resample(freq, closed='left', label='left')
expected = ts.resample(freq, closed='left', label='left')
assert_series_equal(result, expected)
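# Illustrative sketch (not part of the original test suite): with a frequency that
# evenly divides a day, such as '5min', the bin edges are anchored to midnight
# rather than to the first observation, so a series starting at 04:13 still gets
# edges at 04:10, 04:15, ... This uses the same legacy ``how=`` resample API as
# the surrounding tests.
def _example_anchored_resample_sketch(self):
    rng = date_range('1/1/2000 04:13:00', periods=60, freq='min')
    ts = Series(np.random.randn(len(rng)), index=rng)
    resampled = ts.resample('5min', how='mean', closed='left', label='left')
    # first bin label is the anchored edge 04:10, not the first timestamp 04:13
    self.assert_(resampled.index[0] == Timestamp('1/1/2000 04:10:00'))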
def test_resample_base(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', base=2)
exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
freq='5min')
self.assert_(resampled.index.equals(exp_rng))
def test_resample_daily_anchored(self):
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample('D', closed='left', label='left')
expected = ts.resample('D', closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet(self):
# GH #1259
rng = date_range('1/1/2000', '12/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('M', kind='period')
exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
self.assert_(result.index.equals(exp_index))
def test_resample_empty(self):
ts = _simple_ts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assert_(len(result) == 0)
self.assert_(result.index.freqstr == 'A-DEC')
result = ts.resample('A', kind='period')
self.assert_(len(result) == 0)
self.assert_(result.index.freqstr == 'A-DEC')
xp = DataFrame()
rs = xp.resample('A')
assert_frame_equal(xp, rs)
def test_weekly_resample_buglet(self):
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('W')
expected = ts.resample('W-SUN')
assert_series_equal(resampled, expected)
def test_monthly_resample_error(self):
# #1451
dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
result = ts.resample('M')
def test_resample_anchored_intraday(self):
# #1471, #1458
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('M')
expected = df.resample('M', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('M', closed='left')
exp = df.tshift(1, freq='D').resample('M', kind='period')
exp = exp.to_timestamp(how='end')
tm.assert_frame_equal(result, exp)
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('Q')
expected = df.resample('Q', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('Q', closed='left')
expected = df.tshift(1, freq='D').resample('Q', kind='period',
closed='left')
expected = expected.to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h')
resampled = ts.resample('M')
self.assert_(len(resampled) == 1)
def test_resample_anchored_monthstart(self):
ts = _simple_ts('1/1/2000', '12/31/2002')
freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']
for freq in freqs:
result = ts.resample(freq, how='mean')
def test_corner_cases(self):
# miscellaneous test coverage
rng = date_range('1/1/2000', periods=12, freq='t')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('5t', closed='right', label='left')
ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
self.assert_(result.index.equals(ex_index))
len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0]
# it works
result = len0pts.resample('A-DEC')
self.assert_(len(result) == 0)
# resample to periods
ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h')
result = ts.resample('M', kind='period')
self.assert_(len(result) == 1)
self.assert_(result.index[0] == Period('2000-04', freq='M'))
def test_anchored_lowercase_buglet(self):
dates = date_range('4/16/2012 20:00', periods=50000, freq='s')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('d')
def test_upsample_apply_functions(self):
# #1596
rng = pd.date_range('2012-06-12', periods=4, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('20min', how=['mean', 'sum'])
tm.assert_isinstance(result, DataFrame)
def test_resample_not_monotonic(self):
rng = pd.date_range('2012-06-12', periods=200, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample('D', how='sum')
exp = ts.sort_index().resample('D', how='sum')
assert_series_equal(result, exp)
def test_resample_median_bug_1688(self):
for dtype in ['int64','int32','float64','float32']:
df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0),
datetime(2012, 1, 1, 0, 5, 0)],
dtype = dtype)
result = df.resample("T", how=lambda x: x.mean())
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
result = df.resample("T", how="median")
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(self):
ts = _simple_ts('1/1/2000', '4/1/2000')
result = ts.resample('M', how=lambda x: x.mean())
exp = ts.resample('M', how='mean')
tm.assert_series_equal(result, exp)
self.assertRaises(Exception, ts.resample, 'M',
how=[lambda x: x.mean(), lambda x: x.std(ddof=1)])
result = ts.resample('M', how={'foo': lambda x: x.mean(),
'bar': lambda x: x.std(ddof=1)})
foo_exp = ts.resample('M', how='mean')
bar_exp = ts.resample('M', how='std')
tm.assert_series_equal(result['foo'], foo_exp)
tm.assert_series_equal(result['bar'], bar_exp)
def test_resample_unequal_times(self):
# #1772
start = datetime(1999, 3, 1, 5)
# end hour is less than start
end = datetime(2012, 7, 31, 4)
bad_ind = date_range(start, end, freq="30min")
df = DataFrame({'close': 1}, index=bad_ind)
# it works!
df.resample('AS', 'sum')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def _simple_pts(start, end, freq='D'):
rng = period_range(start, end, freq=freq)
return TimeSeries(np.random.randn(len(rng)), index=rng)
from pandas.tseries.frequencies import MONTHS, DAYS
class TestResamplePeriodIndex(unittest.TestCase):
_multiprocess_can_split_ = True
def test_annual_upsample_D_s_f(self):
self._check_annual_upsample_cases('D', 'start', 'ffill')
def test_annual_upsample_D_e_f(self):
self._check_annual_upsample_cases('D', 'end', 'ffill')
def test_annual_upsample_D_s_b(self):
self._check_annual_upsample_cases('D', 'start', 'bfill')
def test_annual_upsample_D_e_b(self):
self._check_annual_upsample_cases('D', 'end', 'bfill')
def test_annual_upsample_B_s_f(self):
self._check_annual_upsample_cases('B', 'start', 'ffill')
def test_annual_upsample_B_e_f(self):
self._check_annual_upsample_cases('B', 'end', 'ffill')
def test_annual_upsample_B_s_b(self):
self._check_annual_upsample_cases('B', 'start', 'bfill')
def test_annual_upsample_B_e_b(self):
self._check_annual_upsample_cases('B', 'end', 'bfill')
def test_annual_upsample_M_s_f(self):
self._check_annual_upsample_cases('M', 'start', 'ffill')
def test_annual_upsample_M_e_f(self):
self._check_annual_upsample_cases('M', 'end', 'ffill')
def test_annual_upsample_M_s_b(self):
self._check_annual_upsample_cases('M', 'start', 'bfill')
def test_annual_upsample_M_e_b(self):
self._check_annual_upsample_cases('M', 'end', 'bfill')
def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'):
for month in MONTHS:
ts = _simple_pts('1/1/1990', end, freq='A-%s' % month)
result = ts.resample(targ, fill_method=meth,
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec')
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995',
freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec'), result)
assert_series_equal(ts.resample('a'), result)
def test_not_subperiod(self):
# These are incompatible period rules for resampling
ts = _simple_pts('1/1/1990', '6/30/1995', freq='w-wed')
self.assertRaises(ValueError, ts.resample, 'a-dec')
self.assertRaises(ValueError, ts.resample, 'q-mar')
self.assertRaises(ValueError, ts.resample, 'M')
self.assertRaises(ValueError, ts.resample, 'w-thu')
def test_basic_upsample(self):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec')
resampled = result.resample('D', fill_method='ffill', convention='end')
expected = result.to_timestamp('D', how='end')
expected = expected.asfreq('D', 'ffill').to_period()
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', fill_method='ffill', limit=2,
convention='end')
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='A-DEC')
df = DataFrame({'a': ts})
rdf = df.resample('D', fill_method='ffill')
exp = df['a'].resample('D', fill_method='ffill')
assert_series_equal(rdf['a'], exp)
rng = period_range('2000', '2003', freq='A-DEC')
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample('M', fill_method='ffill')
ex_index = period_range('2000-01', '2003-12', freq='M')
expected = ts.asfreq('M', how='start').reindex(ex_index,
method='ffill')
assert_series_equal(result, expected)
def test_quarterly_upsample(self):
targets = ['D', 'B', 'M']
for month in MONTHS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='Q-%s' % month)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_monthly_upsample(self):
targets = ['D', 'B']
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_weekly_upsample(self):
targets = ['D', 'B']
for day in DAYS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='W-%s' % day)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_to_timestamps(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
result = ts.resample('A-DEC', kind='timestamp')
expected = ts.to_timestamp(how='end').resample('A-DEC')
assert_series_equal(result, expected)
def test_resample_to_quarterly(self):
for month in MONTHS:
ts = _simple_pts('1990', '1992', freq='A-%s' % month)
quar_ts = ts.resample('Q-%s' % month, fill_method='ffill')
stamps = ts.to_timestamp('D', how='start')
qdates = period_range(ts.index[0].asfreq('D', 'start'),
ts.index[-1].asfreq('D', 'end'),
freq='Q-%s' % month)
expected = stamps.reindex(qdates.to_timestamp('D', 's'),
method='ffill')
expected.index = qdates
assert_series_equal(quar_ts, expected)
# conforms, but different month
ts = _simple_pts('1990', '1992', freq='A-JUN')
for how in ['start', 'end']:
result = ts.resample('Q-MAR', convention=how, fill_method='ffill')
expected = ts.asfreq('Q-MAR', how=how)
expected = expected.reindex(result.index, method='ffill')
# .to_timestamp('D')
# expected = expected.resample('Q-MAR', fill_method='ffill')
assert_series_equal(result, expected)
def test_resample_fill_missing(self):
rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
s = TimeSeries(np.random.randn(4), index=rng)
stamps = s.to_timestamp()
filled = s.resample('A')
expected = stamps.resample('A').to_period('A')
assert_series_equal(filled, expected)
filled = s.resample('A', fill_method='ffill')
expected = stamps.resample('A', fill_method='ffill').to_period('A')
assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
s = TimeSeries(np.random.randn(5), index=rng)
self.assertRaises(Exception, s.resample, 'A')
def test_resample_5minute(self):
rng = period_range('1/1/2000', '1/5/2000', freq='T')
ts = TimeSeries(np.random.randn(len(rng)), index=rng)
result = ts.resample('5min')
expected = ts.to_timestamp().resample('5min')
assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self):
ts = _simple_pts('1/1/2000', '2/1/2000', freq='B')
result = ts.resample('D')
expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
assert_series_equal(result, expected)
ts = _simple_pts('1/1/2000', '2/1/2000')
result = ts.resample('H', convention='s')
exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
expected = ts.asfreq('H', how='s').reindex(exp_rng)
assert_series_equal(result, expected)
def test_resample_empty(self):
ts = _simple_pts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assert_(len(result) == 0)
def test_resample_irregular_sparse(self):
dr = date_range(start='1/1/2012', freq='5min', periods=1000)
s = Series(np.array(100), index=dr)
# subset the data.
subset = s[:'2012-01-04 06:55']
result = subset.resample('10min', how=len)
expected = s.resample('10min', how=len).ix[result.index]
assert_series_equal(result, expected)
def test_resample_weekly_all_na(self):
rng = date_range('1/1/2000', periods=10, freq='W-WED')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('W-THU')
self.assert_(result.isnull().all())
result = ts.resample('W-THU', fill_method='ffill')[:-1]
expected = ts.asfreq('W-THU', method='ffill')
assert_series_equal(result, expected)
def test_resample_tz_localized(self):
dr = date_range(start='2012-4-13', end='2012-5-1')
ts = Series(lrange(len(dr)), dr)
ts_utc = ts.tz_localize('UTC')
ts_local = ts_utc.tz_convert('America/Los_Angeles')
result = ts_local.resample('W')
ts_local_naive = ts_local.copy()
ts_local_naive.index = [x.replace(tzinfo=None)
for x in ts_local_naive.index.to_pydatetime()]
exp = ts_local_naive.resample('W').tz_localize('America/Los_Angeles')
assert_series_equal(result, exp)
# it works
result = ts_local.resample('D')
# #2245
idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T',
tz='Australia/Sydney')
s = Series([1, 2], index=idx)
result = s.resample('D', closed='right', label='right')
ex_index = date_range('2001-09-21', periods=1, freq='D',
tz='Australia/Sydney')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# for good measure
result = s.resample('D', kind='period')
ex_index = period_range('2001-09-20', periods=1, freq='D')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
def test_closed_left_corner(self):
# #1465
s = Series(np.random.randn(21),
index=date_range(start='1/1/2012 9:30',
freq='1min', periods=21))
s[0] = np.nan
result = s.resample('10min', how='mean', closed='left', label='right')
exp = s[1:].resample('10min', how='mean', closed='left', label='right')
assert_series_equal(result, exp)
result = s.resample('10min', how='mean', closed='left', label='left')
exp = s[1:].resample('10min', how='mean', closed='left', label='left')
ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)
self.assert_(result.index.equals(ex_index))
assert_series_equal(result, exp)
def test_quarterly_resampling(self):
rng = period_range('2000Q1', periods=10, freq='Q-DEC')
ts = Series(np.arange(10), index=rng)
result = ts.resample('A')
exp = ts.to_timestamp().resample('A').to_period()
assert_series_equal(result, exp)
def test_resample_weekly_bug_1726(self):
# 8/6/12 is a Monday
ind = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D")
n = len(ind)
data = [[x] * 5 for x in range(n)]
df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'],
index=ind)
# it works!
df.resample('W-MON', how='first', closed='left', label='left')
def test_resample_bms_2752(self):
# GH2753
foo = pd.Series(index=pd.bdate_range('20000101','20000201'))
res1 = foo.resample("BMS")
res2 = foo.resample("BMS").resample("B")
self.assertEqual(res1.index[0], Timestamp('20000103'))
self.assertEqual(res1.index[0], res2.index[0])
# def test_monthly_convention_span(self):
# rng = period_range('2000-01', periods=3, freq='M')
# ts = Series(np.arange(3), index=rng)
# # hacky way to get same thing
# exp_index = period_range('2000-01-01', '2000-03-31', freq='D')
# expected = ts.asfreq('D', how='end').reindex(exp_index)
# expected = expected.fillna(method='bfill')
# result = ts.resample('D', convention='span')
# assert_series_equal(result, expected)
def test_default_right_closed_label(self):
end_freq = ['D', 'Q', 'M', 'D']
end_types = ['M', 'A', 'Q', 'W']
for from_freq, to_freq in zip(end_freq, end_types):
idx = DatetimeIndex(start='8/15/2012', periods=100,
freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq)
assert_frame_equal(resampled, df.resample(to_freq, closed='right',
label='right'))
def test_default_left_closed_label(self):
others = ['MS', 'AS', 'QS', 'D', 'H']
others_freq = ['D', 'Q', 'M', 'H', 'T']
for from_freq, to_freq in zip(others_freq, others):
idx = DatetimeIndex(start='8/15/2012', periods=100,
freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq)
assert_frame_equal(resampled, df.resample(to_freq, closed='left',
label='left'))
def test_all_values_single_bin(self):
# 2070
index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
s = Series(np.random.randn(len(index)), index=index)
result = s.resample("A", how='mean')
tm.assert_almost_equal(result[0], s.mean())
def test_resample_doesnt_truncate(self):
"""Test for issue #3020"""
import pandas as pd
dates = | pd.date_range('01-Jan-2014','05-Jan-2014', freq='D') | pandas.date_range |
import numpy as np
import pandas as pd
from utils import data_generator
## data dimension
N_train = 110 # sum of training and validation set
dim = 24
## initialize parameters
c1_value = round(np.random.uniform(0, 20),2)
c2_value = round(np.random.uniform(0, 20),2)
duration = round(np.random.uniform(1, 4))
eta = round(np.random.uniform(0.8, 1),2)
paras = | pd.DataFrame([[c1_value, c2_value, duration, eta]],columns=("c1", "c2", "P", "eta")) | pandas.DataFrame |
"""
@author: <NAME>
@name: Bootstrap Estimation Procedures
@summary: This module provides functions that will perform the MLE for each
of the bootstrap samples.
"""
import numpy as np
import pandas as pd
from . import pylogit as pl
from .display_names import model_type_to_display_name
def extract_default_init_vals(orig_model_obj, mnl_point_series, num_params):
"""
Get the default initial values for the desired model type, based on the
point estimate of the MNL model that is 'closest' to the desired model.
Parameters
----------
orig_model_obj : an instance or subclass of the MNDC class.
Should correspond to the actual model that we want to bootstrap.
mnl_point_series : pandas Series.
Should denote the point estimate from the MNL model that is 'closest'
to the desired model.
num_params : int.
Should denote the number of parameters being estimated (including any
parameters that are being constrained during estimation).
Returns
-------
init_vals : 1D ndarray of initial values for the MLE of the desired model.
"""
# Initialize the initial values
init_vals = np.zeros(num_params, dtype=float)
# Figure out which values in mnl_point_series are the index coefficients
no_outside_intercepts = orig_model_obj.intercept_names is None
if no_outside_intercepts:
init_index_coefs = mnl_point_series.values
init_intercepts = None
else:
init_index_coefs =\
mnl_point_series.loc[orig_model_obj.ind_var_names].values
init_intercepts =\
mnl_point_series.loc[orig_model_obj.intercept_names].values
# Add any mixing variables to the index coefficients.
if orig_model_obj.mixing_vars is not None:
num_mixing_vars = len(orig_model_obj.mixing_vars)
init_index_coefs = np.concatenate([init_index_coefs,
np.zeros(num_mixing_vars)],
axis=0)
# Account for the special transformation of the index coefficients that is
# needed for the asymmetric logit model.
if orig_model_obj.model_type == model_type_to_display_name["Asym"]:
multiplier = np.log(len(np.unique(orig_model_obj.alt_IDs)))
# Cast the initial index coefficients to a float dtype to ensure
# successful broadcasting
init_index_coefs = init_index_coefs.astype(float)
# Adjust the scale of the index coefficients for the asymmetric logit.
init_index_coefs /= multiplier
# Combine the initial intercept values with the initial index coefficients
if init_intercepts is not None:
init_index_coefs =\
np.concatenate([init_intercepts, init_index_coefs], axis=0)
# Add index coefficients (and mixing variables) to the total initial array
num_index = init_index_coefs.shape[0]
init_vals[-1 * num_index:] = init_index_coefs
# Note that the initial values for the transformed nest coefficients and
# the shape parameters are zero, so we don't have to change anything
return init_vals
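# Illustrative sketch (assumption, not part of the original module): the returned
# array keeps any shape/nest slots at zero and places the MNL-derived index
# coefficients at the end. The tiny fake model below is hypothetical and exists
# only to show that layout.
def _example_extract_default_init_vals():
    class _FakeMNL(object):
        intercept_names = None
        ind_var_names = ['travel_cost', 'travel_time']
        mixing_vars = None
        model_type = 'Fake Model'  # deliberately not the asymmetric logit name
        alt_IDs = np.array([1, 2, 3])
    mnl_point = pd.Series([-0.5, -0.1], index=_FakeMNL.ind_var_names)
    init_vals = extract_default_init_vals(_FakeMNL(), mnl_point, num_params=4)
    # leading zeros (e.g. shape / nest slots), then the MNL index coefficients
    assert np.allclose(init_vals, [0.0, 0.0, -0.5, -0.1])
    return init_vals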
def get_model_abbrev(model_obj):
"""
Extract the string used to specify the model type of this model object in
`pylogit.create_choice_model`.
Parameters
----------
model_obj : An MNDC_Model instance.
Returns
-------
str. The internal abbreviation used for the particular type of MNDC_Model.
"""
# Get the 'display name' for our model.
model_type = model_obj.model_type
# Find the model abbreviation for this model's display name.
for key in model_type_to_display_name:
if model_type_to_display_name[key] == model_type:
return key
# If none of the strings in model_type_to_display_name matches our model
# object, then raise an error.
msg = "Model object has an unknown or incorrect model type."
raise ValueError(msg)
def get_model_creation_kwargs(model_obj):
"""
Get a dictionary of the keyword arguments needed to create the passed model
object using `pylogit.create_choice_model`.
Parameters
----------
model_obj : An MNDC_Model instance.
Returns
-------
model_kwargs : dict.
Contains the keyword arguments and the required values that are needed
to initialize a replica of `model_obj`.
"""
# Extract the model abbreviation for this model
model_abbrev = get_model_abbrev(model_obj)
# Create a dictionary to store the keyword arguments needed to initialize
# the new model object.
model_kwargs = {"model_type": model_abbrev,
"names": model_obj.name_spec,
"intercept_names": model_obj.intercept_names,
"intercept_ref_pos": model_obj.intercept_ref_position,
"shape_names": model_obj.shape_names,
"shape_ref_pos": model_obj.shape_ref_position,
"nest_spec": model_obj.nest_spec,
"mixing_vars": model_obj.mixing_vars,
"mixing_id_col": model_obj.mixing_id_col}
return model_kwargs
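# Illustrative sketch (assumption, not original code): the returned kwargs are
# meant to be combined with the data-specific arguments when re-creating the
# model on a bootstrap sample. `bootstrap_df` and `boot_id_col` are hypothetical
# names for the resampled data and its observation id column.
def _example_recreate_model(orig_model_obj, bootstrap_df, boot_id_col):
    model_kwargs = get_model_creation_kwargs(orig_model_obj)
    return pl.create_choice_model(data=bootstrap_df,
                                  alt_id_col=orig_model_obj.alt_id_col,
                                  obs_id_col=boot_id_col,
                                  choice_col=orig_model_obj.choice_col,
                                  specification=orig_model_obj.specification,
                                  **model_kwargs)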
def get_mnl_point_est(orig_model_obj,
new_df,
boot_id_col,
num_params,
mnl_spec,
mnl_names,
mnl_init_vals,
mnl_fit_kwargs):
"""
Calculates the MLE for the desired MNL model.
Parameters
----------
orig_model_obj : An MNDC_Model instance.
The object corresponding to the desired model being bootstrapped.
new_df : pandas DataFrame.
The pandas dataframe containing the data to be used to estimate the
MLE of the MNL model for the current bootstrap sample.
boot_id_col : str.
Denotes the new column that specifies the bootstrap observation ids for
choice model estimation.
num_params : non-negative int.
The number of parameters in the MLE of the `orig_model_obj`.
mnl_spec : OrderedDict or None.
If `orig_model_obj` is not a MNL model, then `mnl_spec` should be an
OrderedDict that contains the specification dictionary used to estimate
the MNL model that will provide starting values for the final estimated
model. If `orig_model_obj` is a MNL model, then `mnl_spec` may be None.
mnl_names : OrderedDict or None.
If `orig_model_obj` is not a MNL model, then `mnl_names` should be an
OrderedDict that contains the name dictionary used to initialize the
MNL model that will provide starting values for the final estimated
model. If `orig_model_obj` is a MNL, then `mnl_names` may be None.
mnl_init_vals : 1D ndarray or None.
If `orig_model_obj` is not a MNL model, then `mnl_init_vals` should be
a 1D ndarray. `mnl_init_vals` should denote the initial values used to
estimate the MNL model that provides starting values for the final
desired model. If `orig_model_obj` is a MNL model, then `mnl_init_vals`
may be None.
mnl_fit_kwargs : dict or None.
If `orig_model_obj` is not a MNL model, then `mnl_fit_kwargs` should be
a dict. `mnl_fit_kwargs` should denote the keyword arguments used when
calling the `fit_mle` function of the MNL model that will provide
starting values to the desired choice model. If `orig_model_obj` is a
MNL model, then `mnl_fit_kwargs` may be None.
Returns
-------
mnl_point : dict.
The dictionary returned by `scipy.optimize` after estimating the
desired MNL model.
mnl_obj : An MNL model instance.
The model object used to estimate the desired MNL model.
"""
# Get specification and name dictionaries for the mnl model, for the case
# where the model being bootstrapped is an MNL model. In this case, the
# mnl_spec and the mnl_names that are passed to the function are
# expected to be None.
if orig_model_obj.model_type == model_type_to_display_name["MNL"]:
mnl_spec = orig_model_obj.specification
mnl_names = orig_model_obj.name_spec
if mnl_init_vals is None:
mnl_init_vals = np.zeros(num_params)
if mnl_fit_kwargs is None:
mnl_fit_kwargs = {}
# Alter the mnl_fit_kwargs to ensure that we only perform point estimation
mnl_fit_kwargs["just_point"] = True
# Use BFGS by default to estimate the MNL since it works well for the MNL.
if "method" not in mnl_fit_kwargs:
mnl_fit_kwargs["method"] = "BFGS"
# Initialize the mnl model object for the given bootstrap sample.
mnl_obj = pl.create_choice_model(data=new_df,
alt_id_col=orig_model_obj.alt_id_col,
obs_id_col=boot_id_col,
choice_col=orig_model_obj.choice_col,
specification=mnl_spec,
model_type="MNL",
names=mnl_names)
# Get the MNL point estimate for the parameters of this bootstrap sample.
mnl_point = mnl_obj.fit_mle(mnl_init_vals, **mnl_fit_kwargs)
return mnl_point, mnl_obj
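# Illustrative sketch (assumption, not original code): chaining the helpers above.
# An MNL point estimate is computed for the bootstrap sample, wrapped in a Series
# indexed by the coefficient names, and converted into starting values for the
# desired model. All argument names here mirror the signatures above.
def _example_mnl_starting_values(orig_model_obj, new_df, boot_id_col, num_params,
                                 mnl_spec, mnl_names, mnl_init_vals,
                                 mnl_fit_kwargs):
    mnl_point, mnl_obj = get_mnl_point_est(orig_model_obj, new_df, boot_id_col,
                                           num_params, mnl_spec, mnl_names,
                                           mnl_init_vals, mnl_fit_kwargs)
    point_series = pd.Series(mnl_point["x"], index=mnl_obj.ind_var_names)
    return extract_default_init_vals(orig_model_obj, point_series, num_params)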
def retrieve_point_est(orig_model_obj,
new_df,
new_id_col,
num_params,
mnl_spec,
mnl_names,
mnl_init_vals,
mnl_fit_kwargs,
extract_init_vals=None,
**fit_kwargs):
"""
Calculates the MLE for the desired choice model, using an MNL point estimate to generate the starting values.
Parameters
----------
orig_model_obj : An MNDC_Model instance.
The object corresponding to the desired model being bootstrapped.
new_df : pandas DataFrame.
The pandas dataframe containing the data to be used to estimate the
MLE of the MNL model for the current bootstrap sample.
new_id_col : str.
Denotes the new column that specifies the bootstrap observation ids for
choice model estimation.
num_params : non-negative int.
The number of parameters in the MLE of the `orig_model_obj`.
mnl_spec : OrderedDict or None.
If `orig_model_obj` is not a MNL model, then `mnl_spec` should be an
OrderedDict that contains the specification dictionary used to estimate
the MNL model that will provide starting values for the final estimated
model. If `orig_model_obj` is a MNL model, then `mnl_spec` may be None.
mnl_names : OrderedDict or None.
If `orig_model_obj` is not a MNL model, then `mnl_names` should be an
OrderedDict that contains the name dictionary used to initialize the
MNL model that will provide starting values for the final estimated
model. If `orig_model_obj` is a MNL, then `mnl_names` may be None.
mnl_init_vals : 1D ndarray or None.
If `orig_model_obj` is not a MNL model, then `mnl_init_vals` should be
a 1D ndarray. `mnl_init_vals` should denote the initial values used to
estimate the MNL model that provides starting values for the final
desired model. If `orig_model_obj` is a MNL model, then `mnl_init_vals`
may be None.
mnl_fit_kwargs : dict or None.
If `orig_model_obj` is not a MNL model, then `mnl_fit_kwargs` should be
a dict. `mnl_fit_kwargs` should denote the keyword arguments used when
calling the `fit_mle` function of the MNL model that will provide
starting values to the desired choice model. If `orig_model_obj` is a
MNL model, then `mnl_fit_kwargs` may be None.
extract_init_vals : callable or None, optional.
Should accept 3 arguments, in the following order. First, it should
accept `orig_model_obj`. Second, it should accept a pandas Series of
the estimated parameters from the MNL model. The index of the Series
will be the names of the coefficients from `mnl_names`. Thirdly, it
should accept an int denoting the number of parameters in the desired
choice model. The callable should return a 1D ndarray of starting
values for the desired choice model. Default == None.
fit_kwargs : dict.
Denotes the keyword arguments to be used when estimating the desired
choice model using the current bootstrap sample (`new_df`). All such
kwargs will be directly passed to the `fit_mle` method of the desired
model object.
Returns
-------
final_point : dict.
The dictionary returned by `scipy.optimize` after estimating the
desired choice model.
"""
# Get the MNL point estimate for the parameters of this bootstrap sample.
mnl_point, mnl_obj = get_mnl_point_est(orig_model_obj,
new_df,
new_id_col,
num_params,
mnl_spec,
mnl_names,
mnl_init_vals,
mnl_fit_kwargs)
mnl_point_series = | pd.Series(mnl_point["x"], index=mnl_obj.ind_var_names) | pandas.Series |
import faiss
import pandas as pd
import time
import numpy as np
import torch
import os
from scipy import stats as s
class knn:
def __init__(self, datafile, savefile=None, knn_size=10, save_to_file=True, resume=True):
self.knn_size = knn_size
self.x_data = None
self.y_data = None
self.save_file = datafile if not savefile else savefile
self.classes = None
self.save_to_file = save_to_file
self.faiss_index = None
# self.faiss_index = faiss.IndexFlatL2()
if datafile and resume:
print(f'loading data from file: {datafile}')
if (os.path.exists(datafile)):
print('File found')
data = torch.load(datafile)
self.x_data = data['x'].numpy()
self.y_data = data['y']
print(
f'Found {self.x_data.shape[0]} points with {len(set(self.y_data))} classes')
print( | pd.Series(self.y_data) | pandas.Series |
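# Illustrative sketch (assumption, not part of the original class): how a flat L2
# faiss index would typically be built from `x_data` and queried for the
# `knn_size` nearest neighbours, with the predicted label taken as the majority
# vote of the neighbours (matching the `scipy.stats` import above).
def _example_build_and_query_index(x_data, y_data, query, knn_size=10):
    dim = x_data.shape[1]
    index = faiss.IndexFlatL2(dim)  # exact (brute-force) L2 search
    index.add(np.ascontiguousarray(x_data, dtype='float32'))
    _, neighbour_ids = index.search(
        np.ascontiguousarray(query, dtype='float32'), knn_size)
    votes = [y_data[i] for i in neighbour_ids[0]]
    return s.mode(votes)[0]  # most common class among the neighbours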
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# the dtype cannot be inferred here: the inferred dtype would be int,
# but the actual result is float because unmapped values become NaN
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
| pd.testing.assert_index_equal(result, expected) | pandas.testing.assert_index_equal |
# -*- coding: utf-8 -*-
import pandas
import numpy
import sys
import unittest
from datetime import datetime
from pandas.testing import assert_frame_equal, assert_series_equal
import os
import copy
sys.path.append("..")
import warnings
import nPYc
from nPYc.enumerations import SampleType
from nPYc.enumerations import AssayRole
from nPYc.enumerations import VariableType
from generateTestDataset import generateTestDataset
import tempfile
from isatools import isatab
class test_msdataset_synthetic(unittest.TestCase):
"""
Test MSDataset object functions with synthetic data
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata = pandas.DataFrame(
{'Sample File Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'Sample Base Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'AssayRole': [AssayRole.Assay, AssayRole.PrecisionReference, AssayRole.PrecisionReference],
'SampleType': [SampleType.StudySample, SampleType.StudyPool, SampleType.ExternalReference],
'Sample Name': ['Sample1', 'Sample2', 'Sample3'], 'Acqu Date': ['26-May-17', '26-May-17', '26-May-17'],
'Acqu Time': ['16:42:57', '16:58:49', '17:14:41'], 'Vial': ['1:A,1', '1:A,2', '1:A,3'],
'Instrument': ['XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest'],
'Acquired Time': [datetime(2017, 5, 26, 16, 42, 57), datetime(2017, 5, 26, 16, 58, 49),
datetime(2017, 5, 26, 17, 14, 41)], 'Run Order': [0, 1, 2], 'Batch': [1, 1, 2],
'Correction Batch': [numpy.nan, 1, 2], 'Matrix': ['U', 'U', 'U'],
'Subject ID': ['subject1', 'subject1', 'subject2'], 'Sample ID': ['sample1', 'sample2', 'sample3'],
'Dilution': [numpy.nan, '60.0', '100.0'],'Exclusion Details': ['','','']})
self.msData.featureMetadata = pandas.DataFrame(
{'Feature Name': ['Feature1', 'Feature2', 'Feature3'], 'Retention Time': [6.2449, 2.7565, 5.0564],
'm/z': [249.124281, 381.433191, 471.132083]})
self.msData.featureMetadata['Exclusion Details'] = None
self.msData.featureMetadata['User Excluded'] = False
self.msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=self.msData.featureMetadata.index)
self.msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=self.msData.featureMetadata.index)
self.msData._intensityData = numpy.array([[10.2, 20.95, 30.37], [10.1, 20.03, 30.74], [3.065, 15.83, 30.16]])
# Attributes
self.msData.Attributes['FeatureExtractionSoftware'] = 'UnitTestSoftware'
# excluded data
self.msData.sampleMetadataExcluded = []
self.msData.intensityDataExcluded = []
self.msData.featureMetadataExcluded = []
self.msData.excludedFlag = []
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[0, :])
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata)
self.msData.excludedFlag.append('Samples')
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[:, 0])
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata)
self.msData.excludedFlag.append('Features')
# finish
self.msData.VariableType = VariableType.Discrete
self.msData.initialiseMasks()
def test_rsd_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='No reference samples'):
msData.sampleMetadata = pandas.DataFrame(None)
with self.assertRaises(ValueError):
msData.rsdSP
with self.subTest(msg='Only one reference sample'):
msData.sampleMetadata = pandas.DataFrame([[nPYc.enumerations.AssayRole.PrecisionReference, nPYc.enumerations.SampleType.StudyPool]], columns=['AssayRole', 'SampleType'])
with self.assertRaises(ValueError):
msData.rsdSP
def test_getsamplemetadatafromfilename(self):
"""
Test we are parsing NPC MS filenames correctly (PCSOP.081).
"""
# Create an empty object with simple filenames
msData = nPYc.MSDataset('', fileType='empty')
msData.sampleMetadata['Sample File Name'] = ['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02_x',
'Test2_RPOS_ToF02_U2W03_b',
'Test3_RNEG_ToF03_S3W04_2',
'Test4_RPOS_ToF04_B1S1_SR_q',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01_9',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21']
msData._getSampleMetadataFromFilename(msData.Attributes['filenameSpec'])
##
# Check basename
##
basename = pandas.Series(['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_RPOS_ToF04_B1S1_SR',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21'],
name='Sample Base Name',
dtype='str')
assert_series_equal(msData.sampleMetadata['Sample Base Name'], basename)
##
# Check Study
##
study = pandas.Series(['Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test1',
'Test1'],
name='Study',
dtype='str')
assert_series_equal(msData.sampleMetadata['Study'], study)
##
# Check Chromatography
##
chromatography = pandas.Series(['H',
'R',
'R',
'L',
'L',
'H',
'H',
'R',
'R',
'R',
'L',
'L',
'H',
'H',
'H'],
name='Chromatography',
dtype='str')
assert_series_equal(msData.sampleMetadata['Chromatography'], chromatography)
##
# Check Ionisation
##
ionisation = pandas.Series(['POS',
'POS',
'NEG',
'POS',
'NEG',
'POS',
'POS',
'POS',
'NEG',
'POS',
'POS',
'NEG',
'POS',
'POS',
'POS'],
name='Ionisation',
dtype='str')
assert_series_equal(msData.sampleMetadata['Ionisation'], ionisation)
##
# Check Instrument
##
instrument = pandas.Series(['ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF06',
'ToF06',
'ToF06'],
name='Instrument',
dtype='str')
| assert_series_equal(msData.sampleMetadata['Instrument'], instrument) | pandas.testing.assert_series_equal |
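# Worked example (derived from the expectations asserted above, not new
# behaviour): how one NPC MS file name decomposes under the PCSOP.081 naming
# convention. Further tokens such as 'P1W02' or the '_SR'/'_LTR' suffixes carry
# additional sample information not shown in this excerpt.
_example_filename_fields = {
    'Sample File Name': 'Test1_HPOS_ToF01_P1W02',
    'Sample Base Name': 'Test1_HPOS_ToF01_P1W02',
    'Study': 'Test1',
    'Chromatography': 'H',
    'Ionisation': 'POS',
    'Instrument': 'ToF01',
}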
#!/usr/bin/env python3
# coding: utf-8
import requests
import sys
import pandas as pd
from requests.auth import HTTPBasicAuth
name = 'INSERT OWN API NAME HERE'
password = 'INSERT OWN API PASSWORD HERE'
#set initial values
uploads = pd.DataFrame() #empty dataframe
start = 0
end = 100
def transid_dt(transid):
'''Convert a transid into a datetime.
Wigle's stats are reported in their own timezone, and
the transid timestamp is in that same timezone.'''
ts = | pd.to_datetime(transid[0:8]) | pandas.to_datetime |
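# Worked example (hypothetical transid; the eight-character date prefix is the
# assumption encoded in the slice above):
_example_transid = '20120904-00123'
_example_ts = pd.to_datetime(_example_transid[0:8])  # Timestamp('2012-09-04 00:00:00')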
import typing
import datetime
import pandas as pd
from .make_df import ComicDataFrame
from lib.aws_util.s3.upload import upload_to_s3
from lib.aws_util.s3.download import download_from_s3
def store(df: ComicDataFrame) -> typing.NoReturn:
dt = datetime.datetime.now()
bucket = 'av-adam-store'
save_dir = '/tmp/'
upload_dir = f'ruijianime/comic/'
meta_path = f'{save_dir}meta.csv'
meta_obj = f'{upload_dir}meta.csv'
tag_path = f'{save_dir}tag.csv'
tag_obj = f'{upload_dir}tag.csv'
author_path = f'{save_dir}author.csv'
author_obj = f'{upload_dir}author.csv'
def add_timestamp() -> typing.NoReturn:
df.meta['updated_at'] = dt
df.tag['updated_at'] = dt
df.author['updated_at'] = dt
def download() -> typing.NoReturn:
download_from_s3(bucket, meta_obj, meta_path)
download_from_s3(bucket, tag_obj, tag_path)
download_from_s3(bucket, author_obj, author_path)
def merge() -> typing.NoReturn:
meta_old = pd.read_csv(meta_path)
meta = pd.concat((meta_old, df.meta), ignore_index=True)
meta.drop_duplicates(
subset=['comic_id'],
keep='last',
inplace=True,
)
print(meta)
meta.to_csv(meta_path, index=False)
tag_old = | pd.read_csv(tag_path) | pandas.read_csv |
import pandas as pd
from SALib.analyze.radial_ee import analyze as ee_analyze
from SALib.analyze.sobol_jansen import analyze as jansen_analyze
from SALib.plotting.bar import plot as barplot
# results produced with
# python launch.py --specific_inputs oat_mc_10_samples.csv --num_cores 48
# python launch.py --specific_inputs oat_cim_extremes.csv --num_cores 2
# python launch.py --specific_inputs moat_10_samples.csv --num_cores 46
from .settings import *
data_dir = indir
problem = {
'num_vars': 53,
'names': ['Farm___Crops___variables___Dryland_Winter_Barley___root_depth_m',
'Farm___Crops___variables___Dryland_Winter_Barley___water_use_ML_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Barley___yield_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Canola___root_depth_m',
'Farm___Crops___variables___Dryland_Winter_Canola___water_use_ML_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Canola___yield_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Wheat___root_depth_m',
'Farm___Crops___variables___Dryland_Winter_Wheat___water_use_ML_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Wheat___yield_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Barley___root_depth_m',
'Farm___Crops___variables___Irrigated_Winter_Barley___water_use_ML_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Barley___yield_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Canola___root_depth_m',
'Farm___Crops___variables___Irrigated_Winter_Canola___water_use_ML_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Canola___yield_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Wheat___root_depth_m',
'Farm___Crops___variables___Irrigated_Winter_Wheat___water_use_ML_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Wheat___yield_per_Ha',
'Farm___Fields___soil___zone_10___TAW_mm',
'Farm___Fields___soil___zone_11___TAW_mm',
'Farm___Fields___soil___zone_12___TAW_mm',
'Farm___Fields___soil___zone_1___TAW_mm',
'Farm___Fields___soil___zone_2___TAW_mm',
'Farm___Fields___soil___zone_3___TAW_mm',
'Farm___Fields___soil___zone_4___TAW_mm',
'Farm___Fields___soil___zone_5___TAW_mm',
'Farm___Fields___soil___zone_6___TAW_mm',
'Farm___Fields___soil___zone_7___TAW_mm',
'Farm___Fields___soil___zone_8___TAW_mm',
'Farm___Fields___soil___zone_9___TAW_mm',
'Farm___Irrigations___Gravity___cost_per_Ha',
'Farm___Irrigations___Gravity___head_pressure',
'Farm___Irrigations___Gravity___irrigation_efficiency',
'Farm___Irrigations___Gravity___pumping_cost_per_ML',
'Farm___Irrigations___PipeAndRiser___cost_per_Ha',
'Farm___Irrigations___PipeAndRiser___head_pressure',
'Farm___Irrigations___PipeAndRiser___irrigation_efficiency',
'Farm___Irrigations___PipeAndRiser___pumping_cost_per_ML',
'Farm___Irrigations___Spray___cost_per_Ha',
'Farm___Irrigations___Spray___head_pressure',
'Farm___Irrigations___Spray___irrigation_efficiency',
'Farm___Irrigations___Spray___pumping_cost_per_ML',
'Farm___zone_10___Irrigation', 'Farm___zone_11___Irrigation',
'Farm___zone_2___Irrigation', 'Farm___zone_4___Irrigation',
'Farm___zone_6___Irrigation', 'Farm___zone_7___Irrigation',
'Farm___zone_8___Irrigation', 'Farm___zone_9___Irrigation',
'policy___goulburn_allocation_scenario', 'policy___gw_cap',
'policy___gw_restriction'],
'bounds': [(0.80008164104, 1.49988829764),
(1.50055050742, 2.99888102069),
(1.5019032420200003, 3.4997506932099998),
(0.800586478968, 1.4996985073),
(2.50048002895, 5.9984797603299995),
(0.801052350325, 2.59824297051),
(0.800504246618, 1.49975544648),
(2.5014981435299997, 5.9979681912),
(1.5004709810799999, 5.99716646463),
(0.800280272497, 1.49937425734),
(1.5009590614, 2.9992559947000004),
(2.50329796931, 6.996816011819999),
(0.800211596215, 1.49974890273),
(2.0025975557, 5.99742468979),
(1.3008100600299999, 4.99958661017),
(0.8000586077680001, 1.7993585851400002),
(2.50005748529, 5.99920182664),
(1.5021921746899998, 7.99719295089),
(150.013080285, 199.99630294),
(145.01266211, 184.97447762599998),
(145.036691741, 184.96132256099997),
(145.017973816, 184.964659778),
(145.009985077, 184.987775366),
(100.017759932, 159.950281059),
(100.00893349, 159.939807798),
(150.002663759, 199.995911171),
(150.049539279, 199.966206716),
(75.011883698, 109.982509833),
(100.007801344, 159.986958043),
(145.015806747, 184.983072651),
(2000.04766978, 2499.9660698000002),
(8.00489093285, 14.999582054100001),
(0.500092622216, 0.8998440697460001),
(8.0072724319, 14.9995752798),
(2000.65212205, 3299.41488388),
(8.00365090987, 14.9983740134),
(0.600018657025, 0.899703908987),
(8.005434387660001, 14.9933485659),
(2500.62094903, 3499.76177012),
(25.0039236705, 34.9957834096),
(0.7001056060199999, 0.8998137827079999),
(30.000316497100002, 59.9914045149),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 2.0),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 2.0),
(0.0, 2.0),
(0.600156362739, 0.999676343195),
(0.0, 1.0)]
}
def collect_results(problem, oat_length, reps, np_res, numeric_vals):
jansen_results_df = pd.DataFrame()
ee_results_df = pd.DataFrame()
rep_length = oat_length * reps
_, cols = np_res.shape
for col in range(cols):
cn = col_names[col]
res = np_res[:rep_length, col]
si = jansen_analyze(problem, res, reps, seed=101)
js_df = si.to_df()
js_df.columns = ['{}_{}'.format(cn, suf) for suf in js_df.columns]
jansen_results_df = pd.concat([jansen_results_df, js_df], axis=1)
si = ee_analyze(problem, numeric_vals[:rep_length],
res, reps, seed=101)
ee_df = si.to_df()
ee_df.columns = ['{}_{}'.format(cn, suf) for suf in ee_df.columns]
ee_results_df = pd.concat([ee_results_df, ee_df], axis=1)
return jansen_results_df, ee_results_df
# End collect_results()
def plot_results(jansen_results_df, ee_results_df, target_metric):
# STs = [c for c in jansen_results_df.columns if '_conf' not in c and target_metric in c]
idx = [True if 'irrigation' in r.lower() else False for r in jansen_results_df.index]
# ax = jansen_results_df.loc[idx, STs].plot(kind='bar', figsize=(10,6))
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
tgt_idx = [c for c in ee_results_df.columns if target_metric.lower() in c.lower()]
ax = ee_results_df.loc[idx, tgt_idx].plot(kind='bar', figsize=(10,6))
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# End plot_results()
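# Illustrative sketch (assumption, not part of the original workflow): how the two
# helpers above would typically be driven from saved model output. It relies on
# the module-level `problem`, `oat_length`, `target_metric`, `perturbed_cols` and
# `col_names` defined below; the results file name is hypothetical.
def _example_sensitivity_workflow(reps=10):
    samples = pd.read_csv(f'{data_dir}oat_mc_10_samples.csv', index_col=0)
    results = pd.read_csv(f'{data_dir}oat_results.csv', index_col=0)
    jansen_df, ee_df = collect_results(problem, oat_length, reps,
                                       results.values,
                                       samples[perturbed_cols].values)
    plot_results(jansen_df, ee_df, target_metric)
# End _example_sensitivity_workflow()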
template_df = pd.read_csv(f'{data_dir}example_sample.csv', index_col=0)
is_perturbed = (template_df != template_df.iloc[0]).any()
perturbed_cols = template_df.loc[:, is_perturbed].columns
target_num_vars = problem['num_vars']
oat_length = target_num_vars + 1
target_metric = "SW Allocation Index"
### Extreme values without interactions ###
numeric_samples = pd.read_csv(f'{data_dir}extreme_numeric_samples.csv', index_col=0)
numeric_samples = numeric_samples[perturbed_cols]
numeric_vals = numeric_samples.values
extreme_results = pd.read_csv(f'{data_dir}no_irrigation_extreme_results.csv', index_col=0)
np_res = extreme_results.values
col_names = extreme_results.columns
extreme_results = {}
for i in range(len(col_names)):
x_diff = (numeric_vals[0, :] - numeric_vals[1, :])
y_diff = (np_res[0, i] - np_res[1, i])
extreme_results[col_names[i]] = y_diff / x_diff
# End for
no_ext_results = | pd.DataFrame(extreme_results, index=perturbed_cols) | pandas.DataFrame |
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
from evalml.pipelines import BaselineBinaryPipeline, BaselineMulticlassPipeline
from evalml.utils import get_random_state
def test_baseline_binary_random(X_y_binary):
X, y = X_y_binary
values = np.unique(y)
parameters = {
"Baseline Classifier": {
"strategy": "random"
}
}
clf = BaselineBinaryPipeline(parameters=parameters)
clf.fit(X, y)
expected_predictions = pd.Series(get_random_state(0).choice(np.unique(y), len(X)), dtype="Int64")
assert_series_equal(expected_predictions, clf.predict(X).to_series())
predicted_proba = clf.predict_proba(X)
assert predicted_proba.shape == (len(X), 2)
expected_predictions_proba = pd.DataFrame(np.array([[0.5 for i in range(len(values))]] * len(X)))
assert_frame_equal(expected_predictions_proba, predicted_proba.to_dataframe())
np.testing.assert_allclose(clf.feature_importance.iloc[:, 1], np.array([0.0] * X.shape[1]))
def test_baseline_binary_random_weighted(X_y_binary):
X, y = X_y_binary
values, counts = np.unique(y, return_counts=True)
percent_freq = counts.astype(float) / len(y)
assert percent_freq.sum() == 1.0
parameters = {
"Baseline Classifier": {
"strategy": "random_weighted"
}
}
clf = BaselineBinaryPipeline(parameters=parameters)
clf.fit(X, y)
expected_predictions = pd.Series(get_random_state(0).choice(np.unique(y), len(X), p=percent_freq), dtype="Int64")
assert_series_equal(expected_predictions, clf.predict(X).to_series())
expected_predictions_proba = pd.DataFrame(np.array([[percent_freq[i] for i in range(len(values))]] * len(X)))
predicted_proba = clf.predict_proba(X)
assert predicted_proba.shape == (len(X), 2)
assert_frame_equal(expected_predictions_proba, predicted_proba.to_dataframe())
np.testing.assert_allclose(clf.feature_importance.iloc[:, 1], np.array([0.0] * X.shape[1]))
def test_baseline_binary_mode():
X = pd.DataFrame({'one': [1, 2, 3, 4], 'two': [2, 3, 4, 5], 'three': [1, 2, 3, 4]})
y = | pd.Series([10, 11, 10]) | pandas.Series |
""" test fancy indexing & misc """
from datetime import datetime
import re
import weakref
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.api import Float64Index
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy:
"""pure get/set item & fancy indexing"""
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
# invalid
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
result = df.loc[df.index[2:6], "bar"]
expected = Series(
[2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar"
)
tm.assert_series_equal(result, expected)
def test_setitem_ndarray_1d_2(self):
# GH5508
# dtype getting changed?
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
def test_getitem_ndarray_3d(
self, index, frame_or_series, indexer_sli, using_array_manager
):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
msgs = []
if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]:
msgs.append(r"Wrong number of dimensions. values.ndim > ndim \[3 > 1\]")
if using_array_manager:
msgs.append("Passed array should be 1-dimensional")
if frame_or_series is Series or indexer_sli is tm.iloc:
msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)")
if using_array_manager:
msgs.append("indexer should be 1-dimensional")
if indexer_sli is tm.loc or (
frame_or_series is Series and indexer_sli is tm.setitem
):
msgs.append("Cannot index with multidimensional key")
if frame_or_series is DataFrame and indexer_sli is tm.setitem:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)):
msgs.append("Data must be 1-dimensional")
if len(index) == 0 or isinstance(index, pd.MultiIndex):
msgs.append("positional indexers are out-of-bounds")
msg = "|".join(msgs)
potential_errors = (IndexError, ValueError, NotImplementedError)
with pytest.raises(potential_errors, match=msg):
idxr[nd3]
def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
if indexer_sli is tm.iloc:
err = ValueError
msg = f"Cannot set values with ndim > {obj.ndim}"
else:
err = ValueError
msg = "|".join(
[
r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
"Cannot set values with ndim > 1",
"Index data must be 1-dimensional",
"Data must be 1-dimensional",
"Array conditional must be same shape as self",
]
)
with pytest.raises(err, match=msg):
idxr[nd3] = 0
def test_getitem_ndarray_0d(self):
# GH#24924
key = np.array(0)
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[key]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
ser = Series([1, 2])
result = ser[key]
assert result == 1
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
assert df["c"].dtype == np.float64
df.loc[0, "c"] = "foo"
expected = DataFrame(
[{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [3.14, "wxyz"])
def test_setitem_dtype_upcast2(self, val):
# GH10280
df = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3),
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left = df.copy()
left.loc["a", "bar"] = val
right = DataFrame(
[[0, val, 2], [3, 4, 5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left["foo"])
assert is_integer_dtype(left["baz"])
def test_setitem_dtype_upcast3(self):
left = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3) / 10.0,
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left.loc["a", "bar"] = "wxyz"
right = DataFrame(
[[0, "wxyz", 0.2], [0.3, 0.4, 0.5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_float_dtype(left["foo"])
assert is_float_dtype(left["baz"])
def test_dups_fancy_indexing(self):
# GH 3455
df = tm.makeCustomDataframe(10, 3)
df.columns = ["a", "a", "b"]
result = df[["b", "a"]].columns
expected = Index(["b", "a", "a"])
tm.assert_index_equal(result, expected)
def test_dups_fancy_indexing_across_dtypes(self):
# across dtypes
df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa"))
df.head()
str(df)
result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]])
result.columns = list("aaaaaaa")
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
def test_dups_fancy_indexing_not_in_order(self):
# GH 3561, dups not in selected order
df = DataFrame(
{"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")},
index=["A", "A", "B", "C"],
)
rows = ["C", "B"]
expected = DataFrame(
{"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows
)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ["C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
# see GH5553, make sure we use the right indexer
rows = ["F", "G", "H", "C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
def test_dups_fancy_indexing_only_missing_label(self):
# List containing only missing label
dfnu = DataFrame(np.random.randn(5, 3), index=list("AABCD"))
with pytest.raises(
KeyError,
match=re.escape(
"\"None of [Index(['E'], dtype='object')] are in the [index]\""
),
):
dfnu.loc[["E"]]
# ToDo: check_index_type can be True after GH 11497
@pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")])
def test_dups_fancy_indexing_missing_label(self, vals):
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": vals})
with pytest.raises(KeyError, match="not in index"):
df.loc[[0, 8, 0]]
def test_dups_fancy_indexing_non_unique(self):
# non unique with non unique selector
df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"])
with pytest.raises(KeyError, match="not in index"):
df.loc[["A", "A", "E"]]
def test_dups_fancy_indexing2(self):
# GH 5835
# dups on index and missing values
df = DataFrame(np.random.randn(5, 5), columns=["A", "B", "B", "B", "A"])
with pytest.raises(KeyError, match="not in index"):
df.loc[:, ["A", "B", "C"]]
def test_dups_fancy_indexing3(self):
# GH 6504, multi-axis indexing
df = DataFrame(
np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=["a", "b"]
)
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ["a", "b"]]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_duplicate_int_indexing(self, indexer_sl):
# GH 17347
ser = Series(range(3), index=[1, 1, 3])
expected = Series(range(2), index=[1, 1])
result = indexer_sl(ser)[[1]]
tm.assert_series_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame(
{"a": {1: "aaa", 2: "bbb", 3: "ccc"}, "b": {1: 111, 2: 222, 3: 333}}
)
# this works, new column is created correctly
df["test"] = df["a"].apply(lambda x: "_" if x == "aaa" else x)
# this does not work, ie column test is not changed
idx = df["test"] == "_"
temp = df.loc[idx, "a"].apply(lambda x: "-----" if x == "aaa" else x)
df.loc[idx, "test"] = temp
assert df.iloc[0, 2] == "-----"
def test_multitype_list_index_access(self):
# GH 10610
df = DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23])
with pytest.raises(KeyError, match=re.escape("'[26, -8] not in index'")):
df[[22, 26, -8]]
assert df[21].shape[0] == df.shape[0]
def test_set_index_nan(self):
# GH 3586
df = DataFrame(
{
"PRuid": {
17: "nonQC",
18: "nonQC",
19: "nonQC",
20: "10",
21: "11",
22: "12",
23: "13",
24: "24",
25: "35",
26: "46",
27: "47",
28: "48",
29: "59",
30: "10",
},
"QC": {
17: 0.0,
18: 0.0,
19: 0.0,
20: np.nan,
21: np.nan,
22: np.nan,
23: np.nan,
24: 1.0,
25: np.nan,
26: np.nan,
27: np.nan,
28: np.nan,
29: np.nan,
30: np.nan,
},
"data": {
17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006,
},
"year": {
17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986,
},
}
).reset_index()
result = (
df.set_index(["year", "PRuid", "QC"])
.reset_index()
.reindex(columns=df.columns)
)
tm.assert_frame_equal(result, df)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame(
{
"FC": ["a", "b", "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": list(range(6)),
"col2": list(range(6, 12)),
}
)
df.iloc[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isna()
cols = ["col1", "col2"]
dft = df2 * 2
dft.iloc[3, 3] = np.nan
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": Series([0, 1, 4, 6, 8, 10]),
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
# frame on rhs
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
# coerces to float64 because values has float64 dtype
# GH 14001
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0],
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
df2 = df.copy()
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
def test_multi_assign_broadcasting_rhs(self):
# broadcasting on the rhs is required
df = DataFrame(
{
"A": [1, 2, 0, 0, 0],
"B": [0, 0, 0, 10, 11],
"C": [0, 0, 0, 10, 11],
"D": [3, 4, 5, 6, 7],
}
)
expected = df.copy()
mask = expected["A"] == 0
for col in ["A", "B"]:
expected.loc[mask, col] = df["D"]
df.loc[df["A"] == 0, ["A", "B"]] = df["D"]
tm.assert_frame_equal(df, expected)
# TODO(ArrayManager) setting single item with an iterable doesn't work yet
# in the "split" path
@td.skip_array_manager_not_yet_implemented
def test_setitem_list(self):
# GH 6043
# iloc with a list
df = DataFrame(index=[0, 1], columns=[0])
df.iloc[1, 0] = [1, 2, 3]
df.iloc[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.iloc[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = DataFrame([1], Index([pd.Timestamp("2011-01-01")], dtype=object))
assert df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="'2011'"):
df.loc["2011", 0]
def test_string_slice_empty(self):
# GH 14424
df = DataFrame()
assert not df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="^0$"):
df.loc["2011", 0]
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame(
[["1", "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, "A"] = df.loc[:, "A"].astype(np.int64)
expected = DataFrame(
[[1, "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ["B", "C"]] = df.loc[:, ["B", "C"]].astype(np.int64)
expected = DataFrame(
[["1", 2, 3, ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
def test_astype_assignment_full_replacements(self):
# full replacements / no nans
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.iloc[:, 0] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.loc[:, "A"] = df["A"].astype(np.int64)
        expected = DataFrame({"A": [1, 2, 3, 4]})
        tm.assert_frame_equal(df, expected)
"""
Copyright 2018 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal, assert_series_equal
from gs_quant.api.gs.data import GsDataApi
from gs_quant.context_base import ContextMeta
from gs_quant.errors import MqValueError
from gs_quant.markets import MarketDataCoordinate
from gs_quant.session import GsSession, Environment
from gs_quant.target.assets import FieldFilterMap
from gs_quant.target.coordinates import MDAPIDataQuery
from gs_quant.target.data import MarketDataVendor, DataSetEntity, DataQuery, DataSetFieldEntity
test_coordinates = (
MarketDataCoordinate(mkt_type='Prime', mkt_quoting_style='price', mkt_asset='335320934'),
MarketDataCoordinate(mkt_type='IR', mkt_asset='USD', mkt_class='Swap', mkt_point=('2Y',)),
)
test_str_coordinates = (
'Prime_335320934_.price',
'IR_USD_Swap_2Y'
)
test_defn_dict = {'id': 'EXAMPLE_FROM_SLANG',
'name': 'Example DataSet',
'description': 'This is a test.',
'shortDescription': '',
'vendor': 'Goldman Sachs',
'dataProduct': 'TEST',
'entitlements': {'query': ['internal'],
'view': ['internal', 'role:DataServiceView', 'role:DataServiceAdmin'],
'upload': ['internal'],
'admin': ['internal', 'role:DataServiceAdmin'],
'edit': ['internal', 'role:DataServiceAdmin']},
'parameters': {'methodology': '',
'coverage': '',
'notes': '',
'history': '',
'frequency': '',
'applyMarketDataEntitlements': False,
'uploadDataPolicy': 'DEFAULT_POLICY',
'logicalDb': 'STUDIO_DAILY',
'symbolStrategy': 'ARCTIC_LINK',
'immutable': False,
'includeInCatalog': False,
'coverageEnabled': True},
'dimensions': {'timeField': 'date',
'transactionTimeField': 'updateTime',
'symbolDimensions': ['assetId'],
'nonSymbolDimensions': [{'field': 'price', 'column': 'PRICE'}],
'measures': [{'field': 'updateTime', 'column': 'UPDATE_TIME'}],
'entityDimension': 'assetId'},
'defaults': {'startSeconds': 2592000.0},
'createdById': '9eb7226166a44236905cae2913cfbd3c',
'createdTime': '2018-07-24T00:32:25.77Z',
'lastUpdatedById': '4ad8ebb6480d49e6b2e9eea9210685cf',
'lastUpdatedTime': '2019-10-24T14:20:13.653Z'}
bond_data = [
{
'mktType': 'Prime',
'mktAsset': '335320934',
'mktQuotingStyle': 'price',
'price': 1.0139,
'time': pd.to_datetime('2019-01-20T01:03:00Z')
},
{
'mktType': 'Prime',
'mktAsset': '335320934',
'mktQuotingStyle': 'price',
'price': 1.0141,
'time': pd.to_datetime('2019-01-20T01:08:00Z')
}
]
swap_data = [
{
'mktType': 'IR',
'mktAsset': 'USD',
'mktClass': 'Swap',
'mktPoint': ('2Y',),
'mktQuotingStyle': 'ATMRate',
'ATMRate': 0.02592,
'time': pd.to_datetime('2019-01-20T01:09:45Z')
}
]
bond_expected_frame = pd.DataFrame(
data={
'time': [pd.to_datetime('2019-01-20T01:03:00Z'), pd.to_datetime('2019-01-20T01:08:00Z')],
'mktType': ['Prime', 'Prime'],
'mktAsset': ['335320934', '335320934'],
'mktQuotingStyle': ['price', 'price'],
'value': [1.0139, 1.0141]
},
index=pd.DatetimeIndex(['2019-01-20T01:03:00', '2019-01-20T01:08:00']),
)
swap_expected_frame = pd.DataFrame(
data={
'time': [pd.to_datetime('2019-01-20T01:09:45Z')],
'mktType': ['IR'],
'mktAsset': ['USD'],
'mktClass': ['Swap'],
'mktPoint': [('2Y',)],
'mktQuotingStyle': ['ATMRate'],
'value': [0.02592]
},
index=pd.DatetimeIndex(['2019-01-20T01:09:45']),
)
def test_coordinates_data(mocker):
start = dt.datetime(2019, 1, 2, 1, 0)
end = dt.datetime(2019, 1, 2, 1, 10)
# mock GsSession and data response
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_post', side_effect=[{'responses': [{'data': bond_data}]},
{'responses': [{'data': swap_data}]},
{'responses': [{'data': bond_data},
{'data': swap_data}]},
{'responses': [{'data': bond_data},
{'data': swap_data}]}
])
coord_data_result = GsDataApi.coordinates_data(coordinates=test_coordinates[0], start=start, end=end)
assert_frame_equal(coord_data_result, bond_expected_frame)
str_coord_data_result = GsDataApi.coordinates_data(coordinates=test_str_coordinates[1], start=start, end=end)
assert_frame_equal(str_coord_data_result, swap_expected_frame)
coords_data_result = GsDataApi.coordinates_data(coordinates=test_coordinates, start=start, end=end,
as_multiple_dataframes=True)
assert len(coords_data_result) == 2
assert_frame_equal(coords_data_result[0], bond_expected_frame)
assert_frame_equal(coords_data_result[1], swap_expected_frame)
GsSession.current._post.reset_mock()
str_coords_data_result = GsDataApi.coordinates_data(coordinates=test_str_coordinates, start=start, end=end,
as_multiple_dataframes=True)
assert len(str_coords_data_result) == 2
assert_frame_equal(str_coords_data_result[0], bond_expected_frame)
assert_frame_equal(str_coords_data_result[1], swap_expected_frame)
GsSession.current._post.assert_called_once_with('/data/coordinates/query',
payload=MDAPIDataQuery(market_data_coordinates=test_coordinates,
start_time=start,
end_time=end,
vendor=MarketDataVendor.Goldman_Sachs,
format="MessagePack")
)
def test_coordinate_data_series(mocker):
start = dt.datetime(2019, 1, 2, 1, 0)
end = dt.datetime(2019, 1, 2, 1, 10)
# mock GsSession and data response
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_post', side_effect=[{'responses': [{'data': bond_data}]},
{'responses': [{'data': swap_data}]},
{'responses': [{'data': bond_data},
{'data': swap_data}]},
{'responses': [{'data': bond_data},
{'data': swap_data}]}
])
bond_expected_series = pd.Series(index=bond_expected_frame.index, data=bond_expected_frame.value.values)
swap_expected_series = pd.Series(index=swap_expected_frame.index, data=swap_expected_frame.value.values)
coord_data_result = GsDataApi.coordinates_data_series(coordinates=test_coordinates[0], start=start, end=end)
    assert_series_equal(coord_data_result, bond_expected_series)
import os
import pandas as pd
from datetime import datetime, timedelta
from embrace import get_date_from_garmin
import collections
folders = ['01-09-TR1', '10-20-TR2', '21-30-TR3']
def timestamp2datetime2minutes(file_path):
df = pd.read_csv(file_path, header=1)
df.Timestamp = df.Timestamp.map(lambda ts: datetime.fromtimestamp(ts))
df_waves = df.iloc[:, 81:152]
df_waves.insert(loc=0, column='Timestamp', value=df['Timestamp'])
df_waves = df_waves.dropna()
df_waves = df_waves.reset_index(drop=True)
# print(df_waves)
flag = 0
dic_eeg = dict()
for col in df_waves.columns:
print('col: ', col)
datetimeOfkind, valuesOfkind = [], []
i = 0
if col != 'Timestamp':
while i < len(df_waves['Timestamp']):
sum_ = 0
count = 0
current_datetime = df_waves['Timestamp'][i]
# print('current datetime:', current_datetime)
date_time = datetime.strptime(str(df_waves['Timestamp'][i]).split('.')[0], '%Y-%m-%d %H:%M:%S')
# transfer 15:43:44 to 15:43:00
date_time_00 = date_time.strftime('%Y-%m-%d %H:%M:%S')
date_time_00 = datetime.strptime(date_time_00[0:-2] + '00', '%Y-%m-%d %H:%M:%S')
next_time = date_time_00 + timedelta(minutes=1)
next_time = datetime.strptime(str(next_time).split('.')[0], '%Y-%m-%d %H:%M:%S')
# print('next_time:', next_time)
while current_datetime < next_time:
sum_ += df_waves[col][i]
i += 1
count += 1
if i < len(df_waves['Timestamp']):
current_datetime = df_waves['Timestamp'][i]
elif i >= len(df_waves['Timestamp']):
break
# print('file:', file, 'col:', col, 'i:', i, 'count:', count, 'sum:', sum_)
valuesOfkind.append(float(sum_ / count))
datetimeOfkind.append(date_time_00)
if flag == 0:
dic_eeg['Timestamp'] = datetimeOfkind
flag = 1
dic_eeg[col] = valuesOfkind
df_final = pd.DataFrame(dic_eeg, columns=list(dic_eeg.keys()))
return df_final
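# Example usage (file path is illustrative):
#   df_minutes = timestamp2datetime2minutes('01-09-TR1/recording.csv')
#   # one row per minute, holding the per-minute mean of every EEG wave column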
def avg_14_channels(df):
theta, alpha, betal, betah, gamma, datetime_eeg = [], [], [], [], [], []
dic_avg = dict()
for row in range(len(df['Timestamp'])):
theta_temp, alpha_temp, betal_temp, betah_temp, gamma_temp = [], [], [], [], []
for col in df.columns:
if col[-5:] == 'Theta':
theta_temp.append(df[col][row])
elif col[-5:] == 'Alpha':
alpha_temp.append(df[col][row])
elif col[-5:] == 'BetaL':
betal_temp.append(df[col][row])
elif col[-5:] == 'BetaH':
betah_temp.append(df[col][row])
elif col[-5:] == 'Gamma':
gamma_temp.append(df[col][row])
theta.append(sum(theta_temp) / len(theta_temp))
alpha.append(sum(alpha_temp) / len(alpha_temp))
betal.append(sum(betal_temp) / len(betal_temp))
betah.append(sum(betah_temp) / len(betah_temp))
gamma.append(sum(gamma_temp) / len(gamma_temp))
datetime_eeg.append(df['Timestamp'][row])
dic_avg['Timestamp'] = datetime_eeg
dic_avg['Theta'] = theta
dic_avg['Alpha'] = alpha
dic_avg['BetaL'] = betal
dic_avg['BetaH'] = betah
dic_avg['Gamma'] = gamma
    df_eeg = pd.DataFrame.from_dict(dic_avg)
    return df_eeg
#-*-coding: utf-8
"""
Created on Sat Dec 01 2018
@author: JeongChanwoo
"""
import pandas as pd
import numpy as np
import re
from os import listdir
class DataReader(object):
def __init__(self):
self.data_path =None
self.data_list = None
self.total_data = None
self.user_lecture_data = None
self.user_data = None
self.user_index = None
self.lecture_index = None
def data_list_rise(self,path):
file_list = listdir(path)
data_list = []
for k in file_list:
regural = re.compile('(unit_complete)|(start_course)|(unit_complete)')
m = regural.search(k)
if m is not None:
data_list.append(k)
self.data_list = data_list
return data_list
def read_json(self, data_list, path):
data_dict = {}
for k in data_list:
try:
                data_dict[k[:-5]] = pd.read_json(self.data_path + k, encoding='utf-8')
            except ValueError:
                # assumption: skip files that cannot be parsed as JSON
                continue
        return data_dict
import os
import json
import itertools
import numpy as np
import pandas as pd
from tqdm import tqdm
from pathlib import Path
import subprocess
import matplotlib.pyplot as plt
import geopandas as gpd
import rasterio as rio
from rasterio.windows import (
Window,
transform
)
from rasterio import features
import rasterio.mask
from rasterio.plot import show
from fiona.crs import to_string
GRID_ID = 1
def write_indices(area_dict, area, indices_dir):
"""
    Reads the bands of each image for the given area, computes the derived
    indices, and writes them to a 10-band GeoTIFF per year in indices_dir.
    Args:
        area_dict (dict) : Python dictionary containing the file paths per area
        area (str) : The area of interest (AOI)
        indices_dir (str) : Directory where the index rasters are written
    Returns:
        area_dict (dict) : The input dictionary with the paths of the written
                           index rasters appended under 'indices'
"""
subdata = {}
image_list = area_dict[area]['images']
# Iterate over each year
for image_file in tqdm(image_list, total=len(image_list)):
year = image_file.split('_')[-1].split('.')[0]
# Read each band
src = rio.open(image_file)
out_meta = src.meta
for band_idx in range(src.count):
band = src.read(band_idx+1).ravel()
subdata['B{}'.format(band_idx+1)] = band
# Get derived indices
subdata["ndvi"] = ndvi(subdata)
subdata["ndbi"] = ndbi(subdata)
subdata["savi"] = savi(subdata)
subdata["mndwi"] = mndwi(subdata)
subdata["ui"] = ui(subdata)
subdata["nbi"] = nbi(subdata)
subdata["brba"] = brba(subdata)
subdata["nbai"] = nbai(subdata)
subdata["mbi"] = mbi(subdata)
subdata["baei"] = baei(subdata)
for index in subdata:
subdata[index] = subdata[index].reshape(
(src.height,src.width)
).astype(np.float64)
output_file = indices_dir + 'indices_' + area + '_' + year + '.tif'
area_dict[area]['indices'].append(output_file)
out_meta = src.meta
out_meta.update({
"driver": "GTiff",
"height": src.height,
"width": src.width,
"count": 1,
'nodata': -1,
"dtype": np.float64
})
out_meta.update(count = 10)
with rasterio.open(output_file, 'w', **out_meta, compress='deflate') as dst:
dst.write(subdata["ndvi"], 1)
dst.write(subdata["ndbi"], 2)
dst.write(subdata["savi"], 3)
dst.write(subdata["mndwi"], 4)
dst.write(subdata["ui"], 5)
dst.write(subdata["nbi"], 6)
dst.write(subdata["brba"], 7)
dst.write(subdata["nbai"], 8)
dst.write(subdata["mbi"], 9)
dst.write(subdata["baei"], 10)
return area_dict
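# Note: ndvi(), ndbi(), savi(), mndwi(), ui(), nbi(), brba(), nbai(), mbi() and baei()
# are assumed to be defined elsewhere in this module. A minimal sketch of one of them,
# with band numbering an assumption that depends on the sensor:
#   def ndvi(d):
#       return (d["B5"] - d["B4"]) / (d["B5"] + d["B4"])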
def save_predictions_window(pred, image_src, output_file, window, tfm):
"""
    Saves the predictions as a TIFF file, using image_src as reference.
    Args:
        pred (numpy array) : The array containing the predictions
        image_src (str) : Path to the source image to be used as a reference file
        output_file (str) : Path of the output TIFF file
        window (rasterio.windows.Window) : Window the predictions correspond to
        tfm (affine.Affine) : Affine transform of the window
    Returns:
        None
"""
with rio.open(image_src) as src:
out_image = np.array(pred).reshape(
(window.height,window.width)
)
out_meta = src.meta
out_meta.update({
"driver": "GTiff",
"height": window.height,
"width": window.width,
"count": 1,
'nodata': -1,
"dtype": np.float64,
"transform": tfm
})
with rio.open(output_file, "w", **out_meta, compress='deflate') as dest:
dest.write(out_image, 1)
def rename_ind_cols(df):
"""
Renames columns according to column names used by model
"""
cols = [c for c in df.columns if 'I' in c]
renaming = {}
ind_dict = {
'I1': "ndvi",
'I2': "ndbi",
'I3': "savi",
'I4': "mndwi",
'I5': "ui",
'I6': "nbi",
'I7': "brba",
'I8': "nbai",
'I9': "mbi",
'I10': "baei",
}
# create mapping of column names
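    # e.g. 'I1_2019' -> 'ndvi_2019' (the prefix before '_' is looked up in ind_dict)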
for col in cols:
pat = col.split('_')[0]
col_n = col.replace(pat, ind_dict[pat])
renaming[col] = col_n
return df.rename(columns = renaming)
def get_rasters_merged(
raster_file1,
raster_file2,
output_file,
tmp_dir,
grid_blocks=5
):
p = Path(tmp_dir)
tmp_files = [str(f) for f in list(p.glob('tmp*.tif'))]
for f in tmp_files:
os.remove(f)
windows = make_windows(raster_file1, grid_blocks = grid_blocks)
pbar = tqdm(enumerate(windows), total=len(windows))
for idx, window in pbar:
raster1 = rio.open(raster_file1).read(1, window=window)
raster2 = rio.open(raster_file2).read(1, window=window)
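        # pixel-wise maximum keeps the larger value from either raster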
result = np.maximum(raster1, raster2)
# Save
image_src = raster_file1
tmp_file = tmp_dir + 'tmp{}.tif'.format(idx)
tfm = transform(window, transform = rio.open(image_src).transform)
save_predictions_window(result, image_src, tmp_file, window, tfm)
stitch(output_file, tmp_dir)
def get_preds_windowing(
area,
area_dict,
model,
tmp_dir,
best_features,
output,
grid_blocks=5,
threshold=0
):
# Delete tmp files from previous run
if Path(output).is_file():
os.remove(output)
p = Path(tmp_dir)
tmp_files = [str(f) for f in list(p.glob('tmp*.tif'))]
for f in tmp_files:
os.remove(f)
# Read bands
src_file = area_dict[area]['images'][0]
windows = make_windows(src_file, grid_blocks = grid_blocks)
pbar = tqdm(enumerate(windows), total=len(windows))
for idx, window in pbar:
pbar.set_description('Processing {}...'.format(area))
df_bands = read_bands_window(area_dict, area, window=window)
df_inds = read_inds_window(area_dict, area, window=window)
df_test = pd.concat((df_bands, df_inds), axis = 1)
df_test = rename_ind_cols(df_test)
df_test = df_test.replace([np.inf, -np.inf], 0)
# Prediction
X_test = df_test[best_features].fillna(0)
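        # rows whose features (ignoring the last column) are all zero are later marked as nodata (-1)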
all_zeroes = (X_test.iloc[:, :-1].sum(axis=1) == 0)
data = X_test
features = best_features
# Prettify Tiff
preds = model.predict_proba(data)[:, 1]
if threshold > 0:
preds[(preds < threshold)] = 0
preds[all_zeroes] = -1
# Save
image_src = src_file
output_file = tmp_dir + 'tmp{}.tif'.format(idx)
tfm = transform(window, transform = rio.open(src_file).transform)
save_predictions_window(preds, image_src, output_file, window, tfm)
#print('Saving to {}...'.format(output))
stitch(output, tmp_dir)
def stitch(output_file, tmp_dir):
"""
    Merges all temporary raster tiles into a single raster using gdal_merge.py
    Source: https://gis.stackexchange.com/questions/230553/merging-all-tiles-from-one-directory-using-gdal
    Args:
        output_file (str) : The output filepath
        tmp_dir (str) : Path to temporary directory
    Returns:
        result (subprocess.CompletedProcess) : Result of the gdal_merge.py invocation
"""
p = Path(tmp_dir)
file_list = [str(f) for f in list(p.glob('tmp*.tif'))]
files_string = " ".join(file_list)
command = "gdal_merge.py -n -1 -a_nodata -1 -o {} -of gtiff ".format(output_file) + files_string
text = '''
# set conda env for these commands - took me 3h to figure out
eval "$(conda shell.bash hook)"
conda activate ee
{}
'''.format(command)
f = open(tmp_dir + "stitch.sh", "w")
f.write(text)
f.close()
result = subprocess.run('sh ' + tmp_dir + 'stitch.sh', shell = True, stdout=subprocess.PIPE)
return result
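# The generated command looks like (file names illustrative):
#   gdal_merge.py -n -1 -a_nodata -1 -o merged.tif -of gtiff tmp0.tif tmp1.tif ...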
def read_inds_window(area_dict, area, window):
"""
    Reads the precomputed index rasters of the area within the given window.
    Args:
        area_dict (dict) : Python dictionary containing the file paths per area
        area (str) : The area of interest (AOI)
        window (rasterio.windows.Window) : The raster window to read
    Returns:
        data (pd.DataFrame) : Dataframe of the derived index values for the window
"""
data = []
image_list = area_dict[area]['indices']
# Iterate over each year
for image_file in image_list:
year = image_file.split('_')[-1].split('.')[0]
# Read each band
subdata = dict()
raster = rio.open(image_file)
for band_idx in range(raster.count):
band = raster.read(band_idx+1, window=window).ravel()
subdata['I{}'.format(band_idx+1)] = band
# Cast to pandas subdataframe
        subdata = pd.DataFrame(subdata)
        # assumption: tag columns with the year and collect the per-image frames
        subdata.columns = ['{}_{}'.format(col, year) for col in subdata.columns]
        data.append(subdata)
    # assumption: concatenate per-image frames column-wise into a single dataframe
    return pd.concat(data, axis=1)
################################################################################
"""
DJ JOE Website Playlist File Generator
--------------------------------------
(c) 2021 - Stanley Solutions - <NAME>
This application serves an interface to allow the recording of Apple Music or
Spotify playlists.
"""
################################################################################
# Requirements
import html
import pandas as pd
YOUTUBE_BASE_URL = "https://www.youtube.com/results?search_query={}"
def format_youtube_search(terms: list):
# Generate a search-url for Youtube based on the terms provided.
query = ' '.join(terms).replace(' ', '%20')
return YOUTUBE_BASE_URL.format(query)
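# Example: format_youtube_search(["daft", "punk"])
# -> "https://www.youtube.com/results?search_query=daft%20punk"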
def playlist_json(tracks):
"""Generate a JSON List of Dictionaries for Each Track."""
for i, track in enumerate(tracks):
tracks[i] = {
"title": track[0],
"artist": track[1],
"explicit": track[2],
}
return tracks
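# Example: playlist_json([("Title A", "Artist B", False)])
# -> [{"title": "Title A", "artist": "Artist B", "explicit": False}]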
def playlist_html_table(playlist: str, tracks: str, table_id: str = None,
classes: str = None):
"""Generate an HTML Table from the Playlist's Information."""
table_list = [t[:2] for t in tracks]
# Generate Table
    df = pd.DataFrame(table_list, columns=["Title", "Artist(s)"])
    # assumption: render the dataframe as an HTML table with the given id/classes
    return df.to_html(index=False, table_id=table_id, classes=classes)
"""Format helpers"""
import math
import pandas as pd
import pandas.lib as lib
import numpy as np
pd_is_datetime_arraylike = None
try:
from pandas.core.common import is_datetime_arraylike as pd_is_datetime_arraylike
except:
pass
from functools import partial
def is_datetime_arraylike(arr):
if isinstance(arr, pd.DataFrame):
return arr.apply(pd_is_datetime_arraylike).all()
elif pd_is_datetime_arraylike is not None:
return pd_is_datetime_arraylike(arr)
elif isinstance(arr, pd.DatetimeIndex):
return True
else:
inferred = lib.infer_dtype(arr)
return 'datetime' in inferred
class DateTimeFormat(object):
def __init__(self, fmtstr, coerce=True):
self.fmtstr = fmtstr
self.coerce = coerce
def __call__(self, value):
if isinstance(value, pd.Series):
return value.apply(self.__call__)
else:
if not hasattr(value, 'strftime'):
if self.coerce:
                    value = pd.to_datetime(value)
            # assumption: format the (possibly coerced) value with the configured pattern
            return value.strftime(self.fmtstr)
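# Example usage (illustrative; assumes the formatting return sketched above):
#   fmt = DateTimeFormat('%Y-%m-%d')
#   fmt(pd.Series(pd.to_datetime(['2020-01-01 10:00', '2020-01-02 11:30'])))
#   # -> Series(['2020-01-01', '2020-01-02'])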
#!/usr/bin/python3
import sys
import pandas as pd
import numpy as np
import os
import concurrent.futures
import functools, itertools
import sofa_time
import statistics
import multiprocessing as mp
import socket
import ipaddress
# sys.path.insert(0, '/home/st9540808/Desktop/sofa/bin')
import sofa_models, sofa_preprocess
import sofa_config
import sofa_print
colors_send = ['#14f2e0', '#41c8e5', '#6e9eeb']
colors_recv = ['#9a75f0', '#c74bf6', '#f320fa', '#fe2bcc']
color_send = itertools.cycle(colors_send)
color_recv = itertools.cycle(colors_recv)
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit", # 13
"msg_id"] # 14
# @profile
def extract_individual_rosmsg(df_send_, df_recv_, *df_others_):
""" Return a dictionary with topic name as key and
a list of ros message as value.
Structure of return value: {topic_name: {(guid, seqnum): log}}
where (guid, seqnum) is a msg_id
"""
# Convert timestamp to unix time
# unix_time_off = statistics.median(sofa_time.get_unix_mono_diff() for i in range(100))
# for df in (df_send, df_recv, *df_others):
# df['ts'] = df['ts'] + unix_time_off
df_send_[1]['ts'] = df_send_[1]['ts'] + df_send_[0].cpu_time_offset + df_send_[0].unix_time_off
df_recv_[1]['ts'] = df_recv_[1]['ts'] + df_recv_[0].cpu_time_offset + df_recv_[0].unix_time_off
df_others = []
for cfg_to_pass, df_other in df_others_:
df_other['ts'] = df_other['ts'] + cfg_to_pass.cpu_time_offset + cfg_to_pass.unix_time_off
df_others.append(df_other)
df_send = df_send_[1]
df_recv = df_recv_[1]
# sort by timestamp
    df_send.sort_values(by=['ts'], ignore_index=True, inplace=True)
    df_recv.sort_values(by=['ts'], ignore_index=True, inplace=True)
# publish side
gb_send = df_send.groupby('guid')
all_publishers_log = {guid:log for guid, log in gb_send}
# subscription side
gb_recv = df_recv.groupby('guid')
all_subscriptions_log = {guid:log for guid, log in gb_recv}
# other logs (assume there's no happen-before relations that needed to be resolved)
# every dataframe is a dictionary in `other_log_list`
gb_others = [df_other.groupby('guid') for df_other in df_others]
other_log_list = [{guid:log for guid, log in gb_other} for gb_other in gb_others]
# find guids that are in both subsciption and publisher log
interested_guids = all_subscriptions_log.keys() \
& all_publishers_log.keys()
res = {}
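    # res maps topic_name -> {(guid, seqnum): merged per-message log DataFrame}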
for guid in interested_guids:
# get a publisher from log
df = all_publishers_log[guid]
df_send_partial = all_publishers_log[guid].copy()
add_data_calls = df[~pd.isna(df['seqnum'])] # get all non-NaN seqnums in log
try:
pubaddr, = pd.unique(df['publisher']).dropna()
print(pubaddr)
except ValueError as e:
print('Find a guid that is not associated with a publisher memory address. Error: ' + str(e))
continue
# print(add_data_calls)
all_RTPSMsg_idx = ((df_send['func'] == '~RTPSMessageGroup') & (df_send['publisher'] == pubaddr))
all_RTPSMsgret_idx = ((df_send['func'] == '~RTPSMessageGroup exit') & (df_send['publisher'] == pubaddr))
all_sendSync_idx = ((df_send['func'] == 'sendSync') & (df_send['publisher'] == pubaddr))
all_nn_xpack_idx = (df['func'] == 'nn_xpack_send1')
modified_rows = []
for idx, add_data_call in add_data_calls.iterrows():
ts = add_data_call['ts']
rcl_idx = df.loc[(df['ts'] < ts) & (df['layer'] == 'rcl')]['ts'].idxmax()
df_send_partial.loc[rcl_idx, 'seqnum'] = add_data_call.loc['seqnum']
# For grouping RTPSMessageGroup function
try:
ts_gt = (df_send['ts'] > ts) # ts greater than that of add_data_call
RTPSMsg_idx = df_send.loc[ts_gt & all_RTPSMsg_idx]['ts'].idxmin()
modified_row = df_send.loc[RTPSMsg_idx]
modified_row.at['seqnum'] = add_data_call.loc['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
RTPSMsgret_idx = df_send.loc[ts_gt & all_RTPSMsgret_idx]['ts'].idxmin()
modified_row = df_send.loc[RTPSMsgret_idx]
modified_row.at['seqnum'] = add_data_call.loc['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
sendSync_idx = df_send.loc[ts_gt & (df_send['ts'] < df_send.loc[RTPSMsgret_idx, 'ts']) & all_sendSync_idx]
sendSync = sendSync_idx.copy()
sendSync['seqnum'] = add_data_call.loc['seqnum']
modified_rows.extend(row for _, row in sendSync.iterrows())
except ValueError as e:
pass
if 'rmw_cyclonedds_cpp' in df['implementation'].values:
try:
df_cls = other_log_list[0][guid]
seqnum = add_data_call.loc['seqnum']
max_ts = df_cls[(df_cls['layer'] == 'cls_egress') & (df_cls['seqnum'] == seqnum)]['ts'].max()
index = df.loc[(ts < df['ts']) & (df['ts'] < max_ts) & all_nn_xpack_idx].index
df_send_partial.loc[index, 'seqnum'] = seqnum
except ValueError as e:
pass
df_send_partial = pd.concat([df_send_partial, pd.DataFrame(modified_rows)])
# get a subscrption from log
df = all_subscriptions_log[guid]
df_recv_partial = all_subscriptions_log[guid].copy()
add_recvchange_calls = df[~pd.isna(df['seqnum'])] # get all not nan seqnums in log
if 'cyclonedds' in df['layer'].unique():
add_recvchange_calls = df[df['func'] == 'ddsi_udp_conn_read exit']
all_sub = pd.unique(df['subscriber']) # How many subscribers subscribe to this topic?
subs_map = {sub: (df['subscriber'] == sub) &
(df['func'] == "rmw_take_with_info exit") for sub in all_sub}
all_pid = pd.unique(df_recv['pid'])
pid_maps = {pid: (df_recv['pid'] == pid) &
(df_recv['func'] == "rmw_wait exit") for pid in all_pid}
modified_rows = []
for idx, add_recvchange_call in add_recvchange_calls.iterrows():
ts = add_recvchange_call['ts']
subaddr = add_recvchange_call.at['subscriber']
seqnum = add_recvchange_call.at['seqnum']
# Consider missing `rmw_take_with_info exit` here
try:
rmw_take_idx = df.loc[(df['ts'] > ts) & subs_map[subaddr]]['ts'].idxmin()
if 'cyclonedds' in df['layer'].unique():
free_sample = df.loc[(df['func'] == 'free_sample') & (df['seqnum'] == seqnum)]
if len(free_sample) == 0:
continue
free_sample = free_sample.iloc[0]
if free_sample['ts'] > df.at[rmw_take_idx, 'ts']:
rmw_take_idx = df.loc[(df['ts'] > free_sample['ts']) & subs_map[subaddr]]['ts'].idxmin()
# if 'cyclonedds' in df['layer'].unique():
# free_sample = df_recv.loc[(df_recv['ts'] > ts) &
# (df_recv['func'] == 'free_sample') &
# (df_recv['pid'] == df.at[rmw_take_idx, 'pid']) &
# (df_recv['seqnum'] == seqnum)]
# free_sample_idx = free_sample.idxmax()
# if len(free_sample) == 0:
# rmw_take_idx = df.loc[(df['ts'] > free_sample['ts']) & subs_map[subaddr]]['ts'].idxmin()
# print(df.loc[rmw_take_idx])
# free_sample() should be called in rmw_take, therefore
# free_sample() happened before rmw_take_with_info returns
df_recv_partial.at[rmw_take_idx, 'seqnum'] = seqnum
# TODO: Group by ip port in cls_ingress
UDPResourceReceive_idx = df.loc[(df['ts'] < ts) &
(df['func'] == 'UDPResourceReceive exit') &
(df['pid'] == add_recvchange_call.at['pid'])]['ts'].idxmax()
df_recv_partial.at[UDPResourceReceive_idx, 'seqnum'] = seqnum
except ValueError as e:
pass
try:
# Group rmw_wait exit
pid = df_recv_partial.at[rmw_take_idx, 'pid']
rmw_wait_idx = df_recv.loc[(df_recv['ts'] < df_recv_partial.at[rmw_take_idx,'ts']) &
pid_maps[pid]]['ts'].idxmax()
modified_row = df_recv.loc[rmw_wait_idx]
modified_row.at['seqnum'] = add_recvchange_call.at['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
except ValueError as e:
pass
# Doesn't need to remove duplicates for
# a = pd.DataFrame(modified_rows)
# print(a[~a.index.duplicated(keep='first')])
df_recv_partial = pd.concat([df_recv_partial, pd.DataFrame(modified_rows)])
# Merge all modified dataframes
df_merged = df_send_partial.append(df_recv_partial, ignore_index=True, sort=False)
# handle other log files
for other_log in other_log_list:
df_other = other_log[guid]
df_merged = df_merged.append(df_other, ignore_index=True, sort=False)
# Avoid `TypeError: boolean value of NA is ambiguous` when calling groupby()
df_merged['subscriber'] = df_merged['subscriber'].fillna(np.nan)
df_merged['guid'] = df_merged['guid'].fillna(np.nan)
df_merged['seqnum'] = df_merged['seqnum'].fillna(np.nan)
df_merged.sort_values(by=['ts'], inplace=True)
gb_merged = df_merged.groupby(['guid', 'seqnum'])
ros_msgs = {msg_id:log for msg_id, log in gb_merged} # msg_id: (guid, seqnum)
# get topic name from log
topic_name = df_merged['topic_name'].dropna().unique()
if len(topic_name) > 1:
raise Exception("More than one topic in a log file")
topic_name = topic_name[0]
if topic_name in res:
res[topic_name] = {**res[topic_name], **ros_msgs}
else:
res[topic_name] = ros_msgs
print('finished parsing ' + topic_name)
return res
def extract_individual_rosmsg2(df_send, df_recv, df_cls):
# unix_time_off = statistics.median(sofa_time.get_unix_mono_diff() for i in range(100))
# for df in (df_send, df_recv, df_cls):
# df['ts'] = df['ts'] + unix_time_off
    df_send.sort_values(by=['ts'], ignore_index=True, inplace=True)
    df_recv.sort_values(by=['ts'], ignore_index=True, inplace=True)
    df_cls.sort_values(by=['ts'], ignore_index=True, inplace=True)
# publish side
gb_send = df_send.groupby('guid')
all_publishers_log = {guid:log for guid, log in gb_send}
# subscription side
gb_recv = df_recv.groupby('guid')
all_subscriptions_log = {guid:log for guid, log in gb_recv}
# in kernel (probably not need it)
gb_cls = df_cls.groupby('guid')
all_cls_log = {guid:log for guid, log in gb_cls}
interested_guids = all_subscriptions_log.keys() \
& all_publishers_log.keys()
res = {}
for guid in interested_guids:
# get a publisher from log
df = all_publishers_log[guid].copy()
df_send_partial = all_publishers_log[guid].copy()
add_data_calls = df[~pd.isna(df['seqnum'])] # get all non-NaN seqnums in log
try:
pubaddr, = pd.unique(df['publisher']).dropna()
except ValueError as e:
print('Find a guid that is not associated with a publisher memory address. Error: ' + str(e))
continue
print(pubaddr)
modified_rows = []
for idx, add_data_call in add_data_calls.iterrows():
seqnum = add_data_call['seqnum']
ts = add_data_call['ts']
rcl_idx = df.loc[(df['ts'] < ts) & (df['layer'] == 'rcl')]['ts'].idxmax()
df_send_partial.loc[rcl_idx, 'seqnum'] = add_data_call.loc['seqnum']
# Use the two timestamps to get a slice of dataframe
# Here we drop :~RTPSMessageGroup exit"
ts_cls = df_cls[(df_cls['guid'] == guid) &
(df_cls['seqnum'] == seqnum) &
(df_cls['layer'] == 'cls_egress')]['ts'].max() # Get ts upper bound
df_send_tgt = df_send[(ts <= df_send['ts']) &
(df_send['ts'] <= ts_cls) &
(df_send['publisher'] == pubaddr)]
modified_row = df_send_tgt.copy()
modified_row['guid'] = guid
modified_row['seqnum'] = seqnum
modified_rows.append(modified_row)
df_send_partial = df_send_partial.combine_first(pd.concat(modified_rows))
# get a subscrption from log
df = all_subscriptions_log[guid].copy()
df_recv_partial = all_subscriptions_log[guid].copy()
add_recvchange_calls = df[~pd.isna(df['seqnum'])] # get all not nan seqnums in log
all_sub = pd.unique(df['subscriber']) # How many subscribers subscribe to this topic?
subs_map = {sub: (df['subscriber'] == sub) &
(df['func'] == "rmw_take_with_info exit") for sub in all_sub}
all_pid = pd.unique(df_recv['pid'])
pid_maps = {pid: (df_recv['pid'] == pid) &
(df_recv['func'] == "rmw_wait exit") for pid in all_pid}
modified_rows = []
for idx, add_recvchange_call in add_recvchange_calls.iterrows():
ts = add_recvchange_call.at['ts']
subaddr = add_recvchange_call.at['subscriber']
seqnum = add_recvchange_call.at['seqnum']
# Use the two timestamps to get a slice of dataframe
ts_cls = df_cls[(df_cls['guid'] == guid) &
(df_cls['seqnum'] == seqnum) &
(df_cls['layer'] == 'cls_ingress')]['ts'].min()
df_recv_tgt = df_recv[(ts_cls < df_recv['ts']) & (df_recv['ts'] < ts)].copy()
# Consider missing `rmw_take_with_info exit` here
try:
rmw_take_idx = df.loc[(df['ts'] > ts) & subs_map[subaddr]]['ts'].idxmin()
df_recv_partial.at[rmw_take_idx, 'seqnum'] = seqnum
# TODO: Group by ip port in cls_ingress
UDPResourceReceive_idx = df_recv_tgt.loc[(df_recv_tgt['func'] == 'UDPResourceReceive exit') &
(df_recv_tgt['pid'] == add_recvchange_call.at['pid'])]['ts'].idxmax();
df_recv_partial.at[UDPResourceReceive_idx, 'seqnum'] = seqnum
# Group rmw_wait exit
pid = df_recv_partial.at[rmw_take_idx, 'pid']
rmw_wait_idx = df_recv.loc[(df_recv['ts'] < df_recv_partial.at[rmw_take_idx,'ts']) &
pid_maps[pid]]['ts'].idxmax()
modified_row = df_recv.loc[rmw_wait_idx]
modified_row.at['seqnum'] = add_recvchange_call.at['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
except ValueError as e:
pass
df_recv_partial = pd.concat([df_recv_partial, pd.DataFrame(modified_rows)])
# Merge all modified dataframes
df_merged = df_send_partial.append(df_recv_partial, ignore_index=True, sort=False)
# handle other log files
df_merged = df_merged.append(df_cls, ignore_index=True, sort=False)
# Avoid `TypeError: boolean value of NA is ambiguous` when calling groupby()
df_merged['subscriber'] = df_merged['subscriber'].fillna(np.nan)
df_merged['guid'] = df_merged['guid'].fillna(np.nan)
df_merged['seqnum'] = df_merged['seqnum'].fillna(np.nan)
df_merged.sort_values(by=['ts'], inplace=True)
gb_merged = df_merged.groupby(['guid', 'seqnum'])
ros_msgs = {msg_id:log for msg_id, log in gb_merged} # msg_id: (guid, seqnum)
# get topic name from log
topic_name = df_merged['topic_name'].dropna().unique()
if len(topic_name) > 1:
raise Exception("More than one topic in a log file")
topic_name = topic_name[0]
if res.get(topic_name) is None:
res[topic_name] = ros_msgs
else:
res[topic_name].update(ros_msgs)
print(type(res[topic_name]))
print('finished parsing ' + topic_name)
return res
# print(df_recv_partial[['layer', 'ts', 'func', 'guid', 'seqnum']])
def print_all_msgs(res):
for topic_name, all_msgs_log in res.items():
print('topic: ' + topic_name)
for (guid, seqnum), msg_log in all_msgs_log.items():
print('msg_id: ', (guid, seqnum))
print(msg_log)
print('')
def get_rcl_publish(df):
try:
rcl = df.loc[df['func'] == 'rcl_publish'].iloc[0] # shuold be unique
except ValueError as e:
print(e)
return pd.Series('false', index=['layer']) # return a dummy for easy checkup
return rcl
# @profile
def ros_msgs_trace_read(items, cfg):
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit",
"msg_id"]
traces = []
topic_name, all_msgs_log = items
for msg_id, msg_log in all_msgs_log.items():
# msg_log['subscriber'] = msg_log['subscriber'].apply(lambda x: np.nan if x is pd.NA else x)
        gb_sub = msg_log.groupby('subscriber') # How many subscribers received this ros message?
start = get_rcl_publish(msg_log)
if start.at['layer'] != 'rcl': # skip when the first function call is not from rcl
continue
for sub_addr, sub_log in gb_sub:
trace = dict(zip(sofa_ros2_fieldnames, itertools.repeat(-1)))
end = sub_log.iloc[-1]
if end.at['layer'] != 'rmw': # skip when the last function call is not from rmw (eg. rosbag2)
continue
time = start['ts']
if cfg is not None and not cfg.absolute_timestamp:
time = start['ts'] - cfg.time_base
trace['timestamp'] = time
trace['duration'] = (end['ts'] - start['ts']) * 1e3 # ms
trace['name'] = "[%s] %s -> [%s] %s <br>Topic Name: %s<br>Transmission: %s -> %s<br>Seqnum: %d" % \
(start['layer'], start['func'], end['layer'], end['func'],
start['topic_name'],
start['comm'], end['comm'],
int(start['seqnum']))
trace['unit'] = 'ms'
trace['msg_id'] = msg_id
traces.append(trace)
traces = pd.DataFrame(traces)
return traces
def ros_msgs_trace_read_ros_lat_send(items, cfg):
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit",
"msg_id"]
traces = []
topic_name, all_msgs_log = items
for msg_id, msg_log in all_msgs_log.items():
trace = dict(zip(sofa_ros2_fieldnames, itertools.repeat(-1)))
start = get_rcl_publish(msg_log)
end = msg_log.loc[msg_log['func'] == 'write_sample_gc'].iloc[0]
time = end['ts']
if cfg is not None and not cfg.absolute_timestamp:
time = end['ts'] - cfg.time_base
trace['timestamp'] = time
trace['duration'] = (end['ts'] - start['ts']) * 1e3 # ms
trace['name'] = "[%s] %s -> [%s] %s <br>Topic Name: %s<br>Transmission: %s -> %s<br>Seqnum: %d" % \
(start['layer'], start['func'], end['layer'], end['func'],
start['topic_name'],
start['comm'], end['comm'],
int(start['seqnum']))
trace['unit'] = 'ms'
trace['msg_id'] = msg_id
traces.append(trace)
traces = pd.DataFrame(traces)
return traces
def ros_msgs_trace_read_os_lat_send(items, cfg):
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit",
"msg_id"]
traces = []
topic_name, all_msgs_log = items
for msg_id, msg_log in all_msgs_log.items():
start = get_rcl_publish(msg_log)
if start.at['layer'] != 'rcl': # skip when the first function call is not from rcl
continue
all_sendSync = msg_log.loc[(msg_log['func'] == 'sendSync') | (msg_log['func'] == 'nn_xpack_send1')].copy()
all_egress = msg_log.loc[msg_log['layer'] == 'cls_egress']
for _, sendSync in all_sendSync.iterrows():
trace = dict(zip(sofa_ros2_fieldnames, itertools.repeat(-1)))
# addr = sendSync['daddr']
port = sendSync['dport']
egress = all_egress.loc[(all_egress['dport'] == port)].iloc[0]
time = sendSync['ts']
if cfg is not None and not cfg.absolute_timestamp:
time = sendSync['ts'] - cfg.time_base
trace['timestamp'] = time
trace['duration'] = (egress['ts'] - sendSync['ts']) * 1e3 # ms
trace['name'] = "[%s] %s -> [%s] %s <br>Topic Name: %s<br>Destination address: %s:%d" % \
(sendSync['layer'], sendSync['func'], egress['layer'], '',
start['topic_name'],
str(ipaddress.IPv4Address(socket.ntohl(int(all_egress['daddr'].unique())))),
socket.ntohs(int(port)))
trace['unit'] = 'ms'
trace['msg_id'] = msg_id
traces.append(trace)
traces = pd.DataFrame(traces)
return traces
def ros_msgs_trace_read_os_lat_recv(items, cfg):
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit",
"msg_id"]
traces = []
topic_name, all_msgs_log = items
for msg_id, msg_log in all_msgs_log.items():
start = get_rcl_publish(msg_log)
if start.at['layer'] != 'rcl': # skip when the first function call is not from rcl
continue
all_recv = msg_log.loc[(msg_log['func'] == 'UDPResourceReceive exit') |
(msg_log['func'] == 'ddsi_udp_conn_read exit')].copy()
all_ingress = msg_log.loc[msg_log['layer'] == 'cls_ingress'].copy()
for _, ingress in all_ingress.iterrows():
trace = dict(zip(sofa_ros2_fieldnames, itertools.repeat(-1)))
addr = ingress['daddr']
port = ingress['dport']
try:
recv = all_recv.loc[(all_recv['dport'] == port)].iloc[0]
except Exception as e:
print(str(msg_id) + " missing " + str(port))
print(e)
continue
time = ingress['ts']
if cfg is not None and not cfg.absolute_timestamp:
time = ingress['ts'] - cfg.time_base
trace['timestamp'] = time
trace['duration'] = (recv['ts'] - ingress['ts']) * 1e3 # ms
trace['name'] = "[%s] %s -> [%s] %s <br>Topic Name: %s<br>Source address: %s:%d, Destination address: %s:%d<br>Seqnum: %d" % \
(ingress['layer'], '', recv['layer'], recv['func'], start['topic_name'],
str(ipaddress.IPv4Address(socket.ntohl(int(ingress['saddr'])))), socket.ntohs(int(ingress['sport'])),
str(ipaddress.IPv4Address(socket.ntohl(int(addr)))), socket.ntohs(int(port)),
int(ingress['seqnum']))
trace['unit'] = 'ms'
trace['msg_id'] = msg_id
traces.append(trace)
traces = pd.DataFrame(traces)
return traces
def ros_msgs_trace_read_dds_lat_send(items, cfg):
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit",
"msg_id"]
traces = []
topic_name, all_msgs_log = items
for msg_id, msg_log in all_msgs_log.items():
start = get_rcl_publish(msg_log)
if start.at['layer'] != 'rcl': # skip when the first function call is not from rcl
continue
all_sendSync = msg_log.loc[(msg_log['func'] == 'sendSync') | (msg_log['func'] == 'nn_xpack_send1')].copy()
add_pub_change = msg_log.loc[(msg_log['func'] == 'add_pub_change') |
(msg_log['func'] == 'write_sample_gc')].copy().squeeze()
for _, sendSync in all_sendSync.iterrows():
trace = dict(zip(sofa_ros2_fieldnames, itertools.repeat(-1)))
time = add_pub_change['ts']
if cfg is not None and not cfg.absolute_timestamp:
time = add_pub_change['ts'] - cfg.time_base
trace['timestamp'] = time
trace['duration'] = (sendSync['ts'] - add_pub_change['ts']) * 1e3 # ms
trace['name'] = "[%s] %s -> [%s] %s <br>Topic Name: %s" % \
(add_pub_change['layer'], add_pub_change['func'], sendSync['layer'], sendSync['func'], \
start['topic_name'])
trace['unit'] = 'ms'
trace['msg_id'] = msg_id
traces.append(trace)
traces = pd.DataFrame(traces)
return traces
def ros_msgs_trace_read_dds_ros_lat_recv(items, cfg):
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit",
"msg_id"]
traces_indds = []
traces_inros = []
topic_name, all_msgs_log = items
for msg_id, msg_log in all_msgs_log.items():
# msg_log['subscriber'] = msg_log['subscriber'].apply(lambda x: np.nan if x is pd.NA else x)
        gb_sub = msg_log.groupby('subscriber') # How many subscribers received this ros message?
start = get_rcl_publish(msg_log)
if start.at['layer'] != 'rcl': # skip when the first function call is not from rcl
continue
for sub_addr, sub_log in gb_sub:
trace_indds = dict(zip(sofa_ros2_fieldnames, itertools.repeat(-1)))
trace_inros = dict(zip(sofa_ros2_fieldnames, itertools.repeat(-1)))
end = sub_log.iloc[-1]
if end.at['layer'] != 'rmw': # skip when the last function call is not from rmw (eg. rosbag2)
continue
try:
pid_ros, = sub_log.loc[sub_log['func'] == 'rmw_take_with_info exit', 'pid'].unique() # shuold be unique
pid_dds, = sub_log.loc[(sub_log['func'] == 'add_received_change') |
(msg_log['func'] == 'ddsi_udp_conn_read exit'), 'pid'].unique()
except ValueError as e:
print(e)
continue
try: # Consider missing 'rmw_wait exit' here
os_return = msg_log.loc[((msg_log['func'] == 'UDPResourceReceive exit') | (msg_log['func'] == 'ddsi_udp_conn_read exit')) &
(msg_log['pid'] == pid_dds)].iloc[0]
dds_return = msg_log.loc[(msg_log['func'] == 'rmw_wait exit') & (msg_log['pid'] == pid_ros)].iloc[0]
ros_return = sub_log.loc[sub_log['func'] == 'rmw_take_with_info exit'].squeeze()
except IndexError as e:
print(e)
continue
time = os_return['ts']
if cfg is not None and not cfg.absolute_timestamp:
time = os_return['ts'] - cfg.time_base
trace_indds['timestamp'] = time
trace_indds['duration'] = (dds_return['ts'] - os_return['ts']) * 1e3 # ms
trace_indds['name'] = "[%s] %s -> [%s] %s <br>Topic Name: %s<br>Transmission: %s -> %s" % \
(os_return['layer'], os_return['func'], dds_return['layer'], dds_return['func'],
start['topic_name'], start['comm'], os_return['comm'])
trace_indds['unit'] = 'ms'
trace_indds['msg_id'] = msg_id
time = dds_return['ts']
if cfg is not None and not cfg.absolute_timestamp:
time = dds_return['ts'] - cfg.time_base
trace_inros['timestamp'] = time
trace_inros['duration'] = (ros_return['ts'] - dds_return['ts']) * 1e3 # ms
trace_inros['name'] = "[%s] %s -> [%s] %s <br>Topic Name: %s<br>Transmission: %s -> %s" % \
(dds_return['layer'], dds_return['func'], ros_return['layer'], ros_return['func'],
start['topic_name'], start['comm'], ros_return['comm'])
trace_inros['unit'] = 'ms'
trace_inros['msg_id'] = msg_id
if trace_inros['duration'] <= 0 or trace_indds['duration'] <= 0:
continue
traces_indds.append(trace_indds)
traces_inros.append(trace_inros)
traces_dds = pd.DataFrame(traces_indds)
    traces_ros = pd.DataFrame(traces_inros)
    # assumption: return both the DDS-level and ROS-level latency traces
    return traces_dds, traces_ros
import urllib
import pytest
import pandas as pd
from pandas import testing as pdt
from anonympy import __version__
from anonympy.pandas import dfAnonymizer
from anonympy.pandas.utils_pandas import load_dataset
@pytest.fixture(scope="module")
def anonym_small():
df = load_dataset('small')
anonym = dfAnonymizer(df)
return anonym
@pytest.fixture(scope="module")
def anonym_big():
try:
df = load_dataset('big')
anonym = dfAnonymizer(df)
except urllib.error.HTTPError:
anonym = None
return anonym
def test_anonym_obj(anonym_small, anonym_big):
assert isinstance(anonym_small, dfAnonymizer), "should have\
returned `dfAnonymizer` object"
if anonym_big is None:
assert False, "Failed to fetch the DataFrame"
assert isinstance(anonym_big, dfAnonymizer), "should have returned\
`dfAnonymizer` object"
def test_numeric_noise(anonym_small):
output = anonym_small.numeric_noise('age', seed=42, inplace=False)
expected = pd.Series([38, 47], dtype='int64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_noise(['age', 'salary'],
seed=42,
inplace=False)
expected = pd.DataFrame({'age': [38, 47],
'salary': [59239.79912097112, 49323.30756879504]})
pdt.assert_frame_equal(expected, output)
def test_numeric_binning(anonym_small):
output = anonym_small.numeric_binning('salary', bins=2, inplace=False)
dtype = pd.CategoricalDtype([
pd.Interval(49315.0, 54279.0, closed='right'),
pd.Interval(54279.0, 59234.0, closed='right')],
ordered=True)
expected = pd.Series([
pd.Interval(54279.0, 59234.0, closed='right'),
pd.Interval(49315.0, 54279.0, closed='right')],
dtype=dtype)
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_binning(['age', 'salary'],
bins=2,
inplace=False)
dtype2 = pd.CategoricalDtype([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
ordered=True)
ser2 = pd.Series([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
dtype=dtype2)
expected = pd.DataFrame({'age': ser2, 'salary': expected})
pdt.assert_frame_equal(expected, output)
def test_numeric_masking(anonym_small):
output = anonym_small.numeric_masking('age', inplace=False)
expected = pd.Series([7.5, -7.5], dtype='float64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_masking(['age', 'salary'], inplace=False)
expected = pd.DataFrame({'age': [-4954.900676201789, 4954.900676201798],
'salary': [5.840670901327418e-15,
5.840670901327409e-15]})
pdt.assert_frame_equal(expected, output)
def test_numeric_rounding(anonym_small):
output = anonym_small.numeric_rounding('salary', inplace=False)
expected = pd.Series([60000.0, 50000.0], dtype='float64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_rounding(['age', 'salary'], inplace=False)
expected = pd.DataFrame({'age': {0: 30, 1: 50}, 'salary': {0: 60000.0,
1: 50000.0}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_fake(anonym_small):
output = anonym_small.categorical_fake('name',
locale=['en_US'],
seed=42,
inplace=False)
expected = pd.Series(['<NAME>', '<NAME>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_fake(['name', 'email'],
locale=['en_GB'],
seed=42,
inplace=False)
expected = pd.DataFrame({'name': {0: '<NAME>', 1: '<NAME>'},
'email': {0: '<EMAIL>',
1: '<EMAIL>'}})
pdt.assert_frame_equal(expected, output)
output = anonym_small.categorical_fake({'name': 'name_female'},
seed=42,
inplace=False)
expected = pd.Series(['<NAME>', '<NAME>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_fake({'ssn': 'ssn', 'web': 'url'},
seed=42,
inplace=False)
expected = pd.DataFrame({'ssn': {0: '655-15-0410', 1: '760-36-4013'},
'web': {0: 'http://www.hill.net/',
1: 'http://johnson.com/'}})
pdt.assert_frame_equal(expected, output)
def test_categorical_fake_auto(anonym_small):
output = anonym_small.categorical_fake_auto(seed=42, inplace=False)
expected = pd.DataFrame({'name': {0: '<NAME>', 1: '<NAME>'},
'email': {0: '<EMAIL>',
1: '<EMAIL>'},
'ssn': {0: '655-15-0410', 1: '760-36-4013'}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_resampling(anonym_small):
output = anonym_small.categorical_resampling('name',
inplace=False,
seed=42)
expected = pd.Series(['Bruce', 'Tony'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_resampling(['web', 'ssn'],
seed=2,
inplace=False)
expected = pd.DataFrame({'web':
{0: 'http://www.alandrosenburgcpapc.co.uk',
1: 'http://www.alandrosenburgcpapc.co.uk'},
'ssn': {0: '656564664', 1: '343554334'}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_tokenization(anonym_small):
output = anonym_small.categorical_tokenization('name',
key='test',
inplace=False)
expected = pd.Series(['45fe1a783c', 'bda8a41313'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_tokenization(['web', 'ssn'],
key='test',
inplace=False)
expected = pd.DataFrame({'web': {0: 'e667d84f37', 1: '986a819ea2'},
'ssn': {0: '0f7c17cc6f', 1: 'f42ad34907'}})
| pdt.assert_frame_equal(expected, output) | pandas.testing.assert_frame_equal |
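These tests rely entirely on the pandas.testing comparators; a toy illustration of what they enforce (values, dtype, index and columns, with check_names=False relaxing only the name attribute):
import pandas as pd
from pandas import testing as pdt

left = pd.DataFrame({"age": [38, 47]})
right = pd.DataFrame({"age": [38, 47]})
pdt.assert_frame_equal(left, right)                 # passes silently when frames match
pdt.assert_series_equal(left["age"].rename("x"), right["age"], check_names=False)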
#!/usr/bin/env python
import argparse
import numpy as np
import pandas as pd
from scipy import linalg
from tqdm import tqdm
import os
import logging
def get_args():
parser = argparse.ArgumentParser(description="calculate splicing scores per gene/cell")
parser.add_argument("--input", help="Name of the input file from rijk_zscore")
parser.add_argument("--svd_type", choices=["normgene","normdonor"], help="Method of calculating matrix before SVD")
parser.add_argument("--grouping_level_2", help="column to group the data by (e.g. ontology, compartment, tissue)", default="ontology")
parser.add_argument("--grouping_level_1", help="subset data by this column before checking for differences (e.g. tissue, compartment)", default="dummy")
parser.add_argument("--outname_pq", help="Name of output file")
parser.add_argument("--outname_tsv", help="Name of output File")
parser.add_argument("--outname_log", help="Name of output File")
args = parser.parse_args()
return args
def main():
args = get_args()
logging.basicConfig(
filename = args.outname_log,
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
logging.info("Beginning calculation")
logging.info("Read in parquet file")
df = pd.read_parquet(args.input)
##### PERFORM SVD ZSCORE CALCULATION #####
logging.info("Perform SVD zscore calculation")
letters = ["Start", "End"]
if args.svd_type == "normgene":
zcontrib_col = "zcontrib"
elif args.svd_type == "normdonor":
for let in letters:
# find number of reads per donor (or acceptor) per cell
df["cell_gene_pos" + let] = df["cell_gene"] + df["junc" + let].astype(str)
df["n.g_pos" + let] = df.groupby("cell_gene_pos" + let)["numReads"].transform("sum")
# normalize on a donor/acceptor rather than a gene basis
# TRY OUT NOT SQRT-ING denominator as normalization
df["zcontrib_posnorm" + let] = df["numReads"] * df["nSijk" + let] / df["n.g_pos" + let]
zcontrib_col = "zcontrib_posnorm"
for let in letters:
# replace NANs with zeros
df["zcontrib{}_rep".format(let)] = df[zcontrib_col + let].fillna(0)
# create label for each junction + donor/acceptor
df["str_junc" + let] = df["junc" + let].astype(int).astype(str) + "_" + let
df["cell_gene_pos" + let] = df["cell"] + df["gene"] + df["junc" + let].astype(str)
# get sum of zcontribs for the given cell and splice site
df["summed_zcontrib" + let] = df.groupby("cell_gene_pos" + let)["zcontrib{}_rep".format(let)].transform('sum')
k = 3 # number of components to include
loads = {"f{}".format(i) : {} for i in range(k)}
zs = {"svd_z{}".format(i) : {} for i in range(k)}
logging.info("Iterate over each gene")
for gene, gene_df in tqdm(df.groupby("gene")):
# get zcontrib matrix
gene_mats = []
for let in letters:
gene_mat = gene_df.drop_duplicates("cell_gene_pos" + let).pivot_table(index="cell_gene",columns="str_junc{}".format(let),values="summed_zcontrib" + let,fill_value=0)
gene_mats.append(gene_mat)
gene_mat = gene_mats[0].merge(gene_mats[1],on="cell_gene")
# mean-normalize the rows
gene_mat = gene_mat.subtract(gene_mat.mean(axis=1),axis=0)
# calculate svd
u, s, vh = linalg.svd(gene_mat,check_finite=False,full_matrices=False)
if len(s) >= k:
# calculate new z scores based on svd
new_zs = gene_mat.dot(np.transpose(vh[:k,:]))
# calculate load on each component
load = np.square(s)/sum(np.square(s))
# save new zs and fs in dictionaries to save later
for i in range(k):
loads["f{}".format(i)][gene] = load[i]
zs["svd_z{}".format(i)].update( | pd.Series(new_zs[i].values,index=new_zs.index) | pandas.Series |
import os
import tempfile
import torch, torchvision
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import argparse
import torch.multiprocessing as mp
import torchvision.transforms as transforms
import torchvision.models as models
import time
import pandas as pd
def setup(rank, world_size):
print("Running init_process_group...")
dist.init_process_group("nccl", rank=rank, world_size=world_size)
print("Finished init_process_group...")
def cleanup():
dist.destroy_process_group()
def train(gpu, args):
rank = args.nr * args.gpus + gpu
setup(rank, args.world_size)
transform = transforms.Compose([
torchvision.transforms.Resize(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
])
batch_size = args.batchsize
train_dataset = torchvision.datasets.CIFAR10('./datasets/',transform=transform,download=True)
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=args.world_size,rank=rank)
trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=2,sampler=sampler)
model = models.resnet152()
torch.cuda.set_device(gpu)
model.cuda()
print("GPU initialization")
dummy_input = torch.randn(1, 3,224,224, dtype=torch.float).to(gpu)
for _ in range(10):
_ = model(dummy_input)
model = nn.parallel.DistributedDataParallel(model,device_ids=[gpu])
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
training_run_data = | pd.DataFrame(columns=['epoch','batch','batch_size','gpu_number','time']) | pandas.DataFrame |
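training_run_data above is created empty; one way to fill it per batch and persist it (an assumption, the original loop is not shown here) is .loc enlargement, sketched with fake timings:
import time
import pandas as pd

training_run_data = pd.DataFrame(columns=['epoch', 'batch', 'batch_size', 'gpu_number', 'time'])
start = time.time()
for batch_idx in range(3):                                      # stand-in for the trainloader loop
    training_run_data.loc[len(training_run_data)] = [
        0, batch_idx, 128, 1, time.time() - start]              # epoch, batch, batch_size, gpus, seconds
training_run_data.to_csv("timings.csv", index=False)            # hypothetical output path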
import pandas as pd
import os
from pipeline_ie.config import Config
from pathlib import Path
class DataLoader:
def __init__(self, input_data):
self.config = Config().config
self.input = input_data
def check_input(self):
try:
if self.input == "csv":
return "csv"
elif self.input == "xlsx":
return "xlsx"
elif self.input!="NA":
return "text"
else:
return "file"
except Exception as e:
print("Error {}").format(e)
def load_files(self, flag):
"""
Reads all files from the given input directory/Read given text/Read text from given xlsx or csv file
and create a dataframe for the same.
NOTE: ALl files need to be in either xlsx or csv format and need to have a column name as set in config.ini
:return: DataFrame with column of Sentences
"""
col_name = self.config.get('file_directory', 'input_column_name')
if flag == "text":
input_text = {col_name: [self.input]}
df_text = pd.DataFrame(input_text)
elif self.config.get('file_directory', 'input_file') != 'NA':
input_file = self.config.get('file_directory', 'input_file')
flag = Path(input_file).suffix
if flag == ".csv":
df_text = pd.read_csv(input_file)
if flag == ".xlsx":
df_text = pd.read_excel(input_file)
else:
dir_name = self.config.get('file_directory', 'input_file_dir')
list_files = os.listdir(dir_name)
list_df = []
for file in list_files:
file_path = os.path.join(dir_name, file)
if flag == "xlsx":
df = pd.read_excel(file_path, usecols=[col_name], index_col=None, header=0)
elif flag == "csv":
df = pd.read_csv(file_path, usecols=[col_name], index_col=None, header=0)
list_df.append(df)
df_text = | pd.concat(list_df, axis=0, ignore_index=True) | pandas.concat |
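The directory branch above reduces to reading one column from every file and stacking the frames. A standalone sketch of that pattern; the folder and column name are placeholders, not the configured values:
import glob
import os
import pandas as pd

col_name = "sentence"                                           # placeholder for the configured column
paths = glob.glob(os.path.join("input_dir", "*.csv"))           # placeholder input folder
frames = [pd.read_csv(p, usecols=[col_name]) for p in paths]
df_text = (pd.concat(frames, axis=0, ignore_index=True)
           if frames else pd.DataFrame(columns=[col_name]))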
import requests
import json
from flask import Flask, request
from json import dumps
#from flask.ext.jsonpify import jsonify
from flask_cors import CORS
from datetime import datetime
import pandas as pd
import difflib
import numpy as np
import pickle as pkl
import sys
username = "aditya1495"
apiKey = "3fe2254bb42e851aef4b9ac513a84d6b217a77e0"
fxmlUrl = "https://flightxml.flightaware.com/json/FlightXML3/"
app = Flask(__name__)
CORS(app)
pkl_folder = 'saved_models/'
@app.route('/getNewDelays', methods=['GET'])
def getLiveFeed():
payload = {'howMany':'1'}
response = requests.get(fxmlUrl + "AirportDelays",
params=payload, auth=(username, apiKey))
print(response.json())
if response.status_code == 200:
return json.dumps(response.json())
else:
return json.dumps({'data': ''})
@app.route('/flightFetch', methods=['GET'])
def findFlights():
origin = request.args.get('origin')
destination = request.args.get('destination')
payload = {'howMany':'5', 'origin' : origin, 'destination' : destination, 'type' : 'nonstop'}
response = requests.get(fxmlUrl + "FindFlight",
params=payload, auth=(username, apiKey))
df_al = | pd.DataFrame.from_csv('airlines.csv') | pandas.DataFrame.from_csv |
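Note that pd.DataFrame.from_csv, used just above, was deprecated in pandas 0.21 and removed in 1.0; its documented replacement is read_csv with an index column and date parsing:
import pandas as pd

# equivalent of pd.DataFrame.from_csv('airlines.csv') on modern pandas
df_al = pd.read_csv('airlines.csv', index_col=0, parse_dates=True)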
import itertools
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from solarforecastarbiter.reference_forecasts import forecast
def assert_none_or_series(out, expected):
assert len(out) == len(expected)
for o, e in zip(out, expected):
if e is None:
assert o is None
else:
assert_series_equal(o, e)
def test_resample():
index = pd.date_range(start='20190101', freq='15min', periods=5)
arg = pd.Series([1, 0, 0, 0, 2], index=index)
idx_exp = pd.date_range(start='20190101', freq='1h', periods=2)
expected = pd.Series([0.25, 2.], index=idx_exp)
out = forecast.resample(arg)
assert_series_equal(out, expected)
assert forecast.resample(None) is None
@pytest.fixture
def rfs_series():
return pd.Series([1, 2],
index=pd.DatetimeIndex(['20190101 01', '20190101 02']))
@pytest.mark.parametrize(
'start,end,start_slice,end_slice,fill_method,exp_val,exp_idx', [
(None, None, None, None, 'interpolate', [1, 1.5, 2],
['20190101 01', '20190101 0130', '20190101 02']),
('20190101', '20190101 0230', None, None, 'interpolate',
[1, 1, 1, 1.5, 2, 2],
['20190101', '20190101 0030', '20190101 01', '20190101 0130',
'20190101 02', '20190101 0230']),
('20190101', '20190101 02', '20190101 0030', '20190101 0130', 'bfill',
[1., 1, 2], ['20190101 0030', '20190101 01', '20190101 0130'])
]
)
def test_reindex_fill_slice(rfs_series, start, end, start_slice, end_slice,
fill_method, exp_val, exp_idx):
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
assert_series_equal(out, exp)
def test_reindex_fill_slice_some_nan():
rfs_series = pd.Series([1, 2, None, 4], index=pd.DatetimeIndex([
'20190101 01', '20190101 02', '20190101 03', '20190101 04',
]))
start, end, start_slice, end_slice, fill_method = \
None, None, None, None, 'interpolate'
exp_val = [1, 1.5, 2, 2.5, 3, 3.5, 4]
exp_idx = [
'20190101 01', '20190101 0130', '20190101 02', '20190101 0230',
'20190101 03', '20190101 0330', '20190101 04']
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
assert_series_equal(out, exp)
def test_reindex_fill_slice_all_nan():
arg = pd.Series([None]*3, index=pd.DatetimeIndex(
['20190101 01', '20190101 02', '20190101 03']))
out = forecast.reindex_fill_slice(arg, freq='30min')
exp = pd.Series([None]*5, index=pd.DatetimeIndex(
['20190101 01', '20190101 0130', '20190101 02', '20190101 0230',
'20190101 03']))
assert_series_equal(out, exp)
def test_reindex_fill_slice_empty():
out = forecast.reindex_fill_slice(pd.Series(dtype=float), freq='30min')
assert_series_equal(out, pd.Series(dtype=float))
def test_reindex_fill_slice_none():
out = forecast.reindex_fill_slice(None, freq='30min')
assert out is None
def test_cloud_cover_to_ghi_linear():
cloud_cover = pd.Series([0, 50, 100.])
ghi_clear = pd.Series([1000, 1000, 1000.])
out = forecast.cloud_cover_to_ghi_linear(cloud_cover, ghi_clear)
expected = pd.Series([1000, 675, 350.])
assert_series_equal(out, expected)
out = forecast.cloud_cover_to_ghi_linear(cloud_cover, ghi_clear, offset=20)
expected = pd.Series([1000, 600, 200.])
assert_series_equal(out, expected)
@pytest.mark.xfail(raises=AssertionError, strict=True)
def test_cloud_cover_to_irradiance_ghi_clear():
index = pd.date_range(start='20190101', periods=3, freq='1h')
cloud_cover = pd.Series([0, 50, 100.], index=index)
ghi_clear = pd.Series([10, 10, 1000.], index=index)
zenith = pd.Series([90.0, 89.9, 45], index=index)
out = forecast.cloud_cover_to_irradiance_ghi_clear(
cloud_cover, ghi_clear, zenith
)
# https://github.com/pvlib/pvlib-python/issues/681
ghi_exp = pd.Series([10., 6.75, 350.])
dni_exp = pd.Series([0., 0., 4.74198165e+01])
dhi_exp = pd.Series([10., 6.75, 316.46912616])
assert_series_equal(out[0], ghi_exp)
assert_series_equal(out[1], dni_exp)
assert_series_equal(out[2], dhi_exp)
@pytest.mark.xfail(raises=AssertionError, strict=True)
def test_cloud_cover_to_irradiance():
index = pd.date_range(start='20190101', periods=3, freq='1h')
cloud_cover = pd.Series([0, 50, 100.], index=index)
latitude = 32.2
longitude = -110.9
elevation = 700
zenith = pd.Series([90.0, 89.9, 45], index=index)
apparent_zenith = pd.Series([89.9, 89.85, 45], index=index)
out = forecast.cloud_cover_to_irradiance(
latitude, longitude, elevation, cloud_cover, apparent_zenith, zenith
)
# https://github.com/pvlib/pvlib-python/issues/681
ghi_exp = pd.Series([10., 6.75, 350.], index=index)
dni_exp = pd.Series([0., 0., 4.74198165e+01], index=index)
dhi_exp = pd.Series([10., 6.75, 316.46912616], index=index)
assert_series_equal(out[0], ghi_exp)
assert_series_equal(out[1], dni_exp)
assert_series_equal(out[2], dhi_exp)
@pytest.mark.parametrize('mixed,expected', [
([1, 1/2, 1/3, 1/4, 1/5, 1/6], [1., 0, 0, 0, 0, 0]),
([0, 0, 0, 0, 0, 1/6], [0, 0, 0, 0, 0, 1.]),
([0, 0, 0, 0, 0, 1/6, 1, 1/2, 1/3, 1/4, 1/5, 1/6],
[0, 0, 0, 0, 0, 1., 1., 0, 0, 0, 0, 0]),
([65.0, 66.0, 44.0, 32.0, 30.0, 26.0], # GH 144
[65.0, 67.0, 0.0, 0.0, 22.0, 6.0]), # 4th element is -4 if no clipping
([1, 1/2], [1., 0]),
([0, 1/2], [0, 1.]),
([0, 1/2, 1, 1/2], [0, 1., 1., 0])
])
def test_unmix_intervals(mixed, expected):
npts = len(mixed)
if npts in [2, 4]:
index = | pd.date_range(start='20190101 03Z', freq='3h', periods=npts) | pandas.date_range |
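The expected values in this parametrization are consistent with mixed[i] being the running mean of the first i+1 interval values within a block, so interval i is recovered as (i+1)*mixed[i] - i*mixed[i-1], clipped at zero (the GH 144 case notes the -4 that clipping removes). A sketch of that inversion, inferred from the test data rather than copied from forecast.unmix_intervals, which presumably also detects block boundaries from the index frequency:
def unmix_block(mixed):
    out = []
    for i, m in enumerate(mixed):
        prev = mixed[i - 1] if i else 0.0
        out.append(max((i + 1) * m - i * prev, 0.0))   # clip negatives to zero
    return out

assert unmix_block([1, 1/2, 1/3, 1/4, 1/5, 1/6]) == [1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
assert unmix_block([65.0, 66.0, 44.0, 32.0, 30.0, 26.0]) == [65.0, 67.0, 0.0, 0.0, 22.0, 6.0]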
# -*- coding: utf-8 -*-
# https://zhuanlan.zhihu.com/p/142685333
import pandas as pd
import datetime
import tushare as ts
import numpy as np
from math import log,sqrt,exp
from scipy import stats
import plotly.graph_objects as go
import plotly
import plotly.express as px
pro = ts.pro_api()
plotly.offline.init_notebook_mode(connected=True)
def extra_data(date): # extract the raw data
# 提取50ETF合约基础信息
df_basic = pro.opt_basic(exchange='SSE', fields='ts_code,name,call_put,exercise_price,list_date,delist_date')
df_basic = df_basic.loc[df_basic['name'].str.contains('50ETF')]
df_basic = df_basic[(df_basic.list_date<=date)&(df_basic.delist_date>date)] # keep only contracts trading on the given date
df_basic = df_basic.drop(['name','list_date'],axis=1)
df_basic['date'] = date
# extract daily quote data
df_cal = pro.trade_cal(exchange='SSE', cal_date=date, fields = 'cal_date,is_open,pretrade_date')
if df_cal.iloc[0, 1] == 0:
date = df_cal.iloc[0, 2] # if the date is not a trading day, use the previous trading day
opt_list = df_basic['ts_code'].tolist() # get the list of 50ETF option contract codes
df_daily = pro.opt_daily(trade_date=date,exchange = 'SSE',fields='ts_code,trade_date,settle')
df_daily = df_daily[df_daily['ts_code'].isin(opt_list)]
# extract the 50ETF (underlying) price data
df_50etf = pro.fund_daily(ts_code='510050.SH', trade_date = date,fields = 'close')
s = df_50etf.iloc[0, 0]
# extract the risk-free rate (proxied by the 1-week Shibor)
df_shibor = pro.shibor(date = date,fields = '1w')
rf = df_shibor.iloc[0,0]/100
# merge the data
df = | pd.merge(df_basic,df_daily,how='left',on=['ts_code']) | pandas.merge |
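The left merge that closes extra_data() attaches the day's settle price to each contract by ts_code; contracts with no quote that day get NaN. A toy illustration with fake codes and prices:
import pandas as pd

df_basic = pd.DataFrame({"ts_code": ["A.SH", "B.SH"], "exercise_price": [2.5, 3.0]})
df_daily = pd.DataFrame({"ts_code": ["A.SH"], "settle": [0.12]})
merged = pd.merge(df_basic, df_daily, how="left", on=["ts_code"])
print(merged)   # B.SH gets NaN in 'settle'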
from datetime import timedelta
import numpy as np
import pytest
from pandas import Categorical, DataFrame, NaT, Period, Series, Timedelta, Timestamp
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_pytimedelta(self):
# GH#8209
ser = Series([np.nan, Timedelta("1 days")], index=["A", "B"])
result = ser.fillna(timedelta(1))
expected = Series(Timedelta("1 days"), index=["A", "B"])
tm.assert_series_equal(result, expected)
def test_fillna_period(self):
# GH#13737
ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
res = ser.fillna(Period("2012-01", freq="M"))
exp = Series([Period("2011-01", freq="M"), Period("2012-01", freq="M")])
tm.assert_series_equal(res, exp)
assert res.dtype == "Period[M]"
def test_fillna_dt64_timestamp(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# reg fillna
result = ser.fillna(Timestamp("20130104"))
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130104"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
result = ser.fillna(NaT)
expected = ser
tm.assert_series_equal(result, expected)
def test_fillna_dt64_non_nao(self):
# GH#27419
ser = Series([Timestamp("2010-01-01"), NaT, Timestamp("2000-01-01")])
val = np.datetime64("1975-04-05", "ms")
result = ser.fillna(val)
expected = Series(
[Timestamp("2010-01-01"), Timestamp("1975-04-05"), Timestamp("2000-01-01")]
)
tm.assert_series_equal(result, expected)
def test_fillna_numeric_inplace(self):
x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
tm.assert_series_equal(y, expected)
# ---------------------------------------------------------------
# CategoricalDtype
@pytest.mark.parametrize(
"fill_value, expected_output",
[
("a", ["a", "a", "b", "a", "a"]),
({1: "a", 3: "b", 4: "b"}, ["a", "a", "b", "b", "b"]),
({1: "a"}, ["a", "a", "b", np.nan, np.nan]),
({1: "a", 3: "b"}, ["a", "a", "b", "b", np.nan]),
(Series("a"), ["a", np.nan, "b", np.nan, np.nan]),
(Series("a", index=[1]), ["a", "a", "b", np.nan, np.nan]),
(Series({1: "a", 3: "b"}), ["a", "a", "b", "b", np.nan]),
(Series(["a", "b"], index=[3, 4]), ["a", np.nan, "b", "a", "b"]),
],
)
def test_fillna_categorical(self, fill_value, expected_output):
# GH#17033
# Test fillna for a Categorical series
data = ["a", np.nan, "b", np.nan, np.nan]
ser = Series( | Categorical(data, categories=["a", "b"]) | pandas.Categorical |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = | Series([np.nan, np.nan, 3, 4]) | pandas.Series |
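As a compact reminder of the Series.where contract these last cases exercise: positions where the condition is False are replaced, by NaN unless an other value is given, which is what forces the int-to-float upcasts checked above:
import pandas as pd

s = pd.Series([1, 2, 3, 4])
print(s.where(s > 2))            # [NaN, NaN, 3.0, 4.0] -> dtype float64
print(s.where(s > 2, other=-1))  # [-1, -1, 3, 4]       -> stays int64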
import requests
import bs4
import pandas as pd
def get_meetup_events(group):
"""Returns a list of events and their details for a given meetup group."""
url = 'https://api.meetup.com/{group}/events?&sign=true&photo-host=public&page=200&status=past'.format(group=group)
r = requests.get(url)
events = r.json()
return events
def events_to_csv(events, fname):
dd = | pd.DataFrame(events) | pandas.DataFrame |
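The row above cuts events_to_csv off mid-function; a hedged guess at its remainder (an assumption, not the original source) is simply a to_csv call on the assembled frame:
import pandas as pd

def events_to_csv_sketch(events, fname):
    dd = pd.DataFrame(events)        # one row per event dict
    dd.to_csv(fname, index=False)

events_to_csv_sketch([{"name": "PyData meetup", "yes_rsvp_count": 42}], "events.csv")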
import unittest
import copy
import numpy as np
import numpy.testing as np_test
import pandas as pd
import pandas.testing as pd_test
import warnings
from pyblackscholesanalytics.market.market import MarketEnvironment
from pyblackscholesanalytics.options.options import PlainVanillaOption, DigitalOption
from pyblackscholesanalytics.utils.utils import scalarize
class TestPlainVanillaOption(unittest.TestCase):
"""Class to test public methods of PlainVanillaOption class"""
def setUp(self) -> None:
warnings.filterwarnings("ignore")
# common market environment
mkt_env = MarketEnvironment()
# option objects
self.call_opt = PlainVanillaOption(mkt_env)
self.put_opt = PlainVanillaOption(mkt_env, option_type="put")
# pricing parameters
S_scalar = 100
S_vector = [90, 100, 110]
t_scalar_string = "01-06-2020"
t_date_range = pd.date_range(start="2020-04-19", end="2020-12-21", periods=5)
# common pricing parameter setup
common_params = {"np_output": True, "minimization_method": "Least-Squares"}
# scalar parameters setup
self.scalar_params = copy.deepcopy(common_params)
self.scalar_params["S"] = S_scalar
self.scalar_params["t"] = t_scalar_string
# vector parameters setup
self.vector_params = copy.deepcopy(common_params)
self.vector_params["S"] = S_vector
self.vector_params["t"] = t_date_range
# complex pricing parameter setup
# (S scalar, K and t vector, sigma distributed as Kxt grid, r distributed as Kxt grid)
K_vector = [75, 85, 90, 95]
mK = len(K_vector)
n = 3
sigma_grid_K = np.array([0.1 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
r_grid_K = np.array([0.01 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
self.complex_params = {"S": S_vector[0],
"K": K_vector,
"t": pd.date_range(start="2020-04-19", end="2020-12-21", periods=n),
"sigma": sigma_grid_K,
"r": r_grid_K,
"np_output": False,
"minimization_method": "Least-Squares"}
def test_price_scalar(self):
"""Test price - scalar case"""
# call
test_call = scalarize(self.call_opt.price(**self.scalar_params))
expected_call = 7.548381716811839
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.price(**self.scalar_params))
expected_put = 4.672730506407959
self.assertEqual(test_put, expected_put)
def test_price_vector_np(self):
"""Test price - np.ndarray output case"""
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = np.array([[3.48740247e+00, 8.42523213e+00, 1.55968082e+01],
[2.53045128e+00, 7.14167587e+00, 1.43217796e+01],
[1.56095778e+00, 5.72684668e+00, 1.29736886e+01],
[5.89165298e-01, 4.00605304e+00, 1.14939139e+01],
[7.21585753e-04, 1.38927959e+00, 1.01386434e+01]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = np.array([[1.00413306e+01, 4.97916024e+00, 2.15073633e+00],
[9.90791873e+00, 4.51914332e+00, 1.69924708e+00],
[9.75553655e+00, 3.92142545e+00, 1.16826738e+00],
[9.62127704e+00, 3.03816479e+00, 5.26025639e-01],
[9.86382907e+00, 1.25238707e+00, 1.75090342e-03]])
np_test.assert_allclose(test_put, expected_put)
def test_price_vector_df(self):
"""Test price - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = pd.DataFrame(data=[[3.48740247e+00, 8.42523213e+00, 1.55968082e+01],
[2.53045128e+00, 7.14167587e+00, 1.43217796e+01],
[1.56095778e+00, 5.72684668e+00, 1.29736886e+01],
[5.89165298e-01, 4.00605304e+00, 1.14939139e+01],
[7.21585753e-04, 1.38927959e+00, 1.01386434e+01]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = pd.DataFrame(data=[[1.00413306e+01, 4.97916024e+00, 2.15073633e+00],
[9.90791873e+00, 4.51914332e+00, 1.69924708e+00],
[9.75553655e+00, 3.92142545e+00, 1.16826738e+00],
[9.62127704e+00, 3.03816479e+00, 5.26025639e-01],
[9.86382907e+00, 1.25238707e+00, 1.75090342e-03]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_PnL_scalar(self):
"""Test P&L - scalar case"""
# call
test_call = scalarize(self.call_opt.PnL(**self.scalar_params))
expected_call = 4.060979245868182
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.PnL(**self.scalar_params))
expected_put = -5.368600081057167
self.assertEqual(test_put, expected_put)
def test_PnL_vector_np(self):
"""Test P&L - np.ndarray output case"""
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = np.array([[0., 4.93782966, 12.10940574],
[-0.95695119, 3.6542734, 10.83437716],
[-1.92644469, 2.2394442, 9.48628613],
[-2.89823717, 0.51865057, 8.00651142],
[-3.48668089, -2.09812288, 6.65124095]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = np.array([[0., -5.06217034, -7.89059426],
[-0.13341186, -5.52218727, -8.3420835],
[-0.28579403, -6.11990513, -8.87306321],
[-0.42005355, -7.0031658, -9.51530495],
[-0.17750152, -8.78894351, -10.03957968]])
np_test.assert_allclose(test_put, expected_put)
def test_PnL_vector_df(self):
"""Test P&L - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = pd.DataFrame(data=[[0., 4.93782966, 12.10940574],
[-0.95695119, 3.6542734, 10.83437716],
[-1.92644469, 2.2394442, 9.48628613],
[-2.89823717, 0.51865057, 8.00651142],
[-3.48668089, -2.09812288, 6.65124095]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = pd.DataFrame(data=[[0., -5.06217034, -7.89059426],
[-0.13341186, -5.52218727, -8.3420835],
[-0.28579403, -6.11990513, -8.87306321],
[-0.42005355, -7.0031658, -9.51530495],
[-0.17750152, -8.78894351, -10.03957968]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_delta_scalar(self):
"""Test Delta - scalar case"""
# call
test_call = scalarize(self.call_opt.delta(**self.scalar_params))
expected_call = 0.6054075531684143
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.delta(**self.scalar_params))
expected_put = -0.3945924468315857
self.assertEqual(test_put, expected_put)
def test_delta_vector_np(self):
"""Test Delta - np.ndarray output case"""
# call
test_call = self.call_opt.delta(**self.vector_params)
expected_call = np.array([[3.68466757e-01, 6.15283790e-01, 8.05697003e-01],
[3.20097309e-01, 6.00702480e-01, 8.18280131e-01],
[2.54167521e-01, 5.83663527e-01, 8.41522350e-01],
[1.49152172e-01, 5.61339299e-01, 8.91560577e-01],
[8.89758553e-04, 5.23098767e-01, 9.98343116e-01]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.delta(**self.vector_params)
expected_put = np.array([[-0.63153324, -0.38471621, -0.194303],
[-0.67990269, -0.39929752, -0.18171987],
[-0.74583248, -0.41633647, -0.15847765],
[-0.85084783, -0.4386607, -0.10843942],
[-0.99911024, -0.47690123, -0.00165688]])
np_test.assert_allclose(test_put, expected_put, rtol=5e-6)
def test_delta_vector_df(self):
"""Test Delta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.delta(**self.vector_params)
expected_call = pd.DataFrame(data=[[3.68466757e-01, 6.15283790e-01, 8.05697003e-01],
[3.20097309e-01, 6.00702480e-01, 8.18280131e-01],
[2.54167521e-01, 5.83663527e-01, 8.41522350e-01],
[1.49152172e-01, 5.61339299e-01, 8.91560577e-01],
[8.89758553e-04, 5.23098767e-01, 9.98343116e-01]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.delta(**self.vector_params)
expected_put = pd.DataFrame(data=[[-0.63153324, -0.38471621, -0.194303],
[-0.67990269, -0.39929752, -0.18171987],
[-0.74583248, -0.41633647, -0.15847765],
[-0.85084783, -0.4386607, -0.10843942],
[-0.99911024, -0.47690123, -0.00165688]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_gamma_scalar(self):
"""Test Gamma - scalar case"""
# call
test_call = scalarize(self.call_opt.gamma(**self.scalar_params))
expected_call = 0.025194958512498786
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.gamma(**self.scalar_params))
expected_put = copy.deepcopy(expected_call)
self.assertEqual(test_put, expected_put)
# assert call and put gamma coincide
self.assertEqual(test_call, test_put)
def test_gamma_vector_np(self):
"""Test Gamma - np.ndarray output case"""
# call
test_call = self.call_opt.gamma(**self.vector_params)
expected_call = np.array([[0.02501273, 0.02281654, 0.01493167],
[0.02725456, 0.02648423, 0.01645793],
[0.02950243, 0.03231528, 0.01820714],
[0.02925862, 0.0446913, 0.01918121],
[0.00101516, 0.12030889, 0.00146722]])
np_test.assert_allclose(test_call, expected_call, rtol=5e-6)
# put
test_put = self.put_opt.gamma(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
np_test.assert_allclose(test_put, expected_put, rtol=5e-6)
# assert call and put gamma coincide
np_test.assert_allclose(test_call, test_put)
def test_gamma_vector_df(self):
"""Test Gamma - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.gamma(**self.vector_params)
expected_call = pd.DataFrame(data=[[0.02501273, 0.02281654, 0.01493167],
[0.02725456, 0.02648423, 0.01645793],
[0.02950243, 0.03231528, 0.01820714],
[0.02925862, 0.0446913, 0.01918121],
[0.00101516, 0.12030889, 0.00146722]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.gamma(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
| pd_test.assert_frame_equal(test_put, expected_put) | pandas.testing.assert_frame_equal |
import numpy as np
import pandas as pd
import seaborn as sns
import random
from traitlets import Int, List, Bool, CFloat, Unicode
from sepal_ui.model import Model
import component.parameter.app as cp
import component.scripts as cs
import component.parameter as param
import ee
import json
from geopandas import GeoDataFrame
ee.Initialize()
class BasinModel(Model):
lat = CFloat(0, allow_none=True).tag(sync=True)
lon = CFloat(0, allow_none=True).tag(sync=True)
years = List([2010, 2020]).tag(sync=True)
thres = Int(80).tag(sync=True)
level = Int(8).tag(sync=True)
"int: target level of the catchment"
method = Unicode("").tag(sync=True)
"Unicode: Selection basin id method (all - filter)"
selected_hybas = List([]).tag(sync=True)
"list: current selected hybasid(s) from the dropdown base list"
hybasin_list = List().tag(sync=True)
"list: hybasin id list of upstream catchments"
manual = Bool(False).tag(sync=True)
"bool: wheter to set the coordinates manually or not"
marker = Bool(False).tag(sync=True)
"bool: whether a marker (AOI) is set or not"
# Statistics
ready = Bool(False).tag(sync=True)
sett_timespan = List([2010, 2020]).tag(sync=True)
"list: user selected span of time in the statistics settings panel"
selected_var = Unicode('').tag(sync=True)
"str: current selected variable from pie chart or variable selector widget"
selected_hybasid_chart = List([]).tag(sync=True)
"list: selected hybasid(s) from the statistics dashboard list or from catchments pie"
def __init__(self):
"""
Params:
base_basin (ee.FeatureCollection): basin corresponding to the filtered
wwf hydrosheds at the given aoi and to the given level.
upstream_catchment (ee.FeatureCollection): upstream catchments from
the given point at the given level using the base_basin.
            forest_change (ee.Image): forest change mask within the given upstream
                catchments at the given level using the base basin.
data (dict): upstream catchments in a geojson format
zonal_df (df): Zonal statistics dataframe
"""
self.base_basin = None
self.upstream_catchs = None
self.forest_change = None
self.lat_link = False
self.lon_link = False
self.data = None
self.zonal_df = None
def get_upstream_basin_ids(self, geometry, max_steps=100):
"""Return a list with all uperstream catchments ids from the base basin
Args:
geometry (ee.Geometry): geometry to filter the base catchment level
            max_steps (int): arbitrary number of traversal steps over the collection
Params:
level (int): WWF catchment level to query the inputs
"""
def get_upper(i, acc):
acc = ee.List(acc)
# get the last accumulated element, (can be a fc with lots of features)
feature_collection = ee.FeatureCollection(acc.get(acc.size().subtract(1)))
# we will retrieve all the HYBAS_ID's from the last element
base_ids = feature_collection.aggregate_array("HYBAS_ID")
# We will query what are the upstream features from the above ones
upper_catchments = self.base_basin.filter(
ee.Filter.inList("NEXT_DOWN", base_ids)
)
# and append them into the feature collection (to start again)
return acc.add(upper_catchments)
self.base_basin = cs.get_hydroshed(level=self.level)
upstream_catchs = ee.FeatureCollection(
ee.List(
ee.List.sequence(1, max_steps).iterate(
get_upper, [self.base_basin.filterBounds(geometry)]
)
).iterate(
lambda fc, acc: ee.FeatureCollection(acc).merge(
ee.FeatureCollection(fc)
),
ee.FeatureCollection([]),
)
)
self.hybasin_list = upstream_catchs.aggregate_array("HYBAS_ID").getInfo()
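    # Note (added for clarity): the nested iterate() above accumulates one
    # FeatureCollection per traversal step: starting from the catchment that
    # contains `geometry`, each step collects the catchments whose NEXT_DOWN id
    # points into the previously collected set. The outer iterate() then merges
    # all accumulated collections into a single FeatureCollection. `max_steps`
    # is only an upper bound; once no further upstream catchments exist, the
    # remaining steps append empty collections.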
def get_upstream_fc(self):
"""Filter and get upstream catchments"""
return self.get_selected(self.hybasin_list)
def get_gfc(self, aoi):
"""Creates a forest change map based on gfw dataset
Params:
aoi (ee.Geometry): area of interest to clip the change mask
iniy (int): the initial year of the analysis
stpoy (int): end year for the loss
thres (int): minimum value for the tree cover
"""
iniy_ = self.years[0] - 2000
stopy_ = self.years[1] - 2000
gfc = ee.Image(cp.gfc_dataset).clip(aoi)
treecov = gfc.select(["treecover2000"])
lossy = gfc.select(["lossyear"]).unmask(0)
gain = gfc.select(["gain"])
forest_change = (
ee.Image(0)
.where(treecov.lte(self.thres).And(gain.eq(1)), 50) # gain V
.where(treecov.lte(self.thres).And(gain.eq(0)), 30) # non-forest
.where(
treecov.gt(self.thres).And(lossy.lt(iniy_)), 30
) # non-forest (lost forest before start date)
.where(
treecov.gt(self.thres).And(lossy.gt(stopy_)), 40
) # stable forest (forest lost after the dates)
.where(
treecov.gt(self.thres)
.And(gain.eq(1))
.And(lossy.gte(iniy_))
.And(lossy.lte(stopy_)),
51,
) # gain+loss
.where(treecov.gt(self.thres).And(gain.eq(1)).And(lossy.eq(0)), 50) # gain
.where(
treecov.gt(self.thres)
.And(gain.eq(0))
.And(lossy.gte(iniy_))
.And(lossy.lte(stopy_)),
lossy,
) # loss
.where(
treecov.gt(self.thres).And(gain.eq(0)).And(lossy.eq(0)), 40
) # stable forest
.selfMask()
)
return forest_change
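    # Note (added for clarity): class codes assigned by this method:
    #   1-20 : year of forest loss (lossyear band value, offset from 2000)
    #   30   : non-forest (below the cover threshold, or lost before the start year)
    #   40   : stable forest (including forest lost after the end year)
    #   50   : forest gain
    #   51   : both gain and loss within the selected period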
def get_selected(self, hybas_ids, from_json=False):
"""Return the selected Feature Collection or geojson dict
hybas_ids (list): hydrobasin id's to calculate statistics.
"""
if from_json:
gdf = GeoDataFrame.from_features(self.data["features"])
return json.loads(gdf[gdf["HYBAS_ID"].isin(hybas_ids)].to_json())
return self.base_basin.filter(ee.Filter.inList("HYBAS_ID", hybas_ids))
@staticmethod
def get_bounds(dataset):
"""Get bounds of the given feature collection"""
if isinstance(dataset, ee.FeatureCollection):
ee_bounds = dataset.geometry().bounds().coordinates()
coords = ee_bounds.get(0).getInfo()
ll, ur = coords[0], coords[2]
return ll[0], ll[1], ur[0], ur[1]
elif isinstance(dataset, dict):
return list(GeoDataFrame.from_features(dataset["features"]).total_bounds)
def calculate_statistics(self):
"""Get hydrobasin id statistics on the given hybasin_id
hybas_ids (list): hydrobasin id's to calculate statistics.
"""
if self.method=="filter" and not self.selected_hybas:
raise Exception("Please select a subcatchment.")
feature_collection = self.base_basin.filter(
ee.Filter.inList(
"HYBAS_ID",
self.selected_hybas if self.method != "all" else self.hybasin_list
)
)
return (
ee.Image.pixelArea()
.divide(10000)
.addBands(self.get_gfc(feature_collection))
.reduceRegions(
collection=feature_collection,
reducer=ee.Reducer.sum().group(1),
scale=ee.Image(param.gfc_dataset).projection().nominalScale(),
)
).getInfo()
@staticmethod
def get_dataframe(result):
"""parse reduce region result as Pandas Dataframe
Args:
result (list): reduce region dictionary
"""
hybas_stats = {}
for feature in result["features"]:
hybas_id = feature["properties"]["HYBAS_ID"]
groups = feature["properties"]["groups"]
zonal_stats = {group["group"]: group["sum"] for group in groups}
hybas_stats[hybas_id] = zonal_stats
df = (
pd.melt(
pd.DataFrame.from_dict(hybas_stats, "index")
# .rename(columns=param.gfc_names)
,
ignore_index=False,
)
.reset_index()
.rename(columns={"index": "basin", "value": "area"})
)
# Prepare base dataframe
df["basin"] = df.basin.astype(str)
df["variable"] = df.variable.astype(int)
df["group"] = df["variable"].apply(lambda x: cp.gfc_translation[x])
# Create a year label and set 0 to everything is not forest-loss
df["year"] = df["variable"].apply(lambda x: x+2000 if x<=20 else 0).astype(int)
# Add a color for every catchment
color_palette = np.array(
sns.color_palette("hls", len(df.basin.unique())).as_hex()
)
random.shuffle(color_palette)
df['catch_color'] = color_palette[ | pd.factorize(df.basin) | pandas.factorize |
"""
2018 <NAME>
2.ensemble-z-analysis/scripts/train_models_given_z.py
This script will train various compression models given a specific z dimension.
Each model will train several times with different initializations.
The script pulls hyperparameters from a parameter file that was determined
after initial hyperparameter sweeps testing latent dimensionality.
Usage:
python train_models_given_z.py
With required command line arguments:
--dataset The focus dataset to apply compression models to
--num_components The z dimensionality we're testing
--param_config A tsv file (param by z dimension) indicating the
specific parameter combination for the z dimension
--out_dir The directory to store the output files
And optional command line arguments
--num_seeds The number of specific models to generate
default: 5
--shuffle If provided, shuffle the input expression matrix
Output:
The script will save associated weights and z matrices for each permutation as
well as reconstruction costs for all algorithms, sample specific correlations,
and training histories for Tybalt and ADAGE models. The population of models
(ensemble) are saved, across dimensions z, for downstream evaluation.
"""
import os
import argparse
import numpy as np
import pandas as pd
from scipy.stats import pearsonr, spearmanr
from tybalt.data_models import DataModel
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset', choices=['TCGA', 'GTEX', 'TARGET'],
help='the dataset used for compression')
parser.add_argument('-n', '--num_components', help='dimensionality of z')
parser.add_argument('-p', '--param_config',
help='text file optimal hyperparameter assignment for z')
parser.add_argument('-o', '--out_dir', help='where to save the output files')
parser.add_argument('-s', '--num_seeds', default=5,
help='number of different seeds to run on current data')
parser.add_argument('-r', '--shuffle', action='store_true',
help='randomize gene expression data for negative control')
parser.add_argument('-m', '--subset_mad_genes', default=8000,
help='subset num genes based on mean absolute deviation')
args = parser.parse_args()
# Load command arguments
dataset = args.dataset.lower()
num_components = int(args.num_components)
param_config = args.param_config
out_dir = args.out_dir
num_seeds = int(args.num_seeds)
shuffle = args.shuffle
subset_mad_genes = int(args.subset_mad_genes)
def get_recon_correlation(df, recon_mat_dict, algorithm, cor_type,
genes=False):
"""
Get gene or sample correlations between input and reconstructed input
Arguments:
df - the input dataframe
recon_mat_dict - dictionary of different algorithms reconstructions
algorithm - string representing the compression algorithm
cor_type - string representing Pearson or Spearman correlations
    genes - boolean, whether to calculate correlation over genes (samples by default)
"""
recon_mat = recon_mat_dict[algorithm]
if genes:
df = df.T
recon_mat = recon_mat.T
if cor_type == 'pearson':
r = [pearsonr(recon_mat.iloc[x, :],
df.iloc[x, :])[0] for x in range(df.shape[0])]
elif cor_type == 'spearman':
r = [spearmanr(recon_mat.iloc[x, :],
df.iloc[x, :])[0] for x in range(df.shape[0])]
return r
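# Usage sketch (hypothetical objects, for illustration only):
#
#   recon_mat_dict = {'pca': pca_recon_df, 'vae': vae_recon_df}
#   sample_pearson = get_recon_correlation(df=train_df,
#                                          recon_mat_dict=recon_mat_dict,
#                                          algorithm='pca',
#                                          cor_type='pearson')
#
# The result is a list with one correlation per sample (or per gene when
# genes=True).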
def compile_corr_df(pearson_list, spearman_list, algorithm_list, column_names,
seed, data_type):
"""
Compile together correlations across algorithms
Arguments:
pearson_list - a list of pearson correlations across algorithms
spearman_list - a list of spearman correlations across algorithms
algorithm_list - list of algorithm names
column_names - list of names supplied to the compiled dataframe
seed - the current random seed
data_type - training or testing set
"""
pearson_df = pd.DataFrame(pearson_list,
index=algorithm_list,
columns=column_names)
pearson_df.index.name = 'algorithm'
spearman_df = pd.DataFrame(spearman_list,
index=algorithm_list,
columns=column_names)
spearman_df.index.name = 'algorithm'
pearson_df = pearson_df.reset_index().melt(id_vars=['algorithm'],
var_name='id',
value_name='correlation')
spearman_df = spearman_df.reset_index().melt(id_vars=['algorithm'],
var_name='id',
value_name='correlation')
corr_df = pd.concat([pearson_df.assign(cor_type='pearson'),
spearman_df.assign(cor_type='spearman')])
corr_df = corr_df.assign(seed=seed)
corr_df = corr_df.assign(data=data_type)
return corr_df
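# The dataframe returned above is in long format with the columns:
#   algorithm, id, correlation, cor_type ('pearson' or 'spearman'), seed, data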
# Extract parameters from parameter configuation file
param_df = | pd.read_table(param_config, index_col=0) | pandas.read_table |
from requests import get, exceptions
from bs4 import BeautifulSoup
from datetime import datetime
from pandas import DataFrame, read_excel
from time import sleep
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
def get_label(soup):
artist = soup.find("span", attrs={"itemprop": "byArtist"}).a.contents[0].strip()
releaser = soup.find("p", attrs={"id": "band-name-location"}).find("span", attrs={"class": "title"}).contents[0].strip()
label_tag = soup.find("span", attrs={"class": "back-to-label-name"})
if label_tag:
return label_tag.contents[0].strip()
else:
return releaser if artist.lower() != releaser.lower() else None
def get_tags(soup):
tags = []
for tag in soup.findAll("a", attrs={"class": "tag"}):
tags.append(tag.contents[0])
return tags
def get_soup(url):
try:
release_request = get(url)
return BeautifulSoup(release_request.text, "html.parser")
except exceptions.ConnectionError:
sleep(5.0)
return get_soup(url)
def parse_release(url):
soup = get_soup(url)
if soup.find("h2", attrs={"class": "trackTitle"}):
title = soup.find("h2", attrs={"class": "trackTitle"}).contents[0].strip()
artist = soup.find("span", attrs={"itemprop": "byArtist"}).a.contents[0].strip()
releasedate_str = soup.find("meta", attrs={"itemprop": "datePublished"})["content"]
releasedate = datetime(int(releasedate_str[0:4]), int(releasedate_str[4:6]), int(releasedate_str[6:8])).date()
formats_raw = soup.findAll("li", attrs={"class": "buyItem"})
label = get_label(soup)
tags = get_tags(soup)
if len(soup.find("span", attrs={"class": "location"}).contents) > 0:
location = soup.find("span", attrs={"class": "location"}).contents[0].strip()
formats = []
for format_raw in formats_raw:
if format_raw.h3.button:
secondary_text = format_raw.h3.find("div", attrs={"class": "merchtype secondaryText"})
format = secondary_text.contents[0].strip() if secondary_text else format_raw.h3.button.span.contents[0]
formats.append(format)
return {
"title": title,
"artist": artist,
"date": releasedate,
"url": url,
"formats": formats,
"tags": tags,
"location": location,
"label": label
}
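# Example of the dict returned by parse_release() (values are illustrative,
# not taken from a real release page):
#
#   {"title": "Some EP", "artist": "Some Band",
#    "date": datetime.date(2020, 5, 1),
#    "url": "https://someband.bandcamp.com/album/some-ep",
#    "formats": ["Vinyl LP", "Compact Disc (CD)"],
#    "tags": ["punk", "antwerp"],
#    "location": "Antwerpen, Belgium", "label": "Some Label"}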
url = "https://bandcamp.com/tag/{0}?page={1}&sort_field=date"
with open("cities.txt", "r") as f:
cities = [city.lower() for city in f.read().split("\n")]
start_urls = [url.format(city.lower(), i) for i in range(1, 11, 1) for city in cities]
data = read_excel("data.xlsx")
ignore_list = []
diff = []
for start_url in start_urls:
stad = start_url.split("/")[-1].split("?")[0]
if stad not in ignore_list:
print(stad, start_url)
r = get(start_url)
soup = BeautifulSoup(r.text, "html.parser")
releases = soup.find("div", attrs={"class": "results"})
good_page = soup.find("ul", attrs={"id": "sortNav", "class": "horizNav"})
while good_page is None:
print("offline, even wachten")
sleep(10.0)
r = get(start_url)
soup = BeautifulSoup(r.text, "html.parser")
releases = soup.find("div", attrs={"class": "results"})
good_page = soup.find("ul", attrs={"id": "sortNav", "class": "horizNav"})
if releases:
if len(releases.ul.findAll("li", attrs={"class": "item"})) > 0:
for release in releases.ul.findAll("li", attrs={"class": "item"}):
release_url = release.a["href"]
if release_url not in data["url"].values:
release_info = parse_release(release_url)
if release_info:
artist_location_in_belgium = "Belg" in release_info["location"]
artist_location_matches_cities = release_info["location"].lower() in cities
if artist_location_in_belgium or artist_location_matches_cities:
for format in release_info["formats"]:
for tag in release_info["tags"]:
line = release_info.copy()
line.pop("formats")
line["format"] = format
line.pop("tags")
line["tag"] = tag
data = data.append(DataFrame([line]))
diff.append(line)
print(release_info["artist"], release_info["title"], format, len(diff), len(data.index))
else:
ignore_list.append(stad)
print(stad, "added to ignore list")
else:
ignore_list.append(stad)
print(stad, "added to ignore list")
data.drop_duplicates().to_excel("data.xlsx", index=False)
filename = "diff_" + str(datetime.now().date()) + ".xlsx"
| DataFrame(diff) | pandas.DataFrame |
import matplotlib.cm as cm
import pandas as pd
import seaborn as sns
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
import matplotlib.pyplot as plt
import numpy as np
###############################################################################################################
# IMPORTANT: USE ONLY WITH LIST OF TWEETS CONTAINING A SIGNIFICANT AMOUNT FROM EACH USER PRESENT IN THE LIST #
# FOR EXAMPLE TWEETS OBTAINED WITH data-mining/getTimelines.py #
###############################################################################################################
FILENAME_TWEET = "../data-mining/results/timeline.csv" # List of tweets to consider
OUTPUT_FILENAME = "ReactionsVsFollowers.pdf" # Filename to store the plot
BUBBLE_SCALE = (300, 1600) # Scale of the bubbles
X_LOG = True # Whether or not to use log scale on X axis
Y_LOG = True # Whether or not to use log scale on Y axis
# Load all tweets
tweets = | pd.read_csv(FILENAME_TWEET, dtype='str') | pandas.read_csv |
"""
Analysis module
"""
import warnings
from typing import Tuple, Union
import re
import numpy as np
import pandas as pd
from scipy import stats
from statsmodels.api import OLS, add_constant
from QUANTAXIS.QAFactor import utils
from QUANTAXIS.QAFactor.parameters import DAYS_PER_MONTH, DAYS_PER_QUARTER, DAYS_PER_YEAR
from QUANTAXIS.QAFactor.process import demean_forward_returns
from QUANTAXIS.QAFactor.utils import get_forward_returns_columns
def mean_return_by_quantile(
factor_data: pd.DataFrame,
by_datetime: bool = False,
by_group: bool = False,
demeaned: bool = True,
group_adjust: bool = False,
) -> Tuple:
"""
    Compute the mean forward return and standard error of a factor by quantile
    Parameters
    ---
    :param factor_data: DataFrame with a ['date', 'asset'] MultiIndex; values include the factor value, the forward returns for each period, the factor quantile, the factor group [optional] and the factor weights [optional]
    :param by_datetime: compute the mean forward return of each quantile per date
    :param by_group: compute the mean forward return of each quantile per group
    :param demeaned: compute excess returns demeaned by date
    :param group_adjust: compute excess returns demeaned by date and group
    Returns
    ---
    :return mean_ret: mean forward return per factor quantile
    :return std_error_ret: standard error of the forward return per factor quantile
"""
if group_adjust:
grouper = ["datetime", "group"]
factor_data = demean_forward_returns(factor_data, grouper)
elif demeaned:
factor_data = demean_forward_returns(factor_data)
else:
factor_data = factor_data.copy()
grouper = ["factor_quantile"]
if by_datetime:
grouper.append("datetime")
if by_group:
grouper.append("group")
mean_ret, std_error_ret = weighted_mean_return(factor_data, grouper=grouper)
return mean_ret, std_error_ret
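# Usage sketch (assumes `factor_data` was prepared upstream with the expected
# MultiIndex, forward-return columns such as 'period_1' and a 'factor_quantile'
# column; quantile labels 1..5 are only an example):
#
#   mean_ret, std_err = mean_return_by_quantile(factor_data, demeaned=True)
#   spread, spread_std = mean_returns_spread(mean_ret['period_1'],
#                                            upper_quant=5, lower_quant=1,
#                                            std_err=std_err['period_1'])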
def weighted_mean_return(factor_data: pd.DataFrame, grouper: list):
"""
    Compute the weighted mean return and standard deviation
"""
forward_returns_columns = get_forward_returns_columns(factor_data.columns)
def agg(values, weights):
count = len(values)
average = np.average(values, weights=weights, axis=0)
variance = (
np.average((values - average)**2,
weights=weights,
axis=0) * count / max((count - 1),
1)
)
return pd.Series(
[average,
np.sqrt(variance),
count],
index=["mean",
"std",
"count"]
)
group_stats = factor_data.groupby(grouper)[forward_returns_columns.append(
pd.Index(["weights"])
)].apply(
lambda x: x[forward_returns_columns].
apply(agg,
weights=x["weights"].fillna(0.0).values)
)
mean_ret = group_stats.xs("mean", level=-1)
std_error_ret = group_stats.xs(
"std",
level=-1
) / np.sqrt(group_stats.xs("count",
level=-1))
return mean_ret, std_error_ret
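# The inner `agg` helper computes a weighted mean and a Bessel-corrected
# weighted standard deviation:
#
#   mean = sum_i(w_i * x_i) / sum_i(w_i)
#   var  = [sum_i(w_i * (x_i - mean)^2) / sum_i(w_i)] * n / max(n - 1, 1)
#
# where n is the number of observations in the group; the reported "std" is
# sqrt(var).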
def mean_returns_spread(
mean_returns: pd.DataFrame,
upper_quant: int,
lower_quant: int,
std_err=None
):
"""
    Compute the return spread between upper_quant and lower_quant, and the joint standard error
    Parameters
    ---
    :param mean_returns: mean returns indexed by quantile
    :param upper_quant: upper quantile
    :param lower_quant: lower quantile
    :param std_err: standard error of the returns
"""
mean_return_difference = mean_returns.xs(upper_quant
) - mean_returns.xs(lower_quant)
if std_err is None:
joint_std_err = None
else:
std1 = std_err.xs(upper_quant)
std2 = std_err.xs(lower_quant)
joint_std_err = np.sqrt(std1**2 + std2**2)
return mean_return_difference, joint_std_err
def factor_alpha_beta(
factor_data: pd.DataFrame,
returns: pd.DataFrame = None,
demeaned: bool = True,
group_adjust: bool = False,
equal_weight: bool = False,
):
"""
    Compute the factor alpha (excess return), the t-statistic of alpha, and the beta
    Parameters
    ---
    :param factor_data: DataFrame with a ['date', 'stock'] MultiIndex; values include the factor value, forward returns, the factor quantile and the factor group [optional]
    :param returns: factor forward returns; defaults to None, in which case they are computed by calling `factor_returns`
    :param demeaned: whether to compute returns on a long/short (demeaned) portfolio
    :param group_adjust: whether to apply group (industry) neutralization
    :param equal_weight: whether to build equal-weighted long/short legs split at the factor median
    Returns
    ---
"""
if returns is None:
returns = factor_returns(
factor_data,
demeaned,
group_adjust,
equal_weight
)
universe_ret = (
factor_data.groupby(level="datetime")[get_forward_returns_columns(
factor_data.columns
)].mean().loc[returns.index]
)
if isinstance(returns, pd.Series):
returns.name = universe_ret.columns.values[0]
returns = pd.DataFrame(returns)
alpha_beta = pd.DataFrame()
for period in returns.columns.values:
x = universe_ret[period].values
y = returns[period].values
x = add_constant(x)
reg_fit = OLS(y, x).fit()
try:
alpha, beta = reg_fit.params
except ValueError:
alpha_beta.loc["Ann. alpha", period] = np.nan
alpha_beta.loc["beta", period] = np.nan
else:
freq_adjust = pd.Timedelta(days=DAYS_PER_YEAR) / pd.Timedelta(
utils.get_period(period.replace("period_",
""))
)
alpha_beta.loc["Ann. alpha",
period] = (1 + alpha)**freq_adjust - 1.0
alpha_beta.loc["beta", period] = beta
return alpha_beta
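# Note (added for clarity): the per-period alpha estimated by the OLS fit above
# is annualized as
#
#   Ann. alpha = (1 + alpha) ** (DAYS_PER_YEAR / period_length_in_days) - 1
#
# where the period length is parsed from the forward-return column name
# (e.g. 'period_1').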
def factor_returns(
factor_data: pd.DataFrame,
demeaned: bool = True,
group_adjust: bool = False,
equal_weight: bool = False,
by_asset: bool = False,
):
"""
    Compute the returns of a portfolio weighted by factor values
    Parameters
    ---
    :param factor_data: factor data
    :param demeaned: whether to build a long/short portfolio
    :param group_adjust: whether to build the long/short portfolio within each group
    :param equal_weight: whether to build equal-weighted long/short legs split at the factor median
    :param by_asset: report the portfolio returns per asset; defaults to False
    Returns
    ---
"""
weights = factor_weights(factor_data, demeaned, group_adjust, equal_weight)
weighted_returns = factor_data[get_forward_returns_columns(
factor_data.columns
)].multiply(
weights,
axis=0
)
if by_asset:
returns = weighted_returns
else:
returns = weighted_returns.groupby(level="datetime").sum()
return returns
def factor_weights(
factor_data: pd.DataFrame,
demeaned: bool = True,
group_adjust: bool = False,
equal_weight: bool = False,
):
def to_weights(group, _demeaned, _equal_weight):
if _equal_weight:
group = group.copy()
if _demeaned:
# top assets positive weights, bottom ones negative
group = group - group.median()
negative_mask = group < 0
group[negative_mask] = -1.0
positive_mask = group > 0
group[positive_mask] = 1.0
if _demeaned:
# positive weights must equal negative weights
if negative_mask.any():
if negative_mask.sum() == 0:
group[negative_mask] = 0
group[negative_mask] /= negative_mask.sum()
if positive_mask.any():
if positive_mask.sum() == 0:
group[positive_mask] = 0
group[positive_mask] /= positive_mask.sum()
elif _demeaned:
group = group - group.mean()
        if group.abs().sum() == 0:  # with a binary factor the absolute sum may be 0
return group * 0.0
return group / group.abs().sum()
grouper = ["datetime"]
if group_adjust:
grouper.append("group")
weights = factor_data.groupby(grouper)["factor"].apply(
to_weights,
demeaned,
equal_weight
)
if group_adjust:
weights = weights.groupby(level="datetime"
).apply(to_weights,
False,
False)
return weights
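# Illustrative example (numbers chosen for clarity, not real data): with
# demeaned=True and equal_weight=False, a single date with factor values
# [1, 2, 3, 4] is demeaned to [-1.5, -0.5, 0.5, 1.5] and divided by the sum of
# absolute values (4.0), giving weights [-0.375, -0.125, 0.125, 0.375], i.e. a
# dollar-neutral long/short portfolio whose absolute weights sum to 1.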
def factor_information_coefficient(
factor_data: pd.DataFrame,
group_adjust: bool = False,
by_group: bool = False
):
"""
Computes the Spearman Rank Correlation based Information Coefficient (IC)
between factor values and N period forward returns for each period in
the factor index.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
group_adjust : bool
Demean forward returns by group before computing IC.
by_group : bool
If True, compute period wise IC separately for each group.
Returns
-------
ic : pd.DataFrame
Spearman Rank correlation between factor and
provided forward returns.
"""
def src_ic(group):
f = group["factor"]
_ic = group[get_forward_returns_columns(
factor_data.columns
)].apply(lambda x: stats.spearmanr(x,
f)[0])
return _ic
factor_data = factor_data.copy()
grouper = ["datetime"]
if group_adjust:
factor_data = demean_forward_returns(factor_data, grouper + ["group"])
if by_group:
grouper.append("group")
ic = factor_data.groupby(grouper).apply(src_ic)
return ic
def quantile_turnover(
quantile_factor: pd.DataFrame,
quantile: int,
period: Union[int,
str] = 1
):
"""
Computes the proportion of names in a factor quantile that were
not in that quantile in the previous period.
Parameters
----------
quantile_factor : pd.Series
DataFrame with date, asset and factor quantile.
quantile : int
Quantile on which to perform turnover analysis.
period: string or int, optional
Period over which to calculate the turnover. If it is a string it must
follow pandas.Timedelta constructor format (e.g. '1 days', '1D', '30m',
'3h', '1D1h', etc).
Returns
-------
quant_turnover : pd.Series
Period by period turnover for that quantile.
"""
quant_names = quantile_factor[quantile_factor == quantile]
quant_name_sets = quant_names.groupby(
level=["datetime"]
).apply(lambda x: set(x.index.get_level_values("code")))
if isinstance(period, int):
name_shifted = quant_name_sets.shift(period)
else:
period = utils.get_period(period)
shifted_idx = utils.add_custom_calendar_timedelta(
quant_name_sets.index,
- | pd.Timedelta(period) | pandas.Timedelta |
""" test feather-format compat """
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.io.feather_format import read_feather, to_feather # isort:skip
pyarrow = pytest.importorskip("pyarrow", minversion="1.0.1")
filter_sparse = pytest.mark.filterwarnings("ignore:The Sparse")
@filter_sparse
@pytest.mark.single_cpu
@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:DeprecationWarning")
class TestFeather:
def check_error_on_write(self, df, exc, err_msg):
# check that we are raising the exception
# on writing
with pytest.raises(exc, match=err_msg):
with tm.ensure_clean() as path:
to_feather(df, path)
def check_external_error_on_write(self, df):
# check that we are raising the exception
# on writing
with tm.external_error_raised(Exception):
with tm.ensure_clean() as path:
to_feather(df, path)
def check_round_trip(self, df, expected=None, write_kwargs={}, **read_kwargs):
if expected is None:
expected = df
with tm.ensure_clean() as path:
to_feather(df, path, **write_kwargs)
result = read_feather(path, **read_kwargs)
tm.assert_frame_equal(result, expected)
def test_error(self):
msg = "feather only support IO with DataFrames"
for obj in [
pd.Series([1, 2, 3]),
1,
"foo",
pd.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
self.check_error_on_write(obj, ValueError, msg)
def test_basic(self):
df = pd.DataFrame(
{
"string": list("abc"),
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_null": [1.0, np.nan, 3],
"bool": [True, False, True],
"bool_with_null": [True, np.nan, False],
"cat": pd.Categorical(list("abc")),
"dt": pd.DatetimeIndex(
list(pd.date_range("20130101", periods=3)), freq=None
),
"dttz": pd.DatetimeIndex(
list(pd.date_range("20130101", periods=3, tz="US/Eastern")),
freq=None,
),
"dt_with_null": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
"dtns": pd.DatetimeIndex(
list(pd.date_range("20130101", periods=3, freq="ns")), freq=None
),
}
)
df["periods"] = pd.period_range("2013", freq="M", periods=3)
df["timedeltas"] = pd.timedelta_range("1 day", periods=3)
df["intervals"] = pd.interval_range(0, 3, 3)
assert df.dttz.dtype.tz.zone == "US/Eastern"
self.check_round_trip(df)
def test_duplicate_columns(self):
# https://github.com/wesm/feather/issues/53
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_external_error_on_write(df)
def test_stringify_columns(self):
df = pd.DataFrame(np.arange(12).reshape(4, 3)).copy()
msg = "feather must have string column names"
self.check_error_on_write(df, ValueError, msg)
def test_read_columns(self):
# GH 24025
df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list(range(1, 4)),
"col3": list("xyz"),
"col4": list(range(4, 7)),
}
)
columns = ["col1", "col3"]
self.check_round_trip(df, expected=df[columns], columns=columns)
    def test_read_columns_different_order(self):
# GH 33878
df = pd.DataFrame({"A": [1, 2], "B": ["x", "y"], "C": [True, False]})
self.check_round_trip(df, columns=["B", "A"])
def test_unsupported_other(self):
# mixed python objects
df = pd.DataFrame({"a": ["a", 1, 2.0]})
self.check_external_error_on_write(df)
def test_rw_use_threads(self):
df = pd.DataFrame({"A": np.arange(100000)})
self.check_round_trip(df, use_threads=True)
self.check_round_trip(df, use_threads=False)
def test_write_with_index(self):
df = pd.DataFrame({"A": [1, 2, 3]})
self.check_round_trip(df)
msg = (
r"feather does not support serializing .* for the index; "
r"you can \.reset_index\(\) to make the index into column\(s\)"
)
# non-default index
for index in [
[2, 3, 4],
pd.date_range("20130101", periods=3),
list("abc"),
[1, 3, 4],
pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)]),
]:
df.index = index
self.check_error_on_write(df, ValueError, msg)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = "foo"
msg = "feather does not serialize index meta-data on a default index"
self.check_error_on_write(df, ValueError, msg)
# column multi-index
df.index = [0, 1, 2]
df.columns = pd.MultiIndex.from_tuples([("a", 1)])
msg = "feather must have string column names"
self.check_error_on_write(df, ValueError, msg)
def test_path_pathlib(self):
df = tm.makeDataFrame().reset_index()
result = tm.round_trip_pathlib(df.to_feather, read_feather)
| tm.assert_frame_equal(df, result) | pandas._testing.assert_frame_equal |
"""
Wrappers around native scikit-learn estimators.
`sklearndf` wrappers accept and return data frames (while scikit-learn transformers
usually return a numpy arrays, and may not accept data frames as input).
Otherwise the wrappers are designed to precisely mirror the API and behavior of the
native estimators they wrap.
The wrappers also implement the additional column attributes introduced by `sklearndf`,
:meth:`~EstimatorDF.feature_names_in_`, :meth:`~TransformerDF.feature_names_out_`, and
:meth:`~TransformerDF.feature_names_original_`.
"""
import inspect
import logging
from abc import ABCMeta, abstractmethod
from functools import update_wrapper
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from weakref import WeakValueDictionary
import numpy as np
import pandas as pd
import sklearn.utils.metaestimators as sklearn_meta
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
MetaEstimatorMixin,
RegressorMixin,
TransformerMixin,
)
from pytools.api import AllTracker, inheritdoc, public_module_prefix
from pytools.meta import compose_meta
from ._adapter import LearnerNPDF
from sklearndf import ClassifierDF, EstimatorDF, LearnerDF, RegressorDF, TransformerDF
log = logging.getLogger(__name__)
__all__ = [
"ClassifierWrapperDF",
"EstimatorWrapperDF",
"EstimatorWrapperDFMeta",
"LearnerWrapperDF",
"MetaEstimatorWrapperDF",
"RegressorWrapperDF",
"StackingEstimatorWrapperDF",
"TransformerWrapperDF",
"make_df_classifier",
"make_df_estimator",
"make_df_regressor",
"make_df_transformer",
]
#
# type variables
#
T = TypeVar("T")
T_Self = TypeVar("T_Self")
T_NativeEstimator = TypeVar("T_NativeEstimator", bound=BaseEstimator)
T_NativeTransformer = TypeVar("T_NativeTransformer", bound=TransformerMixin)
T_NativeLearner = TypeVar("T_NativeLearner", RegressorMixin, ClassifierMixin)
T_NativeRegressor = TypeVar("T_NativeRegressor", bound=RegressorMixin)
T_NativeClassifier = TypeVar("T_NativeClassifier", bound=ClassifierMixin)
T_EstimatorWrapperDF = TypeVar("T_EstimatorWrapperDF", bound="EstimatorWrapperDF")
T_TransformerWrapperDF = TypeVar("T_TransformerWrapperDF", bound="TransformerWrapperDF")
T_RegressorWrapperDF = TypeVar("T_RegressorWrapperDF", bound="RegressorWrapperDF")
T_ClassifierWrapperDF = TypeVar("T_ClassifierWrapperDF", bound="ClassifierWrapperDF")
T_LearnerDF = TypeVar("T_LearnerDF", bound="LearnerDF")
#
# Ensure all symbols introduced below are included in __all__
#
__tracker = AllTracker(globals())
#
# base wrapper classes
#
class EstimatorWrapperDFMeta(type):
"""
Metaclass of DF wrappers, providing a reference to the type of the wrapped native
estimator.
"""
__wrapped__: Type[T_NativeEstimator]
@property
def native_estimator_type(cls) -> Type[BaseEstimator]:
"""
The type of native estimator that instances of this wrapper class delegate to.
"""
return cls.__wrapped__
@inheritdoc(match="[see superclass]")
class EstimatorWrapperDF(
EstimatorDF,
Generic[T_NativeEstimator],
metaclass=compose_meta(type(EstimatorDF), EstimatorWrapperDFMeta),
):
"""
Base class of DF wrappers for native estimators conforming with the scikit-learn
API.
Estimator wrapper classes should be created using function
:func:`.make_df_estimator`.
"""
__ARG_FITTED_DELEGATE_CONTEXT = "__EstimatorWrapperDF_fitted"
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""
        :param args: positional arguments to use when initializing a new delegate
            estimator
        :param kwargs: keyword arguments to use when initializing a new delegate
            estimator
"""
super().__init__()
# check if a fitted estimator was passed by class method is_fitted
fitted_delegate_context: Tuple[T_NativeEstimator, pd.Index, int] = kwargs.get(
EstimatorWrapperDF.__ARG_FITTED_DELEGATE_CONTEXT, None
)
if fitted_delegate_context is None:
# create a new delegate estimator with the given parameters
# noinspection PyProtectedMember
_native_estimator = type(self).__wrapped__(*args, **kwargs)
self._reset_fit()
else:
(
_native_estimator,
self._features_in,
self._n_outputs,
) = fitted_delegate_context
self._native_estimator = _native_estimator
self._validate_delegate_estimator()
def __new__(cls: Type[T], *args, **kwargs: Any) -> T:
if not hasattr(cls, "__wrapped__"):
raise TypeError(f"cannot instantiate abstract wrapper class {cls.__name__}")
else:
return super().__new__(cls)
@property
def is_fitted(self) -> bool:
"""[see superclass]"""
return self._features_in is not None
@property
def native_estimator(self) -> T_NativeEstimator:
"""
The native estimator that this wrapper delegates to.
"""
return self._native_estimator
@property
def _estimator_type(self) -> Optional[str]:
try:
# noinspection PyProtectedMember
return self.native_estimator._estimator_type
except AttributeError:
return None
@classmethod
def from_fitted(
cls: Type[T_EstimatorWrapperDF],
estimator: T_NativeEstimator,
features_in: pd.Index,
n_outputs: int,
) -> T_EstimatorWrapperDF:
"""
Make a new wrapped data frame estimator whose delegate is an estimator which
has already been fitted.
:param estimator: the fitted estimator
:param features_in: the column names of X used for fitting the estimator
:param n_outputs: the number of outputs in y used for fitting the estimator
:return: the wrapped data frame estimator
"""
return cls(
**{
EstimatorWrapperDF.__ARG_FITTED_DELEGATE_CONTEXT: (
estimator,
features_in,
n_outputs,
)
}
)
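    # Usage sketch (hypothetical names: `SomeRegressorDF` stands for any
    # wrapper class produced by make_df_regressor or its siblings, and
    # `fitted_native` for an already-fitted native scikit-learn estimator):
    #
    #   df_estimator = SomeRegressorDF.from_fitted(
    #       estimator=fitted_native,
    #       features_in=X.columns,
    #       n_outputs=1,
    #   )
    #   assert df_estimator.is_fitted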
def get_params(self, deep: bool = True) -> Mapping[str, Any]:
"""[see superclass]"""
return self._native_estimator.get_params(deep=deep)
def set_params(self: T_Self, **params: Any) -> T_Self:
"""[see superclass]"""
self: EstimatorWrapperDF # support type hinting in PyCharm
self._native_estimator.set_params(**params)
return self
# noinspection PyPep8Naming
def fit(
self: T_Self,
X: pd.DataFrame,
y: Optional[Union[pd.Series, pd.DataFrame]] = None,
**fit_params: Any,
) -> T_Self:
"""[see superclass]"""
# support type hinting in PyCharm
self: EstimatorWrapperDF[T_NativeEstimator]
self._reset_fit()
try:
self._check_parameter_types(X, y)
self._fit(X, y, **fit_params)
self._post_fit(X, y, **fit_params)
except Exception as cause:
self._reset_fit()
raise self._make_verbose_exception(self.fit.__name__, cause) from cause
return self
def _validate_delegate_estimator(self) -> None:
pass
def _get_features_in(self) -> pd.Index:
return self._features_in
def _get_n_outputs(self) -> int:
return self._n_outputs
def _reset_fit(self) -> None:
self._features_in = None
self._n_outputs = None
# noinspection PyPep8Naming
def _fit(
self, X: pd.DataFrame, y: Optional[Union[pd.Series, pd.DataFrame]], **fit_params
) -> T_NativeEstimator:
# noinspection PyUnresolvedReferences
return self._native_estimator.fit(
self._prepare_X_for_delegate(X),
self._prepare_y_for_delegate(y),
**fit_params,
)
# noinspection PyPep8Naming,PyUnusedLocal
def _post_fit(
self,
X: pd.DataFrame,
y: Optional[Union[pd.Series, pd.DataFrame]] = None,
**fit_params,
) -> None:
self._features_in = X.columns.rename(self.COL_FEATURE_IN)
self._n_outputs = (
0 if y is None else 1 if isinstance(y, pd.Series) else y.shape[1]
)
# noinspection PyPep8Naming
def _check_parameter_types(
self,
X: pd.DataFrame,
y: Optional[Union[pd.Series, pd.DataFrame]],
*,
expected_columns: pd.Index = None,
) -> None:
if not isinstance(X, pd.DataFrame):
raise TypeError("arg X must be a DataFrame")
if self.is_fitted:
EstimatorWrapperDF._verify_df(
df_name="arg X",
df=X,
expected_columns=(
self.feature_names_in_
if expected_columns is None
else expected_columns
),
)
if y is not None and not isinstance(y, (pd.Series, pd.DataFrame)):
raise TypeError("arg y must be None, or a pandas series or data frame")
@staticmethod
def _verify_df(
df_name: str,
df: pd.DataFrame,
expected_columns: pd.Index,
expected_index: pd.Index = None,
) -> None:
def _compare_labels(axis: str, actual: pd.Index, expected: pd.Index):
error_message = f"{df_name} data frame does not have expected {axis}"
missing_columns = expected.difference(actual)
extra_columns = actual.difference(expected)
error_detail = []
if len(actual) != len(expected):
error_detail.append(
f"expected {len(expected)} columns but got {len(actual)}"
)
if len(missing_columns) > 0:
error_detail.append(
f"missing columns: "
f"{', '.join(str(item) for item in missing_columns)}"
)
if len(extra_columns) > 0:
error_detail.append(
f"extra columns: "
f"{', '.join(str(item) for item in extra_columns)}"
)
raise ValueError(f"{error_message} ({'; '.join(error_detail)})")
_compare_labels(axis="columns", actual=df.columns, expected=expected_columns)
if expected_index is not None:
_compare_labels(axis="index", actual=df.index, expected=expected_index)
def _validate_delegate_attribute(self, attribute_name: str) -> None:
if not hasattr(self.native_estimator, attribute_name):
raise AttributeError(
f"delegate estimator of type {type(self.native_estimator).__name__} "
f"does not have attribute {attribute_name}"
)
# noinspection PyPep8Naming
def _prepare_X_for_delegate(self, X: pd.DataFrame) -> Any:
# convert X before passing it to the delegate estimator
return self._adjust_X_type_for_delegate(self._adjust_X_columns_for_delegate(X))
def _prepare_y_for_delegate(
self, y: Optional[Union[pd.Series, pd.DataFrame]]
) -> Any:
return self._adjust_y_type_for_delegate(y)
# noinspection PyPep8Naming
def _adjust_X_columns_for_delegate(self, X: pd.DataFrame) -> pd.DataFrame:
# make sure columns of X are aligned with frame used to fit this estimator
if not self.is_fitted:
# return X unchanged if estimator is not fitted yet
return X
features_in = self._get_features_in()
if X.columns.is_(features_in):
return X
else:
return X.reindex(columns=features_in, copy=False)
# noinspection PyPep8Naming
def _adjust_X_type_for_delegate(
self, X: pd.DataFrame, *, to_numpy: Optional[bool] = None
) -> Any:
# convert X before passing it to the delegate estimator
return X.values if to_numpy else X
def _adjust_y_type_for_delegate(
self,
y: Optional[Union[pd.Series, pd.DataFrame]],
*,
to_numpy: Optional[bool] = None,
) -> Any:
# convert y before passing it to the delegate estimator
return y if y is None or not to_numpy else y.values
def _make_verbose_exception(self, method: str, cause: Exception) -> Exception:
verbose_message = f"{type(self).__name__}.{method}: {cause}"
# noinspection PyBroadException
try:
return type(cause)(verbose_message)
except Exception:
return RuntimeError(verbose_message)
def __dir__(self) -> Iterable[str]:
# include non-private attributes of delegate estimator in directory
return {
*super().__dir__(),
*(
attr
for attr in self._native_estimator.__dir__()
if not attr.startswith("_")
),
}
def __getattr__(self, name: str) -> Any:
# get a non-private attribute of the delegate estimator
if name.startswith("_"):
# raise attribute error
self.__getattribute__(name)
else:
try:
return getattr(self._native_estimator, name)
except AttributeError:
# raise attribute error
self.__getattribute__(name)
def __setattr__(self, name: str, value: Any) -> None:
# set a public attribute of the delegate estimator
if name.startswith("_"):
super().__setattr__(name, value)
else:
setattr(self._native_estimator, name, value)
@inheritdoc(match="[see superclass]")
class TransformerWrapperDF(
TransformerDF,
EstimatorWrapperDF[T_NativeTransformer],
Generic[T_NativeTransformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for native transformers conforming with the scikit-learn
API.
Transformer wrapper classes should be created using function
:func:`.make_df_transformer`.
"""
# noinspection PyPep8Naming
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""[see superclass]"""
self._check_parameter_types(X, None)
transformed = self._transform(X)
return self._transformed_to_df(
transformed=transformed, index=X.index, columns=self.feature_names_out_
)
# noinspection PyPep8Naming
def fit_transform(
self, X: pd.DataFrame, y: Optional[pd.Series] = None, **fit_params: Any
) -> pd.DataFrame:
"""[see superclass]"""
self._reset_fit()
try:
self._check_parameter_types(X, y)
transformed = self._fit_transform(X, y, **fit_params)
self._post_fit(X, y, **fit_params)
except Exception as cause:
self._reset_fit()
raise self._make_verbose_exception(
self.fit_transform.__name__, cause
) from cause
return self._transformed_to_df(
transformed=transformed, index=X.index, columns=self.feature_names_out_
)
# noinspection PyPep8Naming
def inverse_transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""[see superclass]"""
self._check_parameter_types(X, None, expected_columns=self.feature_names_out_)
transformed = self._inverse_transform(X)
return self._transformed_to_df(
transformed=transformed, index=X.index, columns=self.feature_names_in_
)
def _reset_fit(self) -> None:
try:
# noinspection PyProtectedMember
super()._reset_fit()
finally:
self._features_original = None
# noinspection PyPep8Naming
def _prepare_X_for_delegate(self, X: pd.DataFrame, *, inverse: bool = False) -> Any:
return self._adjust_X_type_for_delegate(
self._adjust_X_columns_for_delegate(X, inverse=inverse),
to_numpy=inverse or None,
)
# noinspection PyPep8Naming
def _adjust_X_columns_for_delegate(
self, X: pd.DataFrame, *, inverse: Optional[bool] = None
) -> pd.DataFrame:
if inverse:
# when converting X for an inverse transform, ensure the data frame is
# aligned with the output features, and convert the data frame to a
# numpy array
features_out = self.feature_names_out_
if X.columns.is_(features_out):
return X
else:
return X.reindex(columns=features_out, copy=False)
else:
return super()._adjust_X_columns_for_delegate(X)
@staticmethod
def _transformed_to_df(
transformed: Union[pd.DataFrame, np.ndarray], index: pd.Index, columns: pd.Index
):
if isinstance(transformed, pd.DataFrame):
# noinspection PyProtectedMember
TransformerWrapperDF._verify_df(
df_name="transformed",
df=transformed,
expected_columns=columns,
expected_index=index,
)
return transformed
else:
return pd.DataFrame(data=transformed, index=index, columns=columns)
# noinspection PyPep8Naming
def _transform(self, X: pd.DataFrame) -> np.ndarray:
# noinspection PyUnresolvedReferences
return self.native_estimator.transform(self._prepare_X_for_delegate(X))
# noinspection PyPep8Naming
def _fit_transform(
self, X: pd.DataFrame, y: Optional[pd.Series], **fit_params
) -> np.ndarray:
return self.native_estimator.fit_transform(
self._prepare_X_for_delegate(X),
self._prepare_y_for_delegate(y),
**fit_params,
)
# noinspection PyPep8Naming
def _inverse_transform(self, X: pd.DataFrame) -> np.ndarray:
try:
inverse_transform_fn = self.native_estimator.inverse_transform
except AttributeError:
raise NotImplementedError(
f"{type(self).__name__} does not implement method inverse_transform()"
)
return inverse_transform_fn(self._prepare_X_for_delegate(X, inverse=True))
@inheritdoc(match="[see superclass]")
class LearnerWrapperDF(
LearnerDF,
EstimatorWrapperDF[T_NativeLearner],
Generic[T_NativeLearner],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for native learners conforming with the scikit-learn
API.
"""
#: Name of :class:`pd.Series` objects containing the predictions of single-output
#: learners.
#:
#: See :meth:`~.LearnerDF.predict`.
COL_PREDICTION = "prediction"
# noinspection PyPep8Naming
def predict(
self, X: pd.DataFrame, **predict_params: Any
) -> Union[pd.Series, pd.DataFrame]:
"""[see superclass]"""
self._check_parameter_types(X, None)
# noinspection PyUnresolvedReferences
return self._prediction_to_series_or_frame(
X,
self.native_estimator.predict(
self._prepare_X_for_delegate(X), **predict_params
),
)
# noinspection PyPep8Naming
def fit_predict(
self, X: pd.DataFrame, y: pd.Series, **fit_params: Any
) -> Union[pd.Series, pd.DataFrame]:
"""[see superclass]"""
self._reset_fit()
try:
self._check_parameter_types(X, y)
# noinspection PyUnresolvedReferences
result = self._prediction_to_series_or_frame(
X,
self.native_estimator.fit_predict(
self._prepare_X_for_delegate(X),
self._prepare_y_for_delegate(y),
**fit_params,
),
)
self._post_fit(X, y, **fit_params)
except Exception as cause:
self._reset_fit()
raise self._make_verbose_exception(
self.fit_predict.__name__, cause
) from cause
return result
# noinspection PyPep8Naming
def score(
self, X: pd.DataFrame, y: pd.Series, sample_weight: Optional[pd.Series] = None
) -> float:
"""[see superclass]"""
self._check_parameter_types(X, y)
if y is None:
raise ValueError("arg y must not be None")
if sample_weight is not None and not isinstance(sample_weight, pd.Series):
raise TypeError("arg sample_weight must be None or a Series")
return self.native_estimator.score(
self._prepare_X_for_delegate(X),
self._prepare_y_for_delegate(y),
sample_weight,
)
# noinspection PyPep8Naming
def _prediction_to_series_or_frame(
self, X: pd.DataFrame, y: Union[np.ndarray, pd.Series, pd.DataFrame]
) -> Union[pd.Series, pd.DataFrame]:
if isinstance(y, pd.Series) or isinstance(y, pd.DataFrame):
# if we already have a series or data frame, check it and return it
# unchanged
return y
elif isinstance(y, np.ndarray):
if len(y) == len(X):
# predictions are usually provided as a numpy array the same length as X
if y.ndim == 1:
# single-output predictions yield a numpy array of shape (n_samples)
return pd.Series(data=y, name=self.COL_PREDICTION, index=X.index)
if y.ndim == 2:
# multi-output predictions yield a numpy array of shape (n_samples,
# n_outputs)
return pd.DataFrame(data=y, index=X.index)
raise TypeError(
f"Unexpected shape of numpy array returned as prediction:" f" {y.shape}"
)
raise TypeError(
f"unexpected data type returned as prediction: " f"{type(y).__name__}"
)
class RegressorWrapperDF(
RegressorDF,
LearnerWrapperDF[T_NativeRegressor],
Generic[T_NativeRegressor],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for native regressors conforming with the scikit-learn
API.
Regressor wrapper classes should be created using function
:func:`.make_df_regressor`.
"""
@inheritdoc(match="[see superclass]")
class ClassifierWrapperDF(
ClassifierDF,
LearnerWrapperDF[T_NativeClassifier],
Generic[T_NativeClassifier],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for native classifiers conforming with the scikit-learn
API.
Classifier wrapper classes should be created using function
:func:`.make_df_classifier`.
"""
@property
def classes_(self) -> Sequence[Any]:
"""[see superclass]"""
self._ensure_fitted()
# noinspection PyUnresolvedReferences
return self._native_estimator.classes_
# noinspection PyPep8Naming
def predict_proba(
self, X: pd.DataFrame, **predict_params: Any
) -> Union[pd.DataFrame, List[pd.DataFrame]]:
"""[see superclass]"""
self._ensure_delegate_method("predict_proba")
self._check_parameter_types(X, None)
# noinspection PyUnresolvedReferences
return self._prediction_with_class_labels(
X,
self.native_estimator.predict_proba(
self._prepare_X_for_delegate(X), **predict_params
),
)
# noinspection PyPep8Naming
def predict_log_proba(
self, X: pd.DataFrame, **predict_params: Any
) -> Union[pd.DataFrame, List[pd.DataFrame]]:
"""[see superclass]"""
self._ensure_delegate_method("predict_log_proba")
self._check_parameter_types(X, None)
# noinspection PyUnresolvedReferences
return self._prediction_with_class_labels(
X,
self.native_estimator.predict_log_proba(
self._prepare_X_for_delegate(X), **predict_params
),
)
# noinspection PyPep8Naming
def decision_function(
self, X: pd.DataFrame, **predict_params: Any
) -> Union[pd.Series, pd.DataFrame]:
"""[see superclass]"""
self._ensure_delegate_method("decision_function")
self._check_parameter_types(X, None)
# noinspection PyUnresolvedReferences
return self._prediction_with_class_labels(
X,
self.native_estimator.decision_function(
self._prepare_X_for_delegate(X), **predict_params
),
)
def _ensure_delegate_method(self, method: str) -> None:
if not hasattr(self.native_estimator, method):
raise NotImplementedError(
f"{type(self.native_estimator).__name__} does not implement method "
f"{method}"
)
# noinspection PyPep8Naming
def _prediction_with_class_labels(
self,
X: pd.DataFrame,
y: Union[pd.Series, pd.DataFrame, list, np.ndarray],
classes: Optional[Sequence[Any]] = None,
) -> Union[pd.Series, pd.DataFrame, List[pd.DataFrame]]:
if classes is None:
classes = getattr(self.native_estimator, "classes_", None)
if isinstance(y, pd.DataFrame):
return y.set_axis(classes, axis=1, inplace=False)
elif isinstance(y, np.ndarray):
if len(y) == len(X):
# predictions of probabilities are usually provided as a NumPy array
# the same length as X
if y.ndim == 1:
# for a binary classifier, we get a series with probabilities
# for the second class
return | pd.Series(data=y, index=X.index, name=classes[1]) | pandas.Series |
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.sparse.api import SparseDtype
class TestSparseSeriesIndexing(object):
def setup_method(self, method):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
def test_getitem(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6],
fill_value=0, name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == | SparseDtype(np.int64) | pandas.core.sparse.api.SparseDtype |
# -*- coding: utf-8 -*-
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from elasticsearch import helpers, Elasticsearch
from covid19dh import covid19, cite
from config import search_host, search_username, search_password, search_port, search_index_name, covid19datahub_title, covid19datahub_citation, covid19datahub_licence, covid19datahub_goal, covid19datahub_authors, mongohost, mongouser, mongopassword, mongodatabase, cordversion
from config import DV_ALIAS, BASE_URL, API_TOKEN, PARSABLE_EXTENSIONS_PY, PARSABLE_EXTENSIONS, gitroot
from pymongo import MongoClient
from pyDataverse.api import Api, NativeApi
import pandas as pd
import json
def custom_openapi():
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(
title="CoronaWhy Data API",
description="CoronaWhy is globally distributed, volunteer-powered research organisation. We're trying to assist the medical community's ability to answer key questions related to COVID-19.",
version="0.1",
routes=app.routes,
)
openapi_schema['tags'] = tags_metadata
app.openapi_schema = openapi_schema
return app.openapi_schema
tags_metadata = [
{
"name": "country",
"externalDocs": {
"description": "Put this citation in working papers and published papers that use this dataset: %s" % covid19datahub_citation,
"authors": covid19datahub_authors,
"url": "https://covid19datahub.io",
},
},
{
"name": "dataverse",
"externalDocs": {
"description": "Dataverse integration by API. Available actions: [showfiles, getfile]",
},
},
{
"name": "cord",
"description": "Metadata by cord_id",
"externalDocs": {
"description": "CORD-19 collection access by cord_id",
"url": "https://api.apps.coronawhy.org/",
},
},
{
"name": "altmetrics",
"description": "Altmetrics by DOI or cord_id",
"externalDocs": {
"description": "CORD-19 papers Altmetrics",
"url": "https://api.apps.coronawhy.org/",
},
},
{
"name": "search",
"description": "CORD-19 search",
"externalDocs": {
"description": "CORD-19 papers search",
"url": "https://api.apps.coronawhy.org/",
},
}
]
app = FastAPI(
openapi_tags=tags_metadata
)
app.openapi = custom_openapi
#@app.get("/items/{item_id}")
#def read_item(item_id: int, q: str = None):
# return {"item_id": item_id, "q": q}
@app.get("/country/{item_id}", tags=["country"])
# http://api.apps.coronawhy.org/data/country/FRA
def data_item(item_id: str, q: str = None):
jsondataset = covid19(item_id, verbose = False)
data = {}
datapoints = json.loads(jsondataset.to_json(orient='records'))
data['authors'] = str(covid19datahub_authors)
data['goal'] = str(covid19datahub_goal)
data['licence'] = covid19datahub_licence
data['citation'] = covid19datahub_citation
data['downloads'] = 0
data['data'] = datapoints
data['citations'] = cite(jsondataset)
#return json.dumps(data, sort_keys=True, indent=4)
return data
@app.get("/dataverse/{action}", tags=["dataverse"])
def dataverse(action: str, doi: str = None, fileid: str = None):
api = NativeApi(BASE_URL, API_TOKEN)
PID = 'doi:10.5072/FK2/3OZLV6'
if doi:
PID = doi
if action == 'showfiles':
files = api.get_datafiles(PID, ':latest').json()
if not fileid:
return files
if action == 'getfile':
if not fileid:
df = pd.DataFrame(files['data'])
filesindex = {}
for i in df.index:
filesindex[df.iloc[i].label] = df.iloc[i].dataFile
pdfiles = | pd.DataFrame(filesindex) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import tushare as ts
import matplotlib.pyplot as plt
import datetime
#pd.set_option('display.max_rows', None)
#pd.set_option('display.max_columns', None)
tspro = ts.pro_api('09f77414f088aad7959f5eecba391fe685ea50462e208ce451b1b6a6')
StockBasic = tspro.query('stock_basic', list_status='L')
HighPoint2015 = pd.read_pickle('retrieve_2015_data/HighPoint2015010120151231.pkl')
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import pkgutil
from datetime import datetime
from typing import cast, List
from unittest import TestCase
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import pytz
from dateutil import parser
from dateutil.relativedelta import relativedelta
from kats.compat.pandas import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from kats.consts import (
DEFAULT_TIME_NAME,
DEFAULT_VALUE_NAME,
TimeSeriesData,
TSIterator,
)
def load_data(file_name: str) -> pd.DataFrame:
ROOT = "kats"
if "kats" in os.getcwd().lower():
path = "data/"
else:
path = "kats/data/"
data_object = pkgutil.get_data(ROOT, path + file_name)
# pyre-fixme[6]: For 1st param expected `bytes` but got `Optional[bytes]`.
return pd.read_csv(io.BytesIO(data_object), encoding="utf8")
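# Shared column names and empty fixtures reused across the test cases below.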
TIME_COL_NAME = "ds"
VALUE_COL_NAME = "y"
MULTIVAR_VALUE_DF_COLS: List[str] = [VALUE_COL_NAME, VALUE_COL_NAME + "_1"]
EMPTY_DF = pd.DataFrame()
EMPTY_TIME_SERIES = pd.Series([], name=DEFAULT_TIME_NAME, dtype=float)
EMPTY_VALUE_SERIES = pd.Series([], name=DEFAULT_VALUE_NAME, dtype=float)
EMPTY_VALUE_SERIES_NO_NAME = pd.Series([], dtype=float)
EMPTY_TIME_DATETIME_INDEX = pd.DatetimeIndex(pd.Series([], dtype=object))
EMPTY_DF_WITH_COLS: pd.DataFrame = pd.concat([EMPTY_TIME_SERIES, EMPTY_VALUE_SERIES], axis=1)
NUM_YEARS_OFFSET = 12
class TimeSeriesBaseTest(TestCase):
def setUp(self) -> None:
# load Dataframes for testing
self.AIR_DF = load_data("air_passengers.csv")
self.AIR_DF_DATETIME = self.AIR_DF.copy(deep=True)
self.AIR_DF_DATETIME.ds = self.AIR_DF_DATETIME.ds.apply(
lambda x: parser.parse(x)
)
self.AIR_DF_UNIXTIME = self.AIR_DF.copy(deep=True)
self.AIR_DF_UNIXTIME.ds = self.AIR_DF_DATETIME.ds.apply(
lambda x: (x - datetime(1970, 1, 1)).total_seconds()
)
self.AIR_DF_WITH_DEFAULT_NAMES = self.AIR_DF.copy(deep=True)
self.AIR_DF_WITH_DEFAULT_NAMES.columns = [DEFAULT_TIME_NAME, DEFAULT_VALUE_NAME]
self.MULTIVAR_AIR_DF = self.AIR_DF.copy(deep=True)
self.MULTIVAR_AIR_DF[VALUE_COL_NAME + "_1"] = self.MULTIVAR_AIR_DF.y * 2
self.MULTIVAR_AIR_DF_DATETIME = self.MULTIVAR_AIR_DF.copy(deep=True)
self.MULTIVAR_AIR_DF_DATETIME.ds = self.MULTIVAR_AIR_DF_DATETIME.ds.apply(
lambda x: parser.parse(x)
)
self.MULTIVAR_VALUE_DF = self.MULTIVAR_AIR_DF[MULTIVAR_VALUE_DF_COLS]
self.AIR_TIME_SERIES = self.AIR_DF.ds
self.AIR_TIME_SERIES_PD_DATETIME = pd.to_datetime(self.AIR_TIME_SERIES)
self.AIR_TIME_SERIES_UNIXTIME = self.AIR_TIME_SERIES_PD_DATETIME.apply(
lambda x: (x - datetime(1970, 1, 1)).total_seconds()
)
self.AIR_VALUE_SERIES = self.AIR_DF[VALUE_COL_NAME]
self.AIR_TIME_DATETIME_INDEX = pd.DatetimeIndex(self.AIR_TIME_SERIES)
class TimeSeriesDataInitTest(TimeSeriesBaseTest):
def setUp(self) -> None:
super(TimeSeriesDataInitTest, self).setUp()
# Univariate TimeSeriesData initialized from a pd.DataFrame
self.ts_from_df = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
# Univariate TimeSeriesData initialized from a pd.DataFrame with time
# as a datetime.datetime object
self.ts_from_df_datetime = TimeSeriesData(
df=self.AIR_DF_DATETIME, time_col_name=TIME_COL_NAME
)
# Univariate TimeSeriesData initialized from a pd.DataFrame with time
# as unix time
self.ts_from_df_with_unix = TimeSeriesData(
df=self.AIR_DF_UNIXTIME,
use_unix_time=True,
unix_time_units="s",
time_col_name=TIME_COL_NAME,
)
# Multivariate TimeSeriesData initialized from a pd.DataFrame
self.ts_from_df_multi = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
# Multivariate TimeSeriesData initialized from a pd.DataFrame with time
# as a datetime.datetime object
self.ts_from_df_multi_datetime = TimeSeriesData(
df=self.MULTIVAR_AIR_DF_DATETIME, time_col_name=TIME_COL_NAME
)
# Univariate TimeSeriesData initialized from two pd.Series with time
# as a string
self.ts_from_series_univar_no_datetime = TimeSeriesData(
time=self.AIR_TIME_SERIES, value=self.AIR_VALUE_SERIES
)
# Univariate TimeSeriesData initialized from two pd.Series with time
# as a pd.Timestamp
self.ts_from_series_univar_with_datetime = TimeSeriesData(
time=self.AIR_TIME_SERIES_PD_DATETIME, value=self.AIR_VALUE_SERIES
)
# Univariate TimeSeriesData initialized from two pd.Series with time
# as unix time
self.ts_from_series_with_unix = TimeSeriesData(
time=self.AIR_TIME_SERIES_UNIXTIME,
value=self.AIR_VALUE_SERIES,
use_unix_time=True,
unix_time_units="s",
time_col_name=TIME_COL_NAME,
)
# Univariate TimeSeriesData initialized with time as a pd.Series and
# value as a pd.DataFrame
self.ts_from_series_and_df_univar = TimeSeriesData(
time=self.AIR_TIME_SERIES, value=self.AIR_VALUE_SERIES.to_frame()
)
# Multivariate TimeSeriesData initialized from a pd.Series for time
# and DataFrame for value
self.ts_from_series_and_df_multivar = TimeSeriesData(
time=self.AIR_TIME_SERIES, value=self.MULTIVAR_VALUE_DF
)
# Univariate TimeSeriesData initialized with time as a pd.DateTimeIndex
# and value as a pd.Series
self.ts_from_index_and_series_univar = TimeSeriesData(
time=self.AIR_TIME_DATETIME_INDEX,
value=self.AIR_VALUE_SERIES,
time_col_name=TIME_COL_NAME,
)
# Multivariate TimeSeriesData initialized with time as a
# pd.DateTimeIndex and value as a pd.DataFrame
self.ts_from_index_and_series_multivar = TimeSeriesData(
time=self.AIR_TIME_DATETIME_INDEX,
value=self.MULTIVAR_VALUE_DF,
time_col_name=TIME_COL_NAME,
)
# TimeSeriesData initialized from None Objects
self.ts_df_none = TimeSeriesData(df=None)
self.ts_time_none_and_value_none = TimeSeriesData(time=None, value=None)
# TimeSeriesData initialized from Empty Objects
self.ts_df_empty = TimeSeriesData(df=EMPTY_DF)
self.ts_time_empty_value_empty = TimeSeriesData(
time=EMPTY_TIME_SERIES, value=EMPTY_VALUE_SERIES
)
self.ts_time_empty_value_empty_no_name = TimeSeriesData(
time=EMPTY_TIME_SERIES, value=EMPTY_VALUE_SERIES_NO_NAME
)
self.ts_time_empty_value_empty_df = TimeSeriesData(
time=EMPTY_TIME_SERIES, value=EMPTY_DF
)
self.ts_time_empty_value_empty_df_with_cols = TimeSeriesData(
time=EMPTY_TIME_SERIES, value=EMPTY_DF_WITH_COLS
)
# univariate data with missing time
self.ts_univariate_missing = TimeSeriesData(
df=pd.DataFrame(
{
"time": ["2010-01-01", "2010-01-02", "2010-01-03", "2010-01-05"],
"value": [1, 2, 3, 4],
}
)
)
# multivariate data with missing time
self.ts_multi_missing = TimeSeriesData(
df=pd.DataFrame(
{
"time": ["2010-01-01", "2010-01-02", "2010-01-03", "2010-01-05"],
"value1": [1, 2, 3, 4],
"value2": [4, 3, 2, 1],
}
)
)
# univariate data with unixtime in US/Pacific with time zone
self.unix_list = (
(
pd.date_range(
"2020-03-01", "2020-03-10", tz="US/Pacific", freq="1d"
).astype(int)
/ 1e9
)
.astype(int)
.to_list()
)
self.ts_univar_PST_tz = TimeSeriesData(
df=pd.DataFrame({"time": self.unix_list, "value": [0] * 10}),
use_unix_time=True,
unix_time_units="s",
tz="US/Pacific",
)
# univariate data with unixtime in US/Pacific without time zone
self.ts_univar_PST = TimeSeriesData(
df=pd.DataFrame({"time": self.unix_list, "value": [0] * 10}),
use_unix_time=True,
unix_time_units="s",
)
# univariate data with date str with tz
date = ["2020-10-31", "2020-11-01", "2020-11-02"]
self.ts_univar_str_date_tz = TimeSeriesData(
df=pd.DataFrame({"time": date, "value": [0] * 3}),
date_format="%Y-%m-%d",
tz="US/Pacific",
)
# univariate data with date str without tz
self.ts_univar_str_date = TimeSeriesData(
df=pd.DataFrame({"time": date, "value": [0] * 3}),
date_format="%Y-%m-%d",
)
# univariate data in US/Pacific Time Zone with missing data
self.ts_univar_PST_missing_tz = TimeSeriesData(
df=pd.DataFrame(
{"time": (self.unix_list[0:4] + self.unix_list[7:10]), "value": [0] * 7}
),
use_unix_time=True,
unix_time_units="s",
tz="US/Pacific",
)
    # Testing univariate time series initialized from a DataFrame
def test_init_from_df_univar(self) -> None:
# DataFrame with string time
assert_series_equal(self.ts_from_df.time, self.AIR_TIME_SERIES_PD_DATETIME)
assert_series_equal(
cast(pd.Series, self.ts_from_df.value), self.AIR_VALUE_SERIES
)
# DataFrame with datetime time
assert_series_equal(
self.ts_from_df_datetime.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_series_equal(
cast(pd.Series, self.ts_from_df_datetime.value), self.AIR_VALUE_SERIES
)
# DataFrame with unix time
assert_series_equal(
self.ts_from_df_with_unix.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_series_equal(
cast(pd.Series, self.ts_from_df_with_unix.value), self.AIR_VALUE_SERIES
)
# Testing multivariate time series initialized from a DataFrame
def test_init_from_df_multi(self) -> None:
assert_series_equal(
self.ts_from_df_multi.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_frame_equal(
cast(pd.DataFrame, self.ts_from_df_multi.value), self.MULTIVAR_VALUE_DF
)
# Testing univariate time series initialized from a Series and Series/DataFrame
def test_init_from_series_univar(self) -> None:
# time and value from Series, with time as string
assert_series_equal(
self.ts_from_series_univar_no_datetime.time,
self.AIR_TIME_SERIES_PD_DATETIME,
)
# time and value from Series, with time as pd.Timestamp
assert_series_equal(
self.ts_from_series_univar_with_datetime.time,
self.AIR_TIME_SERIES_PD_DATETIME,
)
assert_series_equal(
cast(pd.Series, self.ts_from_series_univar_no_datetime.value),
self.AIR_VALUE_SERIES,
)
# time and value from Series, with time out of order and `sort_by_time=True`
unsorted_df = self.AIR_DF.sample(frac=1)
resorted_ts = TimeSeriesData(
time=unsorted_df.ds,
value=unsorted_df.y,
time_col_name=TIME_COL_NAME,
sort_by_time=True,
)
self.assertEqual(resorted_ts, self.ts_from_df)
# time and value from Series, with time as unix time
assert_series_equal(
self.ts_from_series_with_unix.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_series_equal(
cast(pd.Series, self.ts_from_series_with_unix.value), self.AIR_VALUE_SERIES
)
# time from Series and value from DataFrame
assert_series_equal(
self.ts_from_series_and_df_univar.time, self.AIR_TIME_SERIES_PD_DATETIME
)
print(type(self.ts_from_series_and_df_univar.value))
assert_series_equal(
cast(pd.Series, self.ts_from_series_and_df_univar.value),
self.AIR_VALUE_SERIES,
)
# Testing multivariate time series initialized from a Series/DataFrame
def test_init_from_series_multivar(self) -> None:
        # Testing multivariate time series initialized from a Series and a DataFrame
assert_series_equal(
self.ts_from_series_and_df_multivar.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_frame_equal(
cast(pd.DataFrame, self.ts_from_series_and_df_multivar.value),
self.MULTIVAR_VALUE_DF,
)
# Testing univariate time series with time initialized as a
# pd.DateTimeIndex
def test_init_from_index_univar(self) -> None:
assert_series_equal(
self.ts_from_index_and_series_univar.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_series_equal(
cast(pd.Series, self.ts_from_index_and_series_univar.value),
self.AIR_VALUE_SERIES,
)
# Testing multivariate time series with time initialized as a
# pd.DateTimeIndex
def test_init_from_index_multivar(self) -> None:
assert_series_equal(
self.ts_from_index_and_series_multivar.time,
self.AIR_TIME_SERIES_PD_DATETIME,
)
assert_frame_equal(
cast(pd.DataFrame, self.ts_from_index_and_series_multivar.value),
self.MULTIVAR_VALUE_DF,
)
# Testing initialization from None Objects
def test_none(self) -> None:
# Testing initialization from None DataFrame
assert_series_equal(self.ts_df_none.time, EMPTY_TIME_SERIES)
assert_series_equal(cast(pd.Series, self.ts_df_none.value), EMPTY_VALUE_SERIES)
# Testing initialization from two None Series
assert_series_equal(self.ts_time_none_and_value_none.time, EMPTY_TIME_SERIES)
assert_series_equal(
cast(pd.Series, self.ts_time_none_and_value_none.value), EMPTY_VALUE_SERIES
)
# Testing initialization from Empty Objects
def test_empty(self) -> None:
        # Testing initialization from empty DataFrame
assert_series_equal(self.ts_df_empty.time, EMPTY_TIME_SERIES)
assert_series_equal(cast(pd.Series, self.ts_df_empty.value), EMPTY_VALUE_SERIES)
        # Testing initialization from two empty Series
assert_series_equal(self.ts_time_empty_value_empty.time, EMPTY_TIME_SERIES)
assert_series_equal(
cast(pd.Series, self.ts_time_empty_value_empty.value), EMPTY_VALUE_SERIES
)
        # Testing initialization from two empty no-name Series
assert_series_equal(
self.ts_time_empty_value_empty_no_name.time, EMPTY_TIME_SERIES
)
assert_series_equal(
cast(pd.Series, self.ts_time_empty_value_empty_no_name.value),
EMPTY_VALUE_SERIES,
)
# Make sure the time and value objects here have the default names
self.assertEqual(
self.ts_time_empty_value_empty_no_name.time.name, DEFAULT_TIME_NAME
)
self.assertEqual(
self.ts_time_empty_value_empty_no_name.value.name, DEFAULT_VALUE_NAME
)
# Testing initialization from time as empty Series and value as empty
# DataFrame
assert_series_equal(self.ts_time_empty_value_empty_df.time, EMPTY_TIME_SERIES)
assert_series_equal(
cast(pd.Series, self.ts_time_empty_value_empty_df.value), EMPTY_VALUE_SERIES
)
# Testing initialization from time as empty Series and value as empty
# DataFrame
assert_series_equal(
self.ts_time_empty_value_empty_df_with_cols.time, EMPTY_TIME_SERIES
)
assert_series_equal(
cast(pd.Series, self.ts_time_empty_value_empty_df_with_cols.value),
EMPTY_VALUE_SERIES,
)
# Testing incorrect initializations
def test_incorrect_init_types(self) -> None:
# Incorrect initialization with DF
with self.assertRaises(ValueError):
# pyre-fixme[6]: Expected `Optional[pd.core.frame.DataFrame]` for 1st
# param but got `List[Variable[_T]]`.
TimeSeriesData(df=[])
# Incorrect initialization with value
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=None)
with self.assertRaises(ValueError):
# pyre-fixme[6]: Expected `Union[None, pd.core.frame.DataFrame,
# pd.core.series.Series]` for 2nd param but got `List[Variable[_T]]`.
TimeSeriesData(time=self.AIR_TIME_SERIES, value=[])
# Incorrect initialization with time
with self.assertRaises(ValueError):
TimeSeriesData(time=None, value=self.AIR_VALUE_SERIES)
with self.assertRaises(ValueError):
# pyre-fixme[6]: Expected `Union[None,
# pd.core.indexes.datetimes.DatetimeIndex, pd.core.series.Series]` for 1st
# param but got `List[Variable[_T]]`.
TimeSeriesData(time=[], value=self.AIR_VALUE_SERIES)
# Incorrect initialization with time and value
with self.assertRaises(ValueError):
# pyre-fixme[6]: Expected `Union[None,
# pd.core.indexes.datetimes.DatetimeIndex, pd.core.series.Series]` for 1st
# param but got `List[Variable[_T]]`.
TimeSeriesData(time=[], value=[])
# Incorrect initialization with value dtypes
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=self.AIR_VALUE_SERIES.map(str))
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=self.MULTIVAR_VALUE_DF.applymap(str))
# Testing incorrect initializations
def test_incorrect_init_lengths(self) -> None:
# Incorrect initialization with different length time and values
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=self.AIR_VALUE_SERIES[:-1])
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES[:-1], value=self.AIR_VALUE_SERIES)
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=self.MULTIVAR_VALUE_DF[:-1])
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES[:-1], value=self.MULTIVAR_VALUE_DF)
# Testing DataFrame conversion
def test_to_dataframe(self) -> None:
# Univariate case
assert_frame_equal(self.ts_from_df.to_dataframe(), self.AIR_DF_DATETIME)
# Multivariate case
assert_frame_equal(
self.ts_from_df_multi_datetime.to_dataframe(), self.MULTIVAR_AIR_DF_DATETIME
)
# Series Cases
assert_frame_equal(
self.ts_from_series_univar_no_datetime.to_dataframe(), self.AIR_DF_DATETIME
)
assert_frame_equal(
self.ts_from_series_univar_with_datetime.to_dataframe(),
self.AIR_DF_DATETIME,
)
# Series/DataFrame Cases
assert_frame_equal(
self.ts_from_series_and_df_univar.to_dataframe(), self.AIR_DF_DATETIME
)
assert_frame_equal(
self.ts_from_series_and_df_multivar.to_dataframe(),
self.MULTIVAR_AIR_DF_DATETIME,
)
# Empty/None Cases
assert_frame_equal(self.ts_df_none.to_dataframe(), EMPTY_DF_WITH_COLS)
assert_frame_equal(
self.ts_time_none_and_value_none.to_dataframe(), EMPTY_DF_WITH_COLS
)
assert_frame_equal(self.ts_df_empty.to_dataframe(), EMPTY_DF_WITH_COLS)
assert_frame_equal(
self.ts_time_empty_value_empty.to_dataframe(), EMPTY_DF_WITH_COLS
)
assert_frame_equal(
self.ts_time_empty_value_empty_df.to_dataframe(), EMPTY_DF_WITH_COLS
)
# Testing Data Interpolate
def test_interpolate(self) -> None:
# univariate
self.assertEqual(
self.ts_univariate_missing.interpolate(freq="D", method="linear"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value": [1, 2, 3, 3.5, 4],
}
)
),
)
self.assertEqual(
self.ts_univariate_missing.interpolate(freq="D", method="ffill"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value": [1, 2, 3, 3, 4],
}
)
),
)
self.assertEqual(
self.ts_univariate_missing.interpolate(freq="D", method="bfill"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value": [1, 2, 3, 4, 4],
}
)
),
)
# multivariate
self.assertEqual(
self.ts_multi_missing.interpolate(freq="D", method="linear"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value1": [1, 2, 3, 3.5, 4],
"value2": [4, 3, 2, 1.5, 1],
}
)
),
)
self.assertEqual(
self.ts_multi_missing.interpolate(freq="D", method="ffill"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value1": [1, 2, 3, 3, 4],
"value2": [4, 3, 2, 2, 1],
}
)
),
)
self.assertEqual(
self.ts_multi_missing.interpolate(freq="D", method="bfill"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value1": [1, 2, 3, 4, 4],
"value2": [4, 3, 2, 1, 1],
}
)
),
)
# test with no frequency given univariate
self.assertEqual(
self.ts_univariate_missing.interpolate(method="linear"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value": [1, 2, 3, 3.5, 4],
}
)
),
)
# no frequency given, for multivariate
self.assertEqual(
self.ts_multi_missing.interpolate(method="linear"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value1": [1, 2, 3, 3.5, 4],
"value2": [4, 3, 2, 1.5, 1],
}
)
),
)
def test_to_array(self) -> None:
# Univariate case
np.testing.assert_array_equal(
self.ts_from_df.to_array(), self.AIR_DF_DATETIME.to_numpy()
)
# Multivariate case
np.testing.assert_array_equal(
self.ts_from_df_multi_datetime.to_array(),
self.MULTIVAR_AIR_DF_DATETIME.to_numpy(),
)
# Series Cases
np.testing.assert_array_equal(
self.ts_from_series_univar_no_datetime.to_array(),
self.AIR_DF_DATETIME.to_numpy(),
)
np.testing.assert_array_equal(
self.ts_from_series_univar_with_datetime.to_array(),
self.AIR_DF_DATETIME.to_numpy(),
)
# Series/DataFrame Cases
np.testing.assert_array_equal(
self.ts_from_series_and_df_univar.to_array(),
self.AIR_DF_DATETIME.to_numpy(),
)
np.testing.assert_array_equal(
self.ts_from_series_and_df_multivar.to_array(),
self.MULTIVAR_AIR_DF_DATETIME.to_numpy(),
)
# Empty/None Cases
np.testing.assert_array_equal(self.ts_df_none.to_array(), np.empty)
np.testing.assert_array_equal(
self.ts_time_none_and_value_none.to_array(), np.empty
)
np.testing.assert_array_equal(self.ts_df_empty.to_array(), np.empty)
np.testing.assert_array_equal(
self.ts_time_empty_value_empty.to_array(), np.empty
)
np.testing.assert_array_equal(
self.ts_time_empty_value_empty_df.to_array(), np.empty
)
def test_tz(self) -> None:
self.ts_univar_PST_tz.validate_data(
validate_frequency=True, validate_dimension=True
)
self.assertEqual(self.ts_univar_PST_tz.freq_to_timedelta(), pd.Timedelta("1d"))
self.assertEqual(self.ts_univar_PST_tz.tz(), pytz.timezone("US/Pacific"))
self.assertTrue(
(
np.array(self.unix_list)
== (self.ts_univar_PST_tz.time.values.astype(int) / 1e9).astype(int)
).all()
)
with self.assertRaisesRegex(
ValueError, "Only constant frequency is supported for time!"
):
self.ts_univar_PST.validate_data(
validate_frequency=True, validate_dimension=True
)
self.ts_univar_str_date.validate_data(
validate_frequency=True, validate_dimension=True
)
self.assertEqual(
self.ts_univar_str_date.freq_to_timedelta(), pd.Timedelta("1d")
)
self.ts_univar_str_date_tz.validate_data(
validate_frequency=True, validate_dimension=True
)
self.assertEqual(
self.ts_univar_str_date_tz.freq_to_timedelta(), pd.Timedelta("1d")
)
self.assertEqual(self.ts_univar_PST_tz.tz(), pytz.timezone("US/Pacific"))
# test ambiguous
tsd = TimeSeriesData(
df=pd.DataFrame(
{
"time": [
"2018-10-28 01:30:00",
"2018-10-28 02:00:00",
"2018-10-28 02:30:00",
"2018-10-28 02:00:00",
"2018-10-28 02:30:00",
"2018-10-28 03:00:00",
"2018-10-28 03:30:00",
],
"value": [0] * 7,
}
),
tz="CET",
tz_ambiguous="infer",
)
tsd.validate_data(validate_frequency=True, validate_dimension=True)
# test nonexistent
tsd = TimeSeriesData(
df=pd.DataFrame(
{
"time": [
"2020-03-08 02:00:00",
"2020-03-08 02:30:00",
"2020-03-08 03:00:00",
],
"value": [0] * 3,
}
),
tz="US/Pacific",
tz_nonexistent="shift_forward",
)
def test_infer_freq_robust(self) -> None:
self.assertEqual(
self.ts_univariate_missing.infer_freq_robust(),
pd.Timedelta(value=1, unit="D"),
)
self.assertEqual(
self.ts_univar_PST_missing_tz.infer_freq_robust(),
pd.Timedelta(value=1, unit="D"),
)
def test_is_data_missing(self) -> None:
self.assertEqual(self.ts_univariate_missing.is_data_missing(), True)
self.assertEqual(self.ts_univar_PST_missing_tz.is_data_missing(), True)
self.assertEqual(self.ts_from_series_and_df_univar.is_data_missing(), False)
self.assertEqual(self.ts_from_series_and_df_multivar.is_data_missing(), False)
def test_min_max_values(self) -> None:
# test min/max value for univariate
self.assertEqual(self.ts_from_df.min, np.nanmin(self.ts_from_df.value.values))
self.assertEqual(self.ts_from_df.max, np.nanmax(self.ts_from_df.value.values))
# test min/max value for multivariate
self.assertEqual(
# pyre-fixme[16]: `float` has no attribute `equals`.
self.ts_from_df_multi.min.equals(
self.ts_from_df_multi.value.min(skipna=True)
),
True,
)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
self.ts_from_df_multi.max.equals(
self.ts_from_df_multi.value.max(skipna=True)
),
True,
)
# test min/max value for empty TS
empty_ts = TimeSeriesData(pd.DataFrame())
self.assertEqual(np.isnan(empty_ts.min), True)
self.assertEqual(np.isnan(empty_ts.max), True)
# test if min/max changes if values are re-assigned for univariate
ts_from_df_new = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
new_val = np.random.randn(len(self.AIR_DF))
ts_from_df_new.value = pd.Series(new_val)
self.assertEqual(ts_from_df_new.min, np.min(new_val))
self.assertEqual(ts_from_df_new.max, np.max(new_val))
# test if min/max changes if values are re-assigned with NaNs for univariate
new_val[-1] = np.nan
ts_from_df_new.value = pd.Series(new_val)
self.assertEqual(ts_from_df_new.min, np.nanmin(new_val))
self.assertEqual(ts_from_df_new.max, np.nanmax(new_val))
# test min/max changes if values are re-assigned for multivariate
ts_from_df_multi_new = TimeSeriesData(
self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
new_val_multi = np.random.randn(
self.MULTIVAR_VALUE_DF.shape[0], self.MULTIVAR_VALUE_DF.shape[1] - 1
)
ts_from_df_multi_new.value = pd.DataFrame(new_val_multi)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
ts_from_df_multi_new.min.equals(pd.DataFrame(new_val_multi).min()),
True,
)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
ts_from_df_multi_new.max.equals(pd.DataFrame(new_val_multi).max()),
True,
)
# test min/max changes if values are re-assigned with NaNs for multivariate
new_val_multi[0] = np.nan
ts_from_df_multi_new.value = pd.DataFrame(new_val_multi)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
ts_from_df_multi_new.min.equals(
pd.DataFrame(new_val_multi).min(skipna=True)
),
True,
)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
ts_from_df_multi_new.max.equals(
pd.DataFrame(new_val_multi).max(skipna=True)
),
True,
)
class TimeSeriesDataOpsTest(TimeSeriesBaseTest):
def setUp(self) -> None:
super(TimeSeriesDataOpsTest, self).setUp()
# Creating DataFrames
# DataFrame with date offset
transformed_df_date = self.AIR_DF_DATETIME.copy(deep=True)
transformed_df_date.ds = transformed_df_date.ds.apply(
lambda x: x + relativedelta(years=NUM_YEARS_OFFSET)
)
transformed_df_date_concat = self.AIR_DF.append(
transformed_df_date, ignore_index=True
)
transformed_df_date_double = self.AIR_DF_DATETIME.copy(deep=True)
transformed_df_date_double.ds = transformed_df_date.ds.apply(
lambda x: x + relativedelta(years=NUM_YEARS_OFFSET * 2)
)
transformed_df_date_concat_double = self.AIR_DF.append(
transformed_df_date_double, ignore_index=True
)
# DataFrames with value offset
transformed_df_value = self.AIR_DF.copy(deep=True)
transformed_df_value.y = transformed_df_value.y.apply(lambda x: x * 2)
transformed_df_value_inv = self.AIR_DF.copy(deep=True)
transformed_df_value_inv.y = transformed_df_value_inv.y.apply(lambda x: x * -1)
# DataFrame with date and value offset
transformed_df_date_and_value = transformed_df_date.copy(deep=True)
transformed_df_date_and_value.y = transformed_df_date_and_value.y.apply(
lambda x: x * 2
)
# DataFrame with date offset (multivariate)
transformed_df_date_multi = transformed_df_date.copy(deep=True)
transformed_df_date_multi[VALUE_COL_NAME + "_1"] = (
transformed_df_date_multi.y * 2
)
transformed_df_date_concat_multi = self.MULTIVAR_AIR_DF.append(
transformed_df_date_multi, ignore_index=True
)
transformed_df_date_concat_mixed = self.MULTIVAR_AIR_DF_DATETIME.append(
transformed_df_date
)
transformed_df_date_double_multi = transformed_df_date_double.copy(deep=True)
transformed_df_date_double_multi[VALUE_COL_NAME + "_1"] = (
transformed_df_date_double_multi.y * 2
)
transformed_df_date_concat_double_multi = self.MULTIVAR_AIR_DF.append(
transformed_df_date_double_multi, ignore_index=True
)
transformed_df_date_concat_double_mixed = self.MULTIVAR_AIR_DF_DATETIME.append(
transformed_df_date_double
)
# DataFrame with value offset (multivariate)
transformed_df_value_none_multi = self.MULTIVAR_AIR_DF.copy(deep=True)
transformed_df_value_none_multi.y = transformed_df_value_none_multi.y_1
transformed_df_value_none_multi.y_1 = np.nan
# DataFrame with date and value offset (multivariate)
transformed_df_date_and_value_multi = transformed_df_date_and_value.copy(
deep=True
)
transformed_df_date_and_value_multi[VALUE_COL_NAME + "_1"] = (
transformed_df_date_and_value_multi.y * 2
)
# DataFrame with all constant values
df_zeros = self.AIR_DF.copy(deep=True)
df_zeros.y.values[:] = 0
df_ones = self.AIR_DF.copy(deep=True)
df_ones.y.values[:] = 1
df_twos = df_ones.copy(deep=True)
df_twos.y.values[:] = 2
df_neg_ones = self.AIR_DF.copy(deep=True)
df_neg_ones.y.values[:] = -1
df_ones_multi = df_ones.copy(deep=True)
df_ones_multi[VALUE_COL_NAME + "_1"] = df_ones_multi.y * 2
# Creating TimeSeriesData objects
# Univariate TimeSeriesData initialized from a pd.DataFrame
self.ts_univ_1 = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
self.ts_univ_2 = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
self.ts_univ_default_names = TimeSeriesData(df=self.AIR_DF_WITH_DEFAULT_NAMES)
self.ts_univ_default_names_2 = TimeSeriesData(df=self.AIR_DF_WITH_DEFAULT_NAMES)
# Multivariate TimeSeriesData initialized from a pd.DataFrame
self.ts_multi_1 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_2 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with date offset
self.ts_date_transform_univ = TimeSeriesData(
df=transformed_df_date, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_univ = TimeSeriesData(
df=transformed_df_date_concat, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_double_univ = TimeSeriesData(
df=transformed_df_date_double, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_double_univ = TimeSeriesData(
df=transformed_df_date_concat_double, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with date offset (multivariate)
self.ts_date_transform_multi = TimeSeriesData(
df=transformed_df_date_multi, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_multi = TimeSeriesData(
df=transformed_df_date_concat_multi, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_mixed = TimeSeriesData(
df=transformed_df_date_concat_mixed, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_double_multi = TimeSeriesData(
df=transformed_df_date_double_multi, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_double_multi = TimeSeriesData(
df=transformed_df_date_concat_double_multi, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_double_mixed = TimeSeriesData(
df=transformed_df_date_concat_double_mixed, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with value offset
self.ts_value_transform_univ = TimeSeriesData(
df=transformed_df_value, time_col_name=TIME_COL_NAME
)
self.ts_value_transform_inv_univ = TimeSeriesData(
df=transformed_df_value_inv, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with value offset (multivariate)
self.ts_value_transform_none_multi = TimeSeriesData(
df=transformed_df_value_none_multi, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with date and value offset
self.ts_date_and_value_transform_univ = TimeSeriesData(
df=transformed_df_date_and_value, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with date and value offset (multivariate)
self.ts_date_and_value_transform_multi = TimeSeriesData(
df=transformed_df_date_and_value_multi, time_col_name=TIME_COL_NAME
)
# TimeSeriesData object with all constant values
self.ts_zero = TimeSeriesData(df=df_zeros, time_col_name=TIME_COL_NAME)
self.ts_ones = TimeSeriesData(df=df_ones, time_col_name=TIME_COL_NAME)
self.ts_twos = TimeSeriesData(df=df_twos, time_col_name=TIME_COL_NAME)
self.ts_neg_ones = TimeSeriesData(df=df_neg_ones, time_col_name=TIME_COL_NAME)
self.ts_ones_multi = TimeSeriesData(
df=df_ones_multi, time_col_name=TIME_COL_NAME
)
# Empty TimeSeriesData Object
self.ts_empty = TimeSeriesData(df=EMPTY_DF)
self.ts_empty_with_cols = TimeSeriesData(
df=EMPTY_DF_WITH_COLS, time_col_name=TIME_COL_NAME
)
# Copies for Extended objects
self.ts_univ_extend = TimeSeriesData(
df=self.AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_univ_extend_2 = TimeSeriesData(
df=self.AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_univ_extend_err = TimeSeriesData(
df=self.AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_2 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_3 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_4 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_err = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_err_2 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_empty_extend = TimeSeriesData(df=EMPTY_DF)
self.ts_empty_extend_err = TimeSeriesData(df=EMPTY_DF)
# Other values
self.length = len(self.AIR_DF)
def test_eq(self) -> None:
# Univariate equality
self.assertTrue(self.ts_univ_1 == self.ts_univ_2)
# Multivariate equality
self.assertTrue(self.ts_multi_1 == self.ts_multi_2)
# Univariate inequality
self.assertFalse(self.ts_univ_1 == self.ts_date_transform_univ)
self.assertFalse(self.ts_univ_1 == self.ts_value_transform_univ)
self.assertFalse(self.ts_univ_1 == self.ts_date_and_value_transform_univ)
# Multivariate inequality
self.assertFalse(self.ts_multi_1 == self.ts_date_transform_multi)
self.assertFalse(self.ts_multi_1 == self.ts_value_transform_none_multi)
self.assertFalse(self.ts_multi_1 == self.ts_date_and_value_transform_multi)
# Univariate vs. Multivariate inequality
self.assertFalse(self.ts_univ_1 == self.ts_multi_1)
self.assertFalse(self.ts_multi_1 == self.ts_univ_1)
def test_ne(self) -> None:
# Univariate equality
self.assertFalse(self.ts_univ_1 != self.ts_univ_2)
# Multivariate equality
self.assertFalse(self.ts_multi_1 != self.ts_multi_2)
# Univariate inequality
self.assertTrue(self.ts_univ_1 != self.ts_date_transform_univ)
self.assertTrue(self.ts_univ_1 != self.ts_value_transform_univ)
self.assertTrue(self.ts_univ_1 != self.ts_date_and_value_transform_univ)
# Multivariate inequality
self.assertTrue(self.ts_multi_1 != self.ts_date_transform_multi)
self.assertTrue(self.ts_multi_1 != self.ts_value_transform_none_multi)
self.assertTrue(self.ts_multi_1 != self.ts_date_and_value_transform_multi)
# Univariate vs. Multivariate inequality
self.assertTrue(self.ts_univ_1 != self.ts_multi_1)
self.assertTrue(self.ts_multi_1 != self.ts_univ_1)
def test_add(self) -> None:
# Add same DataFrames
self.assertEqual(self.ts_univ_1 + self.ts_univ_2, self.ts_value_transform_univ)
# Add different DataFrames
self.assertEqual(
self.ts_univ_1 + self.ts_value_transform_inv_univ, self.ts_zero
)
# Add Univariate and Multivariate DataFrames
self.assertEqual(
self.ts_univ_1 + self.ts_multi_1, self.ts_value_transform_none_multi
)
# Empty Case
self.assertEqual(self.ts_empty + self.ts_empty, self.ts_empty)
# Add DataFrames with different dates
with self.assertRaises(ValueError):
self.ts_univ_1 + self.ts_date_transform_univ
def test_sub(self) -> None:
# Subtract same DataFrames
self.assertEqual(self.ts_univ_1 - self.ts_univ_2, self.ts_zero)
# Subtract different DataFrames
self.assertEqual(
self.ts_univ_1 - self.ts_value_transform_inv_univ,
self.ts_value_transform_univ,
)
# Subtract Univariate and Multivariate DataFrames
self.assertEqual(
self.ts_multi_1 - self.ts_value_transform_inv_univ,
self.ts_value_transform_none_multi,
)
# Empty Case
self.assertEqual(self.ts_empty - self.ts_empty, self.ts_empty)
# Subtract DataFrames with different dates
with self.assertRaises(ValueError):
self.ts_univ_1 - self.ts_date_transform_univ
def test_div(self) -> None:
# Divide same DataFrames
self.assertEqual(self.ts_univ_1 / self.ts_univ_2, self.ts_ones)
# Divide different DataFrames
self.assertEqual(
self.ts_univ_1 / self.ts_value_transform_inv_univ, self.ts_neg_ones
)
# Divide Univariate and Multivariate DataFrames
self.assertEqual(
self.ts_value_transform_univ / self.ts_ones_multi,
self.ts_value_transform_none_multi,
)
# Empty Case
self.assertEqual(self.ts_empty / self.ts_empty, self.ts_empty)
# Divide DataFrames with different dates
with self.assertRaises(ValueError):
self.ts_univ_1 / self.ts_date_transform_univ
def test_mul(self) -> None:
# Multiply same DataFrames
self.assertEqual(self.ts_ones * self.ts_ones, self.ts_ones)
# Multiply different DataFrames
self.assertEqual(self.ts_univ_1 * self.ts_twos, self.ts_value_transform_univ)
# Multiply Univariate and Multivariate DataFrames
self.assertEqual(
self.ts_multi_1 * self.ts_twos, self.ts_value_transform_none_multi
)
# Empty Case
self.assertEqual(self.ts_empty * self.ts_empty, self.ts_empty)
# Multiply DataFrames with different dates
with self.assertRaises(ValueError):
self.ts_univ_1 * self.ts_date_transform_univ
def test_len(self) -> None:
# Normal case
self.assertEqual(len(self.ts_univ_1), self.length)
# Empty case
self.assertEqual(len(self.ts_empty), 0)
def test_empty(self) -> None:
# Empty case
self.assertTrue(self.ts_empty.is_empty())
# Not empty case
self.assertFalse(self.ts_univ_1.is_empty())
def test_extend(self) -> None:
# Testing cases with validate=True
# Univariate case
self.ts_univ_extend.extend(self.ts_date_transform_univ)
self.assertEqual(self.ts_univ_extend, self.ts_date_transform_concat_univ)
# Multivariate case
self.ts_multi_extend.extend(self.ts_date_transform_multi)
self.assertEqual(self.ts_multi_extend, self.ts_date_transform_concat_multi)
# Univariate and multivariate case
self.ts_multi_extend_2.extend(self.ts_date_transform_univ)
self.assertEqual(self.ts_multi_extend_2, self.ts_date_transform_concat_mixed)
# Empty case
self.ts_univ_default_names.extend(self.ts_empty)
self.assertEqual(self.ts_univ_default_names, self.ts_univ_default_names_2)
# Catching errors
with self.assertRaises(ValueError):
self.ts_univ_extend_err.extend(self.ts_date_transform_double_univ)
# Multivariate case
self.ts_multi_extend_err.extend(self.ts_date_transform_double_multi)
# Univariate and multivariate case
self.ts_multi_extend_err_2.extend(self.ts_date_transform_double_univ)
# Empty case
self.ts_empty_extend_err.extend(self.ts_empty)
# Testing cases with validate=False
# Univariate case
self.ts_univ_extend_2.extend(self.ts_date_transform_double_univ, validate=False)
self.assertEqual(
self.ts_univ_extend_2, self.ts_date_transform_concat_double_univ
)
# Multivariate case
self.ts_multi_extend_3.extend(
self.ts_date_transform_double_multi, validate=False
)
self.assertEqual(
self.ts_multi_extend_3, self.ts_date_transform_concat_double_multi
)
# Univariate and multivariate case
self.ts_multi_extend_4.extend(
self.ts_date_transform_double_univ, validate=False
)
self.assertEqual(
self.ts_multi_extend_4, self.ts_date_transform_concat_double_mixed
)
# Empty case
self.ts_empty_extend.extend(self.ts_empty, validate=False)
self.assertEqual(self.ts_empty_extend, self.ts_empty)
def test_get_item(self) -> None:
# Univariate test case
self.assertEqual(
self.ts_date_transform_concat_univ[: len(self.ts_univ_1)], self.ts_univ_1
)
# Multivariate test case
self.assertEqual(
self.ts_date_transform_concat_multi[: len(self.ts_multi_1)], self.ts_multi_1
)
# Multivariate test case where we select a specific column
for col in self.ts_date_transform_concat_multi.value.columns:
ts_univ = TimeSeriesData(
time=self.ts_date_transform_concat_multi.time,
value=self.ts_date_transform_concat_multi.value[col],
time_col_name=self.ts_date_transform_concat_multi.time_col_name,
)
self.assertEqual(self.ts_date_transform_concat_multi[col], ts_univ)
# Multivariate test case where we select multiple columns
self.assertEqual(
self.ts_date_transform_concat_multi[MULTIVAR_VALUE_DF_COLS],
self.ts_date_transform_concat_multi,
)
# Full/Empty cases
self.assertEqual(self.ts_univ_1[:], self.ts_univ_1)
self.assertEqual(
self.ts_univ_1[0:0],
TimeSeriesData(
time=pd.Series(name=TIME_COL_NAME),
value=pd.Series(name=VALUE_COL_NAME),
time_col_name=TIME_COL_NAME,
),
)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `pytest.mark.mpl_image_compare`.
@pytest.mark.mpl_image_compare
def test_plot(self) -> plt.Figure:
# Univariate test case
ax = self.ts_univ_1.plot(cols=["y"])
self.assertIsNotNone(ax)
return plt.gcf()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `pytest.mark.mpl_image_compare`.
@pytest.mark.mpl_image_compare
def test_plot_multivariate(self) -> plt.Figure:
# Multivariate test case
ax = self.ts_multi_1.plot()
self.assertIsNotNone(ax)
return plt.gcf()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `pytest.mark.mpl_image_compare`.
@pytest.mark.mpl_image_compare
def test_plot_params(self) -> plt.Figure:
# Test more parameter overrides.
ax = self.ts_multi_1.plot(
figsize=(8, 3), plot_kwargs={"cmap": "Purples"}, grid=False
)
self.assertIsNotNone(ax)
return plt.gcf()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `pytest.mark.mpl_image_compare`.
@pytest.mark.mpl_image_compare
def test_plot_grid_ax(self) -> plt.Figure:
# Test grid and ax parameter overrides.
fig, ax = plt.subplots(figsize=(6, 4))
ax = self.ts_univ_1.plot(ax=ax, grid_kwargs={"lw": 2, "ls": ":"})
self.assertIsNotNone(ax)
return fig
def test_plot_missing_column(self) -> None:
# Columns not in data.
with self.assertRaises(ValueError):
self.ts_univ_1.plot(cols=["z"])
def test_plot_empty(self) -> None:
# No data to plot.
with self.assertRaises(ValueError):
self.ts_empty.plot()
class TimeSeriesDataMiscTest(TimeSeriesBaseTest):
def setUp(self) -> None:
super(TimeSeriesDataMiscTest, self).setUp()
# Creating TimeSeriesData objects
# Univariate TimeSeriesData initialized from a pd.DataFrame
self.ts_univ = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
# Multivariate TimeSeriesData initialized from a pd.DataFrame
self.ts_multi = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
def test_is_univariate(self) -> None:
# Univariate case
self.assertTrue(self.ts_univ.is_univariate())
# Multivariate case
self.assertFalse(self.ts_multi.is_univariate())
def test_time_to_index(self) -> None:
# Univariate case
assert_index_equal(self.ts_univ.time_to_index(), self.AIR_TIME_DATETIME_INDEX)
# Multivariate case
assert_index_equal(self.ts_multi.time_to_index(), self.AIR_TIME_DATETIME_INDEX)
def test_repr(self) -> None:
# Univariate case
self.assertEqual(self.ts_univ.__repr__(), self.AIR_DF_DATETIME.__repr__())
# Multivariate case
self.assertEqual(
self.ts_multi.__repr__(), self.MULTIVAR_AIR_DF_DATETIME.__repr__()
)
def test_repr_html(self) -> None:
# Univariate case
self.assertEqual(self.ts_univ._repr_html_(), self.AIR_DF_DATETIME._repr_html_())
# Multivariate case
self.assertEqual(
self.ts_multi._repr_html_(), self.MULTIVAR_AIR_DF_DATETIME._repr_html_()
)
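# TSIterator (exercised below) walks a TimeSeriesData object row by row,
# yielding a one-point TimeSeriesData per iteration.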
class TSIteratorTest(TestCase):
def test_ts_iterator_univariate_next(self) -> None:
df = pd.DataFrame(
[["2020-03-01", 100], ["2020-03-02", 120], ["2020-03-03", 130]],
columns=["time", "y"],
)
kats_data = TimeSeriesData(df=df)
kats_iterator = TSIterator(kats_data)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-01")]), check_names=False
)
assert_series_equal(
cast(pd.Series, val.value), pd.Series([100]), check_names=False
)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-02")]), check_names=False
)
assert_series_equal(
cast(pd.Series, val.value), pd.Series([120]), check_names=False
)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-03")]), check_names=False
)
assert_series_equal(
cast(pd.Series, val.value), pd.Series([130]), check_names=False
)
def test_ts_iterator_multivariate_next(self) -> None:
df = pd.DataFrame(
[
["2020-03-01", 100, 200],
["2020-03-02", 120, 220],
["2020-03-03", 130, 230],
],
columns=["time", "y1", "y2"],
)
kats_data = TimeSeriesData(df=df)
kats_iterator = TSIterator(kats_data)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-01")]), check_names=False
)
assert_frame_equal(
cast(pd.DataFrame, val.value),
            pd.DataFrame([[100, 200]], columns=["y1", "y2"]),
        )
from unittest.mock import patch
import pandas as pd
import pytest
from pandas._testing import assert_frame_equal
from src.app_visualization.data_viz_components.metrics_and_KPIs import get_airline_turnover, add_cost_20min_delay, \
add_cost_10min_delay, cost_of_delay, get_airport_delay_cost, get_number_of_indemnities_asked, compensation_due, \
get_number_of_indemnities_asked_and_compensation_due, get_cost_of_lost_customer, cost_of_delay_gb_airlines, \
get_total_to_be_paid, get_new_turnover_for_each_airline, get_percentage_of_lost_sales, \
get_percentage_of_delay_by_company, get_prediction_with_all_cost_id_df
TESTED_MODULE = 'src.app_visualization.data_viz_components.metrics_and_KPIs'
def test_get_airline_turnover__add_columns_compagnie_aerienne_to_preds_df(get_regression_pred_df, get_df_companies):
# Given
expected = pd.DataFrame({'COMPAGNIE AERIENNE': ['NA', 'THA', 'COA'],
'RETARD MINUTES': [187.0, 40.0, 120],
'VOL': [4661, 5026, 2021],
'AEROPORT ARRIVEE': ['AAL', 'LTK', 'JNB'],
'NOMBRE DE PASSAGERS': [10, 40, 30],
'CHIFFRE D AFFAIRE': [7651000000, 2300000, 40500000]
})
# When
actual = get_airline_turnover(get_regression_pred_df, get_df_companies)
# Then
assert_frame_equal(actual, expected)
def test_add_cost_20min_delay__add_columns_with_the_cost_of_20min_delay_wrt_arrival_airport(get_regression_pred_df,
get_df_airport):
# Given
expected = pd.DataFrame({'COMPAGNIE AERIENNE': ['NA', 'THA', 'COA'],
'RETARD MINUTES': [187.0, 40.0, 120],
'VOL': [4661, 5026, 2021],
'AEROPORT ARRIVEE': ['AAL', 'LTK', 'JNB'],
'NOMBRE DE PASSAGERS': [10, 40, 30],
'PRIX RETARD PREMIERE 20 MINUTES': [24, 33, 53]
})
# When
actual = add_cost_20min_delay(get_df_airport, get_regression_pred_df)
# Then
    assert_frame_equal(actual, expected)
import os
from . import app
from pandas import DataFrame
from scipy.spatial import distance
import pandas as pd
import math
from math import sqrt
from math import atan2
from numpy.linalg import norm, det
from numpy import cross, dot
from numpy import radians
from numpy import array, zeros
from numpy import cos, sin, arcsin
from similaritymeasures import curve_length_measure, frechet_dist
from obspy.geodetics import degrees2kilometers
import numpy as np
from flask import Flask, flash, request, redirect, render_template, send_from_directory
from werkzeug.utils import secure_filename
from datetime import datetime, date
import json
UPLOAD_FOLDER = './UPLOADS'
######################CarComp##########################
firstx = 0
firsty = 0
def log_to_dataFrame(file_path):
"""
Converts a log file of a ride to a Pandas dataframe.
Parameters
--------
file_path : str
A path to a log file.
Example of a log file
--------
2020-06-29 13:06:24,595 - INFO - ;LAT;480492306;LON;175678507;UTMX;69136106;UTMY;532496222;HMSL;126112;GSPEED;0;CRS;0;HACC;66720;NXPT;1139
2020-06-29 13:06:24,648 - INFO - ;LAT;480492313;LON;175678494;UTMX;69136096;UTMY;532496230;HMSL;126121;GSPEED;4;CRS;0;HACC;52510;NXPT;1139
2020-06-29 13:06:24,698 - INFO - ;LAT;480492305;LON;175678495;UTMX;69136097;UTMY;532496221;HMSL;126146;GSPEED;1;CRS;0;HACC;49421;NXPT;1140
Returns
--------
A dataframe with all the logs.
"""
logs = pd.read_csv(file_path, header=None, sep=';', names=['TIME', '1', 'LAT', '3', 'LON', '5', 'UTMX', '7', 'UTMY',
'9', 'HMSL', '11', 'GSPEED', '13', 'CRS', '15', 'HACC',
'17', 'NXPT'])
logs = logs.drop(columns=['1', '3', '5', '7', '9', '11', '13', '15', '17'])
logs = logs.dropna()
return logs
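# Example (sketch, hypothetical file name): loading a raw GPS log and
# normalizing it in place.
#   logs = log_to_dataFrame('UPLOADS/ride_2020-06-29.log')
#   normalize_logs(logs)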
def read_csv_ref_lap(file_path):
"""
Creates a dataframe of a reference lap from a csv file.
Parameters
--------
file_path : str
A path to a csv file.
    Example of a csv file
--------
LAT,LON,GSPEED,CRS,NLAT,NLON,NCRS
48.049214299999996,17.5678361,1.08,219.10375000000002,48.0492134,17.567835199999998,215.70312
48.0492134,17.567835199999998,1.03,215.70312,48.0492127,17.567834299999998,215.56731000000002
48.0492127,17.567834299999998,1.11,215.56731000000002,48.049211899999996,17.567833399999998,216.61797
Returns
--------
A dataframe with a reference lap.
"""
logs = pd.read_csv(file_path)
return logs
def normalize_logs(logs):
"""
Normalizes data of the logs dataframe.
In particular, the 'LAT' and 'LON' columns is divided by 10 000 000.
The 'GSPEED' column is divided by 100.
The CRS column is divided by 100 000.
Parameters
--------
logs : DataFrame
A dataframe with logs of a ride.
"""
logs['TIME'] = logs['TIME'].apply(lambda x: x.split(' ')[1])
logs['TIME'] = pd.to_datetime(logs['TIME'], format='%H:%M:%S,%f').dt.time
logs['TIME'] = logs['TIME'].apply(lambda x: datetime.combine(date.today(), x) - datetime.combine(date.today(), logs['TIME'][0]))
logs['TIME'] = logs['TIME'].apply(lambda x: x.total_seconds())
logs['LAT'] = logs['LAT'].apply(lambda x: x * 0.0000001)
logs['LON'] = logs['LON'].apply(lambda x: x * 0.0000001)
logs['GSPEED'] = logs['GSPEED'].apply(lambda x: x * 0.01)
logs['CRS'] = logs['CRS'].apply(lambda x: x * 0.00001)
def drop_unnecessary_columns(logs):
"""
Drops the columns 'UTMX', 'UTMY', 'HMSL', 'HACC' and 'NXPT' of the logs dataframe.
Parameters
--------
logs : DataFrame
A dataframe with logs of a ride.
"""
logs.drop(columns=['UTMX', 'UTMY', 'HMSL', 'HACC', 'NXPT'], inplace=True)
def drop_logs_where_car_stayed(logs: DataFrame):
"""
    Drops rows from the logs dataframe where LAT and LON did not change from the previous row.
    Resets the indices of the dataframe at the end.
Parameters
--------
logs : DataFrame
A dataframe with logs of a ride.
"""
last_lat = None
last_lon = None
dropped_rows = list()
for index, row in logs.iterrows():
if row['LAT'] == last_lat and row['LON'] == last_lon:
dropped_rows.append(index)
else:
last_lat = row['LAT']
last_lon = row['LON']
logs.drop(dropped_rows, inplace=True)
logs.reset_index(drop=True, inplace=True)
def create_columns_with_future_position(logs):
"""
    Creates the columns NLAT, NLON and NCRS, which hold the next position and course of the car.
Parameters
--------
logs : DataFrame
A dataframe with logs of a ride.
"""
next_lat = logs['LAT']
next_lat = next_lat.append(pd.Series([np.nan]), ignore_index=True)
next_lat = next_lat.iloc[1:]
next_lat = next_lat.reset_index(drop=True)
next_lon = logs['LON']
next_lon = next_lon.append(pd.Series([np.nan]), ignore_index=True)
next_lon = next_lon.iloc[1:]
next_lon = next_lon.reset_index(drop=True)
next_crs = logs['CRS']
next_crs = next_crs.append(pd.Series([np.nan]), ignore_index=True)
next_crs = next_crs.iloc[1:]
next_crs = next_crs.reset_index(drop=True)
logs['NLAT'] = next_lat
logs['NLON'] = next_lon
logs['NCRS'] = next_crs
    logs.dropna(inplace=True)  # Drop the last row, which contains NaN values.
def segment(p1, p2):
"""
Parameters
===========
p1 : list
The first point.
p2 : list
The second point.
Returns
==========
    A line segment represented as a quadruple (x1, y1, x2, y2).
"""
return (p1[0], p1[1], p2[0], p2[1])
def ccw(a, b, c):
'''
    Determines whether the three points a, b, c are oriented counterclockwise.
'''
return (c[1] - a[1]) * (b[0] - a[0]) > (b[1] - a[1]) * (c[0] - a[0])
def intersection(s1, s2):
a = (s1[0], s1[1])
b = (s1[2], s1[3])
c = (s2[0], s2[1])
d = (s2[2], s2[3])
return ccw(a, c, d) != ccw(b, c, d) and ccw(a, b, c) != ccw(a, b, d)
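# The check above is the standard counterclockwise-orientation test for 2D
# segment intersection. A sanity check with hypothetical coordinates:
#   intersection(segment([0, 0], [1, 1]), segment([0, 1], [1, 0]))  # True (crossing)
#   intersection(segment([0, 0], [1, 0]), segment([0, 1], [1, 1]))  # False (parallel)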
def separate_laps(traces, ref_lap=None):
"""
    Separates the log dataframe into individual laps.
Parameters
--------
traces : DataFrame
A dataframe with logs of a ride.
ref_lap : DataFrame
A dataframe with logs of a reference ride.
        It is used to define the finish line.
        It is an optional parameter. Default value is None.
    Returns
    --------
    A list of indices into 'traces' marking the start index of each detected lap.
"""
ref_lap = traces if ref_lap is None else ref_lap
points = traces[['LON', 'LAT']].values.tolist()
# use last points to determine normal vector
last_point1 = [ref_lap['LON'].iloc[-1], ref_lap['LAT'].iloc[-1]]
last_point2 = [ref_lap['LON'].iloc[-2], ref_lap['LAT'].iloc[-2]]
a = last_point2[0] - last_point1[0]
b = last_point2[1] - last_point1[1]
dst = distance.euclidean(last_point1, last_point2)
distance_multiplier = math.ceil(0.0001 / (2 * dst))
v_normal = np.array([-b, a])
start_point = np.array(last_point1)
point_top = start_point + distance_multiplier * v_normal
point_bottom = start_point - distance_multiplier * v_normal
start_segment = segment(point_top, point_bottom)
laps = [0]
for i in range(len(points) - 1):
if points[i] == points[i + 1]:
continue
# segment between point1 and point2
seg = segment(points[i], points[i + 1])
has_intersection = intersection(seg, start_segment)
# add start of a new lap
if has_intersection:
intersection(seg, start_segment)
laps.append(i + 1)
print('Lap ending at index: {}'.format(i))
print(seg, start_segment)
return laps
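# Example (sketch, hypothetical file names): wiring the helpers together to
# split a recorded ride into laps against a reference ride.
def _example_lap_split():  # pragma: no cover
    reference = get_essential_data('UPLOADS/reference.log')
    ride = get_essential_data('UPLOADS/ride.log')
    return separate_laps(ride, reference)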
def normalize_for_graph(logs):
"""
    Drops all columns except LAT and LON and renames them to y and x for plotting.
Parameters
--------
logs : DataFrame
A dataframe with logs of a ride.
"""
logs.drop(columns=['UTMX', 'UTMY', 'HMSL', 'GSPEED', 'CRS', 'HACC', 'NXPT'], inplace=True)
logs.rename(columns={"LAT": "y", "LON": "x"}, inplace=True)
def get_raw_data(file_path) -> DataFrame:
log_df = log_to_dataFrame(file_path)
normalize_logs(log_df)
return log_df
def get_essential_data(file_path) -> DataFrame:
log_df = log_to_dataFrame(file_path)
normalize_logs(log_df)
drop_unnecessary_columns(log_df)
drop_logs_where_car_stayed(log_df)
return log_df
def get_graph_data(file_path) -> DataFrame:
log_df = log_to_dataFrame(file_path)
normalize_logs(log_df)
normalize_for_graph(log_df)
# get_laps_json(log_df)
return log_df
def get_lap_data(reference_file_path, traces_file_path):
reference_df = log_to_dataFrame(reference_file_path)
normalize_logs(reference_df)
traces_df = log_to_dataFrame(traces_file_path)
normalize_logs(traces_df)
laps = separate_laps(traces_df, reference_df)
analyzed_laps = analyze_laps(traces_df, reference_df, laps)
return analyzed_laps
def get_raw_data_json(file_path) -> str:
data = get_raw_data(file_path)
return data.to_json(orient="records")
def get_essential_data_json(file_path) -> str:
data = get_essential_data(file_path)
return data.to_json(orient="records")
def get_track_graph_data(file_path) -> str:
data = get_graph_data(file_path)
data.x = data.x.apply(lambda deg: degrees2kilometers(deg) * 1000)
data.y = data.y.apply(lambda deg: degrees2kilometers(deg) * 1000)
global firsty
global firstx
firsty = data.x[0]
firstx = data.y[0]
data.x -= data.x[0]
data.y -= data.y[0]
return data.to_json(orient="records")
def get_reference_xy(data) -> str:
data.drop(columns=['TIME', 'CRS', 'GSPEED'], inplace=True)
return data.to_json(orient="records")
def get_reference_crs(data) -> str:
data.drop(columns=['x', 'y', 'GSPEED'], inplace=True)
data.rename(columns={"TIME": "x", "CRS": "y"}, inplace=True)
return data.to_json(orient="records")
def get_data_xy(data) -> str:
data.drop(columns=['TIME', 'CRS'], inplace=True)
return data.to_json(orient="records")
def get_data_crs(data) -> str:
data.drop(columns=['x', 'y'], inplace=True)
data.rename(columns={"TIME": "x", "CRS": "y"}, inplace=True)
return data.to_json(orient="records")
def average(lst):
return sum(lst) / len(lst)
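# analyze_laps slices the trace between consecutive lap-start indices, compares
# each lap against the reference lap via perpendicular distances, shifts the
# coordinates by the module-level firstx/firsty offsets, and packs each lap
# into JSON-serialisable records.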
def analyze_laps(traces, reference_lap, laps):
data_dict = {
'lapNumber': [],
'pointsPerLap': [],
'curveLength': [],
'averagePerpendicularDistance': [],
'lapData': []
}
for i in range(len(laps) - 1):
lap_data = traces.iloc[laps[i]: laps[i + 1]]
drop_unnecessary_columns(lap_data)
perpendicular_distance = find_out_difference_perpendiculars(lap_data, reference_lap)
average_dist = round(perpendicular_distance / 100.0, 3)
data_dict['lapNumber'].append(i)
data_dict['pointsPerLap'].append(len(lap_data))
data_dict['curveLength'].append(0)
data_dict['averagePerpendicularDistance'].append(average_dist)
lap_data.LAT = lap_data.LAT.apply(lambda deg: degrees2kilometers(deg) * 1000)
lap_data.LON = lap_data.LON.apply(lambda deg: degrees2kilometers(deg) * 1000)
lap_data.LAT -= firstx
lap_data.LON -= firsty
data_dict['lapData'].append(json.loads(lap_data.to_json(orient="records")))
    # the last circuit (lap) was not saved yet, so save it as well
    lap_data = traces.iloc[laps[-1]:]
drop_unnecessary_columns(lap_data)
perpendicular_distance = find_out_difference_perpendiculars(lap_data, reference_lap)
average_dist = round(perpendicular_distance / 100.0, 3)
data_dict['lapNumber'].append(len(laps))
data_dict['pointsPerLap'].append(len(lap_data))
data_dict['curveLength'].append(0)
data_dict['averagePerpendicularDistance'].append(average_dist)
lap_data.LAT = lap_data.LAT.apply(lambda deg: degrees2kilometers(deg) * 1000)
lap_data.LON = lap_data.LON.apply(lambda deg: degrees2kilometers(deg) * 1000)
lap_data.LAT -= firstx
lap_data.LON -= firsty
data_dict['lapData'].append(json.loads(lap_data.to_json(orient="records")))
data_frame = pd.DataFrame(data=data_dict)
print("???!!!")
return data_frame
def save_laps_to_files(file_path, file_name, laps):
laps.sort_values(by=['averagePerpendicularDistance'], inplace=True)
laps.to_csv('{}/{}_lap-stats.csv'.format(file_path, file_name),
index=False,
header=['Lap number', 'Points per lap', 'Avg. perp. diff. (cm)'],
columns=['lapNumber', 'pointsPerLap', 'averagePerpendicularDistance'])
laps.to_csv('{}/{}_lap-data.csv'.format(file_path, file_name),
index=False,
header=['Lap number', 'Lap data'],
columns=['lapNumber', 'lapData'])
def put_laps_to_json(laps):
return laps.to_json(orient="records")
def put_export_to_json(laps):
print(laps.to_json(orient="columns"))
return laps.to_json(orient="columns")
def get_number_of_lines(file_path):
with open(file_path) as f:
for i, l in enumerate(f):
pass
return i + 1
def create_curve(dataframe):
curve = zeros((dataframe.shape[0], 2))
curve[:, 0] = dataframe.LON
curve[:, 1] = dataframe.LAT
return curve
def earth_distance(point1, point2):
"""
    Calculates the great circle (haversine) distance in kilometres between
    two points on the earth, specified in decimal degrees.
"""
lon1, lat1, lon2, lat2 = map(radians, [point1[1], point1[0], point2[1], point2[0]])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2
c = 2 * arcsin(sqrt(a))
km = 6367 * c
return km
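# Illustrative check of the haversine computation above (the coordinates are assumptions and
# points are given as (lat, lon) pairs): the great-circle distance between Paris
# (48.8566, 2.3522) and London (51.5074, -0.1278) evaluates to roughly 343 km with the
# 6367 km earth radius used here.
# >>> earth_distance((48.8566, 2.3522), (51.5074, -0.1278))  # ~343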
def distance_of_curve(lap):
return sum(earth_distance(pt1, pt2)
for pt1, pt2 in zip(lap, lap[1:]))
def find_out_difference(ref_lap, laps):
"""
Using several curve metrics, finds out the differences between
a reference lap and the laps of a ride
Parameters
--------
ref_lap : DataFrame
A dataframe with the logs of a reference ride.
laps : list
A list of dataframes.
Each dataframe represents one lap of a ride.
Returns
--------
A dataframe object with three columns: a Measurements count, a Frechet distance and a Curve length measure.
"""
ref_curve = create_curve(ref_lap)
measurement_column = 'Measurements count'
frechet_column = 'Frechet distance'
curve_len_column = 'Curve length measure'
data_structure = {measurement_column: [],
frechet_column: [],
curve_len_column: []}
differences_df = pd.DataFrame(data=data_structure)  # api: pandas.DataFrame
from bedrock.annotator.annotator import Annotator
from bedrock.doc.doc import Doc
from bedrock.doc.token import Token
from bedrock.doc.annotation import Annotation
from bedrock.doc.relation import Relation
from bedrock.doc.layer import Layer
from typing import List
import pandas as pd
from fuzzywuzzy import process
from fuzzywuzzy import fuzz
import _pickle as pickle
from typing import Callable
import bedrock.libs.CharSplit.char_split as char_split
from typing import Set
import warnings
import re
# DictionaryAnnotator is the actual implementation of the annotator. It searches terms in the dictionary in the
# sentences of a document and will give back a list of annotations and relations
class DictionaryAnnotator(Annotator):
ROOT = 'root'
TREE = 'tree'
TERM = 'term'
QUERY = 'query'
LENGTH = 'length'
SPLIT = 'split'
ADDED = 'added'
WORD = 'word'
COUNT = 'count'
SEP = ':'
WHITESPACE = ' '
def __init__(self, origin: str, layer_name: str = "", terms: List[str] = None, features: List[str] = None,
feature_values: List[str] = None, model_path: str = None, min_matching_score: int = 90,
word_basic_form_fn: Callable[[str], str] = lambda word: word, stop_words: Set = {}):
if model_path is None and (len(terms) != len(feature_values) or len(feature_values) != len(features)):
raise Exception('Lengths of terms, feature values and features vary.')
if min_matching_score < 0 or min_matching_score > 100:
warnings.warn('Minimum matching score has to be between 0 and 100: {}'.format(min_matching_score))
min_matching_score = 90
self._min_matching_score = min_matching_score
self._origin = origin
self._layer_name = layer_name
self._stemmer_fn = word_basic_form_fn
self._stop_words = stop_words
# create a new model if no path is given
if model_path is None:
self._data = pd.DataFrame(
{
self.TERM: terms,
Annotation.FEATURE_VAL: feature_values,
Annotation.FEATURE: features
}
)
self._features_values = feature_values
self.__create_model()
# load an model if the path is given
else:
with open(model_path, 'rb') as pickle_in:
self._data, self._word_list = pickle.load(pickle_in)
self._regex = re.compile('|'.join(map(re.escape, list(self._word_list[self.WORD]))), re.I)
def __create_model(self):
""" will create a list of words that occurs in the dictionary
"""
self._data = self.__split_and_stem(self._data)
word_list = pd.DataFrame(columns=[self.WORD, self.COUNT])
for idx, row in self._data.iterrows():
for word in row[self.SPLIT]:
already_in = word_list[word_list[self.WORD] == word]
if len(already_in.index) > 0:
word_list.loc[word_list[self.WORD] == word, self.COUNT] = word_list.loc[word_list[self.WORD] == word, self.COUNT] + 1
else:
word_list = word_list.append({
self.WORD: word,
self.COUNT: 1
}, ignore_index=True)
self._word_list = word_list
self._regex = re.compile('|'.join(map(re.escape, list(self._word_list[self.WORD]))), re.I)
# adds split column with split and stemmed terms
def __split_and_stem(self, data: pd.DataFrame) -> pd.DataFrame:
"""
split_and_stem will split the terms in the dictionary to get words and stem them
:param data: the data frame that contains the dictionary
"""
# split the list by a whitespace char
t = data[self.TERM].apply(lambda x: x.split(self.WHITESPACE))
# remove words that are shorter or equal than 2 chars
t = t.apply(lambda x: list(filter(lambda a: len(a) > 2, x)))
# remove stop words
t = t.apply(lambda x: list(filter(lambda a: a not in self._stop_words, x)))
# split compounding words
t = t.apply(lambda x: self.__split_compounds(x))
# to lower case
t = t.apply(lambda x: list(map(lambda a: a.lower(), x)))
# find the stemming of the remaining words
t = t.apply(lambda x: list(map(self._stemmer_fn, x)))
data[self.SPLIT] = t
data[self.QUERY] = data[self.SPLIT].apply(lambda x: self.WHITESPACE.join(x))
data[self.LENGTH] = data[self.SPLIT].apply(lambda x: len(x))
return data
def __split_compounds(self, words: List[str]) -> List[str]:
"""
split compounds will split a list of words into their compounds
:param words: a list of strings
"""
splitted_words = []
while len(words) > 0:
word = words.pop()
if len(word) <= 6 or word[0].isupper() is False:
splitted_words.append(word)
continue
rate, head, tail = char_split.split_compound(word)[0]
if rate < 0.7:
splitted_words.append(word)
continue
words.append(head)
words.append(tail)
return splitted_words
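# Illustrative sketch of the splitting rules above (the tokens and outputs are hypothetical,
# since the actual split depends on the scores produced by CharSplit): a short or lower-cased
# token such as "arzt" is passed through unchanged, while a capitalised compound like
# "Blutdruckmessung" is recursively broken into its parts whenever
# char_split.split_compound() scores the split above 0.7.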
def __token_sorting_key(self, token):
"""
token_sorting_key returns the field of a token that is used as the sorting key
:param token:
:returns the begin field in a token
"""
return token[Token.BEGIN]
def save_model(self, path: str):
"""
save model will store the model of the dictionary annotator to a given path
:param path:
"""
with open(path, 'wb') as pickle_file:
pickle.dump([self._data, self._word_list], pickle_file)
def get_min_matching_score(self):
"""
get_min_matching_score will return the currently set minimum score
:returns the set minimum matching score:
"""
return self._min_matching_score
def set_min_matching_score(self, score: int):
"""
set_min_matching_score will overwrite the currently set minimum score
:param the new minimum score:
"""
self._min_matching_score = score
def get_annotations(self, doc: Doc) -> (pd.DataFrame, pd.DataFrame):
"""
get_annotations will find the given named entities of the dictionary in the doc text
:param doc:
:returns a tuple of dataframes: the first contains the annotations, the second the relations between annotations
"""
old_annotations = doc.get_annotations()
doc_text = doc.get_text()
# prelabel all words that occur in the dictionary
matches = pd.DataFrame(columns=[Annotation.BEGIN, Annotation.END, self.QUERY])  # api: pandas.DataFrame
"""
Eo-tilematcher package.
"""
import geopandas as gpd
import pandas as pd
import pygeos
from pathlib import Path
DATA_DIR = Path(__file__).parent / "data"
def _db_loader(file_name):
subdir = file_name.split("_")[0]
tiles_db = gpd.read_file(DATA_DIR / subdir / file_name, driver="ESRI Shapefile")
return tiles_db
def _sentinel2_db_loader():
return _db_loader("sentinel2_tiles.shp")
def _landsat_db_loader():
return _db_loader("landsat_tiles.shp")
SPACECRAFTS_DB = dict(
sentinel2=None,
landsat5=None,
landsat8=None,
)
SPACECRAFTS_LOADERS = dict(
sentinel2=_sentinel2_db_loader,
landsat5=_landsat_db_loader,
landsat8=_landsat_db_loader,
)
def get_spacecraft_db(spacecraft):
spacecraft = spacecraft.lower()
if spacecraft not in SPACECRAFTS_DB:
raise ValueError(
f"Spacecraft '{spacecraft}' not supported.\n"
f"Allowed values: {str(list(SPACECRAFTS_DB.keys()))}"
)
if SPACECRAFTS_DB[spacecraft] is None:
SPACECRAFTS_DB[spacecraft] = SPACECRAFTS_LOADERS[spacecraft]()
return SPACECRAFTS_DB[spacecraft]
def get_contains_intersect_on_tiles(gpd_to_match, gpd_tiles, gpd_tiles_col):
"""get if contains or intersects (but not contains) shape on sat tiles
Parameters
----------
gpd_to_match: geodataframe
roi to be intersected by tiles
gpd_tiles: geodataframe
geodataframe of sat tiles or path#rows
gpd_tiles_col: str
tiles/pathrow column on gpd_tiles
Returns
-------
geodataframe that matches shapes to tiles (either contains or
intersects)
"""
contains_ = []
intersects_ = []
tile_name_out = gpd_tiles_col
gpd_to_match = gpd_to_match.reset_index(drop=True)
for i, r in gpd_to_match.iterrows():
# get those that contains geom
flag_cont = gpd_tiles.geometry.contains(r["geometry"])
flag_int = gpd_tiles.geometry.intersects(r["geometry"])
if any(flag_cont):
# any contains
gpd_tf = gpd_tiles[flag_cont]
for it, rt in gpd_tf.iterrows():
gpd_ = gpd_to_match.iloc[[i]].copy()
gpd_["match_polygon"] = rt["geometry"].intersection(r["geometry"]).wkt
gpd_["match"] = "total"
gpd_[tile_name_out] = rt[gpd_tiles_col]
contains_.append(gpd_)
elif any(flag_int):
gpd_tf = gpd_tiles[flag_int]
for it, rt in gpd_tf.iterrows():
gpd_ = gpd_to_match.iloc[[i]].copy()
gpd_["match_polygon"] = rt["geometry"].intersection(r["geometry"]).wkt
gpd_["match"] = "partial"
gpd_[tile_name_out] = rt[gpd_tiles_col]
intersects_.append(gpd_)
else:
# switch to overlay
gpd_test = gpd_to_match.iloc[[i]].copy()
gpd_over = gpd.overlay(gpd_test, gpd_tiles)
for io, ro in gpd_over.iterrows():
gpd_tf = gpd_tiles[gpd_tiles[gpd_tiles_col] == ro[gpd_tiles_col]]
for it, rt in gpd_tf.iterrows():
gpd_ = gpd_to_match.iloc[[i]]
# by construction
if rt["geometry"].contains(r["geometry"]):
gpd_["match_polygon"] = (
rt["geometry"].intersection(r["geometry"]).wkt
)
gpd_["match"] = "total-overlay"
gpd_[tile_name_out] = rt[gpd_tiles_col]
contains_.append(gpd_)
elif rt["geometry"].intersects(r["geometry"]):
gpd_["match_polygon"] = (
rt["geometry"].intersection(r["geometry"]).wkt
)
gpd_["match"] = "partial-overlay"
gpd_[tile_name_out] = rt[gpd_tiles_col]
intersects_.append(gpd_)
else:
raise ("Could not make any match")
if len(contains_) and len(intersects_):
gpd_contains_, gpd_intersects_ = pd.concat(
contains_, ignore_index=True
), pd.concat(intersects_, ignore_index=True)
gpd_contains_intersects_ = pd.concat(
[gpd_contains_, gpd_intersects_], ignore_index=True
)
return gpd_contains_intersects_
elif len(contains_) > 0:
gpd_contains_ = pd.concat(contains_, ignore_index=True)
return gpd_contains_
elif len(intersects_) > 0:
gpd_intersects_ = pd.concat(intersects_, ignore_index=True)  # api: pandas.concat
"""
Note: for naming purposes, most tests are titled as e.g. "test_nlargest_foo"
but are implicitly also testing nsmallest_foo.
"""
from itertools import product
import numpy as np
import pytest
import pandas as pd
from pandas import Series
import pandas._testing as tm
main_dtypes = [
"datetime",
"datetimetz",
"timedelta",
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
"uint8",
"uint16",
"uint32",
"uint64",
]
@pytest.fixture
def s_main_dtypes():
"""
A DataFrame with many dtypes
* datetime
* datetimetz
* timedelta
* [u]int{8,16,32,64}
* float{32,64}
The columns are the name of the dtype.
"""
df = pd.DataFrame(
{
"datetime": pd.to_datetime(["2003", "2002", "2001", "2002", "2005"]),
"datetimetz": pd.to_datetime(
["2003", "2002", "2001", "2002", "2005"]
).tz_localize("US/Eastern"),
"timedelta": pd.to_timedelta(["3d", "2d", "1d", "2d", "5d"]),
}
)
for dtype in [
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
"uint8",
"uint16",
"uint32",
"uint64",
]:
df[dtype] = Series([3, 2, 1, 2, 5], dtype=dtype)
return df
@pytest.fixture(params=main_dtypes)
def s_main_dtypes_split(request, s_main_dtypes):
"""Each series in s_main_dtypes."""
return s_main_dtypes[request.param]
def assert_check_nselect_boundary(vals, dtype, method):
# helper function for 'test_boundary_{dtype}' tests
ser = Series(vals, dtype=dtype)
result = getattr(ser, method)(3)
expected_idxr = [0, 1, 2] if method == "nsmallest" else [3, 2, 1]
expected = ser.loc[expected_idxr]
tm.assert_series_equal(result, expected)
class TestSeriesNLargestNSmallest:
@pytest.mark.parametrize(
"r",
[
Series([3.0, 2, 1, 2, "5"], dtype="object"),
Series([3.0, 2, 1, 2, 5], dtype="object"),
# not supported on some archs
# Series([3., 2, 1, 2, 5], dtype='complex256'),
Series([3.0, 2, 1, 2, 5], dtype="complex128"),
Series(list("abcde")),
Series(list("abcde"), dtype="category"),
],
)
def test_nlargest_error(self, r):
dt = r.dtype
msg = f"Cannot use method 'n(largest|smallest)' with dtype {dt}"
args = 2, len(r), 0, -1
methods = r.nlargest, r.nsmallest
for method, arg in product(methods, args):
with pytest.raises(TypeError, match=msg):
method(arg)
def test_nsmallest_nlargest(self, s_main_dtypes_split):
# float, int, datetime64 (use i8), timedelta64 (same),
# object that are numbers, object that are strings
ser = s_main_dtypes_split
tm.assert_series_equal(ser.nsmallest(2), ser.iloc[[2, 1]])
tm.assert_series_equal(ser.nsmallest(2, keep="last"), ser.iloc[[2, 3]])
empty = ser.iloc[0:0]
tm.assert_series_equal(ser.nsmallest(0), empty)
tm.assert_series_equal(ser.nsmallest(-1), empty)
tm.assert_series_equal(ser.nlargest(0), empty)
tm.assert_series_equal(ser.nlargest(-1), empty)
tm.assert_series_equal(ser.nsmallest(len(ser)), ser.sort_values())
tm.assert_series_equal(ser.nsmallest(len(ser) + 1), ser.sort_values())
tm.assert_series_equal(ser.nlargest(len(ser)), ser.iloc[[4, 0, 1, 3, 2]])
tm.assert_series_equal(ser.nlargest(len(ser) + 1), ser.iloc[[4, 0, 1, 3, 2]])
def test_nlargest_misc(self):
ser = Series([3.0, np.nan, 1, 2, 5])
tm.assert_series_equal(ser.nlargest(), ser.iloc[[4, 0, 3, 2]])
tm.assert_series_equal(ser.nsmallest(), ser.iloc[[2, 3, 0, 4]])
msg = 'keep must be either "first", "last"'
with pytest.raises(ValueError, match=msg):
ser.nsmallest(keep="invalid")
with pytest.raises(ValueError, match=msg):
ser.nlargest(keep="invalid")
# GH#15297
ser = Series([1] * 5, index=[1, 2, 3, 4, 5])
expected_first = Series([1] * 3, index=[1, 2, 3])
expected_last = Series([1] * 3, index=[5, 4, 3])
result = ser.nsmallest(3)
tm.assert_series_equal(result, expected_first)
result = ser.nsmallest(3, keep="last")
tm.assert_series_equal(result, expected_last)
result = ser.nlargest(3)
tm.assert_series_equal(result, expected_first)
result = ser.nlargest(3, keep="last")
tm.assert_series_equal(result, expected_last)
@pytest.mark.parametrize("n", range(1, 5))
def test_nlargest_n(self, n):
# GH 13412
ser = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
result = ser.nlargest(n)
expected = ser.sort_values(ascending=False).head(n)
tm.assert_series_equal(result, expected)
result = ser.nsmallest(n)
expected = ser.sort_values().head(n)
tm.assert_series_equal(result, expected)
def test_nlargest_boundary_integer(self, nselect_method, any_int_dtype):
# GH#21426
dtype_info = np.iinfo(any_int_dtype)
min_val, max_val = dtype_info.min, dtype_info.max
vals = [min_val, min_val + 1, max_val - 1, max_val]
assert_check_nselect_boundary(vals, any_int_dtype, nselect_method)
def test_nlargest_boundary_float(self, nselect_method, float_dtype):
# GH#21426
dtype_info = np.finfo(float_dtype)
min_val, max_val = dtype_info.min, dtype_info.max
min_2nd, max_2nd = np.nextafter([min_val, max_val], 0, dtype=float_dtype)
vals = [min_val, min_2nd, max_2nd, max_val]
assert_check_nselect_boundary(vals, float_dtype, nselect_method)
@pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
def test_nlargest_boundary_datetimelike(self, nselect_method, dtype):
# GH#21426
# use int64 bounds and +1 to min_val since true minimum is NaT
# (include min_val/NaT at end to maintain same expected_idxr)
dtype_info = np.iinfo("int64")
min_val, max_val = dtype_info.min, dtype_info.max
vals = [min_val + 1, min_val + 2, max_val - 1, max_val, min_val]
assert_check_nselect_boundary(vals, dtype, nselect_method)
def test_nlargest_duplicate_keep_all_ties(self):
# see GH#16818
ser = Series([10, 9, 8, 7, 7, 7, 7, 6])
result = ser.nlargest(4, keep="all")
expected = Series([10, 9, 8, 7, 7, 7, 7])
tm.assert_series_equal(result, expected)
result = ser.nsmallest(2, keep="all")
expected = Series([6, 7, 7, 7, 7], index=[7, 3, 4, 5, 6])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,expected", [([True, False], [True]), ([True, False, True, True], [True])]
)
def test_nlargest_boolean(self, data, expected):
# GH#26154 : ensure True > False
ser = Series(data)
result = ser.nlargest(1)
expected = Series(expected)
tm.assert_series_equal(result, expected)
def test_nlargest_nullable(self, any_nullable_numeric_dtype):
# GH#42816
dtype = any_nullable_numeric_dtype
arr = np.random.randn(10).astype(dtype.lower(), copy=False)
ser = Series(arr.copy(), dtype=dtype)
ser[1] = pd.NA
result = ser.nlargest(5)
expected = (
Series(np.delete(arr, 1), index=ser.index.delete(1))
.nlargest(5)
.astype(dtype)
)
tm.assert_series_equal(result, expected)  # api: pandas._testing.assert_series_equal
import os
import urllib
import pandas as pd
from gensim.models import Word2Vec
from SPARQLWrapper import SPARQLWrapper, JSON
# DISCLAIMER
# File modified from https://github.com/mariaangelapellegrino/Evaluation-Framework
class data_manager:
def __init__(self, gold_standard_file, vectors_file, w2v_model_name):
self.gold_standard_file = gold_standard_file
self.vectors_file = vectors_file
self.w2v_model_name = w2v_model_name
self.vector_size = int(
self.w2v_model_name.split("_")[3].rsplit("v")[0])
print("Data manager intialized.")
def retrieve_vectors(self):
if not os.path.isfile(self.vectors_file):
print("Vectors not computed. Retrieving vectors.")
gold = list(
self._read_file(
self.gold_standard_file, ["DBpedia_URI15"]
)["DBpedia_URI15"]
)
if "DB" in self.w2v_model_name:
# str.lstrip strips a character set, not a prefix, so remove the URI prefix explicitly
processed_gold = [w.replace("http://dbpedia.org/resource/", "", 1) for
w in gold]
processed_gold = [urllib.parse.quote(w, encoding="utf-8",
safe=":/%#") for w in processed_gold]
processed_gold = [w.replace("%C2%96", "%E2%80%93") if "%C2%96"
in w else w for w in processed_gold]
self._create_vectors(gold, processed_gold)
else:
processed_entities_list = list()
entities = list()
for entity in gold:
if "_ _" not in entity and " " not in entity and '"' not in entity:
wiki_entity = self._run_query(entity)
if not wiki_entity == "":
entities.append(entity)
processed_entity = wiki_entity.replace(
"http://www.wikidata.org/entity/", "", 1)
processed_entities_list.append(
processed_entity)
self._create_vectors(entities, processed_entities_list)
print("Retrieving vectors.")
vectors = self._read_vectors_file()
return vectors
def intersect_vectors_goldStandard(self, vectors, column_score,
gold_standard_data=None,
column_key="DBpedia_URI15"):
gold = self._read_file(self.gold_standard_file, [column_key,
column_score])
gold.rename(columns={column_key: "id"}, inplace=True)
gold.rename(columns={column_score: column_score}, inplace=True)
merged = pd.merge(gold, vectors, on="id", how="inner")
output_left_merge = pd.merge(gold, vectors, how="outer",
indicator=True)
ignored = output_left_merge[output_left_merge["_merge"] == "left_only"]
return merged, ignored
def _create_vectors(self, gold, processed_gold):
w2v_model = self._load_w2v_model()
vectors_dict = dict()
print("Creating vectors for the evaluation dataset.")
for idx in range(len(gold)):
if processed_gold[idx] in w2v_model.wv.vocab:
vector = w2v_model.wv.get_vector(processed_gold[idx])
vectors_dict[gold[idx]] = vector
vectors = pd.DataFrame.from_dict(vectors_dict, orient="index")
vectors.reset_index(level=0, inplace=True)
print("Writing vectors to file.")
vectors.to_csv(self.vectors_file, header=False, index=False,
encoding="latin1")
print("Vectors created.")
return None
def _read_vectors_file(self):
local_vectors = pd.read_csv(
self.vectors_file,
names=self._create_header(),
encoding="latin1", index_col=False)
return local_vectors
def _create_header(self):
headers = ["id"]
for i in range(0, self.vector_size):
headers.append(i)
return headers
def _read_file(self, filename, columns):
return pd.read_csv(filename, "\t", usecols=columns, encoding="latin1")
def _load_w2v_model(self):
print("Loading Word2Vec model.")
if "DB" in self.w2v_model_name:
model_file = "../../../../Data/processed/models/DBpedia/" \
+ self.w2v_model_name
else:
model_file = "../../../../Data/processed/models/Wikidata/" \
+ self.w2v_model_name
w2v_model = Word2Vec.load(model_file)
print("Loaded.")
return w2v_model
def _run_query(self, dbpedia_instance):
sparql = SPARQLWrapper("http://dbpedia.org/sparql")
query = """
PREFIX owl: <http://www.w3.org/2002/07/owl#>
SELECT DISTINCT ?wikiEntity
WHERE
{""" + "<" + dbpedia_instance + ">" + \
"""
owl:sameAs ?wikiEntity .
FILTER regex(str(?wikiEntity), "wikidata.org/entity/Q") .
}
"""
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
results_df = pd.io.json.json_normalize(results["results"]["bindings"])  # api: pandas.io.json.json_normalize
from sklearn.metrics import mean_absolute_error, mean_squared_error
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from neupre.instructions.base import mape
from neupre.misc.dataops import load_data_point_online
plt.style.use('ggplot')
class BaseBackend(object):
def __init__(self, buffsize):
# self.X_train_onestep = None
# self.y_train_onestep = None
self.X_train_multistep = None
self.y_train_multistep = None
self.X_train_onestep96 = None
self.y_train_onestep96 = None
self.X_test_onestep96 = None
self.X_train_start_date = None
self.y_train_start_date = None
self.counter = 0
self.last_line = 0
# self.predictions_onestep = None
self.predictions_multistep = None
self.predictions_onestep96 = None
self.tensor3D = False
self.path = None
self.data_mean = None
self.data_std = None
self.statspath = None
self.buffsize = buffsize
self.maes_multi = []
self.mses_multi = []
self.mapes_multi = []
self.maes_one96 = []
self.mses_one96 = []
self.mapes_one96 = []
self.mapes_working_days_multi = []
self.mapes_holidays_multi = []
self.mapes_working_days_one96 = []
self.mapes_holidays_one96 = []
self.mapes_jan_one96 = []
self.mapes_feb_one96 = []
self.mapes_mar_one96 = []
self.mapes_apr_one96 = []
self.mapes_may_one96 = []
self.mapes_jun_one96 = []
self.mapes_jul_one96 = []
self.mapes_aug_one96 = []
self.mapes_sep_one96 = []
self.mapes_oct_one96 = []
self.mapes_nov_one96 = []
self.mapes_dec_one96 = []
self.mapes_jan_multi = []
self.mapes_feb_multi = []
self.mapes_mar_multi = []
self.mapes_apr_multi = []
self.mapes_may_multi = []
self.mapes_jun_multi = []
self.mapes_jul_multi = []
self.mapes_aug_multi = []
self.mapes_sep_multi = []
self.mapes_oct_multi = []
self.mapes_nov_multi = []
self.mapes_dec_multi = []
def sim(self):
print("Skipping ", self.last_line, " lines")
new_data = load_data_point_online(maxline=96, skip=self.last_line, path=self.path)
self.last_line += 96
print("Will skip ", self.last_line, " lines")
# print("Onestep MAE was", mean_absolute_error(new_data[0], self.predictions_onestep[-1])) # first value was prediction
# print("Onestep MSE was", mean_squared_error(new_data[0], self.predictions_onestep[-1]))
# calculate errors of predictions in last step
self.maes_multi.append(mean_absolute_error(new_data, self.predictions_multistep[-1]))
self.mses_multi.append(mean_squared_error(new_data, self.predictions_multistep[-1]))
self.maes_one96.append(mean_absolute_error(new_data, self.predictions_onestep96[-1]))
self.mses_one96.append(mean_squared_error(new_data, self.predictions_onestep96[-1]))
y_test = np.copy(new_data)
y_pred_multi = np.copy(self.predictions_multistep[-1])
y_pred_one96 = np.copy(self.predictions_onestep96[-1])
y_test *= self.data_std
y_test += self.data_mean
y_pred_multi *= self.data_std
y_pred_multi += self.data_mean
y_pred_one96 *= self.data_std
y_pred_one96 += self.data_mean
self.mapes_multi.append(mape(y_test, y_pred_multi))
self.mapes_one96.append(mape(y_test, y_pred_one96))
prediction_day = self.X_train_start_date + pd.DateOffset(days=self.buffsize)
# statistics...
if prediction_day.dayofweek >= 5:
self.mapes_holidays_one96.append(self.mapes_one96[-1])
self.mapes_holidays_multi.append(self.mapes_multi[-1])
elif prediction_day.dayofweek < 5 and prediction_day not in pd.date_range(start='2013-12-21', periods=24)\
and prediction_day not in pd.date_range(start='2014-04-15', periods=20):
self.mapes_working_days_one96.append(self.mapes_one96[-1])
self.mapes_working_days_multi.append(self.mapes_multi[-1])
if prediction_day.month == 1:
self.mapes_jan_one96.append(self.mapes_one96[-1])
self.mapes_jan_multi.append(self.mapes_multi[-1])
elif prediction_day.month == 2:
self.mapes_feb_one96.append(self.mapes_one96[-1])
self.mapes_feb_multi.append(self.mapes_multi[-1])
elif prediction_day.month == 3:
self.mapes_mar_one96.append(self.mapes_one96[-1])
self.mapes_mar_multi.append(self.mapes_multi[-1])
elif prediction_day.month == 4:
self.mapes_apr_one96.append(self.mapes_one96[-1])
self.mapes_apr_multi.append(self.mapes_multi[-1])
elif prediction_day.month == 5:
self.mapes_may_one96.append(self.mapes_one96[-1])
self.mapes_may_multi.append(self.mapes_multi[-1])
elif prediction_day.month == 6:
self.mapes_jun_one96.append(self.mapes_one96[-1])
self.mapes_jun_multi.append(self.mapes_multi[-1])
elif prediction_day.month == 7:
self.mapes_jul_one96.append(self.mapes_one96[-1])
self.mapes_jul_multi.append(self.mapes_multi[-1])
elif prediction_day.month == 8:
self.mapes_aug_one96.append(self.mapes_one96[-1])
self.mapes_aug_multi.append(self.mapes_multi[-1])
elif prediction_day.month == 9:
self.mapes_sep_one96.append(self.mapes_one96[-1])
self.mapes_sep_multi.append(self.mapes_multi[-1])
elif prediction_day.month == 10:
self.mapes_oct_one96.append(self.mapes_one96[-1])
self.mapes_oct_multi.append(self.mapes_multi[-1])
elif prediction_day.month == 11:
self.mapes_nov_one96.append(self.mapes_one96[-1])
self.mapes_nov_multi.append(self.mapes_multi[-1])
elif prediction_day.month == 12:
self.mapes_dec_one96.append(self.mapes_one96[-1])
self.mapes_dec_multi.append(self.mapes_multi[-1])
print("Prediction for %s\n" % prediction_day.isoformat())
print("Multistep MAE was ", self.maes_multi[-1])
print("Multistep MSE was ", self.mses_multi[-1])
print("Multistep MAPE was ", self.mapes_multi[-1])
print("Onestep96 MAE was ", self.maes_one96[-1])
print("Onestep96 MSE was ", self.mses_one96[-1])
print("Onestep96 MAPE was ", self.mapes_one96[-1])
with open('%s/stats.txt' % self.statspath, 'a') as f:
f.write("%d\n" % self.counter)
f.write("Prediction for %s\n" % prediction_day.isoformat())
f.write("MAE multi: %f\n" % self.maes_multi[-1])
f.write("MSE multi: %f\n" % self.mses_multi[-1])
f.write("MAPE multi: %f\n" % self.mapes_multi[-1])
f.write("MAE one96: %f\n" % self.maes_one96[-1])
f.write("MSE one96: %f\n" % self.mses_one96[-1])
f.write("MAPE one96: %f\n\n" % self.mapes_one96[-1])
self.counter += 1
# create new training set, inputs and predict the next step
# self.X_train_onestep = np.delete(self.X_train_onestep, range(96), 0)
self.X_train_multistep = np.delete(self.X_train_multistep, 0, 0)
self.X_train_onestep96 = np.delete(self.X_train_onestep96, range(96), 0)
if self.tensor3D:
self.X_test_onestep96 = np.reshape(self.X_test_onestep96, (self.X_test_onestep96.shape[0], self.X_test_onestep96.shape[1], 1))
self.X_train_onestep96 = np.append(self.X_train_onestep96, self.X_test_onestep96, 0)
if self.tensor3D:
# self.X_train_multistep = np.vstack((self.X_train_multistep, np.reshape(self.y_train_multistep[-1], (
# 1, self.y_train_multistep[-1].shape[0], 1))))
new_entry = np.append(self.X_train_multistep[-1], self.y_train_multistep[-1])[96:]
new_entry = np.reshape(new_entry, (1, new_entry.shape[0], 1))
self.X_train_multistep = np.vstack((self.X_train_multistep, new_entry))
else:
# self.X_train_multistep = np.vstack((self.X_train_multistep, self.y_train_multistep[-1]))
new_entry = np.append(self.X_train_multistep[-1], self.y_train_multistep[-1])[96:]
self.X_train_multistep = np.vstack((self.X_train_multistep, new_entry))
self.y_train_multistep = np.delete(self.y_train_multistep, 0, 0)
self.y_train_multistep = np.vstack((self.y_train_multistep, new_data))
self.y_train_onestep96 = np.delete(self.y_train_onestep96, range(96), 0)
self.y_train_onestep96 = np.append(self.y_train_onestep96, new_data)
self.X_train_start_date += pd.DateOffset()
self.y_train_start_date += pd.DateOffset()
self.train()
self.X_test_onestep96 = []
for index in reversed(range(96)):
index += 1
self.X_test_onestep96.append([
self.y_train_onestep96[-index - 96 * 13],
self.y_train_onestep96[-index - 96 * 7],
self.y_train_onestep96[-index - 96 * 6 - 1],
self.y_train_onestep96[-index - 96 * 6],
self.y_train_onestep96[-index - 96 * 5],
self.y_train_onestep96[-index]
])
self.X_test_onestep96 = np.array(self.X_test_onestep96)
# X_test_multistep = self.y_train_multistep[-1]
X_test_multistep = np.append(self.X_train_multistep[-1], self.y_train_multistep[-1])[96:]
if self.tensor3D:
X_test_multistep = np.reshape(X_test_multistep, (1, X_test_multistep.shape[0], 1))
self.X_test_onestep96 = np.reshape(self.X_test_onestep96,
(self.X_test_onestep96.shape[0], self.X_test_onestep96.shape[1], 1))
else:
X_test_multistep = np.reshape(X_test_multistep, (1, X_test_multistep.shape[0]))
p2, p3 = self.predict(X_test_multistep, self.X_test_onestep96)
p3 = np.reshape(p3, (1, p3.shape[0]))
self.predictions_multistep = np.vstack((self.predictions_multistep, p2))
self.predictions_onestep96 = np.vstack((self.predictions_onestep96, p3))
# TODO try without clf, because of draw
plt.clf()
self.plot()
def plot(self):
# p1 = self.predictions_onestep[-1]
p2 = self.predictions_multistep[-1]
p3 = self.predictions_onestep96[-1]
train_data = self.X_train_multistep[:-1, :96]
train_data = np.reshape(train_data, (train_data.shape[0] * train_data.shape[1]))
train_data = np.append(train_data, self.X_train_multistep[-1])
train_data = np.append(train_data, self.y_train_multistep[-1])
train_index = pd.date_range(start=self.X_train_start_date, periods=self.buffsize * 96, freq='15T')
p3_index = p2_index = pd.date_range(start=train_index.date[-1] + pd.DateOffset(), periods=96, freq='15T')
train_series = pd.Series(data=train_data, index=train_index)
pred2_series = pd.Series(data=p2, index=p2_index)
pred3_series = pd.Series(data=p3, index=p3_index)  # api: pandas.Series
import multiprocessing as mp
import os
import string
import warnings
import numpy as np
import pandas as pd
import uncertainties as un
from nptdms import TdmsFile
from numpy import NaN, sqrt
from scipy.stats import t
from tables import NoSuchNodeError
from uncertainties import unumpy as unp
from . import diodes
from ..images import schlieren
from ... import uncertainty
from ...dir import d_drive
from ...simulation import thermo
_DIR = os.path.split(__file__)[0]
_STRUCTURE_END_DATES = (
pd.Timestamp("2019-11-01"),
pd.Timestamp("2020-05-05")
)
_SPATIAL_VARIATIONS = pd.read_csv(
os.path.join(
_DIR,
"../../data",
"spatial_variations.csv"
)
)
def _collect_schlieren_dirs(
base_dir,
test_date
):
"""
When reading in camera data from these tests, we will ignore the spatial
directory since it contains no schlieren information. It will still be
used, but not in this step. Directories containing a `.old` file have a
different structure than newer directories, which must be accounted for.
Parameters
----------
base_dir : str
Base data directory, (e.g. `/d/Data/Raw/`)
test_date : str
ISO 8601 formatted date of test data
Returns
-------
list
ordered list of directories containing diode output
"""
raw_dir = os.path.join(
base_dir,
test_date
)
if not os.path.isdir(raw_dir):
return []
contents = os.listdir(raw_dir)
if ".old" in contents:
raw_dir = os.path.join(
base_dir,
test_date,
"Camera"
)
contents = os.listdir(raw_dir)
return sorted([
os.path.join(raw_dir, item)
for item in contents
if os.path.isdir(os.path.join(raw_dir, item))
and "shot" in item.lower()
and os.path.exists(os.path.join(raw_dir, item, "frames"))
and os.path.exists(os.path.join(raw_dir, item, "bg"))
])
class _ProcessStructure0:
@classmethod
def _collect_test_dirs(
cls,
base_dir,
test_date
):
"""
The first step of reading in an old test directory is to determine
which directories contain valid tests. Under the old DAQ system, the
.vi would generate a new folder each time it was run. Only tests which
successfully generate a `diodes.tdms` file can be considered completed
tests. Some of these may still be failed detonations; this issue will
be dealt with on joining with schlieren data, which contains
information about whether or not a detonation attempt succeeded.
Parameters
----------
base_dir : str
Base data directory, (e.g. `/d/Data/Raw/`)
test_date : str
ISO 8601 formatted date of test data
Returns
-------
list
ordered list of directories containing diode output
"""
raw_dir = os.path.join(
base_dir,
test_date,
"Sensors"
)
return sorted([
root
for root, _, files in os.walk(raw_dir, topdown=True)
if "diodes.tdms" in files
])
@classmethod
def _get_cutoff_pressure(
cls,
df_tdms_pressure,
kind="fuel",
):
"""
This function accepts a dataframe imported from a `pressure.tdms` file.
Old test data was output in amps; this was good and I should probably
have kept it that way. Old tests also logged each fill event
separately. Extract the desired data, build a confidence interval,
apply the calibration, and output the resulting value including
uncertainty.
Parameters
----------
df_tdms_pressure : pd.DataFrame
Dataframe containing test-specific pressure trace
kind : str
Kind of cutoff pressure to get, e.g. fuel, oxidizer
Returns
-------
un.ufloat
Float with applied uncertainty
"""
kind = kind.title()
if kind not in {"Fuel", "Oxidizer", "Vacuum", "Diluent"}:
raise ValueError("bad kind")
# in these tests there is no vacuum logging. These are undiluted tests,
# which means the diluent pressure is identical to the vacuum pressure.
if kind == "Vacuum":
kind = "Diluent"
pressure = df_tdms_pressure[
"/'%s Fill'/'Manifold'" % kind
].dropna() * uncertainty.PRESSURE_CAL["slope"] + \
uncertainty.PRESSURE_CAL["intercept"]
# TODO: update calculation to be like new pressure calc
return unp.uarray(
pressure,
uncertainty.u_pressure(pressure, daq_err=False)
).mean()
@classmethod
def _get_partial_pressure(
cls,
df_tdms_pressure,
kind="fuel"
):
"""
Fill order: vacuum -> (diluent) -> oxidizer -> fuel
Parameters
----------
df_tdms_pressure : pd.DataFrame
Dataframe containing test-specific pressure trace
kind : str
Kind of cutoff pressure to get, e.g. fuel, oxidizer
Returns
-------
un.ufloat
Float with applied uncertainty
"""
p_ox = cls._get_cutoff_pressure(df_tdms_pressure, "oxidizer")
if kind.lower() == "fuel":
return cls._get_cutoff_pressure(df_tdms_pressure, "fuel") - p_ox
elif kind.lower() == "oxidizer":
return p_ox
else:
raise ValueError("only fuels and oxidizers in this analysis")
@classmethod
def _get_initial_pressure(
cls,
df_tdms_pressure
):
"""
In old data, the initial mixture pressure is the fuel cutoff pressure
Parameters
----------
df_tdms_pressure : pd.DataFrame
Dataframe containing test-specific pressure trace
Returns
-------
un.ufloat
Float with applied uncertainty
"""
return cls._get_cutoff_pressure(df_tdms_pressure, kind="fuel")
@classmethod
def _get_initial_temperature(
cls,
df_tdms_temperature
):
"""
Old temperatures need to come from the tube thermocouple, which is
type K, because the manifold thermocouple was jacked up at the time.
Parameters
----------
df_tdms_temperature : pd.DataFrame
Dataframe containing test-specific temperature trace
Returns
-------
un.ufloat
Test-averaged initial temperature with applied uncertainty
"""
# TODO: update calculation to be like new pressure calc
return un.ufloat(
df_tdms_temperature["/'Test Readings'/'Tube'"].mean(),
uncertainty.u_temperature(
df_tdms_temperature["/'Test Readings'/'Tube'"],
tc_type="K",
collapse=True
)
)
@classmethod
def __call__(
cls,
base_dir,
test_date,
f_a_st=0.04201680672268907,
multiprocess=False
):
"""
Process data from an old-style data set.
Parameters
----------
base_dir : str
Base data directory, (e.g. `/d/Data/Raw/`)
test_date : str
ISO 8601 formatted date of test data
f_a_st : float
Stoichiometric fuel/air ratio for the test mixture. Default value
is for propane/air.
multiprocess : bool
Set to True to parallelize processing of a single day's tests
Returns
-------
List[pd.DataFrame, dict]
A list in which the first item is a dataframe of the processed
tube data and the second is a dictionary containing
background-subtracted schlieren images
"""
df = pd.DataFrame(
columns=["date", "shot", "sensors", "diodes", "schlieren"],
)
df["sensors"] = cls._collect_test_dirs(base_dir, test_date)
df["schlieren"] = _collect_schlieren_dirs(base_dir, test_date)
df = df[df["schlieren"].apply(lambda x: "failed" not in x)]
df["date"] = test_date
df["shot"] = [
int(os.path.split(d)[1].lower().replace("shot", "").strip())
for d in df["schlieren"].values
]
images = dict()
if multiprocess:
pool = mp.Pool()
results = pool.starmap(
cls._process_single_test,
[[idx, row, f_a_st] for idx, row in df.iterrows()]
)
pool.close()
for idx, row_results in results:
df.at[idx, "phi"] = row_results["phi"]
df.at[idx, "u_phi"] = row_results["u_phi"]
df.at[idx, "p_0"] = row_results["p_0"]
df.at[idx, "u_p_0"] = row_results["u_p_0"]
df.at[idx, "t_0"] = row_results["t_0"]
df.at[idx, "u_t_0"] = row_results["u_t_0"]
df.at[idx, "p_fuel"] = row_results["p_fuel"]
df.at[idx, "u_p_fuel"] = row_results["u_p_fuel"]
df.at[idx, "p_oxidizer"] = row_results["p_oxidizer"]
df.at[idx, "u_p_oxidizer"] = row_results["u_p_oxidizer"]
df.at[idx, "wave_speed"] = row_results["wave_speed"]
df.at[idx, "u_wave_speed"] = row_results["u_wave_speed"]
df.at[idx, "diodes"] = row_results["diodes"]
images.update(row_results["schlieren"])
else:
for idx, row in df.iterrows():
_, row_results = cls._process_single_test(idx, row, f_a_st)
# output results
df.at[idx, "phi"] = row_results["phi"]
df.at[idx, "u_phi"] = row_results["u_phi"]
df.at[idx, "p_0"] = row_results["p_0"]
df.at[idx, "u_p_0"] = row_results["u_p_0"]
df.at[idx, "t_0"] = row_results["t_0"]
df.at[idx, "u_t_0"] = row_results["u_t_0"]
df.at[idx, "p_fuel"] = row_results["p_fuel"]
df.at[idx, "u_p_fuel"] = row_results["u_p_fuel"]
df.at[idx, "p_oxidizer"] = row_results["p_oxidizer"]
df.at[idx, "u_p_oxidizer"] = row_results["u_p_oxidizer"]
df.at[idx, "wave_speed"] = row_results["wave_speed"]
df.at[idx, "u_wave_speed"] = row_results["u_wave_speed"]
df.at[idx, "diodes"] = row_results["diodes"]
images.update(row_results["schlieren"])
return df, images
@classmethod
def _process_single_test(
cls,
idx,
row,
f_a_st
):
"""
Process a single row of test data. This has been separated into its
own function to facilitate the use of multiprocessing.
Parameters
----------
row : pd.Series
Current row of test data
f_a_st : float
Stoichiometric fuel/air ratio for the test mixture.
Returns
-------
Tuple(Int, Dict)
Calculated test data and associated uncertainty values for the
current row
"""
# background subtraction
image = {
"{:s}_shot{:02d}".format(
row["date"],
row["shot"]
): schlieren.bg_subtract_all_frames(row["schlieren"])
}
# gather pressure data
df_tdms_pressure = TdmsFile(
os.path.join(
row["sensors"],
"pressure.tdms"
)
).as_dataframe()
p_init = cls._get_initial_pressure(df_tdms_pressure)
p_fuel = cls._get_partial_pressure(
df_tdms_pressure,
kind="fuel"
)
p_oxidizer = cls._get_partial_pressure(
df_tdms_pressure,
kind="oxidizer"
)
phi = thermo.get_equivalence_ratio(p_fuel, p_oxidizer, f_a_st)
# gather temperature data
loc_temp_tdms = os.path.join(
row["sensors"],
"temperature.tdms"
)
if os.path.exists(loc_temp_tdms):
df_tdms_temperature = TdmsFile(
os.path.join(
row["sensors"],
"temperature.tdms"
)
).as_dataframe()
t_init = cls._get_initial_temperature(df_tdms_temperature)
else:
t_init = un.ufloat(NaN, NaN)
# wave speed measurement
diode_loc = os.path.join(row["sensors"], "diodes.tdms")
wave_speed = diodes.calculate_velocity(diode_loc)[0]
# output results
out = dict()
out["diodes"] = diode_loc
out["schlieren"] = image
out["phi"] = phi.nominal_value
out["u_phi"] = phi.std_dev
out["p_0"] = p_init.nominal_value
out["u_p_0"] = p_init.std_dev
out["t_0"] = t_init.nominal_value
out["u_t_0"] = t_init.std_dev
out["p_fuel"] = p_fuel.nominal_value
out["u_p_fuel"] = p_fuel.std_dev
out["p_oxidizer"] = p_oxidizer.nominal_value
out["u_p_oxidizer"] = p_oxidizer.std_dev
out["wave_speed"] = wave_speed.nominal_value
out["u_wave_speed"] = wave_speed.std_dev
return idx, out
class _ProcessStructure1:
@classmethod
def __call__(
cls,
base_dir,
test_date,
sample_time=pd.Timedelta(seconds=70),
mech="gri30.cti",
diode_spacing=1.0668,
multiprocess=False
):
"""
Process data from a day of testing using the newer directory structure
Parameters
----------
base_dir : str
Base data directory, (e.g. `/d/Data/Raw/`)
test_date : str
ISO 8601 formatted date of test data
sample_time : None or pd.Timedelta
Length of hold period at the end of a fill state. If None is passed,
value will be read from nominal test conditions.
mech : str
Mechanism for cantera calculations
diode_spacing : float
Diode spacing, in meters
multiprocess : bool
Set true to parallelize data analysis
Returns
-------
Tuple[pd.DataFrame, Dict]
Tuple containing a dataframe of test results and a dictionary of
background subtracted schlieren images
"""
dir_data = os.path.join(base_dir, test_date)
df_tests = cls._find_test_times(base_dir, test_date)
n_found_tests = len(df_tests)
n_shot_dirs = len([d for d in os.listdir(dir_data) if "Shot" in d])
if n_found_tests == 0:
raise ValueError("No tests detected in sensor log.tdms")
elif n_found_tests != n_shot_dirs:
raise ValueError("Number of tests does not match number of shots")
df_nominal = cls._load_nominal_conditions(dir_data)
df_sensor = TdmsFile(os.path.join(
dir_data, "sensor log.tdms"
)).as_dataframe()
df_pressure = cls._extract_sensor_data(df_sensor, "pressure")
df_temperature = cls._extract_sensor_data(df_sensor, "temperature")
del df_sensor
df_schlieren = pd.DataFrame(columns=["shot", "schlieren"])  # api: pandas.DataFrame
import joblib
import pandas as pd
class DecisionTreeClassifier:
def __init__(self):
path_to_artifacts = "../../research/"
self.value_fill_missing = joblib.load(path_to_artifacts + "pi_train_mode.joblib")
self.model = joblib.load(path_to_artifacts + "pi_decision_tree.joblib")
def preprocessing(self, input_data):
print("Preprocessing...")
input_data = pd.DataFrame(input_data, index=[0])  # api: pandas.DataFrame
import os
import numpy as np
import tensorflow as tf
from matplotlib import image
import cv2
import pandas as pd
import debiasmedimg.settings as settings
from copy import deepcopy
def get_filenames(csv_file, domains, merge=False):
"""
Extract the filenames of all images in the folders for all domains
:param csv_file: Path to the csv files containing info about the images
:param domains: List of domains names
:param merge: Whether to return one array of all images
:return: filenames
"""
csv_df = pd.read_csv(csv_file)
number_of_domains = len(domains)
files = [[] for _ in range(0, number_of_domains)]
for index, row in csv_df.iterrows():
# Only use images without issues
if pd.isna(row["issues"]) and not pd.isna(row["img_path"]):
# Find the id of the corresponding domain the current sample belongs to
if row["origin"] in domains:
domain_id = domains.index(row["origin"])
files[domain_id].extend([settings.DB_DIR + row["img_path"]])
if merge:
files = [item for sublist in files for item in sublist]
files = np.array([np.array(xi) for xi in files])
return files
def get_filtered_filenames(csv_file, domain):
"""
Extract the filenames of all images belonging to the given domain
:param csv_file: Path to the csv files containing info about the images
:param domain: Domain to return paths from
:return: filenames
"""
csv_df = pd.read_csv(csv_file)
files = []
for index, row in csv_df.iterrows():
# Only use images without issues
if pd.isna(row["issues"]) and row["origin"] == domain:
# Find the id of the corresponding domain the current sample belongs to
files.extend([settings.DB_DIR + row["img_path"]])
files = np.array([np.array(xi) for xi in files])
return files
def _downscale_img(img):
"""
Function for downsampling to 256x256 pixels
:param img: Image to downscale using a Gaussian pyramid
:return: Downsampled images
"""
assert img.shape[0] == img.shape[1], "Images have to be squares"
# Find largest power of two that is less than the image size
expo = img.shape[0].bit_length() - 1
# Make sure image isn't smaller than 256x256 pixels
if expo < 8:
return img
img = cv2.resize(img, dsize=(2 ** expo, 2 ** expo), interpolation=cv2.INTER_CUBIC)
g = img.copy()
# Resize image to 256x256 (=2**8)
for i in range(expo - 8):
g = cv2.pyrDown(g)
return g
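# Worked example of the pyramid logic above (the input size is an assumption): for a
# 1024x1024 image, expo = (1024).bit_length() - 1 = 10, so the image is resized to
# 2**10 = 1024 and then reduced by expo - 8 = 2 cv2.pyrDown() steps (1024 -> 512 -> 256),
# ending at 256x256 pixels.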
def minimal_pad(img, color=1):
"""
Add minimal padding to the image to make sure the image is square-shaped
:param img: Image to pad
:param color: Color to pad in (default is white)
"""
img_padded = deepcopy(img)
if img.shape[0] < img.shape[1]:
# Pad height
padding_height = img.shape[1] - img.shape[0]
padding_height_1 = int(padding_height / 2)
padding_height_2 = int(padding_height / 2)
if not padding_height % 2 == 0:
padding_height_1 = int(padding_height / 2)
padding_height_2 = int(padding_height / 2) + 1
img_padded = np.pad(img, pad_width=((padding_height_1, padding_height_2), (0, 0),
(0, 0)), mode='constant', constant_values=(color,))
elif img.shape[1] < img.shape[0]:
# Pad width
padding_width = img.shape[0] - img.shape[1]
padding_width_1 = int(padding_width / 2)
padding_width_2 = int(padding_width / 2)
if not padding_width % 2 == 0:
padding_width_1 = int(padding_width / 2)
padding_width_2 = int(padding_width / 2 + 1)
img_padded = np.pad(img, pad_width=((0, 0), (padding_width_1, padding_width_2),
(0, 0)), mode='constant', constant_values=(color,))
return img_padded
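# Worked example of the padding above (the shape is an assumption): an RGB image of shape
# (100, 200, 3) is shorter than it is wide, so padding_height = 100 and 50 rows of the pad
# colour are added above and below, giving a square (200, 200, 3) result; for odd
# differences the extra row/column goes on the second (bottom/right) side.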
def get_sample_from_path(path, color=1):
"""
Get an image from the given path in original size and downscaled
:param path: location of the image
:param color: Color to use for padding image to a square shape (if necessary)
:return: normalized image
"""
# Read in image and downscale it
x = image.imread(path)
if x.shape[2] == 4:
# If Image is read in as RGBA for some reason -> Can cut it off
x = x[:, :, :-1]
original_size = [deepcopy(x.shape[0]), deepcopy(x.shape[1])]
if not x.shape[0] == x.shape[1]:
x = minimal_pad(x, color=color)
x_small = _downscale_img(x)
# Normalize original size and small size
x = normalize(x)
x_small = normalize(x_small)
# Add dimension to make samples out of individual images
x = tf.expand_dims(x, axis=0)
x_small = tf.expand_dims(x_small, axis=0)
return x, x_small, original_size
def get_all_samples(file_list):
"""
Read in all samples from a path for evaluation
:param file_list: List of files to load
:return: Np array of images
"""
samples = []
for file in file_list:
img = image.imread(file)
if img.shape[2] == 4:
# Image is read in as RGBA for some reason, but all entries in A are 1 -> Can cut it off
img = img[:, :, :-1]
if not img.shape[0] == img.shape[1]:
img = minimal_pad(img)
samples.append(normalize_for_evaluation(img))
samples = np.array([np.array(xi) for xi in samples])
return samples
def get_domain_name(path, domains):
"""
Given a path, extract the domain name
:param path: Path to extract from
:param domains: Possible domains the image could belong to
:return:
"""
domain_name = None
for domain in domains:
if domain in path:
domain_name = domain
assert domain_name is not None
return domain_name
def get_real_samples(file_names, n_samples, batch_number, domains, return_domain_names=False, all_files=None):
"""
Select a batch of random samples, returns images and their domain names
:param file_names: Dataset to sample from
:param n_samples: Number of samples
:param batch_number: Current batch
:param domains: List of domain names the samples could belong to
:param return_domain_names: Whether to return the names of the domains of the images
:param all_files: all files, given as a list of arrays (one per domain) whose order reflects the order of the domains
:return: Images and targets
"""
# choose n instances
# Images are shuffled randomly at the beginning of a new epoch
ix = np.arange(batch_number * n_samples, (batch_number + 1) * n_samples)
# ix = np.random.randint(0, len(file_names), n_samples)
# retrieve selected images
x = file_names[ix]
# Replace paths with images
samples = []
domain_names = []
for index, path in enumerate(x):
# x[index] = _random_jitter(image)
# Normalize images
img = image.imread(path)
if img.shape[2] == 4:
# Image is read in as RGBA for some reason, but all entries in A are 1 -> Can cut it off
img = img[:, :, :-1]
if not img.shape[0] == img.shape[1]:
img = minimal_pad(img)
img = _downscale_img(img)
samples.append(normalize(img))
if return_domain_names and all_files is None:
# get domain names based on path (domain has to be included in path)
domain_name = get_domain_name(path, domains)
domain_names.append(domain_name)
elif return_domain_names:
domain_idx = -1
for idx, domain in enumerate(all_files):
if path in domain:
domain_idx = idx
assert not domain_idx == -1
domain_name = domains[domain_idx]
domain_names.append(domain_name)
samples = np.array([np.array(xi) for xi in samples])
if return_domain_names:
# Return images and their domain names
return samples, domain_names
# Return images
return samples
def create_patches(image_to_cut, patch_size=16):
"""
This function takes an image and cuts it into a variable number of non-overlapping patches
:param image_to_cut: a numpy array of an image
:param patch_size: the size that the patches should have in the end
Returns: an array of patches [n_patches x patch_size x patch_size x 3] and the number of patches
"""
first_indices_horizontal = np.arange(0, image_to_cut.shape[0] - patch_size + 1, patch_size)
first_indices_vertical = np.arange(0, image_to_cut.shape[1] - patch_size + 1, patch_size)
# Calculate the number of patches
number_resulting_patches = first_indices_horizontal.size * first_indices_vertical.size
patches = np.zeros((number_resulting_patches, patch_size, patch_size, image_to_cut.shape[2]))
patch_number = 0
for idx_ver in first_indices_vertical:
for idx_hor in first_indices_horizontal:
patches[patch_number, ...] = np.array(image_to_cut[idx_ver:idx_ver + patch_size, idx_hor:idx_hor + patch_size, :])
patch_number += 1
return patches, patch_number
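# Usage sketch (the input shape is an assumption): cutting a 256x256x3 array with the
# default patch_size=16 gives 16 start indices per axis, so the call returns patches of
# shape (256, 16, 16, 3) together with patch_number == 256.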
def get_previous_range(img):
"""
Establish which range the image belongs to
:param img: Image to test
:return: Minimum and maximum of the range the image probably belongs to
"""
if np.amax(img) > 1.0:
old_min = 0
old_max = 255
elif np.amin(img) < 0.0:
old_min = -1.0
old_max = 1.0
else:
old_min = 0.0
old_max = 1.0
return old_min, old_max
def normalize(img):
"""
Normalize an image to range [-1,1]
:param img: Image to normalize
:return: Normalized image
"""
old_min, old_max = get_previous_range(img)
# NewValue = (((OldValue - OldMin) * NewRange) / OldRange) + NewMin
# NewMin = -1, NewMax = 1, NewRange = 1 - (-1)
new_min = -1.0
new_max = 1.0
img = (img - old_min) * (new_max - new_min) / (old_max - old_min) + new_min
# Conv2D layers cast from float64 to float32, so make sure we have the correct type here
img = tf.cast(img, tf.float32)
return img
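# Worked example of the rescaling above (a uint8-range input is assumed, so the detected
# range is [0, 255]): a pixel value of 255 maps to (255 - 0) * (1 - (-1)) / (255 - 0) + (-1)
# = 1.0, 0 maps to -1.0 and 127.5 would map to 0.0, so the output always lies in [-1, 1].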
def normalize_for_display(img):
"""
Normalize an image to range [0,1]
:param img: Image to normalize
:return: Normalized image
"""
old_min, old_max = get_previous_range(img)
# NewValue = (((OldValue - OldMin) * NewRange) / OldRange) + NewMin
# NewMin = 0, NewMax = 1, NewRange = 1 - 0
new_min = 0.0
new_max = 1.0
img = (img - old_min) * (new_max - new_min) / (old_max - old_min) + new_min
return img
def normalize_for_evaluation(img):
"""
Normalize an image to range [0,255]
:param img: Image to normalize
:return: Normalized image
"""
old_min, old_max = get_previous_range(img)
# NewValue = (((OldValue - OldMin) * NewRange) / OldRange) + NewMin
# NewMin = 0, NewMax = 255, NewRange = 1 - 0
new_min = 0
new_max = 255
img = (img - old_min) * (new_max - new_min) / (old_max - old_min) + new_min
img = tf.cast(img, tf.int32)
return np.asarray(img)
def save_to_csv(run_id, epoch, a_name, b_name, means, approach, dataset, validate=True, only_ab=False):
"""
Save the evaluation results to a csv file
:param run_id: Run id of the evaluated run
:param epoch: Epoch that has been evaluated
:param a_name: Name of domain A
:param b_name: Name of domain b
:param means: List of evaluation results
:param approach: Which approach was used for transformation
:param dataset: Name of the dataset that was bias transferred
:param validate: Whether we are currently validating or testing
:param only_ab: Whether we only evaluated a transformation from A to B, not the other way around
"""
if only_ab:
val_dict = [{"run_id": run_id, "epoch": epoch, "a": a_name, "b": b_name, "ssim_inout_a": means[0],
"fid_original": means[1], "fid_b": means[2]}]
val_df = pd.DataFrame.from_dict(val_dict)
else:
val_dict = [{"run_id": run_id, "epoch": epoch, "a": a_name, "b": b_name, "ssim_inout_a": means[0],
"ssim_inout_b": means[1], "fid_original": means[2], "fid_a": means[3], "fid_b": means[4]}]
val_df = pd.DataFrame.from_dict(val_dict)  # api: pandas.DataFrame.from_dict
import time
import traceback
from abc import abstractmethod
from datetime import datetime
from itertools import product
from pathlib import Path
import feather
import pandas as pd
from ..options import ModelOptions
from ..plotting import plot
from ..utils import ModelHandler, common_utils, file_utils
from . import EVALUATORS
class EvaluationHandler(ModelHandler):
"""Base class for evaluating models. Inherits ModelHandler
Relevant options:
<Global>
data_config: Path to the data configuration file used to initialize the data handler.
evaluation_dir: Directory where to save results
<Models>
model_dir: Directory where to load models.
predictions_dir: Directory where to load/save predictions.
reclassify: Run the model again even if a prediction file is found
<Evaluators>
type: The type of evaluator corresponding to one of the keys of EVALUATORS attribute
of the subclass
Args:
ModelHandler ([type]): [description]
Raises:
AttributeError: [description]
Returns:
[type]: [description]
"""
PREDICTIONS_STATS_FILE_NAME = "predictions_stats.csv"
# EVALUATORS = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
plot.set_plotting_method(self.opts) # pylint: disable=no-member
@abstractmethod
def classify_database(self, model, database, db_type="test"):
pass
def get_predictions_dir(self, model_opts, database):
preds_dir = self.get_option("predictions_dir", model_opts)
if not preds_dir:
raise AttributeError(
"Please provide a directory where to save the predictions using"
+ " the predictions_dir option in the config file"
)
return Path(preds_dir)
def get_predictions_file_name(self, model_opts, database):
return (
database.name
+ "_"
+ model_opts.model_id
+ "_v"
+ str(model_opts.load_version)
+ ".feather"
)
def get_predictions(self, model_opts, database):
preds_dir = self.get_predictions_dir(model_opts, database)
file_name = self.get_predictions_file_name(model_opts, database)
pred_file = preds_dir / file_name
if not model_opts.get("reclassify", False) and pred_file.exists():
predictions = feather.read_dataframe(pred_file)
else:
# * Load predictions stats database
scenario_info = {}
preds_stats = None
preds_stats_dir = Path(self.get_option("predictions_dir", model_opts))
preds_stats_path = preds_stats_dir / self.PREDICTIONS_STATS_FILE_NAME
if preds_stats_path.exists():
preds_stats = pd.read_csv(preds_stats_path)  # api: pandas.read_csv
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/14 18:19
Desc: Sina Finance - stock options
https://stock.finance.sina.com.cn/option/quotes.html
Options - CFFEX - CSI 300 index
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
Options - SSE (Shanghai Stock Exchange) - 50ETF
Options - SSE (Shanghai Stock Exchange) - 300ETF
https://stock.finance.sina.com.cn/option/quotes.html
"""
import json
import datetime
from typing import Dict, List, Tuple
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Options - CFFEX - CSI 300 index
def option_cffex_hs300_list_sina() -> Dict[str, List[str]]:
"""
Sina Finance - CFFEX - CSI 300 index - all contracts; the first contract returned is the main contract
Currently Sina Finance only provides CFFEX data for a single product, the CSI 300 index
:return: CFFEX - CSI 300 index - all contracts
:rtype: dict
"""
url = "https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
symbol = soup.find(attrs={"id": "option_symbol"}).find("li").text
temp_attr = soup.find(attrs={"id": "option_suffix"}).find_all("li")
contract = [item.text for item in temp_attr]
return {symbol: contract}
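# Usage sketch (live network call; the concrete contract codes shown are assumptions and
# change over time):
# import akshare as ak
# contracts = ak.option_cffex_hs300_list_sina()  # e.g. {"沪深300指数": ["io2104", "io2105", ...]}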
def option_cffex_hs300_spot_sina(symbol: str = "io2104") -> pd.DataFrame:
"""
CFFEX - CSI 300 index - specified contract - real-time quotes
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
:param symbol: contract code; use the option_cffex_hs300_list_sina function to look up available codes
:type symbol: str
:return: CFFEX - CSI 300 index - specified contract - real-time call/put quotes
:rtype: pd.DataFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/OptionService.getOptionData"
params = {
"type": "futures",
"product": "io",
"exchange": "cffex",
"pinzhong": symbol,
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{") : data_text.rfind("}") + 1])
option_call_df = pd.DataFrame(
data_json["result"]["data"]["up"],
columns=[
"看涨合约-买量",
"看涨合约-买价",
"看涨合约-最新价",
"看涨合约-卖价",
"看涨合约-卖量",
"看涨合约-持仓量",
"看涨合约-涨跌",
"行权价",
"看涨合约-标识",
],
)
option_put_df = pd.DataFrame(
data_json["result"]["data"]["down"],
columns=[
"看跌合约-买量",
"看跌合约-买价",
"看跌合约-最新价",
"看跌合约-卖价",
"看跌合约-卖量",
"看跌合约-持仓量",
"看跌合约-涨跌",
"看跌合约-标识",
],
)
data_df = pd.concat([option_call_df, option_put_df], axis=1)
data_df['看涨合约-买量'] = pd.to_numeric(data_df['看涨合约-买量'])
data_df['看涨合约-买价'] = pd.to_numeric(data_df['看涨合约-买价'])
data_df['看涨合约-最新价'] = pd.to_numeric(data_df['看涨合约-最新价'])
data_df['看涨合约-卖价'] = pd.to_numeric(data_df['看涨合约-卖价'])
data_df['看涨合约-卖量'] = pd.to_numeric(data_df['看涨合约-卖量'])
data_df['看涨合约-持仓量'] = pd.to_numeric(data_df['看涨合约-持仓量'])
data_df['看涨合约-涨跌'] = pd.to_numeric(data_df['看涨合约-涨跌'])
data_df['行权价'] = pd.to_numeric(data_df['行权价'])
data_df['看跌合约-买量'] = pd.to_numeric(data_df['看跌合约-买量'])
data_df['看跌合约-买价'] = pd.to_numeric(data_df['看跌合约-买价'])
data_df['看跌合约-最新价'] = pd.to_numeric(data_df['看跌合约-最新价'])
data_df['看跌合约-卖价'] = pd.to_numeric(data_df['看跌合约-卖价'])
data_df['看跌合约-卖量'] = pd.to_numeric(data_df['看跌合约-卖量'])
data_df['看跌合约-持仓量'] = pd.to_numeric(data_df['看跌合约-持仓量'])
data_df['看跌合约-涨跌'] = pd.to_numeric(data_df['看跌合约-涨跌'])
return data_df
def option_cffex_hs300_daily_sina(symbol: str = "io2202P4350") -> pd.DataFrame:
"""
新浪财经-中金所-沪深300指数-指定合约-日频行情
:param symbol: 具体合约代码(包括看涨和看跌标识), 可以通过 ak.option_cffex_hs300_spot_sina 中的 call-标识 获取
:type symbol: str
:return: 日频率数据
:rtype: pd.DataFrame
"""
year = datetime.datetime.now().year
month = datetime.datetime.now().month
day = datetime.datetime.now().day
url = f"https://stock.finance.sina.com.cn/futures/api/jsonp.php/var%20_{symbol}{year}_{month}_{day}=/FutureOptionAllService.getOptionDayline"
params = {"symbol": symbol}
r = requests.get(url, params=params)
data_text = r.text
data_df = pd.DataFrame(
eval(data_text[data_text.find("[") : data_text.rfind("]") + 1])
)
data_df.columns = ["open", "high", "low", "close", "volume", "date"]
data_df = data_df[[
"date",
"open",
"high",
"low",
"close",
"volume",
]]
data_df['date'] = pd.to_datetime(data_df['date']).dt.date
data_df['open'] = pd.to_numeric(data_df['open'])
data_df['high'] = pd.to_numeric(data_df['high'])
data_df['low'] = pd.to_numeric(data_df['low'])
data_df['close'] = pd.to_numeric(data_df['close'])
data_df['volume'] = pd.to_numeric(data_df['volume'])
return data_df
# Options - SSE - 50ETF
def option_sse_list_sina(symbol: str = "50ETF", exchange: str = "null") -> List[str]:
"""
新浪财经-期权-上交所-50ETF-合约到期月份列表
https://stock.finance.sina.com.cn/option/quotes.html
:param symbol: 50ETF or 300ETF
:type symbol: str
:param exchange: null
:type exchange: str
:return: 合约到期时间
:rtype: list
"""
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getStockName"
params = {"exchange": f"{exchange}", "cate": f"{symbol}"}
r = requests.get(url, params=params)
data_json = r.json()
date_list = data_json["result"]["data"]["contractMonth"]
return ["".join(i.split("-")) for i in date_list][1:]
def option_sse_expire_day_sina(
trade_date: str = "202102", symbol: str = "50ETF", exchange: str = "null"
) -> Tuple[str, int]:
"""
指定到期月份指定品种的剩余到期时间
:param trade_date: 到期月份: 202002, 20203, 20206, 20209
:type trade_date: str
:param symbol: 50ETF or 300ETF
:type symbol: str
:param exchange: null
:type exchange: str
:return: (到期时间, 剩余时间)
:rtype: tuple
"""
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getRemainderDay"
params = {
"exchange": f"{exchange}",
"cate": f"{symbol}",
"date": f"{trade_date[:4]}-{trade_date[4:]}",
}
r = requests.get(url, params=params)
data_json = r.json()
data = data_json["result"]["data"]
if int(data["remainderDays"]) < 0:
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getRemainderDay"
params = {
"exchange": f"{exchange}",
"cate": f"{'XD' + symbol}",
"date": f"{trade_date[:4]}-{trade_date[4:]}",
}
r = requests.get(url, params=params)
data_json = r.json()
data = data_json["result"]["data"]
return data["expireDay"], int(data["remainderDays"])
def option_sse_codes_sina(symbol: str = "看涨期权", trade_date: str = "202202", underlying: str = "510050") -> pd.DataFrame:
"""
上海证券交易所-所有看涨和看跌合约的代码
:param symbol: choice of {"看涨期权", "看跌期权"}
:type symbol: str
:param trade_date: 期权到期月份
:type trade_date: "202002"
:param underlying: 标的产品代码 华夏上证 50ETF: 510050 or 华泰柏瑞沪深 300ETF: 510300
:type underlying: str
:return: 看涨看跌合约的代码
:rtype: Tuple[List, List]
"""
if symbol == "看涨期权":
url = "".join(
["http://hq.sinajs.cn/list=OP_UP_", underlying, str(trade_date)[-4:]]
)
else:
url = "".join(
["http://hq.sinajs.cn/list=OP_DOWN_", underlying, str(trade_date)[-4:]]
)
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Referer': 'https://stock.finance.sina.com.cn/',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'script',
'Sec-Fetch-Mode': 'no-cors',
'Sec-Fetch-Site': 'cross-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_temp = data_text.replace('"', ",").split(",")
temp_list = [i[7:] for i in data_temp if i.startswith("CON_OP_")]
temp_df = pd.DataFrame(temp_list)
temp_df.reset_index(inplace=True)
temp_df['index'] = temp_df.index + 1
temp_df.columns = [
'序号',
'期权代码',
]
return temp_df
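# Illustrative usage sketch (kept as comments so nothing runs on import); the variable names are
# assumptions. List the call contract codes of one expiry month, then fetch real-time quotes for
# the first code with option_sse_spot_price_sina defined below:
#   codes_df = option_sse_codes_sina(symbol="看涨期权", trade_date="202202", underlying="510050")
#   first_code = codes_df["期权代码"].iloc[0]
#   quotes = option_sse_spot_price_sina(symbol=first_code)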
def option_sse_spot_price_sina(symbol: str = "10003720") -> pd.DataFrame:
"""
新浪财经-期权-期权实时数据
:param symbol: 期权代码
:type symbol: str
:return: 期权量价数据
:rtype: pandas.DataFrame
"""
url = f"http://hq.sinajs.cn/list=CON_OP_{symbol}"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Referer': 'https://stock.finance.sina.com.cn/',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'script',
'Sec-Fetch-Mode': 'no-cors',
'Sec-Fetch-Site': 'cross-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1 : data_text.rfind('"')].split(",")
field_list = [
"买量",
"买价",
"最新价",
"卖价",
"卖量",
"持仓量",
"涨幅",
"行权价",
"昨收价",
"开盘价",
"涨停价",
"跌停价",
"申卖价五",
"申卖量五",
"申卖价四",
"申卖量四",
"申卖价三",
"申卖量三",
"申卖价二",
"申卖量二",
"申卖价一",
"申卖量一",
"申买价一",
"申买量一 ",
"申买价二",
"申买量二",
"申买价三",
"申买量三",
"申买价四",
"申买量四",
"申买价五",
"申买量五",
"行情时间",
"主力合约标识",
"状态码",
"标的证券类型",
"标的股票",
"期权合约简称",
"振幅",
"最高价",
"最低价",
"成交量",
"成交额",
]
data_df = pd.DataFrame(list(zip(field_list, data_list)), columns=["字段", "值"])
return data_df
def option_sse_underlying_spot_price_sina(symbol: str = "sh510300") -> pd.DataFrame:
"""
期权标的物的实时数据
:param symbol: sh510050 or sh510300
:type symbol: str
:return: 期权标的物的信息
:rtype: pandas.DataFrame
"""
url = f"http://hq.sinajs.cn/list={symbol}"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Proxy-Connection': 'keep-alive',
'Referer': 'http://vip.stock.finance.sina.com.cn/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1 : data_text.rfind('"')].split(",")
field_list = [
"证券简称",
"今日开盘价",
"昨日收盘价",
"最近成交价",
"最高成交价",
"最低成交价",
"买入价",
"卖出价",
"成交数量",
"成交金额",
"买数量一",
"买价位一",
"买数量二",
"买价位二",
"买数量三",
"买价位三",
"买数量四",
"买价位四",
"买数量五",
"买价位五",
"卖数量一",
"卖价位一",
"卖数量二",
"卖价位二",
"卖数量三",
"卖价位三",
"卖数量四",
"卖价位四",
"卖数量五",
"卖价位五",
"行情日期",
"行情时间",
"停牌状态",
]
data_df = pd.DataFrame(list(zip(field_list, data_list)), columns=["字段", "值"])
return data_df
def option_sse_greeks_sina(symbol: str = "10003045") -> pd.DataFrame:
"""
期权基本信息表
:param symbol: 合约代码
:type symbol: str
:return: 期权基本信息表
:rtype: pandas.DataFrame
"""
url = f"http://hq.sinajs.cn/list=CON_SO_{symbol}"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Proxy-Connection': 'keep-alive',
'Referer': 'http://vip.stock.finance.sina.com.cn/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1: data_text.rfind('"')].split(",")
field_list = [
"期权合约简称",
"成交量",
"Delta",
"Gamma",
"Theta",
"Vega",
"隐含波动率",
"最高价",
"最低价",
"交易代码",
"行权价",
"最新价",
"理论价值",
]
data_df = pd.DataFrame(
list(zip(field_list, [data_list[0]] + data_list[4:])), columns=["字段", "值"]
)
return data_df
def option_sse_minute_sina(symbol: str = "10003720") -> pd.DataFrame:
"""
指定期权品种在当前交易日的分钟数据, 只能获取当前交易日的数据, 不能获取历史分钟数据
https://stock.finance.sina.com.cn/option/quotes.html
:param symbol: 期权代码
:type symbol: str
:return: 指定期权的当前交易日的分钟数据
:rtype: pandas.DataFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionDaylineService.getOptionMinline"
params = {"symbol": f"CON_OP_{symbol}"}
headers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://stock.finance.sina.com.cn/option/quotes.html',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'script',
'sec-fetch-mode': 'no-cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = data_json["result"]["data"]
data_df = pd.DataFrame(temp_df)
data_df.columns = ["时间", "价格", "成交", "持仓", "均价", "日期"]
data_df = data_df[[
"日期",
"时间",
"价格",
"成交",
"持仓",
"均价"
]]
data_df['日期'] = pd.to_datetime(data_df['日期']).dt.date
data_df['日期'].ffill(inplace=True)
data_df['价格'] = pd.to_numeric(data_df['价格'])
data_df['成交'] = pd.to_numeric(data_df['成交'])
data_df['持仓'] = pd.to_numeric(data_df['持仓'])
data_df['均价'] = pd.to_numeric(data_df['均价'])
return data_df
def option_sse_daily_sina(symbol: str = "10003889") -> pd.DataFrame:
"""
指定期权的日频率数据
:param symbol: 期权代码
:type symbol: str
:return: 指定期权的所有日频率历史数据
:rtype: pandas.DataFrame
"""
url = "http://stock.finance.sina.com.cn/futures/api/jsonp_v2.php//StockOptionDaylineService.getSymbolInfo"
params = {"symbol": f"CON_OP_{symbol}"}
headers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://stock.finance.sina.com.cn/option/quotes.html',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'script',
'sec-fetch-mode': 'no-cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
}
r = requests.get(url, params=params, headers=headers)
data_text = r.text
data_json = json.loads(data_text[data_text.find("(") + 1 : data_text.rfind(")")])
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["日期", "开盘", "最高", "最低", "收盘", "成交量"]
temp_df['日期'] = pd.to_datetime(temp_df['日期']).dt.date
temp_df['开盘'] = pd.to_numeric(temp_df['开盘'])
temp_df['最高'] = pd.to_numeric(temp_df['最高'])
    temp_df['最低'] = pd.to_numeric(temp_df['最低'])
    temp_df['收盘'] = pd.to_numeric(temp_df['收盘'])
    temp_df['成交量'] = pd.to_numeric(temp_df['成交量'])
    return temp_df
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
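# Illustrative sketch of the two ramp functions above, using made-up scalar Nc values for
# readability (in the model Nc_old and Nc_new are contact matrices):
#   t_start = pd.Timestamp('2020-03-15')
#   ramp_fun(1.0, 4.0, pd.Timestamp('2020-03-20'), t_start, l=10)
#   # -> 1.0 + 3.0*5/10 = 2.5, i.e. halfway through a 10-day compliance ramp
#   delayed_ramp_fun(1.0, 4.0, pd.Timestamp('2020-03-20'), pd.Timedelta('2D'), 10, t_start)
#   # -> 1.0 + 3.0*3/10 = 1.9, because the first 2 days have no effect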
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
            place = place / place.sum(axis=1)[:, np.newaxis]
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
        Note: only works with datetime input (no integer time steps).
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
                place = default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
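# Illustrative wiring sketch (kept as comments so nothing runs on import); only functions defined
# in this module are used, the variable names are assumptions:
#   all_mob, avg_mob = load_all_mobility_data(agg='prov', dtype='fractional')
#   mobility_func = make_mobility_update_function(all_mob, avg_mob)
#   P = mobility_func.mobility_wrapper_func(pd.Timestamp('2020-04-01'), states=None, param=None)
#   # on or before 2020-03-17 the wrapper returns an identity matrix (no inter-patch mobility)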
###################
## VOC functions ##
###################
class make_VOC_function():
"""
    Class that returns a time-dependent parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
    The current implementation includes the alpha to delta strains.
    If the class is initialized without arguments, a logistic model fitted to prevalence data of the alpha-gamma variant is used. The class can also be initialized with the alpha-gamma prevalence data provided by Prof. <NAME>.
    A logistic model fitted to prevalence data of the delta variant is always used.
    Input
    -----
    *df_abc: pd.DataFrame (optional)
        Alpha, Beta, Gamma prevalence dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
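# Illustrative sketch of the logistic variant fraction used above (delta variant), with the fitted
# parameters hard-coded in VOC_delta_logistic:
#   t = pd.Timestamp('2021-06-25'); t_sig = pd.Timestamp('2021-06-25'); k = 0.11
#   1/(1 + np.exp(-k*(t - t_sig)/pd.Timedelta(days=1)))   # -> 0.5 at the inflection date
# i.e. the modelled delta share is 50% on 2021-06-25 and approaches 1 as t moves past t_sig.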
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first dose data by sciensano are used. In the future, a hypothetical scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
    df : pd.DataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
raise Exception(f"Space is {G}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
            A series of age-stratified vaccination incidences. Index must be of type pd.IntervalIndex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
            NIS code of the considered spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
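    # Illustrative sketch of the demographic weighting above, with made-up numbers: if the data
    # report 1000 doses in a single 0-18 bin and the desired bins are (0,12) and (12,18), the doses
    # are split proportionally to the population of each year of age taken from construct_initN:
    #   out[(0,12)]  = sum over a in 0..11  of N_a/N_{0-18} * 1000
    #   out[(12,18)] = sum over a in 12..17 of N_a/N_{0-18} * 1000
    # The bin edges and dose count in this example are assumptions.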
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
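    # Illustrative sketch of the prioritised allocation used in both campaign loops above, with
    # made-up numbers and refusal set to zero so that eligibility equals VE:
    #   vacc_order = [2, 1, 0], VE = [80, 30, 500]
    #   daily_doses = 100 -> N_vacc = [0, 0, 100]    # doses run out inside the oldest group
    #   daily_doses = 600 -> N_vacc = [70, 30, 500]  # work down vacc_order until doses run out
    # The loop also halts as soon as idx reaches stop_idx.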
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
        First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_doses" first doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses, individuals are transferred to the vaccination circuit after some time delay after the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
        daily_doses : int
            Number of doses administered per day. Default is 60000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : float
Index of age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups, an index of 8 corresponds to not vaccinating the age group corresponding with vacc_order[idx].
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
        # Only for the non-spatial multi-dose vaccination model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
    Class that returns a contact matrix based on 4 prevention parameters by default, but also has other policy functions defined.
Input
-----
    Nc_all : dictionary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and endtimes of dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
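    # Illustrative sketch of the weighted sum returned above: with all sectors fully open
    # (school = work = transport = leisure = others = 1) the effective contact matrix reduces to
    #   CM = prev_home*Nc_home + prev_schools*Nc_schools + prev_work*Nc_work
    #        + prev_rest*(Nc_transport + Nc_leisure + Nc_others)
    # so e.g. prev_rest = 0.5 halves the contribution of all non-home, non-school, non-work contacts.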
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
        t25 = pd.Timestamp('2021-12-26') # Start of Christmas break
        t26 = pd.Timestamp('2022-01-06') # End of Christmas break
t27 = pd.Timestamp('2022-02-28') # Start of Spring Break
t28 = pd.Timestamp('2022-03-06') # End of Spring Break
t29 = pd.Timestamp('2022-04-04') # Start of Easter Break
t30 = pd.Timestamp('2022-04-17') # End of Easter Break
t31 = pd.Timestamp('2022-07-01') # Start of summer holidays
t32 = pd.Timestamp('2022-09-01') # End of summer holidays
t33 = pd.Timestamp('2022-09-21') # Opening of universities
t34 = pd.Timestamp('2022-10-31') # Start of autumn break
t35 = pd.Timestamp('2022-11-06') # End of autumn break
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
            policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
def policies_all_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
# Fourth WAVE
t25 = pd.Timestamp('2021-11-22') # Start of mandatory telework + start easing in leisure restrictions
        t26 = pd.Timestamp('2021-12-18') # Start of Christmas break for schools
        t27 = pd.Timestamp('2021-12-26') # Start of Christmas break for the general population
        t28 = pd.Timestamp('2022-01-06') # End of Christmas break
t29 = pd.Timestamp('2022-01-28') # End of measures
t30 = pd.Timestamp('2022-02-28') # Start of Spring Break
t31 = pd.Timestamp('2022-03-06') # End of Spring Break
t32 = pd.Timestamp('2022-04-04') # Start of Easter Break
t33 = pd.Timestamp('2022-04-17') # End of Easter Break
t34 = pd.Timestamp('2022-07-01') # Start of summer holidays
t35 = pd.Timestamp('2022-09-01') # End of summer holidays
t36 = pd.Timestamp('2022-09-21') # Opening of universities
t37 = pd.Timestamp('2022-10-31') # Start of autumn break
t38 = pd.Timestamp('2022-11-06') # End of autumn break
scenarios_work = [1, 0.7, 0.7, 0.7, 0.7]
scenarios_schools = [1, 1, 1, 1, 1]
scenarios_leisure = [1, 1, 0.75, 0.50, 0.25]
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
            policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
# End of autumn break --> Date of measures
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t25 < t <= t25 + pd.Timedelta(5, unit='D'):
# Date of measures --> End easing in leisure restrictions
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
return self.ramp_fun(policy_old, policy_new, t, t25, 5)
elif t25 + pd.Timedelta(5, unit='D') < t <= t26:
# End easing in leisure restrictions --> Early schools closure before Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
elif t26 < t <= t27:
# Early schools closure before Christmas holiday --> Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=0)
elif t27 < t <= t28:
# Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=scenarios_work[scenario]-0.2, leisure=scenarios_leisure[scenario], transport=scenarios_work[scenario]-0.2, school=0)
elif t28 < t <= t29:
            # Christmas holiday --> End of measures
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=scenarios_leisure[scenario], work=scenarios_work[scenario], school=1)
elif t29 < t <= t30:
# End of Measures --> Spring break
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1, work=1, transport=1, others=1, school=1)
elif t30 < t <= t31:
# Spring Break
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=0.7, others=1, school=0)
elif t31 < t <= t32:
# Spring Break --> Easter
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t32 < t <= t33:
# Easter
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
elif t33 < t <= t34:
# Easter --> Summer
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
elif t35 < t <= t36:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.7)
elif t36 < t <= t37:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t37 < t <= t38:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
###################
## Spatial model ##
###################
def policies_all_spatial(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-07') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-05-07') # Start of relaxations
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-11-01') # Start of autumn break
t23 = pd.Timestamp('2021-11-07') # End of autumn break
        t24 = pd.Timestamp('2021-12-26') # Start of Christmas break
        t25 = pd.Timestamp('2022-01-06') # End of Christmas break
t26 = pd.Timestamp('2022-02-28') # Start of Spring Break
t27 = pd.Timestamp('2022-03-06') # End of Spring Break
t28 = pd.Timestamp('2022-04-04') # Start of Easter Break
t29 = pd.Timestamp('2022-04-17') # End of Easter Break
t30 = pd.Timestamp('2022-07-01') # Start of summer holidays
t31 = pd.Timestamp('2022-09-01') # End of summer holidays
t32 = pd.Timestamp('2022-09-21') # Opening of universities
t33 = pd.Timestamp('2022-10-31') # Start of autumn break
t34 = pd.Timestamp('2022-11-06') # End of autumn break
spatial_summer_lockdown_2020 = tuple(np.array([prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_lockdown, # W
prev_rest_lockdown, # Bxl
prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_relaxation, prev_rest_relaxation, # W
prev_rest_lockdown, # F
0.7*prev_rest_relaxation, 0.7*prev_rest_relaxation])) # W
co_F = 0.60
co_W = 0.50
co_Bxl = 0.45
spatial_summer_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
co_F = 1.00
co_W = 0.50
co_Bxl = 0.45
relaxation_flanders_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
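        # The three tuples above hold one coefficient per spatial unit (F = Flanders,
        # W = Wallonia, Bxl = Brussels) and are passed in place of the prev_rest
        # effectivity argument of __call__ during the corresponding periods.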
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
# 2020
elif t3 < t <= t4:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_relaxation, school=0)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_lockdown_2020, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.8)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
            policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0.8)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=0)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=1, leisure=1, transport=1, others=1, school=1)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=1, leisure=1, transport=1, others=1, school=1)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
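    # A minimal sketch of the linear ramp used above to interpolate between two policies
    # over a compliance window of l days (assumption: the exact signature/behaviour of the
    # real self.ramp_fun may differ from this sketch):
    #
    #   def ramp_fun(policy_old, policy_new, t, t_start, l):
    #       frac = min(max((t - t_start) / pd.Timedelta(l, unit='D'), 0.0), 1.0)
    #       return policy_old + frac * (policy_new - policy_old)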
def policies_all_spatial_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = | pd.Timestamp('2020-08-07') | pandas.Timestamp |
import pandas as pd
def subset_grm(grm, grm_indiv, target_indiv):
set_target_indiv = set(target_indiv)
isin = np.array([ g in set_target_indiv for g in grm_indiv ])
grm = grm[:, isin][isin, :]
grm_indiv = list(np.array(grm_indiv)[isin])
return grm, grm_indiv
def subset_y(df, indiv):
df_indiv = pd.DataFrame({'indiv': indiv})
df = pd.merge(df_indiv, df, on='indiv')
return df.iloc[:, 1:].values
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(prog='run_pyemma.py', description='''
        Run a series of variance component estimations given a GRM and y.
''')
parser.add_argument('--grm', help='''
Provide the prefix of GRM files.
Will assume [prefix].grm.gz and [prefix].grm.id.
''')
parser.add_argument('--reml', action='store_true', help='''
If specified, it will use reml.
''')
parser.add_argument('--grm_cache', default=None, help='''
        Optional. If specified, will use the cached GRM EVD if the file exists.
        If specified but the file does not exist, will read the GRM, calculate
        the EVD from scratch, and cache the result.
        This is a prefix; reml or mle will be appended accordingly.
CAUTION: If using GRM cache, the --y_table should have exactly
the same set of individuals.
''')
parser.add_argument('--y_table', nargs='+', help='''
        The phenotype (y) table followed by the name of the individual ID column.
''')
parser.add_argument('--y_list', default=None, help='''
Optional. If specified, will limit the analysis to
the phenotypes in the list
''')
parser.add_argument('--output', help='''
Output result summary in TSV.GZ format.
''')
parser.add_argument('--evd_min_max_ratio', type=float, default=None, help='''
The cutoff on eigen-vectors and -values in EVD: value / max(value) > evd_min_max_ratio.
''')
args = parser.parse_args()
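    # Example invocation (hypothetical file names; --grm expects the prefix of the
    # [prefix].grm.gz / [prefix].grm.id pair):
    #
    #   python run_pyemma.py --grm my_cohort --reml \
    #       --y_table phenotypes.tsv individual_id \
    #       --grm_cache my_cohort_evd --output vc_estimates.tsv.gz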
import logging, time, sys, os
# configing util
logging.basicConfig(
level = logging.INFO,
stream = sys.stderr,
format = '%(asctime)s %(message)s',
datefmt = '%Y-%m-%d %I:%M:%S %p'
)
import numpy as np
import gzip
import pickle
from tqdm import tqdm
import scipy.stats
from pyutil import file_exists, read_table, load_list, intersection
import pyemma
if args.grm_cache is not None:
if args.reml is True:
grm_cache = args.grm_cache + '.reml.pkl.gz'
else:
grm_cache = args.grm_cache + '.mle.pkl.gz'
else:
grm_cache = None
logging.info('Loading phenotype table.')
df_y = read_table(
args.y_table[0],
indiv_col=args.y_table[1]
)
pheno_indiv = df_y.indiv.to_list()
if args.y_list is not None:
y_list = load_list(args.y_list)
df_y = df_y[['indiv'] + y_list]
pheno_list = df_y.columns[1:].to_list()
if grm_cache is None or not file_exists(grm_cache):
logging.info('Loading GRM from scratch.')
grm, grm_indiv = pyemma.load_grm(args.grm + '.grm.gz', args.grm + '.grm.id')
common_indiv = intersection(grm_indiv, pheno_indiv)
grm, indiv = subset_grm(grm, grm_indiv, common_indiv)
ymat = subset_y(df_y, indiv)
logging.info('Starting GRM EVD.')
eig_val, eig_vec = pyemma.pyemma_mle_mat_fac(grm, min_max_ratio=args.evd_min_max_ratio)
to_cache = {'vec': eig_vec, 'val': eig_val, 'indiv': indiv}
if args.reml is True:
eig_val_inter, eig_vec_inter = pyemma.pyemma_reml_mat_fac(np.ones((grm.shape[0], 1)), grm, min_max_ratio=args.evd_min_max_ratio)
to_cache['vec_intercept'] = eig_vec_inter
to_cache['val_intercept'] = eig_val_inter
if grm_cache is not None:
logging.info('Caching GRM EVD.')
with gzip.open(grm_cache, 'wb') as f:
pickle.dump(
to_cache,
f,
protocol=4
)
else:
logging.info('Loading GRM from cache.')
with gzip.open(grm_cache, 'rb') as f:
tmp = pickle.load(f)
eig_vec = tmp['vec']
eig_val = tmp['val']
if args.reml is True:
eig_vec_inter = tmp['vec_intercept']
eig_val_inter = tmp['val_intercept']
indiv = tmp['indiv']
ymat = subset_y(df_y, indiv)
if ymat.shape[0] != len(indiv):
        raise ValueError('We need all GRM individuals to appear in the phenotype table to proceed.')
res = []
x = np.ones((ymat.shape[0], 1))
for i in tqdm(range(ymat.shape[1])):
if args.reml is False:
res_i = pyemma.pyemma_w_X(ymat[:, i], x, eig_vec, eig_val)
else:
res_i = pyemma.pyemma_reml(ymat[:, i], eig_vec_inter, eig_val_inter, eig_vec, eig_val, x)
res_i['phenotype'] = pheno_list[i]
res.append(res_i)
res = | pd.concat(res, axis=0) | pandas.concat |
#!/usr/bin/env python
__author__ = '<NAME>'
import os
import pandas as pd
import argparse
from copy import deepcopy
from collections import OrderedDict
from BCBio import GFF
from RouToolPa.Collections.General import SynDict, IdList
from RouToolPa.Parsers.VCF import CollectionVCF
from MACE.Routines import Visualization, StatsVCF
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True, type=lambda s: s.split(","),
help="Comma_separated_list of input file with precalculated coverage in windows.")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix", required=True,
help="Prefix of output files")
parser.add_argument("-e", "--output_formats", action="store", dest="output_formats", type=lambda s: s.split(","),
default=("png", ),
help="Comma-separated list of formats (supported by matlotlib) of "
"output figure.Default: svg,png")
"""
parser.add_argument("-g", "--draw_gaps", action="store_true", dest="draw_gaps",
help="Draw gaps, ignored if reference genome is not set. Default: False")
"""
parser.add_argument("-m", "--mean_coverage_file", action="store", dest="mean_coverage_file", required=True,
help="File with mean coverage for all samples")
parser.add_argument("-l", "--label_list", action="store", dest="label_list", required=True, type=lambda s: s.split(","),
help="Comma-separated list of labels to use for samples")
parser.add_argument("--scaffold_column_name", action="store", dest="scaffold_column_name", default="scaffold",
help="Name of column in coverage file with scaffold ids per window. Default: scaffold")
parser.add_argument("--window_column_name", action="store", dest="window_column_name", default="window",
help="Name of column in coverage file with window id. Default: window")
parser.add_argument("--coverage_column_name_list", action="store", dest="coverage_column_name_list",
default=["median", "mean"],
type=lambda s: s.split(","),
help="Coverage file with mean/median coverage per window. Default: median,mean")
parser.add_argument("--label_column_name", action="store", dest="label_column_name", default="label",
help="Name of column in mean coverage file with labels of samples. Default: label")
parser.add_argument("-w", "--window_size", action="store", dest="window_size", default=100000, type=int,
help="Size of the windows Default: 100000")
parser.add_argument("-s", "--window_step", action="store", dest="window_step", default=None, type=int,
help="Step of the sliding windows. Default: window size, i.e windows are staking")
parser.add_argument("-a", "--scaffold_white_list", action="store", dest="scaffold_white_list", default=[],
type=lambda s: IdList(filename=s) if os.path.exists(s) else s.split(","),
help="Comma-separated list of the only scaffolds to draw. Default: all")
parser.add_argument("-b", "--scaffold_black_list", action="store", dest="scaffold_black_list", default=[],
type=lambda s: IdList(filename=s) if os.path.exists(s) else s.split(","),
help="Comma-separated list of scaffolds to skip at drawing. Default: not set")
parser.add_argument("-y", "--sort_scaffolds", action="store_true", dest="sort_scaffolds", default=False,
help="Order scaffolds according to their names. Default: False")
parser.add_argument("-z", "--scaffold_ordered_list", action="store", dest="scaffold_ordered_list", default=[],
type=lambda s: IdList(filename=s) if os.path.exists(s) else s.split(","),
help="Comma-separated list of scaffolds to draw first and exactly in same order. "
"Scaffolds absent in this list are drawn last and in order according to vcf file . "
"Default: not set")
parser.add_argument("-n", "--scaffold_length_file", action="store", dest="scaffold_length_file", required=True,
help="File with lengths of scaffolds")
parser.add_argument("--scaffold_syn_file", action="store", dest="scaffold_syn_file",
help="File with scaffold id synonyms")
parser.add_argument("--syn_file_key_column", action="store", dest="syn_file_key_column",
default=0, type=int,
help="Column(0-based) with key(current id) for scaffolds in synonym file. Default: 0")
parser.add_argument("--syn_file_value_column", action="store", dest="syn_file_value_column",
default=1, type=int,
help="Column(0-based) with value(synonym id) for scaffolds in synonym file synonym. Default: 1")
parser.add_argument("--colormap", action="store", dest="colormap",
help="Matplotlib colormap to use for SNP densities. Default: not set, "
"colors from HapMap article are used")
parser.add_argument("--coverage_thresholds", action="store", dest="coverage_thresholds",
default=(0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 2.0, 2.5),
type=lambda s: list(map(float, s.split(","))),
help="Comma-separated list of coverage thresholds(relative to mean/median) to use for "
"window coloring."
"Default: (0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 2.0, 2.5)")
parser.add_argument("--test_colormaps", action="store_true", dest="test_colormaps",
help="Test colormaps. If set --colormap option will be ignored")
parser.add_argument("--absolute_coverage_values", action="store_true", dest="absolute_coverage_values",
help="Use absolute coverage values. Default: False")
parser.add_argument("--subplots_adjust_left", action="store", dest="subplots_adjust_left", type=float,
help="Adjust left border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--subplots_adjust_top", action="store", dest="subplots_adjust_top", type=float,
help="Adjust top border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--subplots_adjust_right", action="store", dest="subplots_adjust_right", type=float,
help="Adjust right border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--subplots_adjust_bottom", action="store", dest="subplots_adjust_bottom", type=float,
help="Adjust bottom border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--figure_width", action="store", dest="figure_width", type=float, default=15,
help="Width of figure in inches. Default: 15")
parser.add_argument("--figure_height_per_scaffold", action="store", dest="figure_height_per_scaffold",
type=float, default=0.5,
help="Height of figure per chromosome track. Default: 0.5")
args = parser.parse_args()
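# Example invocation (script and file names are hypothetical; one coverage file and one label per sample):
#
#   python draw_coverage_tracks.py -i sample1.win.tsv,sample2.win.tsv -l sample1,sample2 \
#       -m mean_coverage.tsv -n scaffold_lengths.tsv -w 100000 -o coverage_tracks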
mean_coverage_df = pd.read_csv(args.mean_coverage_file, sep='\t', header=0, index_col=0,
usecols=[args.label_column_name] + args.coverage_column_name_list)
chr_syn_dict = SynDict(filename=args.scaffold_syn_file,
key_index=args.syn_file_key_column,
value_index=args.syn_file_value_column)
chr_len_df = pd.read_csv(args.scaffold_length_file, sep='\t', header=None, names=("scaffold", "length"), index_col=0)
if args.scaffold_syn_file:
chr_len_df.rename(index=chr_syn_dict, inplace=True)
coverage_df_dict = OrderedDict()
final_scaffold_set = set()
for entry, label in zip(args.input, args.label_list):
coverage_df = pd.read_csv(entry, sep="\t", usecols=[args.scaffold_column_name,
args.window_column_name] + args.coverage_column_name_list,
index_col=(args.scaffold_column_name, args.window_column_name))
scaffold_to_keep = StatsVCF.get_filtered_entry_list(coverage_df.index.get_level_values(level=0).unique().to_list(),
entry_white_list=args.scaffold_white_list)
coverage_df = coverage_df[coverage_df.index.isin(scaffold_to_keep, level=0)]
if args.scaffold_syn_file:
coverage_df.rename(index=chr_syn_dict, inplace=True)
coverage_df_dict[label] = coverage_df
#print("AAA")
#print(coverage_df_dict[label].index.get_level_values(0).unique().to_list())
final_scaffold_set |= set(coverage_df_dict[label].index.get_level_values(0).unique().to_list())
#print(final_scaffold_set)
#print(chr_syn_dict)
for scaf in final_scaffold_set:
scaf_df_list = []
#len_df = pd.DataFrame(columns=["length"])
len_dict = OrderedDict()
for label in args.label_list:
scaf_df_list.append(coverage_df_dict[label].loc[[scaf]])
#print(scaf_df_list[-1])
#print(label)
scaf_df_list[-1].rename(index={scaf: label}, inplace=True)
len_dict[label] = chr_len_df.loc[scaf]
len_df = | pd.DataFrame.from_dict(len_dict, orient="index") | pandas.DataFrame.from_dict |
#!/usr/bin/env python3
import gzip
import json
import os
import pandas as pd
pd.set_option('display.max_colwidth', None)
| pd.set_option('display.max_columns', None) | pandas.set_option |
import numpy as np
import pandas as pd
import bottleneck
from scipy import sparse
import gc
from .utils import *
def MetaNeighbor(
adata,
study_col,
ct_col,
genesets,
node_degree_normalization=True,
save_uns=True,
fast_version=False,
fast_hi_mem=False,
mn_key="MetaNeighbor",
):
"""Runs MetaNeighbor
For each gene set of interest, the function builds a network of rank
correlations between all cells. Next,It builds a network of rank correlations
between all cells for a gene set. Next, the neighbor voting predictor
produces a weighted matrix of predicted labels by performing matrix
multiplication between the network and the binary vector indicating cell type
membership, then dividing each element by the null predictor (i.e., node
degree). That is, each cell is given a score equal to the fraction of its
neighbors (including itself), which are part of a given cell type. For
cross-validation, we permute through all possible combinations of
leave-one-dataset-out cross-validation, and we report how well we can recover
cells of the same type as area under the receiver operator characteristic
curve (AUROC). This is repeated for all folds of cross-validation, and the
mean AUROC across folds is reported.
Arguments:
        adata {AnnData} -- Object containing all single cell experiments concatenated
        study_col {str} -- String referencing column in adata.obs that identifies the study label for datasets
        ct_col {str} -- String referencing column in adata.obs that identifies cell type labels
genesets {pd.DataFrame} -- One hot encoded dataframe of genes x gene sets
Keyword Arguments:
node_degree_normalization {bool} -- Flag for normalizing votes by node degree (default: {True})
save_uns {bool} -- Flag for saving results in adata.uns[mn_key], return if False (default: {True})
fast_version {bool} -- Flag for low memory fast version (default: {False})
        fast_hi_mem {bool} -- Flag for a slightly faster run that stores the expression matrix as a dense array (default: {False})
mn_key {str} -- String for storing results in adata.uns (default: {'MetaNeighbor'})
Returns:
None/pd.DataFrame -- if save_uns is False, return dataframe of cell-type x gene set AUROCs
"""
assert study_col in adata.obs_keys(), "Study Col not in adata"
assert ct_col in adata.obs_keys(), "Cluster Col not in adata"
assert ~isinstance(
adata.obs[study_col].values[0], float
), "Study Col is a floating point, must be string or int"
assert ~isinstance(
adata.obs[ct_col].values[0], float
), "Cell Type Col is a floating point, must be string or int"
assert (
np.unique(adata.obs[study_col]).shape[0] > 1
), f"Found only 1 unique study_id in {study_col}"
shared_genes = np.intersect1d(adata.var_names.values, genesets.index.values)
assert (
shared_genes.shape[0] > 1
), "No matching genes between genesets and sample matrix"
genesets = genesets.loc[shared_genes]
genesets = genesets.loc[:, genesets.sum() > 0]
assert genesets.shape[1] > 0, "All Genesets are empty"
genesets = genesets.astype(bool)
results = {}
study_vec = adata.obs[study_col].values
ct_vec = adata.obs[ct_col].values
    if fast_hi_mem: # Stores as a dense array (faster)
expression = adata[:, shared_genes].X.toarray()
else:
expression = adata[:, shared_genes].X
for gset in genesets.columns:
adata_gs = expression[:, np.where(genesets[gset].values)[0]]
if fast_version:
results[gset] = score_low_mem(
adata_gs, study_vec, ct_vec, node_degree_normalization
)
else:
results[gset] = score_default(
adata_gs, study_vec, ct_vec, node_degree_normalization, means=True
)
if save_uns:
adata.uns[mn_key] = pd.DataFrame(results)
adata.uns[f"{mn_key}_params"] = {
"fast": fast_version,
"node_degree_normalization": node_degree_normalization,
"study_col": study_col,
"ct_col": ct_col,
}
else:
return pd.DataFrame(results)
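# A minimal sketch of the neighbor-voting step described in the docstring above
# (assumes `network` is a cells x cells similarity matrix and `labels` a one-hot
# cells x cell-types matrix; the actual scoring lives in score_default/score_low_mem):
#
#   votes = network @ labels                     # sum of neighbours per cell type
#   votes /= network.sum(axis=1, keepdims=True)  # divide by node degree (null predictor)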
def score_low_mem(
X, S, C, node_degree_normalization):
"""Compute Neighbor Voting using low memory method
Compute using the approximate low memory method
Arguments:
X {array} -- Array (sparse or dense) of geneset x cells
S {vector} -- Study labels, length cells
        C {vector} -- Cell type labels, length cells
node_degree_normalization {bool} -- Flag for whether to normalize votes by node degree
Returns:
pd.Series -- Series containing AUROCs for each cell type for the given gene set
"""
slice_cells = np.ravel(np.sum(X, axis=1) > 0)
X = X[slice_cells, :]
S = S[slice_cells]
C = C[slice_cells]
cell_labels = design_matrix(C)
cell_cols = cell_labels.columns
cell_labels = cell_labels.values
X_norm = np.asfortranarray(normalize_cells(X).T)
studies = np.unique(S)
res = {}
for study in studies:
is_study = np.where(S == study)[0]
is_not_study = np.where(S != study)[0]
votes = compute_votes(
X_norm[:, is_study].T,
X_norm[:, is_not_study],
cell_labels[is_not_study],
node_degree_normalization,
)
votes = pd.DataFrame(votes, index=C[is_study], columns=cell_cols)
roc = compute_aurocs(votes)
res[study] = np.diag(roc.reindex(roc.columns).values)
res = np.nanmean( | pd.DataFrame(res) | pandas.DataFrame |
#!/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import csv
import zipfile
import tarfile
import configparser
import collections
import statistics
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
from datetime import datetime
# Type of printing.
OK = 'ok' # [*]
NOTE = 'note' # [+]
FAIL = 'fail' # [-]
WARNING = 'warn' # [!]
NONE = 'none' # No label.
# Type of train data.
OS = 0
WEB = 1
FRAMEWORK = 2
CMS = 3
class Creator:
def __init__(self, utility):
# Read config.ini.
self.utility = utility
config = configparser.ConfigParser()
self.file_name = os.path.basename(__file__)
self.full_path = os.path.dirname(os.path.abspath(__file__))
self.root_path = os.path.join(self.full_path, '../')
config.read(os.path.join(self.root_path, 'config.ini'))
# Define master signature file path.
master_sig_dir = os.path.join(self.root_path, config['Common']['signature_path'])
self.master_prod_sig = os.path.join(master_sig_dir, config['VersionChecker']['signature_file'])
self.master_cont_sig = os.path.join(master_sig_dir, config['ContentExplorer']['signature_file'])
self.pd_prod_sig = pd.read_csv(self.master_prod_sig,
delimiter='@',
encoding='utf-8',
header=None,
quoting=csv.QUOTE_NONE)
self.pd_cont_sig = pd.read_csv(self.master_cont_sig,
delimiter='@',
encoding='utf-8',
header=None,
quoting=csv.QUOTE_NONE)
self.delete_prod_row_index = []
self.delete_cont_row_index = []
# Define master train data path.
self.train_categories = config['VersionCheckerML']['category'].split('@')
train_dir = os.path.join(self.full_path, config['VersionCheckerML']['train_path'])
self.train_os_in = os.path.join(train_dir, config['VersionCheckerML']['train_os_in'])
self.train_web_in = os.path.join(train_dir, config['VersionCheckerML']['train_web_in'])
self.train_framework_in = os.path.join(train_dir, config['VersionCheckerML']['train_framework_in'])
self.train_cms_in = os.path.join(train_dir, config['VersionCheckerML']['train_cms_in'])
for category in self.train_categories:
if category == 'OS':
self.pd_train_os = pd.read_csv(self.train_os_in,
delimiter='@',
encoding='utf-8',
header=None,
quoting=csv.QUOTE_NONE)
elif category == 'WEB':
self.pd_train_web = pd.read_csv(self.train_web_in,
delimiter='@',
encoding='utf-8',
header=None,
quoting=csv.QUOTE_NONE)
elif category == 'FRAMEWORK':
self.pd_train_fw = pd.read_csv(self.train_framework_in,
delimiter='@',
encoding='utf-8',
header=None,
quoting=csv.QUOTE_NONE)
elif category == 'CMS':
self.pd_train_cms = pd.read_csv(self.train_cms_in,
delimiter='@',
encoding='utf-8',
header=None,
quoting=csv.QUOTE_NONE)
else:
self.utility.print_message(FAIL, 'Choose category is not found.')
exit(1)
self.delete_train_os_row_index = []
self.delete_train_web_row_index = []
self.delete_train_fw_row_index = []
self.delete_train_cms_row_index = []
self.compress_dir = os.path.join(self.root_path, config['Creator']['compress_dir'])
self.signature_dir = os.path.join(self.root_path, config['Creator']['signature_dir'])
self.prohibit_ext_list = config['Creator']['prohibit_ext'].split('@')
self.save_file = config['Creator']['result_file'].replace('*', datetime.now().strftime('%Y%m%d%H%M%S'))
self.save_path = os.path.join(self.signature_dir, self.save_file)
self.header = str(config['Creator']['header']).split('@')
self.score_table_path = os.path.join(self.full_path, config['Exploit']['data_path'])
self.score_table = os.path.join(self.score_table_path, config['Creator']['score_table'])
self.threshold = float(config['Creator']['threshold'])
self.unknown_score = float(config['Creator']['unknown_score'])
self.turn_inside_num = int(config['Creator']['turn_inside_num'])
if self.turn_inside_num > 2:
self.turn_inside_num = 2
self.try_othello_num = int(config['Creator']['try_othello_num'])
self.del_open_root_dir = int(config['Creator']['del_open_root_dir'])
# Check necessary directories.
self.is_dir_existance(self.compress_dir)
self.is_dir_existance(self.signature_dir)
# Load score table.
self.pd_score_table = pd.read_csv(self.score_table)
# Check necessary directory.
def is_dir_existance(self, target_dir):
if os.path.exists(target_dir) is False:
os.mkdir(target_dir)
self.utility.print_message(WARNING, 'Directory is not found: {}.'.format(target_dir))
            self.utility.print_message(WARNING, 'Created directory: {}.'.format(target_dir))
# Count directory layer.
def count_dir_layer(self, target_dir):
# Count directory number.
split_symbol = '/'
if os.name == 'nt':
split_symbol = '\\'
tmp_dir_list = os.path.splitdrive(target_dir)[1].split(split_symbol)
tmp_dir_list.remove('')
return len(tmp_dir_list), tmp_dir_list
# Grep.
def execute_grep(self, target_product, target_dir):
base_index = 0
report = []
if os.path.exists(target_dir):
for root, _, files in os.walk(target_dir):
file_count = 0
ext_list = []
for file in files:
msg = 'Check file : {}/{}'.format(root.replace(target_dir, '').replace('\\', '/'), file)
self.utility.print_message(OK, msg)
_, ext = os.path.splitext(file)
if ext[1:] not in self.prohibit_ext_list:
# Count file number and target extension.
file_count += 1
ext_list.append(ext[1:])
# Save information each directory.
record = []
record.insert(0, base_index)
record.insert(1, target_product)
record.insert(2, root.replace(target_dir, ''))
record.insert(3, list(set(ext_list)))
record.insert(4, collections.Counter(ext_list))
record.insert(5, list(set(files)))
report.append(record)
base_index += 1
# Save extracted information.
pd.DataFrame(report).to_csv(self.save_path, mode='a', header=False, index=False)
else:
self.utility.print_message(FAIL, 'Path or file is not found.\n=> {}'.format(target_dir))
sys.exit(1)
return report
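    # Each record in the returned report is
    # [index, product name, relative directory, unique extensions, extension counts, file names],
    # and the same rows are appended to the CSV report at self.save_path.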
# Show graph.
def show_graph(self, target, graph):
self.utility.print_message(NOTE, 'Creating network image...')
plt.figure(figsize=(10, 10))
nx.draw_networkx(graph)
plt.axis('off')
file_name = os.path.join(self.full_path, target + '.png')
plt.savefig(file_name)
plt.show()
# Calculate score of node.
def calc_score(self, ext_type):
score_list = []
for ext in ext_type:
# Get defined score from score table.
pd_score = self.pd_score_table[self.pd_score_table['extension'] == ext.lower()]
# Calculate score.
if len(pd_score) != 0:
if pd_score['probability'].values[0] == 1.0:
return 1.0
elif pd_score['probability'].values[0] == 0.0:
return 0.0
else:
score_list.append(pd_score['probability'].values[0])
else:
score_list.append(self.unknown_score)
return statistics.median(score_list)
# Return score of extension.
def return_score(self, files):
total_file_score = 0.0
for file in files:
_, ext = os.path.splitext(file)
pd_score = self.pd_score_table[self.pd_score_table['extension'] == ext[1:].lower()]
if len(pd_score) > 0:
total_file_score += pd_score['probability'].values[0]
return total_file_score
# Set node label.
def set_node_label(self, score):
label = 0.0
if score == 0.0:
label = 0.00
elif 0.1 <= score <= 0.3:
label = 0.25
elif 0.4 <= score <= 0.6:
label = 0.50
elif 0.7 < score <= 0.9:
label = 0.75
elif score == 1.0:
label = 1.00
return label
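    # For example, a directory whose extensions give a median probability of 0.55 in
    # calc_score() is binned to the 0.50 rank by set_node_label().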
# Create Network using networkx.
def create_network(self, records):
# Create direction graph.
graph = nx.DiGraph()
dir_pool = {}
node_index = 0
for index, record in enumerate(records):
self.utility.print_message(NOTE, '{}/{} Analyzing "{}"'.format(index + 1, len(records), record[2]))
_, dirs = self.count_dir_layer(record[2])
parent_dir = ''
label = '\\'
for layer_index, dir_name in enumerate(dirs):
label += str(dir_name) + '\\'
# Set parent node.
if label in dir_pool.keys():
parent_dir = label
else:
# Calculate score and classification.
score = 0.0
if len(record[3]) != 0:
score = self.calc_score(record[3])
rank = self.set_node_label(score)
# Add new node within attributes.
dir_pool[label] = node_index
graph.add_node(node_index,
path=record[2],
ext_type=record[3],
ext_count=record[4],
files=record[5],
score=score,
rank=rank)
node_index += 1
                # Create an edge connecting the two nodes.
if parent_dir != '' and label != parent_dir:
graph.add_edge(dir_pool[parent_dir], dir_pool[label])
msg = 'Create edge node.{} <-> node.{}'.format(dir_pool[parent_dir], dir_pool[label])
self.utility.print_message(OK, msg)
return graph
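    # For example, a record whose relative path is "\wp-admin\css" (hypothetical) yields
    # two chained nodes ("\wp-admin\" -> "\wp-admin\css\"), each annotated with its file
    # list, extension histogram, score and rank.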
# Extract tar file.
def extract_tar(self, file, path):
with tarfile.open(file) as tf:
tf.extractall(path)
# Extract zip file.
def extract_zip(self, file, path):
with zipfile.ZipFile(file) as zf:
zf.extractall(os.path.join(path))
# Decompress compressed package file.
def decompress_file(self, package_path):
# Extract path and file name from target directory.
self.utility.print_message(NOTE, 'Starting decompress: {}.'.format(package_path))
# Create extraction directory name.
extract_dir_name = ''
if '.tar' in os.path.splitext(package_path)[0]:
extract_dir_name = os.path.splitext(package_path)[0]
else:
extract_dir_name = os.path.splitext(package_path)[0].replace('.tar', '')
try:
# Execute extraction.
if '.tar' in package_path:
self.utility.print_message(OK, 'Decompress... : {}'.format(package_path))
self.extract_tar(package_path, extract_dir_name)
elif '.zip' in package_path:
self.utility.print_message(OK, 'Decompress... : {}'.format(package_path))
self.extract_zip(package_path, extract_dir_name)
except Exception as e:
self.utility.print_exception(e, '{}'.format(e.args))
return extract_dir_name
# Explore open path.
def explore_open_path(self, graph, all_paths):
open_paths = []
for idx, path in enumerate(all_paths):
tmp_open_paths = []
close_path_index = len(path) - 1
self.utility.print_message(NOTE, '{}/{} Explore path: {}'.format(idx + 1, len(all_paths), path))
for idx2, node_index in enumerate(path[::-1]):
msg = 'Checking turn inside node.{}:{}'.format(node_index, graph.nodes[node_index]['path'])
self.utility.print_message(OK, msg)
# Add open path.
rank = graph.nodes[node_index]['rank']
if graph.nodes[node_index]['rank'] >= self.threshold:
self.utility.print_message(OK, 'Add node {} to open path list.'.format(node_index))
tmp_open_paths.append([node_index, graph.nodes[node_index]['path'], rank])
# Set close path index.
close_path_index = len(path) - idx2 - 2
# Execute "Othello".
elif 0 < (len(path) - idx2 - 1) < len(path) - 1:
# Extract ranks of parent and child node.
parent_node_rank = graph.nodes[path[len(path) - idx2 - 2]]['rank']
child_node_rank = graph.nodes[path[len(path) - idx2]]['rank']
# Checking turn inside the node rank.
if parent_node_rank >= self.threshold and child_node_rank >= self.threshold:
msg = 'Turned inside rank={} -> 1.0.'.format(graph.nodes[node_index]['rank'])
self.utility.print_message(WARNING, msg)
self.utility.print_message(WARNING, 'Add node {} to open path list.'.format(node_index))
tmp_open_paths.append([node_index, graph.nodes[node_index]['path'], 1.0])
graph.nodes[node_index]['rank'] = 1.0
# Set close path index.
close_path_index = len(path) - idx2 - 2
else:
if close_path_index < len(path) - idx2 - 1:
# Set close path index.
close_path_index = len(path) - idx2 - 1
# Do not execute "Othello".
else:
if close_path_index < len(path) - idx2 - 1:
# Set close path index.
close_path_index = len(path) - idx2 - 1
# Cut unnecessary path (root path -> open path).
if close_path_index != -1:
for tmp_path in tmp_open_paths:
delete_seq = len(graph.nodes[path[close_path_index]]['path'])
open_paths.append([tmp_path[0], tmp_path[1][delete_seq:], tmp_path[2]])
else:
open_paths.extend(tmp_open_paths)
return list(map(list, set(map(tuple, open_paths))))
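    # The "Othello" heuristic above behaves like capturing a piece in Reversi: when both
    # the parent and the child of a node on a path have rank >= threshold, the node in
    # between is flipped to rank 1.0 and added to the open path list as well.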
# Add train data.
def add_train_data(self, category, vendor, prod_name, prod_ver, files, target_path):
category_list = []
vendor_list = []
prod_name_list = []
version_list = []
path_list = []
# Add train data info to temporally buffer.
if '@' not in target_path:
category_list.append(category)
vendor_list.append(vendor)
prod_name_list.append(prod_name)
version_list.append(prod_ver)
path_list.append('(' + target_path + ')')
# Add file path signature info to temporally buffer.
for file in files:
target_file = '(' + target_path + file + ')'
if '@' in target_file:
continue
category_list.append(category)
vendor_list.append(vendor)
prod_name_list.append(prod_name)
version_list.append(prod_ver)
path_list.append(target_file)
else:
self.utility.print_message(WARNING, 'This path is included special character "@": {}'.format(target_path))
return category_list, vendor_list, prod_name_list, version_list, path_list
# Push path signature to temporally buffer.
def push_path_sig(self, sig_path, category, vendor, prod, version, target_path):
if '@' not in target_path:
sig_path[0].append(category)
sig_path[1].append(vendor)
sig_path[2].append(prod)
sig_path[3].append(version)
sig_path[4].append(target_path)
sig_path[5].append('*')
sig_path[6].append('*')
sig_path[7].append('0')
else:
self.utility.print_message(WARNING, 'This path is included special character "@": {}'.format(target_path))
# Push file signature to temporally buffer.
def push_file_sig(self, sig_file, category, vendor, prod, version, target_file):
if '@' not in target_file:
sig_file[0].append(category)
sig_file[1].append(vendor)
sig_file[2].append(prod)
sig_file[3].append(version)
sig_file[4].append(target_file)
else:
self.utility.print_message(WARNING, 'This path is included special character "@": {}'.format(target_file))
# Push train data to temporally buffer.
def push_train_data(self, train, category, categories, vendors, prods, versions, targets):
train[category][0].extend(categories)
train[category][1].extend(vendors)
train[category][2].extend(prods)
train[category][3].extend(versions)
train[category][4].extend(targets)
# Check existing signature of same product.
def is_existing_same_product(self, category, vendor, prod_name, version):
ret = True
# Check file signature.
df_extract_sig = self.pd_prod_sig[(self.pd_prod_sig[1] == vendor) &
(self.pd_prod_sig[2] == prod_name) &
(self.pd_prod_sig[3] == version)]
if len(df_extract_sig) != 0:
msg = 'Existing same product signature: {}/{}/{}'.format(vendor, prod_name, version)
self.utility.print_message(FAIL, msg)
ret = False
# Check path signature.
df_extract_sig = self.pd_cont_sig[(self.pd_cont_sig[1] == vendor) &
(self.pd_cont_sig[2] == prod_name) &
(self.pd_cont_sig[3] == version)]
if len(df_extract_sig) != 0:
msg = 'Existing same path signature: {}/{}/{}'.format(vendor, prod_name, version)
self.utility.print_message(FAIL, msg)
ret = False
# Check train data.
if category == 'OS':
df_extract_sig = self.pd_train_os[(self.pd_train_os[1] == vendor) &
(self.pd_train_os[2] == prod_name) &
(self.pd_train_os[3] == version)]
if len(df_extract_sig) != 0:
msg = 'Existing same {} train data: {}/{}/{}'.format(category, vendor, prod_name, version)
self.utility.print_message(FAIL, msg)
ret = False
elif category == 'WEB':
df_extract_sig = self.pd_train_web[(self.pd_train_web[1] == vendor) &
(self.pd_train_web[2] == prod_name) &
(self.pd_train_web[3] == version)]
if len(df_extract_sig) != 0:
msg = 'Existing same {} train data: {}/{}/{}'.format(category, vendor, prod_name, version)
self.utility.print_message(FAIL, msg)
ret = False
elif category == 'FRAMEWORK':
df_extract_sig = self.pd_train_fw[(self.pd_train_fw[1] == vendor) &
(self.pd_train_fw[2] == prod_name) &
(self.pd_train_fw[3] == version)]
if len(df_extract_sig) != 0:
msg = 'Existing same {} train data: {}/{}/{}'.format(category, vendor, prod_name, version)
self.utility.print_message(FAIL, msg)
ret = False
elif category == 'CMS':
df_extract_sig = self.pd_train_cms[(self.pd_train_cms[1] == vendor) &
(self.pd_train_cms[2] == prod_name) &
(self.pd_train_cms[3] == version)]
if len(df_extract_sig) != 0:
msg = 'Existing same {} train data: {}/{}/{}'.format(category, vendor, prod_name, version)
self.utility.print_message(FAIL, msg)
ret = False
return ret
# Main control.
def extract_file_structure(self, category, vendor, package):
# Check package path.
package_path = os.path.join(self.compress_dir, package)
if os.path.exists(package_path) is False:
self.utility.print_message(FAIL, 'Package is not found: {}.'.format(package_path))
return
# Extract product name and version.
# ex) Package name must be "wordpress_4.9.8_.tar.gz".
package_info = package.split('@')
prod_name = ''
prod_ver = ''
if len(package_info) < 2:
prod_name = package_info[0]
prod_ver = 'unknown'
else:
prod_name = package_info[0]
prod_ver = package_info[1]
# Check existing same product signature.
if self.is_existing_same_product(category, vendor, prod_name, prod_ver) is False:
return
# Decompress compressed package file.
extract_path = self.decompress_file(package_path)
# Create unique root directory.
root_dir = os.path.join(self.compress_dir, prod_name + '_' + prod_ver)
if os.path.exists(root_dir):
shutil.rmtree(root_dir)
os.mkdir(root_dir)
shutil.move(extract_path, root_dir)
# Create report header.
pd.DataFrame([], columns=self.header).to_csv(self.save_path, mode='w', index=False)
# Extract file structures.
try:
# Extract file path each products.
target_name = prod_name + ' ' + prod_ver
self.utility.print_message(NOTE, 'Extract package {}'.format(root_dir))
record = self.execute_grep(target_name, root_dir)
graph = self.create_network(record)
# Extract all paths to end node from root node.
all_paths = []
node_num = len(graph._adj)
for end_node_idx in range(node_num):
msg = '{}/{} Analyzing node={}'.format(end_node_idx + 1, node_num, end_node_idx)
self.utility.print_message(OK, msg)
if len(graph._adj[end_node_idx]) == 0:
for path in nx.all_simple_paths(graph, source=0, target=end_node_idx):
msg = 'Extract path that source={} <-> target={}, path={}'.format(0, end_node_idx, path)
self.utility.print_message(OK, msg)
all_paths.append(path)
# Execute "Othello".
open_paths = []
for try_num in range(self.try_othello_num):
self.utility.print_message(OK, '{}/{} Execute "Othello".'.format(try_num + 1, self.try_othello_num))
open_paths.extend(self.explore_open_path(graph, all_paths))
# Create signature.
open_paths = list(map(list, set(map(tuple, open_paths))))
# Initialize temporally buffer.
sig_file = []
for _ in range(len(self.pd_prod_sig.columns)):
sig_file.append([])
sig_path = []
for _ in range(len(self.pd_cont_sig.columns)):
sig_path.append([])
train = []
for _ in range(len(self.train_categories)):
temp = []
for _ in range(len(self.pd_train_os.columns)):
temp.append([])
train.append(temp)
for idx, item in enumerate(open_paths):
# Create signature.
files = graph.nodes[item[0]]['files']
if item[2] == 1.0 and len(files) > 0:
# Create target path.
target_path = item[1].replace('\\', '/')
if target_path.endswith('/') is False:
target_path += '/'
# Add signature to master signature file.
if self.return_score(files) / len(files) == 1.0:
# Add path signature info to temporally buffer.
self.push_path_sig(sig_path, category, vendor, prod_name, prod_ver, target_path)
self.utility.print_message(OK, '{}/{} Add path signature: {}.'.format(idx + 1,
len(open_paths),
target_path))
# Add file path signature info to temporally buffer.
for file in files:
target_file = '(' + target_path + file + ')'
self.push_file_sig(sig_file, category, vendor, prod_name, prod_ver, target_file)
self.utility.print_message(OK, '{}/{} Add file signature: {}.'.format(idx + 1,
len(open_paths),
target_file))
# Add extra path signature to master signature file.
tmp_target_path = target_path.split('/')
tmp_target_path = [s for s in tmp_target_path if s]
if len(tmp_target_path) > 1:
for path_idx in range(self.del_open_root_dir):
extra_target_path = '/'.join(tmp_target_path[path_idx + 1:])
extra_target_path = '/' + extra_target_path + '/'
self.push_path_sig(sig_path, category, vendor, prod_name, prod_ver, extra_target_path)
self.utility.print_message(OK, '{}/{} Add path signature: {}.'
.format(idx + 1, len(open_paths), extra_target_path))
# Add extra file path signature info to temporally buffer.
for file in files:
extra_target_file = '(' + extra_target_path + file + ')'
self.push_file_sig(sig_file, category, vendor, prod_name,
prod_ver, extra_target_file)
self.utility.print_message(OK, '{}/{} Add file signature: {}.'
.format(idx + 1, len(open_paths), extra_target_file))
else:
# Add train data info to temporally buffer.
categories, vendors, prods, versions, targets = self.add_train_data(category,
vendor,
prod_name,
prod_ver,
files,
target_path)
if len(categories) == 0:
continue
if category == 'OS':
self.push_train_data(train, OS, categories, vendors, prods, versions, targets)
elif category == 'WEB':
self.push_train_data(train, WEB, categories, vendors, prods, versions, targets)
elif category == 'FRAMEWORK':
self.push_train_data(train, FRAMEWORK, categories, vendors, prods, versions, targets)
elif category == 'CMS':
self.push_train_data(train, CMS, categories, vendors, prods, versions, targets)
self.utility.print_message(OK, '{}/{} Add train data: {}.'.format(idx + 1,
len(open_paths),
target_path))
# Add extra path signature to master signature file.
tmp_target_path = target_path.split('/')
tmp_target_path = [s for s in tmp_target_path if s]
if len(tmp_target_path) > 1:
for path_idx in range(self.del_open_root_dir):
extra_target_path = '/'.join(tmp_target_path[path_idx + 1:])
extra_target_path = '/' + extra_target_path + '/'
categories, vendors, prods, versions, targets = self.add_train_data(category,
vendor,
prod_name,
prod_ver,
files,
extra_target_path)
if len(categories) == 0:
continue
if category == 'OS':
self.push_train_data(train, OS, categories, vendors, prods, versions, targets)
elif category == 'WEB':
self.push_train_data(train, WEB, categories, vendors, prods, versions, targets)
elif category == 'FRAMEWORK':
self.push_train_data(train, FRAMEWORK, categories, vendors, prods, versions, targets)
elif category == 'CMS':
self.push_train_data(train, CMS, categories, vendors, prods, versions, targets)
self.utility.print_message(OK, '{}/{} Add train data: {}.'.format(idx + 1,
len(open_paths),
target_path))
# Create train data.
elif item[2] >= self.threshold:
target_path = item[1].replace('\\', '/')
if target_path.endswith('/') is False:
target_path += '/'
categories, vendors, prods, versions, targets = self.add_train_data(category,
vendor,
prod_name,
prod_ver,
files,
target_path)
if len(categories) == 0:
continue
if category == 'OS':
self.push_train_data(train, OS, categories, vendors, prods, versions, targets)
elif category == 'WEB':
self.push_train_data(train, WEB, categories, vendors, prods, versions, targets)
elif category == 'FRAMEWORK':
self.push_train_data(train, FRAMEWORK, categories, vendors, prods, versions, targets)
elif category == 'CMS':
self.push_train_data(train, CMS, categories, vendors, prods, versions, targets)
self.utility.print_message(OK, '{}/{} Add train data: {}.'.format(idx + 1,
len(open_paths),
target_path))
# Add extra path signature to master signature file.
tmp_target_path = target_path.split('/')
tmp_target_path = [s for s in tmp_target_path if s]
if len(tmp_target_path) > 1:
for path_idx in range(self.del_open_root_dir):
extra_target_path = '/'.join(tmp_target_path[path_idx + 1:])
extra_target_path = '/' + extra_target_path + '/'
categories, vendors, prods, versions, targets = self.add_train_data(category,
vendor,
prod_name,
prod_ver,
files,
extra_target_path)
if len(categories) == 0:
continue
if category == 'OS':
self.push_train_data(train, OS, categories, vendors, prods, versions, targets)
elif category == 'WEB':
self.push_train_data(train, WEB, categories, vendors, prods, versions, targets)
elif category == 'FRAMEWORK':
self.push_train_data(train, FRAMEWORK, categories, vendors, prods, versions, targets)
elif category == 'CMS':
self.push_train_data(train, CMS, categories, vendors, prods, versions, targets)
self.utility.print_message(OK, '{}/{} Add train data: {}.'.format(idx + 1,
len(open_paths),
target_path))
# Write path signature to master signature file.
if len(sig_path[0]) != 0:
series_category = pd.Series(sig_path[0])
series_vendor = pd.Series(sig_path[1])
series_prod = pd.Series(sig_path[2])
series_version = pd.Series(sig_path[3])
series_signature = pd.Series(sig_path[4])
series_dummy1 = pd.Series(sig_path[5])
series_dummy2 = pd.Series(sig_path[6])
series_dummy3 = pd.Series(sig_path[7])
temp_df = pd.DataFrame({0: series_category,
1: series_vendor,
2: series_prod,
3: series_version,
4: series_signature,
5: series_dummy1,
6: series_dummy2,
7: series_dummy3}, columns=None)
origin_num = len(self.pd_cont_sig)
self.pd_cont_sig = pd.concat([self.pd_cont_sig, temp_df])
self.pd_cont_sig = self.pd_cont_sig.drop_duplicates(subset=4, keep=False)
add_signature_num = len(self.pd_cont_sig) - origin_num
self.pd_cont_sig.sort_values(by=[0, 1, 2, 3, 4]).to_csv(self.master_cont_sig,
sep='@',
encoding='utf-8',
header=False,
index=False,
quoting=csv.QUOTE_NONE)
self.utility.print_message(NOTE, 'Add Path signature: {} items.'.format(add_signature_num))
# Write file signature to master signature file.
if len(sig_file[0]) != 0:
series_category = pd.Series(sig_file[0])
series_vendor = pd.Series(sig_file[1])
series_prod = pd.Series(sig_file[2])
series_version = pd.Series(sig_file[3])
series_signature = pd.Series(sig_file[4])
temp_df = pd.DataFrame({0: series_category,
1: series_vendor,
2: series_prod,
3: series_version,
4: series_signature}, columns=None)
origin_num = len(self.pd_prod_sig)
self.pd_prod_sig = pd.concat([self.pd_prod_sig, temp_df])
self.pd_prod_sig = self.pd_prod_sig.drop_duplicates(subset=4, keep=False)
add_signature_num = len(self.pd_prod_sig) - origin_num
self.pd_prod_sig.sort_values(by=[0, 1, 2, 3, 4]).to_csv(self.master_prod_sig,
sep='@',
encoding='utf-8',
header=False,
index=False,
quoting=csv.QUOTE_NONE)
self.utility.print_message(NOTE, 'Add File signature: {} items.'.format(add_signature_num))
# Write OS train data to master train data.
if len(train[OS][0]) != 0:
series_category = pd.Series(train[OS][0])
series_vendor = pd.Series(train[OS][1])
series_prod = pd.Series(train[OS][2])
series_version = pd.Series(train[OS][3])
series_signature = pd.Series(train[OS][4])
temp_df = pd.DataFrame({0: series_category,
1: series_vendor,
2: series_prod,
3: series_version,
4: series_signature}, columns=None)
origin_num = len(self.pd_train_os)
self.pd_train_os = pd.concat([self.pd_train_os, temp_df])
self.pd_train_os = self.pd_train_os.drop_duplicates(subset=[1, 2, 3, 4], keep=False)
add_signature_num = len(self.pd_train_os) - origin_num
self.pd_train_os.sort_values(by=[0, 1, 2, 3, 4]).to_csv(self.train_os_in,
sep='@',
encoding='utf-8',
header=False,
index=False,
quoting=csv.QUOTE_NONE)
self.utility.print_message(NOTE, 'Add OS train data: {} items.'.format(add_signature_num))
# Write Web train data to master train data.
if len(train[WEB][0]) != 0:
series_category = pd.Series(train[WEB][0])
series_vendor = | pd.Series(train[WEB][1]) | pandas.Series |
#!/usr/bin/env python
"""
Merge biotex results from 30k tweets per file
"""
import pandas as pd
from pathlib import Path
import json
# SentiWordNet
from nltk.corpus import wordnet as wn
from nltk.corpus import sentiwordnet as swn
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk import pos_tag, word_tokenize
# End Of SentiWordNet
import matplotlib.pyplot as plt
biotexparams = ['ftfidfc-all', 'ftfidfc-multi', 'c-value-all', 'c-value-multi']
def mergeBiotex(biotexResultDir, mergeResultDir):
columnsName = ['term', 'max', 'sum', 'occurence', 'average', 'umls', 'fastr']
for param in biotexparams:
dfTerms = pd.DataFrame(columns=columnsName)
i = 0
biotexResultDirParam = biotexResultDir.joinpath(param)
for file in biotexResultDirParam.glob("fastr*"):
i += 1
dfToMerge = pd.read_csv(file, sep=',')
dfToMerge.columns = [i, 'term','umls'+str(i), 'score'+str(i), 'fastr'+str(i)]
dfTerms = dfTerms.merge(dfToMerge, on='term', how='outer') # outer : union of keys from both frames,
# similar to a SQL full outer join; sort keys lexicographically.
# Default is inner : intersection
dfTerms["max"] = dfTerms[["max", 'score'+str(i)]].max(axis=1) # axis = 1 <=> column
dfTerms["sum"] = dfTerms[["sum", 'score'+str(i)]].sum(axis=1)
## Average
# occurence number for each term
# print(i)
dfTerms['occurence'].fillna(0, inplace=True) # transform NA to 0
dfTerms.loc[dfTerms['term'].isin(dfToMerge['term']), 'occurence'] += 1
dfTerms["average"] = dfTerms["sum"] / dfTerms['occurence']
## umls
dfTerms['umls'+str(i)] = dfTerms['umls'+str(i)].astype(bool)
dfTerms["umls"] = dfTerms["umls"] | dfTerms['umls'+str(i)]
## fastr
            ### tricky tip: fill empty fastr values with the values from fastr-i (some remain empty; that is fine)
dfTerms['fastr'].fillna(dfTerms['fastr'+str(i)], inplace=True)
# delete row after aggregation
            dfTerms = dfTerms.drop([i, 'score'+str(i), 'umls'+str(i), 'fastr'+str(i)], axis=1) # axis=1 : drop columns
dfTerms.to_csv(mergeResultDir.joinpath("merge30ktweets-english-"+param+".csv"))
print("save file: "+str(mergeResultDir.joinpath("merge30ktweets-english-"+param+".csv")))
    # perform the sorts: max, average and sum
    # comment on the results
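# Example of the merge semantics above (hypothetical numbers): a term found in three of
# the per-chunk biotex files with scores 0.2, 0.4 and 0.6 ends up with max=0.6, sum=1.2,
# occurence=3 and average=0.4 in the merged table, and its umls flag is the OR of the
# per-file flags.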
def cleanMergeResult(df):
"""
Clean some noise from biotex as
- ##########end##########
:param df : a dataframe to clean
:return: df : a clean dataframe
"""
df['term'].fillna("", inplace=True)
#print(df.loc[df['term'].str.contains('##########end##########', case=False)])
toDelete = df.loc[df['term'].str.contains('##########end##########', case=False)].index
if not toDelete.empty: # Do we have to delete something ?
df.drop(toDelete, inplace=True)
#print(df.head(n=20))
return df
def rankMergeResult(mergeResultDir, rankedfilename):
"""
    This function ranks the merged biotex results by MAX, SUM and AVERAGE of the initial biotex scores
Modification :
- E1 : After meeting 2020-04-15 : we decided to give up on multi-term and work only on all (as biotex params)
- E2 : Clean up results from biotex (remove #######end#####)
- E3 : Corroborate with E1 : extract multi terms from E1 (with all as biotex params)
    - E6 : Measure post-ranking : AVG
:param mergeResultDir:
:return:
"""
# Comment since E1
# column_order = ['ftfidfc-multi_max', 'ftfidfc-all_max', 'ftfidfc-multi_average', 'ftfidfc-all_average',
# 'ftfidfc-multi_sum', 'ftfidfc-all_sum', 'c-value-multi_max', 'c-value-all_max',
# 'c-value-multi_average', 'c-value-all_average', 'c-value-multi_sum', 'c-value-all_sum']
# End of comment since E1
# E6 measure : AVG
# rankedMeasures = ['max', 'sum', 'average']
# column_order = ['ftfidfc-all_max', 'ftfidfc-all_mutltiExtracted_max', 'ftfidfc-all_average',
# 'ftfidfc-all_mutltiExtracted_average', 'ftfidfc-all_sum', 'ftfidfc-all_mutltiExtracted_sum',
# 'c-value-all_max', 'c-value-all_mutltiExtracted_max', 'c-value-all_average',
# 'c-value-all_mutltiExtracted_average', 'c-value-all_sum', 'c-value-all_mutltiExtracted_sum']
rankedMeasures = ['average']
column_order = ['ftfidfc-all_average', 'ftfidfc-all_average_UMLS', 'ftfidfc-all_average_fastr',
'ftfidfc-all_mutltiExtracted_average', 'ftfidfc-all_mutltiExtracted_average_UMLS',
'ftfidfc-all_mutltiExtracted_average_fastr', 'c-value-all_average', 'c-value-all_average_UMLS',
'c-value-all_average_fastr', 'c-value-all_mutltiExtracted_average',
'c-value-all_mutltiExtracted_average_UMLS', 'c-value-all_mutltiExtracted_average_fastr']
dfcompare = pd.DataFrame()
nbTerms = 100
# for file in mergeResultDir.glob("merge*"): #since E1
for file in mergeResultDir.glob("*all.csv"):
        df = cleanMergeResult(pd.read_csv(file)) # clean up according to E2
for measure in rankedMeasures:
df.sort_values(by=measure, inplace=True, ascending=False)
# build a new column with a name extracted from the file. It contains Measure F-TFIDF-C or C-value
# All or multi terms from biotex
            # and the new ranking measure (Max, sum, average) introduced by this function
dfcompare[str(file.name).replace("merge30ktweets-english-", "").replace(".csv", "") + "_" + measure] = \
df['term'].values
# add UMLS
dfcompare[str(file.name).replace("merge30ktweets-english-", "").replace(".csv", "") + "_" + measure +
'_UMLS']= df['umls'].values
# add Fastr
dfcompare[str(file.name).replace("merge30ktweets-english-", "").replace(".csv", "") + "_" + measure +
'_fastr'] = df['fastr'].values
# Start E3 : extract multi terms from other
dfextractMulti = pd.DataFrame()
            ## build a new column with only multi terms (terms which contain a space " ")
dfextractMulti[str(file.name).replace("merge30ktweets-english-", "").replace(".csv", "") +
"_mutltiExtracted_"+ measure] = df[df['term'].str.contains(" ")]['term'].values
            ## build column for UMLS
dfextractMulti[str(file.name).replace("merge30ktweets-english-", "").replace(".csv", "") +
"_mutltiExtracted_" + measure + '_UMLS'] = df[df['term'].str.contains(" ")]['umls'].values
            ## build column for fastr
dfextractMulti[str(file.name).replace("merge30ktweets-english-", "").replace(".csv", "") +
"_mutltiExtracted_" + measure + '_fastr'] = df[df['term'].str.contains(" ")]['fastr'].values
            ## Then concatenate with the previous frame. We could not simply add the column because it is shorter
            dfcompare = pd.concat([dfcompare, dfextractMulti], axis=1)
# auto-verify-links.py
#
# This script crawls candidate URLs for municipalities websites and
# checks if they are active and likely to be the city hall or
# city council portals.
#
# Este script navega nas URLs candidatas a sites dos municípios e
# verifica se elas estão ativas e são prováveis portais das prefeituras
# e câmaras municipais.
#
import os
import argparse
import re
import urllib
from datetime import datetime, timezone
import random
import warnings
import multiprocessing
from functools import partial
import pandas as pd
from tqdm import tqdm
import requests
from bs4 import BeautifulSoup
from unidecode import unidecode
from datapackage import Package
from tableschema import Storage
USER_AGENT = 'transparencia-dados-abertos-brasil/0.0.2'
TIMEOUT = 20
INPUT_FOLDER = '../../data/unverified'
INPUT_FILE = 'municipality-website-candidate-links.csv'
MAX_SIMULTANEOUS = 10
MAX_QUANTITY = 0
OUTPUT_FOLDER = '../../data/valid'
OUTPUT_FILE = 'brazilian-municipality-and-state-websites.csv'
# Command line interface
parser = argparse.ArgumentParser(
description='''Crawls candidate URLs for municipalities websites and checks
if they are active and likely to be the city hall or city council portals.'''
)
parser.add_argument('input',
help='input file in CSV format',
default='',
nargs='?',
)
parser.add_argument('output',
help='output file in CSV (must have a schema in datapackage.json)',
default='',
nargs='?',
)
parser.add_argument('-q', '--quantity',
metavar='int', type=int,
help='maximum quantity of cities to process',
default=0,
)
parser.add_argument('-p', '--processes',
metavar='int', type=int,
help='number of processes (parallel downloads) to use',
default=8,
)
args = parser.parse_args()
if args.input:
INPUT_FOLDER = os.path.dirname(args.input)
INPUT_FILE = os.path.basename(args.input)
if args.output:
OUTPUT_FOLDER = os.path.dirname(args.output)
OUTPUT_FILE = os.path.basename(args.output)
if args.quantity:
MAX_QUANTITY = args.quantity
if args.processes:
MAX_SIMULTANEOUS = args.processes
candidates = pd.read_csv(os.path.join(INPUT_FOLDER, INPUT_FILE))
print(f'Found {len(candidates)} candidate links in {os.path.join(INPUT_FOLDER, INPUT_FILE)}.')
codes = candidates.code.unique()
random.shuffle(codes) # randomize sequence
if MAX_QUANTITY:
codes = codes[:MAX_QUANTITY] # take a subsample for quicker processing
goodlinks = pd.DataFrame(columns=candidates.columns)
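# --- Hedged sketch (not part of the original script) ---
# One way the verification step described in the header comment might look: fetch a
# candidate URL with the script's USER_AGENT and TIMEOUT and check whether the page
# title mentions the city hall ("prefeitura") or city council ("camara"). The function
# name and the title heuristic are assumptions made for illustration only.
def looks_like_official_site(url):
    try:
        response = requests.get(url, headers={'User-Agent': USER_AGENT}, timeout=TIMEOUT)
    except requests.RequestException:
        return False
    if response.status_code != 200:
        return False
    soup = BeautifulSoup(response.text, 'html.parser')
    title = unidecode(soup.title.get_text().lower()) if soup.title else ''
    return ('prefeitura' in title) or ('camara' in title)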
#####################################
# DataReader.py
#####################################
# Description:
# * Convert data in a given file format (.csv/.xls/.xlsx) into a pandas DataFrame.
import dateutil.parser as dtparser
import numpy as np
from pandas import DataFrame, isnull, read_csv, read_excel
import re
import os
from DynamicETL_Dashboard.Utilities.Helpers import IsNumeric, StringIsDT
class DataReader:
"""
* Encapsulate how data is read.
"""
def __init__(self):
"""
* Instantiate empty object.
"""
pass
####################
# Interface Methods:
####################
@staticmethod
def Read(path, sheetName = None, delim = None):
"""
* Return pandas dataframe from data at path.
Inputs:
* path: path to file.
Optional:
* sheetName: Sheet name in xls type file to read.
* delim: Delimiter if reading delimited file.
"""
DataReader.__Validate(path, sheetName, delim)
return DataReader.__ReadData(path, sheetName, delim)
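    # Hedged usage sketch (file names are hypothetical, not part of the original module):
    #   df = DataReader.Read('data/sample.csv', delim=',')
    #   df = DataReader.Read('data/sample.xlsx', sheetName='Sheet1')
    # Read() validates its arguments, dispatches on the file extension, and returns a
    # DataFrame whose columns are coerced to int64/float64/datetime64 where possible.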
####################
# Private Helpers:
####################
@staticmethod
def __Validate(path, sheetName, delim):
errs = []
if not isinstance(path, str):
errs.append('path must be a string.')
elif not os.path.isfile(path):
errs.append('path must point to file.')
elif not os.path.exists(path):
errs.append('File at path does not exist.')
if not sheetName is None and not isinstance(sheetName, str):
errs.append('sheetName must be a string.')
if not delim is None and not isinstance(delim, str):
errs.append('delim must be a string.')
if errs:
raise Exception('\n'.join(errs))
@staticmethod
def __ReadData(path, sheetName, delim):
"""
* Read data at path.
"""
if path.endswith('.csv'):
data = read_csv(path, delimiter = (',' if delim is None else delim))
elif path.endswith('.xls') or path.endswith('.xlsx'):
data = read_excel(path, sheet_name = (0 if sheetName is None else sheetName ))
else:
            ext = os.path.splitext(path)[1]
raise Exception('%s extension is invalid.' % ext)
# Convert data into suitable types:
return DataReader.__ConvertAll(data)
@staticmethod
def __ConvertAll(data):
"""
* Convert all columns into most appropriate type.
"""
for col in data.columns:
if DataReader.__IsInt(data[col]):
data[col] = data[col].astype('int64')
elif DataReader.__IsFloat(data[col]):
data[col] = data[col].astype('float64')
elif DataReader.__IsDT(data[col]):
data[col] = data[col].astype('datetime64')
return data
@staticmethod
def __IsInt(series):
"""
* Determine if TimeSeries object could be integer type.
"""
if all(isnull(series)):
return False
for val in series:
if not str(val).isnumeric() and not isnull(val):
return False
return True
@staticmethod
def __IsFloat(series):
"""
* Determine if TimeSeries object is floating point.
"""
        if all(isnull(series)):
            return False
import pandas as pd
import numpy as np
import time
import datetime
import random
from sklearn.preprocessing import Imputer
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
import sklearn.feature_selection
from sklearn.feature_extraction import DictVectorizer
from sklearn import model_selection
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
seed = 0
# Freeze the random seed
random.seed(seed)
np.random.seed(seed)
train_test_split_test_size = 0.1
models = {
'LR': LogisticRegression(),
'LDA': LinearDiscriminantAnalysis(),
'KNN': KNeighborsClassifier(),
'CART': DecisionTreeClassifier(),
'NB': GaussianNB(),
'RFC': RandomForestClassifier(),
'ABC': AdaBoostClassifier(),
'SVM': SVC(),
'GBC': GradientBoostingClassifier(),
'MLP': MLPClassifier()
}
def load_data():
    df = pd.read_csv("../data/adult.csv")
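# --- Hedged sketch (not part of the original script) ---
# A minimal example of how the `models` dict defined above might be compared with
# k-fold cross-validation. `X` and `y` stand for the preprocessed features and labels
# that the rest of this script is expected to produce; they are assumptions here.
def evaluate_models(X, y, n_splits=5):
    results = {}
    kfold = model_selection.KFold(n_splits=n_splits, shuffle=True, random_state=seed)
    for name, model in models.items():
        scores = model_selection.cross_val_score(model, X, y, cv=kfold, scoring='accuracy')
        results[name] = (scores.mean(), scores.std())
        print(f"{name}: accuracy {scores.mean():.4f} (+/- {scores.std():.4f})")
    return results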
"""
Simplify Python Client of Google Cloud Speech-to-Text
Need to install (and restart first) with `import kora.install.speech`
Then
```
from kora.speech import Recognizer
sp = Recognizer(sa_file, lang='th', output_dir=None)
op = sp.open(uri)
op.to_df()
```
"""
import pandas as pd
import os.path
from pathlib import Path
from fastcore.foundation import patch
from google.cloud.speech_v1p1beta1 import SpeechClient
from google.cloud.speech_v1p1beta1.types import \
RecognitionAudio, RecognitionConfig, TranscriptOutputConfig
from google.api_core.operation import Operation
class Recognizer:
""" Make SpeechClient easier to use """
def __init__(self, sa_file, lang='th', output_dir=None):
""" Need a service account file for authentication
e.g. drive/MyDrive/service-account.json
"""
self.client = SpeechClient.from_service_account_file(sa_file)
self.lang = lang
self.output_dir = output_dir
self.ops = []
def open(self, uri):
audio = RecognitionAudio(uri=uri)
cfg = RecognitionConfig(language_code=self.lang, enable_word_time_offsets=True)
req = {"audio": audio, "config": cfg}
if self.output_dir:
output_name = Path(uri).stem + '.json'
output_uri = os.path.join(self.output_dir, output_name)
req['output_config'] = TranscriptOutputConfig(gcs_uri=output_uri)
op = self.client.long_running_recognize(request=req)
self.ops.append(op)
return op
@patch
def __repr__(self: Operation):
""" Show operation progress """
self.done() # refresh
pc = self.metadata.progress_percent
name = self.operation.name
return f"<{name} ({pc}%)>"
@patch
def to_df(self: Operation):
""" Return dataframe of its result """
left = []
right = []
data = []
for i, result in enumerate(self.result().results):
for w in result.alternatives[0].words:
left.append(w.start_time.total_seconds())
right.append(w.end_time.total_seconds())
data.append([w.word, i+1])
    index = pd.IntervalIndex.from_arrays(left, right, closed='left', name='time')
"""Implements the utilities to generate general multi-objective mixed-integer linear program instances
Referenced articles:
@article{mavrotas2005multi,
title={Multi-criteria branch and bound: A vector maximization algorithm for mixed 0-1 multiple objective linear programming},
author={<NAME> and <NAME>},
journal={Applied mathematics and computation},
volume={171},
number={1},
pages={53--71},
year={2005},
publisher={Elsevier}
}
@article{boland2015criterion,
title={A criterion space search algorithm for biobjective mixed integer programming: The triangle splitting method},
author={<NAME> and <NAME> and <NAME>},
journal={INFORMS Journal on Computing},
volume={27},
number={4},
pages={597--618},
year={2015},
publisher={INFORMS}
}
@article{kirlik2014new,
title={A new algorithm for generating all nondominated solutions of multiobjective discrete optimization problems},
author={<NAME> and <NAME>},
journal={European Journal of Operational Research},
volume={232},
number={3},
pages={479--488},
year={2014},
publisher={Elsevier}
}
"""
from abc import ABCMeta, abstractmethod
from gurobipy import GRB, LinExpr, Model
import numpy as np
import os
import pandas as pd
class MomilpInstanceParameterSet:
"""Implements MOMILP instance parameter set"""
def __init__(
self,
constraint_coeff_range=(-1, 20),
continuous_var_obj_coeff_range=(-10, 10),
# if 'True', all the integer variables have zero coefficient in the discrete objectives
dummy_discrete_obj=True,
integer_var_obj_coeff_range=(-200, 200),
# num of binary variables out of the num of integer vars
num_binary_vars=10,
num_constraints=20,
num_continuous_vars=10,
# starting from the objective function at the first index
num_discrete_objs=1,
num_integer_vars=10,
num_objs=3,
obj_sense="max",
rhs_range=(50, 100)):
self.constraint_coeff_range = constraint_coeff_range
self.continuous_var_obj_coeff_range = continuous_var_obj_coeff_range
self.dummy_discrete_obj = dummy_discrete_obj
self.integer_var_obj_coeff_range = integer_var_obj_coeff_range
self.num_binary_vars = num_binary_vars
self.num_constraints = num_constraints
self.num_continuous_vars = num_continuous_vars
self.num_discrete_objs = num_discrete_objs
self.num_integer_vars = num_integer_vars
self.num_objs = num_objs
self.obj_sense = obj_sense
self.rhs_range = rhs_range
def to_dict(self):
"""Returns the dictionary representation of the parameter set"""
return self.__dict__
class MomilpInstance(metaclass=ABCMeta):
"""Implements an abstract MOMILP instance class"""
@abstractmethod
def write(self, path):
"""Writes the model"""
class MomilpInstanceData:
"""Implements a MOMILP instance data"""
def __init__(
self, param_2_value, constraint_coeff_df=None, continuous_var_obj_coeff_df=None,
integer_var_obj_coeff_df=None, rhs=None):
self._constraint_coeff_df = constraint_coeff_df
self._continuous_var_obj_coeff_df = continuous_var_obj_coeff_df
self._integer_var_obj_coeff_df = integer_var_obj_coeff_df
self._param_2_value = param_2_value
self._rhs = rhs
def constraint_coeff_df(self):
"""Returns the constraint coefficient data frame
NOTE: An (m by n) matrix where rows are constraints and columns are variables"""
return self._constraint_coeff_df
def continuous_var_obj_coeff_df(self):
"""Returns the objective functions coefficients data frame for the continuous variables
NOTE: An (m by n) matrix where rows are variables and columns are objective functions"""
return self._continuous_var_obj_coeff_df
def integer_var_obj_coeff_df(self):
"""Returns the objective functions coefficients data frame for the integer variables
NOTE: An (m by n) matrix where rows are variables and columns are objective functions"""
return self._integer_var_obj_coeff_df
def rhs(self):
"""Returns the right-hand-side values of the constraints
NOTE: A series of length m"""
return self._rhs
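# Hedged example (not part of the original module): constructing instance data directly
# from in-memory frames that match the shapes documented above. The sizes below are
# arbitrary and chosen only for illustration.
#   A = pd.DataFrame(np.random.randint(-1, 20, size=(5, 4)))     # 5 constraints x 4 variables
#   C = pd.DataFrame(np.random.randint(-10, 10, size=(2, 3)))    # 2 continuous vars x 3 objectives
#   F = pd.DataFrame(np.random.randint(-200, 200, size=(2, 3)))  # 2 integer vars x 3 objectives
#   b = pd.Series(np.random.randint(50, 100, size=5))
#   data = MomilpInstanceData({}, constraint_coeff_df=A, continuous_var_obj_coeff_df=C,
#                             integer_var_obj_coeff_df=F, rhs=b)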
class KnapsackFileInstanceData(MomilpInstanceData):
"""Implements a Knapsack problem instance data retrived from a file
NOTE: Based on the data input schema defined in Kirlik and Sayin. (2014):
http://home.ku.edu.tr/~moolibrary/
A '.dat' file describing a multi-objective 0-1 knapsack problem
Line 1: Number of objective functions, p
Line 2: Number of objects, n
Line 3: Capacity of the knapsack, W
Line 5: Profits of the objects in each objective function, V
Line 6: Weights of the objects, w
"""
_ESCAPED_CHARACTERS = ["[", "]"]
_LINE_DELIMITER = ", "
_NEW_LINE_SEPARATOR = "\n"
def __init__(self, file_name, param_2_value):
super(KnapsackFileInstanceData, self).__init__(param_2_value)
self._file_name = file_name
self._create()
def _create(self):
"""Creates the instance data"""
lines = []
with open(self._file_name, "r") as f:
lines = f.readlines()
# read the number of objectives
num_objectives = int(self._process_lines(lines).iloc[0,0])
assert num_objectives == self._param_2_value["num_objs"], \
"the number of objectives in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_objectives, self._param_2_value["num_objs"])
# read the number of objects
num_continuous_vars = self._param_2_value["num_continuous_vars"]
assert num_continuous_vars == 0, "there should not be any continuous variables"
num_binary_vars = self._param_2_value["num_binary_vars"]
num_objects = int(self._process_lines(lines).iloc[0,0])
assert num_objects == num_binary_vars, \
"the number of objects in the data file is not equal to the number of binary variables in the " \
"configuration, '%d' != '%d'" % (num_objects, num_continuous_vars + num_binary_vars)
# read the knapsack capacities
self._rhs = self._process_lines(lines).iloc[0, :]
num_constraints = len(self._rhs)
assert num_constraints == self._param_2_value["num_constraints"], \
"the number of constraints in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_constraints, self._param_2_value["num_constraints"])
# read the objective function coefficients
self._continuous_var_obj_coeff_df = pd.DataFrame()
self._integer_var_obj_coeff_df = self._process_lines(lines, to_index=num_objectives).T
# read the constraint coefficients
self._constraint_coeff_df = self._process_lines(lines, to_index=num_constraints)
def _process_lines(self, lines, from_index=0, to_index=1):
"""Processes the lines between the indices, removes the processed lines, and returns the data frame for the
processed data"""
rows = []
for line in lines[from_index:to_index]:
for char in KnapsackFileInstanceData._ESCAPED_CHARACTERS:
line = line.replace(char, "")
line = line.split(KnapsackFileInstanceData._NEW_LINE_SEPARATOR)[0]
values = line.split(KnapsackFileInstanceData._LINE_DELIMITER)
if values[-1][-1] == ",":
values[-1] = values[-1][:-1]
if not values[-1]:
values = values[:-1]
rows.append(values)
del lines[from_index:to_index]
df = pd.DataFrame(rows, dtype='float')
return df
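# Hedged usage sketch (the file name is hypothetical, not part of the original module):
#   params = MomilpInstanceParameterSet(num_objs=3, num_constraints=1,
#                                       num_continuous_vars=0, num_binary_vars=10).to_dict()
#   knapsack = KnapsackFileInstanceData("3KP10.dat", params)
#   print(knapsack.integer_var_obj_coeff_df().shape)   # expected: (10, 3)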
class MomilpFileInstanceData(MomilpInstanceData):
"""Implements a MOMILP instance data retrived from a file
NOTE: Based on the data input schema defined in Boland et al. (2015):
A '.txt' file describing a bi-objective problem
Line 1: Number of constraints, m
Line 2: Number of continuous variables, n_c
Line 3: Number of binary variables, n_b
Line 4: Array of coefficients for the first objective and the continuous variables, c^{1}
Line 5: Array of coefficients for the first objective and the binary variables, f^{1}
Line 6: Array of coefficients for the second objective and the continuous variables, c^{2}
Line 7: Array of coefficients for the second objective and the binary variables, f^{2}
Next 'n_c' lines: Array of constraint matrix coefficients for the continuous variables, a_{i,j}
Next line: Array of constraint matrix coefficients for the binary variables, a^{'}_{j}
Next line: Array of constraint right-hand-side values, b_j
The instance is converted to a three-obj problem by creating an additional objective with all zero coefficients.
"""
_INTEGER_VARIABLE_SUM_CONTRAINT_RHS_MULTIPLIER = 1/3
_LINE_DELIMITER = " "
_NEW_LINE_SEPARATOR = "\n"
def __init__(self, file_name, param_2_value):
super(MomilpFileInstanceData, self).__init__(param_2_value)
self._file_name = file_name
self._create()
def _create(self):
"""Creates the instance data"""
lines = []
with open(self._file_name, "r") as f:
lines = f.readlines()
num_constraints = int(self._process_lines(lines).iloc[0,0])
assert num_constraints == self._param_2_value["num_constraints"], \
"the number of constraints in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_constraints, self._param_2_value["num_constraints"])
num_continuous_vars = int(self._process_lines(lines).iloc[0,0])
assert num_continuous_vars == self._param_2_value["num_continuous_vars"], \
"the number of continuous vars in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_continuous_vars, self._param_2_value["num_continuous_vars"])
num_binary_vars = int(self._process_lines(lines).iloc[0,0])
assert num_binary_vars == self._param_2_value["num_binary_vars"], \
"the number of binary vars in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_binary_vars, self._param_2_value["num_binary_vars"])
# since we solve the BOMILP as TOMILP in the momilp solver, and the default discrete obj index is zero, we
# create zero arrays as the coefficient vectors for the first objective
self._continuous_var_obj_coeff_df = pd.DataFrame(np.zeros(shape=(1, num_continuous_vars)))
self._integer_var_obj_coeff_df = pd.DataFrame(np.zeros(shape=(1, num_binary_vars)))
self._continuous_var_obj_coeff_df = self._continuous_var_obj_coeff_df.append(self._process_lines(lines))
self._integer_var_obj_coeff_df = self._integer_var_obj_coeff_df.append(self._process_lines(lines))
self._continuous_var_obj_coeff_df = self._continuous_var_obj_coeff_df.append(
self._process_lines(lines)).reset_index(drop=True).T
self._integer_var_obj_coeff_df = self._integer_var_obj_coeff_df.append(
self._process_lines(lines)).reset_index(drop=True).T
continuous_var_columns = [i for i in range(num_continuous_vars)]
binary_var_columns = [len(continuous_var_columns) + i for i in range(num_binary_vars)]
continuous_var_constraint_df = self._process_lines(lines, to_index=num_continuous_vars).T
continuous_var_constraint_df = continuous_var_constraint_df.append(
pd.DataFrame(np.zeros(shape=(1, num_continuous_vars)))).reset_index(drop=True)
continuous_var_constraint_df.columns = continuous_var_columns
binary_var_constraint_df = pd.DataFrame(np.diag(self._process_lines(lines).iloc[0,:])).append(
pd.DataFrame(np.zeros(shape=(num_constraints - num_binary_vars - 1, num_binary_vars)))).append(
pd.DataFrame(np.ones(shape=(1, num_binary_vars)))).reset_index(drop=True)
binary_var_constraint_df.columns = binary_var_columns
self._constraint_coeff_df = pd.concat([continuous_var_constraint_df, binary_var_constraint_df], axis=1)
binary_var_sum_rhs = num_binary_vars * MomilpFileInstanceData._INTEGER_VARIABLE_SUM_CONTRAINT_RHS_MULTIPLIER
        self._rhs = self._process_lines(lines).iloc[0, :].append(pd.Series(binary_var_sum_rhs))
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
| pd.DataFrame([[-25, -25], [np.inf, np.inf]]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import pickle
from sklearn.decomposition import NMF
movies = pd.read_csv('movies.csv', header=0)
ratings = pd.read_csv('ratings.csv', header=0)
tags = pd.read_csv('tags.csv', header=0)
links = pd.read_csv('links.csv', header=0)
ratings = ratings[['userId', 'movieId', 'rating']]
tags = tags[['userId', 'movieId', 'tag']]
links = links[['movieId', 'imdbId']]
data_dfs = [movies, tags, links, ratings]
DF = | pd.concat(data_dfs, join='outer', sort=True) | pandas.concat |
"""Pipeline to train model, find best parameters, give results."""
import logging
import os
import time
from os.path import join, relpath
import pandas as pd
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.inspection import permutation_importance
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit
from sklearn.pipeline import Pipeline
from .DumpHelper import DumpHelper
from .TimerStep import TimerStep
logger = logging.getLogger(__name__)
def train(task, strategy, RS=None, dump_idx_only=False, T=0, n_bagging=None,
train_size=None, n_permutation=None, asked_fold=None,
results_folder=None):
"""Train a model (strategy) on some data (task) and dump results.
Parameters
----------
task : Task object
Define a prediction task. Used to retrieve the data of the wanted task.
strategy : Strategy object
Define the method (imputation + model) to use.
RS : int
Define a random state.
T : int
Trial number for the ANOVA selection step, from 1 to 5 if 5 trials for
the ANOVA selection.
        Used only for folder names when dumping results.
    n_bagging : int or None
        Number of estimators to use for bagging; None disables bagging.
"""
if task.is_classif() != strategy.is_classification() and not dump_idx_only:
raise ValueError('Task and strategy mix classif and regression.')
X, y = task.X, task.y # Expensive data retrieval is hidden here
logger.info(f'Started task "{task.meta.tag}" '
f'using "{strategy.name}" strategy on "{task.meta.db}".')
logger.info(f'X shape: {X.shape}')
logger.info(f'y shape: {y.shape}')
if RS is not None:
logger.info(f'Resetting strategy RS to {RS}')
strategy.reset_RS(RS) # Must be done before init DumpHelper
dh = DumpHelper(task, strategy, RS=RS, T=T, n_bagging=n_bagging,
results_folder=results_folder) # Used to dump results
# Create timer steps used in the pipeline to time training time
timer_start = TimerStep('start')
timer_mid = TimerStep('mid')
# Create pipeline with imputation and hyper-parameters tuning
if strategy.imputer is not None: # Has an imputation step
logger.info('Creating pipeline with imputer.')
steps = [
('timer_start', timer_start),
('imputer', strategy.imputer), # Imputation step
('timer_mid', timer_mid),
('searchCV_estimator', strategy.search), # HP tuning step
]
else:
logger.info('Creating pipeline without imputer.')
steps = [
('timer_mid', timer_mid),
('searchCV_estimator', strategy.search), # HP tuning step
]
estimator = Pipeline(steps)
if n_bagging is not None:
global_timer_start = TimerStep('global_start')
Bagging = BaggingClassifier if strategy.is_classification() else BaggingRegressor
estimator = Bagging(estimator, n_estimators=n_bagging, random_state=RS)
estimator = Pipeline([
('global_timer_start', global_timer_start),
('bagged_estimator', estimator),
])
print(f'Using {Bagging} with {n_bagging} estimators and RS={RS}.')
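        # Resulting structure: Pipeline(global_timer_start -> BaggingClassifier/Regressor
        # wrapping n_bagging copies of the (timers + imputer + searchCV) pipeline built above).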
logger.info('Before size loop')
# Size of the train set
train_set_steps = strategy.train_set_steps if train_size is None else [train_size]
for n in train_set_steps:
print(f'SIZE {n}')
logger.info(f'Size {n}')
n_tot = X.shape[0]
if n_tot - n < strategy.min_test_set*n_tot:
# Size of the test set too small, skipping
continue
# Choose right splitter depending on classification or regression
if task.is_classif():
ss = StratifiedShuffleSplit(n_splits=strategy.n_splits,
test_size=n_tot-n,
random_state=RS)
else:
ss = ShuffleSplit(n_splits=strategy.n_splits, test_size=n_tot-n,
random_state=RS)
        # Repeatedly draw train and test sets
for i, (train_idx, test_idx) in enumerate(ss.split(X, y)):
print(f'FOLD {i}')
if asked_fold is not None and i != asked_fold:
print('skipped')
continue
X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]
y_train, y_test = y.iloc[train_idx], y.iloc[test_idx]
# Used to save the IDs of the sub-sampled dataset.
if dump_idx_only:
logger.info(f'Dumped IDs of {task.meta.tag}, size={n}, trial={T}, fold={i}')
folder = relpath('ids/')
os.makedirs(folder, exist_ok=True)
name = task.meta.name.replace('pvals', 'screening')
trial = int(T) + 1
fold = i + 1
common = f'{task.meta.db}-{name}-size{n}-trial{trial}-fold{fold}'
filepath_idx_train = join(folder, f'{common}-train-idx.csv')
filepath_idx_test = join(folder, f'{common}-test-idx.csv')
filepath_col_train = join(folder, f'{common}-train-col.csv')
filepath_col_test = join(folder, f'{common}-test-col.csv')
pd.Series(X_train.index).to_csv(filepath_idx_train, index=False)
| pd.Series(X_test.index) | pandas.Series |
import requests
from bs4 import BeautifulSoup
import pandas as pd
# This function displays an ASCII art banner at the start of the program
def display_banner():
print(r'''
_ _ _ _____
| \ | | | | / ___|
| \| |_ _ _ __ ___ | |__ ___ _ __ \ `--. ___ _ __ __ _ _ __ ___ _ __
| . ` | | | | '_ ` _ \| '_ \ / _ \ '__| `--. \/ __| '__/ _` | '_ \ / _ \ '__|
| |\ | |_| | | | | | | |_) | __/ | /\__/ / (__| | | (_| | |_) | __/ |
\_| \_/\__,_|_| |_| |_|_.__/ \___|_| \____/ \___|_| \__,_| .__/ \___|_|
| |
|_|
New Zealand Lotto Number Scraper v1.0
Coded by <NAME> on Python 3.8.5
<EMAIL>
''')
# This function specifies the page we will be extracting the data/results from
def extract(page):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36'} #lets the host know that we are not a bot
url = f'https://home.nzcity.co.nz/lotto/lotto.aspx?draw={page}' # Each page contains a new set of results
r = requests.get(url, headers)
soup = BeautifulSoup(r.content, 'html.parser')
return soup # This is our raw data
# This function loops through all tables that contain results on the page, finding each ball's number as well as the date of the draw
def transform(soup):
number_tables = soup.find_all('table', class_ = 'lottomain') # Finds all result tables
for item in number_tables:
ball1_data = soup.find('img', id = 'ctl00_ContentPlaceHolder1_ball1', alt = True) # Results are all images so the alt text is selected
ball2_data = soup.find('img', id = 'ctl00_ContentPlaceHolder1_ball2', alt = True)
ball3_data = soup.find('img', id = 'ctl00_ContentPlaceHolder1_ball3', alt = True)
ball4_data = soup.find('img', id = 'ctl00_ContentPlaceHolder1_ball4', alt = True)
ball5_data = soup.find('img', id = 'ctl00_ContentPlaceHolder1_ball5', alt = True)
ball6_data = soup.find('img', id = 'ctl00_ContentPlaceHolder1_ball6', alt = True)
bonusBall_data = soup.find('img', id = 'ctl00_ContentPlaceHolder1_bonus1', alt = True)
# A dictionary of the results is created
lotto_numbers = {
'ball1' : ball1_data['alt'],
'ball2' : ball2_data['alt'],
'ball3' : ball3_data['alt'],
'ball4' : ball4_data['alt'],
'ball5' : ball5_data['alt'],
'ball6' : ball6_data['alt'],
'bonusBall' : bonusBall_data['alt'].replace('Bonus number ', '')
}
draw_date = soup.find(id = 'ctl00_ContentPlaceHolder1_LottoDrawDate') # The draw date is situated outside of the draw results table in the main part of the soup
lotto_date = {
'drawDate' : draw_date.string # The draw date is changed to a string and added to a dictionary
}
total_results = {
            **lotto_date, **lotto_numbers # The two dictionaries of draw results and draw dates are combined
}
        results.append(total_results) # The new dictionary is appended to the results list
return
# This function looks at the latest draw number to determine how many times to run the for loop to get the results data
def totalDraws():
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36'}
url = f'https://home.nzcity.co.nz/lotto/lotto.aspx' # The main homepage of the NZ lotto results, it has the last draw number
r = requests.get(url, headers)
main_page_soup = BeautifulSoup(r.content, 'html.parser')
latest_draw = main_page_soup.find(id = 'ctl00_ContentPlaceHolder1_LottoDrawNumber').string # The label id on the main page that contains the latest draw number. It is changed to a string as a label is initially returned
return latest_draw
# This function is the main function that runs the extract and transform functions
def mainloop():
    for i in range((total_num_draws - (history_weeks * 2)), total_num_draws, 1): # This loop iterates through the total number of draws/pages. It starts at the specified user-input history (*2 because there are 2 draws per week) and goes up to the latest (newest) draw.
print(f'Getting draw result number {i} of {total_num_draws -1} total draws.')
        c = extract(i) # Extracts all the data from page number i
transform(c) # Transforms the data we need
return
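# Note: mainloop() relies on the module-level variables total_num_draws, history_weeks
# and results being defined before it is called (results is filled by transform()).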
# This function saves the results to a CSV file
def saveToCsv(results):
df = | pd.DataFrame(results) | pandas.DataFrame |
import pandas as pd
import plotly.express as px
import panel as pn
data = {
"Day": ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday",],
"Orders": [15539, 21345, 18483, 24003, 23489, 24092, 12034],
}
dataframe = | pd.DataFrame(data) | pandas.DataFrame |
"""
accounting.py
Accounting and Financial functions.
project : pf
version : 0.0.0
status : development
modifydate :
createdate :
website : https://github.com/tmthydvnprt/pf
author : tmthydvnprt
email : <EMAIL>
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2016, tmthydvnprt
credits :
"""
import datetime
import numpy as np
import pandas as pd
from pf.constants import DAYS_IN_YEAR
from pf.util import get_age
################################################################################################################################
# Financial Statements
################################################################################################################################
def calc_balance(accounts=None, category_dict=None):
"""
Calculate daily balances of grouped assets/liabilities based on `category_dict`s from `accounts`, returns a DataFrame.
Balance sheet is split into these sections:
Assets
Current
Cash
...
Long Term
Investments
Property
...
Liabilities
Current
Credit Card
...
Long Term
Loans
...
categories = {
'Assets' : {
'Current': {
# User category keys and account DataFrame columns list for values
'Cash & Cash Equivalents': [
('Cash', 'BofA Checking'),
('Cash', 'BofA Savings'),
...
],
'User Category': [...]
...
},
'Long Term': {...}
},
'Liabilities' : {
'Current': {...},
'Long Term': {...}
}
}
"""
# Aggregate accounts based on category definition, via 3 level dictionary comprehension
balance_dict = {
(k0, k1, k2): accounts[v2].sum(axis=1) if v2 else pd.Series(0, index=accounts.index)
for k0, v0 in category_dict.iteritems()
for k1, v1 in v0.iteritems()
for k2, v2 in v1.iteritems()
}
# Convert to DataFrame
balance = pd.DataFrame(balance_dict)
return balance.fillna(0.0)
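# Minimal usage sketch (hypothetical account names, taken from the docstring above;
# `accounts` is assumed to be a daily balances DataFrame with those columns):
#
#   categories = {
#       'Assets': {
#           'Current': {'Cash & Cash Equivalents': [('Cash', 'BofA Checking')]},
#           'Long Term': {'Investments': []},
#       },
#       'Liabilities': {
#           'Current': {'Credit Card': []},
#           'Long Term': {'Loans': []},
#       },
#   }
#   balance = calc_balance(accounts, category_dict=categories)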
def balance_sheet(balance=None, period=datetime.datetime.now().year):
"""
Calculate and return a balance sheet.
Balance will be based on the last entry of account data (e.g. December 31st) for the given `period` time period,
which defaults to the current year.
    All levels may be user defined by the category dictionary. The value of the last level must contain valid pandas DataFrame
column selectors, e.g. `Account Type` for single index column / level 0 access or `('Cash', 'Account Name')` for
multilevel indexing.
If a sequence of periods is passed, each period's data will be calculated and concatenated as MultiIndex columns.
Example:
```
balance = calc_balance(accounts, category_dict=categories)
balancesheet = balance_sheet(balance, period=2015)
```
"""
# Force to list, so code below is the same for all cases
if not isinstance(period, list):
period = [period]
balance_sheets = []
for p in period:
# Force period to string
p = str(p)
# Sum over Period and convert to Statement DataFrame
p_balance = pd.DataFrame(balance[p].iloc[-1])
p_balance.columns = ['$']
p_balance.index.names = ['Category', 'Type', 'Item']
# Calculate Net
net = p_balance[['$']].sum(level=[0, 1]).sum(level=1)
net.index = pd.MultiIndex.from_tuples([('Net', x0, 'Total') for x0 in net.index])
net.index.names = ['Category', 'Type', 'Item']
# Add Net
balance_df = pd.concat([p_balance, net])
# Calculate percentages of level 0
balance_df['%'] = 100.0 * balance_df.div(balance_df.sum(level=0), level=0)
        # Calculate hierarchical totals
l1_totals = balance_df.sum(level=[0, 1])
l1_totals.index = pd.MultiIndex.from_tuples([(x0, x1, 'Total') for x0, x1 in l1_totals.index])
l1_totals.index.names = ['Category', 'Type', 'Item']
l0_totals = balance_df.sum(level=[0])
l0_totals.index = pd.MultiIndex.from_tuples([(x0, 'Total', ' ') for x0 in l0_totals.index])
l0_totals.index.names = ['Category', 'Type', 'Item']
# Add totals to dataframe
balance_df = balance_df.combine_first(l1_totals)
balance_df = balance_df.combine_first(l0_totals)
# Update columns with period
balance_df.columns = pd.MultiIndex.from_product([[p], balance_df.columns])
# Add to main list
balance_sheets.append(balance_df)
# Concatenate all the periods together
balance_sheets_df = pd.concat(balance_sheets, 1)
return balance_sheets_df
def calc_income(paychecks=None, transactions=None, category_dict=None, tax_type=None):
"""
Calculate daily income of grouped revenue/expenses/taxes based on `category_dict`s from `paychecks` and `transactions`,
returns a DataFrame.
Income Statement is split into these sections:
Revenue
Operating
Technical Services
...
Non-Operating
Interest Income
Dividend & Capital Gains
...
Expenses
Operating
Medical
...
Non-Operating
...
Taxes
Operating
Federal
State
...
    All levels may be user defined by the category dictionary. However the last level must contain a dictionary
    with at least a `categories` key and set of categories for the value along with optional parameters.
```
'Revenue': {
'Operating': {
# Paychecks
'Technical Services': {
'source': 'paycheck', # Optional string to select data source, defaults to 'transactions'
'categories': {'Paycheck', ...}, # Required set of categories
'labels': set(), # Optional set of labels, defaults to set() if not passed in
'logic': '', # Optional 'not' string to set inverse of 'labels', defaults to ''
'tax_type' '' # Optional string for tax ('realized' or 'unrealized'), defaults to 'realized'
},
'User Category': {...}
},
'Non-Operating': {
'User Category': {
'categories': {...}
}
}
},
'Expenses': {
'Operating': {...},
'Non-Operating': {..}
},
'Taxes': {
'Operating': {...},
'Non-Operating': {..}
}
```
"""
# Clean category
for k0, v0 in category_dict.iteritems():
for k1, v1 in v0.iteritems():
for k2, v2 in v1.iteritems():
if not v2.has_key('source'):
category_dict[k0][k1][k2]['source'] = 'transactions'
if not v2.has_key('labels'):
category_dict[k0][k1][k2]['labels'] = set()
if not v2.has_key('logic'):
category_dict[k0][k1][k2]['logic'] = ''
if not v2.has_key('agg'):
category_dict[k0][k1][k2]['agg'] = np.ones(len(category_dict[k0][k1][k2]['categories']))
if not v2.has_key('tax_type'):
category_dict[k0][k1][k2]['tax_type'] = 'realized'
# Aggregate accounts based on category definition, via 3 level dictionary comprehension
income_dict = {}
for k0, v0 in category_dict.iteritems():
for k1, v1 in v0.iteritems():
for k2, v2 in v1.iteritems():
if v2['source'] == 'transactions':
income_dict[(k0, k1, k2)] = transactions[
(
# If it is in the category
transactions['Category'].isin(v2['categories'])
& transactions['Account Name'].isin(tax_type[v2['tax_type']])
) & (
# And if is has the correct label
(transactions['Labels'].apply(
lambda x: x.isdisjoint(v2['labels']) if v2['logic'] else not x.isdisjoint(v2['labels'])
)) |
# Or it does not have any labels
(transactions['Labels'].apply(lambda x: v2['labels'] == set()))
)
]['Amount']
else:
income_dict[(k0, k1, k2)] = (v2['agg'] * paychecks[list(v2['categories'])]).sum(axis=1)
# Convert to DataFrame
cats = income_dict.keys()
cats.sort()
income = pd.DataFrame(
data=[],
columns=pd.MultiIndex.from_tuples(cats),
index=pd.date_range(transactions.index[-1], transactions.index[0])
)
for cat in income_dict:
cat_df = pd.DataFrame(income_dict[cat].values, index=income_dict[cat].index, columns=pd.MultiIndex.from_tuples([cat]))
income[cat] = cat_df.groupby(lambda x: x.date()).sum()
return income.fillna(0.0)
def income_statement(income=None, period=datetime.datetime.now().year, nettax=None):
"""
Calculate and return an Income Statement.
    Income will be summed over the given `period` time period,
    which defaults to the current year.
If a sequence of periods is passed, each period's data will be calculated and concatenated as MultiIndex columns.
Example:
```
income = calc_income(paychecks=paychecks, transactions=transactions, category_dict=categories)
incomestatement = income_statement(income, period=2016)
```
"""
# Force to list, so code below is the same for all cases
if not isinstance(period, list):
period = [period]
income_statements = []
for p in period:
# Force period to string and set default nettax
p = str(p)
nettax = nettax if nettax else {'Taxes'}
# Convert to DataFrame
p_income = pd.DataFrame(income[p].sum(), columns=['$'])
p_income.index.names = ['Category', 'Type', 'Item']
# Calculate percentages of level 0
p_income['%'] = 100.0 * p_income.div(p_income.sum(level=0), level=0)
        # Calculate hierarchical totals
l1_totals = p_income.sum(level=[0, 1])
l1_totals.index = pd.MultiIndex.from_tuples([(x0, x1, 'Total') for x0, x1 in l1_totals.index])
l1_totals.index.names = ['Category', 'Type', 'Item']
l0_totals = p_income.sum(level=[0])
l0_totals.index = pd.MultiIndex.from_tuples([(x0, 'Total', ' ') for x0 in l0_totals.index])
l0_totals.index.names = ['Category', 'Type', 'Item']
# Add totals to dataframe
p_income = p_income.combine_first(l1_totals)
p_income = p_income.combine_first(l0_totals)
# Calculate Net
before = [(x, 'Total', ' ') for x in set(p_income.index.levels[0]).difference(nettax)]
after = [(x, 'Total', ' ') for x in set(p_income.index.levels[0])]
net = pd.DataFrame({
'$': [
p_income.loc[before]['$'].sum(),
p_income.loc[after]['$'].sum(),
p_income.loc[after]['$'].sum()
]
}, index=pd.MultiIndex.from_tuples([
('Net', 'Net Income', 'Before Taxes'),
('Net', 'Net Income', 'After Taxes'),
('Net', 'Total', ' ')
]))
# Add Net
income_df = pd.concat([p_income, net])
# Update columns with period
income_df.columns = pd.MultiIndex.from_product([[p], income_df.columns])
# Add to main list
income_statements.append(income_df)
# Concatenate all the periods together
income_statement_df = pd.concat(income_statements, 1)
return income_statement_df
def calc_cashflow(transactions=None, category_dict=None, tax_type=None):
"""
Calculate daily cashflow of grouped inflow/outflow based on `category_dict`s from `transactions`, returns a DataFrame.
Cashflow is split into these sections:
Inflow
Operating
Technical Services
...
Non-Operating
Interest Income
Dividend & Capital Gains
...
Outflow
Operating
Rent
Food
...
Non-Operating
Interest Payments
...
    All of the first 3 levels may be user defined by the category dictionary. However the last level must contain a dictionary
    with at least a `categories` key and set of categories for the value along with optional parameters.
```
categories = {
'Inflow': {
'Operating': {
# Paychecks
'Technical Services': {
'categories': {'Paycheck', }, # Required set of categories
'labels': set(), # Optional set of labels, defaults to set() if not passed in
'logic': '' # Optional 'not' string to set inverse of 'labels', defaults to ''
'tax_type' '' # Optional string for tax ('realized' or 'unrealized'), defaults to 'realized'
},
'User Category': {...}
},
'Non-Operating': {
'User Category': {
'categories': {...}
}
}
},
'Outflow': {
'Operating': {...},
'Non-Operating': {..}
}
}
```
"""
    # Add an empty 'labels' key to each nested dictionary that does not have one
# Add default 'logic' if it does not exist
for k0, v0 in category_dict.iteritems():
for k1, v1 in v0.iteritems():
for k2, v2 in v1.iteritems():
if not v2.has_key('labels'):
category_dict[k0][k1][k2]['labels'] = set()
if not v2.has_key('logic'):
category_dict[k0][k1][k2]['logic'] = ''
if not v2.has_key('tax_type'):
category_dict[k0][k1][k2]['tax_type'] = 'realized'
# Aggregate transactions based on category definition, via 3 level dictionary comprehension
#pylint: disable=cell-var-from-loop
cashflow_dict = {
(k0, k1, k2): transactions[
# If it is in the category & in the tax type
(transactions['Category'].isin(v2['categories']) & transactions['Account Name'].isin(tax_type[v2['tax_type']])) &
(
# And if is has the correct label
(transactions['Labels'].apply(
lambda x: x.isdisjoint(v2['labels']) if v2['logic'] else not x.isdisjoint(v2['labels'])
)) |
# Or it does not have any labels
(transactions['Labels'].apply(lambda x: v2['labels'] == set()))
)
]['Amount']
for k0, v0 in category_dict.iteritems()
for k1, v1 in v0.iteritems()
for k2, v2 in v1.iteritems()
}
# Convert to DataFrame
cols = cashflow_dict.keys()
cols.sort()
cashflow = pd.DataFrame(
data=[],
columns=pd.MultiIndex.from_tuples(cols),
index=pd.date_range(transactions.index[-1], transactions.index[0])
)
for cat in cashflow_dict:
c = pd.DataFrame(cashflow_dict[cat].values, index=cashflow_dict[cat].index, columns=pd.MultiIndex.from_tuples([cat]))
cashflow[cat] = c.groupby(lambda x: x.date()).sum()
return cashflow.fillna(0.0)
def cashflow_statement(cashflow=None, period=datetime.datetime.now().year):
"""
Return a Cashflow Statement for a period from cashflow DataFrame.
    Cashflow will be summed over the given `period` time period, which
    defaults to the current year. A Net section is automagically calculated.
If a sequence of periods is passed, each period's data will be calculated and concatenated as MultiIndex columns.
Example:
```
cashflow = calc_cashflow(transactions, category_dict=categories)
cashflowstatement = cashflow_statement(cashflow, period=2015)
```
"""
# Force to list, so code below is the same for all cases
if not isinstance(period, list):
period = [period]
cashflow_statements = []
for p in period:
# Force period to string
p = str(p)
# Sum over Period and convert to Statement DataFrame
p_cashflow = pd.DataFrame(cashflow[p].sum(), columns=['$'])
p_cashflow.index.names = ['Category', 'Type', 'Item']
# Calculate Net
net = p_cashflow[['$']].sum(level=[0, 1]).sum(level=1)
net.index = pd.MultiIndex.from_tuples([('Net', x0, 'Total') for x0 in net.index])
net.index.names = ['Category', 'Type', 'Item']
# Add Net
cashflow_df = pd.concat([p_cashflow, net])
# Calculate percentages of level 0
cashflow_df['%'] = 100.0 * cashflow_df.div(cashflow_df.sum(level=0), level=0)
        # Calculate hierarchical totals
l1_totals = cashflow_df.sum(level=[0, 1])
l1_totals.index = pd.MultiIndex.from_tuples([(x0, x1, 'Total') for x0, x1 in l1_totals.index])
l1_totals.index.names = ['Category', 'Type', 'Item']
l0_totals = cashflow_df.sum(level=[0])
l0_totals.index = pd.MultiIndex.from_tuples([(x0, 'Total', ' ') for x0 in l0_totals.index])
l0_totals.index.names = ['Category', 'Type', 'Item']
# Add totals to dataframe
cashflow_df = cashflow_df.combine_first(l1_totals)
cashflow_df = cashflow_df.combine_first(l0_totals)
# Update columns with period
cashflow_df.columns = pd.MultiIndex.from_product([[p], cashflow_df.columns])
# Add to main list
cashflow_statements.append(cashflow_df)
# Concatenate all the periods together
cashflow_statement_df = pd.concat(cashflow_statements, 1)
return cashflow_statement_df
################################################################################################################################
# Net Worth Calculations
################################################################################################################################
def calculate_net_worth(accounts=None):
"""Calculate Net Worth (Assets - Debts) based on `accounts` DataFrame"""
# Aggregate accounts by assets and debts
net_worth = pd.DataFrame({
'Assets': accounts[accounts > 0.0].fillna(0.0).sum(1),
'Debts': accounts[accounts < 0.0].fillna(0.0).sum(1),
})
# Calculate Net Worth
net_worth['Net'] = net_worth['Assets'] + net_worth['Debts']
# Calculate Debt Ratio
net_worth['Debt Ratio'] = 100.0 * (net_worth['Debts'].abs() / net_worth['Assets'])
# Calculate Dollar and Percent Change
for x in ['Assets', 'Debts', 'Net']:
net_worth['{} Change ($)'.format(x)] = net_worth[x].diff(1).fillna(0.0)
net_worth['{} Change (%)'.format(x)] = 100.0 * net_worth[x].pct_change().fillna(0.0)
return net_worth
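# The returned frame holds, per day: 'Assets', 'Debts', 'Net', 'Debt Ratio', plus
# '<X> Change ($)' and '<X> Change (%)' columns for Assets, Debts and Net.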
def calculate_stats(net_worth=None):
"""Calculate the statistics (Current, Max, Min, Mean, Median, Std. Dev.) of a `net_worth` DataFrame"""
# Remove Infs and NaNs
net_worth = net_worth.replace([-np.Inf, np.Inf, np.nan], 0.0)
# Calculate Statistics
stats = pd.DataFrame({'Current': net_worth.iloc[-1]})
stats['Max'] = net_worth.max(skipna=True)
stats['Min'] = net_worth.min(skipna=True)
stats['Mean'] = net_worth.mean(skipna=True)
stats['Median'] = net_worth.median(skipna=True)
stats['Std Dev'] = net_worth.std(skipna=True)
return stats
def calculate_growth(net_worth=None, offsets=None):
"""
Calculates the growth of cetain time periods from a net_worth DataFrame.
The default time periods (1Mo, 3Mo, 6Mo, YTD, 1Yr, 3Yr, 5Yr, Life) may be overriden by providing a list of nested tuples
containing (string label, (pandas `DataTimeOffset` objects)). Multiple `DataTimeOffset` will be added together.
The default is
```
[
('1 Mo', (pd.tseries.offsets.MonthEnd(-1),)),
('3 Mo', (pd.tseries.offsets.MonthEnd(-3),)),
('6 Mo', (pd.tseries.offsets.MonthEnd(-6),)),
('YTD', (-pd.tseries.offsets.YearBegin(), pd.tseries.offsets.MonthEnd())),
('1 Yr', (pd.tseries.offsets.MonthEnd(-1 * 12),)),
('2 Yr', (pd.tseries.offsets.MonthEnd(-2 * 12),)),
('3 Yr', (pd.tseries.offsets.MonthEnd(-3 * 12),)),
('4 Yr', (pd.tseries.offsets.MonthEnd(-4 * 12),)),
('5 Yr', (pd.tseries.offsets.MonthEnd(-5 * 12),)),
('Life', (pd.DateOffset(days=-(net_worth.index[-1] - net_worth.index[0]).days),))
]
```
"""
# Set up
columns = ['Assets', 'Debts', 'Net']
current_index = net_worth.index[-1]
    # Calculate growth over time periods (1mo, 3mo, 6mo, ytd, 1yr, 2yr, 3yr, 4yr, 5yr, life)
offsets = offsets if offsets else [
('1 Mo', (pd.tseries.offsets.MonthEnd(-1),)),
('3 Mo', (pd.tseries.offsets.MonthEnd(-3),)),
('6 Mo', (pd.tseries.offsets.MonthEnd(-6),)),
('YTD', (-pd.tseries.offsets.YearBegin(), pd.tseries.offsets.MonthEnd())),
('1 Yr', (pd.tseries.offsets.MonthEnd(-1 * 12),)),
('2 Yr', (pd.tseries.offsets.MonthEnd(-2 * 12),)),
('3 Yr', ( | pd.tseries.offsets.MonthEnd(-3 * 12) | pandas.tseries.offsets.MonthEnd |
import argparse
from statistics import median_high, median_low
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from qpputils import dataparser as dt
# Define the Font for the plots
# plt.rcParams.update({'font.size': 35, 'font.family': 'serif', 'font.weight': 'normal'})
# Define the Font for the plots
plt.rcParams.update({'font.size': 40, 'font.family': 'Hind Guntur', 'font.weight': 'normal'})
"""The next three lines are used to force matplotlib to use font-Type-1 """
# plt.rcParams['ps.useafm'] = True
# plt.rcParams['pdf.use14corefonts'] = True
# plt.rcParams['text.usetex'] = True
# TODO: add logging and qrels file generation for UQV
QUERY_GROUPS = {'top': 'MaxAP', 'low': 'MinAP', 'medh': 'MedHiAP', 'medl': 'MedLoAP'}
QUANTILES = {'med': 'Med', 'top': 'Top', 'low': 'Low'}
parser = argparse.ArgumentParser(description='Script for query files pre-processing',
epilog='Use this script with Caution')
parser.add_argument('-t', '--queries', default=None, metavar='queries.txt', help='path to UQV queries txt file')
parser.add_argument('--remove', default=None, metavar='queries.txt',
help='path to queries txt file that will be removed from the final file NON UQV ONLY')
parser.add_argument('--group', default='title', choices=['low', 'top', 'medh', 'medl', 'cref'],
help='Return only the <> performing queries of each topic')
parser.add_argument('--quant', default=None, choices=['low', 'high'],
help='Return a quantile of the variants for each topic')
parser.add_argument('--ap', default=None, metavar='QLmap1000', help='path to queries AP results file')
parser.add_argument('--stats', action='store_true', help='Print statistics')
parser.add_argument('--plot_vars', action='store_true', help='Print vars AP graph')
def create_overlap_ref_queries(*queries):
df = dt.QueriesTextParser(queries[0], 'uqv').queries_df
for query_file in queries[1:]:
_df = dt.QueriesTextParser(query_file, 'uqv').queries_df
df = df.merge(_df, how='inner')
print(df)
return df
def add_original_queries(uqv_obj: dt.QueriesTextParser):
"""Don't use this function ! not tested"""
original_obj = dt.QueriesTextParser('QppUqvProj/data/ROBUST/queries.txt')
uqv_df = uqv_obj.queries_df.set_index('qid')
original_df = original_obj.queries_df.set_index('qid')
for topic, vars in uqv_obj.query_vars.items():
uqv_df.loc[vars, 'topic'] = topic
missing_list = []
for topic, topic_df in uqv_df.groupby('topic'):
if original_df.loc[original_df['text'].isin(topic_df['text'])].empty:
missing_list.append(topic)
missing_df = pd.DataFrame({'qid': '341-9-1', 'text': original_obj.queries_dict['341'], 'topic': '341'}, index=[0])
uqv_df = uqv_df.append(missing_df.set_index('qid'))
return uqv_df.sort_index().drop(columns='topic').reset_index()
def convert_vid_to_qid(df: pd.DataFrame):
_df = df.set_index('qid')
_df.rename(index=lambda x: f'{x.split("-")[0]}', inplace=True)
return _df.reset_index()
def filter_quant_variants(qdf: pd.DataFrame, apdb: dt.ResultsReader, q):
"""This function returns a df with QID: TEXT of the queries inside a quantile"""
_apdf = apdb.data_df
_list = []
for topic, q_vars in apdb.query_vars.items():
_df = _apdf.loc[q_vars]
# if 0 in q:
# # For the low quantile, 0 AP variants are removed
# _df = _df[_df['ap'] > 0]
q_vals = _df.quantile(q=q)
_qvars = _df.loc[(_df['ap'] > q_vals['ap'].min()) & (_df['ap'] <= q_vals['ap'].max())]
_list.extend(_qvars.index.tolist())
_res_df = qdf.loc[qdf['qid'].isin(_list)]
return _res_df
def filter_top_queries(qdf: pd.DataFrame, apdb: dt.ResultsReader):
_apdf = apdb.data_df
_list = []
for topic, q_vars in apdb.query_vars.items():
top_var = _apdf.loc[q_vars].idxmax()
_list.append(top_var[0])
_df = qdf.loc[qdf['qid'].isin(_list)]
return _df
def add_topic_to_qdf_from_apdb(qdf, apdb):
"""This functions will add a topic column to the queries DF using apdb"""
if 'topic' not in qdf.columns:
for topic, q_vars in apdb.query_vars.items():
qdf.loc[qdf['qid'].isin(q_vars), 'topic'] = topic
def add_topic_to_qdf(qdf: pd.DataFrame):
"""This functions will add a topic column to the queries DF"""
if 'topic' not in qdf.columns:
if 'qid' in qdf.columns:
qdf = qdf.assign(topic=lambda x: x.qid.apply(lambda y: y.split('-')[0]))
else:
qdf = qdf.reset_index().assign(topic=lambda x: x.qid.apply(lambda y: y.split('-')[0]))
return qdf
def filter_n_top_queries(qdf: pd.DataFrame, apdb: dt.ResultsReader, n):
"""This function returns a DF with top n queries per topic"""
add_topic_to_qdf_from_apdb(qdf, apdb)
_ap_vars_df = | pd.merge(qdf, apdb.data_df, left_on='qid', right_index=True) | pandas.merge |
import sys
import requests
import ConfigParser
#from multiprocessing.dummy import Pool as ThreadPool
from IPython import embed
import pandas as pd
import numpy as np
from genda import calculate_minor_allele_frequency, calculate_ld
from genda.AEI import AEI, dosage_round
pd.options.mode.chained_assignment = None
def get_aei(aei_path):
# Currently
global sample_mix_ups
aei = pd.read_pickle(aei_path)
new_columns =\
[i[0].split("/")[-1].rstrip('.MD.bam').rstrip('\
LA').upper().replace("DN", "D").replace("D ", "D")\
for i in aei.columns][::4]
new_columns = zip(np.repeat(new_columns, repeats=4),
[0,1,2,3]*len(new_columns))
aei.columns = pd.MultiIndex.from_tuples(new_columns)
return(aei)
def get_snp_annot(tabix_iter):
"""
"""
snp_id = []
pos = []
a0 = []
a1 = []
for i in tabix_iter:
i = i.split("\t")
snp_id.append(i[3])
pos.append(i[1])
a0.append(i[6])
a1.append(i[7])
annot_out = pd.DataFrame({'pos': pos, 'a0': a0, 'a1':a1},
index = snp_id)
return(annot_out)
def get_snp_annot2(tabix_file):
outf = pd.read_csv(tabix_file, sep="\t", usecols=[3,0,1,6,7], index_col=2,
compression='gzip')
return(outf)
def get_dosage_ann():
""" Convenience function for getting the dosage and snp annotation
"""
dos = {}
s_ann = {}
dos_path =\
("/export/home/barnarj/CCF_1000G_Aug2013_DatABEL/CCF_1000G_Aug2013_Chr"
"{0}.dose.double.ATB.RNASeq_MEQTL.txt")
SNP_ANNOT =\
("/proj/genetics/Projects/shared/Studies/Impute_CCF_Arrythmia/"
"Projects/CCF/Projects/ATB/Projects/ATB_RNASeq/OutputData/"
"ATB.RNASeq_Variant_Ann.bed.gz")
return(dos, s_ann)
def get_annot_table(reference, annot):
annoti = annot.fetch(reference=reference)
snpid = []
pos = []
a0 = []
a1 = []
for i in annoti:
i = i.split("\t")
snpid.append(i[3])
pos.append(i[1])
a0.append(i[6])
a1.append(i[7])
annot_out = pd.DataFrame({'pos': pos,
'a0': a0, 'a1': a1},
index= | pd.Index(snpid) | pandas.Index |
# In Python and other programming languages, developers (like Phil and Jaleh) use other people's code to accomplish what they wish.
# When a developer wants to let other people use their code, they create a package for it called a module and then let others download that module onto their computer.
# When a module is downloaded onto a computer, that computer then has access to it and can use it for whatever the module was created for.
# In Python we import modules so that our project can use the code inside.
import os
import glob
import pandas as pd
import time
# Variables are used to store information that we wish to reference throughout our project.
# For example, let's say I was creating a PacMan application and every time PacMan ate food I wanted the words 'Nom Nom Nom' to appear.
# Then I would create a 'Variable' that contained the string 'Nom Nom Nom' and reference the variable name whenever I wanted those words to appear.
pacman_says = 'Nom Nom Nom'
print('When I see food I {} it all down\n'.format(pacman_says))
time.sleep(5)
# This creates a variables of all the files we have in the `games` folder that end in `.EVE`
# A variable can be a raw/base type, which is simple in nature, or an object, which is more complicated and created from multiple simple types.
game_files = glob.glob(os.path.join(os.getcwd(), 'games', '*.EVE'))
# ETL - Extract, Transform, Load
# This is one of the primary jobs of a Developer.
# It is here that the incoming user data is massaged and readied for our system's consumption.
# You may be curious as to why we need to look at and fix our clients' code. This is because in software development we can't trust anyone.
# This seems a little mean, I know, but it's not that we distrust everyone; it's that anyone could try to harm your system.
# This way we treat everyone equally and ensure that what they are giving us is what we expect.
game_files.sort()
print('These are 5 of the files we found: {}\n'.format(game_files[:5]))
time.sleep(5)
# Variables live as long as they are in scope, and a scope in Python is defined by an indent (spaces before a line of text).
# For example;
# Scope1{
# Variable1 life begins
# Scope2{
# Variable2 life begins
# Variable2 life ends
# }
# Variable1 life ends
# }
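# A tiny illustration of the idea (this example is not used by the rest of the script):
def scope_demo():
    inner_variable = 'I only exist inside scope_demo'  # inner_variable's life begins here
    return inner_variable  # and ends when the function returns

outer_variable = scope_demo()  # outer_variable lives on in the script's main scope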
game_frames = []
# When we are working on a list of values we can cycle through them one at a time and isolate the value in the list we wish to work on.
for game_file in game_files:
game_frame = pd.read_csv(game_file, names=['type', 'multi2', 'multi3', 'multi5', 'multi6', 'event'])
# Each loop through our game_files list we extract and read another file and add its contents to our main scope 'games' variable
game_frames.append(game_frame)
#-# QUESTION TIME #-# Based on the above code which variable will live past the for loop and which won't?
#-----------#
# Now that the game_frames variable is full of all the data that was in the `games/` folder files, we are ready to concatenate (join all the data together) it into a large variable called a `Dataframe`
games = pd.concat(game_frames)
# With the whole Dataframe now available to us it's time to clean up the values within.
# Let's look in our Dataframe at the column 'multi5' and use .loc to select only the entries with '??' as their value, so we can blank them out.
# This is an example of Extraction in ETL because we've limited what we are seeing to a specific view of the data.
games.loc[games['multi5'] == '??', 'multi5'] = ''
print('These are the 5 top values from our `games` Dataframe:\n{}'.format(games[:5]))
time.sleep(5)
# Awesome work so far!
# Next let's separate and rename the multi2 column in our games Dataframe so we can see the game_ids and the year they played.
identifiers = games['multi2'].str.extract(r'(.LS(\d{4})\d{5})')
identifiers = identifiers.fillna(method='ffill')
print('\nLook at the column names before we change them: \n{}'.format(identifiers[:5]))
# And rename the columns here
identifiers.columns = ['game_id', 'year']
print('\nNow look at the column names after: \n{}'.format(identifiers[:5]))
# This is some final cleanup; it will free up a bit of space on our computers.
games = pd.concat([games, identifiers], axis=1, sort=False)
games = games.fillna(' ')
| pd.Categorical(games['type'].iloc[:]) | pandas.Categorical |
"""Yahoo Finance Mutual Fund Model"""
__docformat__ = "numpy"
import logging
import os
import matplotlib.pyplot as plt
import pandas as pd
from openbb_terminal import feature_flags as obbff
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.mutual_funds import yfinance_model
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_sector(fund: str, min_pct_to_display: float = 5, export: str = ""):
"""Display sector weightings for fund
Parameters
----------
fund: str
Fund symbol
min_pct_to_display: float
Minimum percentage to display sector
export: str
Type of format to export data
"""
sector_weights = yfinance_model.get_information(fund)
if "sectorWeightings" not in sector_weights.keys():
console.print(
f"Sector Weights are not found for {fund}. Either the symbol is incorrect or there is an issue "
"in pulling from yahoo.\n"
)
return
sector_weights = sector_weights["sectorWeightings"]
weights = {}
for weight in sector_weights:
weights.update(weight)
df_weight = | pd.DataFrame.from_dict(weights, orient="index") | pandas.DataFrame.from_dict |
from __future__ import annotations
import copy
import itertools
from typing import (
TYPE_CHECKING,
Sequence,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
internals as libinternals,
)
from pandas._libs.missing import NA
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
Shape,
)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
ensure_dtype_can_hold_na,
find_common_type,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_1d_only_ea_obj,
is_datetime64tz_dtype,
is_dtype_equal,
needs_i8_conversion,
)
from pandas.core.dtypes.concat import (
cast_to_common_type,
concat_compat,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna_all,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
)
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.internals.array_manager import (
ArrayManager,
NullArrayProxy,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
new_block,
)
from pandas.core.internals.managers import BlockManager
if TYPE_CHECKING:
from pandas import Index
def _concatenate_array_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate array managers into one.
Parameters
----------
mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
ArrayManager
"""
# reindex all arrays
mgrs = []
for mgr, indexers in mgrs_indexers:
for ax, indexer in indexers.items():
mgr = mgr.reindex_indexer(
axes[ax], indexer, axis=ax, allow_dups=True, use_na_proxy=True
)
mgrs.append(mgr)
if concat_axis == 1:
# concatting along the rows -> concat the reindexed arrays
# TODO(ArrayManager) doesn't yet preserve the correct dtype
arrays = [
concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))])
for j in range(len(mgrs[0].arrays))
]
else:
# concatting along the columns -> combine reindexed arrays in a single manager
assert concat_axis == 0
arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))
if copy:
arrays = [x.copy() for x in arrays]
new_mgr = ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False)
return new_mgr
def concat_arrays(to_concat: list) -> ArrayLike:
"""
Alternative for concat_compat but specialized for use in the ArrayManager.
Differences: only deals with 1D arrays (no axis keyword), assumes
ensure_wrapped_if_datetimelike and does not skip empty arrays to determine
the dtype.
In addition ensures that all NullArrayProxies get replaced with actual
arrays.
Parameters
----------
to_concat : list of arrays
Returns
-------
np.ndarray or ExtensionArray
"""
# ignore the all-NA proxies to determine the resulting dtype
to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)]
dtypes = {x.dtype for x in to_concat_no_proxy}
single_dtype = len(dtypes) == 1
if single_dtype:
target_dtype = to_concat_no_proxy[0].dtype
elif all(x.kind in ["i", "u", "b"] and isinstance(x, np.dtype) for x in dtypes):
# GH#42092
target_dtype = np.find_common_type(list(dtypes), [])
else:
target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy])
if target_dtype.kind in ["m", "M"]:
# for datetimelike use DatetimeArray/TimedeltaArray concatenation
# don't use arr.astype(target_dtype, copy=False), because that doesn't
# work for DatetimeArray/TimedeltaArray (returns ndarray)
to_concat = [
arr.to_array(target_dtype) if isinstance(arr, NullArrayProxy) else arr
for arr in to_concat
]
return type(to_concat_no_proxy[0])._concat_same_type(to_concat, axis=0)
to_concat = [
arr.to_array(target_dtype)
if isinstance(arr, NullArrayProxy)
else cast_to_common_type(arr, target_dtype)
for arr in to_concat
]
if isinstance(to_concat[0], ExtensionArray):
cls = type(to_concat[0])
return cls._concat_same_type(to_concat)
result = np.concatenate(to_concat)
# TODO decide on exact behaviour (we shouldn't do this only for empty result)
# see https://github.com/pandas-dev/pandas/issues/39817
if len(result) == 0:
# all empties -> check for bool to not coerce to float
kinds = {obj.dtype.kind for obj in to_concat_no_proxy}
if len(kinds) != 1:
if "b" in kinds:
result = result.astype(object)
return result
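# Illustrative sketch (not part of pandas itself, added only as a usage note):
# mixing int64 and float64 1D inputs resolves to float64 via find_common_type, e.g.
#   concat_arrays([np.array([1, 2]), np.array([3.0])]) -> array([1., 2., 3.])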
def concatenate_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
BlockManager
"""
# TODO(ArrayManager) this assumes that all managers are of the same type
if isinstance(mgrs_indexers[0][0], ArrayManager):
return _concatenate_array_managers(mgrs_indexers, axes, concat_axis, copy)
concat_plans = [
_get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
]
concat_plan = _combine_concat_plans(concat_plans, concat_axis)
blocks = []
for placement, join_units in concat_plan:
unit = join_units[0]
blk = unit.block
if len(join_units) == 1 and not join_units[0].indexers:
values = blk.values
if copy:
values = values.copy()
else:
values = values.view()
fastpath = True
elif _is_uniform_join_units(join_units):
vals = [ju.block.values for ju in join_units]
if not blk.is_extension:
# _is_uniform_join_units ensures a single dtype, so
# we can use np.concatenate, which is more performant
# than concat_compat
values = np.concatenate(vals, axis=blk.ndim - 1)
else:
# TODO(EA2D): special-casing not needed with 2D EAs
values = concat_compat(vals, axis=1)
values = ensure_block_shape(values, blk.ndim)
values = ensure_wrapped_if_datetimelike(values)
fastpath = blk.values.dtype == values.dtype
else:
values = _concatenate_join_units(join_units, concat_axis, copy=copy)
fastpath = False
if fastpath:
b = blk.make_block_same_class(values, placement=placement)
else:
b = new_block(values, placement=placement, ndim=len(axes))
blocks.append(b)
return BlockManager(tuple(blocks), axes)
def _get_mgr_concatenation_plan(mgr: BlockManager, indexers: dict[int, np.ndarray]):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
    # Calculate post-reindex shape, save for item axis which will be separate
# for each block anyway.
mgr_shape_list = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape_list[ax] = len(indexer)
mgr_shape = tuple(mgr_shape_list)
has_column_indexer = False
if 0 in indexers:
has_column_indexer = True
ax0_indexer = indexers.pop(0)
blknos = algos.take_nd(mgr.blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_nd(mgr.blklocs, ax0_indexer, fill_value=-1)
else:
if mgr.is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
blknos = mgr.blknos
blklocs = mgr.blklocs
plan = []
for blkno, placements in libinternals.get_blkno_placements(blknos, group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape_list = list(mgr_shape)
shape_list[0] = len(placements)
shape = tuple(shape_list)
if blkno == -1:
# only reachable in the `0 in indexers` case
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs)
and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
(
(
not has_column_indexer
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1
)
or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()
)
)
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
class JoinUnit:
def __init__(self, block, shape: Shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
# Note: block is None implies indexers is None, but not vice-versa
if indexers is None:
indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self) -> str:
return f"{type(self).__name__}({repr(self.block)}, {self.indexers})"
@cache_readonly
def needs_filling(self) -> bool:
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
blk = self.block
if blk is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return blk.dtype
return ensure_dtype_can_hold_na(blk.dtype)
def _is_valid_na_for(self, dtype: DtypeObj) -> bool:
"""
Check that we are all-NA of a type/dtype that is compatible with this dtype.
Augments `self.is_na` with an additional check of the type of NA values.
"""
if not self.is_na:
return False
if self.block is None:
return True
if self.dtype == object:
values = self.block.values
return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))
na_value = self.block.fill_value
if na_value is NaT and not is_dtype_equal(self.dtype, dtype):
# e.g. we are dt64 and other is td64
# fill_values match but we should not cast self.block.values to dtype
# TODO: this will need updating if we ever have non-nano dt64/td64
return False
if na_value is NA and needs_i8_conversion(dtype):
# FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat
# e.g. self.dtype == "Int64" and dtype is td64, we dont want
# to consider these as matching
return False
# TODO: better to use can_hold_element?
return is_valid_na_for_dtype(na_value, dtype)
@cache_readonly
def is_na(self) -> bool:
if self.block is None:
return True
if not self.block._can_hold_na:
return False
values = self.block.values
if isinstance(self.block.values.dtype, SparseDtype):
return False
elif self.block.is_extension:
# TODO(EA2D): no need for special case with 2D EAs
values_flat = values
else:
values_flat = values.ravel(order="K")
return isna_all(values_flat)
def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self._is_valid_na_for(empty_dtype):
# note: always holds when self.block is None
blk_dtype = getattr(self.block, "dtype", None)
if blk_dtype == np.dtype("object"):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order="K")
if len(values) and values[0] is None:
fill_value = None
if is_datetime64tz_dtype(empty_dtype):
i8values = np.full(self.shape, fill_value.value)
return DatetimeArray(i8values, dtype=empty_dtype)
elif is_1d_only_ea_dtype(empty_dtype):
empty_dtype = cast(ExtensionDtype, empty_dtype)
cls = empty_dtype.construct_array_type()
missing_arr = cls._from_sequence([], dtype=empty_dtype)
ncols, nrows = self.shape
assert ncols == 1, ncols
empty_arr = -1 * np.ones((nrows,), dtype=np.intp)
return missing_arr.take(
empty_arr, allow_fill=True, fill_value=fill_value
)
elif isinstance(empty_dtype, ExtensionDtype):
# TODO: no tests get here, a handful would if we disabled
# the dt64tz special-case above (which is faster)
cls = empty_dtype.construct_array_type()
missing_arr = cls._empty(shape=self.shape, dtype=empty_dtype)
missing_arr[:] = fill_value
return missing_arr
else:
# NB: we should never get here with empty_dtype integer or bool;
# if we did, the missing_arr.fill would cast to gibberish
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if (not self.indexers) and (not self.block._can_consolidate):
# preserve these for validation in concat_compat
return self.block.values
if self.block.is_bool:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.values
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax)
return values
def _concatenate_join_units(
join_units: list[JoinUnit], concat_axis: int, copy: bool
) -> ArrayLike:
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype = _get_empty_dtype(join_units)
has_none_blocks = any(unit.block is None for unit in join_units)
upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks)
to_concat = [
ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)
for ju in join_units
]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy:
if isinstance(concat_values, np.ndarray):
# non-reindexed (=not yet copied) arrays are made into a view
# in JoinUnit.get_reindexed_values
if concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = concat_values.copy()
elif any(is_1d_only_ea_obj(t) for t in to_concat):
# TODO(EA2D): special case not needed if all EAs used HybridBlocks
# NB: we are still assuming here that Hybrid blocks have shape (1, N)
# concatting with at least one EA means we are concatting a single column
# the non-EA values are 2D arrays with shape (1, n)
# error: Invalid index type "Tuple[int, slice]" for
# "Union[ExtensionArray, ndarray]"; expected type "Union[int, slice, ndarray]"
to_concat = [
t if | is_1d_only_ea_obj(t) | pandas.core.dtypes.common.is_1d_only_ea_obj |
import pytest
import pytz
import dateutil
import numpy as np
from datetime import datetime
from dateutil.tz import tzlocal
import pandas as pd
import pandas.util.testing as tm
from pandas import (DatetimeIndex, date_range, Series, NaT, Index, Timestamp,
Int64Index, Period)
class TestDatetimeIndex(object):
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
# GH 18951: tz-aware to tz-aware
idx = date_range('20170101', periods=4, tz='US/Pacific')
result = idx.astype('datetime64[ns, US/Eastern]')
expected = date_range('20170101 03:00:00', periods=4, tz='US/Eastern')
| tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
"""Tests the interval operations in the hicognition library"""
import unittest
import pandas as pd
from pandas.testing import assert_frame_equal
from hicognition import interval_operations
class TestChunkIntervals(unittest.TestCase):
"""Tests for chunk_intervals"""
@classmethod
def setUp(cls):
cls.expected = [
pd.DataFrame(
{"chrom": ["chr1"] * 3, "start": [900000] * 3, "end": [950000] * 3}
),
pd.DataFrame(
{"chrom": ["chr1"] * 3, "start": [950000] * 3, "end": [1000000] * 3}
),
pd.DataFrame(
{"chrom": ["chr1"] * 3, "start": [1000000] * 3, "end": [1050000] * 3}
),
pd.DataFrame(
{"chrom": ["chr1"] * 3, "start": [1050000] * 3, "end": [1100000] * 3}
),
]
def test_regions_single_position(self):
"""tests for when regions are defined via
single column."""
test_region = pd.DataFrame(
{"chrom": ["chr1"] * 3, "pos": [1000000, 1000010, 1000050]}
)
# call function
result = interval_operations.chunk_intervals(test_region, 100000, 50000)
# test length
self.assertEqual(len(result), 4)
# test whether chunked frames are correct
for actual_df, expected_df in zip(result, self.expected):
assert_frame_equal(actual_df, expected_df)
def test_regions_dual_position(self):
"""tests for when regions are defined via
single column."""
test_region = pd.DataFrame(
{
"chrom": ["chr1"] * 3,
"start": [950000, 950010, 950050],
"end": [1050000, 1050010, 1050050],
}
)
# call function
result = interval_operations.chunk_intervals(test_region, 100000, 50000)
# test length
self.assertEqual(len(result), 4)
# test whether chunked frames are correct
for actual_df, expected_df in zip(result, self.expected):
assert_frame_equal(actual_df, expected_df)
class TestChunkIntervalsVariableSize(unittest.TestCase):
"""Tests for chunk intervals with variable size."""
@classmethod
def setUp(cls):
cls.expected_first = [
pd.DataFrame(
{"chrom": ["chr1"], "start": [80 + offset], "end": [80 + offset + 10]}
)
for offset in range(0, 140, 10)
]
cls.expected_second = [
pd.DataFrame(
{
"chrom": ["chr6"],
"start": [-325000 + offset],
"end": [-325000 + offset + 150000],
}
)
for offset in range(0, 4200000, 150000)
]
def test_single_region(self):
"""tests single region"""
test_region = pd.DataFrame({"chrom": ["chr1"], "start": [100], "end": [200]})
# call function
result = interval_operations.chunk_intervals_variable_size(test_region, 10, 0.2)
# test length
self.assertEqual(len(result), len(self.expected_first))
# test whether chunked frames are correct
for actual_df, expected_df in zip(result, self.expected_first):
assert_frame_equal(actual_df, expected_df)
def test_region_border_chromosome(self):
"""tests multiple regions"""
test_region = pd.DataFrame(
{"chrom": ["chr6"], "start": [275000], "end": [3275000]}
)
# call function
result = interval_operations.chunk_intervals_variable_size(test_region, 5, 0.2)
# test length
self.assertEqual(len(result), len(self.expected_second))
# test whether chunked frames are correct
for actual_df, expected_df in zip(result, self.expected_second):
| assert_frame_equal(actual_df, expected_df) | pandas.testing.assert_frame_equal |
import os
from requests.exceptions import HTTPError
import pandas as pd
import numpy as np
from polygon import RESTClient
import alpaca_trade_api as ati
data_columns = ['bid','ask']
def get_data_for_symbol(symbol, client, date, stop_time=None, start_time=None, limit=200):
"""
Fetches full volume quote data from polygon.io for a symbol and a chosen date.
Parameters
----------
symbol : str
STOCK/ETF name to get data of
client : RESTClient
polygon.io api client for fetching data
date : str
Date string in format to be read by pandas.Timestamp ('YYYY-MM-DD').
stop_time : int, optional
Time to stop data collection in ns after UNIX epoch. The default is to collect until 4 pm.
start_time : int, optional
Time to start data collection in ns after UNIX epoch. The default is to start at 9:30 am.
limit : int, optional
Maximum number of times to request data. The default is 200.
Returns
-------
bool
Whether the data collection was successful.
pd.DataFrame
retrieved data with the index being seconds after 9:30am.
"""
if start_time is None:
dt = pd.Timestamp(date, tz='US/Eastern') + pd.Timedelta(hours=9, minutes=30)
start_time = int(dt.tz_convert('UTC').asm8)
if stop_time is None:
dt = pd.Timestamp(date, tz='US/Eastern') + pd.Timedelta(hours=16, minutes=0)
stop_time = int(dt.tz_convert('UTC').asm8)
result=[]
counter=0
while (start_time < stop_time) and (counter < limit):
counter += 1
try:
response = client.historic_n___bbo_quotes_v2(symbol, date, limit=50000, timestamp=start_time)
except HTTPError:
print('HTTP error occurred for {} on {} at {}'.format(symbol, date, start_time))
return 0, pd.DataFrame()
if not response.success:
print('Response marked as failure for {} on {} at {}'.format(symbol, date, start_time))
return 0, pd.DataFrame()
if not response.results:
print('Response was empty for {} on {} at {}'.format(symbol, date, start_time))
return 0, pd.DataFrame()
result += [{'time':r['t'], 'bid':r['p'], 'ask':r['P']} for r in response.results]
start_time = response.results[-1]['t']
if len(response.results) != 50000:
break
if (counter == limit) and (start_time < stop_time):
print('limit reached at {} samples'.format(len(result)))
success = False
else:
success = True
ba_df = pd.DataFrame(result)
ba_df = ba_df.loc[ba_df.time < stop_time, :]
ba_df.index = (pd.DatetimeIndex(ba_df.time,tz='UTC') - pd.Timestamp(date, tz='US/Eastern').tz_convert('UTC') - pd.Timedelta(hours=9, minutes=30)) / | pd.Timedelta(seconds=1) | pandas.Timedelta |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append( | Series(vals2) | pandas.Series |
from datetime import datetime
from stateful.storage.stream_controller import StreamController
from stateful.representable import Representable
import pandas as pd
from stateful.utils import list_of_instance
class Space(Representable):
def __init__(self, primary_key, primary_value, time_key, graph, configuration=None):
Representable.__init__(self)
self.time_key = time_key
self.primary_key = primary_key
self.primary_value = primary_value
self.controller = StreamController(graph, configuration)
self._iter = None
self._prev = None
self.length = 0
@property
def start(self):
return self.controller.start
@property
def end(self):
return self.controller.end
@property
def first(self):
return self.get(self.start)
@property
def last(self):
return self.get(self.end)
@property
def empty(self):
return len(self) == 0
@property
def keys(self):
return self.controller.keys
def set(self, name, stream):
self.controller.add_stream(name, stream)
def add(self, event: dict):
assert isinstance(event, dict), "Event has to be a dictionary"
assert self.time_key in event, "Event has to include time key"
self.length += 1
date = event.pop(self.time_key)
for key, value in event.items():
self.controller[key].add(date, value)
def get(self, date, include_date=True, include_id=True):
event = self.controller.get(date)
if include_date:
event[self.time_key] = date
if include_id:
event[self.primary_key] = self.primary_value
return event
def all(self, times=None):
return self.controller.all(times)
def add_stream(self, name):
self.controller.ensure_stream(name)
def head(self, n=5):
events = list(self)
events = [event.value for event in events]
if len(events) > (n * 2):
df = pd.DataFrame(events[:n] + events[-n:])
else:
df = pd.DataFrame(events)
df = df.set_index(self.time_key)
return df
def df(self):
events = self.all()
df = pd.DataFrame(events)
df = df.set_index(self.time_key)
return df
def __iter__(self):
self._iter = iter(self.controller)
return self
def __next__(self):
try:
date = next(self._iter)
state = self.get(date)
return state
except StopIteration:
self.controller.on(False)
raise StopIteration()
def __getitem__(self, item):
from stateful.storage.calculated_stream import CalculatedStream
if (isinstance(item, str) and ("/" in item or "-" in item)) or isinstance(item, datetime):
date = | pd.to_datetime(item, utc=True) | pandas.to_datetime |
import os
import df2img
import disnake
import pandas as pd
import bots.config_discordbot as cfg
from bots.config_discordbot import gst_imgur, logger
from bots.helpers import save_image
from bots.menus.menu import Menu
from gamestonk_terminal.stocks.insider import finviz_model
def lins_command(ticker: str = "", num: int = 10):
"""Display insider activity for a given stock ticker. [Source: Finviz]
Parameters
----------
ticker : Stock Ticker
num : Number of latest insider activity to display
"""
# Debug
if cfg.DEBUG:
logger.debug("disc-lins %s", num)
d_finviz_insider = finviz_model.get_last_insider_activity(ticker)
df = | pd.DataFrame.from_dict(d_finviz_insider) | pandas.DataFrame.from_dict |
"""
Created on Wed Nov 18 14:20:22 2020
@author: MAGESHWARI
"""
import os
from tkinter import *
from tkinter import messagebox as mb
from tkinter import filedialog
import re
import csv
import pandas as pd
def center_window(w=200, h=500):
# get screen width and height
ws = root.winfo_screenwidth()
hs = root.winfo_screenheight()
# calculate position x, y
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
root.geometry('%dx%d+%d+%d' % (w, h, x, y))
def browse1():
global df1
# global directory
# global filename
# global contents
filepath = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
select_file_field.insert(0,filepath) # insert the path in textbox
df1 = pd.read_csv(filepath)
# file = open(filepath,'r') # open the selected file
# contents = file.read()
# print(contents)
def browse2():
global df2
global basefilepath
basefilepath = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
base_file_field.insert(0,basefilepath) # insert the path in textbox
df2 = pd.read_csv(basefilepath)
# , usecols = ["Date","Link Clicks (e4) (event4)","Unique Visitors", "Visits"]
def browse3():
global df3
global filepath3
filepath3 = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
file3_field.insert(0,filepath3) # insert the path in textbox
df3 = pd.read_csv(filepath3)
def browse4():
global df4
global filepath4
filepath4 = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
file4_field.insert(0,filepath4) # insert the path in textbox
df4 = pd.read_csv(filepath4)
def browse5():
global df5
global filepath5
filepath5 = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
file5_field.insert(0,filepath5) # insert the path in textbox
df5 = pd.read_csv(filepath5)
def browse6():
global df6
global filepath6
filepath6 = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
file6_field.insert(0,filepath6) # insert the path in textbox
df6 = pd.read_csv(filepath6)
def browse7():
global df7
global filepath7
filepath7 = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
file7_field.insert(0,filepath7) # insert the path in textbox
df7 = pd.read_csv(filepath7)
def browse8():
global df8
global filepath8
filepath8 = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
file8_field.insert(0,filepath8) # insert the path in textbox
df8 = pd.read_csv(filepath8)
def browse9():
global df9
global filepath9
filepath9 = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
file9_field.insert(0,filepath9) # insert the path in textbox
df9 = pd.read_csv(filepath9)
def browse10():
global df10
global filepath10
filepath10 = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
file10_field.insert(0,filepath10) # insert the path in textbox
df10 = pd.read_csv(filepath10)
def browse11():
global df11
global filepath11
filepath11 = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
file11_field.insert(0,filepath11) # insert the path in textbox
df11 = pd.read_csv(filepath11)
def browse12():
global df12
global filepath12
filepath12 = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
file12_field.insert(0,filepath12) # insert the path in textbox
df12 = pd.read_csv(filepath12)
def submit():
data1 = | pd.concat([df2, df1]) | pandas.concat |
"""Treatment estimation functions"""
from linearmodels.iv import IV2SLS
from linearmodels.system.model import SUR
from statsmodels.api import add_constant
from statsmodels.multivariate.multivariate_ols import _MultivariateOLS
import numpy as np
import pandas as pd
def estimate_treatment_effect(aps = None, Y = None, Z = None, D = None, data = None, Y_ind = None, Z_ind = None, D_ind = None, aps_ind = None,
estimator: str = "2SLS", verbose: bool = True):
"""Main treatment effect estimation function
Parameters
-----------
aps: array-like, default: None
Array of estimated APS values
Y: array-like, default: None
Array of outcome variables
Z: array-like, default: None
Array of treatment recommendations
D: array-like, default: None
Array of treatment assignments
data: array-like, default: None
2D array of estimation inputs
Y_ind: int, default: None
Index of outcome variable in `data`
Z_ind: int, default: None
Index of treatment recommendation variable in `data`
D_ind: int, default: None
Index of treatment assignment variable in `data`
aps_ind: int, default: None
Index of APS variable in `data`
estimator: str, default: "2SLS"
Method of IV estimation
verbose: bool, default: True
Whether to print output of estimation
Returns
-----------
IVResults
Fitted IV model object
Notes
-----
Treatment effect is estimated using IV estimation. The default is to use the 2SLS method of estimation, with the equations illustrated below.
.. math::
D_i = \\gamma_0(1-I) + \\gamma_1 Z_i + \\gamma_2 p^s(X_i;\\delta) + v_i \\
Y_i = \\beta_0(1-I) + \\beta_1 D_i + \\beta_2 p^s(X_i;\\delta) + \\epsilon_i
:math:`\\beta_1` is our causal estimate of the treatment effect. :math:`I` is an indicator for whether the ML function takes only a single nondegenerate value in the sample.
aps, Y, Z, D, and data should never have any overlapping columns. This is not checkable through the code, so please double check this when passing in the inputs.
"""
if data is not None:
data = np.array(data)
vals = {"Y": Y, "Z": Z, "D": D, "APS": aps}
# If `data` given, then use index inputs for values not explicitly passed
infer = []
to_del = []
if data is not None:
inds = {"Y": Y_ind, "Z": Z_ind, "D": D_ind, "APS": aps_ind}
for key, val in vals.items():
if val is None:
if inds[key] is not None:
vals[key] = data[:,inds[key]]
to_del.append(inds[key])
else:
infer.append(key)
data = np.delete(data, to_del, axis=1)
if len(infer) != 0:
print(f"Indices for {infer} not explicitly passed. Assuming remaining columns in order {infer}...")
for i in range(len(infer)):
vals[infer[i]] = data[:,i]
Y = vals["Y"]
Z = vals["Z"]
D = vals["D"]
aps = vals["APS"]
if aps is None or Y is None or Z is None or D is None:
raise ValueError("Treatment effect estimation requires all values aps, Y, Z, and D to be passed!")
lm_inp = pd.DataFrame({"Y": Y, "Z": Z, "D": D, "aps": aps})
# Use only observations where aps is nondegenerate
aps = np.array(aps)
obs_tokeep = np.nonzero((aps > 0) & (aps < 1))
print(f"We will fit on {len(obs_tokeep[0])} values out of {len(Y)} from the dataset for which the APS estimation is nondegenerate.")
assert len(obs_tokeep[0]) > 0
lm_inp = lm_inp.iloc[obs_tokeep[0],:]
# Check for single non-degeneracy
single_nondegen = True
if len(np.unique(aps[obs_tokeep])) > 1:
single_nondegen = False
lm_inp = add_constant(lm_inp)
if estimator == "2SLS":
if single_nondegen:
results = IV2SLS(lm_inp['Y'], lm_inp[['aps']], lm_inp['D'], lm_inp['Z']).fit(cov_type='robust')
else:
results = IV2SLS(lm_inp['Y'], lm_inp[['const', 'aps']], lm_inp['D'], lm_inp['Z']).fit(cov_type='robust')
elif estimator == "OLS":
if single_nondegen:
results = IV2SLS(lm_inp['Y'], lm_inp[['Z', 'aps']], None, None).fit(cov_type='unadjusted')
else:
results = IV2SLS(lm_inp['Y'], lm_inp[['const', 'Z', 'aps']], None, None).fit(cov_type='unadjusted')
else:
raise NotImplementedError(f"Estimator option {estimator} not implemented yet!")
if verbose:
print(results)
return results
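# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# Minimal example of calling estimate_treatment_effect on synthetic data. The
# data-generating process below is an assumption chosen only so that the APS is
# nondegenerate and compliance is imperfect; names ending in `_ex` are hypothetical.
def _example_estimate_treatment_effect(seed: int = 0):
    rng = np.random.default_rng(seed)
    n = 1000
    aps_ex = rng.uniform(0.1, 0.9, n)            # nondegenerate APS in (0, 1)
    Z_ex = rng.binomial(1, aps_ex)               # recommendation drawn with probability APS
    D_ex = rng.binomial(1, 0.2 + 0.6 * Z_ex)     # imperfect compliance with the recommendation
    Y_ex = 1.5 * D_ex + rng.normal(size=n)       # true treatment effect of 1.5
    # The fitted coefficient on D should be close to 1.5 in large samples.
    return estimate_treatment_effect(aps=aps_ex, Y=Y_ex, Z=Z_ex, D=D_ex, verbose=False)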
def estimate_treatment_effect_controls(aps, Y, Z, D, W,
estimator: str = "2SLS", verbose: bool = True):
"""Main treatment effect estimation function with added controls
Parameters
-----------
aps: array-like, default: None
Array of estimated APS values
Y: array-like, default: None
Array of outcome variables
Z: array-like, default: None
Array of treatment recommendations
D: array-like, default: None
Array of treatment assignments
W: array-like, default: None
Array of control variables
estimator: str, default: "2SLS"
Method of IV estimation
verbose: bool, default: True
Whether to print output of estimation
Returns
-----------
IVResults
Fitted IV model object
Notes
-----
Treatment effect is estimated using IV estimation. The default is to use the 2SLS method of estimation, with the equations illustrated below.
.. math::
D_i = \\gamma_0(1-I) + \\gamma_1 Z_i + \\gamma_2 p^s(X_i;\\delta) + \\gamma_3 W_i + v_i \\
Y_i = \\beta_0(1-I) + \\beta_1 D_i + \\beta_2 p^s(X_i;\\delta) + \\beta_3 W_i + \\epsilon_i
:math:`\\beta_1` is our causal estimate of the treatment effect. :math:`I` is an indicator for whether the ML function takes only a single nondegenerate value in the sample.
"""
aps = np.array(aps)
Y = np.array(Y)
Z = np.array(Z)
D = np.array(D)
W = np.array(W)
# Use only observations where aps is nondegenerate
obs_tokeep = np.nonzero((aps > 0) & (aps < 1))
print(f"We will fit on {len(obs_tokeep[0])} values out of {len(Y)} from the dataset for which the APS estimation is nondegenerate.")
assert len(obs_tokeep[0]) > 0
aps = aps[obs_tokeep[0]]
Y = Y[obs_tokeep[0]]
Z = Z[obs_tokeep[0]]
D = D[obs_tokeep[0]]
W = W[obs_tokeep[0]]
cols = {"aps":aps, "Y":Y, "Z":Z, "D":D}
exog = ["aps"]
constant = False
if len(W.shape) > 1:
for i in range(W.shape[1]):
cols["W"+str(i)] = W[:,i]
exog.append("W"+str(i))
constant = (len(np.unique(W[:,i])) == 1) | constant
else:
constant = len(np.unique(W)) == 1
cols["W"] = W
exog.append("W")
# Check for single non-degeneracy
constant = (len(np.unique(aps)) == 1) | constant
if not constant:
cols["const"] = np.ones(len(Y))
exog.append("const")
df = pd.DataFrame(cols)
if estimator == "2SLS":
results = IV2SLS(df['Y'], df[exog], df['D'], df['Z']).fit(cov_type='robust')
elif estimator == "OLS":
results = IV2SLS(df['Y'], df[exog+['Z']], None, None).fit(cov_type='unadjusted')
else:
raise NotImplementedError(f"Estimator option {estimator} not implemented yet!")
if verbose:
print(results)
return results
def estimate_counterfactual_ml(aps = None, Y = None, Z = None, ml_out = None, cf_ml_out = None, data = None, Y_ind = None,
Z_ind = None, ml_out_ind = None, cf_ml_out_ind = None, aps_ind = None,
cov_type: str = "unadjusted", single_nondegen: bool = False, verbose: bool = True):
"""Estimate counterfactual performance of a new algorithm
Parameters
-----------
aps: array-like, default: None
Array of estimated APS values
Y: array-like, default: None
Array of outcome variables
Z: array-like, default: None
Array of treatment recommendations
ml_out: array-like, default: None
Original ML function outputs
cf_ml_out: array-like, default: None
Counterfactual ML function outputs
data: array-like, default: None
2D array of estimation inputs
Y_ind: int, default: None
Index of outcome variable in `data`
Z_ind: int, default: None
Index of treatment recommendation variable in `data`
ml_out_ind: int, default: None
Index of original ML output variable in `data`
cf_ml_out_ind: int, default: None
Index of counterfactual ML output variable in `data`
aps_ind: int, default: None
Index of APS variable in `data`
estimator: str, default: "2SLS"
Method of IV estimation
single_nondegen: bool, default: False
Indicator for whether the original ML algorithm takes on a single non-degenerate value in the sample
verbose: bool, default: True
Whether to print output of estimation
Returns
-----------
tuple(np.ndarray, OLSResults)
Tuple containing array of predicted float value scores and fitted OLS results.
Notes
-----
The process of estimating counterfactual value works as follows.
First we fit the below OLS regression using historical recommendations and outcome ``Z`` and ``Y``.
.. math::
Y_i = \\beta_0 + \\beta_1 Z_i + \\beta_2 p^s(X_i;\\delta) + \\epsilon_i
:math:`\\beta_1` is our estimated effect of treatment recommendation.
Then we take the original ML output ``ML1`` and the counterfactual ML output ``ML2`` and estimate the below value equation.
.. math::
\\hat{V}(ML') = \\frac{1}{n} \\sum_{i = 1}^n (Y_i + \\hat{\\beta_{ols}}(ML'(X_i) - ML(X_i))
"""
if data is not None:
data = np.array(data)
vals = {"Y": Y, "Z": Z, "APS": aps , "ml_out": ml_out, "cf_ml_out": cf_ml_out}
# If `data` given, then use index inputs for values not explicitly passed
infer = []
to_del = []
if data is not None:
inds = {"Y": Y_ind, "Z": Z_ind, "APS": aps_ind, "ml_out": ml_out_ind, "cf_ml_out": cf_ml_out_ind}
for key, val in vals.items():
if val is None:
if inds[key] is not None:
vals[key] = data[:,inds[key]]
to_del.append(inds[key])
else:
infer.append(key)
data = np.delete(data, to_del, axis=1)
if len(infer) != 0:
print(f"Indices for {infer} not explicitly passed. Assuming remaining columns in order {infer}...")
for i in range(len(infer)):
vals[infer[i]] = data[:,i]
Y = vals["Y"]
Z = vals["Z"]
aps = vals["APS"]
ml_out = np.array(vals["ml_out"])
cf_ml_out = np.array(vals["cf_ml_out"])
if aps is None or Y is None or Z is None or ml_out is None or cf_ml_out is None:
raise ValueError("Treatment effect estimation requires all values aps, Y, Z, D and ML recommendations to be passed!")
lm_inp = pd.DataFrame({"Y": Y, "Z": Z, "aps": aps})
if not single_nondegen:
lm_inp = add_constant(lm_inp)
ols_results = IV2SLS(lm_inp['Y'], lm_inp[['const', 'Z', 'aps']], None, None).fit(cov_type='unadjusted')
if verbose:
print(ols_results)
b_ols = ols_results.params['Z']
v = Y + b_ols * (cf_ml_out - ml_out)
v_score = np.mean(v)
if verbose:
print(f"Counterfactual value of new ML function: {v_score}")
return (v, ols_results)
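# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# Hypothetical example of estimate_counterfactual_ml: the historical rule `ml_out_ex`
# follows the APS, while the counterfactual rule `cf_ml_out_ex` recommends at random.
# All names and the data-generating process are assumptions for illustration only.
def _example_counterfactual_value(seed: int = 0):
    rng = np.random.default_rng(seed)
    n = 500
    aps_ex = rng.uniform(0.2, 0.8, n)
    ml_out_ex = rng.binomial(1, aps_ex)          # historical ML recommendations (used as Z)
    cf_ml_out_ex = rng.binomial(1, 0.5, n)       # counterfactual ML recommendations
    Y_ex = 2.0 * ml_out_ex + rng.normal(size=n)
    v_ex, ols_res_ex = estimate_counterfactual_ml(
        aps=aps_ex, Y=Y_ex, Z=ml_out_ex,
        ml_out=ml_out_ex, cf_ml_out=cf_ml_out_ex, verbose=False)
    # Mean of v_ex is the estimated value of the counterfactual rule.
    return v_ex.mean(), ols_res_ex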
def covariate_balance_test(aps = None, X = None, Z = None, data = None, X_ind = None, Z_ind = None, aps_ind = None,
X_labels = None, cov_type = "robust", verbose: bool = True):
"""Covariate Balance Test
Parameters
-----------
aps: array-like, default: None
Array of estimated APS values
X: array-like, default: None
Array of covariates to test
Z: array-like, default: None
Array of treatment recommendations
data: array-like, default: None
2D array of estimation inputs
X_ind: int/array_of_int, default: None
Indices/indices of covariates in `data`
Z_ind: int, default: None
Index of treatment recommendation variable in `data`
aps_ind: int, default: None
Index of APS variable in `data`
X_labels: array-like, default: None
Array of string labels to associate with each covariate
cov_type: str, default: "robust"
Covariance type of SUR. Any value other than "robust" defaults to simple (nonrobust) covariance.
verbose: bool, default: True
Whether to print output for each test
Returns
-----------
tuple(SystemResults, dict(X_label, dict(stat_label, value)))
Tuple containing the fitted SUR model results and a dictionary containing the results of covariate balance estimation for each covariate as well as the joint hypothesis.
Notes
-----
This function estimates a system of Seemingly Unrelated Regression (SUR) as defined in the linearmodels package.
APS, X, Z, and data should never have any overlapping columns. This is not checkable through the code, so please double check this when passing in the inputs.
For APS, X, Z, either the variables themselves should be passed, or their indices in `data`. If neither is passed then an error is raised.
"""
# Error checking
if X is None and (X_ind is None or data is None):
raise ValueError("covariate_balance_test: No valid data passed for X. You must either pass the variable directly into `X` or its index along with a `data` object.")
if aps is None and (aps_ind is None or data is None):
raise ValueError("covariate_balance_test: No valid data passed for aps. You must either pass the variable directly into `X` or its index along with a `data` object.")
if Z is None and (Z is None or data is None):
raise ValueError("covariate_balance_test: No valid data passed for Z. You must either pass the variable directly into `X` or its index along with a `data` object.")
if X_labels is not None:
if X.ndim == 1:
if len(X_labels) > 1:
raise ValueError(f"Column labels {X_labels} not the same length as inputs.")
else:
if len(X_labels) != X.shape[1]:
raise ValueError(f"Column labels {X_labels} not the same length as inputs.")
# Construct covariate balance inputs
if data is not None:
data = np.array(data)
if X_ind is not None:
X = data[:, X_ind]
if aps_ind is not None:
aps = data[:, aps_ind]
if Z_ind is not None:
Z = data[:, Z_ind]
if isinstance(X, np.ndarray):
if X.ndim == 1 and X_labels is None:
X_labels = ['X1']
elif X_labels is None:
X_labels = [f"X{i}" for i in range(X.shape[1])]
X = pd.DataFrame(X, columns = X_labels)
elif X_labels is not None:
X.columns = X_labels
else:
X_labels = X.columns
aps = np.array(aps)
Z = np.array(Z)
# Use only observations where aps is nondegenerate
obs_tokeep = np.nonzero((aps > 0) & (aps < 1))
print(f"We will run balance testing on {len(obs_tokeep[0])} values out of {len(Z)} from the dataset for which the APS estimation is nondegenerate.")
assert len(obs_tokeep[0]) > 0
exog = np.column_stack((Z, aps))
exog = pd.DataFrame(exog, columns = ['Z', 'aps'])
exog = exog.iloc[obs_tokeep[0],:]
X.reset_index(drop=True, inplace=True)
X = X.iloc[obs_tokeep[0]]
if cov_type != "robust":
cov_type = "unadjusted"
# Check for single non-degeneracy
single_nondegen = True
if len(np.unique(aps[obs_tokeep])) > 1:
single_nondegen = False
exog = add_constant(exog)
# Covariate balance test
mv_ols_res = SUR.multivariate_ls(X, exog).fit(cov_type = cov_type)
if verbose == True:
print(mv_ols_res)
# Joint hypothesis test: use multivariate_OLS from statsmodels
# Edge case: single variable then joint test is the same as the original
if len(X_labels) > 1:
mv_ols_joint = _MultivariateOLS(X, exog).fit()
L = np.zeros((1,3))
L[:,1] = 1
mv_test_res = mv_ols_joint.mv_test([("Z", L)])
else:
mv_test_res = None
# Compile results
res_dict = {}
for x_var in X_labels:
res_dict[x_var] = {}
res_dict[x_var]['coef'] = mv_ols_res.params[f"{x_var}_Z"]
res_dict[x_var]['p'] = mv_ols_res.pvalues[f"{x_var}_Z"]
res_dict[x_var]['t'] = mv_ols_res.tstats[f"{x_var}_Z"]
res_dict[x_var]['n'] = mv_ols_res.nobs/len(X_labels)
res_dict[x_var]['stderr'] = mv_ols_res.std_errors[f"{x_var}_Z"]
if mv_test_res is None:
res_dict['joint'] = {}
res_dict['joint']['p'] = mv_ols_res.pvalues[f"{X_labels[0]}_Z"]
res_dict['joint']['t'] = mv_ols_res.tstats[f"{X_labels[0]}_Z"]
else:
res_dict['joint'] = {}
res_dict['joint']['p'] = mv_test_res.results['Z']['stat'].iloc[0, 4]
res_dict['joint']['f'] = mv_test_res.results['Z']['stat'].iloc[0, 3]
return (mv_ols_res, res_dict)
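# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# Hypothetical covariate balance check on synthetic data: with Z drawn from the APS,
# the covariates `age` and `income` should be balanced (large p-values). Column names
# and the data-generating process are assumptions for illustration only.
def _example_covariate_balance(seed: int = 0):
    rng = np.random.default_rng(seed)
    n = 500
    aps_ex = rng.uniform(0.1, 0.9, n)
    Z_ex = rng.binomial(1, aps_ex)
    X_ex = pd.DataFrame({"age": rng.normal(40, 10, n), "income": rng.normal(50, 15, n)})
    return covariate_balance_test(aps=aps_ex, X=X_ex, Z=Z_ex, verbose=False)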
def covariate_balance_test_controls(aps, X, Z, W, cov_type = "robust", verbose: bool = True):
"""Covariate Balance Test
Parameters
-----------
aps: array-like, default: None
Array of estimated APS values
X: array-like, default: None
Array of covariates to test
Z: array-like, default: None
Array of treatment recommendations
W: array-like, default: None
Array of control variables
cov_type: str, default: "robust"
Covariance type of SUR. Any value other than "robust" defaults to simple (nonrobust) covariance.
verbose: bool, default: True
Whether to print output for each test
Returns
-----------
tuple(SystemResults, dict(X, dict(stat_label, value)))
Tuple containing the fitted SUR model results and a dictionary containing the results of covariate balance estimation for each covariate as well as the joint hypothesis.
Notes
-----
This function estimates a system of Seemingly Unrelated Regression (SUR) as defined in the linearmodels package.
"""
aps = np.array(aps)
X = np.array(X)
Z = np.array(Z)
W = np.array(W)
# Use only observations where aps is nondegenerate
obs_tokeep = np.nonzero((aps > 0) & (aps < 1))
print(f"We will fit on {len(obs_tokeep[0])} values out of {len(Y)} from the dataset for which the APS estimation is nondegenerate.")
assert len(obs_tokeep[0]) > 0
aps = aps[obs_tokeep[0]]
X = X[obs_tokeep[0]]
Z = Z[obs_tokeep[0]]
W = W[obs_tokeep[0]]
cols = {"aps":aps, "Z":Z}
dep = []
if len(X.shape) > 1:
for i in range(X.shape[1]):
cols["X"+str(i)] = X[:,i]
dep.append("X"+str(i))
else:
cols["X"] = X
dep.append("X")
exog = ["aps", "Z"]
constant = False
if len(W.shape) > 1:
for i in range(W.shape[1]):
cols["W"+str(i)] = W[:,i]
exog.append("W"+str(i))
constant = (len(np.unique(W[:,i])) == 1) | constant
else:
constant = len(np.unique(W)) == 1
cols["W"] = W
exog.append("W")
# Check for single non-degeneracy
constant = (len(np.unique(aps)) == 1) | constant
if not constant:
cols["const"] = np.ones(len(aps))
exog.append("const")
df = | pd.DataFrame(cols) | pandas.DataFrame |
"""
Seoul Open Data Plaza Open API
1. TransInfo class: retrieve Seoul traffic-related information
"""
import datetime
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
class TransInfo:
def __init__(self, serviceKey):
"""
Initialize with the service key issued by the Seoul Open Data Plaza.
"""
# Initialize the Open API service key
self.serviceKey = serviceKey
# Register the service key in the base URL
self.urlBase = f"http://openapi.seoul.go.kr:8088/"
print(">> Open API Services initialized!")
def CardSubwayStatsNew(self, start_index, end_index, use_dt):
"""
Retrieve subway boarding/alighting statistics.
Input: start index, end index, date to query
Constraint: limited to 1,000 records per request
"""
url = f"{self.urlBase}{self.serviceKey}/xml/CardSubwayStatsNew/{start_index}/{end_index}/{use_dt}"
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("row")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"USE_DT",
"LINE_NUM",
"SUB_STA_NM",
"RIDE_PASGR_NUM",
"ALIGHT_PASGR_NUM",
"WORK_DT",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
USE_DT,
LINE_NUM,
SUB_STA_NM,
RIDE_PASGR_NUM,
ALIGHT_PASGR_NUM,
WORK_DT,
]],
columns=variables,
)
df = pd.concat([df, data])
# Set col names
df.columns = variables
# Set Index
df.index = range(len(df))
# Convert date columns to datetime
df["USE_DT"] = pd.to_datetime(df["USE_DT"], format="%Y%m%d")
df["WORK_DT"] = pd.to_datetime(df["WORK_DT"], format="%Y%m%d")
# Convert count columns to numeric
df["RIDE_PASGR_NUM"] = pd.to_numeric(df["RIDE_PASGR_NUM"])
df["ALIGHT_PASGR_NUM"] = | pd.to_numeric(df["ALIGHT_PASGR_NUM"]) | pandas.to_numeric |
import argparse
from typing import Final
# numerical computation
import numpy as np
# used to read the csv file and conveniently compute variances and means
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
# Usage: main.py --dataset <path to dataset>
class KMeans:
def __init__(self, feats: pd.DataFrame, k: int):
self.tries = 0
self.feats = feats
self.k = k
def _wcss(self, centroids, cluster) -> float:
"""
Compute the criterion function J (sum of distances from each sample to its cluster center).
:param centroids: cluster centers
:param cluster: cluster assignment of each sample
:return: value of the J function
"""
ret = 0.0
for i, val in enumerate(self.feats.values):
ret += np.sqrt(
(centroids[int(cluster[i]), 0] - val[0]) ** 2 + (centroids[int(cluster[i]), 1] - val[1]) ** 2)
return ret
def cluster(self, max_tries: int = 32767):
"""
Run k-means clustering.
:param max_tries: maximum number of iterations
:return: indices of the initial centers in the source data, cluster centers, cluster assignments, value of J
"""
self.tries = 0
# initialize all cluster assignments to 0
cluster = np.zeros(self.feats.shape[0])
# randomly sample k points as the initial cluster centers
centroid_indexes, centroids = self.feats.sample(n=self.k).index, self.feats.sample(n=self.k).values
while self.tries < max_tries:
self.tries += 1
# for each point
for id, row in enumerate(self.feats.values):
min_dist = float('inf')
for cid, centroid in enumerate(centroids):
# compute the distance and assign the point to the nearest center
dist = np.sqrt((centroid[0] - row[0]) ** 2 + (centroid[1] - row[1]) ** 2)
if dist < min_dist:
min_dist, cluster[id] = dist, cid
# take the mean of each cluster as the new cluster center
clustered_centroids = self.feats.copy().groupby(by=cluster).mean().values
# if the new centers coincide with the old ones, clustering has converged
if np.count_nonzero(centroids - clustered_centroids) == 0:
break
else:
centroids = clustered_centroids
return centroid_indexes, centroids, cluster, self._wcss(centroids, cluster)
def get_tries(self) -> int:
return self.tries
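# --- Illustrative usage sketch (added for clarity, not part of the original script) ---
# Minimal, hypothetical example of the KMeans class above on a tiny two-feature frame.
# The column names mirror the engineered features used further below, but the values
# are made up for illustration.
def _kmeans_example():
    demo = pd.DataFrame({
        "PetalSquare": [0.20, 0.30, 0.25, 5.00, 5.50, 5.20],
        "SepalSquare": [15.0, 16.0, 15.5, 20.0, 21.0, 20.5],
    })
    km_demo = KMeans(feats=demo, k=2)
    init_idx, centers, labels, j_value = km_demo.cluster()
    # `labels` assigns each row to one of the two centers; `j_value` is the criterion J.
    return centers, labels, j_value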
def clustering(path: str):
"""
Run the clustering and visualize the results.
:param path: path to the dataset
:return:
"""
raw = pd.read_csv(path)
data = raw.copy()
data.insert(1, "SepalSquare", data["SepalLengthCm"] * data["SepalWidthCm"])
data.insert(1, "PetalSquare", data["PetalLengthCm"] * data["PetalWidthCm"])
# print(data.describe(), data)
sb.pairplot(data, vars=data.columns[1:7], hue="Species")
plt.show()
plt.clf()
# extract the selected features
feats = data.iloc[:, [1, 2]]
# print(feats)
# run the clustering
km: KMeans = KMeans(feats=feats, k=3)
cid, cen, clu, cost = km.cluster()
result = raw.copy()
result.insert(1, "Class", clu)
# print(result)
class_map = dict()
species_names = list()
specie_count = dict()
class_count = dict()
error_count = dict()
species = pd.DataFrame(result.iloc[:, [1, 6]].copy().groupby(by=clu).agg(pd.Series.mode))
for i, row in species.iterrows():
species_names.append(row["Species"])
class_map[row["Class"]] = row["Species"]
class_count[row["Species"]] = \
result.copy().loc[result["Class"] == row["Class"]].shape[0]
specie_count[row["Species"]] = \
result.copy().loc[result["Species"] == row["Species"]].shape[0]
error_count[row["Species"]] = \
result.copy().loc[(result["Class"] == row["Class"]) & (result["Species"] != row["Species"])].shape[0]
pretty_clu = list([class_map[c] for c in clu])
samples = pd.DataFrame(data.copy().iloc[cid.values])
sb.scatterplot(data.iloc[:, 1], data.iloc[:, 2], hue=pretty_clu)
sb.scatterplot(cen[:, 0], cen[:, 1], s=100, color='b', marker='X', label="Center")
sb.scatterplot(samples["PetalSquare"], samples["SepalSquare"], s=50, color='r', marker='s',
label="Initial Elements Chosen")
plt.xlabel("PetalSquare")
plt.ylabel("SepalSquare")
plt.title("Clustering the Iris Dataset")
plt.show()
plt.clf()
error = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import matplotlib.pyplot as plt
from matplotlib import pyplot
import plotly.graph_objs as go
import streamlit as st
import warnings
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf
from sklearn.cluster import KMeans
from sklearn.ensemble import IsolationForest
import streamlit as st
from bokeh.plotting import figure
def main():
st.set_page_config(
page_title="Oil Spill Dashboard",
page_icon=":ship:",
layout="centered",
initial_sidebar_state="expanded",
)
def load_raw_data(x):
# data
time_series_df=pd.read_csv(x)
time_series_df.sort_values(by=['timestamp'], inplace=True, kind="mergesort")
time_series_df['timestamp'] = pd.to_datetime(time_series_df['timestamp'])
return time_series_df
def load_data(x):
time_series_df = load_raw_data(x)
# cleaning and indexing
time_series_df.drop(["call_sign", "flag" ,"draught" , "ship_and_cargo_type", "length", "width","eta" , "destination", "status", "maneuver", "accuracy" ,"collection_type" ,'mmsi_label'], axis=1, inplace=True)
time_series_df.drop(['created_at','imo', 'name'], axis=1, inplace=True)
time_series_df = time_series_df[time_series_df['speed'].notna()]
time_series_df = time_series_df.reset_index(drop=True)
time_series_df.drop(time_series_df[time_series_df['speed'] == 0].index, inplace = True)
return time_series_df
st.title("Oil spill prediction Dashboard :rocket:")
st.sidebar.title("Enter Parameters :paperclip:")
st.sidebar.markdown("Powered by AIS Data set")
load_csv_data = st.sidebar.checkbox("Upload Csv")
if(load_csv_data):
uploaded_file = st.file_uploader("Choose Csv file")
else:
uploaded_file = None
if uploaded_file is not None:
uploaded_file.seek(0)
time_series_df1 = load_raw_data(uploaded_file)
time_series_df = load_data(uploaded_file)
else:
time_series_df1 = load_raw_data('../Data/main/Maritius_AOI_20200701_0731_full.csv')
time_series_df = load_data('../Data/main/Maritius_AOI_20200701_0731_full.csv')
raw = st.sidebar.checkbox("Show Raw Dataset")
not_raw = st.sidebar.checkbox("Show cleaned Dataset")
if(raw):
st.subheader("AIS Dataset (Raw)")
st.dataframe(time_series_df1[:500].style.highlight_max(axis=0))
if(not_raw):
st.subheader("AIS Dataset (Cleaned)")
st.dataframe(time_series_df[:500].style.highlight_max(axis=0))
vessels = time_series_df.mmsi.unique()
st.markdown("Anomaly detection with time series data of: ",len(vessels))
classifier = st.sidebar.selectbox("Classifier",("Select one model","Code", "Benchmark model(IQR)","K-Means clustering","Isolation Forest", "All of the above(Best)"))
mv_value = st.sidebar.selectbox("Select vessel", vessels)
st.write("Selected Vessel: ", mv_value)
param = st.sidebar.radio("Vessel Parameter",("speed", "course", "heading", "rot"),key='param')
mv_data = time_series_df[time_series_df['mmsi']==mv_value]
if st.button("Plot all basic graphs"):
p = figure(
title='Speed Vs Time',
x_axis_label='Timestamp',
y_axis_label='Speed')
p.line(mv_data['timestamp'], mv_data['speed'], legend='Speed Trend', line_width=2)
st.bokeh_chart(p, use_container_width=True)
q = figure(
title='Course Vs Time',
x_axis_label='Timestamp',
y_axis_label='Course')
q.line(mv_data['timestamp'], mv_data['course'], legend='Course Trend', line_width=2)
st.bokeh_chart(q, use_container_width=True)
r = figure(
title='Heading Vs Time',
x_axis_label='Timestamp',
y_axis_label='Heading')
r.line(mv_data['timestamp'], mv_data['heading'], legend='Heading Trend', line_width=2)
st.bokeh_chart(r, use_container_width=True)
s = figure(
title='Rot Vs Time',
x_axis_label='Timestamp',
y_axis_label='Rot')
s.line(mv_data['timestamp'], mv_data['rot'], legend='Rot Trend', line_width=2)
st.bokeh_chart(s, use_container_width=True)
map_df = mv_data[mv_data['latitude'].notna()]
map_df = map_df[map_df['longitude'].notna()]
if st.button("Plot Map"):
map_df = map_df.filter(['latitude', 'longitude'])
st.map(map_df)
mv_data = mv_data.drop(['mmsi','msg_type','latitude', 'longitude'], axis=1)
mv_data = mv_data[mv_data['speed'].notna()]
mv_data = mv_data.set_index(['timestamp'])
mv_data.index = pd.to_datetime(mv_data.index, unit='s')
names=mv_data.columns
rollmean = mv_data.resample(rule='D').mean()
rollstd = mv_data.resample(rule='D').std()
if classifier == "Benchmark model: Interquartile Range (IQR)":
df2 = mv_data
names=df2.columns
x = mv_data[names]
scaler = StandardScaler()
pca = PCA()
pipeline = make_pipeline(scaler, pca)
pipeline.fit(x)
features = range(pca.n_components_)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents, columns = ['pc1', 'pc2'])
mv_data['pc1']=pd.Series(principalDf['pc1'].values, index=mv_data.index)
mv_data['pc2']=pd.Series(principalDf['pc2'].values, index=mv_data.index)
result = adfuller(principalDf['pc1'])
st.write("p value", result[1])
pca1 = principalDf['pc1'].pct_change()
autocorrelation = pca1.dropna().autocorr()
st.write('Autocorrelation(pc1) is: ', autocorrelation)
plot_acf(pca1.dropna(), lags=20, alpha=0.05)
pca2 = principalDf['pc2'].pct_change()
autocorrelation = pca2.autocorr()
st.write('Autocorrelation(pc2) is: ', autocorrelation)
plot_acf(pca2.dropna(), lags=20, alpha=0.05)
q1_pc1, q3_pc1 = mv_data['pc1'].quantile([0.25, 0.75])
iqr_pc1 = q3_pc1 - q1_pc1
lower_pc1 = q1_pc1 - (1.5*iqr_pc1)
upper_pc1 = q3_pc1 + (1.5*iqr_pc1)
q1_pc2, q3_pc2 = mv_data['pc2'].quantile([0.25, 0.75])
iqr_pc2 = q3_pc2 - q1_pc2
lower_pc2 = q1_pc2 - (1.5*iqr_pc2)
upper_pc2 = q3_pc2 + (1.5*iqr_pc2)
mv_data['anomaly_pc1'] = ((mv_data['pc1']>upper_pc1) | (mv_data['pc1']<lower_pc1)).astype('int')
mv_data['anomaly_pc2'] = ((mv_data['pc2']>upper_pc2) | (mv_data['pc2']<lower_pc2)).astype('int')
total_anomaly = mv_data['anomaly_pc1'].value_counts() + mv_data['anomaly_pc2'].value_counts()
outliers_pc1 = mv_data.loc[(mv_data['pc1']>upper_pc1) | (mv_data['pc1']<lower_pc1), 'pc1']
outliers_pc2 = mv_data.loc[(mv_data['pc2']>upper_pc2) | (mv_data['pc2']<lower_pc2), 'pc2']
st.write("Outlier Propotion(pc1): ", len(outliers_pc1)/len(mv_data))
st.write("Outlier Propotion(pc2): ", len(outliers_pc2)/len(mv_data))
a = mv_data[mv_data['anomaly_pc1'] == 1] #anomaly
b = mv_data[mv_data['anomaly_pc2'] == 1] #anomaly
fig = plt.figure()
plt.plot(mv_data[param], color='blue', label='Normal')
plt.plot(a[param], linestyle='none', marker='X', color='red', markersize=12, label='Anomaly1')
plt.plot(b[param], linestyle='none', marker='X', color='green', markersize=12, label='Anomaly2')
plt.xlabel('Date and Time')
plt.ylabel(param)
plt.title('Anomalies with given MMSI')
plt.legend(loc='best')
plt.show()
plt.gcf().autofmt_xdate()
st.pyplot(fig)
data1 = a
data2 = b
if classifier == "K-Means clustering":
df2 = mv_data
names=df2.columns
x = mv_data[names]
scaler = StandardScaler()
pca = PCA()
pipeline = make_pipeline(scaler, pca)
pipeline.fit(x)
features = range(pca.n_components_)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents, columns = ['pc1', 'pc2'])
mv_data['pc1']=pd.Series(principalDf['pc1'].values, index=mv_data.index)
mv_data['pc2']=pd.Series(principalDf['pc2'].values, index=mv_data.index)
fraction = st.number_input("Fraction",0.00,1.00,step=0.01,key='fraction')
kmeans = KMeans(n_clusters=2, random_state=42)
kmeans.fit(principalDf.values)
labels = kmeans.predict(principalDf.values)
unique_elements, counts_elements = np.unique(labels, return_counts=True)
clusters = np.asarray((unique_elements, counts_elements))
# no of points in each clusters
fig = plt.figure()
plt.bar(clusters[0], clusters[1], tick_label=clusters[0])
plt.xlabel('Clusters')
plt.ylabel('Number of points')
plt.title('Number of points in each cluster')
st.pyplot(fig)
# cluster graph
fig = plt.figure()
plt.scatter(principalDf['pc1'], principalDf['pc2'], c=labels)
plt.xlabel('pc1')
plt.ylabel('pc2')
plt.title('K-means of clustering')
st.pyplot(fig)
# distance function to be used
def getDistanceByPoint(data, model):
distance = []
for i in range(0,len(data)):
Xa = np.array(data.loc[i])
Xb = model.cluster_centers_[model.labels_[i]-1]
distance.append(np.linalg.norm(Xa-Xb))
return pd.Series(distance, index=data.index)
outliers_fraction = fraction
distance = getDistanceByPoint(principalDf, kmeans)
number_of_outliers = int(outliers_fraction*len(distance))
threshold = distance.nlargest(number_of_outliers).min()
principalDf['anomaly1'] = (distance >= threshold).astype(int)
st.write("Anomaly Count by Kmeans", principalDf['anomaly1'].value_counts())
mv_data['anomaly1'] = | pd.Series(principalDf['anomaly1'].values, index=mv_data.index) | pandas.Series |
import time
import pandas as pd
import copy
import numpy as np
from shapely import affinity
from shapely.geometry import Polygon
import geopandas as gpd
def cal_arc(p1, p2, degree=False):
dx, dy = p2[0] - p1[0], p2[1] - p1[1]
arc = np.pi - np.arctan2(dy, dx)
return arc / np.pi * 180 if degree else arc
def helper_print_with_time(*arg, sep=','):
print(time.strftime("%H:%M:%S", time.localtime()), sep.join(map(str, arg)))
def cal_euclidean(p1, p2):
return np.linalg.norm([p1[0] - p2[0], p1[1] - p2[1]])
def get_shape_mbr(df_shape):
oid = 'OID' if 'FID' in df_shape.columns else 'OBJECTID'
df_mbr = copy.deepcopy(df_shape[[oid, 'geometry']])
df_mbr.reset_index(drop=True, inplace=True)
df_mbr['geometry'] = pd.Series([geo.minimum_rotated_rectangle for geo in df_mbr['geometry']])
df_mbr['xy'] = pd.Series([list(geo.exterior.coords) for geo in df_mbr['geometry']])
#
df_mbr['x0'] = pd.Series([xy[0][0] for xy in df_mbr['xy']])
df_mbr['x1'] = pd.Series([xy[1][0] for xy in df_mbr['xy']])
df_mbr['x2'] = pd.Series([xy[2][0] for xy in df_mbr['xy']])
df_mbr['y0'] = pd.Series([xy[0][1] for xy in df_mbr['xy']])
df_mbr['y1'] = pd.Series([xy[1][1] for xy in df_mbr['xy']])
df_mbr['y2'] = pd.Series([xy[2][1] for xy in df_mbr['xy']])
#
df_mbr['l1'] = pd.Series(
[cal_euclidean([x0, y0], [x1, y1]) for x0, y0, x1, y1 in df_mbr[['x0', 'y0', 'x1', 'y1']].values])
df_mbr['l2'] = pd.Series(
[cal_euclidean([x0, y0], [x1, y1]) for x0, y0, x1, y1 in df_mbr[['x1', 'y1', 'x2', 'y2']].values])
df_mbr['a1'] = pd.Series(
[cal_arc([x0, y0], [x1, y1], True) for x0, y0, x1, y1 in df_mbr[['x0', 'y0', 'x1', 'y1']].values])
df_mbr['a2'] = pd.Series(
[cal_arc([x0, y0], [x1, y1], True) for x0, y0, x1, y1 in df_mbr[['x1', 'y1', 'x2', 'y2']].values])
#
df_mbr['longer'] = df_mbr['l1'] >= df_mbr['l2']
#
df_mbr['lon_len'] = pd.Series([l1 if longer else l2 for l1, l2, longer in df_mbr[['l1', 'l2', 'longer']].values])
df_mbr['short_len'] = pd.Series([l2 if longer else l1 for l1, l2, longer in df_mbr[['l1', 'l2', 'longer']].values])
df_mbr['lon_arc'] = pd.Series([a1 if longer else a2 for a1, a2, longer in df_mbr[['a1', 'a2', 'longer']].values])
df_mbr['short_arc'] = pd.Series([a2 if longer else a1 for a1, a2, longer in df_mbr[['a1', 'a2', 'longer']].values])
df_mbr.drop(['x0', 'x1', 'x2', 'y0', 'y1', 'y2', 'l1', 'l2', 'a1', 'a2'], axis=1, inplace=True)
#
df_shape = pd.merge(df_shape, df_mbr[[oid, 'lon_len', 'short_len', 'lon_arc', 'short_arc']], how='left', on=oid)
return df_mbr, df_shape
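# --- Minimal usage sketch (not from the original project): a one-polygon GeoDataFrame with
# an 'OBJECTID' id column (what get_shape_mbr() falls back to when 'FID' is absent) run
# through the minimum-rotated-rectangle feature extraction above.
_demo = gpd.GeoDataFrame({'OBJECTID': [1],
'geometry': [Polygon([(0, 0), (4, 0), (5, 2), (1, 2)])]})
_demo_mbr, _demo_with_mbr = get_shape_mbr(_demo)
# _demo_with_mbr now carries lon_len / short_len / lon_arc / short_arc columns per feature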
def get_shape_normalize_final(df_use, if_scale_y):
df_use = copy.deepcopy(df_use)
#
df_use['mu_x'] = pd.Series([geo.centroid.x for geo in df_use['geometry']])
df_use['mu_y'] = pd.Series([geo.centroid.y for geo in df_use['geometry']])
df_use['geometry'] = pd.Series(
[affinity.translate(geo, -mx, -my) for mx, my, geo in df_use[['mu_x', 'mu_y', 'geometry']].values])
df_use['x_max'] = pd.Series([max(geo.exterior.xy[0]) for geo in df_use['geometry']])
df_use['x_min'] = pd.Series([min(geo.exterior.xy[0]) for geo in df_use['geometry']])
df_use['scale_x'] = (df_use['x_max'] - df_use['x_min'])
df_use['y_max'] = pd.Series([max(geo.exterior.xy[1]) for geo in df_use['geometry']])
df_use['y_min'] = pd.Series([min(geo.exterior.xy[1]) for geo in df_use['geometry']])
df_use['scale_y'] = (df_use['y_max'] - df_use['y_min'])
if if_scale_y:
df_use['geometry'] = pd.Series(
[affinity.scale(geo, 1 / del_x, 1 / del_y, origin='centroid') for del_x, del_y, geo in
df_use[['scale_x', 'scale_y', 'geometry']].values])
else:
df_use['geometry'] = pd.Series([affinity.scale(geo, 1 / del_x, 1 / del_x, origin='centroid') for del_x, geo in
df_use[['scale_x', 'geometry']].values])
df_use.drop(['mu_x', 'mu_y', 'scale_x', 'scale_y', 'x_max', 'x_min', 'y_max', 'y_min'], axis=1, inplace=True)
return df_use
def simplify_cos_on_node(df_node, tor_cos):
oid = 'OBJECTID'
df_line = copy.deepcopy(df_node)
#
df_line = df_line[df_line['PID'] != 0].reset_index(drop=True)
df_line['PID'] = df_line['PID'] - 1
#
coor_dic = {(int(oid), int(pid)): [x, y] for oid, pid, x, y in df_line[['OBJECTID', 'PID', 'x', 'y']].values}
df_line['x_l'] = pd.Series([coor_dic[(oid, (pid - 1 if pid >= 1 else pnum - 2))][0] for oid, pid, pnum in
df_line[['OBJECTID', 'PID', 'p_num']].values])
df_line['y_l'] = pd.Series([coor_dic[(oid, (pid - 1 if pid >= 1 else pnum - 2))][1] for oid, pid, pnum in
df_line[['OBJECTID', 'PID', 'p_num']].values])
df_line['x_r'] = pd.Series([coor_dic[(oid, (pid + 1 if pid < (pnum - 2) else 0))][0] for oid, pid, pnum in
df_line[['OBJECTID', 'PID', 'p_num']].values])
df_line['y_r'] = pd.Series([coor_dic[(oid, (pid + 1 if pid < (pnum - 2) else 0))][1] for oid, pid, pnum in
df_line[['OBJECTID', 'PID', 'p_num']].values])
#
df_line['dx_l'] = pd.Series([x - xl for x, xl in df_line[['x', 'x_l']].values])
df_line['dy_l'] = pd.Series([y - yl for y, yl in df_line[['y', 'y_l']].values])
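# --- Illustrative helper (an assumption, not the original continuation -- the function above
# is truncated in this snippet): given left/right neighbour deltas like those computed above,
# the cosine of the angle at a vertex can be obtained as below; a tolerance such as tor_cos is
# then typically compared against it to decide which near-collinear vertices to drop.
def _vertex_cos(dx_l, dy_l, dx_r, dy_r):
    # cosine of the angle between the incoming and outgoing edge vectors at a vertex
    return (dx_l * dx_r + dy_l * dy_r) / (np.hypot(dx_l, dy_l) * np.hypot(dx_r, dy_r))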
# 1. Import packages
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import sys
import os
import gensim
from tqdm import tqdm
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Activation
import logging
import re
from utils.stemming import stemming_row
from utils.tokens import word_tokens
from utils.dictionary import create_dict
from utils.tfidf import tfidf
from utils.longest import longest_question
from utils.distance import distance
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',level=logging.INFO)
# 2. User setting 1: Import data
# Important: data needs to be stored in directory 'data' in parent folder of current working directory
path = os.getcwd()
os.chdir(path)
train_df = pd.read_csv("data/train_data.csv", delimiter=',')
test_df = pd.read_csv("data/test_data.csv", delimiter=',')
train_duplicate = pd.read_csv("data/train_labels.csv", delimiter=',')
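# --- Illustrative sketch only (not part of the original pipeline): a simple word-overlap
# baseline feature for a question pair. The column names 'question1'/'question2' are an
# assumption about train_data.csv and may need adjusting to the real headers.
def _word_overlap(q1, q2):
    w1, w2 = set(str(q1).lower().split()), set(str(q2).lower().split())
    return len(w1 & w2) / max(len(w1 | w2), 1)
# e.g. train_df['word_overlap'] = [_word_overlap(a, b) for a, b in zip(train_df['question1'], train_df['question2'])]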
import sys
import os
#handling the paths and the model
cwd = os.getcwd()
sys.path.append(cwd)
from pathlib import Path
from pysd.py_backend.functions import Model
import pandas as pd
import varcontrol
import time
#handling the paths and the model
def run_model_web(switch0=0,start0=0,end0=0,effectiveness0=0,switch1=0,start1=0,end1=0,effectiveness1=0,switch2=0,start2=0,end2=0,effectiveness2=0,switch3=0,start3=0,end3=0,effectiveness3=0,
switch4=0,start4=0,end4=0,effectiveness4=0,switch5=0,start5=0,end5=0,effectiveness5=0,switch6=0,start6=0,end6=0,effectiveness6=0,switch7=0,start7=0,end7=0,effectiveness7=0,
switch8=0,start8=0,end8=0,effectiveness8=0,switch9=0,start9=0,end9=0,effectiveness9=0,switch10=0,start10=0,end10=0,effectiveness10=0,switch11=0,start11=0,end11=0,effectiveness11=0,
switch12=0,start12=0,end12=0,effectiveness12=0,switch13=0,start13=0,end13=0,effectiveness13=0,switch14=0,start14=0,end14=0,effectiveness14=0,switch15=0,start15=0,end15=0,effectiveness15=0,
switch16=0,start16=0,end16=0,effectiveness16=0,switch17=0,start17=0,end17=0,effectiveness17=0):
#for the args:
# there are a total of 18 sets of 4 (2 policies times 9 age groups)
# the first 9 sets are social distancing for the age groups in ascending order, then 9 sets for self quarantine in ascending order
full_args = [switch0,start0,end0,effectiveness0,switch1,start1,end1,effectiveness1,switch2,start2,end2,effectiveness2,switch3,start3,end3,effectiveness3,
switch4,start4,end4,effectiveness4,switch5,start5,end5,effectiveness5,switch6,start6,end6,effectiveness6,switch7,start7,end7,effectiveness7,
switch8,start8,end8,effectiveness8,switch9,start9,end9,effectiveness9,switch10,start10,end10,effectiveness10,switch11,start11,end11,effectiveness11,
switch12,start12,end12,effectiveness12,switch13,start13,end13,effectiveness13,switch14,start14,end14,effectiveness14,switch15,start15,end15,effectiveness15,
switch16,start16,end16,effectiveness16,switch17,start17,end17,effectiveness17]
pol_dict = {}
pol_dict['social distancing 00'] = full_args[:4]
pol_dict['social distancing 10'] = full_args[4:8]
pol_dict['social distancing 20'] = full_args[8:12]
pol_dict['social distancing 30'] = full_args[12:16]
pol_dict['social distancing 40'] = full_args[16:20]
pol_dict['social distancing 50'] = full_args[20:24]
pol_dict['social distancing 60'] = full_args[24:28]
pol_dict['social distancing 70'] = full_args[28:32]
pol_dict['social distancing 80'] = full_args[32:36]
pol_dict['self quarantine 00'] = full_args[36:40]
pol_dict['self quarantine 10'] = full_args[40:44]
pol_dict['self quarantine 20'] = full_args[44:48]
pol_dict['self quarantine 30'] = full_args[48:52]
pol_dict['self quarantine 40'] = full_args[52:56]
pol_dict['self quarantine 50'] = full_args[56:60]
pol_dict['self quarantine 60'] = full_args[60:64]
pol_dict['self quarantine 70'] = full_args[64:68]
pol_dict['self quarantine 80'] = full_args[68:]
start = time.time()
model = Model('corona_hackathon_agegroups_cons_treated.py')
path = Path.cwd()
out_path = path / 'output'
set_path = path / 'settings'
try:
file_lst = list(out_path.glob('*'))
for file in file_lst:
file.unlink()
except FileNotFoundError:
pass
out_path.mkdir(exist_ok=True)
#reading the settings
time_df = pd.read_csv(set_path / 'timesettings.csv',index_col=0)
init_df = pd.read_csv(set_path / 'initialconditions.csv',index_col=0)
model_df = pd.read_csv(set_path / 'modelsettings.csv',index_col=0)
contact_df = pd.read_csv(set_path / 'contacts.csv',index_col=0,header=0)
control_df = pd.read_csv(set_path / 'infectioncontrol.csv',index_col=0)
time_lst = varcontrol.time_lst
init_lst = varcontrol.agify_init()
model_lst = varcontrol.agify_model()
output_lst = ['Susceptible', 'total infected', 'Critical Cases', 'Diseased']
#updating the time settings
time_params = {}
for cond in time_lst:
time_params[cond] = time_df.loc[cond][0]
model.set_components(params=time_params)
#updating the initial conditions
init_params = {}
for cond in init_lst:
if cond[-2:] in varcontrol.age_groups:
name, col = cond.rsplit(' ',1)
if col == '00':
col = '0'
init_params[cond] = init_df.loc[name][col]
else:
init_params[cond] = init_df.loc[cond][0]
model.set_components(params=init_params)
#updating the model parameters
model_params = {}
for var in model_lst:
if var[-2:] in varcontrol.age_groups:
#infectivity per contact is run through all the model as the same variable
if not var.startswith('infectivity per contact') or var.startswith('contacts per person normal'):
name, col = var.rsplit(' ',1)
if col == '00':
col = '0'
model_params[var] = model_df.loc[name][col]
else:
model_params[var] = model_df.loc[var]['settings']
model.set_components(params=model_params)
contact_param = {}
contact_cat = ['80+', '70 - 79', '60 - 69', '50 - 59', '40 - 49', '30 - 39', '20 - 29', '10 - 19', '<10']
for i, group in enumerate(varcontrol.age_groups):
contact_param['contacts per person normal self %s' % group] = contact_df.loc[contact_cat[i]][contact_cat[i]]
for i, src in enumerate(varcontrol.age_groups):
for j, dst in enumerate(varcontrol.age_groups):
if int(src) < int(dst):
contact_param['contacts per person normal %sx%s' % (src,dst)] = contact_df.loc[contact_cat[i]][contact_cat[j]]
model.set_components(params=contact_param)
control_param = {}
for group in varcontrol.age_groups:
control_param['infection start %s' % group] = control_df.loc['infection start %s' % group]['settings']
model.set_components(params=control_param)
base_df = model.run(return_columns=output_lst)
#base_df.to_csv(out_path / '00_base_results.csv')
pol_params = {}
for group in varcontrol.age_groups:
pol_params['self quarantine policy SWITCH self %s' % group] = pol_dict['self quarantine %s' % group][0]
pol_params['self quarantine start %s' % group] = pol_dict['self quarantine %s' % group][1]
pol_params['self quarantine end %s' % group] = pol_dict['self quarantine %s' % group][2]
pol_params['self quarantine effectiveness %s' % group] = pol_dict['self quarantine %s' % group][3]
pol_params['social distancing policy SWITCH self %s' % group] = pol_dict['social distancing %s' % group][0]
pol_params['social distancing start %s' % group] = pol_dict['social distancing %s' % group][1]
pol_params['social distancing end %s' % group] = pol_dict['social distancing %s' % group][2]
pol_params['social distancing effectiveness %s' % group] = pol_dict['social distancing %s' % group][3]
pol_df = model.run(params=pol_params,return_columns=output_lst)
out_df = pd.concat([base_df,pol_df],axis=1,keys=['base','policy'])
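# --- Usage sketch (not part of the original script, which is truncated above before
# run_model_web() returns): switch on a single policy slot -- the 'social distancing 00'
# group per the mapping above -- from day 10 to day 60 at 50% effectiveness, all else off.
if __name__ == "__main__":
    results = run_model_web(switch0=1, start0=10, end0=60, effectiveness0=0.5)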
# coding: utf-8
# # ------------- Logistics -------------
# In[1]:
from __future__ import division
import numpy
import os
import pandas
import sklearn
import sys
import sqlite3
import pickle
from operator import itemgetter
from collections import Counter
import itertools
import matplotlib
import matplotlib.pyplot as plt
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, ExtraTreesClassifier, AdaBoostClassifier
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import normalize, scale, LabelEncoder
from sklearn import model_selection
from sklearn.feature_selection import VarianceThreshold, SelectFromModel, RFECV
#------------- Custom functions -------------#
def plot_recall(classifier_name, cm, output_directory):
#---Plot settings ---#
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(11.7, 8.27)
# Number of ticks in axes
plt.yticks(numpy.arange(0.0, 1.05, 0.05))
# Axes limit
axes = ax.axes
axes.set_ylim(0.0,1.05)
# Pad margins so that markers don't get clipped by the axes
plt.margins(0.2)
# Tweak spacing to prevent clipping of tick-labels
plt.subplots_adjust(bottom=0.15)
numpy.set_printoptions(precision=3, suppress=True)
#---Plot data ---#
row_sums = cm.sum(axis=1)
normalized_cm = cm / row_sums[:, numpy.newaxis]
cm_diag = normalized_cm.diagonal()
bar_labels = sorted(list(set(ground_truth)))
y_pos = numpy.arange(len(bar_labels))
plt.bar(y_pos,
cm_diag,
align='center',
color='blue')
plt.ylabel('Percent of cells correctly classifed (recall)')
plt.xticks(y_pos, bar_labels, rotation='vertical')
plt.title('Cell Classes, ' + classifier_name)
plt_name = classifier_name + '_plot.png'
plt.savefig(os.path.join(output_directory, plt_name))
plt.clf()
def plot_confusion_matrix(cm, output_directory, classifier_name, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues
):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, numpy.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
cm_file_name = classifier_name + '_cm.txt'
cm_file = open(os.path.join(output_directory, cm_file_name), 'w+')
cm_file.write(str(cm))
cm_file.close()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = numpy.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt_name = classifier_name + '_confusion_matrix.png'
plt.savefig(os.path.join(output_directory, plt_name))
plt.clf()
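# --- Illustrative usage sketch (not from the original pipeline): build a confusion matrix
# from toy labels with sklearn and hand it to plot_confusion_matrix(). The class names and
# the current-directory output path are placeholders, not values from the real data set.
_y_true = ['neuron', 'glia', 'neuron', 'glia', 'neuron']
_y_pred = ['neuron', 'neuron', 'neuron', 'glia', 'glia']
_classes = sorted(set(_y_true))
_cm = confusion_matrix(_y_true, _y_pred, labels=_classes)
# plot_confusion_matrix(_cm, os.getcwd(), 'toy_classifier', _classes)  # writes a .txt and a .png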
def variance_threshold_select(df, thresh=0.0, na_replacement=-999):
df1 = df.copy(deep=True) # Make a deep copy of the dataframe
selector = VarianceThreshold(thresh)
selector.fit(df1.fillna(na_replacement)) # Fill NA values as VarianceThreshold cannot deal with those
df2 = df.loc[:,selector.get_support(indices=False)] # Keep only the columns whose variance exceeds the threshold
return df2
def save_metadata(file, label_list):
with open(file, 'w') as f:
for i in label_list:
f.write('{}\n'.format( i ))
def plot_rank_importance(data, labels, output_directory):
model = RandomForestRegressor(n_estimators=20, max_features=2)
model = model.fit(data, labels)
model.feature_importances_
important_features = pandas.Series(data=model.feature_importances_, index=data.columns)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['font.size']=6
# plt.rcParams['lines.markersize']=7
plt.rcParams['lines.linewidth'] = 0.8
from sklearn import decomposition
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
sys.path.append(root_path)
def cumul_var_ratio(var_ratio):
sum=0.0
cumul_var_ratio=[]
for i in range(len(var_ratio)):
sum=sum+var_ratio[i]
cumul_var_ratio.append(sum)
return cumul_var_ratio
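# --- Equivalent shortcut (reference only; the explicit loop above is left as-is):
# numpy's cumulative sum yields the same cumulative-variance sequence.
_demo_ratio = [0.6, 0.3, 0.07, 0.03]
assert np.allclose(np.cumsum(_demo_ratio), cumul_var_ratio(_demo_ratio))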
samples_setss=[
[
pd.read_csv(root_path+'/Huaxian_dwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Huaxian_eemd/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Huaxian_modwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Huaxian_ssa/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Huaxian_vmd/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
],
[
pd.read_csv(root_path+'/Xianyang_dwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Xianyang_eemd/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Xianyang_modwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Xianyang_ssa/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Xianyang_vmd/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
],
[
pd.read_csv(root_path+'/Zhangjiashan_dwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Zhangjiashan_eemd/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Zhangjiashan_modwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Zhangjiashan_ssa/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Zhangjiashan_vmd/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
],
]
decomposers=['DWT','EEMD','MODWT','SSA','VMD']
stations=['Huaxian','Xianyang','Zhangjiashan']
zorders=[4,3,2,1,0]
ini_pcs_dict={}
fig_file=[
'CVR of different decomposers at Huaxian',
'CVR of different decomposers at Xianyang',
'CVR of different decomposers at Zhangjiashan',
]
colors=['b','g','r','c','m']
fig_idx=['(a)','(b)','(c)']
plt.figure(figsize=(7.48,3.6))
for k in range(len(samples_setss)):
samples_sets = samples_setss[k]
n_components=[#remove dimension of Y
samples_sets[0].shape[1]-1,
samples_sets[1].shape[1]-1,
samples_sets[2].shape[1]-1,
samples_sets[3].shape[1]-1,
samples_sets[4].shape[1]-1,
]
ini_pc={}
plt.subplot(1,3,k+1)
# plt.title(decomposers[i])
plt.xlabel('Number of principle components(PCs)\n'+fig_idx[k])
if k==0:
plt.ylabel('Cumulative variance ratio(CVR)')
else:
plt.yticks([])
# plt.title(stations[k])
for i in range(len(samples_sets)):
samples = samples_sets[i]
y = samples['Y']
X = samples.drop('Y',axis=1)
print(X)
print(n_components[i])
pca = decomposition.PCA(n_components=n_components[i])
pca.fit(X)
var_ratio = pca.explained_variance_ratio_
cum_var_ratio = cumul_var_ratio(var_ratio)
print(cum_var_ratio)
xx = 0
for j in range(len(cum_var_ratio)):
if cum_var_ratio[j]>=0.99:
xx = j+1
break
ini_pc[decomposers[i]]=xx
print('xx={}'.format(xx))
yy1 = cum_var_ratio[xx-1]
yy2 = var_ratio[xx-1]
# plt.plot(range(1,n_components[i]+1),var_ratio,'-o',label=decomposers[i]+': VR',zorder=0)
plt.plot(range(1,n_components[i]+1),cum_var_ratio,'-o',label=decomposers[i],zorder=zorders[i])
plt.plot([xx],[yy1],marker='+',zorder=10,
label=decomposers[i]
)
# if i==len(samples_sets)-1:
# plt.plot(xx,yy1,c='black',marker='+',label='Initial number of PCs with CVR larger than 0.99',zorder=10)
# else:
# plt.plot(xx,yy1,c='black',marker='+',label='',zorder=10)
plt.xlim(0,30)
if k==1:
plt.legend(
loc='upper center',
# bbox_to_anchor=(0.08,1.01, 1,0.101),
bbox_to_anchor=(0.5,1.09),
ncol=10,
shadow=False,
frameon=True,
)
ini_pcs_dict[stations[k]]=ini_pc
# plt.tight_layout()
plt.subplots_adjust(left=0.06, bottom=0.13, right=0.99,top=0.92, hspace=0.2, wspace=0.05)
plt.savefig(root_path+'/graphs/CVR of PCs.tif',format='TIFF',dpi=1200)
plt.savefig(root_path+'/graphs/CVR of PCs.pdf',format='PDF',dpi=1200)
plt.savefig(root_path+'/graphs/CVR of PCs.eps',format='eps',dpi=2000)
ini_pcs_df = pd.DataFrame(ini_pcs_dict)
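# --- Side note (not used above): scikit-learn can also pick the component count directly --
# decomposition.PCA(n_components=0.99, svd_solver='full') keeps just enough components to
# explain 99% of the variance, mirroring the manual cum_var_ratio >= 0.99 search in the loop above.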
# -*- coding: utf-8 -*-
"""
Poop analysis
Created 2020
@author: PClough
"""
import pandas as pd
import numpy as np
import chart_studio
import plotly.graph_objects as go
from plotly.offline import plot
from plotly.subplots import make_subplots
from scipy import stats
import datetime as dt
from time import strptime
import calendar
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import vlc
df = pd.read_excel("Poo Data.xlsx", engine='openpyxl')
chart_studio.tools.set_credentials_file(username='YOUR USERNAME HERE', api_key='YOUR API HERE')
#%% Histogram of size of poos
# Replace sizes of 1, 2, and 3 in "size of poo?" heading to be small, medium and large
df['Size of poo? '].replace([1, 2, 3], ['Small', 'Medium', 'Poonarmi'], inplace = True)
fig = go.Figure()
fig.add_trace(go.Histogram(x = df['Size of poo? '],
name = 'Poop',
xbins = dict(
start = "Small",
),
marker_color = ('rgb(166,86,50)')))
fig.update_layout(
title_text = "Size of the poo poo's",
yaxis_title = "Count",
font = dict(size = 16))
plot(fig)
#%% Violin plot for day of week on x axis and type of poo on y axis
fig2 = go.Figure()
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
Date_column = df['When did the poo occur? '].dt.strftime("%a")
for day in days:
fig2.add_trace(go.Violin(x = Date_column[Date_column == day],
y = df['Type of poop 💩? '][Date_column == day],
name = day,
box_visible = True,
meanline_visible = True,
showlegend = False,
fillcolor = 'chocolate',
line = dict(color = 'DarkSalmon')))
fig2.update_layout(yaxis = dict(range=[0.5,7.5]), title = "Average poo type over whole year", font = dict(size = 16))
fig2.update_yaxes(ticks="inside", tick0 = 1, dtick = 1, title = "Bristol stool scale index")
plot(fig2)
# %% Ridgeline plot for day of week on x axis and type of poo on y axis
# 12 rows of data, one for each month
# 7 columns of data, averaging that months poo types
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
New_Date_column = df['When did the poo occur? '].dt.strftime("%b")
i = 0
max_val = 0
data = np.zeros([12,100]) # the value of 100 is just massively oversizing it, assuming there will be less than 100 poo's of a single type in one month
for month in months:
for j in range(1,8):
data[i, np.sum(df['Type of poop 💩? '][New_Date_column == month] == str(j))] = j-1
if max_val < np.sum(df['Type of poop 💩? '][New_Date_column == month] == str(j)):
max_val = np.sum(df['Type of poop 💩? '][New_Date_column == month] == str(j))
i += 1
# Find where the furthest right hand datapoint is and then cut everything off after that
idx = np.arange(max_val+1, 100)
data = np.delete(data, idx, axis=1)
data[data == 0] = 'nan'
fig3 = go.Figure()
for data_line in data:
fig3.add_trace(go.Violin(x=data_line))
fig3.update_traces(orientation='h', side='positive', width=2, points=False)
fig3.update_layout(xaxis_showgrid=False,
xaxis_zeroline=False,
xaxis=dict(range=[0,8]),
title = "Average poo type over whole year",
font = dict(size = 16))
plot(fig3)
#%% Violin plot for day of week on x axis and type of poo on y axis broken out month by month
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
fig4 = make_subplots(rows=2, cols=6, shared_yaxes=True, subplot_titles=(months))
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
Date_column = df['When did the poo occur? '].dt.strftime("%a")
row_num = 1
col_num = 0
for month in months:
col_num += 1
if col_num > 6:
col_num = 1
row_num = 2
for day in days:
fig4.add_trace(go.Violin(x = Date_column[Date_column == day][New_Date_column == month],
y = df['Type of poop 💩? '][Date_column == day][New_Date_column == month],
name = month + day,
box_visible = True,
meanline_visible = True,
showlegend = False,
fillcolor = 'chocolate',
line = dict(color = 'DarkSalmon')),
row = row_num, col = col_num)
fig4.update_layout(yaxis = dict(range=[0.5,7.5]), title = "Average poo type, broken down month-by-month", font = dict(size = 16))
fig4.update_yaxes(ticks="inside", col = 1, tick0 = 1, dtick = 1, title = "Bristol stool scale index")
fig4.update_xaxes(ticks="inside")
plot(fig4)
#%% scatter plot x axis = Time since last poo (delta t), y axis (Size of poo)
# Return the number of hours from a timedelta
def days_hours_minutes(td):
return td.days*24 + td.seconds//3600 + (td.seconds//60)%60/60
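# --- Reference check (not used below): pandas exposes elapsed hours directly via
# total_seconds()/3600; the helper above truncates to whole-minute resolution.
_td = pd.Timedelta(hours=5, minutes=30, seconds=45)
assert abs(days_hours_minutes(_td) - 5.5) < 1e-9        # helper drops the 45 s remainder
assert abs(_td.total_seconds()/3600 - 5.5125) < 1e-9    # full resolution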
d = {'When did the poo occur?': df['When did the poo occur? '], 'Size of poo?': df['Size of poo? '], 'time_since_last_poo': pd.Timedelta(0, unit='h')}
scatterplot_df = pd.DataFrame(data=d)
scatterplot_df = scatterplot_df.sort_values(by = ['When did the poo occur?']).reset_index(drop=True)
for i in range(1, len(df['When did the poo occur? '])-1):
scatterplot_df.loc[i, 'time_since_last_poo'] = days_hours_minutes(scatterplot_df['When did the poo occur?'][i] - scatterplot_df['When did the poo occur?'][i-1])
scatterplot_df.loc[0, 'time_since_last_poo'] = 0
scatterplot_df.loc[scatterplot_df['time_since_last_poo'].last_valid_index(), 'time_since_last_poo'] = 0
# Correlation line
dataforfitline = np.zeros([np.size(scatterplot_df,0), 1])
j = 0
for i in scatterplot_df['Size of poo?']:
if i == 'Small':
dataforfitline[j] = 1
if i == 'Medium':
dataforfitline[j] = 2
if i == 'Poonarmi':
dataforfitline[j] = 3
j += 1
dataforfitline2 = pd.DataFrame(data = scatterplot_df['time_since_last_poo'])
dataforfitline2[1] = dataforfitline
dataforfitline2 = dataforfitline2.sort_values(by = ['time_since_last_poo']).reset_index(drop=True)
slope, intercept, r_value, p_value, std_err = stats.linregress(dataforfitline2.astype(float))
line = slope*scatterplot_df['time_since_last_poo'] + intercept
fig5 = go.Figure(data=go.Scatter(x = scatterplot_df['time_since_last_poo'],
# y = scatterplot_df['Size of poo?'],
y = dataforfitline2[1],
mode = 'markers',
text = scatterplot_df['When did the poo occur?'],
name = 'Poops',
hovertemplate = "%{text}"))
fig5.add_trace(go.Scatter(x = scatterplot_df['time_since_last_poo'], y = line, mode = 'lines', name = 'R\u00b2 = ' + round(r_value**2,2).astype(str)))
fig5.update_xaxes(title_text="Hours since last poop")
fig5.update_yaxes(title_text="Size of poop")
fig5.update_layout(title = "Correlation between time since last poo and size of poo", font = dict(size = 16))
plot(fig5)
#%% scatter plot x axis = Time since las poo (delta t), y axis (Type of poo)
d2 = {'When did the poo occur?': df['When did the poo occur? '], 'Type of poo?': df['Type of poop 💩? '], 'time_since_last_poo': pd.Timedelta(0, unit='h')}
scatterplot_df2 = pd.DataFrame(data=d2)
scatterplot_df2 = scatterplot_df2.sort_values(by = ['When did the poo occur?']).reset_index(drop=True)
for i in range(1, len(df['When did the poo occur? '])-1):
scatterplot_df2.loc[i, 'time_since_last_poo'] = days_hours_minutes(scatterplot_df2['When did the poo occur?'][i] - scatterplot_df2['When did the poo occur?'][i-1])
scatterplot_df2.loc[0, 'time_since_last_poo'] = 0
scatterplot_df2.loc[scatterplot_df2['time_since_last_poo'].last_valid_index(), 'time_since_last_poo'] = 0
# Correlation line
dataforfitline3 = pd.DataFrame(data = scatterplot_df2['time_since_last_poo'])
dataforfitline3[1] = scatterplot_df2['Type of poo?']
dataforfitline3 = dataforfitline3.sort_values(by = ['time_since_last_poo']).reset_index(drop=True)
slope, intercept, r_value, p_value, std_err = stats.linregress(dataforfitline3.astype(float))
line = slope*scatterplot_df2['time_since_last_poo'] + intercept
fig6 = go.Figure(data=go.Scatter(x = scatterplot_df2['time_since_last_poo'],
y = scatterplot_df2['Type of poo?'],
mode = 'markers',
text = scatterplot_df2['When did the poo occur?'],
hovertemplate = "%{text}"))
fig6.add_trace(go.Scatter(x = scatterplot_df2['time_since_last_poo'], y = line, mode = 'lines', name = 'R\u00b2 = ' + round(r_value**2,2).astype(str)))
fig6.update_xaxes(title_text = "Hours since last poop")
fig6.update_yaxes(title_text = "Type of poop")
fig6.update_layout(title = "Correlation between time since last poo and type of poo", font = dict(size = 16))
plot(fig6)
# %% Calendar plot of each day and number of poos, darker colour for more poos
# Number of poos for each day
Num_of_poos = pd.DataFrame()
j = 0
for i in df['When did the poo occur? '].dt.strftime("%x").unique():
Num_of_poos.loc[j, 'Date'] = i
Num_of_poos.loc[j, 'Day'] = pd.to_datetime(i).strftime("%d")
Num_of_poos.loc[j, 'Month'] = pd.to_datetime(i).strftime("%b")
Num_of_poos.loc[j, 'Count'] = (df['When did the poo occur? '].dt.strftime("%x") == i).sum()
j += 1
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
total_poos_in_month = []
plot_titles = []
j = 0
for i in months:
total_poos_in_month.append(int(Num_of_poos['Count'][Num_of_poos['Month'] == i].sum()))
plot_titles.append(i + '<br>Total poopies = ' + str(total_poos_in_month[j]))
j += 1
fig7 = make_subplots(rows = 2, cols = 6, shared_yaxes = True, subplot_titles = plot_titles)
year = 2020
row_num = 1
col_num = 0
for month in months:
col_num += 1
if col_num > 6:
col_num = 1
row_num = 2
MyMonthData = calendar.monthcalendar(2020, strptime(month, '%b').tm_mon)
z = MyMonthData[::-1]
m = 0
for i in z:
n = 0
for j in i:
if j == 0:
z[m].pop(n)
z[m].insert(n, '')
elif any((Num_of_poos['Day'] == str(j).zfill(2)) & (Num_of_poos['Month'] == month)) == False:
z[m].pop(n)
z[m].insert(n, 0)
else:
z[m].pop(n)
z[m].insert(n, int(Num_of_poos.loc[(Num_of_poos['Day'] == str(j).zfill(2)) & (Num_of_poos['Month'] == month), 'Count']))
n += 1
m += 1
name = []
for a in calendar.Calendar().monthdatescalendar(year, strptime(month, '%b').tm_mon):
for b in a:
name.append(b.strftime("%d %b %Y"))
name = np.reshape([inner for inner in name], (len(MyMonthData), 7))
name = name[::-1]
fig7.add_trace(go.Heatmap(
x = days,
y = list(range(len(MyMonthData), 0, -1)),
z = z,
meta = name,
hovertemplate = 'Date: %{meta} <br>Number of poos: %{z}<extra></extra>',
xgap = 1, ygap = 1,
zmin = 0, zmax = max(Num_of_poos['Count']),
# colorscale = "turbid"),
colorscale = [
[0, 'rgb(249, 238, 229)'], # 0 for the prettiness
[0.14, 'rgb(249, 230, 217)'], # 0
[0.29, 'rgb(204, 153, 102)'], # 1
[0.43, 'rgb(153, 102, 51)'], # 2
[0.57, 'rgb(115, 77, 38)'], # 3
[0.71, 'rgb(77, 51, 25)'], # 4
[1, 'rgb(38, 26, 13)']]), # 5
row = row_num, col = col_num)
fig7['layout'].update(plot_bgcolor = 'white',
title_text = "Poopy calendar",
yaxis_showticklabels = False,
yaxis7_showticklabels = False,
font = dict(size = 16))
plot(fig7)
# add % of that months poos for each day in hovertemplate
# %% Calendar plot of each day and a function of type/number/size of poos, darker colour for worse poos
# Correlation line
dataforfitline = np.zeros([np.size(scatterplot_df,0), 1])
j = 0
for i in scatterplot_df['Size of poo?']:
if i == 'Small':
dataforfitline[j] = 1
if i == 'Medium':
dataforfitline[j] = 2
if i == 'Poonarmi':
dataforfitline[j] = 3
j += 1
# Number of poos for each day
Num_type_of_poos = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('matplotlib', 'notebook')
import matplotlib
import seaborn as sb
from matplotlib import pyplot as plt
from matplotlib import colors as mpcolors
import numpy as np
from scipy.optimize import linear_sum_assignment
import pandas as pd
# Jupyter Specifics
get_ipython().run_line_magic('matplotlib', 'inline')
from IPython.display import display, HTML
from ipywidgets.widgets import interact, interactive, IntSlider, FloatSlider, Layout, ToggleButton, ToggleButtons, fixed, Output
display(HTML("<style>.container { width:100% !important; }</style>"))
style = {'description_width': '100px'}
slider_layout = Layout(width='99%')
import skfda
from skfda.datasets import fetch_growth
from skfda.exploratory.visualization import plot_fpca_perturbation_graphs
from skfda.preprocessing.dim_reduction.projection import FPCA
from skfda.representation.basis import BSpline, Fourier, Monomial
import hdbscan
import warnings
import math
from tqdm.notebook import tqdm # progress bars
## map stuff
import ipyleaflet
import json
import geopandas as gpd
import pickle as pk
import os
import requests
from ipywidgets import link, FloatSlider, HTML
from branca.colormap import linear
from matplotlib import colors as mpcolors
def corcl(a,b):
if len(set(a)) > 0 or len(set(b)) > 0:
return len(set(a).intersection(set(b)))/float(len(set(a).union(set(b))))
else:
return 1
def match1(a,x):
rtn = [1 if a[i] == x else 0 for i in range(len(a)) ]
return rtn
def rescale(v,d):
""" functional form of correction factor using simple inversion formula
for with v2'=1/(1-v2) the dimensionality correction v = v2 * v2'/(v2'+d/2-1)
projecting equivalent validity at dim = 2"""
if d > 12.:
d = 12.
logd = np.log(0.5*d)
return v*(1.+logd)/(1.+v*logd)
def score_int(a,b):
if len(set(a)) > 0 or len(set(b)) > 0:
return len(set(a).intersection(set(b)))
else:
return 0
def score_int_union(a,b):
if len(set(a)) > 0 or len(set(b)) > 0:
return len(set(a)&set(b))/len(set(a)|set(b)) # length intersection divided by length union
else:
return 0
def matchset(a,x):
rtn = [i for i in range(len(a)) if a[i] == x]
return rtn
def closest_hue(hue,huelist):
mindist = 2.
imin = -1
for i,h in enumerate(huelist):
if h > hue:
dist = min(h-hue,hue+1-h)
else:
dist = min(hue-h,h+1-hue)
if dist < mindist:
mindist = dist
imin = i
return imin
def color_mean_rgb_to_hsv(rgb_colours,weights=None,modal=False):
""" the hue is a circular quantity, so mean needs care
see https://en.wikipedia.org/wiki/Mean_of_circular_quantities
inputs: rgb_colors 1D array of rgb colours with entries [r,g,b]
weights: None,'all' or same length array of weights in 0. to 1. for biasing entries
modal: if Ture then chose hue as mode of hues, otherwise circular mean
"""
pi = np.pi
eps = 0.0001
hsum = 0.
ssum = 0.
vsum = 0.
asum = 0.
bsum = 0.
wsum = 0.
hwsum = 0.
if len(rgb_colours) == 0:
print('Error in color_mean_rgb_to_hsv: empty list of rgb_colours')
return [0.,0.,0.]
if weights is None:
weights = [1. if mpcolors.rgb_to_hsv(c)[1] > 0 else 0. for c in rgb_colours] # designed to exclude -1 unclustered colours
if np.sum(np.array(weights)) < eps:
weights = [1. for c in rgb_colours]
elif weights == 'all':
weights = [1. for c in rgb_colours]
hdic = {}
for i,c in enumerate(rgb_colours):
hsvcol = mpcolors.rgb_to_hsv(c)
h = hsvcol[0]
s = hsvcol[1]
v = hsvcol[2]
if s > eps and v > eps:
asum = asum + np.sin(h*2.*pi)*weights[i]
bsum = bsum + np.cos(h*2.*pi)*weights[i]
hwsum = hwsum + weights[i]
if h in hdic:
hdic.update({h:hdic[h]+1})
else:
hdic.update({h:1})
ssum = ssum + hsvcol[1]*weights[i]
vsum = vsum + hsvcol[2]*weights[i]
wsum = wsum + weights[i]
if modal:
hvals = list(hdic.keys())
hcnts = [hdic[h1] for h1 in hvals]
if len(hcnts) > 0:
hmaxcnt = np.argmax(np.array(hcnts)) # problem if hcnts is empty sequence
else:
hmaxcnt = None
if modal and len(hcnts)>0 and hcnts[hmaxcnt] >= len(rgb_colours)/4:
h = hvals[hmaxcnt]
# print('using modal hue %f with cnts %d',h,hcnts[hmaxcnt])
elif hwsum > eps:
asum = asum/hwsum
bsum = bsum/hwsum
h = np.arctan2(asum,bsum)/(2.*pi)
if h < 0.:
h = 1.+h
else:
h = 0.
if wsum > eps:
s = ssum/wsum
v = vsum/wsum
else:
print('Error in color_mean_rgb_to_hsv: 0 wsum')
s = 0.
v = 0.
# print(rgb_colours,'mean',mpcolors.hsv_to_rgb([h,s,v]))
if h < 0.:
print('error in color_mean, hue out of range',h)
h = 0.
if h > 1.:
print('error in color_mean, hue out of range',h)
h = 1.
return [h,s,v]
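# --- Illustrative check (not part of the analysis): hue is circular, so two reds sitting just
# either side of the 0/1 wrap (h=0.02 and h=0.98) should average to red, not to 0.5 (cyan).
# color_mean_rgb_to_hsv handles this via the sin/cos averaging above.
_reds = [mpcolors.hsv_to_rgb([0.02, 1, 1]), mpcolors.hsv_to_rgb([0.98, 1, 1])]
_h, _s, _v = color_mean_rgb_to_hsv(_reds, weights='all')
# _h comes out next to the red end of the wheel (close to 0 or to 1), not at 0.5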
def size_order(clusterings):
""" relabel clusters in each clustering in order of increasing size"""
clusterings_o = np.zeros(clusterings.shape,dtype = int)
for i,clustering in enumerate(clusterings):
labels = list(set(clustering)-set([-1]))
sizes = np.zeros(len(labels),dtype = int)
for j,lab in enumerate(labels):
sizes[j] = len(matchset(clustering,lab))
order = np.flip(np.argsort(sizes))
clusterings_o[i,:] = [order[c] if c != -1 else c for c in clustering]
return clusterings_o
def clust_assign(clustering_a,clustering_b,colors_a,colors_b):
""" relables clustering b to match clustering a optimally
according tot he Hungarian algorithm, implemented in scipy
"""
labels_a = list(set(clustering_a))
labels_b = list(set(clustering_b))
scores = np.zeros((len(labels_a),len(labels_b)),dtype=float)
for i,a in enumerate(labels_a):
for j,b in enumerate(labels_b):
scores[i,j] = score_int_union(matchset(clustering_a,a),matchset(clustering_b,b)) # length intersection divided by length union (result 0. to 1. for identity)
row_ind, col_ind = linear_sum_assignment(scores, maximize=True) # maximise the total overlap score
dic_a_2_b = {labels_a[i]: labels_b[j] for i, j in zip(row_ind, col_ind)}
return dic_a_2_b
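# --- Tiny reference example (not used directly): linear_sum_assignment returns paired
# row/column indices; with maximize=True it picks the pairing with the largest total score,
# which is the behaviour the cluster matching above relies on.
_score = np.array([[0.9, 0.1],
[0.2, 0.8]])
_rows, _cols = linear_sum_assignment(_score, maximize=True)
# _rows == [0, 1], _cols == [0, 1]: matching 0<->0 and 1<->1 gives total score 1.7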
def clust(clustering_a,clustering_b,colors_a,colors_b,relabel=True,merge=True):
""" relables clustering b to match clustering a
if more than one cluster in a optimally matches a particular cluster in b, then color of b is merger of colors in a
if more than one cluster in b optimally matches a particular cluster in a, then colors in a merged and split for b
inputs: clustering_a,b are lists of cluster labels by country, colors_a,b are lists of rgb colors by country in same order
returns: newcolors_b in rgb format
NB. colors_b are only used to preserve s,v values relating to probs of cluster membership for b in final colors
NB. the hues of b_cols are determined by the matching of clustering b with clustering a
NB. all elts of same cluster have the same hue
"""
labels_a = list(set(clustering_a))
labels_b = list(set(clustering_b))
newcolors_b = np.zeros((len(colors_b),3),dtype=float)
newcolors_b[:,:] = colors_b[:,:] # initialized as copy of colors_b, colors for each country in clustering b
a_to_b = {}
b_to_a = {}
a_cols = {}
b_cols = {}
# a_to_b mapping of labels a to the label b (+ its match score in a tuple) with largest matching score: ratio of intersecting countries to union
# maxvals_a_to_b are list of max scores for each label in labels_a
# reorder_a is the largest to smallest order of max scores
# labels_a_sort is labels_a reordered by reorder_a :i.e. the labels in a with the best matches to a label in b first
for a in labels_a:
maxscore = 0
maxlab = -2
for b in labels_b:
score = score_int_union(matchset(clustering_a,a),matchset(clustering_b,b)) # length intersection divided by length union (result 0. to 1. for identity)
if score > maxscore:
maxscore = score
maxlab = b
a_to_b.update({a:(maxlab,maxscore)})
maxvals_a_to_b = [a_to_b[a][1] for a in labels_a]
reorder_a = np.flip(np.argsort(maxvals_a_to_b))
labels_a_sort = [labels_a[r] for r in list(reorder_a)]
# same as above for b_to_a
for b in labels_b:
maxscore = 0
maxlab = -2
for a in labels_a:
score = score_int_union(matchset(clustering_a,a),matchset(clustering_b,b))
if score > maxscore:
maxscore = score
maxlab = a
b_to_a.update({b:(maxlab,maxscore)})
maxvals_b_to_a = [b_to_a[b][1] for b in labels_b]
reorder_b = np.flip(np.argsort(maxvals_b_to_a))
labels_b_sort = [labels_b[r] for r in list(reorder_b)]
#print('before relabel')
# relabeling uses labels_b_sort, labels_a_sort, a_to_b, as well as colors_a,b and clustering_a,b
if relabel:
for b in labels_b_sort: # first adjust colors_b to match mapped clusters from a (transfer and merge)
amap = [a for a in labels_a_sort if a_to_b[a][0] == b] # the labels a that prefer b as best match
for a in amap:
alist = matchset(clustering_a,a) # the positions in country list with label a (non empty since a is a label of clustering_a)
a_cols.update({(b,a) : mpcolors.hsv_to_rgb(color_mean_rgb_to_hsv([colors_a[al] for al in alist]))}) # average color of alist for b chosen as color
# print('in relabel a,b,a_cols',a,b,a_cols[(b,a)])
blist = matchset(clustering_b,b) # the positions in country list with label b
amap_t = list(set(amap)-set([-1])) # the labels of real clusters (excluding unclustered set with label -1) that prefer b
if len(amap_t) > 0: # some non-unclustered (ie not -1) clusters that prefer to map to b
# h = sum([mpcolors.rgb_to_hsv(a_cols[a])[0] for a in amap])/len(amap) # average hue from amap
h = color_mean_rgb_to_hsv([a_cols[(b,a)] for a in amap_t],[a_to_b[a][1] for a in amap_t])[0]
for j in blist: # indices of countries with label b
s = mpcolors.rgb_to_hsv(colors_b[j])[1] # take s saturation from b
v = mpcolors.rgb_to_hsv(colors_b[j])[2] # take v value from b
newcolors_b[j,:] = mpcolors.hsv_to_rgb([h,s,v]) # back to rgb
# b_cols[b] = newcolors_b[blist[0]] # first matching elt colour (to extract hue)
b_cols[b] = mpcolors.hsv_to_rgb(color_mean_rgb_to_hsv([newcolors_b[bl] for bl in blist])) # average color of blist chosen as color
#print('before merge')
if merge:
for a in labels_a_sort: # now readjust colors in b that both map to same a (split)
bmap = [b for b in labels_b_sort if b_to_a[b][0] == a]
if len(bmap)>1:
for i,b in enumerate(bmap):
blist = matchset(clustering_b,b)
# h = (mpcolors.rgb_to_hsv(b_cols[b])[0] + mpcolors.rgb_to_hsv(a_cols[a])[0])/2
if (b,a) in list(a_cols.keys()):
h,s0,v0 = color_mean_rgb_to_hsv([b_cols[b],a_cols[(b,a)]]) # mean of current color and that of a class that prefers this b
else:
# h = mpcolors.rgb_to_hsv(colors_b[j])[0]
h,s0,v0 = mpcolors.rgb_to_hsv(b_cols[b])
for j in blist:
# s = mpcolors.rgb_to_hsv(b_cols[b])[1] # take s saturation from b # these two lines cause all elts to have same value as first for s and v
# v = mpcolors.rgb_to_hsv(b_cols[b])[2] # take v from b
s = mpcolors.rgb_to_hsv(colors_b[j])[1] # take s saturation from b
v = mpcolors.rgb_to_hsv(colors_b[j])[2] # take v from b
newcolors_b[j,:]= mpcolors.hsv_to_rgb([h,s,v])
b_cols[b]=mpcolors.hsv_to_rgb([h,s0,v0])
return newcolors_b
def clust_lsa(clustering_a,clustering_b,colors_a,colors_b,base_colors=None,relabel=True,merge=True):
""" relables clustering b to match clustering a, optimally using first linear_sum_assignment,
then augmenting with new clusters for poorly aligned clusters
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html
inputs: clustering_a,b are lists of cluster labels by country, colors_a,b are lists of rgb colors by country in same order
returns: newcolors_b in rgb format
NB. colors_b are only used to preserve s,v values relating to probs of cluster membership for b in final colors
NB. the hues of b_cols are determined by the matching of clustering b with clustering a
NB. all elts of same cluster have the same hue
"""
# print('in clust_lsa')
labels_a = list(set(clustering_a))
labels_b = list(set(clustering_b))
labels_a_clus = list(set(clustering_a)-set([-1])) # all except unclustered class
labels_b_clus = list(set(clustering_b)-set([-1]))
# do linear_sum_assignment based on cost matrix: 1-score_int_union
scores = np.zeros((len(labels_a_clus),len(labels_b_clus)),dtype=float)
score_dict = {}
for i,a in enumerate(labels_a_clus):
for j,b in enumerate(labels_b_clus): # length intersection divided by length union (result 0. to 1. for identity)
scores[i,j] = 1-score_int_union(matchset(clustering_a,a),matchset(clustering_b,b))
score_dict[(a,b)] = scores[i,j]
row_ind,col_ind=linear_sum_assignment(scores)
# construct forward and backward dictionary assignments
if len(row_ind) != len(col_ind):
print('Error: row and col indices have different lengths',row_ind,col_ind)
dic_a_2_b = {labels_a_clus[row_ind[i]]:labels_b_clus[col_ind[i]] for i in range(len(row_ind))}
dic_a_2_b.update({-1:-1})
dic_b_2_a = {labels_b_clus[col_ind[i]]:labels_a_clus[row_ind[i]] for i in range(len(row_ind))}
dic_b_2_a.update({-1:-1})
# introduce new labels for poor matching labels in complete assignment
maxlabel = max(labels_a)
relabel_b = {}
for a in labels_a:
if a not in dic_a_2_b.keys():
dic_a_2_b.update({a:None})
else:
relabel_b[dic_a_2_b[a]]=a
# print('dic a_2_b',dic_a_2_b)
# print('dic b_2_a',dic_b_2_a)
# print('relabel_b I',relabel_b)
for b in labels_b: #unmatched labels b are given new cluster labels
if b not in relabel_b.keys():
maxlabel = maxlabel+1
relabel_b[b]=maxlabel
elif b != -1:
if score_dict[(dic_b_2_a[b] ,b)] > 0.8: # insufficient match, new cluster name
# print('new label',dic_b_2_a[b],b,scoredict[(dic_b_2_a[b] ,b)])
maxlabel = maxlabel+1
relabel_b[b]=maxlabel
# print('relabel_b II',relabel_b)
new_labels_b = np.array([relabel_b[b] for b in labels_b])
newcolors_b = np.zeros((len(colors_b),3),dtype=float)
newcolors_b[:,:] = colors_b[:,:] # initialized as copy of colors_b, colors for each country in clustering b
# relabel colours
if relabel:
for b in labels_b: # first adjust colors_b to match mapped clusters from a (transfer and merge)
if relabel_b[b] in labels_a:
a = dic_b_2_a[b]
alist = matchset(clustering_a,a)
newcol = colors_a[alist[0]]
else:
newcol = base_colors[1+relabel_b[b]]
# print('new color for b from',b,'to',relabel_b[b],'entry +1',newcol)
h = mpcolors.rgb_to_hsv(newcol)[0]
for j in matchset(clustering_b,b): # indices of countries with label b
s = mpcolors.rgb_to_hsv(colors_b[j])[1] # take s saturation from b
v = mpcolors.rgb_to_hsv(colors_b[j])[2] # take v value from b
newcolors_b[j,:] = mpcolors.hsv_to_rgb([h,s,v]) # back to rgb
# no merge yet
return newcolors_b
def cluster_map_colors(cons1,cons2,relabel=True,merge=True):
""" recalculate colors of countries in consensus clustering cons2, based on alignment with clustering cons1
input: two consensus clusterings with completed scans
relabel abnd merge options (default True) as for clust
output: colors2 : the matched coloring of cons2
side_effect : places inverse mapping iidx in cons2 to allow country order alignment
"""
refc1 = cons1.refclustering
refc2 = cons2.refclustering
clusdat1 = np.array([cons1.clusdata[refc1][i] for i in cons1.sidx])
clusdat2 = np.array([cons2.clusdata[refc2][i] for i in cons1.sidx]) # NB not cons2.sidx
if len(clusdat1) != len(clusdat2):
print('Error: country list lengths not equal')
return None
else: ncountries = len(clusdat2)
cons2.iidx = [None]*ncountries
for i, j in zip(range(ncountries), cons2.sidx): cons2.iidx[j] = i # undo cons2.idx reordering
colors1 = np.array([cons1.basecolors[clus+1] for clus in clusdat1])
colors2_c = np.array([cons2.basecolors[clusdat2[cons2.iidx[i]]+1] for i in range(ncountries)] )
colors2_0 = np.array([colors2_c[cons2.sidx[i]] for i in range(ncountries)] )
#colors1 = np.array(cons1.rgblist) # already ordered like scountries
#colors2_c = np.array([cons2.rgblist[cons2.iidx[i]] for i in range(ncountries)] ) # change order back to match countries # DEBUG
#colors2 = np.array([colors2_c[cons2.sidx[i]] for i in range(ncountries)] ) # change order to match scountries of cons1 # DEBUG
# print(np.array(list(zip(clusdat2,mpcolors.rgb_to_hsv(colors2)))))
colors2 = clust_lsa(clusdat1,clusdat2,colors1,colors2_0,base_colors=cons2.basecolors,relabel=relabel,merge=merge)
#for i in range(len(colors2)):
# print(i,clusdat1[i],clusdat2[i],mpcolors.rgb_to_hsv(colors1[i]),mpcolors.rgb_to_hsv(colors2[i]))
return colors1,colors2
def cmap_sankey(clus1,clus2,colors1,colors2,hue_only=True):
cmap12 = {}
for ci in set(clus1):
for i,lab in enumerate(clus1):
if lab == ci:
tmp = colors1[i]
break
cmap12.update({'a'+str(ci):tmp})
for ci in set(clus2):
for i,lab in enumerate(clus2):
if lab == ci:
tmp = colors2[i]
# print(ci,mpcolors.rgb_to_hsv(tmp))
break
cmap12.update({'b'+str(ci):tmp})
if hue_only:
# reset colors to full saturation and value unless sat is 0
cmap12h = {elt:mpcolors.hsv_to_rgb([mpcolors.rgb_to_hsv(cmap12[elt])[0],1,1] if mpcolors.rgb_to_hsv(cmap12[elt])[1]!=0 else [0,0,0]) for elt in cmap12}
else:
cmap12h = cmap12
return cmap12h
def sankey(cons1,cons2,cons1_name='cons1',cons2_name='cons2',relabel=True,merge=True,hue_only=True):
# extract refclustering data and order it according to the scountries list of cons1=cons
# set up dictionary lists of countries for each label
if len(cons1.countries) != len(cons2.countries):
print('Error: lengths of countries not equal',len(cons1.countries),len(cons2.countries))
return
clus1 = [cons1.clusdata[cons1.refclustering][i] for i in cons1.sidx] # ordered like scountries
clus2 = [cons2.clusdata[cons2.refclustering][i] for i in cons1.sidx]
colors1,colors2=cluster_map_colors(cons1,cons2,relabel=relabel,merge=merge)
cmap12=cmap_sankey(clus1,clus2,colors1,colors2,hue_only=hue_only)
dic1 = {lab:[cc for i,cc in enumerate(cons1.scountries) if clus1[i]==lab] for lab in set(clus1)}
dic2 = {lab:[cc for i,cc in enumerate(cons1.scountries) if clus2[i]==lab] for lab in set(clus2)}
df = dic_compare(dic1,dic2)
h1 = hv.Sankey(df,kdims=['c1','c2'],vdims=['val'])
h1.opts(title=cons1_name+' vs '+cons2_name, cmap=cmap12, node_color='index', edge_color='c1', node_alpha=1.0, edge_alpha=0.7)
return h1
# the final cluster alignment
def plot_clusalign(countries,data,report,cols=None):
fig,ax = plt.subplots(1,1,figsize=(10,24))
if cols is not None:
todel = list(set(range(data.shape[1])) - set(cols))
data1 = np.delete(data,todel,1)
else:
data1 = data
img = ax.imshow(data1)
ax.set_yticks(range(len(countries)))
ax.set_yticklabels(countries)
if cols is None:
rep = report
else:
rep = [report[i] for i in cols]
ax.set_xticks(range(len(rep)))
plt.setp(ax.get_xticklabels(), rotation='vertical', family='monospace')
ax.set_xticklabels(rep,rotation='vertical')
#plt.show()
return fig
# Note that the colours are best understood as hue with value v = intensity related to membership prob
# note that unclustered points had probdata values of 0 formerly, now corrected to give outlier_score_
#
# We should be assigning countries to at least 4 categories : probably five. Cluster 0,1,2 other cluster and no cluster (-1)
# Currently the code relies on the color assignments cluster 0 [1,0,0] 1 [0,1,0] 2 [0,0,1] and only works for 3 clusters.
# The unclustered color of [1,1,1] did not disrupt if the probability was always 0 : this will not work with outlier extension
# Other clusters with higher labels were assigned in a rather biased way to one of 0,1,2 : this needs fixing
#
# count +1 for any RGB component
def cscore(crow,cols):
rgbsc = [0.0]*3
for j in cols:
if crow[j][0] >0:
rgbsc[0] = rgbsc[0]+1
if crow[j][1] >0:
rgbsc[1] = rgbsc[1]+1
if crow[j][2] >0:
rgbsc[2] = rgbsc[2]+1
return rgbsc
# sum RGB components
def cscore_org(crow,cols):
rgbsc = [0.0]*3
for j in cols:
rgbsc[0] = rgbsc[0]+crow[j][0]
rgbsc[1] = rgbsc[1]+crow[j][1]
rgbsc[2] = rgbsc[2]+crow[j][2]
return rgbsc
#sum weighted hues
def hscore_org(crow,cols):
hsvmean = color_mean_rgb_to_hsv([crow[j] for j in cols],'all')
return hsvmean
def hscore_mode_org(crow,cols):
hsvmean = color_mean_rgb_to_hsv([crow[j] for j in cols],'all',modal=True)
return hsvmean
def swizzle_old(countries,data,cols):
rgb = [None]*len(countries)
for i in range(len(countries)):
for j in range(data.shape[1]):
rgbsc = cscore(data[i,:,:],cols)
rgb[i] = np.argmax(rgbsc)
rtn = [None]*len(countries)
cnt = 0
print('-------blue---------')
for i in range(len(rgb)):
if rgb[i] == 2: #blue
rtn[cnt] = i
print(cnt,i,countries[i])
cnt = cnt+1
print('-------green---------')
for i in range(len(rgb)):
if rgb[i] == 1: # green
rtn[cnt] = i
print(cnt,i,countries[i])
cnt = cnt+1
print('-------red---------')
for i in range(len(rgb)):
if rgb[i] == 0: # red
rtn[cnt] = i
print(cnt,i,countries[i])
cnt = cnt+1
print('cnt =',cnt)
return rtn
def swizzleRGB(countries,data,cols):
rgb = [None]*len(countries)
for i in range(len(countries)):
for j in range(data.shape[1]):
rgbsc = cscore(data[i,:,:],cols)
rgb[i] = np.argmax(rgbsc)
rtn = {}
rtn['R']=[]
rtn['G']=[]
rtn['B']=[]
cnt = 0
for i in range(len(rgb)):
if rgb[i] == 2: #blue
rtn['B'].append(countries[i])
cnt = cnt+1
for i in range(len(rgb)):
if rgb[i] == 1: # green
rtn['G'].append(countries[i])
cnt = cnt+1
for i in range(len(rgb)):
if rgb[i] == 0: # red
rtn['R'].append(countries[i])
cnt = cnt+1
print('cnt =',cnt)
return rtn
def swizzle2(countries,data,cols,refcol):
eps = 0.0001
clus = [None]*len(countries)
rgblist = [None]*len(countries)
hsvdic = {}
hsvrefs = [mpcolors.rgb_to_hsv(c) for c in data[:,refcol]]
huesref = np.sort(list(set([hsv[0] for hsv in hsvrefs if hsv[1] > eps])))
# print('huesref',huesref)
for i in range(len(countries)):
hsvsc = hscore_org(data[i,:,:],cols)
hue = hsvsc[0]
sat = hsvsc[1]
if sat <= 0.5: # mean is classed as unclustered
clus[i] = -1
else:
clus[i] = closest_hue(hue,huesref)
hsvdic.update({countries[i]:hsvsc})
rgblist[i] = mpcolors.hsv_to_rgb(hsvsc)
# print('clus',clus,'len',len(clus))
rtn = [None]*len(countries)
cnt = 0
for j in set(clus):
print('-------class',j,'---------')
for i in range(len(countries)):
if clus[i] == j:
rtn[cnt] = i
# print(cnt,i,countries[i],rgblist[i],hsvlist[i])
print(cnt,i,countries[i])
cnt = cnt+1
print('cnt =',cnt)
return rtn,rgblist,hsvdic
def swizzle3(countries,data,cols,refcol,basecolors,refdata,satthresh = 0.7):
eps = 0.0001
clus = [None]*len(countries)
rgblist = [None]*len(countries)
hsvdic = {}
#hsvrefs = [mpcolors.rgb_to_hsv(c) for c in data[:,refcol]]
refclus = np.sort(list(set(refdata))) # cluster classes in reference column
#print('refclus',refclus)
#huesref = np.sort(list(set([hsv[0] for hsv in hsvrefs if hsv[1] > eps])))
huesref = [mpcolors.rgb_to_hsv(basecolors[1+i])[0] for i in refclus if i != -1]
#print('data shape',np.shape(data))
#print('huesref',huesref)
for i in range(len(countries)):
# hsvsc = hscore_org(data[i,:,:],cols)
hsvsc = hscore_mode_org(data[i,:,:],cols) # modal hue where available, otherwise circular-mean hue
hue = hsvsc[0]
sat = hsvsc[1]
if sat <= satthresh: # mean is classed as unclustered
clus[i] = -1
else:
clus[i] = closest_hue(hue,huesref)
#print(i,countries[i],hue,clus[i])
hsvdic.update({countries[i]:hsvsc})
rgblist[i] = mpcolors.hsv_to_rgb(hsvsc)
# print('clus',clus,'len',len(clus))
rtn = [None]*len(countries)
classes = [None]*len(countries)
cnt = 0
dic={}
for j in set(clus):
dic[j]=[]
#print('-------class',j,'---------')
for i in range(len(countries)):
if clus[i] == j:
classes[cnt] = j
rtn[cnt] = i
dic[j].append(countries[i])
hsvdic[countries[i]] = [j]+hsvdic[countries[i]] # add class to hsvdic
# print(cnt,i,countries[i],rgblist[i],hsvlist[i])
#print(cnt,i,countries[i])
cnt = cnt+1
#print('cnt =',cnt)
clus_argsort = np.lexsort((countries,clus)) # lexicographical sort of countries by reference clustering and name
# swcountries = [countries[clus_argsort[i]] for i in range(len(countries))] # sorted country names as above
swclasses = [clus[clus_argsort[i]] for i in range(len(countries))] # cluster classes in the same sorted order
swrgblist = [rgblist[clus_argsort[i]] for i in range(len(countries))] # rgb colours in the same sorted order
# return dic,swclasses,rtn,rgblist,hsvdic
return dic,swclasses,clus_argsort,rgblist,hsvdic
def swizzle_class(countries,data,cols,refcol):
clus = [None]*len(countries)
huesref = np.sort(list(set([mpcolors.rgb_to_hsv(c)[0] for c in data[:,refcol]])))
# print('huesref',huesref)
for i in range(len(countries)):
hsvsc = hscore_org(data[i,:,:],cols)
hue = hsvsc[0]
sat = hsvsc[1]
if sat <= 0.5: # mean is classed as unclustered
clus[i] = -1
else:
clus[i] = closest_hue(hue,huesref)
rtn = {}
for cl in set(clus):
rtn[cl]=[]
cnt = 0
for j in set(clus):
# print('-------class',j,'---------')
for i in range(len(countries)):
if clus[i] == j:
rtn[j].append(countries[i])
# print(cnt,i,countries[i])
cnt = cnt+1
print('cnt =',cnt)
return rtn
def swizzleHSV(countries,data,cols,refcol):
rtn = {}
clus = [None]*len(countries)
huesref = np.sort(list(set([mpcolors.rgb_to_hsv(c)[0] for c in data[:,refcol]])))
# print('huesref',huesref)
for i in range(len(countries)):
hsvsc = hscore_org(data[i,:,:],cols)
hue = hsvsc[0]
sat = hsvsc[1]
if sat <= 0.5: # mean is classed as unclustered
clus[i] = -1
else:
clus[i] = closest_hue(hue,huesref)
rtn[countries[i]]=(clus[i],hsvsc[0],hsvsc[1],hsvsc[2])
return rtn
def dic_compare(dic1,dic2):
df = pd.DataFrame(columns=['c1','c2','val'])
cnt=0
for k in dic1:
Nk = len(dic1[k])
s1 = set(dic1[k])
for kk in dic2:
s2 = set(dic2[kk])
#olap = len(s1.intersection(s2))/float(Nk)
olap = len(s1.intersection(s2))
if olap > 0:
df.loc[cnt] = ['a'+str(k),'b'+str(kk),olap]
cnt = cnt+1
return df
def dic2df(dic):
rtn = {k:dic[k].copy() for k in dic}
keys = [x for x in dic]
lenmx = 0
for k in keys:
if len(dic[k])>lenmx:
lenmx = len(dic[k])
kmx = k
for k in keys:
if len(dic[k])<lenmx:
for _ in range(lenmx-len(dic[k])):
rtn[k].append('')
return pd.DataFrame.from_dict(rtn)
#return rtn
def dic_invert(d):
inv = {}
for k, v in d.items():
if isinstance(v,list):
for vv in v:
keys = inv.setdefault(vv, [])
keys.append(k)
else:
keys = inv.setdefault(v, [])
keys.append(k)
for k in inv:
if len(inv[k]) == 1:
inv[k] = inv[k][0]
return inv
def sprint(*args, **kwargs):
output = io.StringIO()
print(*args, file=output, **kwargs)
contents = output.getvalue()
output.close()
return contents
def sprintdic(dic,chosen_country):
global chosen_class
chosen_class = None
for label in dic:
if chosen_country in dic[label]:
chosen_class = label
break
rtn = ''
if chosen_class == None:
#print('Error: chosen_country not classified')
rtn = rtn + sprint('Unclassified selection')
elif chosen_class == -1:
rtn = rtn + sprint('unclustered:')
else:
rtn = rtn + sprint('class '+str(chosen_class)+':')
if chosen_class is not None:
countries = np.sort(np.array(dic[chosen_class]))
else:
print("Error sprintdic: no countries in class",chosen_class)
return('')
colwid = max([len(cc) for cc in countries[::2]]) + 5 # padding
for i in range(0,len(countries),2):
if i < len(countries)-1:
rtn = rtn + sprint(countries[i].ljust(colwid)+countries[i+1])
# rtn = rtn + sprint("".join(country.ljust(colwid) for country in [countries[i],countries[i+1]]))
else:
rtn = rtn + sprint(countries[i])
return rtn
class Consensus:
def __init__(self,
cldata,
cases = ['deaths', 'cases', 'cases_lin2020', 'cases_pwlfit', 'cases_nonlin', 'cases_nonlinr'],
ncomp = range(2,16),
minc = range(3,10),
min_samples = range(2,3), # 1 element [2] by default
satthresh = 0.7 # metaparam for swizzle, toward 1 => more unclustered
):
for cc in cases:
if cc not in ['deaths', 'cases', 'cases_lin2020', 'cases_pwlfit', 'cases_nonlin', 'cases_nonlinr']:
print('cases can only be one of:')
print(['deaths', 'cases', 'cases_lin2020', 'cases_pwlfit', 'cases_nonlin', 'cases_nonlinr'])
self.cases = cases
self.ncomp = ncomp
self.minc = minc
self.min_samples = min_samples
self.countries = list(cldata.clusdata_all[cases[0]].keys()) # save countries in first data set as list
self.satthresh = satthresh
self.clusdata = None
self.swcountries=None
self.cldata=cldata
def scan(self,diag=False,progress=True,name=''):
countries = self.countries
lc = len(self.cases)
maxvalid = [None]*lc
maxvalidval= 0.0
maxvalidsc = [None]*lc
maxvalidscval= 0.0
minscore1 = [None]*lc
minscore1val = 999.
minscore2 = [None]*lc
minscore2val = 999.
self.report = [' ']*4*lc
self.reportdata = [None]*4*lc
# runlen = len(self.cldata.clusdata_all[self.cases[0]])
runlen = len(countries)
self.probdata=np.zeros((4*lc,runlen),dtype=float)
self.outlierdata=np.zeros((4*lc,runlen),dtype=float)
self.clusdata = np.zeros((4*lc,runlen),dtype=np.int64)
self.info = pd.DataFrame(columns=['type','minc','mins','ncomp','clustered','unclustered','validity','validitysc','score1','score2'])