update code

Colas Geier 2025-02-25 16:52:00 +01:00
parent 5715ab0179
commit 5291b3b83c
10 changed files with 442 additions and 420 deletions

View File

@@ -272,7 +272,8 @@ def _insert_parcelle1(schema='38_202207',list_parid=None):
         ELSE ltrim(dnuplam, '0') :: INTEGER
         END AS dnuplam,
         -- Parent parcel number
-        type_filiation AS type -- Filiation type
+        type_filiation AS "type", -- Filiation type
+        jdatat
     FROM "{sch}".parcelle a
     JOIN "{sch}".geo_parcelle ON geo_parcelle.geo_parcelle = a.parcelle
     )
@@ -330,7 +331,8 @@ def _insert_parcelle2(schema='38_202207',list_parid=None):
         THEN NULL :: INTEGER
         ELSE ltrim(a.dnuplam, '0') :: INTEGER END AS dnuplam,
         -- Parent parcel number
-        a.type_filiation AS "type" -- Filiation type
+        a.type_filiation AS "type", -- Filiation type
+        a.jdatat
     FROM "{sch}".parcelle a )
     SELECT
         t1.par_id,
@@ -348,7 +350,8 @@ def _insert_parcelle2(schema='38_202207',list_parid=None):
         t1.ccoprem,
         t1.ccosecm,
         t1.dnuplam,
-        t1.type
+        t1.type,
+        t1.jdatat
     FROM t1
     LEFT JOIN "{sch}".parcelle_info b ON t1.parcelle = b.geo_parcelle -- parcels without geometry
     LEFT JOIN cadastre.parcelles_cen ON t1.par_id = parcelles_cen.par_id
@@ -586,7 +589,7 @@ def _insert_lot1(schema='38_202207'):
         null AS dnulot, -- Lot number
         dcntpa AS dcntlo -- Cadastral area ()
     FROM "{sch}".parcelle
-    JOIN "{sch}".geo_parcelle ON parcelle.parcelle = geo_parcelle.geo_parcelle -- keep only the the parcels whose geometry we have
+    JOIN "{sch}".geo_parcelle ON parcelle.parcelle = geo_parcelle.geo_parcelle -- keep only the parcels whose geometry we have
     WHERE ccodep||ccocom||replace(ccopre, ' ', '0')||replace(ccosec, ' ', '0')||dnupla IN
         (SELECT ccodep||ccocom||replace(ccopre, ' ', '0')||replace(ccosec, ' ', '0')||dnupla||trim(dnulot) FROM "{sch}".suf); -- all parcels whose dnulot is NULL
     '''.format(
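A note on the dnuplam expression kept above: ltrim(dnuplam, '0') strips the cadastral zero-padding, and the surrounding CASE keeps the degenerate values as NULL instead of failing the INTEGER cast. A standalone sketch of the pattern, assuming a hypothetical engine URL and a VALUES list in place of "{sch}".parcelle:

import sqlalchemy as sa

engine = sa.create_engine('postgresql+psycopg2://user:pwd@localhost/cadastre')  # hypothetical DSN
sql = sa.text(
    "SELECT CASE WHEN ltrim(dnuplam, '0') = '' THEN NULL :: INTEGER "
    "ELSE ltrim(dnuplam, '0') :: INTEGER END AS dnuplam "
    "FROM (VALUES ('0042'), ('0000')) AS t(dnuplam)"
)
with engine.begin() as cnx:
    # '0042' casts to 42; the all-zero value trims to '' and stays NULL
    print([r.dnuplam for r in cnx.execute(sql)])  # [42, None]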

View File

@@ -285,7 +285,8 @@ def recup_id_role(author): # TODO: finish!
     azalee_auth = pers.get_auteur2().sort_index()#.replace({' ':' '},regex=True)
     azalee_auth = azalee_auth[azalee_auth.nom_prenom.isin(adapt_auth)].replace({'Inconnu':'Autre'})
     # azalee_auth.nom_prenom.replace({'Inconnu':'Autre'},regex=True,inplace=True)
-    t_roles = pd.merge(get_t_roles().reset_index(),azalee_auth, how='inner',left_on=['nom_role','prenom_role','nom_organisme'],right_on=['nom','prenom','organisme'])
+    tr = get_t_roles().reset_index().replace({'':None})
+    t_roles = pd.merge(tr,azalee_auth, how='inner',left_on=['nom_role','prenom_role','nom_organisme'],right_on=['nom','prenom','organisme'])
    dict_role = dict(zip(t_roles.nom_prenom,t_roles.id_role))
    return author.replace({' \(Inconnu\)':'',' ':' '},regex=True).str.strip().replace(dict_role)
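The new .replace({'':None}) on t_roles is what lets the inner merge line up rows whose organisme is missing: pandas treats '' and None as different join keys, but missing keys (None/NaN) do match each other. A minimal sketch with hypothetical frames:

import pandas as pd

t_roles = pd.DataFrame({'nom_role': ['DUPONT'], 'prenom_role': ['Jean'],
                        'nom_organisme': [''], 'id_role': [1]})
azalee_auth = pd.DataFrame({'nom': ['DUPONT'], 'prenom': ['Jean'],
                            'organisme': [None], 'nom_prenom': ['Jean DUPONT']})

keys = dict(left_on=['nom_role','prenom_role','nom_organisme'],
            right_on=['nom','prenom','organisme'])
print(len(pd.merge(t_roles, azalee_auth, how='inner', **keys)))                      # 0: '' != None
print(len(pd.merge(t_roles.replace({'': None}), azalee_auth, how='inner', **keys)))  # 1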
@@ -1098,16 +1099,43 @@ def OTHERINV_to_tref(db_file):
     )

+def trunc_table(table,cascade=False):
+    """
+    Truncate the table pr_zh.<table>.
+
+    Parameters
+    ----------
+    table : str
+        Name of the table to truncate
+    cascade : bool, optional
+        If True, truncation runs with the CASCADE option, which also empties
+        the tables whose foreign keys reference this one (for example,
+        truncating a wetland also removes the monitoring data linked to that
+        wetland). Defaults to False.
+    """
+    cascade = 'CASCADE;' if cascade else ';'
+    sql = 'TRUNCATE pr_zh.%s %s'%(table,cascade)
+    with con_gn.begin() as cnx:
+        cnx.execute(sql)

 if __name__ == "__main__":
+    # TRUNCATE TABLE
+    # trunc_table('t_zh',cascade=True)
+    # trunc_table('cor_zh_area')
+    # trunc_table('t_reference')
     from pycen.geonature import pr_zh
     t_zh = pr_zh.t_zh()

     drop_cols = ['auteur_geom','date_geom','type_milieu','type_site',]
     DF = zh.v_zoneshumides()
     DF.rename(columns=DICT_TZH,inplace=True)
     DF.drop(columns=drop_cols,inplace=True)
+    df = DF.copy()
     df = DF[~DF.code.isin(t_zh.code)].copy()
     migrate_to_gnZH(df)
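For reference, a sketch of what the new trunc_table() helper issues under the hood, with a hypothetical engine standing in for the pycen con_gn connection:

from sqlalchemy import create_engine, text

con_gn = create_engine('postgresql+psycopg2://user:pwd@host/geonature')  # hypothetical DSN
with con_gn.begin() as cnx:
    # CASCADE also empties every table whose foreign keys reference pr_zh.t_zh,
    # which is why the commented calls above truncate t_zh first
    cnx.execute(text('TRUNCATE pr_zh.t_zh CASCADE;'))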

View File

@@ -6,6 +6,7 @@ from sqlalchemy.engine import URL
 from datetime import datetime as dt
 import pandas as pd
 import geopandas as gpd
+from pycen import con

 # Database parameters
@@ -13,7 +14,7 @@ user = 'cen_admin'
 pwd = '#CEN38@venir'
 adr = '91.134.194.221'
 port = '5432'
-base = 'azalee'
+base = 'azalee_20240731'

 url = URL.create('postgresql+psycopg2',
                  username=user,
@@ -21,7 +22,7 @@ url = URL.create('postgresql+psycopg2',
                  host=adr,
                  database=base,
                  )
-con = create_engine(url)
+# con = create_engine(url)

 drop_v_ps = '''
 DROP VIEW IF EXISTS ps.v_pelouseseches_all CASCADE;
@@ -165,10 +166,10 @@ FROM crosstab(
     a.id_geom_site::bigint,
     a.date,
     (SELECT MAX(date) FROM ps.r_site_param WHERE id_geom_site = a.id_geom_site ) date_max,
-    c.auteur,
+    STRING_AGG(distinct c.auteur,', '),
     a.taux,
     a2.nom,
-    a1.description
+    STRING_AGG(a1.description,', ')
 FROM ps.r_site_param a
 JOIN (ps.param a1
     JOIN ps.type_param a2 ON a1.id_type = a2.id
@@ -176,7 +177,7 @@ FROM crosstab(
 JOIN auteur c ON c.id_siteparam = a.id
 --WHERE
 --  a.id_geom_site in (52716)
---GROUP BY 1
+GROUP BY 1,2,7,3,6
 ORDER BY 1,2,7,3 DESC
 $$,
 $$SELECT nom FROM ps.type_param ORDER BY nom;$$
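On the crosstab change: once auteur and description go through STRING_AGG, every non-aggregated column of the SELECT has to appear in the GROUP BY, which is why the commented --GROUP BY 1 becomes GROUP BY 1,2,7,3,6. The rule in isolation, against a throwaway VALUES table (hypothetical engine URL):

from sqlalchemy import create_engine, text

con = create_engine('postgresql+psycopg2://user:pwd@host/azalee')  # hypothetical DSN
sql = text("""
    SELECT site, jour, STRING_AGG(DISTINCT auteur, ', ') AS auteurs
    FROM (VALUES (1, DATE '2024-01-01', 'A'),
                 (1, DATE '2024-01-01', 'B')) AS t(site, jour, auteur)
    GROUP BY 1, 2
""")
with con.begin() as cnx:
    # the two authors of the same site/day collapse into one aggregated row
    print(cnx.execute(sql).all())  # [(1, datetime.date(2024, 1, 1), 'A, B')]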

View File

@@ -5,6 +5,22 @@ from sqlalchemy import text
 from datetime import datetime as dt
 import geopandas as gpd
 from pycen import con
+
+# Database parameters
+from sqlalchemy import create_engine, text
+from sqlalchemy.engine import URL
+user = 'cen_admin'
+pwd = '#CEN38@venir'
+adr = '91.134.194.221'
+port = '5432'
+base = 'azalee_20240731'
+url = URL.create('postgresql+psycopg2',
+                 username=user,
+                 password=pwd,
+                 host=adr,
+                 database=base,
+                 )
+con = create_engine(url)

 drop_v_zh = 'DROP VIEW IF EXISTS zones_humides.v_zoneshumides CASCADE;'
 with con.begin() as cnx:
@@ -15,7 +31,7 @@ v_zh_hab = """
 DROP VIEW IF EXISTS zones_humides.v_zh_hab;
 CREATE OR REPLACE VIEW zones_humides.v_zh_hab
 AS
-WITH auteur AS (
+WITH author AS (
     SELECT DISTINCT ON (id_sitehab)
         c.id_sitehab,
         string_agg(c1.auteur,';' ORDER BY c1.auteur) auteur
@@ -23,21 +39,28 @@ WITH auteur AS (
     JOIN personnes.v_personne c1 ON c1.id = c.id_auteur
     GROUP BY c.id_sitehab
     ORDER BY 1
+),
+d_max as (
+    SELECT distinct on (id_site)
+        a.id_site,
+        c.auteur,
+        a."date"
+    FROM zones_humides.r_site_habitat a
+    JOIN author c ON c.id_sitehab = a.id
+    WHERE a."valid"
+    ORDER BY a.id_site, "date" DESC
 )
-SELECT DISTINCT ON (a.id_site)
-    --a.id_geom_site,
-    a.id_site,
-    c.auteur,
-    --MAX(a."date") "date",
-    a."date",
-    string_agg(a.id_cb,';' order by a.id_cb) code_cb,
-    string_agg(b.lb_hab_fr,';' order by a.id_cb) lib_cb
-FROM zones_humides.r_site_habitat a
+SELECT --distinct on (a.id_cb)
+    d.id_site,
+    d.auteur,
+    d."date",
+    string_agg(a.id_cb,';' order by a.id_cb asc) code_cb,
+    string_agg(b.lb_hab_fr,';' order by a.id_cb asc) lib_cb
+FROM (SELECT distinct on (id_site,id_cb) * FROM zones_humides.r_site_habitat ) a
 JOIN ref_habitats.corine_biotope b ON a.id_cb = b.id
-JOIN auteur c ON c.id_sitehab = a.id
+JOIN d_max d on a.id_site = d.id_site
 WHERE a."valid"
 GROUP BY 1,2,3
-ORDER BY a.id_site, a."date" desc,row_number() OVER (ORDER BY a.id_site) desc
 ;"""

 grant = """
 GRANT ALL ON TABLE zones_humides.v_zh_hab TO grp_admin;
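The new d_max CTE relies on PostgreSQL's DISTINCT ON: ordered by id_site then "date" DESC, it keeps exactly one row per site, i.e. the latest validated habitat record and its author. The idiom on its own, with a VALUES stand-in for r_site_habitat (hypothetical engine URL):

from sqlalchemy import create_engine, text

con = create_engine('postgresql+psycopg2://user:pwd@host/azalee')  # hypothetical DSN
sql = text("""
    SELECT DISTINCT ON (id_site) id_site, auteur, "date"
    FROM (VALUES (1, 'A', DATE '2023-05-01'),
                 (1, 'B', DATE '2024-02-01')) AS t(id_site, auteur, "date")
    ORDER BY id_site, "date" DESC   -- newest row wins within each id_site
""")
with con.begin() as cnx:
    print(cnx.execute(sql).all())  # [(1, 'B', datetime.date(2024, 2, 1))]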
@@ -563,18 +586,35 @@ v_rhomeosite = """
 DROP VIEW IF EXISTS zones_humides.v_rhomeosite;
 CREATE OR REPLACE VIEW zones_humides.v_rhomeosite
 AS
-SELECT
-    v.site_code||' - '||v.nom "NAME",
-    SPLIT_PART(v.auteur_geom,' (',1) "REFERENT",
-    REPLACE(SPLIT_PART(v.auteur_geom,' (',2),')','') "ORG",
-    SPLIT_PART(v.typo_sdage,' - ',1) "TYPE",
-    CASE WHEN r.nom::text = 'alpin' THEN '1'
-        WHEN r.nom::text = 'continental' THEN '2'
-        WHEN r.nom::text = 'mediterraneen' THEN '4'
-    END "ODONATE",
+with t1 as (
+    SELECT
+        v.site_code,
+        r.nom,
+        st_area(st_intersection(v.geom, r.geom)) area_intersect
+    FROM zones_humides.v_zoneshumides v
+    join ref_territoire.ref_biogeo r on st_intersects(v.geom, r.geom)
+    --group by 1,2
+), t2 as (
+    select
+        site_code,
+        nom,
+        row_number() over (partition by site_code order by area_intersect desc) as ismax_ter
+    from t1
+)
+SELECT (v.site_code::text || ' - '::text) || v.nom::text AS "NAME",
+    split_part(v.auteur_geom, ' ('::text, 1) AS "REFERENT",
+    replace(split_part(v.auteur_geom, ' ('::text, 2), ')'::text, ''::text) AS "ORG",
+    split_part(v.typo_sdage, ' - '::text, 1) AS "TYPE",
+    CASE
+        WHEN t2.nom = 'alpin'::text THEN '1'::text
+        WHEN t2.nom = 'continental'::text THEN '2'::text
+        WHEN t2.nom = 'mediterraneen'::text THEN '4'::text
+        ELSE NULL::text
+    END AS "ODONATE",
     v.geom
-FROM zones_humides.v_zoneshumides v, ref_territoire.ref_biogeo r
-WHERE st_intersects(v.geom, r.geom)
+FROM zones_humides.v_zoneshumides v
+join t2 on v.site_code = t2.site_code and t2.ismax_ter = 1
+;
 """

 grant = """
 GRANT ALL ON TABLE zones_humides.v_rhomeosite TO grp_admin;
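The t1/t2 rewrite removes the duplicate rows the old implicit join produced when a wetland straddles two biogeographic zones: t2 ranks the intersection areas per site_code and the final join keeps only the dominant zone (ismax_ter = 1). The ranking idiom in isolation, on hypothetical values (hypothetical engine URL):

from sqlalchemy import create_engine, text

con = create_engine('postgresql+psycopg2://user:pwd@host/azalee')  # hypothetical DSN
sql = text("""
    SELECT site_code, nom FROM (
        SELECT site_code, nom,
               ROW_NUMBER() OVER (PARTITION BY site_code
                                  ORDER BY area_intersect DESC) AS ismax_ter
        FROM (VALUES ('38XX0001', 'alpin',       120.0),
                     ('38XX0001', 'continental',  80.0)) AS t(site_code, nom, area_intersect)
    ) ranked
    WHERE ismax_ter = 1
""")
with con.begin() as cnx:
    # only the zone with the largest overlap survives
    print(cnx.execute(sql).all())  # [('38XX0001', 'alpin')]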

View File

@@ -315,6 +315,7 @@ def normalize_colname(df):
         'ident_':'ident',
         'id':'ident',
         'idfinal':'id_origine',
+        'site_code':'id_site',
         'date_':'date',
         'obs':'auteur',
         'structur':'structure',
@@ -323,11 +324,13 @@ def normalize_colname(df):
         's_p_brous' :'%_embrous',
         's_p_brouss':'%_embrous',
         'taux_embrou':'%_embrous',
+        'tx_embrous':'%_embrous',
         'niv__embro':'niv_embrous',
         'niv_embro' :'niv_embrous',
         'niv_embrou' :'niv_embrous',
         'niv_emb' :'niv_embrous',
         'embroussaillement' :'niv_embrous',
+        'embrouss' :'niv_embrous',
         'taux_recvmt':'%_recouvmnt',
         'recouvrement':'recouvmnt',
         'recouvreme':'recouvmnt',
@@ -336,6 +339,7 @@ def normalize_colname(df):
         'recouvr_' :'recouvmnt',
         'remarque' :'remarques',
         'remarq_' :'remarques',
+        'rmq_intere' :'remarques',
         'legendes' :'legende',
         'legend' :'legende',
         'sources' :'source',
@@ -430,7 +434,7 @@ def ident_newsite(df,rcvmt=10):
     # Identify overlaps between new_site and old_site
     df_inters = gpd.sjoin(df,v_ps, how='left')
     del df_inters['index_right']
-    news1 = df_inters[df_inters.site_code.isna()].id_origine
+    # news1 = df_inters[df_inters.site_code.isna()].id_origine
     lst_old_site = df_inters.site_code.unique()
     v_ps = v_ps[v_ps.site_code.isin(lst_old_site)].copy()
     v_ps.loc[:,'surf'] = v_ps.area
@@ -1298,9 +1302,9 @@ if __name__ == "__main__":
         '"PS_VERCORS_CEN38_2011"'
     ]
     # from_table = '"cr_ECRIN_habitats_CBNA_2014"'
-    # from_table = '"cr_VERCORS_habitats_CBNA_1999-2007"'
-    from_file = 'PS_AGGREGATION_NB_AG.shp'
-    path0 = '/home/colas/Documents/9_PROJETS/2_PS/TO IMPORT/'
+    from_table = None
+    from_file = 'PS38_modifs_AG_2024.gpkg'
+    path0 = '/home/colas/Documents/9_PROJETS/2_PS/TO IMPORT/2024/'
     # org = from_file.split('/')[1]

     tutu = pd.DataFrame()
@@ -1353,10 +1357,10 @@
     df = normalize_colname(df)
     df = format_date(df)
     df['table_org'] = table
-    df['structure'] = 'APIE'
-    df['type_pat'].replace(['Indéterminé','/'],None,inplace=True)
-    df.loc[df.type_pat.notna(),'pratiques'] = \
-        df[df.type_pat.notna()].pratiques + ' ' + df[df.type_pat.notna()].type_pat.str.lower()
+    df['structure'] = 'CEN Isère'
+    # df['type_pat'].replace(['Indéterminé','/'],None,inplace=True)
+    # df.loc[df.type_pat.notna(),'pratiques'] = \
+    #     df[df.type_pat.notna()].pratiques + ' ' + df[df.type_pat.notna()].type_pat.str.lower()
     tutu = pd.concat([tutu,df])
@@ -1399,7 +1403,8 @@
     df_new = filter_saisierror(df_new)
     if not df_maj.empty:
         df_maj['auteur'] = normalize_auteur(df_maj.auteur)
-        df_maj['id_origine'] = df_maj['id_origine'].astype(str)
+        id_col = 'id_origine' if 'id_origine' in df_maj.columns else 'id_site'
+        df_maj['id_origine'] = df_maj[id_col].astype(str)
         df_maj = filter_saisierror(df_maj)
     if not df_replace['df'].empty:
         df_replace['df']['auteur'] = normalize_auteur(df_replace['df']['auteur'])
@@ -1410,7 +1415,7 @@
     # # df[d].replace([' '], [' '],regex=True,inplace=True)
     # df[d] = df[d].str.normalize('NFKC')

-    DF = df_new.copy()
+    DF = df_maj.copy()
     Cnhab = DF.columns[DF.columns.str.startswith('n_hab')]
     if not all(DF[[*Cnhab]].astype(float).sum(axis=1) == 100):
         print( DF[ ~(DF[[*Cnhab]].astype(float).sum(axis=1) == 100) ] )
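The id_col fallback added at -1399 is just a column lookup with a default; a self-contained rerun of the logic on a hypothetical update frame that only carries id_site (the name normalize_colname now maps site_code to):

import pandas as pd

df_maj = pd.DataFrame({'id_site': [101, 102]})  # hypothetical frame without id_origine
id_col = 'id_origine' if 'id_origine' in df_maj.columns else 'id_site'
df_maj['id_origine'] = df_maj[id_col].astype(str)
print(df_maj.dtypes.to_dict())  # {'id_site': dtype('int64'), 'id_origine': dtype('O')}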

View File

@@ -13,10 +13,6 @@ from pyproj import crs
 import pycen
 con = pycen.con

-FILE_PATH = '/home/colas/Documents/9_PROJETS/1_ZH/MAJ/reinventairezhisre/'
-GEOM_PATH = '20231011_Zone_Humide_Les_Chanines_Typhaie_Saint_Maurice_l_Exil.shp'
-DATA_PATH = 'Tableau_saisie_ZH_Les_Chanines.xlsx'

 def crsp_colSite(df):
     '''
@@ -756,6 +752,10 @@ def insertAttrsRegHydro(sh5):
         .replace([*p_inout_perm.nom.str.lower()],[*p_inout_perm.id.astype(str)])

     if dfcon.columns.str.contains('sub').any():
+        if dfcon['sub_freq'].dtype != object:
+            dfcon['sub_freq'] = dfcon['sub_freq'].astype(object)
+        if dfcon['sub_etend'].dtype != object:
+            dfcon['sub_etend'] = dfcon['sub_etend'].astype(object)
         dfcon['sub_freq'].fillna('Inconnu',inplace=True)
         dfcon['sub_etend'].fillna('Inconnu',inplace=True)
         dfcon['id_freqsub'] = dfcon['sub_freq'].str.lower() \
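The new astype(object) guards exist because sub_freq/sub_etend arrive as float64 when the sheet column is all-NaN, and filling a float column with the string 'Inconnu' is unreliable in recent pandas. A minimal reproduction of the guarded fill:

import numpy as np
import pandas as pd

dfcon = pd.DataFrame({'sub_freq': [np.nan, np.nan]})   # float64, as read from an empty column
if dfcon['sub_freq'].dtype != object:
    dfcon['sub_freq'] = dfcon['sub_freq'].astype(object)
dfcon['sub_freq'].fillna('Inconnu', inplace=True)
print(dfcon['sub_freq'].tolist())  # ['Inconnu', 'Inconnu']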
@@ -789,7 +789,7 @@ def insertAttrsRegHydro(sh5):
     if ins:
         ids = select_ID(dfcon[dfcon.columns.drop('auteur')],sch,tab_sub)
         ids.loc[~ids.id_etendsub.isna(),'id_etendsub'] = ids.loc[~ids.id_etendsub.isna(),'id_etendsub']\
-            .astype(int).astype(str)
+            .astype(int)
         if ids.id_freqsub.dtype==int:
             ids.id_freqsub = ids.id_freqsub.astype(str)
         same_col = dfcon.columns[dfcon.columns.isin(ids.columns)]
@@ -848,12 +848,13 @@ def insertAttrsFct(sh6,nom_typ_court=False):
         .str.lower() \
         .replace([*param_tmp.nom.str.lower()],[*param_tmp.id])
     df['id_fct'] = df['id_fct'].astype(int)

     df,ins = insertAttrs(df,sch, tab)
     if ins:
         ids = select_ID(df[df.columns.drop('auteur')],sch,tab)
         if 'description' in df.columns:
             df.description = df.description.astype(str)
+            df.loc[df.description=='nan','description'] = 'None'
             ids.description = ids.description.astype(str)
             if df.description.str.contains("'").any():
                 df.description = df.description.replace("'","''",regex=True)
@@ -872,10 +873,16 @@
 if __name__ == "__main__":

-    gdf = gpd.read_file(FILE_PATH+GEOM_PATH, crs='EPSG:2154')
+    from os import path
+    FILE_PATH = '/home/colas/Documents/9_PROJETS/1_ZH/MAJ/Actu 2024/TEREO - 20241002_ENVOI_SIG_ZH/ADD DATA'
+    GEOM_PATH = 'TEREO_newZH.gpkg'
+    DATA_PATH = 'Tableau_saisie_ZH_TEREO.xlsx'
+    Gdf = gpd.read_file(path.join(FILE_PATH,GEOM_PATH), crs='EPSG:2154')
+    # Gdf['site_code'] = '38GR0070'
     # lst = ['38BB0089','38BB0090','38BB0091','38BB0092']
     # gdf = gdf[gdf.site_code.isin(lst)]
-    DF = pd.read_excel(FILE_PATH+DATA_PATH, sheet_name=None, header=1)
+    DF = pd.read_excel(path.join(FILE_PATH,DATA_PATH), sheet_name=None, header=1)
     lst_sheet = [*DF.keys()]
     for k in lst_sheet:
         if isinstance(DF[k], pd.DataFrame):
@@ -896,8 +903,9 @@ if __name__ == "__main__":
         inplace=True)
     lst_site = list(sh1.site_cod.unique())
-    gdf = gdf[gdf.site_code.isin(lst_site)].reset_index(drop=True)
-    gdf.rename_geometry('geom', inplace=True)
+    gdf = Gdf[Gdf.site_code.isin(lst_site)].reset_index(drop=True)
+    if gdf.geometry.name != 'geom':
+        gdf.rename_geometry('geom', inplace=True)
     lst_site = list(gdf.site_code.unique())
     sh1 = sh1[sh1.site_cod.isin(lst_site)]
     sh1.name = lst_sheet[0]
@@ -979,7 +987,7 @@ if __name__ == "__main__":
     insertAttrsCB(sh2)
     insertAttrsUsgPrss(sh4)
     insertAttrsRegHydro(sh5)
-    insertAttrsFct(sh6)
+    insertAttrsFct(sh6.dropna(axis=1,how='all'))

View File

@@ -1,188 +1,179 @@
 from pycen import con_gn

 sql = '''
-DROP VIEW IF EXISTS gn_exports.v_synthese_sinp_with_metadata_flora;
-CREATE VIEW gn_exports.v_synthese_sinp_with_metadata_flora AS
+DROP VIEW IF EXISTS gn_exports.v_synthese_sinp_with_metadata_flora_for_gn2pg;
+CREATE VIEW gn_exports.v_synthese_sinp_with_metadata_flora_for_gn2pg AS
 WITH af_actors AS (
-    SELECT cafa.id_acquisition_framework,
-        json_build_object('type_role',
-            CASE
-                WHEN cafa.id_organism IS NOT NULL THEN 'organism'::TEXT
-                WHEN cafa.id_role IS NOT NULL THEN 'role'::TEXT
-                ELSE NULL::TEXT
-            END,
-            'uuid_actor', coalesce(borg.uuid_organisme, tro.uuid_role),
-            'cd_nomenclature_actor_role', tn.cd_nomenclature, 'identity',
-            CASE
-                WHEN cafa.id_organism IS NOT NULL
-                    THEN json_build_object('organism_name', borg.nom_organisme)
-                WHEN cafa.id_role IS NOT NULL THEN json_build_object('first_name',
-                    tro.nom_role,
-                    'last_name',
-                    tro.prenom_role) END,
-            'email', coalesce(borg.email_organisme, tro.email)) AS json_data
-    FROM gn_meta.cor_acquisition_framework_actor cafa
-    LEFT JOIN utilisateurs.bib_organismes borg ON cafa.id_organism = borg.id_organisme
-    LEFT JOIN utilisateurs.t_roles tro ON cafa.id_role = tro.id_role
-    JOIN ref_nomenclatures.t_nomenclatures tn
-        ON cafa.id_nomenclature_actor_role = tn.id_nomenclature),
-af AS (
-    SELECT taf.id_acquisition_framework,
-        jsonb_build_object('uuid', taf.unique_acquisition_framework_id, 'name',
-            taf.acquisition_framework_name,
-            'desc', taf.acquisition_framework_desc, 'start_date',
-            taf.acquisition_framework_start_date, 'end_date',
-            taf.acquisition_framework_end_date,
-            'initial_closing_date', taf.initial_closing_date, 'territorial_level',
-            ntl.cd_nomenclature, 'financing_type', nft.cd_nomenclature, 'target_description',
-            taf.target_description, 'ecologic_or_geologic_target',
-            taf.ecologic_or_geologic_target, 'actors',
-            json_agg(af_actors.json_data)) AS af_data
-    FROM gn_meta.t_acquisition_frameworks taf
-    JOIN af_actors ON af_actors.id_acquisition_framework = taf.id_acquisition_framework
-    LEFT JOIN ref_nomenclatures.t_nomenclatures ntl
-        ON taf.id_nomenclature_territorial_level = ntl.id_nomenclature
-    LEFT JOIN ref_nomenclatures.t_nomenclatures nft
-        ON taf.id_nomenclature_financing_type = nft.id_nomenclature
-    GROUP BY taf.id_acquisition_framework, taf.acquisition_framework_name, taf.acquisition_framework_desc,
-        taf.acquisition_framework_start_date, taf.acquisition_framework_end_date, taf.initial_closing_date,
-        ntl.cd_nomenclature, nft.cd_nomenclature),
-ds_actors AS (
-    SELECT cda.id_dataset,
-        json_build_object('type_role',
-            CASE
-                WHEN cda.id_organism IS NOT NULL THEN 'organism'::TEXT
-                WHEN cda.id_role IS NOT NULL THEN 'role'::TEXT
-                ELSE NULL::TEXT
-            END, 'uuid_actor', coalesce(borg.uuid_organisme, tro.uuid_role),
-            'cd_nomenclature_actor_role', tn.cd_nomenclature, 'identity',
-            CASE
-                WHEN cda.id_organism IS NOT NULL
-                    THEN json_build_object('organism_name', borg.nom_organisme)
-                WHEN cda.id_role IS NOT NULL THEN json_build_object('first_name',
-                    tro.nom_role,
-                    'last_name',
-                    tro.prenom_role) END,
-            'email', coalesce(borg.email_organisme, tro.email)) AS json_data
-    FROM gn_meta.cor_dataset_actor cda
-    LEFT JOIN utilisateurs.bib_organismes borg ON cda.id_organism = borg.id_organisme
-    LEFT JOIN utilisateurs.t_roles tro ON cda.id_role = tro.id_role
-    JOIN ref_nomenclatures.t_nomenclatures tn
-        ON cda.id_nomenclature_actor_role = tn.id_nomenclature),
-ds AS (SELECT tds.id_dataset,
-    tds.id_acquisition_framework,
-    -- tds.additional_data,
-    jsonb_build_object('uuid', tds.unique_dataset_id, 'name', tds.dataset_name, 'desc', tds.dataset_desc,
-        'shortname', tds.dataset_shortname, 'data_type', ndt.cd_nomenclature,
-        'collecting_method', ncm.cd_nomenclature, 'data_origin', ndo.cd_nomenclature,
-        'dataset_objectif', ndso.cd_nomenclature, 'resource_type', nrt.cd_nomenclature,
-        'source_status', nss.cd_nomenclature, 'territories', array_agg(DISTINCT
-        ref_nomenclatures.get_cd_nomenclature(cdt.id_nomenclature_territory)),
-        'actors', json_agg(ds_actors.json_data)) AS dataset_data
-    FROM gn_meta.t_datasets tds
-    JOIN ds_actors ON ds_actors.id_dataset = tds.id_dataset
-    LEFT JOIN gn_meta.cor_dataset_territory cdt ON cdt.id_dataset = tds.id_dataset
-    LEFT JOIN ref_nomenclatures.t_nomenclatures ndt
-        ON tds.id_nomenclature_data_type = ndt.id_nomenclature
-    LEFT JOIN ref_nomenclatures.t_nomenclatures ncm
-        ON tds.id_nomenclature_collecting_method = ncm.id_nomenclature
-    LEFT JOIN ref_nomenclatures.t_nomenclatures ndo
-        ON tds.id_nomenclature_data_origin = ndo.id_nomenclature
-    LEFT JOIN ref_nomenclatures.t_nomenclatures ndso
-        ON tds.id_nomenclature_dataset_objectif = ndso.id_nomenclature
-    LEFT JOIN ref_nomenclatures.t_nomenclatures nrt
-        ON tds.id_nomenclature_resource_type = nrt.id_nomenclature
-    LEFT JOIN ref_nomenclatures.t_nomenclatures nss
-        ON tds.id_nomenclature_source_status = nss.id_nomenclature
-    GROUP BY tds.id_dataset, tds.id_acquisition_framework, tds.unique_dataset_id, tds.dataset_name,
-        tds.dataset_desc, tds.dataset_shortname, ndt.cd_nomenclature, ncm.cd_nomenclature,
-        ndo.cd_nomenclature, ndso.cd_nomenclature, nrt.cd_nomenclature, nss.cd_nomenclature)
-SELECT s.id_synthese,
-    s.entity_source_pk_value AS id_source,
-    s.unique_id_sinp AS id_perm_sinp,
-    s.unique_id_sinp_grp AS id_perm_grp_sinp,
-    s.date_min AS date_debut,
-    s.date_max AS date_fin,
-    s.cd_nom,
-    s.meta_v_taxref AS version_taxref,
-    s.nom_cite,
-    s.count_min AS nombre_min,
-    s.count_max AS nombre_max,
-    s.altitude_min,
-    s.altitude_max,
-    s.depth_min AS profondeur_min,
-    s.depth_max AS profondeur_max,
-    s.observers AS observateurs,
-    s.determiner AS determinateur,
-    s.validator AS validateur,
-    s.sample_number_proof AS numero_preuve,
-    s.digital_proof AS preuve_numerique,
-    s.non_digital_proof AS preuve_non_numerique,
-    s.comment_context AS comment_releve,
-    s.comment_description AS comment_occurrence,
-    ds.dataset_data AS jdd_data,
-    af.af_data AS ca_data,
-    s.reference_biblio,
-    s.cd_hab AS code_habitat,
-    h.lb_hab_fr AS habitat,
-    s.place_name AS nom_lieu,
-    s.precision,
-    s.additional_data::TEXT AS donnees_additionnelles,
-    st_astext(s.the_geom_4326) AS wkt_4326,
-    n1.cd_nomenclature AS nature_objet_geo,
-    n2.cd_nomenclature AS type_regroupement,
-    s.grp_method AS methode_regroupement,
-    n3.cd_nomenclature AS comportement,
-    n4.cd_nomenclature AS technique_obs,
-    n5.cd_nomenclature AS statut_biologique,
-    n6.cd_nomenclature AS etat_biologique,
-    n7.cd_nomenclature AS naturalite,
-    n8.cd_nomenclature AS preuve_existante,
-    n9.cd_nomenclature AS precision_diffusion,
-    n10.cd_nomenclature AS stade_vie,
-    n11.cd_nomenclature AS sexe,
-    n12.cd_nomenclature AS objet_denombrement,
-    n13.cd_nomenclature AS type_denombrement,
-    n14.cd_nomenclature AS niveau_sensibilite,
-    n15.cd_nomenclature AS statut_observation,
-    n16.cd_nomenclature AS floutage_dee,
-    n17.cd_nomenclature AS statut_source,
-    n18.cd_nomenclature AS type_info_geo,
-    n19.cd_nomenclature AS methode_determination,
-    n20.cd_nomenclature AS statut_validation,
-    coalesce(s.meta_update_date, s.meta_create_date) AS derniere_action
-FROM gn_synthese.synthese s
-JOIN ds ON ds.id_dataset = s.id_dataset
-JOIN af ON ds.id_acquisition_framework = af.id_acquisition_framework
-LEFT JOIN ref_habitats.habref h ON h.cd_hab = s.cd_hab
-LEFT JOIN ref_nomenclatures.t_nomenclatures n1 ON s.id_nomenclature_geo_object_nature = n1.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n2 ON s.id_nomenclature_grp_typ = n2.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n3 ON s.id_nomenclature_behaviour = n3.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n4 ON s.id_nomenclature_obs_technique = n4.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n5 ON s.id_nomenclature_bio_status = n5.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n6 ON s.id_nomenclature_bio_condition = n6.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n7 ON s.id_nomenclature_naturalness = n7.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n8 ON s.id_nomenclature_exist_proof = n8.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n9 ON s.id_nomenclature_diffusion_level = n9.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n10 ON s.id_nomenclature_life_stage = n10.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n11 ON s.id_nomenclature_sex = n11.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n12 ON s.id_nomenclature_obj_count = n12.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n13 ON s.id_nomenclature_type_count = n13.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n14 ON s.id_nomenclature_sensitivity = n14.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n15 ON s.id_nomenclature_observation_status = n15.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n16 ON s.id_nomenclature_blurring = n16.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n17 ON s.id_nomenclature_source_status = n17.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n18 ON s.id_nomenclature_info_geo_type = n18.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n19 ON s.id_nomenclature_determination_method = n19.id_nomenclature
-LEFT JOIN ref_nomenclatures.t_nomenclatures n20 ON s.id_nomenclature_valid_status = n20.id_nomenclature
-JOIN taxonomie.taxref t ON t.cd_nom = s.cd_nom
+    SELECT cafa.id_acquisition_framework
+    FROM gn_meta.cor_acquisition_framework_actor cafa
+    LEFT JOIN utilisateurs.bib_organismes borg ON cafa.id_organism = borg.id_organisme
+    WHERE borg.uuid_organisme = '5a433bd0-2070-25d9-e053-2614a8c026f8'::uuid
+), af AS (
+    SELECT taf.id_acquisition_framework
+    FROM gn_meta.t_acquisition_frameworks taf
+    JOIN af_actors ON af_actors.id_acquisition_framework = taf.id_acquisition_framework
+    GROUP BY taf.id_acquisition_framework
+), ds AS (
+    SELECT tds.id_dataset,
+        tds.dataset_name AS nom_jdd,
+        tds.id_acquisition_framework
+    FROM gn_meta.t_datasets tds
+    GROUP BY tds.id_dataset, tds.dataset_name
+), geo AS (
+    SELECT "left"(geo_1.area_code::text, 2) AS departement,
+        geo_1.area_code AS commune,
+        s_1.id_synthese,
+        st_transform(s_1.the_geom_local, 4326) AS st_transform
+    FROM ref_geo.l_areas geo_1
+    JOIN gn_synthese.synthese s_1 ON st_intersects(s_1.the_geom_4326, st_transform(geo_1.geom, 4326))
+    WHERE geo_1.id_type = 25
+)
+SELECT
+    ds.nom_jdd AS nom_jdd,
+    s.unique_id_sinp_grp AS id_sinp_releve,
+    occ.id_releve_occtax AS identifiant_releve,
+    NULL::text AS code_perso_releve,
+    s.unique_id_sinp AS id_sinp_observation,
+    s.entity_source_pk_value AS identifiant_observation,
+    geo.departement,
+    geo.commune,
+    NULL::text AS lieu_dit,
+    CASE
+        WHEN "position"(sp.srtext::text, 'GEOGCS'::text) = 1 THEN "substring"(replace(sp.srtext::text, 'GEOGCS["'::text, ''::text), 1, "position"(replace(sp.srtext::text, 'GEOGCS["'::text, ''::text), '",'::text) - 1)
+        WHEN "position"(sp.srtext::text, 'PROJCS'::text) = 1 THEN "substring"(replace(sp.srtext::text, 'PROJCS["'::text, ''::text), 1, "position"(replace(sp.srtext::text, 'PROJCS["'::text, ''::text), '",'::text) - 1)
+        WHEN "position"(sp.srtext::text, 'GEOCCS'::text) = 1 THEN "substring"(replace(sp.srtext::text, 'GEOCCS["'::text, ''::text), 1, "position"(replace(sp.srtext::text, 'GEOCCS["'::text, ''::text), '",'::text) - 1)
+        WHEN "position"(sp.srtext::text, 'COMPD_CS'::text) = 1 THEN "substring"(replace(sp.srtext::text, 'COMPD_CS["'::text, ''::text), 1, "position"(replace(sp.srtext::text, 'COMPD_CS["'::text, ''::text), '",'::text) - 1)
+        ELSE 'Non défini'::text
+    END AS sys_coord,
+    st_srid(s.the_geom_4326) AS srid,
+    st_astext(s.the_geom_4326) AS localisation_wkt,
+    st_x(st_centroid(s.the_geom_4326)) AS coord_x,
+    st_y(st_centroid(s.the_geom_4326)) AS coord_y,
+    s."precision" AS precision,
+    NULL::text AS nature_objet,
+    s.altitude_min AS alti_min,
+    s.altitude_max AS alti_max,
+    NULL::text AS pente,
+    NULL::text AS exposition,
+    NULL::text AS comm_geol,
+    NULL::text AS milieu,
+    s.observers AS observateurs,
+    s.date_min AS date_debut,
+    s.date_max AS date_fin,
+    NULL::text AS comm_context,
+    n2.mnemonique AS type_regroupement,
+    s.grp_method AS meth_regroupement,
+    NULL::text AS surface,
+    CASE WHEN (s."additional_data"::json#>>'{strate_flore}') IS NOT NULL
+            THEN (s."additional_data"::json#>>'{strate_flore}')::text
+        WHEN s."additional_data"::json#>>'{id_nomenclature_strate_flore}' IS NOT NULL
+            THEN ref_nomenclatures.get_nomenclature_label((s."additional_data"::json#>>'{id_nomenclature_strate_flore}')::int)::text
+        ELSE NULL::text END AS strate_vegetation,
+    CASE WHEN ref_nomenclatures.get_nomenclature_label((s."additional_data"::json#>>'{id_nomenclature_strate_flore}')::int)::text = 'Strate herbacée'::text
+            THEN (s."additional_data"::json#>>'{heigth_herbace}')::text
+        WHEN ref_nomenclatures.get_nomenclature_label((s."additional_data"::json#>>'{id_nomenclature_strate_flore}')::int)::text = 'Strate arbustive'::text
+            THEN (s."additional_data"::json#>>'{heigth_arbust}')::text
+        WHEN ref_nomenclatures.get_nomenclature_label((s."additional_data"::json#>>'{id_nomenclature_strate_flore}')::int)::text = 'Strate arborée'::text
+            THEN (s."additional_data"::json#>>'{heigth_arbore}')::text
+        WHEN ref_nomenclatures.get_nomenclature_label((s."additional_data"::json#>>'{id_nomenclature_strate_flore}')::int)::text = 'Strate sous-arbustive'::text
+            THEN (s."additional_data"::json#>>'{heigth_ssarbust}')::text
+        ELSE NULL::text END
+        AS hauteur_strate,
+    CASE WHEN s."additional_data"::json#>>'{id_nomenclature_strate_flore}' IS NOT NULL
+        THEN CASE WHEN ref_nomenclatures.get_nomenclature_label((s."additional_data"::json#>>'{id_nomenclature_strate_flore}')::int)::text = 'Strate herbacée'::text
+                THEN (s."additional_data"::json#>>'{rcvmt_herbace}')::text
+            WHEN ref_nomenclatures.get_nomenclature_label((s."additional_data"::json#>>'{id_nomenclature_strate_flore}')::int)::text = 'Strate arbustive'::text
+                THEN (s."additional_data"::json#>>'{rcvmt_arbust}')::text
+            WHEN ref_nomenclatures.get_nomenclature_label((s."additional_data"::json#>>'{id_nomenclature_strate_flore}')::int)::text = 'Strate arborée'::text
+                THEN (s."additional_data"::json#>>'{rcvmt_arbore}')::text
+            WHEN ref_nomenclatures.get_nomenclature_label((s."additional_data"::json#>>'{id_nomenclature_strate_flore}')::int)::text = 'Strate sous-arbustive'::text
+                THEN (s."additional_data"::json#>>'{rcvmt_ssarbust}')::text
+            ELSE NULL::text END
+        ELSE NULL::text END AS recouvrement_strate,
+    s.cd_hab AS cdhab,
+    NULL::text AS cdhab_v,
+    NULL::text AS code_eur,
+    NULL::text AS code_eunis,
+    NULL::text AS code_cahab,
+    NULL::text AS code_cb,
+    NULL::text AS id_microhab,
+    n21.regne AS regne,
+    s.nom_cite AS nom_cite,
+    s.cd_nom as cd_nom,
+    CASE WHEN (s."additional_data"::json#>>'{effectif_textuel}') IS NOT NULL
+            THEN (s."additional_data"::json#>>'{effectif_textuel}')::text
+        WHEN s."additional_data"::json#>>'{id_nomenclature_braunblanq_abdom}' IS NOT NULL
+            THEN ref_nomenclatures.get_nomenclature_label((s."additional_data"::json#>>'{id_nomenclature_braunblanq_abdom}')::int)::text
+        ELSE NULL::text END AS abondance,
+    NULL::text AS sociabilite,
+    n11.mnemonique AS sexe,
+    n7.mnemonique AS naturalite,
+    NULL::text AS comm_description,
+    n15.mnemonique AS statut_observation,
+    n12.mnemonique AS objet_denombrement,
+    n13.mnemonique AS type_denombrement,
+    s.count_min AS nombre_min,
+    s.count_max AS nombre_max,
+    n17.label_default AS statut_source,
+    NULL::text AS reference_biblio,
+    NULL::text AS page,
+    n8.mnemonique AS preuve_existence,
+    s.digital_proof AS preuve_numerique,
+    s.non_digital_proof AS preuve_non_numerique,
+    NULL::text AS nom_collection,
+    NULL::text AS ref_collection,
+    s.determiner AS determinateur,
+    NULL::text AS niv_val,
+    NULL::text AS niveau_diffusion,
+    n16.mnemonique AS floutage_dee,
+    NULL::text AS methode_observation,
+    CASE WHEN ts.name_source like 'MONITORING_%%'
+        THEN split_part(ts.name_source,'_',2)
+        ELSE 'RELEVE FLORE'::text END AS protocole,
+    n6.mnemonique AS etat_biologique,
+    n5.mnemonique AS statut_biologique,
+    n10.mnemonique AS stade_vie,
+    n19.mnemonique AS methode_determination,
+    n3.mnemonique AS comportement,
+    s.additional_data - '{effectif_textuel,strate_flore,code_atlas,id_nomenclature_braunblanq_abdom,id_nomenclature_strate_flore}'::text[] AS additional_data
+FROM gn_synthese.synthese s
+JOIN ds ON ds.id_dataset = s.id_dataset
+JOIN af ON ds.id_acquisition_framework = af.id_acquisition_framework
+JOIN geo ON s.id_synthese = geo.id_synthese
+JOIN spatial_ref_sys sp ON st_srid(s.the_geom_4326) = sp.auth_srid
+LEFT JOIN ref_habitats.habref h ON h.cd_hab = s.cd_hab
+LEFT JOIN pr_occtax.t_releves_occtax occ ON occ.unique_id_sinp_grp = s.unique_id_sinp_grp
+LEFT JOIN ref_nomenclatures.t_nomenclatures n1 ON s.id_nomenclature_geo_object_nature = n1.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n2 ON s.id_nomenclature_grp_typ = n2.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n3 ON s.id_nomenclature_behaviour = n3.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n4 ON s.id_nomenclature_obs_technique = n4.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n5 ON s.id_nomenclature_bio_status = n5.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n6 ON s.id_nomenclature_bio_condition = n6.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n7 ON s.id_nomenclature_naturalness = n7.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n8 ON s.id_nomenclature_exist_proof = n8.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n9 ON s.id_nomenclature_diffusion_level = n9.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n10 ON s.id_nomenclature_life_stage = n10.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n11 ON s.id_nomenclature_sex = n11.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n12 ON s.id_nomenclature_obj_count = n12.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n13 ON s.id_nomenclature_type_count = n13.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n14 ON s.id_nomenclature_sensitivity = n14.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n15 ON s.id_nomenclature_observation_status = n15.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n16 ON s.id_nomenclature_blurring = n16.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n17 ON s.id_nomenclature_source_status = n17.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n18 ON s.id_nomenclature_info_geo_type = n18.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n19 ON s.id_nomenclature_determination_method = n19.id_nomenclature
+LEFT JOIN ref_nomenclatures.t_nomenclatures n20 ON s.id_nomenclature_valid_status = n20.id_nomenclature
+LEFT JOIN taxonomie.taxref n21 ON s.cd_nom = n21.cd_nom
+JOIN gn_synthese.cor_area_synthese cas ON cas.id_synthese = s.id_synthese
+-- ADD CEN_38
 JOIN gn_synthese.t_sources ts ON ts.id_source = s.id_source
-WHERE t.regne <> 'Animalia'
+WHERE n21.regne::text = 'Plantae'::text
 -- exclusion of AFs ([SICEN] Données anciennes, [TEST] : migration sicen, [SICEN] Hors Etude)
-AND ds.id_acquisition_framework NOT IN (53,66,1,2,6)
+AND ds.id_acquisition_framework NOT IN (53,65,66,1,2,6)
+-- exclusion of dataset `Observations opportunistes du CEN Isère importé depuis Faune Isère`
+AND ds.id_dataset NOT IN (185,377,236)
 -- exclusion of AFs (`Gestion de l'Espace Naturel Sensible du Méandre des Oves`, `Gestion de la Réserve Naturelle Nationale de l'Ile de la Platière`, `Gestion des Natura 2000 FR 8201749 « Milieux alluviaux et aquatiques de l'Île de la Platière » et FR 8212012 « Île de la Platière »`, `RNN Platière`
 --AND (ts.id_module = 7 AND ds.id_acquisition_framework IN (7,38,39,42,44,45,46,48,55,57,58,60)
 AND (
@@ -191,12 +182,10 @@ WHERE t.regne <> 'Animalia'
 )
 AND unaccent(observers) NOT LIKE ALL(array[
     '%%Sympetrum%%','%%Departement%%','%%Rhone%%','%%Oxalis%%',
-    '%%LPO%%','%%GRPLS%%','%%Parvi%%','%%CD Isere%%','%%CBNA%%',
-    '%%Flavia%%','%%Gentiana%%','%%region%%','%%Personnel%%','%%Independant%%'
+    '%%LPO%%','%%GRPLS%%','%%Parvi%%','%%CD Isere%%','%%Personnel%%','%%Independant%%'
+    '%%DELOCHE Denis%%'
 ])
 AND observers NOT IN ('', 'Benoit Dodelin',', ')
--- exclusion of dataset `Observations opportunistes du CEN Isère importé depuis Faune Isère`
-AND ds.id_dataset NOT IN (185,377)
 -- statut_validation = 'Certain - très probable'
 -- AND n20.cd_nomenclature = '1'
 ORDER BY s.id_synthese;
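The rewritten view leans on two PostgreSQL JSON operators: #>> extracts a value as text along a path, and jsonb - text[] deletes the keys that were already exported as dedicated columns (the additional_data line above). Both in a standalone query, with a hypothetical engine URL:

from sqlalchemy import create_engine, text

con_gn = create_engine('postgresql+psycopg2://user:pwd@host/geonature')  # hypothetical DSN
sql = text("""
    SELECT ('{"strate_flore": "herbacee", "autre": 1}'::json #>> '{strate_flore}') AS strate,
           ('{"strate_flore": "herbacee", "autre": 1}'::jsonb - '{strate_flore}'::text[]) AS reste
""")
with con_gn.begin() as cnx:
    # the path value comes out as text; the consumed key is gone from the payload
    print(cnx.execute(sql).one())  # ('herbacee', {'autre': 1})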

View File

@@ -23,6 +23,11 @@ def test_status_type(con,col,status):
         return cnx.execute(sql).one()[0]

 def insert_status_alerte(con):
+    """
+    Insert the status type used for alert lists.
+    If the value 'AL' does not yet exist in the bdc_statut_type table, insert it.
+    """
     if test_data(con,'bdc_statut_type','cd_type_statut','AL') > 0:
     # if test_status_type(con,'cd_type_statut','AL') > 0:
         print('ALERTE STATUS ALREADY EXISTS')
@@ -35,6 +40,23 @@ def insert_status_alerte(con):
             cnx.execute(sql)

 def insert_status_values(con):
+    """
+    Inserts predefined status values into the 'bdc_statut_values' table if they do not already exist.
+
+    This function iterates over a list of status values, checking whether each value already exists
+    in the table. If a value does not exist, it inserts it. Status values consist of a code and a
+    label describing the extinction risk or conservation status of a taxonomic group at the
+    departmental level.
+
+    Args:
+        con: A SQLAlchemy connection object to the database.
+
+    Note:
+        This function assumes a schema named 'taxonomie' containing a table named
+        'bdc_statut_values' in the database reached via 'con', and relies on the 'con_gn'
+        connection to execute the SQL commands.
+    """
     vals = [
         ['RE','Disparue au niveau départemental'],
         ['AS-1','Quasi menacée (localisées sans signe de déclin)'],
@@ -133,15 +155,22 @@ def test_status_text(con,col,cd_doc):
     with con.begin() as cnx:
         return cnx.execute(sql).one()[0]

-def insert_statut_text(con,cd_doc,doc):
+def insert_statut_text(con,cd_doc,doc,cd_sig,lb_adm_tr,niveau_admin):
     if test_data(con,'bdc_statut_text','cd_doc',cd_doc['id_doc']) > 0:
-    # if test_status_text(con,'cd_doc',cd_doc) > 0:
         print('ALERTE TEXT STATUS ALREADY EXISTS : ',doc)
     else:
         sql = '''
-        INSERT INTO {sch}.{tab} (cd_type_statut,cd_sig,cd_doc,niveau_admin,lb_adm_tr,doc_url,enable) VALUES
-        ('AL','{cd_doc}','INSEED38','Département','Isère','{doc}',TRUE)
-        ;'''.format(sch='taxonomie',tab='bdc_statut_text',cd_doc=cd_doc['id_doc'],doc=doc)
+        INSERT INTO {sch}.{tab} (cd_type_statut,cd_doc,cd_sig,niveau_admin,lb_adm_tr,doc_url,enable) VALUES
+        ('AL',{cd_doc},'{cdsig}','{niv_adm}','{lb_adm}','{doc}',TRUE)
+        ;'''.format(
+            sch='taxonomie',
+            tab='bdc_statut_text',
+            cd_doc=cd_doc['id_doc'],
+            doc=doc,
+            cdsig=cd_sig,
+            lb_adm=lb_adm_tr,
+            niv_adm=niveau_admin
+        )
         with con_gn.begin() as cnx:
             cnx.execute(sql)
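insert_statut_text still assembles its INSERT with str.format, so a label containing a quote would break the statement. A sketch of the same insert with bound parameters, under the schema the function already assumes (hypothetical engine URL and doc_url):

from sqlalchemy import create_engine, text

con_gn = create_engine('postgresql+psycopg2://user:pwd@host/geonature')  # hypothetical DSN
sql = text("""
    INSERT INTO taxonomie.bdc_statut_text
        (cd_type_statut, cd_doc, cd_sig, niveau_admin, lb_adm_tr, doc_url, enable)
    VALUES ('AL', :cd_doc, :cd_sig, :niveau_admin, :lb_adm_tr, :doc_url, TRUE)
""")
with con_gn.begin() as cnx:
    cnx.execute(sql, {'cd_doc': 999990, 'cd_sig': 'INSEED38',
                      'niveau_admin': 'Département', 'lb_adm_tr': 'Isère',
                      'doc_url': 'https://example.org/liste.pdf'})  # hypothetical URL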
@@ -162,7 +191,6 @@ def get_max_idstatuttaxo(con):
     with con.begin() as cnx:
         return cnx.execute(sql).one()[0]
-
 def insert_status_taxo(con,cd_nom,cd_doc,status):
     id_statut_cor = get_id_status_cor_text_values(con,cd_doc,status)[0][0]
     cd_ref = get_cd_ref(con,cd_nom)
@@ -199,16 +227,50 @@ def get_taxonomie(con,cd_nom):
     ;'''.format(sch='taxonomie',tab='taxref',cd_nom=tuple(cd_nom)).replace(",)",")")
     return pd.read_sql(sql,con)

 if __name__ == "__main__":
-    from pycen import con_gn, ref_hydro
     import pandas as pd
+    # Define the connection to the GeoNature database
+    from pycen import con_gn

+    # The first sheet of the Excel file is read.
+    # Minimal layout: [CD_NOM, Statut, Source, Source_url]
+    # WARNING:
+    # - CD_NOM must match the CD_NOM of the taxref table
+    # - taxa whose Source_url is None or NA are ignored
     file = '/home/colas/Documents/9_PROJETS/6_GEONATURE/listes_alertes_isère.xlsx'
+    # Administrative level of the lists to load
+    niveau_admin = 'Département'
+    # Name of the administrative unit
+    lb_adm_tr = 'Isère'
+    # SIG code of the administrative unit
+    cd_sig = 'INSEED38'

     insert_status_alerte(con_gn)
+    # Dictionary of the alert lists to load, with the document id
+    # and the status codes each list uses
     cd_doc = {
-        'Statut_de_conservation_des_poissons_et_écrevisses_en_Isère_2015':{'id_doc':999990,'id_values':['RE','CR','EN','VU','NT','LC','DD','NA',]},
-        'Liste_dalerte_sur_les_orthoptères_menacés_en_Isère_2014':{'id_doc':999991,'id_values':['RE','CR','EN','VU','AS-1','AS-2','AS-3','LC','DD','NA']},
-        'Statuts_de_conservation_de_la_faune_sauvage_en_isere_2016':{'id_doc':999992,'id_values':['RE','CR','EN','VU','NT','LC','DD','NA','NE',]},
+        'Statut_de_conservation_des_poissons_et_écrevisses_en_Isère_2015':{
+            'id_doc':999990,
+            'id_values':['RE','CR','EN','VU','NT','LC','DD','NA',]
+        },
+        'Liste_dalerte_sur_les_orthoptères_menacés_en_Isère_2014':{
+            'id_doc':999991,
+            'id_values':['RE','CR','EN','VU','AS-1','AS-2','AS-3','LC','DD','NA']
+        },
+        'Statuts_de_conservation_de_la_faune_sauvage_en_isere_2016':{
+            'id_doc':999992,
+            'id_values':['RE','CR','EN','VU','NT','LC','DD','NA','NE',]
+        },
+        'Liste_rouge_des_Odonates_de_lIsère_2013':{
+            'id_doc':999993,
+            'id_values':['RE','CR','EN','VU','NT','LC','DD','NA',]
+        },
+        'Liste_rouge_des_lépidoprere_rhopaloceres_et_zygenes_de_lIsère_2015':{
+            'id_doc':999994,
+            'id_values':['RE','CR','EN','VU','NT','LC','DD','NA','NE','EX']
+        },
     }

     df = (pd.read_excel(file,keep_default_na=False)
@@ -221,12 +283,18 @@ if __name__ == "__main__":
     df = df.loc[df.doc_url!='']
     for d in df.source.unique():
         doc_url = df.loc[df.source==d,'doc_url'].unique()[0]
-        insert_statut_text(con_gn,cd_doc[d],doc_url)
+        insert_statut_text(con_gn,cd_doc[d],doc_url,cd_sig,lb_adm_tr,niveau_admin)

     # INSERT into the bdc_statut_taxons table
-    for row in df.itertuples():
-        id_doc = cd_doc[row.source]['id_doc']
-        insert_status_taxo(con=con_gn,cd_nom=row.cd_nom,cd_doc=id_doc,status=row.code_statut)
+    # Loop over each taxon. May take a few minutes.
+    [
+        insert_status_taxo(
+            con = con_gn,
+            cd_nom = row.cd_nom,
+            cd_doc = cd_doc[row.source]['id_doc'],
+            status = row.code_statut)
+        for row in df.itertuples()
+    ]

     st = get_status_type(con_gn,'cd_type_statut','AL')
     for c in st:
@@ -237,7 +305,7 @@ if __name__ == "__main__":
     del df['nom_français']
     del df['nom_latin']
     df = df.merge(tax,how='inner',on='cd_nom')
-    df['cd_sig'] = 'INSEED38'
-    df['lb_adm_tr'] = 'Isère'
-    df['niveau_admin'] = 'Département'
+    df['cd_sig'] = cd_sig
+    df['lb_adm_tr'] = lb_adm_tr
+    df['niveau_admin'] = niveau_admin
     df.to_sql('bdc_statut',con_gn,schema='taxonomie',if_exists='append',index=False)

View File

@@ -1,10 +1,12 @@
-from pycen import con_gn
+#!/usr/bin/env python3
+# -*- coding: UTF-8 -*-
 import requests
 import numpy as np
 import pandas as pd
 import os

-def get_status(lst):
+def get_status(lst,con):
     sql = """
     SELECT
         t.cd_nom,
@@ -16,6 +18,7 @@ def get_status(lst):
         t.famille,
         t.group1_inpn,
         t.group2_inpn,
+        t.group3_inpn,
         t.nom_vern,
         t.nom_complet,
         t.nom_valide,
@@ -25,25 +28,32 @@ def get_status(lst):
         s.code_statut,
         s.cd_type_statut,
         s.label_statut,
+        s.niveau_admin,
         s.full_citation,
         s.doc_url
     FROM taxonomie.taxref t
     JOIN taxonomie.v_bdc_status s USING (cd_nom)
     WHERE t.cd_nom IN {cd_nom}
     ;""".format(cd_nom = tuple(lst))
-    return pd.read_sql_query(sql,con_gn)
+    return pd.read_sql_query(sql,con)

-def get_api_status(cd_nom:int):
-    res = requests.api.get('https://geonature.cen-isere.fr/taxhub/api/taxref/%i'%cd_nom)
+def get_type_status(con):
+    sql = """
+    SELECT * FROM taxonomie.bdc_statut_type
+    ;"""
+    return pd.read_sql_query(sql,con)
+
+def get_api_status(api,cd_nom:int):
+    res = requests.api.get('%s/%i'%(api,cd_nom))
     if res.status_code == 200:
         return res.json()
     else :
         raise RuntimeError('Error : %i\tcd_nom : %i'%(res.status_code,cd_nom))

-def get_taxon_status(lst):
+def get_taxon_status(lst,api):
     from datetime import datetime as dt
     init = dt.now()
-    st = [get_api_status(x) for x in lst] # TOO LONG
+    st = [get_api_status(api,x) for x in lst] # TOO LONG
     print(dt.now()-init)
     phylo = {
         'cd_ref':[x['cd_ref'] for x in st],
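The manual status check in get_api_status can also be expressed with requests' own raise_for_status(); a sketch of an equivalent guard against the same api parameter:

import requests

def get_api_status(api: str, cd_nom: int) -> dict:
    res = requests.get('%s/%i' % (api, cd_nom))
    res.raise_for_status()  # raises requests.HTTPError carrying the status code
    return res.json()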
@@ -123,12 +133,6 @@ def get_taxon_status(lst):
             if 'PD' in x['status'].keys() else None
             for x in st
         ],
-        'PD':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['PD']['text'].values() for v in val['values'] ]
-            if 'PD' in x['status'].keys() else None
-            for x in st
-        ],
         'PNA':[
             [val['values'][v]['code_statut']
                 for val in x['status']['PNA']['text'].values() for v in val['values'] ]
@@ -194,149 +198,7 @@ def get_taxon_status(lst):
                 for val in x['status']['exPNA']['text'].values() for v in val['values'] ]
             if 'exPNA' in x['status'].keys() else None
             for x in st
-        ],
-    }
-    cd_status = {
-        'AL':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['AL']['text'].values() for v in val['values'] ]
-            if 'AL' in x['status'].keys() else None
-            for x in st
-        ],
-        'BERN':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['BERN']['text'].values() for v in val['values'] ]
-            if 'BERN' in x['status'].keys() else None
-            for x in st
-        ],
-        'BONN':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['BONN']['text'].values() for v in val['values'] ]
-            if 'BONN' in x['status'].keys() else None
-            for x in st
-        ],
-        'DH':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['DH']['text'].values() for v in val['values'] ]
-            if 'DH' in x['status'].keys() else None
-            for x in st
-        ],
-        'DO':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['DO']['text'].values() for v in val['values'] ]
-            if 'DO' in x['status'].keys() else None
-            for x in st
-        ],
-        'LRE':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['LRE']['text'].values() for v in val['values'] ]
-            if 'LRE' in x['status'].keys() else None
-            for x in st
-        ],
-        'LRM':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['LRM']['text'].values() for v in val['values'] ]
-            if 'LRM' in x['status'].keys() else None
-            for x in st
-        ],
-        'LRN':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['LRN']['text'].values() for v in val['values'] ]
-            if 'LRN' in x['status'].keys() else None
-            for x in st
-        ],
-        'LRR':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['LRR']['text'].values() for v in val['values'] ]
-            if 'LRR' in x['status'].keys() else None
-            for x in st
-        ],
-        'PAPNAT':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['PAPNAT']['text'].values() for v in val['values'] ]
-            if 'PAPNAT' in x['status'].keys() else None
-            for x in st
-        ],
-        'PD':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['PD']['text'].values() for v in val['values'] ]
-            if 'PD' in x['status'].keys() else None
-            for x in st
-        ],
-        'PD':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['PD']['text'].values() for v in val['values'] ]
-            if 'PD' in x['status'].keys() else None
-            for x in st
-        ],
-        'PNA':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['PNA']['text'].values() for v in val['values'] ]
-            if 'PNA' in x['status'].keys() else None
-            for x in st
-        ],
-        'PR':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['PR']['text'].values() for v in val['values'] ]
-            if 'PR' in x['status'].keys() else None
-            for x in st
-        ],
-        'REGL':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['REGL']['text'].values() for v in val['values'] ]
-            if 'REGL' in x['status'].keys() else None
-            for x in st
-        ],
-        'REGLII':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['REGLII']['text'].values() for v in val['values'] ]
-            if 'REGLII' in x['status'].keys() else None
-            for x in st
-        ],
-        'REGLLUTTE':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['REGLLUTTE']['text'].values() for v in val['values'] ]
-            if 'REGLLUTTE' in x['status'].keys() else None
-            for x in st
-        ],
-        'REGLSO':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['REGLSO']['text'].values() for v in val['values'] ]
-            if 'REGLSO' in x['status'].keys() else None
-            for x in st
-        ],
-        'SCAP NAT':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['SCAP NAT']['text'].values() for v in val['values'] ]
-            if 'SCAP NAT' in x['status'].keys() else None
-            for x in st
-        ],
-        'SCAP REG':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['SCAP REG']['text'].values() for v in val['values'] ]
-            if 'SCAP REG' in x['status'].keys() else None
-            for x in st
-        ],
-        'SENSNAT':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['SENSNAT']['text'].values() for v in val['values'] ]
-            if 'SENSNAT' in x['status'].keys() else None
-            for x in st
-        ],
-        'ZDET':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['ZDET']['text'].values() for v in val['values'] ]
-            if 'ZDET' in x['status'].keys() else None
-            for x in st
-        ],
-        'exPNA':[
-            [val['values'][v]['code_statut']
-                for val in x['status']['exPNA']['text'].values() for v in val['values'] ]
-            if 'exPNA' in x['status'].keys() else None
-            for x in st
-        ],
-    }
+        ]
+    }
     return pd.DataFrame({**phylo,**cd_status})
@@ -349,26 +211,40 @@ dict_dep = {

 if __name__ == "__main__":

+    # Define the connection to the GeoNature database
+    from pycen import con_gn
+    # NOT USED FOR NOW - Taxref API
+    api_taxref = 'https://geonature.cen-isere.fr/taxhub/api/taxref'

-    PATH = '/media/colas/SRV/FICHIERS/SITES/SITES GERES/ROLA_ROLANDE-MAUPAS/ROLA_PPI/ROLA_2025-2034_PG/donneesnaturalistes'
-    file = '3_liste sp_ROLA.xlsx'
-    sheet = 'liste sp'
+    # Parameters for loading the taxa file
+    PATH = '/home/colas/Documents/tmp/CHARVAS'
+    file = 'liste_sp_CHAR.xlsx'
+    sheet = 'liste_sp'
     # List of input CD_NOMs
-    cd_col = 'cd_ref'
+    cd_col = 'cd_ref' # Name of the column to use in the ``sheet`` worksheet
+    # Read the data
     taxlist = pd.read_excel(os.path.join(PATH,file),sheet,usecols=[cd_col],header=0)
     tab_sp = pd.read_excel(os.path.join(PATH,file),sheet,index_col=cd_col)
     lst = taxlist[cd_col]
-    df = get_status(taxlist[cd_col].astype(str))
+    # Fetch the statuses
+    df = get_status(taxlist[cd_col].astype(str),con_gn)
+    typ = get_type_status(con_gn)
+    typ = typ[typ.cd_type_statut.isin(df.cd_type_statut.unique())]
+    # Distinguish old vs new région in the LRR lists
+    is_lrr = df.cd_type_statut == 'LRR'
+    df.loc[is_lrr & (df.niveau_admin == 'Région'),'cd_type_statut'] = 'LRR_AURA'
+    df.loc[is_lrr & (df.niveau_admin == 'Ancienne région'),'cd_type_statut'] = 'LRR_RA'
+    del df['niveau_admin']
     for c in ['cd_ref','cd_nom','lb_nom']:
         if c in tab_sp.columns:
             # if 'cd_nom' not in df.columns and c == 'cd_ref': continue
             tab_sp.drop(c,axis=1,inplace=True)
-    # df.to_csv('/media/colas/SRV/FICHIERS/TRANSFERTS-EQUIPE/LC/BOCA_CD_NOM_STATUS.csv')

     pivot = pd.pivot_table(
         df,
         values='code_statut',
@@ -385,11 +261,11 @@ if __name__ == "__main__":
     pivot = tab_sp.merge(pivot,on=[cd_col],how='left')

     pivlib = pd.pivot_table(
         df,
         values='label_statut',
-        index=['cd_nom', 'cd_ref','lb_nom'#,'niveau_admin','lb_adm_tr'
+        index=[
+            'cd_nom', 'cd_ref','lb_nom'#,'niveau_admin','lb_adm_tr'
         ],
         columns=['cd_type_statut'],
         aggfunc=list,fill_value=None)
@@ -419,4 +295,8 @@ if __name__ == "__main__":
     )
     # writer.save()
     print('pivot_libel OK !')
+    typ.to_excel(
+        writer,sheet_name='dic_type_statut',index=False
+    )
+    # writer.save()
+    print('dic_type_statut OK !')
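The new dic_type_statut sheet is written through the same pd.ExcelWriter as the pivots; the multi-sheet pattern, self-contained with hypothetical frames and output path:

import pandas as pd

pivot = pd.DataFrame({'cd_ref': [1], 'LRR_AURA': ['EN']})
typ = pd.DataFrame({'cd_type_statut': ['LRR'], 'lb_type_statut': ['Liste rouge régionale']})
with pd.ExcelWriter('statuts.xlsx') as writer:  # hypothetical output path
    pivot.to_excel(writer, sheet_name='pivot_statut', index=False)
    typ.to_excel(writer, sheet_name='dic_type_statut', index=False)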

View File

@@ -26,7 +26,7 @@ def copy_2another_server(host_in,base_in,user_in,host_out,base_out,user_out,pass
     # pg_dump -C -h localhost -U localuser dbname | psql -h remotehost -U remoteuser dbname
     # pg_dump -C -h 172.17.0.2 -U postgres -d postgres --schema="07_202107" | psql -h 91.134.194.221 -U cgeier -d bd_cen
     # RESTORE UNDER THE TARGET DATABASE NAME
-    # pg_dump -C -h 172.17.0.2 -U postgres -d postgres --schema="*_202207" --format=custom | pg_restore -h 91.134.194.221 -U cgeier --dbname="bd_cen"
+    # pg_dump -C -h 172.17.0.2 -U postgres -d postgres --schema="*_202207" --format=custom | pg_restore -h 91.134.194.221 -U cgeier --dbname="azalee_restore"
     pwd_in = 'export PGPASSWORD="{pwd}";'.format(pwd=passwd_in)
     cmd_in = 'pg_dump -C -h {H} -d {bdd} -U {U}'.format(
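A sketch of driving the commented pg_dump | pg_restore pipeline from Python, assuming (unlike the two-password scheme used in this function) that a single PGPASSWORD fits both servers; shell=True is what makes the pipe work:

import os
import subprocess

cmd = ('pg_dump -C -h 172.17.0.2 -U postgres -d postgres --schema="*_202207" --format=custom'
       ' | pg_restore -h 91.134.194.221 -U cgeier --dbname="azalee_restore"')
subprocess.run(cmd, shell=True, check=True,
               env={**os.environ, 'PGPASSWORD': 'secret'})  # placeholder password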