update code

Colas Geier 2025-09-18 16:54:02 +02:00
parent b498f33237
commit ca6c074aca
16 changed files with 736 additions and 299 deletions


@@ -3,4 +3,4 @@
 # pg_dump -C -h localhost -U localuser dbname | psql -h remotehost -U remoteuser dbname
 # pg_dump -C -h 172.17.0.2 -U postgres -d postgres --schema="07_202107" | psql -h 91.134.194.221 -U cgeier -d bd_cen
 # RESTORE UNDER THE NAME OF THE TARGET DB
-# pg_dump -C -h 172.17.0.2 -U postgres -d postgres --schema="*_202407" --format=custom | pg_restore -h 91.134.194.221 -U cgeier --dbname="cadastre"
+# pg_dump -C -h 172.17.0.2 -U postgres -d postgres --schema="*_202501" --format=custom | pg_restore -h 91.134.194.221 -U cgeier --dbname="cadastre"


@@ -3,12 +3,12 @@ import pandas as pd
 import csv
 path = '/media/colas/SRV/FICHIERS/TRANSFERTS-EQUIPE/CG/FONCIER/CADASTRE/2023/Fichier national FANTOIR (situation avril 2023)/'
-fan = path+'FANTOIR0423'
+fant = path+'FANTOIR0423'
 lst_dep = ['07','26','38','42']
 if __name__ == '__main__':
-    df = pd.read_table(fan,chunksize=500000)
+    df = pd.read_table(fant,chunksize=500000)
     d07 = pd.DataFrame()
     d26 = pd.DataFrame()
     d38 = pd.DataFrame()


@@ -161,6 +161,14 @@ def drop_tables(con):
     with con.begin() as cnx:
         cnx.execute(sql)
 
+def where_parid(parid):
+    if not parid: return ""
+    return "AND parcelle.parcelle {} {}".format(
+        "IN" if isinstance(parid, (list,pd.Series)) and len(parid) > 1 else "=",
+        tuple(parid) if isinstance(parid, (list,pd.Series)) and len(parid) > 1
+        else "'%s'"%parid[0] if isinstance(parid, (list,pd.Series)) and len(parid) == 1
+        else "'%s'"%parid
+    )
 
 # CENRA 2020
 def _insert_voie1(schema='38_202207',list_parid=None):
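Note on the hunk above: where_parid() renders one filter fragment per accepted input shape. A minimal sketch of its output, with illustrative parcel ids (callers below rewrite the leading AND to WHERE when they splice it after a bare WHERE):

    # Sketch only; ids are illustrative, the function is defined in the hunk above.
    where_parid('3805050000E0523')
    # -> "AND parcelle.parcelle = '3805050000E0523'"
    where_parid(['3805050000E0523'])
    # -> "AND parcelle.parcelle = '3805050000E0523'"
    where_parid(['3805050000E0523', '3805050000E0524'])
    # -> "AND parcelle.parcelle IN ('3805050000E0523', '3805050000E0524')"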
@@ -170,14 +178,19 @@ def _insert_voie1(schema='38_202207',list_parid=None):
     sql0 = '''set work_mem='512MB';
     INSERT into cadastre.vl
     SELECT
-        ccodep||ccocom||codvoi AS vl_id, --character varying(10) NOT NULL, -->>> not the right character count, but I have no duplicates in Auvergne
-        libvoi AS libelle,--character varying(50),
+        v.ccodep||v.ccocom||v.codvoi AS vl_id, --character varying(10) NOT NULL, -->>> not the right character count, but I have no duplicates in Auvergne
+        v.libvoi AS libelle,--character varying(50),
         null AS geom -- geom geometry(MultiPolygon,2154),
-    FROM "{sch}".voie
-    WHERE ccodep||ccocom||codvoi IN (
+    FROM "{sch}".voie v
+    LEFT JOIN "{sch}".parcelle ON v.ccodep||v.ccocom||v.codvoi = parcelle.ccodep||parcelle.ccocom||parcelle.ccovoi
+    WHERE v.ccodep||v.ccocom||v.codvoi IN (
         SELECT DISTINCT ccodep||ccocom||ccovoi FROM "{sch}".parcelle )
     '''.format(
         sch=schema)
+    if list_parid:
+        sql0 += where_parid(list_parid)
     with con_fon.begin() as cnx:
         cnx.execute(sql0)
@@ -195,6 +208,10 @@ def _insert_voie2(schema='38_202207',list_parid=None):
         SELECT ccodep||ccocom||codvoi AS vl_id FROM "{sch}".voie)
     '''.format(
         sch=schema)
+    if list_parid:
+        sql0 += where_parid(list_parid)
     with con_fon.begin() as cnx:
         cnx.execute(sql0)
 
 def _insert_voie3(schema='38_202207',list_parid=None):
@@ -207,6 +224,10 @@ def _insert_voie3(schema='38_202207',list_parid=None):
         WHERE ccodep||ccocom||ccovoi NOT IN (SELECT vl_id from cadastre.vl)
     '''.format(
         sch=schema)
+    if list_parid:
+        sql0 += where_parid(list_parid)
     with con_fon.begin() as cnx:
         cnx.execute(sql0)
@@ -274,11 +295,15 @@ def _insert_parcelle1(schema='38_202207',list_parid=None):
         -- Parent parcel number
         type_filiation AS "type", -- Filiation type
         jdatat
-    FROM "{sch}".parcelle a
-    JOIN "{sch}".geo_parcelle ON geo_parcelle.geo_parcelle = a.parcelle
-    )
+    FROM "{sch}".parcelle
+    JOIN "{sch}".geo_parcelle ON geo_parcelle.geo_parcelle = parcelle.parcelle
     '''.format(
         sch=schema)
+    if list_parid:
+        sql += where_parid(list_parid).replace('AND','WHERE')
+    sql += ');'
     with con_fon.begin() as cnx:
         cnx.execute(sql)
@@ -290,7 +315,7 @@ def _insert_parcelle2(schema='38_202207',list_parid=None):
    The geom is retrieved from an earlier EDIGEO'''
     sql = '''set work_mem='512MB';
     INSERT INTO cadastre.parcelles
-    ( WITH t1 as(
+    ( WITH t1 as (
     SELECT DISTINCT ON (a.ccodep || a.ccocom || replace(a.ccopre, ' ', '0') || replace(a.ccosec, ' ', '0') || a.dnupla)
         parcelle,
         a.ccodep || a.ccocom || replace(a.ccopre, ' ', '0') || replace(a.ccosec, ' ', '0') || a.dnupla AS par_id,
@@ -354,17 +379,22 @@ def _insert_parcelle2(schema='38_202207',list_parid=None):
         t1.jdatat
     FROM t1
     LEFT JOIN "{sch}".parcelle_info b ON t1.parcelle = b.geo_parcelle -- parcels without geom
+    LEFT JOIN "{sch}".parcelle c ON t1.parcelle = c.parcelle -- parcels without geom
     LEFT JOIN cadastre.parcelles_cen ON t1.par_id = parcelles_cen.par_id
     WHERE t1.par_id NOT IN (SELECT par_id FROM cadastre.parcelles)
     )
     '''.format(
         sch=schema)
+    if list_parid:
+        sql += where_parid(list_parid)
     with con_fon.begin() as cnx:
         cnx.execute(sql)
 
-def insert_parcelle(schema='38_202207'):
-    _insert_parcelle1(schema)
-    _insert_parcelle2(schema)
+def insert_parcelle(schema='38_202207',list_parid=None):
+    _insert_parcelle1(schema,list_parid)
+    _insert_parcelle2(schema,list_parid)
     print('INSERT parcelle OK')
@@ -429,7 +459,7 @@ def check_proprio(schema='38_202207',con=con_fon):
     if not is_diff_dnomus.empty:
         update_synonyme_proprio(con,is_diff_dnomus,'dnomus',schema)
 
-def _insert_proprio(schema='38_202207'):
+def _insert_proprio(schema='38_202207',list_parid=None):
     sql = '''set work_mem='512MB';
     INSERT INTO cadastre.proprios
     SELECT DISTINCT
@@ -545,7 +575,8 @@ def _insert_r_prop_cptprop1(schema='38_202207'):
     FROM "{sch}".suf WHERE comptecommunal NOT IN (SELECT DISTINCT comptecommunal FROM "{sch}".proprietaire)
     '''.format(
         sch=schema,
-        dep=schema[:2])
+        dep=schema[:2]
+    )
     with con_fon.begin() as cnx:
         cnx.execute(sql)
@@ -555,10 +586,11 @@ def insert_r_prop_cptprop(schema='38_202207'):
     print('INSERT r_prop_cptprop OK')
 
-def _insert_lot1(schema='38_202207'):
+def _insert_lot1(schema='38_202207',list_parid=None):
     sql = '''set work_mem='512MB';
     INSERT INTO cadastre.lots (lot_id, par_id, dnulot, dcntlo)
     --parcels divided into lots, from the lots table
+    (WITH parcelle AS (
     SELECT DISTINCT
         ccodep||ccocom||replace(ccopre, ' ', '0')||replace(ccosec, ' ', '0')||dnupla||dnulot AS id_lot, -- Lot identifier character varying(21)
         ccodep||ccocom||replace(ccopre, ' ', '0')||replace(ccosec, ' ', '0')||dnupla AS par_id, -- Parcel identifier
@@ -591,9 +623,22 @@ def _insert_lot1(schema='38_202207'):
     FROM "{sch}".parcelle
     JOIN "{sch}".geo_parcelle ON parcelle.parcelle = geo_parcelle.geo_parcelle -- keep only parcels whose geometry we have
     WHERE ccodep||ccocom||replace(ccopre, ' ', '0')||replace(ccosec, ' ', '0')||dnupla IN
-        (SELECT ccodep||ccocom||replace(ccopre, ' ', '0')||replace(ccosec, ' ', '0')||dnupla||trim(dnulot) FROM "{sch}".suf); -- all parcels whose dnulot is NULL
+        (SELECT ccodep||ccocom||replace(ccopre, ' ', '0')||replace(ccosec, ' ', '0')||dnupla||trim(dnulot) FROM "{sch}".suf) -- all parcels whose dnulot is NULL
+    )
+    SELECT id_lot, par_id, dnulot, dcntlo
+    FROM parcelle
     '''.format(
         sch=schema)
+    if list_parid:
+        list_parid = [x[:2] + x[3:] for x in list_parid]
+        sql += (where_parid(list_parid)
+                .replace('AND', 'WHERE')
+                .replace('.parcelle','.par_id')
+        )
+    sql += ');'
     with con_fon.begin() as cnx:
         cnx.execute(sql)
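Note on the id reshaping above: dropping the third character converts a parcelle-style id into the shorter par_id spelling used by cadastre.lots (compare the two spellings passed in the __main__ calls at the end of this file). A minimal sketch, assuming list_parid is a list of ids:

    lst = ['380405000ZD0209']      # illustrative id, as in __main__ below
    [x[:2] + x[3:] for x in lst]
    # -> ['38405000ZD0209']
    # Caveat: a bare string would be iterated character by character here,
    # so the filter is only well-formed for list input.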
@@ -652,14 +697,17 @@ def _insert_lot2(schema='38_202207'):
     with con_fon.begin() as cnx:
         cnx.execute(sql)
 
 def insert_lot(schema='38_202207'):
     _insert_lot1(schema)
     _insert_lot2(schema)
     print('INSERT lot OK')
 
-def insert_cadastre(schema='38_202207'):
+def insert_cadastre(schema='38_202207',list_parid=None):
     sql = '''set work_mem='512MB';
     INSERT into cadastre.cadastre (lot_id, dnupro)
+    ( WITH t1 as (
     --the pdl
     SELECT DISTINCT
         ccodep||ccocom||replace(ccopre, ' ', '0')||replace(ccosec, ' ', '0')||dnupla||dnulot AS lot_id, -- Lot identifier character varying(21)
@@ -690,15 +738,45 @@ def insert_cadastre(schema='38_202207'):
     JOIN "{sch}".geo_parcelle ON parcelle.parcelle = geo_parcelle.geo_parcelle -- keep only parcels whose geometry we have
     WHERE
         ccodep||ccocom||replace(ccopre, ' ', '0')||replace(ccosec, ' ', '0')||dnupla IN
-        (SELECT ccodep||ccocom||replace(ccopre, ' ', '0')||replace(ccosec, ' ', '0')||dnupla||btrim(dnulot) FROM "{sch}".suf); -- all parcels whose dnulot is NULL
+        (SELECT ccodep||ccocom||replace(ccopre, ' ', '0')||replace(ccosec, ' ', '0')||dnupla||btrim(dnulot) FROM "{sch}".suf) -- all parcels whose dnulot is NULL
+    )
+    SELECT lot_id, dnupro FROM t1
     '''.format(
         sch=schema)
+    if list_parid:
+        sql += 'JOIN cadastre.lots l USING (lot_id) '
+        list_parid = [x[:2] + x[3:] for x in list_parid]
+        sql += (where_parid(list_parid)
+                .replace('AND', 'WHERE')
+                .replace('parcelle.parcelle','l.par_id')
+        )
+    sql += ');'
     with con_fon.begin() as cnx:
         cnx.execute(sql)
     print('INSERT cadastre OK')
 
-def insert_lotnatcult(schema='38_202207'):
+def cadastre_missing(con=con_fon):
+    sql = '''set work_mem='512MB';
+    INSERT into cadastre.cadastre (lot_id, dnupro)
+    SELECT DISTINCT
+        lot_id, -- Lot identifier character varying(21)
+        dnupro
+    FROM cadastre.cadastre_cen
+    WHERE lot_id NOT IN (SELECT lot_id FROM cadastre.cadastre)
+        AND lot_id IN (SELECT lot_id FROM cadastre.lots)
+        AND dnupro IN (SELECT dnupro FROM cadastre.cptprop)
+    ;
+    '''
+    with con.begin() as cnx:
+        cnx.execute(sql)
+
+def insert_lotnatcult(schema='38_202207',list_parid=None):
     sql = '''set work_mem='512MB';
     INSERT INTO cadastre.lots_natcult (lot_id, dsgrpf, cnatsp, dclssf, ccosub, dcntsf)
     SELECT DISTINCT
@@ -716,11 +794,18 @@ def insert_lotnatcult(schema='38_202207'):
         ccodep||ccocom||replace(ccopre, ' ', '0')||replace(ccosec, ' ', '0')||dnupla||trim(dnulot) IN (SELECT lot_id FROM cadastre.lots)
     '''.format(
         sch=schema)
+    if list_parid:
+        sql += (where_parid(list_parid)
+                .replace('parcelle.parcelle','suf.suf')
+        )
     with con_fon.begin() as cnx:
         cnx.execute(sql)
     print('INSERT lotnatcult OK')
 
 def update_typprop(con=con_fon):
     sql = '''set work_mem='512MB';
     UPDATE
@ -744,10 +829,10 @@ if __name__ == "__main__":
from pycen import update_to_sql from pycen import update_to_sql
# par = '3805050000E0523' # par = '3805050000E0523'
par = None par = None
sch = '38_202407' sch = '38_202501'
drop_tables(con_fon) drop_tables(con_fon)
lst_sch = ['07_202407','26_202407','42_202407','38_202407'] lst_sch = ['07_202501','26_202501','42_202501','38_202501']
for sch in lst_sch: for sch in lst_sch:
print(' INIT ',sch) print(' INIT ',sch)
insert_voie(sch) insert_voie(sch)
@ -758,7 +843,14 @@ if __name__ == "__main__":
insert_lot(sch) insert_lot(sch)
insert_cadastre(sch) insert_cadastre(sch)
insert_lotnatcult(sch) insert_lotnatcult(sch)
cadastre_missing(con_fon)
update_typprop(con_fon) update_typprop(con_fon)
# pg_restore -h 172.17.0.2 -U postgres --dbname="bd_cen" -c /home/colas/Documents/9_PROJETS/0_FONCIER/DUMP/bd_cen_20240418_16h50_beforemaj2023.dump # pg_restore -h 172.17.0.2 -U postgres --dbname="bd_cen" -c /home/colas/Documents/9_PROJETS/0_FONCIER/DUMP/bd_cen_20240418_16h50_beforemaj2023.dump
_insert_lot1('38_202504','38405000ZD0209')
insert_cadastre('38_202504','380405000ZD0209')
insert_lotnatcult('38_202504','380405000ZD0209')


@@ -46,7 +46,7 @@ def revoke_all_table(con,sch):
 if __name__ == "__main__":
-    sch_cad = '42_202407'
+    sch_cad = '38_202501'
     sch_old = '42_202207'
     grant_all_table(con_cad,sch_cad)
     # revoke_all_table(con_cad,sch_old)


@@ -81,7 +81,7 @@ df_structure.rename(columns={'organisme_auteur':'nom'}, inplace=True)
 #     labels=[
 #         # 1092,748,1088,
 #         17],
 #     axis=0,
 #     inplace=True)
 df_structure['nom_autres'] = None
 df_structure.loc[df_structure.nom == 'Acer campestre', 'nom_autres'] = 'ACER CAMPESTRE'


@@ -4,6 +4,7 @@
 from pycen import con_gn, zh, pers
 import pandas as pd
 import geopandas as gpd
+import numpy as np
 import uuid
 import re
 
 zh = zh()
@@ -136,9 +137,13 @@ def cor_lim_list(crit_delim):
     delim.mnemo = delim.mnemo.str.replace(r'.\(.*\)','',regex=True)
     dict_delim = dict(zip(delim.mnemo.str.lower(),delim.id_nomenclature))
 
-    serie = crit_delim\
-        .fillna('non déterminé')\
-        .str.split(';',expand=True).stack().droplevel(-1).reset_index()
+    serie = (crit_delim
+        .fillna('non déterminé')
+        .str.split(';',expand=True)
+        .stack()
+        .str.strip()
+        .droplevel(-1)
+        .reset_index())
     serie.columns = ['id','delim']
     serie.set_index('id',inplace=True)
     serie['id_lim'] = serie.delim.str.replace(r'.\(.*\)','',regex=True)
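For reference, a minimal sketch of what the added .str.strip() changes in this pipeline, on illustrative values: without it, every element after the first of an 'a; b' split keeps its leading space and fails to match the lower-cased dict_delim keys.

    import pandas as pd

    crit_delim = pd.Series(['critère A; critère B', None])
    serie = (crit_delim
        .fillna('non déterminé')
        .str.split(';', expand=True)   # one column per criterion
        .stack()                       # one row per criterion
        .str.strip()                   # added step: drops the space after ';'
        .droplevel(-1)
        .reset_index())
    # rows: 'critère A', 'critère B', 'non déterminé' (no leading spaces)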
@@ -225,7 +230,7 @@ def to_bib_organismes_przh():
         .replace({'Inconnu':'Autre'})
     to_insert = insert_from[~insert_from.nom.str.lower().isin(isin_db.name.str.lower())]
-    to_insert.abbrev = remove_special_char(to_insert.abbrev,space=True)\
+    to_insert.loc[to_insert.abbrev.notna(),'abbrev'] = remove_special_char(to_insert[to_insert.abbrev.notna()].abbrev,space=True)\
         .str.upper()\
         .str[:6]
     to_insert.loc[to_insert.abbrev.notna()]\
@@ -297,7 +302,7 @@ def get_id_t_zh(code=None):
     sql = "SELECT id_zh,zh_uuid,code FROM pr_zh.t_zh"
     if isinstance(code,str):
         sql += " WHERE code='%s'"%code
-    elif isinstance(code,list) or isinstance(code,pd.Series) or isinstance(code,pd.Index):
+    elif isinstance(code,(list,pd.Series,pd.Index,pd._libs.testing.np.ndarray)):
        sql += " WHERE code IN %s"%str(tuple(code))
     return pd.read_sql_query(sql,con_gn)
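The widened isinstance check routes every listed array-like (including the arrays returned by .unique()) through the IN branch, which renders the clause with str(tuple(code)). A sketch with illustrative codes:

    str(tuple(['38CG0110', '38RD0057']))
    # -> "('38CG0110', '38RD0057')"  =>  ... WHERE code IN ('38CG0110', '38RD0057')
    # Caveat: a one-element sequence renders as ('38CG0110',) and PostgreSQL
    # rejects the trailing comma, so single codes should be passed as plain str.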
@@ -525,13 +530,23 @@ def to_t_zh(DF):
     org = get_id_org_przh()
     dict_org = dict(zip(org.name,org.id_org))
 
     # First modif = update_author. Important
     test_auth = df.create_author.str.contains(';',na=False)
     if test_auth.any():
-        df.loc[test_auth,'update_author'] = df.loc[test_auth,'create_author'].str.split(';',expand=True)[1]
-        df.loc[test_auth,'create_author'] = df.loc[test_auth,'create_author'].str.split(';',expand=True)[0]
+        df.loc[test_auth,'update_author'] = df.loc[test_auth,'create_author'].str.split(';',n=1,expand=True)[1]
+        df.loc[test_auth,'create_author'] = df.loc[test_auth,'create_author'].str.split(';',n=1,expand=True)[0]
+    test_updt1 = df.update_author.str.contains(';',na=False)
+    if test_updt1.any():
+        df.loc[test_updt1,'update_author'] = df.loc[test_updt1,'update_author'].str.split(';',n=1,expand=True)[0]
+    test_updt2 = df.update_author=='CEN Isère'
+    if test_updt2.any():
+        df.loc[test_updt2,'update_author'] = df[test_updt2].create_author
     df['create_author'] = recup_id_role(author=df['create_author'])
     df['update_author'] = recup_id_role(author=df['update_author'])
+    df.loc[df.update_author.isna(),'update_author'] = df[df.update_author.isna()].create_author
     df['id_org'] = [t_role.loc[t_role.index==x,'nom_organisme'].values[0] for x in df['create_author']]
     df.id_org.replace(dict_org,inplace=True)
@@ -555,7 +570,7 @@ def to_t_zh(DF):
     # df['is_other_inventory'] = None # To be updated from MEDWET
     # df['is_carto_hab'] = None # default: False
     # df['nb_hab'] = [len(x) if x else None for x in df.code_cb.str.split(';')] # Applies to heritage HABs
-    df = df.merge(find_nb_hab_bylbcode(df),on='id_zh',how='left')
+    # df = df.merge(find_nb_hab_bylbcode(df),on='id_zh',how='left')
     # df['total_hab_cover'] = 100 # Applies to heritage HABs; cannot be filled in.
     df['remark_eval_functions'] = df.rmq_fct_majeur.copy()
     df['remark_eval_heritage'] = df.rmq_interet_patri.copy()
@@ -606,12 +621,22 @@ def to_t_zh(DF):
         print('INSERT cor_zh_hydro OK !')
         _cor_zh_(tzh_code=to_tzh.code,typ='rb')
         print('INSERT cor_zh_rb OK !')
+    print('END t_zh')
 
 def to_cor_zh_lim_fs(df):
-    df = df[['code','crit_def_esp']]\
+    _df = (df[['code','crit_def_esp']]
         .fillna('Non déterminé')
+        .copy())
+    df = (_df.set_index('code')
+        .crit_def_esp.str.split(';',expand=True)
+        .stack()
+        .droplevel(-1)
+        .str.strip()
+        .to_frame('crit_def_esp')
+        .reset_index(drop=False))
     cor_zh_lim_fs = pd.merge(df,get_id_t_zh(df.code),on='code')\
         .rename(columns={'crit_def_esp':'id_lim_fs'})
     crit_def_esp_fct = t_nomenclature_ZH('CRIT_DEF_ESP_FCT')
@@ -621,11 +646,14 @@ def to_cor_zh_lim_fs(df):
         name='cor_zh_lim_fs',con=con_gn,schema='pr_zh',if_exists='append',index=False
     )
+    print('END zh_lim_fs')
 
-def get_azalee_activity():
+def get_azalee_activity(code=None):
     sql = """
-    SELECT
+    SELECT DISTINCT ON (a.id_site,a.id_position,a.id_activ_hum)
         g.id_site code,
+        a.date,
         CASE WHEN length(pa.id::varchar)=1
             THEN '0'||pa.id::varchar||' - '||pa.nom
             ELSE pa.id::varchar||' - '||pa.nom
@@ -639,11 +667,16 @@ def get_azalee_activity():
     LEFT JOIN zones_humides.param_position pp ON pp.id = a.id_position
     LEFT JOIN zones_humides.param_impact pi ON pi.id = a.id_impact
     JOIN (sites.r_sites_geom g JOIN sites.sites s ON s.id = g.id_site)
-        ON g.id = a.id_geom_site
-    WHERE g.date = (SELECT max(dat) FROM (values (g.date)) AS value(dat))
-        and a."valid"
+        USING (id_site)
+    WHERE --g.date = (SELECT max(dat) FROM (values (g.date)) AS value(dat)) and
+        a."valid"
         and s.date_fin is NULL
     """
+    if isinstance(code,str):
+        sql += " AND a.id_site='%s'"%code
+    elif isinstance(code,(list,pd.Series,pd.Index,pd._libs.testing.np.ndarray)):
+        sql += " AND a.id_site IN %s"%str(tuple(code))
+    sql += " ORDER BY a.id_site,a.id_position,a.id_activ_hum,a.date DESC;"
     return pd.read_sql_query(sql,zh.con)
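The DISTINCT ON / ORDER BY pair added above is the standard PostgreSQL idiom for "latest row per key": rows are sorted by the key columns and then a.date DESC, and DISTINCT ON keeps the first row of each (id_site, id_position, id_activ_hum) group. Calling sketch, with illustrative site codes:

    df = get_azalee_activity(code=['38CG0110', '38RD0057'])
    # one row per (site, position, activity), each being the most recent record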
@@ -654,21 +687,28 @@ def get_cor_impact_types():
 
 def to_t_activity(df=None):
     table = 't_activity'
-    if df is None:
-        df = get_azalee_activity()\
-            .drop_duplicates()
-    else:
-        df = df[['code','activ_hum','impact','position','rmq_activ_hum']]
-        activ_hum = df.activ_hum.str.split(';',expand=True).stack()
-        impact = df.impact.str.split(';',expand=True).stack()
-        position = df.position.str.split(';',expand=True).stack()
-        rmq_activ_hum = df.rmq_activ_hum.str.split(';',expand=True).stack()
+    code = None if df is None else df.code.unique()
+    df = (get_azalee_activity(code)
+        .drop_duplicates())
+    # if df is None:
+    #     df = get_azalee_activity()\
+    #         .drop_duplicates()
+    # else:
+    #     df = df[['code','activ_hum','impact','position','rmq_activ_hum']]
+    #     activ_hum = df.activ_hum.str.split(';',expand=True).stack()
+    #     impact = df.impact.str.split(';',expand=True).stack()
+    #     position = df.position.str.split(';',expand=True).stack()
+    #     rmq_activ_hum = df.rmq_activ_hum.str.split(';',expand=True).stack()
     # df['activ_hum'] = remove_special_char(df['activ_hum'],space=True)
     # df['impact'] = remove_special_char(df['impact'],space=True)
+    df['impact'].fillna('non déterminé',inplace=True)
+    df['impact'].replace({'Inconnu':'non déterminé'},inplace=True)
     df['impact'] = remove_special_char(df['impact'].str.lower(),space=True)
-    df['impact'].fillna('aucun',inplace=True)
     # df['position'] = remove_special_char(df['position'],space=True)
+    df['position'].replace({'Inconnu':'Non déterminée'},inplace=True)
 
     no_activ_hum = t_nomenclature_ZH('ACTIV_HUM')
     dict_activ_hum = dict(zip(no_activ_hum.mnemo,no_activ_hum.id_nomenclature))
@@ -696,12 +736,12 @@ def to_t_activity(df=None):
         x[0] if len(x)==1 else no_position.loc[no_position.cd_nomenclature=='3','id_nomenclature'].values[0]
         for x in group_df.id_position
     ]
-    group_df.remark_activity = ['\n'.join(list(set(x))) if list(set(x)) != [None] else None for x in group_df.remark_activity]
+    group_df.remark_activity = ['\n'.join(list(set(filter(lambda item: item is not None, x)))) if list(set(x)) != [None] else None for x in group_df.remark_activity]
     group_df['id_impact_list'] = [uuid.uuid4() for x in group_df.index]
-    cor_impact_list = group_df[['id_impact_list','id_cor_impact_types']]\
-        .explode('id_cor_impact_types')\
-        .drop_duplicates()
+    cor_impact_list = (group_df[['id_impact_list','id_cor_impact_types']]
+        .explode('id_cor_impact_types')
+        .drop_duplicates())
 
     # activity = pd.merge(group_df[['code','id_activity','id_impact_list','id_position']],df,on=['code','id_activity','id_position'],how='left')
     # t_activity = pd.merge(activity,get_id_t_zh(df.code),on='code')
@@ -713,24 +753,19 @@ def to_t_activity(df=None):
     to_tactiv.to_sql(
         name=table, con=con_gn, schema='pr_zh',
         if_exists='append', index=False,
-        # dtype={
-        #     'id_impact_list':uuid.UUID
-        # }
     )
     cor_impact_list.to_sql(
         name='cor_impact_list', con=con_gn, schema='pr_zh',
         if_exists='append', index=False,
-        # dtype={
-        #     'id_impact_list':uuid.UUID
-        # }
     )
+    print('END t_activity')
 
-def get_azalee_functions():
+def get_azalee_functions(code=None):
     sql = """
-    SELECT
-        g.id_site code,
+    SELECT DISTINCT ON (a.id_site,a.id_fct)
+        a.id_site code,
         pa.nom id_function,
         a."quantite",
         a.description justification,
@@ -738,26 +773,36 @@ def get_azalee_functions():
     FROM zones_humides.r_site_fctecosociopatri a
     LEFT JOIN zones_humides.param_fct_eco_socio_patri pa ON pa.id = a.id_fct
     JOIN (sites.r_sites_geom g JOIN sites.sites s ON s.id = g.id_site)
-        ON g.id = a.id_geom_site
-    WHERE g.date = (SELECT max(dat) FROM (values (g.date)) AS value(dat))
-        and a."valid"
+        USING (id_site)
+    WHERE
+        a."valid"
         and s.date_fin is NULL
     """
+    if isinstance(code,str):
+        sql += " AND a.id_site='%s'"%code
+    elif isinstance(code,(list,pd.Series,pd.Index,pd._libs.testing.np.ndarray)):
+        sql += " AND a.id_site IN %s"%str(tuple(code))
+    sql += " ORDER BY a.id_site,a.id_fct,a.date DESC;"
     return pd.read_sql_query(sql,zh.con)
 
 def to_t_functions(df=None):
     table = 't_functions'
-    if df is None:
-        func = get_azalee_functions()
+    code = None if df is None else df.code.unique()
+    func = get_azalee_functions(code)
     func.id_function.replace({' / ':'/'},regex=True,inplace=True)
-    else:
-        func = df[['code','fct_bio','fct_hydro','int_patri','val_socioEco']].set_index('code').unstack()
-        func = func.str.split(';',expand=True).stack()\
-            .str.split(' \(',1,expand=True)
-        func.columns = ['id_function','justification']
-        func.justification = func.justification.str.rsplit('\)',1,expand=True)[0]
+    # if df is None:
+    #     func = get_azalee_functions()
+    #     func.id_function.replace({' / ':'/'},regex=True,inplace=True)
+    # else:
+    #     func = df[['code','fct_bio','fct_hydro','int_patri','val_socioEco']].set_index('code').unstack()
+    #     func = func.str.split(';',expand=True).stack()\
+    #         .str.split(' \(',1,expand=True)
+    #     func.columns = ['id_function','justification']
+    #     func.justification = func.justification.str.rsplit('\)',1,expand=True)[0]
     functions = pd.concat([
         t_nomenclature_ZH('FONCTIONS_HYDRO'),t_nomenclature_ZH('FONCTIONS_BIO'),
@@ -783,12 +828,14 @@ def to_t_functions(df=None):
     t_func = pd.merge(funct,get_id_t_zh(funct.code),on='code')
     tactiv_cols = recup_cols_table(table,con_gn)
     lst_cols = t_func.columns[t_func.columns.isin(tactiv_cols)]
-    to_tfunction = t_func[lst_cols]
+    ident_no_func = t_func.id_function.isin(['aucun intérêt patrimonial','aucune fonction biologique'])
+    to_tfunction = t_func.loc[~ident_no_func,lst_cols]
     to_tfunction.to_sql(
         name=table, con=con_gn, schema='pr_zh',
         if_exists='append', index=False
     )
+    print('END t_functions')
 
 def add_remark_pres(not_bib):
     table = 't_zh'
@@ -821,10 +868,10 @@ def __format_lb_code__(t):
     df_cb = t[cols].copy()
     df_cb.set_index('code',inplace=True)
-    zh_cb = df_cb.code_cb.str.split(';',expand=True)\
-        .stack()\
-        .droplevel(-1)\
-        .reset_index()
+    zh_cb = (df_cb.code_cb.str.split(';',expand=True)
+        .stack()
+        .droplevel(-1)
+        .reset_index())
     zh_cb.columns = cols
     zh_cb.rename(columns={'code_cb':'lb_code'},inplace=True)
@@ -908,6 +955,11 @@ def to_t_flow(DF, type_flow=None):
     dict_flow = dict(zip(_flow.mnemo,_flow.id_nomenclature))
     dict_perm = dict(zip(_perm.mnemo.str.lower(),_perm.id_nomenclature))
 
+    test_ruis = inflow.flow=='Ruissellement diffus'
+    if test_ruis.any() and type_flow=='outflow':
+        idx = inflow[test_ruis].index
+        inflow.drop(idx,inplace=True)
     inflow.flow.replace(dict_flow,inplace=True)
     inflow.perm.fillna('non déterminé',inplace=True)
     inflow.perm.replace({'inconnu':'non déterminé','':'non déterminé'},inplace=True)
@@ -923,6 +975,15 @@ def to_t_flow(DF, type_flow=None):
         name=table, con=con_gn, schema='pr_zh',
         if_exists='append', index=False
     )
+    print('END t_flow')
+
+def check_corine_cover_inbdd(df):
+    sql = '''
+    SELECT * FROM (VALUES %s ) as t (id_zh, id_cover)
+    WHERE (id_zh, id_cover) NOT IN (SELECT id_zh, id_cover FROM pr_zh.cor_zh_corine_cover)
+    '''%','.join([str(tuple(x)) for x in df.to_dict('split',index=False)['data']])
+    return pd.read_sql_query(sql,con_gn)
 
 def cor_zh_corine_cover():
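The new check_corine_cover_inbdd() anti-joins a VALUES list against the existing pairs, so cor_zh_corine_cover() below only appends rows not already in pr_zh.cor_zh_corine_cover. A sketch of the SQL it renders for two illustrative pairs:

    import pandas as pd

    df = pd.DataFrame({'id_zh': [1, 2], 'id_cover': [101, 102]})
    ','.join([str(tuple(x)) for x in df.to_dict('split', index=False)['data']])
    # -> "(1, 101),(2, 102)", spliced into:
    #    SELECT * FROM (VALUES (1, 101),(2, 102) ) as t (id_zh, id_cover)
    #    WHERE (id_zh, id_cover) NOT IN (SELECT id_zh, id_cover FROM pr_zh.cor_zh_corine_cover)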
@@ -955,22 +1016,26 @@ def cor_zh_corine_cover():
         predicate = 'intersects',
         how = 'inner')
 
-    cor_zh_clc = tmp[['id_zh','CODE_12']]\
-        .drop_duplicates()\
-        .rename(columns={'CODE_12':'id_cover'})\
-        .replace({'id_cover':dict_clc1})\
-        .replace({'id_cover':dict_clc2})
+    _cor_zh_clc = (tmp[['id_zh','CODE_12']]
+        .drop_duplicates()
+        .rename(columns={'CODE_12':'id_cover'})
+        .replace({'id_cover':dict_clc1})
+        .replace({'id_cover':dict_clc2}))
+    cor_zh_clc = check_corine_cover_inbdd(_cor_zh_clc)
     cor_zh_clc.to_sql(
         name=table, con=con_gn, schema='pr_zh',
         if_exists='append', index=False
     )
+    print('END cor_zh_corine_cover')
 
 def migrate_to_gnZH(df:pd.DataFrame=None):
     to_bib_organismes_przh()
     to_t_zh(df)
     to_cor_zh_lim_fs(df)
+    to_cor_zh_cb(df)
     to_t_activity(df)
     to_t_functions(df)
     to_t_flow(df,type_flow='inflow')
@@ -978,7 +1043,6 @@ def migrate_to_gnZH(df:pd.DataFrame=None):
     cor_zh_corine_cover()
 
-
 def to_t_references(db_file, suffixe_refnum=None):
     import pandas_access as mdb
     table = 't_references'
@@ -1119,12 +1183,14 @@ def trunc_table(table,cascade=False):
     with con_gn.begin() as cnx:
         cnx.execute(sql)
 
 if __name__ == "__main__":
     # TRUNCATE TABLE
     # trunc_table('t_zh',cascade=True)
     # trunc_table('cor_zh_area')
-    # trunc_table('t_reference')
+    # trunc_table('t_references',cascade=True)
+    # trunc_table('cor_lim_list')
     from pycen.geonature import pr_zh


@@ -20,7 +20,7 @@ url = URL.create('postgresql+psycopg2',
     host=adr,
     database=base,
 )
-con = create_engine(url)
+# con = create_engine(url)
 
 drop_v_zh = 'DROP VIEW IF EXISTS zones_humides.v_zoneshumides CASCADE;'
 with con.begin() as cnx:
@ -212,17 +212,7 @@ v_zh_fctecosociopatri_cross = """
DROP VIEW IF EXISTS zones_humides.v_zh_fctecosociopatri_cross; DROP VIEW IF EXISTS zones_humides.v_zh_fctecosociopatri_cross;
CREATE OR REPLACE VIEW zones_humides.v_zh_fctecosociopatri_cross CREATE OR REPLACE VIEW zones_humides.v_zh_fctecosociopatri_cross
AS AS
SELECT DISTINCT ON (id_site) WITH auteur AS (
--id_geom_site,
id_site,
auteur,
"date",
fct_bio,
fct_hydro,
int_patri,
"val_socioEco"
FROM crosstab(
$$ WITH auteur AS (
SELECT DISTINCT ON (id_sitefct) SELECT DISTINCT ON (id_sitefct)
c.id_sitefct, c.id_sitefct,
string_agg(c1.auteur,';' ORDER BY c1.auteur) auteur string_agg(c1.auteur,';' ORDER BY c1.auteur) auteur
@@ -230,11 +220,33 @@ FROM crosstab(
     JOIN personnes.v_personne c1 ON c1.id = c.id_auteur
     GROUP BY c.id_sitefct
     ORDER BY 1
-), tmp_select as (
+),max_author as (
+    SELECT DISTINCT ON (id_site)
+        a.id_site,
+        --(SELECT regexp_split_to_table(auth,',') FROM (values (string_agg(auteur,',' order by ROW()))) AS value(auth) Limit 1) auteur,
+        auteur,
+        a."date"
+    FROM auteur
+    JOIN zones_humides.r_site_fctecosociopatri a ON id_sitefct = a.id
+    order by 1,3 DESC,2
+)
+SELECT DISTINCT ON (id_site)
+    --id_geom_site,
+    id_site,
+    --(SELECT auteur ORDER BY ct."date" DESC) "date",
+    "date",
+    auteur,
+    fct_bio,
+    fct_hydro,
+    int_patri,
+    "val_socioEco"
+FROM crosstab(
+    $$ WITH tmp_select as (
     SELECT
+        max(a."id") id_sitefct,
         id_site,
-        (SELECT regexp_split_to_table(auth,',') FROM (values (string_agg(d.auteur,',' order by ROW()))) AS value(auth) Limit 1) auteur,
-        MAX(a.date) date,
+        --(SELECT regexp_split_to_table(auth,',') FROM (values (string_agg(d.auteur,',' order by ROW()))) AS value(auth) Limit 1) auteur,
+        --MAX(a.date) date,
         c.nom_court type_param,
         b.nom,
         string_agg(DISTINCT a.description,'\n') remark
@@ -242,20 +254,15 @@ FROM crosstab(
     JOIN (zones_humides.param_fct_eco_socio_patri b
         JOIN zones_humides.type_param_fct c ON b.id_type = c.id
     ) ON a.id_fct = b.id
-    JOIN auteur d ON d.id_sitefct = a.id
+    --JOIN auteur d ON d.id_sitefct = a.id
     WHERE a."valid"
     GROUP BY a.id_site,c.nom_court,b.nom
     ORDER BY a.id_site,c.nom_court DESC,b.nom
     )
     SELECT
         id_site,
-        string_agg(
-            DISTINCT CASE WHEN extract(year from date)>'1'
-                THEN auteur||'-'|| extract(year from date)
-                ELSE auteur END,
-            '; ') auteur,
-        MAX(date) date,
+        --(SELECT regexp_split_to_table(auth,',') FROM (values (string_agg(d.auteur,',' order by ROW()))) AS value(auth) Limit 1) auteur,
+        --MAX("date") "date",
         type_param,
         string_agg(distinct
             CASE WHEN remark IS NULL THEN nom
@@ -263,19 +270,23 @@ FROM crosstab(
             ELSE CONCAT(nom,' (',remark,')') END,
         ';') fct
     FROM tmp_select
-    GROUP BY 1,4
-    ORDER BY 1,4,3;$$,
+    --JOIN auteur d using (id_sitefct)
+    GROUP BY id_site,type_param
+    ORDER BY id_site,type_param;$$,
     $$SELECT DISTINCT nom_court FROM zones_humides.type_param_fct ORDER BY 1 ASC;$$
 ) AS ct (
     "id_site" text,
-    "auteur" text,
-    "date" date,
+    --"auteur" text,
+    --"date" date,
     "fct_bio" text,
     "fct_hydro" text,
     "int_patri" text,
-    "val_socioEco" text)
+    "val_socioEco" text
+)
+JOIN max_author USING (id_site)
 --GROUP BY id_site,auteur,fct_bio,fct_hydro,int_patri,"val_socioEco"
-ORDER BY id_site,"date" desc,row_number() OVER (ORDER BY id_site) desc
+--WHERE id_site in ('38CG0110','38RD0057','38VS0063')
+ORDER BY id_site,row_number() OVER (ORDER BY id_site) desc
 ;"""
 grant = """
 GRANT ALL ON TABLE zones_humides.v_zh_fctecosociopatri_cross TO grp_admin;
@ -290,15 +301,7 @@ v_zh_critdelim_cross = """
DROP VIEW IF EXISTS zones_humides.v_zh_critdelim_cross; DROP VIEW IF EXISTS zones_humides.v_zh_critdelim_cross;
CREATE OR REPLACE VIEW zones_humides.v_zh_critdelim_cross CREATE OR REPLACE VIEW zones_humides.v_zh_critdelim_cross
AS AS
SELECT DISTINCT ON (id_site) WITH auteur AS (
--id_geom_site,
id_site,
auteur,
"date",
crit_delim,
crit_def_esp
FROM crosstab(
$$WITH auteur AS (
SELECT DISTINCT ON (id_sitedelim) SELECT DISTINCT ON (id_sitedelim)
c.id_sitedelim, c.id_sitedelim,
string_agg(c1.auteur,';' ORDER BY c1.auteur) auteur string_agg(c1.auteur,';' ORDER BY c1.auteur) auteur
@@ -306,16 +309,27 @@ FROM crosstab(
     JOIN personnes.v_personne c1 ON c1.id = c.id_auteur
     GROUP BY c.id_sitedelim
     ORDER BY 1
-)
-SELECT
+),max_author as (
+    SELECT DISTINCT ON (id_site)
+        a.id_site,
+        --(SELECT regexp_split_to_table(auth,',') FROM (values (string_agg(auteur,',' order by ROW()))) AS value(auth) Limit 1) auteur,
+        auteur,
+        a."date"
+    FROM auteur
+    JOIN zones_humides.r_site_critdelim a ON id_sitedelim = a.id
+    order by 1,3 DESC,2
+)
+SELECT DISTINCT ON (id_site)
+    --id_geom_site,
+    id_site,
+    "date",
+    auteur,
+    crit_delim,
+    crit_def_esp
+FROM crosstab(
+    $$ SELECT
     --a.id_geom_site::bigint,
     id_site,
-    string_agg(
-        DISTINCT CASE WHEN extract(year from a.date)>'1'
-            THEN d.auteur||'-'|| extract(year from a.date)
-            ELSE d.auteur END,
-        '; ') auteur,
-    MAX(a.date) date,
     c.nom_court type_param,
     string_agg( DISTINCT
         CASE WHEN a.description IS NULL THEN b.nom
@@ -326,19 +340,21 @@ FROM crosstab(
     JOIN (zones_humides.param_delim_fct b
         JOIN zones_humides.type_param_delim_fct c ON b.id_type = c.id
     ) ON a.id_crit_delim = b.id
-    JOIN auteur d ON d.id_sitedelim = a.id
+    --JOIN auteur d ON d.id_sitedelim = a.id
     WHERE a."valid"
-    GROUP BY 1,4
-    ORDER BY 3,1,2$$,
+    GROUP BY 1,2
+    ORDER BY 1$$,
     $$SELECT DISTINCT nom_court FROM zones_humides.type_param_delim_fct ORDER BY 1 DESC;$$
 ) AS ct (
     "id_site" text,
-    "auteur" text,
-    "date" date,
+    --"auteur" text,
+    --"date" date,
     "crit_delim" text,
     "crit_def_esp" text)
+JOIN max_author USING (id_site)
 --GROUP BY id_site,auteur,crit_delim,crit_def_esp
-ORDER BY id_site,"date" desc,row_number() OVER (ORDER BY id_site) desc
+--WHERE id_site in ('38CG0110','38RD0057','38VS0063')
+ORDER BY id_site,row_number() OVER (ORDER BY id_site) desc
 ;"""
 grant = """
 GRANT ALL ON TABLE zones_humides.v_zh_critdelim_cross TO grp_admin;


@@ -874,11 +874,14 @@ def insertAttrsFct(sh6,nom_typ_court=False):
 if __name__ == "__main__":
     from os import path
-    FILE_PATH = '/home/colas/Documents/9_PROJETS/1_ZH/MAJ/Actu 2024/TEREO - 20241002_ENVOI_SIG_ZH/ADD DATA'
-    GEOM_PATH = 'TEREO_newZH.gpkg'
-    DATA_PATH = 'Tableau_saisie_ZH_TEREO.xlsx'
+    FILE_PATH = '/home/colas/Documents/9_PROJETS/1_ZH/MAJ/Assistance GAM/actu_ZH'
+    GEOM_PATH = 'contour_ZH.gpkg'
+    DATA_PATH = 'Tableau_saisie_ZH.xlsx'
     Gdf = gpd.read_file(path.join(FILE_PATH,GEOM_PATH), crs='EPSG:2154')
+    Gdf.columns = Gdf.columns.str.lower()
+    Gdf.rename(columns={'code_site':'site_code'},inplace=True)
     # Gdf['site_code'] = '38GR0070'
     # lst = ['38BB0089','38BB0090','38BB0091','38BB0092']
     # gdf = gdf[gdf.site_code.isin(lst)]
@ -990,8 +993,7 @@ if __name__ == "__main__":
insertAttrsFct(sh6.dropna(axis=1,how='all')) insertAttrsFct(sh6.dropna(axis=1,how='all'))
maj = sh1[sh1.maj=='MAJ'].copy()
maj = sh1[sh1.maj=='MAJ GEOM'].copy()
lst_maj = [*maj.site_cod] lst_maj = [*maj.site_cod]
# site = lst_maj[3] # site = lst_maj[3]
for site in lst_maj: for site in lst_maj:


@@ -1,3 +1,6 @@
+#!/usr/bin/env python3
+# -*- coding: UTF-8 -*-
+
 # 2024-11-26
 # The script integrates new geometries
 # into the sites.r_sites_geom table. The AUTHORS were
@@ -15,12 +18,12 @@ from sqlalchemy.engine import URL
 # isin_bdd = True
 # # CEN38 OUT database parameters
-# user = 'cen_admin'
-# pwd = "#CEN38@venir"
-# adr = '91.134.194.221'
-# base = 'azalee_restore'
-# url = URL.create("postgresql+psycopg2", username=user, password=pwd, host=adr, database=base)
-# con = create_engine(url)
+user = 'cen_admin'
+pwd = "#CEN38@venir"
+adr = '91.134.194.221'
+base = 'azalee_restore'
+url = URL.create("postgresql+psycopg2", username=user, password=pwd, host=adr, database=base)
+con = create_engine(url)
 
 zh = ZH()
@@ -46,9 +49,11 @@ df.dropna(how='all',axis=1,inplace=True)
 # df.contains(r_geom.geom).any()
 # res = df.within(r_geom.unary_union)
 
+perc = 80
 zh_caract = df[df.zh=='caractéristique'].copy()
 zh_caract['surfzhall'] = zh_caract.groupby(['idpolyfinal'])['n06rechab'].transform('sum')
-lst_idpoly = zh_caract[zh_caract.surfzhall >= 75].idpolyfinal.unique()
+lst_idpoly = zh_caract[zh_caract.surfzhall >= perc].idpolyfinal.unique()
 
 # zh_caract.within(r_geom.unary_union)
 # zh_caract.contains(r_geom.unary_union).any()
@@ -59,7 +64,7 @@ no_inters = df[
     & (~df.n05lbhab.str.contains('invasif',na=False,case=False))
 ].copy()
 no_inters = no_inters.drop(columns=['id_site']).sjoin(r_geom[['id_site','geom']])
-no_inters.to_file(path.join(PATH,'inters.gpkg'),driver='GPKG',layer='no_inters75')
+no_inters.to_file(path.join(PATH,'inters.gpkg'),driver='GPKG',layer='no_inters%i'%perc)
 len(no_inters.id_site.unique())
 
 # Intersection after 2015 - all habitat types
@@ -70,7 +75,7 @@ inters = df[
     & (~df.n05lbhab.str.contains('invasif',na=False,case=False))
 ].copy()
 inters = inters.drop(columns=['id_site']).sjoin(r_geom[['id_site','geom']])
-inters.to_file(path.join(PATH,'inters.gpkg'),driver='GPKG',layer='inters75')
+inters.to_file(path.join(PATH,'inters.gpkg'),driver='GPKG',layer='inters%i'%perc)
 len(inters.id_site.unique())
 
 # Intersection after 2009 - Forest habitats only


@@ -1,7 +1,10 @@
+from csv import list_dialects
 import geopandas as gpd
 from uuid import uuid4
 import json,urllib.request
 from numpy import ndarray
+from geoalchemy2 import Geometry
+from sqlalchemy import text
 
 dict_columns = {
     'code_zh': 'code',
@@ -9,6 +12,11 @@ dict_columns = {
     'date_visite':'create_date',
 }
 
+def normalize_connexion(con):
+    if gpd.pd.__version__>'2.1.2':
+        return con.raw_connection()
+    else:
+        return con
 
 def get_nomenclature_id(con,cd,typ):
     """
@@ -32,33 +40,219 @@ def get_nomenclature_id(con,cd,typ):
     SELECT ref_nomenclatures.get_id_nomenclature('{typ}', '{cd}')
     """.format(cd=cd,typ=typ)
     with con.begin() as cnx:
-        res = cnx.execute(sql)
+        res = cnx.execute(text(sql))
     return res.one()[0] if res else None
 
-def to_tzh(df,con):
+def _cor_zh_area(tzh_code,typ,cover=False):
+    """
+    @tzh : pd.Serie. Series of values
+        corresponding to the pr_zh.t_zh."code" column.
+    @typ : str. COM, DEP, ref_geo
+    """
+    from math import ceil
+    table = 'cor_zh_area'
+    sqltzh = """
+    SELECT zh.id_zh, zh.geom FROM pr_zh.t_zh zh WHERE zh."code" in {tzh_code}
+    """.format(tzh_code=tuple(tzh_code))
+    tzh = gpd.read_postgis(sqltzh,con_gn,crs=4326)
+    if tzh.crs.srs.lower()=='epsg:4326':
+        tzh.to_crs(2154,inplace=True)
+    sqllarea = """
+    SELECT l.id_area, l.geom FROM ref_geo.l_areas l
+    JOIN ref_geo.bib_areas_types bib USING (id_type)
+    WHERE bib.type_code='{typ}' and l."enable"
+    """.format(typ=typ)
+    larea = gpd.read_postgis(sqllarea,con_gn,crs=2154)
+    df = _calc_recouvrmt(larea,tzh).rename(columns={'perc_rcvmt':'cover'})
+    if cover:
+        df['cover'] = [ceil(x) for x in df.cover]
+    else :
+        df.drop(columns=['cover'],inplace=True)
+    # return df
+    if not df.empty:
+        df.to_sql(
+            name=table, con=con_gn, schema='pr_zh',
+            if_exists='append', index=False
+        )
+        print('INSERT %i correspondances'%df.shape[0])
+    else:
+        print('AUCUNE nouvelles correspondances identifiées')
+
+def _calc_recouvrmt(df1,df2,how='inner'):
+    '''
+    Computes the coverage of df2 over df1,
+    for each geometry of df1:
+    Parameters
+    ----------
+    df1 : GeoDataFrame.
+    df2 : GeoDataFrame.
+    '''
+    iddf1 = df1.columns[0]
+    iddf2 = df2.columns[0]
+    # Spatial join
+    tmp = gpd.sjoin(
+        df1,
+        df2[['geom']],
+        predicate = 'intersects',
+        how = how)
+    tmp.dropna(subset=['index_right'],inplace=True)
+    tmp.index_right = tmp.index_right.astype(int)
+    tmp.reset_index(inplace=True)
+    tmp = tmp.join(
+        df2[['geom',iddf2]].rename(columns={'geom': 'right_geom'}),
+        on=['index_right'], how='left')
+    tmp2 = tmp[['index_right','right_geom',iddf2]].copy() \
+        .rename(columns={'right_geom': 'geom'}) \
+        .set_geometry('geom')
+    tmp1 = tmp[[iddf1,'geom']].copy() \
+        .set_geometry('geom')
+    if not tmp1.geom.values.is_valid.all():
+        tmp1.loc[~tmp1.geom.values.is_valid,'geom'] = tmp1.loc[~tmp1.geom.values.is_valid,'geom'].buffer(0)
+    if not tmp2.geom.values.is_valid.all():
+        tmp2.loc[~tmp2.geom.values.is_valid,'geom'] = tmp2.loc[~tmp2.geom.values.is_valid,'geom'].buffer(0)
+    tmp['perc_rcvmt'] = (tmp1.intersection(tmp2).area/tmp1.area)*100
+    return tmp[[iddf1,iddf2,'perc_rcvmt']]
+
+def _cor_zh_hydro(tzh_code):
+    """
+    @tzh : pd.Serie. Series of values
+        corresponding to the pr_zh.t_zh."code" column.
+    """
+    table = 'cor_zh_hydro'
+    sql = '''
+    SELECT h.id_hydro,zh.id_zh
+    FROM pr_zh.t_hydro_area h, pr_zh.t_zh zh
+    WHERE zh."code" in {tzh_code}
+        AND ST_INTERSECTS( ST_SetSRID(h.geom,4326),ST_MakeValid(ST_SetSRID(zh.geom,4326)))
+        AND (h.id_hydro,zh.id_zh) NOT IN (SELECT id_hydro,id_zh FROM pr_zh.cor_zh_hydro)
+    '''.format(tzh_code=tuple(tzh_code))
+    df = gpd.pd.read_sql_query(sql,con_gn)
+    if not df.empty:
+        df.to_sql(
+            name=table, con=con_gn, schema='pr_zh',
+            if_exists='append', index=False
+        )
+        print('INSERT %i correspondances'%df.shape[0])
+    else:
+        print('AUCUNE nouvelles correspondances identifiées')
+
+def _cor_zh_(tzh_code,typ):
+    """
+    @tzh : pd.Serie. Series of values
+        corresponding to the pr_zh.t_zh."code" column.
+    @typ : str. [hydro,rb]
+    """
+    typ = typ.lower()
+    table = 'cor_zh_%s'%typ
+    tab_typ = 't_hydro_area' if typ == 'hydro' else 't_river_basin'
+    id_typ = 'id_hydro' if typ == 'hydro' else 'id_rb'
+    sql = '''
+    SELECT h.{id_typ},zh.id_zh
+    FROM pr_zh.{tab_typ} h, pr_zh.t_zh zh
+    WHERE zh."code" in {tzh_code}
+        AND ST_INTERSECTS( ST_SetSRID(h.geom,4326),ST_MakeValid(ST_SetSRID(zh.geom,4326)))
+        AND (h.{id_typ},zh.id_zh) NOT IN (SELECT {id_typ},id_zh FROM pr_zh.{tab_to})
+    ;'''.format(
+        tzh_code = tuple(tzh_code),
+        id_typ = id_typ,
+        tab_typ = tab_typ,
+        tab_to = table)
+    df = gpd.pd.read_sql_query(sql,con_gn)
+    if not df.empty:
+        df.to_sql(
+            name=table, con=con_gn, schema='pr_zh',
+            if_exists='append', index=False
+        )
+        print('INSERT %i correspondances'%df.shape[0])
+    else:
+        print('AUCUNE nouvelles correspondances identifiées')
+def to_tzh(df,con,lst_area):
     tab = 't_zh'
     sch = 'pr_zh'
-    con.connect()
     lst_columns = [
-        x['name'] for x in con.dialect.get_columns(con,tab,sch) if x['name'] in df.columns
+        x['name'] for x in con.dialect.get_columns(con.connect(),tab,sch) if x['name'] in df.columns
     ]
+    # con = normalize_connexion(con)
     to_ins = df[lst_columns].copy()
-    try:
-        to_ins.to_postgis(
-            name=tab,
-            con=con,
-            schema=sch,
-            index=False,
-            if_exists='append',
-            method='multi'
-        )
-    except Exception as e:
-        print(e)
-    finally:
-        print("INSERT data TO t_zh OK !")
-    return gpd.pd.read_sql('SELECT id_zh,code FROM pr_zh.t_zh',con=con)
+    lst = []
+    for r,row in to_ins.iterrows():
+        tmplst = []
+        for v, vals in row.items():
+            if isinstance(vals,(str,int)):
+                if isinstance(vals,str) and vals.__contains__("'"):
+                    vals = vals.replace("'","''")
+                tmplst.append(vals)
+            else:
+                tmplst.append(str(vals))
+        lst.append(str(tuple(tmplst)))
+    sqlValue = ','.join(lst).replace('"',"'").replace("'nan'",'NULL')
+    sql = '''
+    INSERT INTO pr_zh.t_zh ({cols}) VALUES {vals}
+    RETURNING id_zh,code;
+    '''.format(
+        cols=','.join(to_ins.columns),
+        vals=sqlValue
+    )
+    try:
+        with con.begin() as cnx:
+            _res = cnx.execute(text(sql))
+            res = _res.fetchall()
+        # to_ins.to_postgis(tab,con,sch,if_exists='append',index=False,)
+    except Exception as e:
+        raise e
+    res_df = gpd.pd.DataFrame(res)
+    for area in lst_area:
+        cov = True if area == 'COM' else False
+        _cor_zh_area(tzh_code=res_df.code,typ=area,cover=cov)
+        print('INSERT cor_zh_area %s OK !'%area)
+    # _cor_zh_hydro(tzh_code=res_df.code)
+    _cor_zh_(tzh_code=res_df.code,typ='hydro')
+    print('INSERT cor_zh_hydro OK !')
+    _cor_zh_(tzh_code=res_df.code,typ='rb')
+    print('INSERT cor_zh_rb OK !')
+    print("INSERT data TO t_zh OK !")
+    return res_df
+    # return gpd.pd.read_sql(
+    #     'SELECT id_zh,code FROM pr_zh.t_zh WHERE code {sym} {lst};'.format(
+    #         sym='IN' if to_ins.shape[0] > 1 else '=',
+    #         lst=str(tuple(to_ins.code)) if to_ins.shape[0] > 1 else "'%s'"%to_ins.code.to_list()[0]
+    #     ),
+    #     con=con)
 
     # id_zh = df[['code','pk']].rename(columns={'pk':'id_zh'})
 
 def to_cor_lim_list(cor_lim,con):
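A toy check of _calc_recouvrmt() under its implicit layout assumptions (id in the first column, active geometry column named geom): a unit square whose right half is covered by the joined layer comes back with perc_rcvmt 50. Column names id_zone / id_area are illustrative.

    import geopandas as gpd
    from shapely.geometry import box

    df1 = gpd.GeoDataFrame({'id_zone': [1]}, geometry=[box(0, 0, 1, 1)], crs=2154).rename_geometry('geom')
    df2 = gpd.GeoDataFrame({'id_area': [10]}, geometry=[box(0.5, 0, 2, 1)], crs=2154).rename_geometry('geom')
    _calc_recouvrmt(df1, df2)
    #    id_zone  id_area  perc_rcvmt
    # 0        1       10        50.0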
@@ -68,16 +262,26 @@ def to_cor_lim_list(cor_lim,con):
     :param con: a sqlalchemy connection
     :return: None
     """
-    _cor_lim = (cor_lim
-        .set_index('id_lim_list')
-        .cd_nomenclature_delimitation.str.split(',',expand=True)
-        .stack()
-        .str.strip()
-        .to_frame('cd_lim')
-        .droplevel(-1)
-    )
+    typ_class = list(set([tup.cd_nomenclature_delimitation.__class__ for tup in cor_lim.itertuples()]))
+    if typ_class[0] == list:
+        _cor_lim = (cor_lim
+            .set_index('id_lim_list')
+            .cd_nomenclature_delimitation
+            .explode()
+            .to_frame('cd_lim'))
+    else:
+        _cor_lim = (cor_lim
+            .set_index('id_lim_list')
+            .cd_nomenclature_delimitation.str.split(',',expand=True)
+            .stack()
+            .str.strip()
+            .to_frame('cd_lim')
+            .droplevel(-1)
+        )
     _cor_lim['id_lim'] = [get_nomenclature_id(con,x,'CRIT_DELIM') for x in _cor_lim.cd_lim]
     _cor_lim.drop('cd_lim',axis=1,inplace=True)
+    con = normalize_connexion(con)
     try:
         _cor_lim.to_sql(
             name='cor_lim_list',
@@ -116,8 +320,9 @@ def to_cor_zh_cb(id_zh,df,con):
         .explode()
         .str.strip()
         .to_frame('lb_code')
-        .droplevel(-1)
     )
+    con = normalize_connexion(con)
     try:
         cor_zh_cb.to_sql(
             name='cor_zh_cb',
@@ -144,7 +349,10 @@ def to_t_activity(id_zh,actv,con):
     t_activ = gpd.pd.DataFrame()
     for i,x in _activ.iterrows():
-        res = gpd.pd.DataFrame(json.loads(x['acti_impact']))
+        if isinstance(x['acti_impact'],str):
+            res = gpd.pd.DataFrame(json.loads(x['acti_impact']))
+        else :
+            res = gpd.pd.DataFrame(x['acti_impact'])
         res['id_zh'] = x['id_zh']
         t_activ = gpd.pd.concat([t_activ,res])
     t_activ.set_index('id_zh',inplace=True)
@@ -167,6 +375,7 @@ def to_t_activity(id_zh,actv,con):
     t_activ.drop(['cd_activite_humaine','localisation','impact'],axis=1,inplace=True)
     impact_list.drop(['cd_impact','_cd_impact'],axis=1,inplace=True)
 
+    con = normalize_connexion(con)
     try:
         t_activ.to_sql(
             name='t_activity',
@@ -205,7 +414,7 @@ def check_habitat(habitat,con):
         code=tuple(habitat) if isinstance(habitat,(list,gpd.pd.Series,ndarray)) else f"'{habitat}'"
     )
     with con.begin() as cnx:
-        res = cnx.execute(sql).all()
+        res = cnx.execute(text(sql)).all()
     return [x[0] for x in res if x]
@@ -218,8 +427,9 @@ def filter_habitat(habitat,con):
     return cd_zh,cd_notzh
 
-def check_observ(obs,org,con):
+def check_observ(obs,id_org,con):
     _obs = normalize_observers(obs)
+    con = normalize_connexion(con)
     sql = '''
     SELECT
         r.id_role AS ids_observers,
@ -227,19 +437,54 @@ def check_observ(obs,org,con):
    FROM utilisateurs.t_roles r
    JOIN utilisateurs.bib_organismes USING (id_organisme)
    WHERE CONCAT(UPPER(r.nom_role), ' ', INITCAP(prenom_role)) {symbol} {code}
-    AND nom_organisme = '{org}'
+    AND id_organisme = '{org}'
    '''.format(
        symbol='IN' if isinstance(_obs,(list,gpd.pd.Series,ndarray)) else '=',
        code=tuple(_obs) if isinstance(_obs,(list,gpd.pd.Series,ndarray)) else f"'{_obs}'",
-        org=org
+        org=id_org
    )
-    with con.begin() as cnx:
-        res = cnx.execute(sql).all()
-    return [x[0] for x in res if x]
+    res = (gpd.pd.read_sql(
+        sql,
+        con)
+        .merge(_obs.to_frame('observers'),how='right',on='observers'))
+    return res
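
# Illustration (fabricated rows) of the right-merge above: observers with no match
# in utilisateurs.t_roles come back with ids_observers = NaN, which is what
# insert_observ() below uses to detect missing users.
import pandas as pd
db_side = pd.DataFrame({'ids_observers': [7], 'observers': ['DUPONT Jean']})
wanted = pd.Series(['DUPONT Jean', 'MARTIN Luc'])
merged = db_side.merge(wanted.to_frame('observers'), how='right', on='observers')
print(merged[merged.ids_observers.isnull()])   # -> only MARTIN Luc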

-def insert_observ(obs,org,con):
-    check_observ(obs,org,con)
-    pass
+def insert_observ(obs,id_org,con):
+    check = check_observ(obs,id_org,con)
+    usr_missing = check[check.ids_observers.isnull()].drop_duplicates()
+    if usr_missing.empty:
+        print("Observateurs trouvés dans le schéma `utilisateurs`")
+        return check
+    Q = input("Observateurs `{}` non trouvés dans le schéma `utilisateurs`, voulez-vous les ajouter ? (y/n) ".format(usr_missing.observers.to_list()))
+    if Q.lower() == 'y':
+        id_obs_miss = _insert_observ(usr_missing,id_org,con)
+        for i,row in id_obs_miss.iterrows():
+            check.loc[check.observers==row.observers,'ids_observers'] = row.id_role
+        # check = check_observ(obs,id_org,con)
+        return check.astype({'ids_observers':int})
+    else:
+        return 'No'
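
# Behaviour sketch (fabricated frame): after _insert_observ() returns the new
# id_role values, the NaN slots are filled in place and the column can be cast
# back to int, matching the astype() call above.
import pandas as pd
check = pd.DataFrame({'observers': ['MARTIN Luc'], 'ids_observers': [None]})
check.loc[check.observers == 'MARTIN Luc', 'ids_observers'] = 42  # id from the INSERT
print(check.astype({'ids_observers': int}))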

+def _insert_observ(obs,id_org,con):
+    _obs = gpd.pd.concat(
+        [obs,obs.observers.str.split(' ',n=-1,expand=True)],
+        axis=1,ignore_index=True)
+    _obs['id_org'] = id_org
+    sql ='''
+    INSERT INTO utilisateurs.t_roles (nom_role,prenom_role,id_organisme)
+    VALUES {vals}
+    --ON CONFLICT (nom_organisme) DO NOTHING
+    RETURNING id_role, CONCAT(UPPER(nom_role), ' ', INITCAP(prenom_role)) AS observers;
+    '''.format(vals=','.join(["('%s','%s',%i)"%(row[3],row[4],row[5]) for row in _obs.itertuples()]))
+    # con = normalize_connexion(con)
+    # res = gpd.pd.read_sql(sql,con)
+    # return res
+    with con.begin() as cnx:
+        _res = cnx.execute(text(sql))
+        res = _res.fetchall()
+    return gpd.pd.DataFrame(res)
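
# The VALUES clause above is assembled by %-interpolation; a parameterized variant
# (a sketch under the same table/columns, not the committed code) sidesteps quoting
# problems in names such as O'Brien:
from sqlalchemy import text
stmt = text("""
    INSERT INTO utilisateurs.t_roles (nom_role, prenom_role, id_organisme)
    VALUES (:nom, :prenom, :id_org)
    RETURNING id_role
""")
# with con.begin() as cnx:
#     res = cnx.execute(stmt, {"nom": "MARTIN", "prenom": "Luc", "id_org": 1}).fetchall()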

def normalize_observers(obs):
    _obs = obs.str.split(' ',expand=True)
@ -248,15 +493,14 @@ def normalize_observers(obs):
    _obs_stack = _obs.stack().droplevel(-1)
    return _obs_stack.groupby(_obs_stack.index).aggregate(' '.join)
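
# Toy run (invented names) of the split/stack/groupby-join idiom normalize_observers()
# relies on: split each value into tokens, stack them against the row index, then
# glue the tokens back together per row.
import pandas as pd
obs = pd.Series(['dupont jean', 'martin luc'])
tokens = obs.str.split(' ', expand=True)
stacked = tokens.stack().droplevel(-1)
print(stacked.groupby(stacked.index).aggregate(' '.join))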

-def insert_orga(org,con):
-    pass

def select_orga_user(org,con):
    sql ='''
    SELECT id_organisme FROM utilisateurs.bib_organismes
    WHERE nom_organisme = '{nom}'
    '''.format(nom=org[0])
    with con.begin() as cnx:
-        _res = cnx.execute(sql)
+        _res = cnx.execute(text(sql))
        res = _res.one_or_none()
    return res
@ -274,7 +518,7 @@ def insert_orga_user(org,con):
        RETURNING id_organisme;
        '''.format(nom=org[0])
        with con.begin() as cnx:
-            _res = cnx.execute(sql)
+            _res = cnx.execute(text(sql))
            res = _res.one_or_none()
    else:
        print("Organisme `{}` existant dans le schéma `utilisateurs`".format(org[0]))
@ -283,10 +527,10 @@ def insert_orga_user(org,con):

def select_orga_przh(org,con):
    sql ='''
    SELECT id_org FROM pr_zh.bib_organismes
-    WHERE name = '{nom}'
+    WHERE name = '{nom}';
    '''.format(nom=org[0])
    with con.begin() as cnx:
-        _res = cnx.execute(sql)
+        _res = cnx.execute(text(sql))
        res = _res.one_or_none()
    return res
@ -303,19 +547,29 @@ def insert_orga_przh(org,con):
        RETURNING id_org;
        '''.format(nom=org[0],abbrev=f"'{org[1]}'" if org[1] else 'NULL')
        with con.begin() as cnx:
-            _res = cnx.execute(sql)
+            _res = cnx.execute(text(sql))
            res = _res.fetchall()
    else:
        print("Organisme `{}` existant dans le schéma `pr_zh`".format(org[0]))
    return res[0]

def insert_users_missing(user,org,con):
-    id_org = insert_orga_przh(org,con)
+    id_org_przh = insert_orga_przh(org,con)
    id_user_orga = insert_orga_user(org,con)
-    obsv_missing = insert_observ(user,id_user_orga,con)
+    obsv = insert_observ(user,id_user_orga,con)
+    return obsv, id_org_przh

+def filter_zh(code,con):
+    sql ='''
+    SELECT code FROM pr_zh.t_zh
+    WHERE code IN {}
+    '''.format(code)
+    con = normalize_connexion(con)
+    res = gpd.pd.read_sql(sql,con)
+    return res

-def insert_zh_fromapi(url,con,dep_filter,orga,prefix_hab_rq=''):
+def insert_zh_fromapi(url,con,dep_filter,orga,lst_area=['DEP','COM'],prefix_hab_rq=''):
""" """
Insert data from API into geonature database Insert data from API into geonature database
@ -335,17 +589,29 @@ def insert_zh_fromapi(url,con,dep_filter,orga,prefix_hab_rq=''):
        None
    """
    api = gpd.read_file(url)
+    if api.empty:
+        print("Aucun zone humide trouvée")
+        return None
-    df = (api#[api.code_zh.str.startswith(dep_filter)]
+    df = (api[api.action=='Créer']
        .rename(columns=dict_columns)
        .rename_geometry('geom')
-        .merge(load_missing_propertie(url,'cd_nomenclature_delimitation',dep_filter),on='code')
+        # .merge(load_missing_propertie(url,'cd_nomenclature_delimitation',dep_filter), on='pk')
        )
+    df.observateur = normalize_observers(df.observateur)
+    users, id_org = insert_users_missing(df.observateur,orga,con)
+    df['id_org'] = id_org
+    df = (df
+        .merge(users.drop_duplicates(),how='left',left_on='observateur',right_on='observers')
+        .rename(columns={'ids_observers':'id_role'})
+    )
-    insert_users_missing(df.observers,orga,con)

    df['zh_uuid'] = [uuid4() for _ in range(len(df))]
    df['id_lim_list'] = [uuid4() for _ in range(len(df))]
    df['id_sdage'] = [get_nomenclature_id(con,x,'SDAGE') for x in df.cd_typo_sdage]
+    df['create_author'] = df.id_role.copy()
+    df['update_author'] = df.id_role.copy()

    cd_hab = (df
        .set_index('code')
@ -366,7 +632,7 @@ def insert_zh_fromapi(url,con,dep_filter,orga,prefix_hab_rq=''):
u = _df[_df.action=="Modifier"].copy() u = _df[_df.action=="Modifier"].copy()
if not c.empty: if not c.empty:
id_zh = to_tzh(c,con) id_zh = to_tzh(c,con,lst_area)
to_cor_zh_cb(id_zh,c,con) to_cor_zh_cb(id_zh,c,con)
to_cor_lim_list(c[['id_lim_list','cd_nomenclature_delimitation']],con) to_cor_lim_list(c[['id_lim_list','cd_nomenclature_delimitation']],con)
to_t_activity(id_zh,c[['code','acti_impact']],con) to_t_activity(id_zh,c[['code','acti_impact']],con)
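
# The hunk above routes features by their `action` property; a toy illustration of
# the create/update split (values fabricated):
import pandas as pd
_demo = pd.DataFrame({'action': ['Créer', 'Modifier'], 'code': ['38X1', '38X2']})
c = _demo[_demo.action == 'Créer'].copy()      # new wetlands -> full insert chain
u = _demo[_demo.action == 'Modifier'].copy()   # existing wetlands -> update path
print(len(c), len(u))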
@ -398,11 +664,11 @@ def load_missing_propertie(url,propertie,dep_filter='38'):
    features = output['items']['features']
    res = {
-        'code':[x['properties']['code_zh'] for x in features if x['properties']['code_zh'].startswith(dep_filter)],
+        'pk':[x['properties']['pk'] for x in features if x['properties']['departement'] == dep_filter],
        propertie:[
            ','.join(x['properties'][propertie])
            for x in features
-            if x['properties']['code_zh'].startswith(dep_filter)],
+            if x['properties']['departement'] == dep_filter],
    }
    return gpd.pd.DataFrame(res)
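
# Minimal fabricated payload showing what the comprehensions above extract after
# the switch from code_zh-prefix matching to the `departement` property:
features = [
    {'properties': {'pk': 1, 'departement': '38', 'crit': ['A', 'B']}},
    {'properties': {'pk': 2, 'departement': '05', 'crit': ['C']}},
]
res = {
    'pk': [x['properties']['pk'] for x in features if x['properties']['departement'] == '38'],
    'crit': [','.join(x['properties']['crit']) for x in features
             if x['properties']['departement'] == '38'],
}
print(res)   # {'pk': [1], 'crit': ['A,B']}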
@ -411,14 +677,14 @@ if __name__ == "__main__":
    from sqlalchemy import create_engine
    from sqlalchemy.engine import URL
    # Database parameters
-    user = ''
-    pwd = ""
-    adr = ''
-    base = ''
+    user = 'geonatadmin'
+    pwd = "prep!!Ge0naT38*aDm1n"
+    adr = 'localhost'
+    base = 'geonature2db'
    url = URL.create("postgresql+psycopg2", username=user, password=pwd, host=adr, database=base)
    con_gn = create_engine(url)
-    from pycen import con_gn
+    # from pycen import con_gn
    # Department number used to identify the wetlands within the territory
    # ['38', '05'], default : 38
    dep_filter = '38'
@ -426,6 +692,9 @@ if __name__ == "__main__":
    prefix_hab_rmk = 'Autre(s) habitat(s) décrit(s) :\n'
    # [Organisation name, abbreviation]
    organisme = ['Parc national des Écrins','PNE']
+    # List of type_area values to intersect when populating the cor_zh_area table
+    listAreaCover = ['DEP','COM','APB','ZPS','SIC','ZNIEFF1','ZNIEFF2','ENS_ZI']
-    api = 'https://geonature.ecrins-parcnational.fr/api/exports/api/21?departement=38'
+    api = 'https://geonature.ecrins-parcnational.fr/api/exports/api/21?departement=%s'%dep_filter
-    # insert_zh_fromapi(api,con_gn,dep_filter,organisme,prefix_hab_rq=prefix_hab_rmk)
+    insert_zh_fromapi(api,con_gn,dep_filter,organisme,lst_area=listAreaCover,prefix_hab_rq=prefix_hab_rmk)

View File

@ -79,7 +79,7 @@ else:
sql = """ sql = """
SELECT DISTINCT cd_nom,cd_ref,nom_vern,id_rang FROM {sch}.{tab} SELECT DISTINCT cd_nom,cd_ref,nom_vern,id_rang FROM {sch}.{tab}
WHERE cd_nom IN ('{lst}') WHERE cd_nom IN ('{lst}')
--AND cd_nom NOT IN (SELECT cd_nom FROM {sch}.bib_noms WHERE cd_nom IN ('{lst}')) AND cd_nom NOT IN (SELECT cd_nom FROM {sch}.bib_noms WHERE cd_nom IN ('{lst}'))
""".format( """.format(
sch = schema_geo, sch = schema_geo,
tab = table_geo, tab = table_geo,
@ -98,8 +98,12 @@ df_taxref.drop(columns=['id_rang']).to_sql(
)
# fetch the IDs from the taxonomie.bib_noms table
-# sql = 'SELECT id_nom FROM {sch}.{tab} WHERE id_nom > {id_nom_max}'.format(
-id_liste = 104 # id_liste of the list present in the taxonomie.bib_listes table
+sql = 'SELECT distinct cd_nom FROM {sch}.{tab} WHERE cd_nom IN ({id_nom_max})'.format(
+    sch = schema_geo,
+    tab = 'bib_noms',
+    id_nom_max = ",".join(lst_cdnom))
+id_liste = 112 # id_liste of the list present in the taxonomie.bib_listes table
sql = """ sql = """
SELECT id_nom FROM {sch}.{tab} WHERE cd_nom IN ({id_nom_max}) SELECT id_nom FROM {sch}.{tab} WHERE cd_nom IN ({id_nom_max})
AND id_nom NOT IN (SELECT id_nom FROM {sch}.cor_nom_liste WHERE id_liste = '{id_liste}') AND id_nom NOT IN (SELECT id_nom FROM {sch}.cor_nom_liste WHERE id_liste = '{id_liste}')
@ -109,14 +113,14 @@ AND id_nom NOT IN (SELECT id_nom FROM {sch}.cor_nom_liste WHERE id_liste = '{id_
id_nom_max = ",".join(lst_cdnom), id_nom_max = ",".join(lst_cdnom),
id_liste = id_liste id_liste = id_liste
) )
df = pd.read_sql_query(sql,con_geo) _df = pd.read_sql_query(sql,con_geo)
# df = pd.read_sql_table( # _df = pd.read_sql_table(
# table_name='bib_noms', # table_name='bib_noms',
# con= con_geo, # con= con_geo,
# schema=schema_geo # schema=schema_geo
# ) # )
df['id_liste'] = id_liste # id_liste de la liste présente dans la table taxonomie.bib_listes _df['id_liste'] = id_liste # id_liste de la liste présente dans la table taxonomie.bib_listes
df = df[['id_liste','id_nom']] df = _df[['id_liste','id_nom']]
# envoie de la liste dans la table taxonomie.bib_noms # envoie de la liste dans la table taxonomie.bib_noms
df.to_sql( df.to_sql(
name = 'cor_nom_liste', name = 'cor_nom_liste',
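
# Fabricated mini-example of the anti-join both queries above express in SQL: only
# the ids not already present in the target table are kept for insertion.
import pandas as pd
candidates = pd.DataFrame({'id_nom': [1, 2, 3]})
already_in_list = {2}
print(candidates[~candidates.id_nom.isin(already_in_list)])   # keeps 1 and 3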

View File

@ -217,16 +217,17 @@ if __name__ == "__main__":
    api_taxref = 'https://geonature.cen-isere.fr/taxhub/api/taxref'
    # Load parameters for the taxa file
-    PATH = '/home/colas/Documents/tmp/CHARVAS'
-    file = 'liste_sp_CHAR.xlsx'
-    sheet = 'liste_sp'
+    PATH0 = '/media/colas/SRV/FICHIERS'
+    PATH = 'SITES/SITES GERES/PLAN_PLANCHETTES/Scientifique et technique/Flore et habitats/Suivi flore patrimoniale 2025'
+    file = 'donnes_sp_suivi2025.xlsx'
+    sheet = 'liste sp'
    # List of input CD_NOMs
    cd_col = 'cd_ref' # Name of the column to use in the ``sheet`` worksheet
    # Read the data
-    taxlist = pd.read_excel(os.path.join(PATH,file),sheet,usecols=[cd_col],header=0)
-    tab_sp = pd.read_excel(os.path.join(PATH,file),sheet,index_col=cd_col)
+    taxlist = pd.read_excel(os.path.join(PATH0,PATH,file),sheet,usecols=[cd_col],header=0)
+    tab_sp = pd.read_excel(os.path.join(PATH0,PATH,file),sheet,index_col=cd_col)
    lst = taxlist[cd_col]
    # Retrieve the statuses
@ -278,7 +279,7 @@ if __name__ == "__main__":
    pivlib = tab_sp.merge(pivlib,on=[cd_col],how='left')
    print('INIT writer')
-    NAME_OUT = os.path.join(PATH,sheet+'_status.xlsx')
+    NAME_OUT = os.path.join(PATH0,PATH,sheet+'_status.xlsx')
    with pd.ExcelWriter(NAME_OUT) as writer:
        df.to_excel(
            writer,sheet_name='v_bdc_status',index=False
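
# In-memory sketch of the writer pattern above (fabricated frame; assumes an Excel
# engine such as openpyxl is installed) - one ExcelWriter can receive several sheets:
import io
import pandas as pd
buf = io.BytesIO()
with pd.ExcelWriter(buf) as writer:
    pd.DataFrame({'cd_ref': [1]}).to_excel(writer, sheet_name='v_bdc_status', index=False)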

View File

@ -14,8 +14,8 @@ CREATE SERVER IF NOT EXISTS %s
'''%(wpr_name,url)

table = '''
-DROP FOREIGN TABLE IF EXISTS %s;
-CREATE FOREIGN TABLE IF NOT EXISTS flux_geo.%s (
+DROP FOREIGN TABLE IF EXISTS {table};
+CREATE FOREIGN TABLE IF NOT EXISTS flux_geo.{table} (
    id_parcel varchar,
    surf_parcel float,
    code_cultu varchar,
@ -24,9 +24,9 @@ CREATE FOREIGN TABLE IF NOT EXISTS flux_geo.%s (
    culture_d2 varchar,
    geom public.geometry(multipolygon, 4326)
)
-SERVER %s
-OPTIONS (layer '%s');
-'''%(tab_name,tab_name,wpr_name,layer)
+SERVER {srv}
+OPTIONS (layer '{layer}');
+'''.format(table=tab_name,srv=wpr_name,layer=layer)

with con.begin() as cnx:
    cnx.execute(drop_fgn)
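
# The hunk above trades %-interpolation (which needed tab_name passed twice) for
# str.format() with named fields; tiny standalone illustration with placeholder names:
tmpl = 'DROP FOREIGN TABLE IF EXISTS {table}; CREATE FOREIGN TABLE {table} (...) SERVER {srv};'
print(tmpl.format(table='rpg_2024', srv='fdw_rpg'))   # each name passed once, reused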
@ -38,14 +38,16 @@ with con.begin() as cnx:
#######
#######

-from pycen import con_fon as con
+from pycen import con_anm as con
from sqlalchemy import text

-db = 'azalee'
+db = 'bd_cen'
host = '91.134.194.221'
port = '5432'
-srv_name = 'fdw_azalee'
-tab_name = 'inventaire_zh'
-sch_name = 'inventaires'
+srv_name = 'fdw_bd_cen'
+tab_name = 'communes'
+sch_name = 'territoire'
+view_name = 'v_'+tab_name
+mview_name = 'vm_'+tab_name
user = 'cen_admin'
pwd = '#CEN38@venir'
@ -66,56 +68,40 @@ CREATE USER MAPPING IF NOT EXISTS FOR %s
table = '''
CREATE FOREIGN TABLE IF NOT EXISTS {sch_name}.{tab_name} (
-    site_code varchar(10),
-    nom varchar,
-    auteur_site varchar,
-    auteur_geom varchar,
-    auteur_last_maj varchar,
-    date_site date,
-    date_geom date,
-    date_last_maj date,
-    type_milieu varchar,
-    type_site varchar,
-    typo_sdage varchar,
-    rmq_site text,
-    rmq_fct_majeur text,
-    rmq_interet_patri text,
-    rmq_bilan_menace text,
-    rmq_orient_act text,
-    rmq_usage_process text,
-    code_cb varchar,
-    lib_cb text,
-    activ_hum varchar,
-    impact varchar,
-    "position" varchar,
-    rmq_activ_hum text,
-    connexion varchar,
-    subm_orig varchar,
-    subm_freq varchar,
-    subm_etend varchar,
-    fct_bio varchar,
-    fct_hydro varchar,
-    int_patri varchar,
-    "val_socioEco" varchar,
-    crit_delim varchar,
-    crit_def_esp varchar,
-    entree_eau_reg varchar,
-    entree_eau_perm varchar,
-    entree_eau_topo varchar,
-    sortie_eau_reg varchar,
-    sortie_eau_perm varchar,
-    sortie_eau_topo varchar,
+    code_insee varchar(5),
+    id varchar(24),
+    prec_plani float8,
+    nom varchar(45),
+    statut varchar(20),
+    canton varchar(45),
+    arrondisst varchar(45),
+    depart varchar(30),
+    region varchar(30),
+    popul int4,
+    multican varchar(3),
+    actif bool,
+    epfl varchar(10),
    geom geometry(geometry,2154)
)
SERVER {fgn_server}
-OPTIONS (schema_name 'zones_humides', table_name 'v_zoneshumides');
+OPTIONS (schema_name 'administratif', table_name 'communes');
'''.format(
    sch_name=sch_name, tab_name=tab_name, fgn_server=srv_name
)
+vm = '''
+CREATE MATERIALIZED VIEW {sch_name}.{mview_name} AS
+SELECT * FROM {sch_name}.{tab_name};
+GRANT SELECT ON {sch_name}.{mview_name} TO grp_consult;
+GRANT SELECT ON {sch_name}.{mview_name} TO cen_user;
+'''.format(sch_name=sch_name, mview_name=mview_name, tab_name=tab_name)

with con.begin() as cnx:
-    cnx.execute(create_ext)
-    cnx.execute(drop_fgn)
-    cnx.execute(fgn_data_wrapper)
-    cnx.execute(fgn_user_wrapper)
+    # cnx.execute(create_ext)
+    # cnx.execute(drop_fgn)
+    # cnx.execute(fgn_data_wrapper)
+    # cnx.execute(fgn_user_wrapper)
    cnx.execute(text(table))
+with con.begin() as cnx:
+    cnx.execute(text(vm))
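
# Once the materialized view above exists, later runs only need a refresh rather
# than a rebuild (sketch reusing the names defined in this script):
refresh = 'REFRESH MATERIALIZED VIEW {sch_name}.{mview_name};'.format(
    sch_name=sch_name, mview_name=mview_name)
# with con.begin() as cnx:
#     cnx.execute(text(refresh))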

View File

@ -54,6 +54,12 @@ def add_limit_user(con,user,date_limit):
        cnx.execute(alter.format(usr=user,date=date_limit))

+def change_password(con,user,pwd):
+    alter = """ALTER USER "{usr}" WITH PASSWORD '{password}';"""
+    with con.begin() as cnx:
+        cnx.execute(alter.format(usr=user,password=pwd))
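
# Usage sketch for the new helper (placeholder names, not real credentials); note
# that the password is interpolated into the statement, so a value containing a
# single quote would need doubling first:
pwd_safe = "n3w-P@ss".replace("'", "''")   # hypothetical pre-escaping step
print("""ALTER USER "{usr}" WITH PASSWORD '{password}';""".format(usr='some_user', password=pwd_safe))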

def create_grp(con,grp):
    sql = """CREATE ROLE {grp} WITH
        NOSUPERUSER

View File

@ -15,10 +15,7 @@ def znieff(path,code,column='NM_REGZN'):
    if isinstance(code,str):
        return df[df[column]==code]
-    elif isinstance(code,pd.Series) or \
-        isinstance(code,list) or \
-        isinstance(code,pd.Index) or \
-        isinstance(code,np.ndarray):
+    elif isinstance(code,(pd.Series,list,pd.Index,np.ndarray)):
        code = list(code)
        return df[df[column].isin(code)]
    else:
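
# Quick demonstration of the tuple form of isinstance() used by the refactor above;
# one call now covers all four collection types:
import numpy as np
import pandas as pd
for code in ('820000001', ['820000001'], pd.Index(['820000001']), np.array(['820000001'])):
    print(type(code).__name__, isinstance(code, (pd.Series, list, pd.Index, np.ndarray)))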
@ -31,10 +28,7 @@ def source_znieff(path,code,column='NM_SFFZN'):
    if isinstance(code,str):
        return df[df[column]==code]
-    elif isinstance(code,pd.Series) or \
-        isinstance(code,list) or \
-        isinstance(code,pd.Index) or \
-        isinstance(code,np.ndarray):
+    elif isinstance(code,(pd.Series,list,pd.Index,np.ndarray)):
        code = list(code)
        return df[df[column].isin(code)]
    else:
@ -47,10 +41,7 @@ def source(path,code,column='ID_SOURCE'):
    if isinstance(code,str):
        return df[df[column]==code]
-    elif isinstance(code,pd.Series) or \
-        isinstance(code,list) or \
-        isinstance(code,pd.Index) or \
-        isinstance(code,np.ndarray):
+    elif isinstance(code,(pd.Series,list,pd.Index,np.ndarray)):
        code = list(code)
        return df[df[column].isin(code)]
    else:

View File

@ -6,7 +6,6 @@ from io import BytesIO
from os import (walk, path, rename)
from zipfile import ZipFile, ZIP_DEFLATED

def main(in_dir, dic_values, backup_suffix=None,encoding='UTF-8'):
    for (dirpath, dirnames, filenames) in walk(in_dir):
        qgzs = filter(filenames, '*.qgz')
@ -37,12 +36,12 @@ def main(in_dir, dic_values, backup_suffix=None,encoding='UTF-8'):
if __name__ == '__main__':
    backup_suffix = '.orig' # Don't overwrite orig .qgz (just in case...), append ".orig"
-    in_dir = r'/home/colas/Documents/tmp/TRAVAUX'
+    in_dir = r'/home/colas/Documents/tmp/MARJORIE'
    encodage = 'UTF-8'
    dic_values = {
        '192.168.0.189': '91.134.194.221',
-        'authcfg=1739lo5': 'authcfg=ceniser',
+        'authcfg=xoibz7j': 'authcfg=ceniser',
        'bd_cen38': 'bd-cen-38',
        # 'MS Shell Dlg 2':'Tahoma',
        # 'https://datacarto.datara.gouv.fr':' https://datacarto.open-datara.fr',