create file

Colas Geier 2025-02-25 16:51:06 +01:00
parent 8a350557db
commit 7b1d14325d
7 changed files with 709 additions and 0 deletions

12_ODK/remunt.sh Normal file

@@ -0,0 +1,14 @@
# Check the Let's Encrypt files of the dockerised nginx
docker exec -it central-nginx-1 /bin/bash
ls -l /etc/letsencrypt/archive/odk2.cen-isere.fr/
sudo ls -la /etc/letsencrypt/live/odk2.cen-isere.fr/
# Transfer the Certbot certificates into the container and repoint the live/ symlinks
WEB_ADRESSE="odk2.cen-isere.fr"
ID_CONTAINER=$(docker container ls --all | grep central-nginx-1 | awk '{print $1}')  # id of the central-nginx-1 container
NUM_PEM=$(sudo ls -Art /etc/letsencrypt/archive/$WEB_ADRESSE | tail -n 1 | grep -o -E '[0-9]+')  # numeric suffix of the newest archive files
sudo docker cp /etc/letsencrypt/archive/$WEB_ADRESSE $ID_CONTAINER:/etc/letsencrypt/archive/
docker exec -it central-nginx-1 sh -c "ln -fs /etc/letsencrypt/archive/$WEB_ADRESSE/privkey$NUM_PEM.pem /etc/letsencrypt/live/$WEB_ADRESSE/privkey.pem"
docker exec -it central-nginx-1 sh -c "ln -fs /etc/letsencrypt/archive/$WEB_ADRESSE/chain$NUM_PEM.pem /etc/letsencrypt/live/$WEB_ADRESSE/chain.pem"
docker exec -it central-nginx-1 sh -c "ln -fs /etc/letsencrypt/archive/$WEB_ADRESSE/fullchain$NUM_PEM.pem /etc/letsencrypt/live/$WEB_ADRESSE/fullchain.pem"
docker exec -it central-nginx-1 sh -c "ln -fs /etc/letsencrypt/archive/$WEB_ADRESSE/cert$NUM_PEM.pem /etc/letsencrypt/live/$WEB_ADRESSE/cert.pem"
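# Optional sanity check (a sketch, assuming openssl is present in the container
# image): confirm the relinked certificate is the renewed, unexpired one, then
# reload nginx so it picks the new files up.
docker exec -it central-nginx-1 sh -c "openssl x509 -noout -enddate -in /etc/letsencrypt/live/$WEB_ADRESSE/fullchain.pem"
docker exec -it central-nginx-1 nginx -s reload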


@@ -0,0 +1,145 @@
import geopandas as gpd
from sqlalchemy import text  # wrap raw SQL so it also runs under SQLAlchemy >= 2
from pycen import con_bdcen
###############
#### TOPO #####
###############
sql = '''
ALTER TABLE sites_topo.topo_ligne RENAME column "Geometry" to geom;
ALTER TABLE sites_topo.topo_ligne OWNER TO cen_admin;
CREATE SEQUENCE sites_topo.topo_ligne_id_seq OWNED BY sites_topo.topo_ligne."PK_UID";
SELECT setval('sites_topo.topo_ligne_id_seq', coalesce(max("PK_UID"), 0) + 1, false) FROM sites_topo.topo_ligne;
ALTER TABLE sites_topo.topo_ligne ALTER COLUMN "PK_UID" SET DEFAULT nextval('sites_topo.topo_ligne_id_seq');
'''
with con_bdcen.begin() as cnx:
    cnx.execute(text(sql))
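# Quick verification (a sketch): the "PK_UID" default should now reference the
# new sequence before relying on auto-generated ids at insert time.
check = gpd.pd.read_sql("""
    SELECT column_default FROM information_schema.columns
    WHERE table_schema = 'sites_topo' AND table_name = 'topo_ligne'
      AND column_name = 'PK_UID'
""", con_bdcen)
print(check)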
tline_clt = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_consultation.sqlite',layer='topo_ligne')
tline_sai = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_saisie.sqlite',layer='topo_ligne')
tmp = gpd.pd.concat([tline_clt,tline_sai])
isdupl = tmp.geometry.to_wkt().duplicated(keep=False)
tmp[isdupl]  # inspect duplicated geometries (REPL-style)
# NB: a negative buffer empties line geometries, so this filter likely keeps every feature
iners = tline_sai.buffer(-1).intersects(tline_clt.unary_union)
(tline_sai[~iners]
.rename_geometry('geom')
.to_postgis(
'topo_ligne',
con_bdcen,
schema='sites_topo',
if_exists='append',
index=False,
))
tpoly_clt = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_consultation.sqlite',layer='topo_polygone')
tpoly_sai = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_saisie.sqlite',layer='topo_polygone')
gpd.pd.concat([tpoly_clt,tpoly_sai]).drop_duplicates(subset='geometry')
iners = tpoly_sai.intersects(tpoly_clt.unary_union)
(tpoly_sai[~iners]
.rename_geometry('geom')
.to_postgis(
'topo_polygone',
con_bdcen,
schema='sites_topo',
if_exists='append',
index=False,
))
tpnt_clt = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_consultation.sqlite',layer='topo_point')
tpnt_sai = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_saisie.sqlite',layer='topo_point')
tmp = gpd.pd.concat([tpnt_clt,tpnt_sai])
isdupl = tmp.geometry.to_wkt().duplicated(keep=False)
tmp[isdupl].drop_duplicates(subset='geometry')
gpd.pd.concat([tpnt_clt,tpnt_sai]).drop_duplicates(subset='geometry')
iners = tpnt_sai.intersects(tpnt_clt.unary_union)
(tpnt_sai[~iners]
.rename_geometry('geom')
.to_postgis(
'topo_point',
con_bdcen,
schema='sites_topo',
if_exists='append',
index=False,
))
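# The three read/filter/insert blocks above repeat one pattern; a minimal
# refactor sketch (the helper name is illustrative, not part of the script):
def merge_layer(layer, schema, buffer_dist=0):
    clt = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_consultation.sqlite', layer=layer)
    sai = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_saisie.sqlite', layer=layer)
    # keep only "saisie" features that do not already touch the "consultation" layer
    geoms = sai.buffer(buffer_dist) if buffer_dist else sai.geometry
    new = sai[~geoms.intersects(clt.unary_union)]
    (new.rename_geometry('geom')
        .to_postgis(layer, con_bdcen, schema=schema, if_exists='append', index=False))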
###############
#### HYDRO ####
###############
sql = '''
--ALTER TABLE sites_hydro.hydro_point RENAME column "Geometry" to geom;
ALTER TABLE sites_hydro.hydro_point OWNER TO cen_admin;
CREATE SEQUENCE sites_hydro.hydro_point_id_seq OWNED BY sites_hydro.hydro_point."PK_UID";
SELECT setval('sites_hydro.hydro_point_id_seq', coalesce(max("PK_UID"), 0) + 1, false) FROM sites_hydro.hydro_point;
ALTER TABLE sites_hydro.hydro_point ALTER COLUMN "PK_UID" SET DEFAULT nextval('sites_hydro.hydro_point_id_seq');
'''
with con_bdcen.begin() as cnx:
    cnx.execute(text(sql))
hline_clt = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_consultation.sqlite',layer='hydro_ligne')
hline_sai = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_saisie.sqlite',layer='hydro_ligne')
hline_sai.dropna(subset='geometry',inplace=True)
tmp = gpd.pd.concat([hline_clt,hline_sai])
isdupl = tmp.geometry.to_wkt().duplicated(keep=False)
tmp[isdupl]
hline_clt[hline_clt.geometry.to_wkt().duplicated(keep=False)]
hline_sai[hline_sai.geometry.to_wkt().duplicated(keep=False)]
# NB: the negative buffer empties line geometries here too, so iners is likely all False
iners = hline_sai.buffer(-1).intersects(hline_clt.unary_union)
(hline_sai[~iners]
.rename_geometry('geom')
.to_postgis(
'hydro_ligne',
con_bdcen,
schema='sites_hydro',
if_exists='append',
index=False,
))
res = gpd.read_postgis('SELECT * FROM sites_hydro.hydro_ligne',con_bdcen)
res['geom_wkt'] = res.geom.to_wkt()
res[res.geom_wkt.duplicated(keep=False)].sort_values(['geom_wkt','Type']).PK_UID.to_list()
hpoly_clt = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_consultation.sqlite',layer='hydro_polygone')
hpoly_sai = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_saisie.sqlite',layer='hydro_polygone')
tmp = gpd.pd.concat([hpoly_clt,hpoly_sai])
isdupl = tmp.geometry.to_wkt().duplicated(keep=False)
tmp[isdupl]
iners = hpoly_sai.buffer(-1).intersects(hpoly_clt.unary_union)
# NB: iners is computed but not applied below; every "saisie" polygon is inserted
(hpoly_sai
.rename_geometry('geom')
.to_postgis(
'hydro_polygone',
con_bdcen,
schema='sites_hydro',
if_exists='append',
index=False,
))
res = gpd.read_postgis('SELECT * FROM sites_hydro.hydro_polygone',con_bdcen)
res['geom_wkt'] = res.geom.to_wkt()
res[res.geom_wkt.duplicated(keep=False)].sort_values(['geom_wkt','Type','NOM']).PK_UID.to_list()
hpnt_clt = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_consultation.sqlite',layer='hydro_point')
hpnt_clt.dropna(subset='geometry', inplace=True)
hpnt_sai = gpd.read_file('/home/colas/Documents/5_BDD/7_CEN38/topographie_saisie.sqlite',layer='hydro_point')
hpnt_sai.replace({'':None}, inplace=True)
tmp = gpd.pd.concat([hpnt_clt,hpnt_sai])
isdupl = tmp.geometry.to_wkt().duplicated(keep=False)
tmp[isdupl].drop_duplicates(subset='geometry')
gpd.pd.concat([hpnt_clt,hpnt_sai]).drop_duplicates(subset='geometry')
iners = hpnt_sai.intersects(hpnt_clt.unary_union)
(hpnt_sai
.rename_geometry('geom')
.to_postgis(
'hydro_point',
con_bdcen,
schema='sites_hydro',
if_exists='append',
index=False,
))
res = gpd.read_postgis('SELECT * FROM sites_hydro.hydro_point',con_bdcen)
res['geom_wkt'] = res.geom.to_wkt()
res[res.geom_wkt.duplicated(keep=False)].sort_values(['geom_wkt','Type','TYPE_PRINC']).PK_UID.to_list()
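# If the list above is non-empty, one possible cleanup (a sketch only, kept
# commented out because it deletes rows): keep the first PK_UID per geometry.
# dup_ids = res[res.geom_wkt.duplicated(keep='first')].PK_UID.tolist()
# with con_bdcen.begin() as cnx:
#     cnx.execute(text('DELETE FROM sites_hydro.hydro_point WHERE "PK_UID" IN %s' % (tuple(dup_ids),)))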


@@ -0,0 +1,227 @@
# 2024-11-26
# This script integrates new geometries into the
# sites.r_sites_geom table. The AUTHORS were added
# manually.
import geopandas as gpd
from pycen import con, zh as ZH
from pycen.tools import dropZ, Polygons_to_MultiPolygon
from os import path
from shapely import wkb
from matplotlib import pyplot as plt
import pycen
from sqlalchemy import create_engine
from sqlalchemy.engine import URL
# isin_bdd = True
# # CEN38 OUT database parameters
# user = 'cen_admin'
# pwd = "#CEN38@venir"
# adr = '91.134.194.221'
# base = 'azalee_restore'
# url = URL.create("postgresql+psycopg2", username=user, password=pwd, host=adr, database=base)
# con = create_engine(url)
zh = ZH()
r_geom = zh.get_sitesGeom()
r_geom = gpd.read_postgis('''
SELECT r_sites_geom.* FROM sites.r_sites_geom
JOIN sites.sites ON sites.id = r_sites_geom.id_site AND sites.id_type_milieu = 1
''',con)
r_geom.dropna(how="all",axis=1,inplace=True)
r_geom.drop(columns=['id','date_insert','id_origine','id_lot'],inplace=True)
r_geom.sort_values(['id_site','date'],inplace=True)
r_geom.drop_duplicates(['id_site'],keep='last',inplace=True)
PATH = '/home/colas/Documents/9_PROJETS/1_ZH/MAJ/Actu 2024/CBNA/zh38'
file = 'hab_agreg.gpkg'
df = gpd.read_file(path.join(PATH,file))
df.rename_geometry('geom', inplace=True)
df.geom = [wkb.loads(wkb.dumps(g,output_dimension=2)) for g in df.geom]  # drop the Z dimension via a WKB round-trip
df.dropna(how='all',axis=1,inplace=True)
# gpd.GeoSeries([r_geom.unary_union],crs=2154).contains(df.geom)
# df.contains(r_geom.geom).any()
# res = df.within(r_geom.unary_union)
zh_caract = df[df.zh=='caractéristique'].copy()
zh_caract['surfzhall'] = zh_caract.groupby(['idpolyfinal'])['n06rechab'].transform('sum')  # summed habitat cover per final polygon
lst_idpoly = zh_caract[zh_caract.surfzhall >= 75].idpolyfinal.unique()  # keep polygons whose summed cover reaches 75
# zh_caract.within(r_geom.unary_union)
# zh_caract.contains(r_geom.unary_union).any()
# No intersection, after 2015 - all habitat types
no_inters = df[
(df.lien_zh == 'aucun') & (df.n07anneehab > 2015) #& (df.zh=='caractéristique')
& (df.idpolyfinal.isin(lst_idpoly))
& (~df.n05lbhab.str.contains('invasif',na=False,case=False))
].copy()
no_inters = no_inters.drop(columns=['id_site']).sjoin(r_geom[['id_site','geom']])
no_inters.to_file(path.join(PATH,'inters.gpkg'),driver='GPKG',layer='no_inters75')
len(no_inters.id_site.unique())
# Intersection, after 2015 - all habitat types
inters = df[
(df.lien_zh == 'intersecte') & (df.n07anneehab > 2015) #& (df.zh=='caractéristique')
# & (df.n06rechab.astype(int)>45)
& (df.idpolyfinal.isin(lst_idpoly))
& (~df.n05lbhab.str.contains('invasif',na=False,case=False))
].copy()
inters = inters.drop(columns=['id_site']).sjoin(r_geom[['id_site','geom']])
inters.to_file(path.join(PATH,'inters.gpkg'),driver='GPKG',layer='inters75')
len(inters.id_site.unique())
# Intersection, after 2009 - forest habitats only
inters_foret = df[
(df.lien_zh == 'intersecte') & (df.zh=='caractéristique') #& (df.n07anneehab > 1999)
# & (df.n06rechab.astype(int)>45)
& (df.idpolyfinal.isin(lst_idpoly)) & (df.n05lbhab.str.contains('for.t',na=False,case=False))
& (~df.n05lbhab.str.contains('invasif',na=False,case=False))
& (~df.idpolyfinal.isin(inters.idpolyfinal.tolist()))
].copy()
inters_foret = inters_foret.drop(columns=['id_site']).sjoin(r_geom[['id_site','geom']])
inters_foret.to_file(path.join(PATH,'inters.gpkg'),driver='GPKG',layer='inters_forets')
# Merge geometries
intersF = gpd.pd.concat([inters,inters_foret])
rgeo = (r_geom[r_geom.id_site.isin(intersF.id_site.unique())]
.copy())
for id_site in rgeo.id_site.unique():
    rgeo.loc[rgeo.id_site==id_site,'geom'] = gpd.GeoSeries([
        gpd.GeoSeries(
            [*rgeo[rgeo.id_site==id_site].geom,
             *intersF[intersF.id_site==id_site].geom],
            crs=2154).unary_union
        ],
        crs=2154,
        index=rgeo[rgeo.id_site==id_site].index)
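# Equivalent, shorter route (a sketch; the result should match the loop above):
# merged = (gpd.pd.concat([rgeo[['id_site', 'geom']], intersF[['id_site', 'geom']]])
#           .dissolve(by='id_site')
#           .reset_index())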
rgeo.date = '2024-01-21'
# Keep only the geometries that grew
rgeo2 = rgeo[
rgeo.sort_values('id_site').area
> r_geom[r_geom.id_site.isin(rgeo.id_site.unique())].sort_values('id_site').area
]
# Check which habitats are already described
sql = 'SELECT * FROM zones_humides.r_site_habitat WHERE valid IS True and id_site IN {lst}'.format(lst=tuple(intersF.id_site.unique()))
hab_bdd = gpd.pd.read_sql(sql,con)
insert_hab = gpd.pd.DataFrame()
for id_site in intersF.id_site.unique():
    lst_bddhab = hab_bdd[hab_bdd.id_site==id_site].id_cb.tolist()
    cbn_hab = intersF[(intersF.id_site==id_site)&(~intersF.n24cdcb.astype(str).isin(lst_bddhab))]
    insert_hab = gpd.pd.concat([insert_hab,cbn_hab])
ins_hab = (insert_hab[['n03observat','n04organism','n07anneehab','n24cdcb','id_site']]
.rename(columns={'n24cdcb':'id_cb','n03observat':'observer','n04organism':'organisme','n07anneehab':'annee'})
.replace(
{'AMODEI T':'AMODEI Thomas',
'Lo Parvi':'LO PARVI',
'ACER CAMPESTRE':'Acer Campestre',
'Réflex environnement':'REFLEX Environnement',
'Office National des Forets':'Office National des Forêts',
'Conservatoire botanique national alpin':'Conservatoire Botanique National Alpin',
'Mosaique Environnement, M. Voirin':'Mosaïque Environnement',
'Boucard E.':'BOUCARD E.',
'MALET, A., JOUD. D., LINOSSIER, T.':'MALET A., JOUD Didier, LINOSSIER T.',
'AURAND T., TAIN C':'AURAND Theo, TAIN C.',
'FOLCHER C.':'FOLCHER Caroline',
"Conservatoire d'espaces naturels": 'CEN Isère'
})
.drop_duplicates())
ins_hab['auteur'] = None
ins_hab.loc[ins_hab.observer.isna(),['auteur']] = ins_hab[ins_hab.observer.isna()].organisme
ins_hab.loc[~ins_hab.observer.isna(),['auteur']] = (ins_hab[~ins_hab.observer.isna()].observer +
' (' +ins_hab[~ins_hab.observer.isna()].organisme + ')')
ins_hab['date'] = ins_hab.annee.astype(str) + '-01-01'
ins_hab.drop(columns=['observer','organisme','annee'],inplace=True)
# Insert the geometries into the DB
rgeo2.to_postgis('r_sites_geom',pycen.con,'sites',if_exists='append',index=False)
siteid = gpd.pd.read_sql('''
SELECT DISTINCT ON (id_site) id,id_site
FROM sites.r_sites_geom
WHERE id_site IN {lst}
ORDER BY id_site,id DESC
'''.format(lst=tuple(rgeo2.id_site.unique())),pycen.con)
usr = siteid.drop(columns=['id_site']).copy()
usr.rename(columns={'id':'id_geom_site'},inplace=True)
usr['id_auteur'] = 96
usr.to_sql('r_geomsites_auteur',pycen.con,schema='sites',if_exists='append',index=False)
# Insert the habitats into the DB
siteidgeo = gpd.pd.read_sql('''
SELECT DISTINCT ON (id_site) id id_geom_site,id_site
FROM sites.r_sites_geom
WHERE id_site IN {lst}
ORDER BY id_site,id DESC
'''.format(lst=tuple(ins_hab.id_site.unique())),pycen.con)
ins_habF = ins_hab.merge(siteidgeo, on='id_site',how='left')
ins_habF.sort_values(['id_site','id_cb','date'],inplace=True)
ins_habF.drop_duplicates(subset=['id_site','id_cb'],keep='last',inplace=True)
ins_habF['valid'] = True
ins_habF.to_csv(path.join(PATH,'habs_inserted.csv'))
(ins_habF
.drop(columns=['auteur'])
.to_sql('r_site_habitat',pycen.con,schema='zones_humides',if_exists='append',index=False))
refpers = (pycen.pers.get_auteur2()
.replace({'nom_prenom':{
'ONF38 (ONF)':'Office National des Forêts',
'CBNA (CBNA)':'Conservatoire Botanique National Alpin',
'LO PARVI (LO PARVI)':'LO PARVI',
'CEN Isère (CEN Isère)':'CEN Isère',
'Ecosphère (Ecosphère)':'Ecosphère',
'BURGEAP (BURGEAP)':'BURGEAP',
'Acer Campestre (Acer Campestre)':'Acer Campestre',
'REFLEX Environnement (REFLEX Environnement)':'REFLEX Environnement'
}})
.replace({'nom_prenom':{
'CBNA':'Conservatoire Botanique National Alpin',
'ONF':'Office National des Forêts',
'BÉGUIN Lucile':'BEGUIN Lucile',
'CEN AURA':'Conservatoire Régional des Espaces Naturels Rhône-Alpes',
'CD Isère':"Département de l'Isère"
}},regex=True))
pers_dict = dict(zip(refpers.nom_prenom,refpers.index))
siteidhab = gpd.pd.read_sql('''
SELECT DISTINCT id,id_site,id_cb,date,id_geom_site
FROM zones_humides.r_site_habitat
WHERE (id_site,id_cb,date,id_geom_site) IN {lst}
ORDER BY id_site,id DESC
'''.format(lst=tuple(ins_habF[['id_site','id_cb','date','id_geom_site']].itertuples(index=False,name=None))),pycen.con)
hab_auth = (ins_habF[['id_site','auteur','id_cb','date']]
.astype(str)
.merge(siteidhab.astype(str), on=['id_site','id_cb','date'],how='left')
.rename(columns={'id':'id_sitehab'})
.set_index(['id_sitehab']))
hab_auth['orga'] = (hab_auth.auteur.str.split('(').str[-1]
.str.split(')').str[0])
test_auth = hab_auth.auteur.str.contains(',')
hab_auth1 = (hab_auth[~test_auth].auteur
.replace(pers_dict)
.to_frame('id_auteur'))
hab_auth_tmp = (hab_auth[test_auth]
.auteur.str.split('(').str[0]
.str.split(',')
.explode()
.str.strip()
.to_frame('auteur')
.merge(hab_auth.loc[test_auth,['orga']],left_index=True,right_index=True))
hab_auth2 = ((hab_auth_tmp.auteur+' ('+hab_auth_tmp.orga+')')
.replace({
'BURGEAP (BURGEAP, ECOSPHERE)':'BURGEAP',
'ECOSPHERE (BURGEAP, ECOSPHERE)':'Ecosphère',})
.replace(pers_dict)
.to_frame('id_auteur'))
(gpd.pd.concat([hab_auth1,hab_auth2])
.reset_index(drop=False)
.to_sql('r_rsitehab_auteur',pycen.con,schema='zones_humides',if_exists='append',index=False))


@@ -0,0 +1,107 @@
# 2024-11-26
# This script integrates new geometries into the
# sites.r_sites_geom table. The AUTHORS were added
# manually.
import geopandas as gpd
from pycen import con, zh as ZH
from pycen.tools import dropZ, Polygons_to_MultiPolygon
from os import path
from matplotlib import pyplot as plt
zh = ZH()
r_geom = zh.get_sitesGeom()
r_geom = gpd.read_postgis('''
SELECT r_sites_geom.* FROM sites.r_sites_geom
JOIN sites.sites ON sites.id = r_sites_geom.id_site AND sites.id_type_milieu = 1
''',con)
r_geom.dropna(how="all",axis=1,inplace=True)
r_geom.drop(columns=['id','date_insert','id_origine','id_lot'],inplace=True)
PATH = '/home/colas/Documents/9_PROJETS/1_ZH/MAJ/Actu 2024/CBNA/zh38'
file = 'zh_to_integ.gpkg'
df = gpd.read_file(path.join(PATH,file))
df.rename_geometry('geom', inplace=True)
df = dropZ(df)
# Drop the geometries flagged 'A SUPP'
del_id=df[df.lien_zh=='A SUPP'].index.tolist()
df.drop(del_id,inplace=True)
lstdel_id2 = [
'38RH0171','38RH0066','38RH0077','38BO0144','38BO0109',
'38BO0105','38RH0012','38BO0139','38BO0200','38RH0292',
'38BO0105 + combl vid'
]
del_id2 = df[df.lien_zh.isin(lstdel_id2)].index.tolist()
df.drop(del_id2,inplace=True)
lstdel_id3 = [
'38RH0293', '38RH0297', '38RH0296', '38RH0294', '38BO0331',
'38RH0295'
]
del_id3 = df[df.lien_zh.isin(lstdel_id3)].index.tolist()
df.drop(del_id3,inplace=True)
ins = df[df.lien_zh!='aucun'].copy()
ins.lien_zh.unique()
# # Done
# r_geom[r_geom.id_site=='38RH0171']
# r_geom[r_geom.id_site=='38RH0066']
# r_geom[r_geom.id_site=='38RH0077']
# r_geom[r_geom.id_site=='38BO0144']
# r_geom[r_geom.id_site=='38BO0109']
# r_geom[r_geom.id_site=='38BO0105']
# r_geom[r_geom.id_site=='38RH0012']
# # Done, do not rerun.
# BO139 = r_geom[r_geom.id_site=='38BO0139'].copy()
# BO200 = r_geom[r_geom.id_site=='38BO0200'].copy()
# RH292 = r_geom[r_geom.id_site=='38RH0292'].copy()
# BO139.date = '2024-11-26'
# BO200.date = '2024-11-26'
# RH292.date = '2024-11-26'
# BO139.geom = gpd.GeoSeries([
# BO139.geom.values[-1],
# *ins[ins.lien_zh=='38BO0139'].geom.values
# ],crs=2154).unary_union
# BO200.geom = gpd.GeoSeries([
# BO200.geom.values[-1],
# *ins[ins.lien_zh=='38BO0200'].geom.values
# ],crs=2154).unary_union
# RH292.geom = gpd.GeoSeries([
# RH292.geom.values[-1],
# *ins[ins.lien_zh=='38RH0292'].geom.values
# ],crs=2154).unary_union
# Polygons_to_MultiPolygon(BO139).to_postgis('r_sites_geom',con,schema='sites',if_exists='append',index=False)
# Polygons_to_MultiPolygon(BO200).to_postgis('r_sites_geom',con,schema='sites',if_exists='append',index=False)
# Polygons_to_MultiPolygon(RH292).to_postgis('r_sites_geom',con,schema='sites',if_exists='append',index=False)
# Process the id3 ZH entries
gdf = gpd.GeoDataFrame()
for i in ins.lien_zh.unique():
    if i == 'A prospecter':
        continue
    tmp = gpd.GeoDataFrame(
        {'site_code':[i]},
        geometry=[ins[ins.lien_zh==i].unary_union],
        crs=2154
    ).rename_geometry('geom')
    gdf = gpd.pd.concat([gdf,tmp])
# Process the id4 ZH entries, after sorting the data with Mathieu Juton
df = gpd.read_file(path.join(PATH,file),layer='zh_to_integ')
df.rename_geometry('geom', inplace=True)
del_id=df[df.lien_zh=='A SUPP'].index.tolist()
df.drop(del_id,inplace=True)
id_updtgeo = [
'38BI0086', '38BO0128', '38BO0160 + remove trou', '38BO0181',
'38BO0182', '38CG0001', '38CG0002', '38CG0019',
'38CG0023 + bouche trou', '38CG0025 + remove vide', '38CG0027',
'38CG0031', '38CG0147', '38CG0148', '38RH0054', '38RH0076',
'38RH0122', '38RH0123 + remove vide', '38RH0132 + remove vide',
'38RH0135 + remove route', '38RH0143', '38RH0148',
'38RH0148 + remove route'
]
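# The entries above mix a site code with a free-text action note; a small
# parsing sketch (the column names are illustrative, not from the script):
# updt = gpd.pd.DataFrame({'raw': id_updtgeo})
# updt['site_code'] = updt.raw.str.split(' +', regex=False).str[0].str.strip()
# updt['action'] = updt.raw.str.split(' +', regex=False).str[1].str.strip()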


@@ -0,0 +1,84 @@
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# SHELL COMMANDS (run these first, then paste this script):
# source venv/bin/activate
# flask shell
import os
import logging
from zipfile import ZipFile
import importlib.resources
from apptax.taxonomie.commands.utils import truncate_bdc_statuts
from apptax.database import db
from apptax.taxonomie.commands.utils import (
    copy_from_csv,
    refresh_taxref_vm,
    populate_bdc_statut_cor_text_area,
)
from sqlalchemy import text  # wrap raw SQL so it also runs under SQLAlchemy >= 2
logger = logging.getLogger()
BASE_URL = "/home/geonatureadmin/tmp"
zipfile = "BDC-Statuts-v17.zip"
status_types_file = "BDC-Statuts-v17/BDC_STATUTS_TYPES_17.csv"
status_file = "BDC-Statuts-v17/BDC_STATUTS_17.csv"
truncate_bdc_statuts()
db.session.commit()
archive = ZipFile(os.path.join(BASE_URL, zipfile), "r")
with archive.open(status_file) as f:
    logger.info("Insert BDC statuts…")
    copy_from_csv(
        f,
        "bdc_statut",
        dest_cols=(
            "cd_nom",
            "cd_ref",
            "cd_sup",
            "cd_type_statut",
            "lb_type_statut",
            "regroupement_type",
            "code_statut",
            "label_statut",
            "rq_statut",
            "cd_sig",
            "cd_doc",
            "lb_nom",
            "lb_auteur",
            "nom_complet_html",
            "nom_valide_html",
            "regne",
            "phylum",
            "classe",
            "ordre",
            "famille",
            "group1_inpn",
            "group2_inpn",
            "lb_adm_tr",
            "niveau_admin",
            "cd_iso3166_1",
            "cd_iso3166_2",
            "full_citation",
            "doc_url",
            "thematique",
            "type_value",
        ),
    )
logger.info("Populate BDC statuts…")
db.session.execute(
    text(importlib.resources.read_text("apptax.migrations.data", "taxonomie_bdc_statuts.sql"))
)
populate_bdc_statut_cor_text_area(logger)
refresh_taxref_vm()
db.session.commit()
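# Optional check (a sketch): confirm the rows landed before leaving the shell.
print(db.session.execute(text("SELECT count(*) FROM taxonomie.bdc_statut")).scalar())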
exit()
# flask taxref link-bdc-statut-to-areas
# flask taxref enable-bdc-statut-text --clean -d 38

tmp/PG_CHARVAS.py Normal file

@@ -0,0 +1,98 @@
import geopandas as gpd
import json
from shapely.geometry import shape
from pycen import con_gn
PATH = '/home/colas/Documents/tmp/CHARVAS/'
gnaura = gpd.pd.read_csv(PATH+'GN_AURA_synthese_observations_2025-01-17T09_01_44.977Z.csv',sep=';')
gncen38 = gpd.pd.read_csv(PATH+'GN_CEN38_synthese_observations_2025-01-17T09_02_45.818Z.csv',sep=';',dtype={'niveau_precision_diffusion':str})
gnaura = gpd.GeoDataFrame(gnaura, geometry=gpd.points_from_xy(gnaura.x_centroid_4326,gnaura.y_centroid_4326),crs=4326)
gncen38 = gpd.GeoDataFrame(gncen38, geometry=gpd.points_from_xy(gncen38.x_centroid_4326,gncen38.y_centroid_4326),crs=4326)
gnaura.dropna(how='all',inplace=True,axis=1)
gncen38.dropna(how='all',inplace=True,axis=1)
# Drop the CEN38 records from gnaura (already present in gncen38)
lstdrop = gnaura[gnaura.uuid_perm_sinp.isin(gncen38.uuid_perm_sinp)].index
gnaura.drop(lstdrop,inplace=True)
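# Sanity check (a sketch): after the drop no uuid should be shared, so the
# concat below cannot double-count observations.
assert not gnaura.uuid_perm_sinp.isin(gncen38.uuid_perm_sinp).any()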
# Fill in the phylogeny of the BIODIVAURA records
sql = """
SELECT
t.cd_ref,
t.regne,
-- t.phylum,
-- t.classe,
-- t.ordre,
-- t.famille,
t.group1_inpn,
t.group2_inpn,
--t.nom_vern,
--t.nom_complet,
--t.nom_valide,
--t.lb_nom,
--s.*
--s.rq_statut,
--s.code_statut,
--s.cd_type_statut,
--s.label_statut,
--s.full_citation,
--s.doc_url
t.group3_inpn
FROM taxonomie.taxref t
--JOIN taxonomie.v_bdc_status s USING (cd_nom)
WHERE t.cd_ref IN {cd_nom}
;""".format(cd_nom = tuple(gnaura.cd_ref.unique()))
phylo = gpd.pd.read_sql_query(sql,con_gn)
gnaura = gnaura.merge(phylo,how='left',left_on='cd_ref',right_on='cd_ref',copy=False)
# Drop the polygon records from gnaura
# gnaura = gnaura[~gnaura.geojson_4326.str.contains('polygon',case=False,na=False)]
# Compile both sources
compil = gpd.pd.concat([gncen38,gnaura])
aura_imprecise = compil[compil.geojson_4326.str.contains('polygon',case=False,na=False)].copy()
compilF = compil[~compil.geojson_4326.str.contains('polygon',case=False,na=False)].copy()
compilF.to_csv(PATH+'compile_38&AURA_synthese_observations_2025-01-17.csv',sep=';',index=False)
compilF.to_crs(2154).to_file(PATH+'compile_38&AURA_synthese_observations.geojson')
# Write out the imprecise (polygon) records
geom = [shape(json.loads(i)) for i in aura_imprecise.geojson_4326]
gdf_imprecis = gpd.GeoDataFrame(aura_imprecise,geometry=geom,crs=4326)
aura_imprecise.to_csv(PATH+'AURA_synthese_observations_imprecise_2025-01-17.csv',sep=';',index=False)
gdf_imprecis.to_crs(2154).to_file(PATH+'AURA_synthese_observations_imprecise.geojson')
# Build the liste_sp file
liste_sp = compilF[['cd_ref','date_debut','date_fin','nom_valide','nom_vernaculaire','regne','group1_inpn','group2_inpn','group3_inpn','classe','ordre','famille','nombre_min','nombre_max','observateurs','fournisseur','communes','x_centroid_4326','y_centroid_4326']].copy()
liste_sp.sort_values(by=['cd_ref','date_debut'],inplace=True)
liste_sp.drop_duplicates(subset='cd_ref',keep='last',inplace=True)
liste_sp.to_excel(PATH+'liste_sp_CHAR.xlsx',index=False,sheet_name='liste_sp')
# Intersect with the site polygons
gdf = gpd.read_file(PATH+'emprise_extrac_explode_sp.gpkg')[['nom','geometry']]
dfF = (compilF.to_crs(2154).sjoin(gdf, how='left')
.drop(columns=['geometry','index_right',"Unnamed: 0"],errors='ignore')
)
dfF = dfF[[
'nom','id_synthese', 'date_debut', 'date_fin', 'heure_debut',
'heure_fin', 'cd_nom', 'cd_ref', 'nom_valide', 'nom_vernaculaire',
'nom_cite', 'regne', 'group1_inpn', 'group2_inpn', 'group3_inpn',
'classe', 'ordre', 'famille', 'rang_taxo', 'nombre_min', 'nombre_max',
'alti_min', 'alti_max', 'observateurs', 'determinateur', 'communes',
'geometrie_wkt_4326', 'x_centroid_4326', 'y_centroid_4326', 'nom_lieu',
'comment_releve', 'comment_occurrence', 'validateur',
'niveau_validation', 'jdd_nom', 'jdd_uuid', 'jdd_id', 'ca_nom',
'ca_uuid', 'ca_id', 'precision_geographique', 'nature_objet_geo',
'type_regroupement', 'technique_observation', 'biologique_statut',
'etat_biologique', 'biogeographique_statut', 'naturalite',
'preuve_existante', 'niveau_precision_diffusion', 'stade_vie', 'sexe',
'objet_denombrement', 'type_denombrement', 'niveau_sensibilite',
'statut_observation', 'floutage_dee', 'statut_source', 'type_info_geo',
'methode_determination', 'comportement', 'id_origine', 'uuid_perm_sinp',
'uuid_perm_grp_sinp', 'date_creation', 'date_modification',
'champs_additionnels', 'fournisseur', 'geojson_4326',
'statut_biologique', 'type_source', 'type_precision',
'sensibilite', 'confidentialite'
]]
dfF.rename(columns={'nom':'nom_site'},inplace=True)
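# Note (a sketch): a left sjoin can duplicate a record that falls in several
# site polygons; if that matters, check before writing:
# assert dfF.id_synthese.is_unique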
dfF.to_csv(PATH+'compile_38&AURA_synthese_observations_2025-01-17.csv', sep=";")  # NB: overwrites the export written above, now with nom_site

tmp/import_ZHP_gerecko.py Normal file

@@ -0,0 +1,34 @@
import geopandas as gpd
from os import path
from pycen import con
PATH = '/home/colas/Documents/9_PROJETS/1_ZH/MAJ/GERECO - Schéma_strat_ZH_Huez/Inventaire_2016_GERECO'
F1 = 'Inf1000m2_NonSurfaciques.shp'
F2 = 'Inf1000m2_Surfaciques.shp'
df1 = gpd.read_file(path.join(PATH,F1))
del df1['id']
df1.rename_geometry('geom',inplace=True)
df2 = gpd.read_file(path.join(PATH,F2))
del df2['id']
del df2['SurfHa']
df2['geom'] = df2.centroid  # replace each polygon by its centroid
df2.set_geometry('geom', crs=2154, inplace=True)
del df2['geometry']
df = (gpd.pd.concat([df1,df2])
.rename(columns={'NOM_ZH':'comment'}))
df['qui'] = 'GERECKO'
df['source'] = "Bureau d'étude"
df['ponctuelle'] = 'Oui'
df['type'] = 'Inconnu'
df['classe'] = 'Inconnu'
df['x'] = df.geom.x
df['y'] = df.geom.y
df['datemodif'] = '2016-01-01'
df['observer'] = 'GERECKO'
df.to_postgis(
'zh_ponctuelle',con,'zones_humides',if_exists='append', index=False
)
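# Post-insert check (a sketch): read the rows back and compare counts.
# n = gpd.pd.read_sql("SELECT count(*) FROM zones_humides.zh_ponctuelle WHERE qui = 'GERECKO'", con).iloc[0, 0]
# assert n >= len(df)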