Commit dade3b27 authored by Miriam Redi's avatar Miriam Redi Committed by GitHub

Merge pull request #4 from gmodena/T274798-include-all-unillustrated-articles

T274798 include all unillustrated articles
parents 6b12aad4 be18f56a
venv: requirements.txt
	test -d venv || virtualenv --python=$(shell which python3) venv
	. venv/bin/activate; pip install -Ur requirements.txt;

test: venv
	. venv/bin/activate; pytest --cov etl
%% Cell type:code id: tags:
``` python
import pyspark
import re
import pyspark.sql
import pickle
import pandas as pd
import math
import numpy as np
import random
import requests
#from bs4 import BeautifulSoup
import json
import os
```
%% Cell type:code id: tags:
``` python
!which python
```
%% Output
/srv/home/clarakosi/venv/bin/python
%% Cell type:code id: tags:
``` python
qids_and_properties={}
```
%% Cell type:code id: tags:parameters
``` python
# Pass in directory to place output files
output_dir = 'Output'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Pass in the full snapshot date
snapshot = '2020-12-28'
# Allow the passing of a single language as a parameter
language = 'arwiki'
```
%% Cell type:code id: tags:
``` python
# languages=['enwiki','arwiki','kowiki','cswiki','viwiki','frwiki','fawiki','ptwiki','ruwiki','trwiki','plwiki','hewiki','svwiki','ukwiki','huwiki','hywiki','srwiki','euwiki','arzwiki','cebwiki','dewiki','bnwiki'] #language editions to consider
#val=100 #threshold above which we consider images as non-icons
languages=[language]
```
%% Cell type:code id: tags:
``` python
reg = r'^([\w]+-[\w]+)'
short_snapshot = re.match(reg, snapshot).group()
short_snapshot
```
%% Output
'2020-12'
%% Cell type:code id: tags:
``` python
len(languages)
```
%% Output
1
%% Cell type:code id: tags:
``` python
def get_threshold(wiki_size):
    #tune th to trade off precision vs recall. recommended val for accuracy = 5
    sze, th, lim = 50000, 15, 4
    if (wiki_size >= sze):
        #if wiki_size >= base size sze, scale the threshold by log10(wiki_size/sze) + 1
        return (math.log(wiki_size/sze, 10)+1)*th
    #else scale th down by the ratio wiki_size/sze, with a minimum possible value of th/lim
    return max((wiki_size/sze) * th, th/lim)
```
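%% Cell type:markdown id: tags:
A quick sanity check of `get_threshold` (illustrative only; the two smaller wiki sizes below are made-up values):
%% Cell type:code id: tags:
``` python
# Hypothetical wiki sizes, to show how the icon threshold scales:
# below 50k pages it shrinks linearly (with a floor of th/lim = 3.75),
# above 50k it grows logarithmically.
for ws in [1000, 50000, 1097236]:
    print(ws, get_threshold(ws))
```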
%% Cell type:code id: tags:
``` python
val={}
total={}
for wiki in languages:
querytot="""SELECT COUNT(*) as c
FROM wmf_raw.mediawiki_page
WHERE page_namespace=0
AND page_is_redirect=0
AND snapshot='"""+short_snapshot+"""'
AND wiki_db='"""+wiki+"""'"""
wikisize = spark.sql(querytot).toPandas()
val[wiki]=get_threshold(int(wikisize['c']))
total[wiki]=int(wikisize['c'])
```
%% Cell type:code id: tags:
``` python
val
```
%% Output
{'arwiki': 35.11995065862349}
%% Cell type:code id: tags:
``` python
total
```
%% Output
{'arwiki': 1097236}
%% Cell type:code id: tags:
``` python
wikisize
```
%% Output
c
0 1097236
%% Cell type:markdown id: tags:
The query below retrieves, for each unillustrated article: its Wikidata ID, the image of the Wikidata ID (if any), the Commons category of the Wikidata ID (if any), and the lead images of the articles in other languages (if any).
* `allowed_images` contains the list of icons (images appearing in more than `val` articles)
* `image_pageids` contains the list of illustrated articles (articles with images that are not icons)
* `noimage_pages` contains the pageid and Qid of unillustrated articles
* `qid_props` contains, for each Qid in `noimage_pages`, the values of the following properties, when present:
  * P18: the item's image
  * P373: the item's Commons category
  * P31: the item's "instance of" property
* `category_image_list` contains the list of all images in a Commons category referenced in `qid_props`
* `lan_page_images` contains the list of lead images in Wikipedia articles in all languages linked to each Qid
* `qid_props_with_image_list` is `qid_props` plus the list of images in the Commons category linked to the Wikidata item
%% Cell type:code id: tags:
``` python
for wiki in languages:
print(wiki)
queryd="""WITH allowed_images AS
(
SELECT il_to
FROM wmf_raw.mediawiki_imagelinks
WHERE il_from_namespace=0
AND snapshot='"""+short_snapshot+"""'
AND wiki_db='"""+wiki+"""'
AND il_to not like '%\"%' AND il_to not like '%,%'
GROUP BY il_to
HAVING COUNT(il_to)>"""+str(val[wiki])+"""),
image_pageids AS
(SELECT DISTINCT il_from as pageid
FROM wmf_raw.mediawiki_imagelinks il1
LEFT ANTI JOIN allowed_images
ON allowed_images.il_to=il1.il_to
WHERE il1.il_from_namespace=0
AND il1.wiki_db='"""+wiki+"""'
AND il1.snapshot='"""+short_snapshot+"""'
),
pageimage_pageids AS
(
SELECT DISTINCT pp_page as pageid
FROM wmf_raw.mediawiki_page_props pp
WHERE pp.wiki_db ='"""+wiki+"""'
AND pp.snapshot='"""+short_snapshot+"""'
AND pp_propname in ('page_image','page_image_free')),
all_image_pageids as(
SELECT pageid
FROM image_pageids
UNION
SELECT pageid
FROM pageimage_pageids
),
noimage_pages as
(
SELECT wipl.item_id,p.page_id,p.page_title,page_len
FROM wmf_raw.mediawiki_page p
JOIN wmf.wikidata_item_page_link wipl
ON p.page_id=wipl.page_id
LEFT ANTI JOIN all_image_pageids
on all_image_pageids.pageid=wipl.page_id
WHERE p.page_namespace=0
AND page_is_redirect=0 AND p.wiki_db='"""+wiki+"""'
AND p.snapshot='"""+short_snapshot+"""'
AND wipl.snapshot='"""+snapshot+"""'
AND wipl.page_namespace=0
AND wipl.wiki_db='"""+wiki+"""'
ORDER BY page_len desc
),
qid_props AS
(
SELECT we.id,label_val,
MAX(CASE WHEN claim.mainSnak.property = 'P18' THEN claim.mainSnak.datavalue.value ELSE NULL END) AS hasimage,
MAX(CASE WHEN claim.mainSnak.property = 'P373' THEN REPLACE(REPLACE(claim.mainSnak.datavalue.value,'\"',''),' ','_') ELSE NULL END) AS commonscategory,
MAX(CASE WHEN claim.mainSnak.property = 'P31' THEN claim.mainSnak.datavalue.value ELSE NULL END) AS instanceof
FROM wmf.wikidata_entity we
JOIN noimage_pages
ON we.id=noimage_pages.item_id
LATERAL VIEW explode(labels) t AS label_lang,label_val
LATERAL VIEW OUTER explode(claims) c AS claim
WHERE t.label_lang='en'
AND typ='item'
AND snapshot='"""+snapshot+"""'
AND claim.mainSnak.property in ('P18','P31','P373')
GROUP BY id,label_val
),
category_image_list AS
(
SELECT cl_to,concat_ws(';',collect_list(mp.page_title)) as category_imagelist
from qid_props
left join wmf_raw.mediawiki_categorylinks mc
on qid_props.commonscategory=mc.cl_to
join wmf_raw.mediawiki_page mp
on mp.page_id=mc.cl_from
WHERE mp.wiki_db ='commonswiki'
AND mp.snapshot='"""+short_snapshot+"""'
AND mp.page_namespace=6
AND mp.page_is_redirect=0
AND mc.snapshot='"""+short_snapshot+"""'
AND mc.wiki_db ='commonswiki'
AND mc.cl_type='file'
group by mc.cl_to
),
qid_props_with_image_list AS
(
SELECT id, label_val, hasimage, commonscategory, instanceof,category_imagelist
from qid_props
left join category_image_list
on qid_props.commonscategory=category_image_list.cl_to
),
lan_page_images AS
(
SELECT nip.item_id,nip.page_id,nip.page_title,nip.page_len,collect_list(concat(pp.wiki_db,': ',pp.pp_value)) as lan_images
FROM noimage_pages nip
LEFT JOIN wmf.wikidata_item_page_link wipl
LEFT JOIN wmf_raw.mediawiki_page_props pp
LEFT JOIN wmf_raw.mediawiki_page mp
ON nip.item_id=wipl.item_id
AND wipl.page_id=pp.pp_page
AND wipl.wiki_db=pp.wiki_db
AND mp.page_title=pp.pp_value
WHERE wipl.wiki_db !='"""+wiki+"""'
AND wipl.snapshot='"""+snapshot+"""'
AND wipl.page_namespace=0
AND pp.snapshot='"""+short_snapshot+"""'
AND pp_propname in ('page_image','page_image_free')
AND mp.wiki_db ='commonswiki'
AND mp.snapshot='"""+short_snapshot+"""'
AND mp.page_namespace=6
AND mp.page_is_redirect=0
GROUP BY nip.item_id,nip.page_id,nip.page_title,nip.page_len
),
joined_lan_page_images AS
(
SELECT nip.item_id,nip.page_id,nip.page_title,nip.page_len, lpi.lan_images
from noimage_pages nip
LEFT JOIN lan_page_images lpi
on nip.item_id=lpi.item_id
)
SELECT * from joined_lan_page_images
LEFT JOIN qid_props_with_image_list
on qid_props_with_image_list.id=joined_lan_page_images.item_id
"""
qid_props = spark.sql(queryd).toPandas()
qids_and_properties[wiki]=qid_props
```
%% Output
arwiki
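%% Cell type:markdown id: tags:
As a quick check (not in the original run), the joined frame should expose roughly these columns from the CTEs above: `item_id`, `page_id`, `page_title`, `page_len`, `lan_images`, `id`, `label_val`, `hasimage`, `commonscategory`, `instanceof`, `category_imagelist`.
%% Cell type:code id: tags:
``` python
# Illustrative inspection of the query result for the current wiki.
qids_and_properties[wiki].columns
```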
%% Cell type:markdown id: tags:
Below, I create separate tables according to the source from which a candidate image is retrieved (Wikidata image, Commons category, or interlanguage links).
%% Cell type:code id: tags:
``` python
hasimage={}
commonscategory={}
lanimages={}
allimages={}
for wiki in languages:
print(wiki)
hasimage[wiki]=qids_and_properties[wiki][qids_and_properties[wiki]['hasimage'].astype(str).ne('None')]
commonscategory[wiki]=qids_and_properties[wiki][qids_and_properties[wiki]['category_imagelist'].astype(str).ne('None')]
lanimages[wiki]=qids_and_properties[wiki][qids_and_properties[wiki]['lan_images'].astype(str).ne('None')]
print("number of unillustrated articles: "+str(len(qids_and_properties[wiki])))
print("number of articles items with Wikidata image: "+str(len(hasimage[wiki])))
print("number of articles items with Wikidata Commons Category: "+str(len(commonscategory[wiki])))
print("number of articles items with Language Links: "+str(len(lanimages[wiki])))
####
allimages[wiki]=qids_and_properties[wiki][(qids_and_properties[wiki]['hasimage'].astype(str).ne('None')) | (qids_and_properties[wiki]['lan_images'].astype(str).ne('None')) | (qids_and_properties[wiki]['category_imagelist'].astype(str).ne('None'))]
```
%% Output
arwiki
number of unillustrated articles: 583972
number of articles with a Wikidata image: 6880
number of articles with a Wikidata Commons category: 22288
number of articles with language links: 95147
%% Cell type:markdown id: tags:
Below are the two functions that select images depending on the source (see the toy example after the definitions):
* `select_image_language` takes as input the list of images from articles in multiple languages and selects the one used most often across languages (after some major filtering)
* `select_image_category` selects at random one of the images in the Commons category linked to the Wikidata item.
%% Cell type:code id: tags:
``` python
def image_language_checks(iname):
    #substrings that flag icons, placeholders and other unwanted images
    substring_list=['.svg','flag','noantimage','no_free_image','image_manquante',
                    'replace_this_image','disambig','regions','map','default',
                    'defaut','falta_imagem_','imageNA','noimage','noenzyimage']
    iname=iname.lower()
    if any(map(iname.__contains__, substring_list)):
        return False
    return True
def select_image_language(imagelist):
    counts={} #counts of image occurrences across languages
    languages={} #contains which languages cover a given image
    #for each image
    for image in imagelist:
        data=image.strip().split(' ') #each entry has the form '<wiki>: <image name>'
        ###
        if len(data)==2: #if we actually have 2 fields
            iname=data[1].strip()
            lan=data[0].strip()[:-1] #drop the trailing ':' from the wiki name
            ###
            if iname not in counts: #if this image does not exist in our counts yet, initialize counts
                if not image_language_checks(iname): #skip invalid/placeholder image names
                    continue
                # urll = 'https://commons.wikimedia.org/wiki/File:'+iname.replace(' ','_')+'?uselang='+language
                #page = requests.get(urll)
                #if page.status_code == 404:
                #    print (urll)
                #    continue
                counts[iname]=1
                languages[iname]=[]
            else:
                counts[iname]+=1
            languages[iname].append(lan)
    return languages
def select_image_category(imagelist):
    #pick one file name at random from the ';'-separated list of images in the Commons category
    data=list(imagelist.strip().split(';'))
    data=[d for d in data if d.find('.')!=-1] #keep only entries that look like file names
    return random.choice(data)
```
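%% Cell type:markdown id: tags:
A toy example of the two selectors (the image and wiki names below are made up for illustration):
%% Cell type:code id: tags:
``` python
# select_image_language expects entries of the form '<wiki>: <image name>';
# the flag image is filtered out by image_language_checks.
toy_lan_images = ['frwiki: Cat.jpg', 'dewiki: Cat.jpg', 'ptwiki: Flag_of_X.png']
print(select_image_language(toy_lan_images))
# -> {'Cat.jpg': ['frwiki', 'dewiki']}

# select_image_category expects a ';'-separated list of Commons file names.
toy_category = 'Cat.jpg;Dog.jpg;Subcategory_without_extension'
print(select_image_category(toy_category))
# -> one of 'Cat.jpg' / 'Dog.jpg', chosen at random
```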
%% Cell type:markdown id: tags:
Below is the priority assignment process:
* If the article has a Wikidata image (not a flag, as this is likely a duplicate), give it priority 1
* Choose up to 3 images among the ones from related Wikipedia articles in other languages, using the `select_image_language` function, and give them priority 2.x, where `x` is a ranking given by the number of languages using that image
* If the article has an associated Commons category, call the `select_image_category` function to randomly select up to 3 images from that category, and give them priority 3
%% Cell type:code id: tags:
``` python
stats={}
data_small={}
####
for wiki in languages:
selected=[] #stores selected images for each article
notes=[] #stores information about the source where the candidate image was drawn from
wikis=[]
data_small[wiki]=allimages[wiki].sample(len(allimages[wiki]))
language=wiki.replace('wiki','')
#rtl=direction[wiki] #right to left -> rtl; left to right -> ltr
for wikipedia in data_small[wiki]['lan_images']:
if str(wikipedia)!='None':
lg=select_image_language(wikipedia)
if len(lg)==0:
lg=None
wikis.append(lg)
else:
wikis.append(None)
data_small[wiki]['wikipedia_imagelist']=wikis
for wikidata,commons,wikipedia,jdata in zip(data_small[wiki]['hasimage'],data_small[wiki]['category_imagelist'],data_small[wiki]['wikipedia_imagelist'],data_small[wiki]['instanceof']):
        if jdata is not None:
            qid=json.loads(jdata)["numeric-id"]
            #skip disambiguation pages (Q4167410), years (Q577) and list articles (Q13406463)
            if qid in [4167410,577,13406463]:
                selected.append(None)
                notes.append(None)
                continue
image=None
tier={}
note={}
if str(commons)!='None':
for i in range(min(len(list(commons.strip().split(';'))),3)):
image=select_image_category(commons)
tier[image]=3
note[image]='image was found in the Commons category linked in the Wikidata item'
###
if str(wikipedia) !='None':
index=np.argsort([len(l) for l in list(wikipedia.values())])
#print(wikipedia)
for i in range(min(len(wikipedia),3)):
image=list(wikipedia.keys())[index[-(i+1)]]
tier[image]=2+(float(i)/10)
note[image]='image was found in the following Wikis: '+', '.join(wikipedia[image])
if str(wikidata)!='None' and wikidata.lower().find('flag') ==-1:
image=wikidata[1:-1]
tier[image]=1
note[image]='image was in the Wikidata item'
selected.append(tier if len(tier)>0 else None)
notes.append(note if len(note)>0 else None)
# if image is not None:
# properties.append(get_properties(image,language,rtl,page))
# else:
# properties.append([None,None,None,None,None,None,None,None,None])
#updating table
data_small[wiki]['selected']=selected
data_small[wiki]['notes']=notes
data_small[wiki]['good_interlinks']=wikis
data_small[wiki]=data_small[wiki][data_small[wiki]['selected'].astype(str).ne('None')]
#print("total number of articles: "+str(total[wiki]))
#print("number of unillustrated articles: "+str(len(qids_and_properties[wiki])))
#print("number of articles with at least 1 recommendation: "+str(len(data_small[wiki])))
#stats[wiki]=[total[wiki],len(qids_and_properties[wiki]),len(data_small[wiki]),len(all3images),len(hasimage),len(commonscategory),len(lanimages)]
```
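%% Cell type:markdown id: tags:
For reference, a small inspection cell (not in the original notebook): the per-article candidate dictionaries map image name to priority tier (`selected`) and to a provenance note (`notes`).
%% Cell type:code id: tags:
``` python
# Peek at a few articles that received candidates.
data_small[wiki][['page_title','selected','notes']].head(3)
```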
%% Cell type:code id: tags:
``` python
#the final selection process: select up to 3 candidate images per article and their relative
#confidence scores (1=high, 2=medium, 3=low), based on the priorities assigned earlier
for wiki in languages:
top_candidates=[]
for selected,notes in zip (data_small[wiki]['selected'],data_small[wiki]['notes']):
if selected is not None:
index=np.argsort([l for l in list(selected.values())])
candidates=[]
#print(wikipedia)
for i in range(min(len(index),3)):
image=list(selected.keys())[index[i]]
rating=selected[image]
note=notes[image]
candidates.append({'image':image,'rating':rating,'note':note})
top_candidates.append(candidates)
else:
top_candidates.append(None)
data_small[wiki]['top_candidates']=top_candidates
data_small[wiki][['item_id','page_id','page_title','top_candidates']].to_csv(output_dir+'/'+wiki+'_'+snapshot+'_wd_image_candidates.tsv',sep='\t')
```
......
import pytest
from etl.transform import RawDataset
@pytest.fixture(scope="session")
def raw_data(spark_session):
return spark_session.createDataFrame(
[
(
"0",
"Q1234",
"44444",
"Some page with suggestions",
'[{"image": "image1.jpg", "rating": 2.0, "note": "image was found in the following Wikis: ruwiki"}]',
"arwiki",
"2020-12",
),
(
"1",
"Q56789",
"55555",
"Some page with no suggestion",
None,
"arwiki",
"2020-12"
)
],
RawDataset.schema,
)
......@@ -24,7 +24,8 @@ ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
WITH SERDEPROPERTIES (
'field.delim'='\t',
'serialization.format'='\t')
'serialization.format'='\t',
'serialization.null.format'='""')
STORED AS INPUTFORMAT
'org.apache.hadoop.mapred.TextInputFormat'
OUTPUTFORMAT
......@@ -32,5 +33,6 @@ OUTPUTFORMAT
LOCATION
'hdfs://analytics-hadoop/user/${hiveconf:username}/imagerec_prod/data';
-- Update partition metadata
MSCK REPAIR TABLE `imagerec_prod`;
......@@ -52,8 +52,9 @@ class ImageRecommendation:
)
def transform(self) -> DataFrame:
return (
self.dataFrame.withColumn(
with_recommendations = (
self.dataFrame.where(~F.col("top_candidates").isNull())
.withColumn(
"data",
F.explode(
F.from_json("top_candidates", RawDataset.recommendation_schema)
......@@ -73,6 +74,22 @@ class ImageRecommendation:
"source",
)
)
without_recommendations = (
self.dataFrame.where(F.col("top_candidates").isNull())
.withColumnRenamed("wiki_db", "wiki")
.withColumn("image_id", F.lit(None))
.withColumn("confidence_rating", F.lit(None))
.withColumn("source", F.lit(None))
.select(
"wiki",
"page_id",
"page_title",
"image_id",
"confidence_rating",
"source",
)
)
return with_recommendations.union(without_recommendations)
if __name__ == "__main__":
......
pytest==6.2.2
pytest-spark==0.6.0
pytest-cov==2.10.1
\ No newline at end of file
from etl.transform import RawDataset, ImageRecommendation
def test_etl(raw_data):
assert raw_data.count() == 2
ddf = ImageRecommendation(raw_data).transform()
assert (
len(
set(ddf.columns).difference(
{
"wiki",
"page_id",
"page_title",
"image_id",
"confidence_rating",
"source",
}
)
)
== 0
)
expected_num_records = 2
assert ddf.count() == expected_num_records
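# A possible follow-up assertion (hypothetical, not part of this commit):
# spot-check that pages without suggestions survive the union with NULL image fields.
def test_pages_without_suggestions_are_kept(raw_data):
    ddf = ImageRecommendation(raw_data).transform()
    # The fixture contains exactly one page with top_candidates=None; after the
    # union it should still be present, with NULL image columns.
    no_rec = ddf.where(ddf.image_id.isNull()).collect()
    assert len(no_rec) == 1
    assert no_rec[0].page_title == "Some page with no suggestion"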