add component statistics generation
parent 2873093f3f
commit 60d6ae9dde

1 changed file with 78 additions and 4 deletions
build.py (82 changed lines)

@@ -35,13 +35,13 @@ def parse(file):
    data['category'] = data['filename'].str.split('/', 3, expand=True)[2]
    data['lang'] = data['filename'].str.split('/', 4, expand=True)[3]

    # TODO: build global.csv and progress.csv
    build_index_adoc(data)

    build_language_adoc(data)
    build_language_stats(data)

    # TODO: build each language.csv
    build_component_adoc(data)
    build_component_stats(data)

    # TODO: build each component.adoc

@@ -140,8 +140,6 @@ include::example$language/__LANG__.csv[]

def build_language_stats(data):
    """Build csv files used in adoc file per lang"""
    #

    #
    # Generate progress.csv
    #

@@ -177,6 +175,82 @@ def build_language_stats(data):
        out = out[["component", "pageCount", "pageOver80", "totalMessage", "translatedMessages", "totalSourceWords", "translatedSourceWords", "wordsProgress"]]
        out.to_csv("modules/ROOT/examples/language/"+lang+".csv", header=headers, index=False)

def build_component_adoc(data):
    """Build one adoc file per component"""

    hop = """= __COMPONENT__

This component contains:

* __NB_PAGES__ pages
* __NB_MESSAGES__ messages to translate
* __NB_WORDS__ words to translate

You can probablyfootnote:[Not all components are available for translation yet,
because our translation platform is not fully deployed. Please ask on the translation
mailing list trans@lists.fedoraproject.org if a missing component bothers you.]
translate this component at: https://translate.stg.fedoraproject.org/projects/

[%header,format=csv]
.Statistics per component
|===
include::example$component/__COMPONENT__.csv[]
|===

"""

    data = data[data.category == "pot"]

    for component in data.component.unique():
        data_component = data[data.component == component]
        nb_pages = len(data_component.filename.unique())
        nb_messages = data_component.totalMessage.sum()
        nb_words = data_component.totalSourceWords.sum()

        with open("modules/ROOT/pages/component/"+component+".adoc", "w") as f:
            to_write = hop.replace('__COMPONENT__', component)
            to_write = to_write.replace('__NB_PAGES__', str(nb_pages))
            to_write = to_write.replace('__NB_MESSAGES__', str(nb_messages))
            to_write = to_write.replace('__NB_WORDS__', str(nb_words))
            f.write(to_write)

def build_component_stats(data):
    """Build csv files used in adoc file per component"""
    #
    # Generate progress.csv
    #
    details = data[data.category == "po"]

    for component in details.component.unique():
        stats = details[details.component == component]

        stats_lang = stats.groupby(["lang"]).sum()
        stats_lang = stats_lang.reset_index()

        stats_lang = stats_lang[["lang", "translatedSourceWords", "totalSourceWords"]]
        stats_lang["wordsProgress"] = stats_lang["translatedSourceWords"] / stats_lang["totalSourceWords"]
        stats_lang["wordsProgress"] = pandas.Series(["{0:.2f}%".format(val * 100) for val in stats_lang["wordsProgress"]], index=stats_lang.index)


        # create stats per filename (aka pages)
        stats_file = stats.groupby(["lang", "filename"]).sum()
        stats_file["pageprogress"] = stats_file["translatedSourceWords"] / stats_file["totalSourceWords"]
        stats_file["pageOver80"] = (stats_file["pageprogress"] >= 0.8) * 1
        stats_file["pageCount"] = stats.groupby(["component", "filename", "lang"]).filename.nunique().value_counts().index.tolist()[0]
        stats_file = stats_file.groupby("lang").sum().reset_index()
        stats_file = stats_file[["lang", "pageCount", "pageOver80"]]


        out = pandas.merge(stats_lang, stats_file, on="lang")
        out = out.drop_duplicates()

        out['lang'] = pandas.Series(["xref:language/{code}.adoc[{code}]".format(code=val) for val in out["lang"]], index=out.index)

        # write csv
        headers = ["Language code", "Pages", "Pages >= 80%", "Words progress"]
        out = out[["lang", "pageCount", "pageOver80", "wordsProgress"]]
        out.to_csv("modules/ROOT/examples/component/"+component+".csv", header=headers, index=False)

def store(dataset, name):
    """Store dataset to csv"""
    dataset.to_csv(name, index=False)
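
For reference, here is a minimal, self-contained sketch (not part of this commit) of the
aggregation that build_component_stats() performs, run on a tiny made-up dataset using the
same column names the script relies on (component, lang, filename, translatedSourceWords,
totalSourceWords). It counts pages directly with a named aggregation instead of the
value_counts() lookup used above, so treat it as an illustration of the idea rather than
the committed code.

# illustration only: synthetic data, simplified page counting
import pandas

details = pandas.DataFrame({
    "component": ["docs-fp-o"] * 4,
    "lang": ["fr", "fr", "de", "de"],
    "filename": ["po/docs-fp-o/fr/index.po", "po/docs-fp-o/fr/install.po",
                 "po/docs-fp-o/de/index.po", "po/docs-fp-o/de/install.po"],
    "translatedSourceWords": [90, 40, 10, 0],
    "totalSourceWords": [100, 50, 100, 50],
})

# word-level progress per language
stats_lang = details.groupby("lang", as_index=False)[
    ["translatedSourceWords", "totalSourceWords"]].sum()
stats_lang["wordsProgress"] = [
    "{0:.2f}%".format(val * 100)
    for val in stats_lang["translatedSourceWords"] / stats_lang["totalSourceWords"]]

# page-level progress: one row per (lang, filename), then count pages and
# pages that are at least 80% translated
stats_file = details.groupby(["lang", "filename"], as_index=False)[
    ["translatedSourceWords", "totalSourceWords"]].sum()
stats_file["pageOver80"] = (
    stats_file["translatedSourceWords"] / stats_file["totalSourceWords"] >= 0.8) * 1
stats_file = stats_file.groupby("lang", as_index=False).agg(
    pageCount=("filename", "nunique"),
    pageOver80=("pageOver80", "sum"))

out = pandas.merge(stats_lang, stats_file, on="lang")
print(out[["lang", "pageCount", "pageOver80", "wordsProgress"]])

Summing the word counts before dividing gives a word-weighted progress figure per language,
which is what the generated per-component CSV reports.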