Remove Python generation

This commit is contained in:
Jean-Baptiste Holcroft 2019-11-16 14:01:56 +01:00
parent f747f78922
commit c81b619dcb
3 changed files with 0 additions and 5116 deletions

256
build.py
View file

@ -1,256 +0,0 @@
#!/usr/bin/env python3
"""generate adoc and csv files for each language & components"""
import os
import pandas
def main():
    """Entry point: run the report generation over the aggregated stats file."""
    results_file = "./results.csv"
    parse(results_file)
def parse(file):
    """Call all cleaning functions.

    *file* is a pocount CSV with one row per PO/POT file.  Column names are
    assigned positionally, the unused counters are dropped, and the
    component, category ("po" or "pot") and language code are recovered from
    the path layout ``./<component>/<category>/<lang>/...`` before every
    report generator runs.
    """
    data = pandas.read_csv(file)
    data.columns = ['filename',
                    'translatedMessages',
                    'translatedSourceWords',
                    'translatedTargetWords',
                    'fuzzyMessages',
                    'fuzzySourceWords',
                    'untranslatedMessages',
                    'untranslatedSourceWords',
                    'totalMessage',
                    'totalSourceWords',
                    'reviewMessages',
                    'reviewSourceWords']
    # Keep only the counters the reports actually use.
    data = data[['filename',
                 'translatedMessages',
                 'translatedSourceWords',
                 'totalMessage',
                 'totalSourceWords']]
    # Fix: pass the split limit as the keyword ``n`` — the positional form
    # was deprecated in pandas 1.4 and removed in pandas 2.0.  Behavior on
    # older pandas is unchanged.
    data['component'] = data['filename'].str.split('/', n=2, expand=True)[1]
    data['category'] = data['filename'].str.split('/', n=3, expand=True)[2]
    data['lang'] = data['filename'].str.split('/', n=4, expand=True)[3]
    build_index_adoc(data)
    build_language_adoc(data)
    build_language_stats(data)
    build_component_adoc(data)
    build_component_stats(data)
def build_index_adoc(data):
    """Build csv files used in index.adoc"""
    #
    # Generate global.csv
    #
    # "pot" rows are the source templates, so their totals give the overall
    # translatable volume, independent of any language.
    stats = data[data.category == "pot"]
    df = pandas.DataFrame({'Total messages': stats['totalMessage'].sum(),
                           'Total words': stats['totalSourceWords'].sum()}, index=[0])
    store(df, "modules/ROOT/examples/global.csv")
    # Denominator for the per-language overall word progress computed below.
    totalWords = stats['totalSourceWords'].sum()
    #
    # Generate progress.csv
    #
    details = data[data.category == "po"]
    # create stats per filename (aka pages)
    stats_file = details.groupby(["component", "filename", "lang"]).sum()
    stats_file["pageprogress"] = stats_file["translatedSourceWords"] / stats_file["totalSourceWords"]
    # 1 when the page is at least 80% translated, else 0: summing the column
    # later counts qualifying pages.
    stats_file["pageOver80"] = (stats_file["pageprogress"] >= 0.8) *1
    # Each (component, filename, lang) group holds exactly one file, so this
    # resolves to 1 per row; summing it later yields a page count.
    stats_file["pageCount"] = details.groupby(["component", "filename", "lang"]).filename.nunique().value_counts().index.tolist()[0]
    # create stats per components
    stats_comp = stats_file.groupby(["component", "lang"]).sum()
    stats_comp["componentprogress"] = stats_comp["translatedSourceWords"] / stats_comp["totalSourceWords"]
    stats_comp["componentOver80"] = (stats_comp["componentprogress"] >= 0.8) *1
    # Number of distinct components each language has started translating.
    count_component = details[["lang", "component"]].drop_duplicates().groupby("lang").count().reset_index()
    count_component = count_component.rename(columns={'component': 'componentCount'})
    stats_comp = stats_comp.reset_index()
    stats_comp = pandas.merge(stats_comp, count_component, on="lang")
    # create stats per language
    stats_lang = stats_comp.groupby(["lang"]).sum()
    # Progress is measured against ALL source words, not only started files.
    stats_lang["wordprogress"] = stats_lang["translatedSourceWords"] / totalWords
    stats_lang['wordprogress'] = pandas.Series(["{0:.2f}%".format(val * 100) for val in stats_lang["wordprogress"]], index = stats_lang.index)
    # add a URL
    stats_lang = stats_lang.reset_index()
    stats_lang['lang'] = pandas.Series(["xref:language/{code}.adoc[{code}]".format(code = val) for val in stats_lang["lang"]], index = stats_lang.index)
    # wrote csv
    headers = ["Language code", "Components", "Components >= 80%", "Pages", "Pages >= 80%", "Words progress"]
    out = stats_lang[["lang", "componentCount", "componentOver80", "pageCount", "pageOver80", "wordprogress" ]]
    out.to_csv("modules/ROOT/examples/progress.csv", header=headers,index=False)
def build_language_adoc(data):
    """Build one adoc file per language"""
    # AsciiDoc page template; __PLACEHOLDER__ tokens are substituted below.
    hop = """= __LANG_NAME__
Would you like to help?
* Good, connect and translate: https://translate.stg.fedoraproject.org/languages/__LANG__/
* Better, get it touch with the team: https://fedoraproject.org/wiki/L10N_Teams
Current progress: __PROGRESS__ (in words, for started components) +
Overall progress: __PROGRESS_ALL__ (in words, for all components)
[%header,format=csv]
.Statistics per component
|===
include::example$language/__LANG__.csv[]
|==="""
    # Total translatable words come from the source templates ("pot").
    totalWords = data[data.category == "pot"]['totalSourceWords'].sum()
    data = data[data.category == "po"]
    for lang in data.lang.unique():
        progress = data[data.lang == lang]
        # Progress over the components this language has actually started.
        progress = progress.translatedSourceWords.sum() / progress.totalSourceWords.sum()
        # Progress over every component, started or not.
        progress_all = data[data.lang == lang].translatedSourceWords.sum() / totalWords
        with open("modules/ROOT/pages/language/"+lang+".adoc", "w") as f:
            to_write = hop.replace('__LANG__', lang)
            # NOTE(review): the display name is just the language code here —
            # presumably a human-readable name was intended; confirm.
            to_write = to_write.replace('__LANG_NAME__', lang)
            to_write = to_write.replace('__PROGRESS__', "{0:.2f}%".format(progress * 100))
            to_write = to_write.replace('__PROGRESS_ALL__', "{0:.2f}%".format(progress_all * 100))
            f.write(to_write)
def build_language_stats(data):
"""Build csv files used in adoc file per lang"""
#
# Generate progress.csv
#
details = data[data.category == "po"]
for lang in details.lang.unique():
stats = data[data.lang == lang]
stats_comp = stats.groupby(["component"]).sum()
stats_comp = stats_comp.reset_index()
stats_comp = stats_comp[["component", "translatedMessages", "translatedSourceWords", "totalMessage", "totalSourceWords"]]
stats_comp["wordsProgress"] = stats_comp["translatedSourceWords"] / stats_comp["totalSourceWords"]
stats_comp["wordsProgress"] = pandas.Series(["{0:.2f}%".format(val * 100) for val in stats_comp["wordsProgress"]], index = stats_comp.index)
# create stats per filename (aka pages)
stats_file = stats.groupby(["component", "filename"]).sum()
stats_file["pageprogress"] = stats_file["translatedSourceWords"] / stats_file["totalSourceWords"]
stats_file["pageOver80"] = (stats_file["pageprogress"] >= 0.8) *1
stats_file["pageCount"] = stats.groupby(["component", "filename", "lang"]).filename.nunique().value_counts().index.tolist()[0]
stats_file = stats_file.groupby("component").sum().reset_index()
stats_file = stats_file[["component", "pageCount", "pageOver80"]]
out = pandas.merge(stats_comp, stats_file, on="component")
out = out.drop_duplicates()
out['component'] = pandas.Series(["xref:component/{code}.adoc[{code}]".format(code = val) for val in out["component"]], index = out.index)
# write csv
headers = ["Component", "Pages", "Pages >= 80%", "Total messages", "Translated messages", "Total words", "Translated words", "Words progress"]
out = out[["component", "pageCount", "pageOver80", "totalMessage", "translatedMessages", "totalSourceWords", "translatedSourceWords", "wordsProgress" ]]
out.to_csv("modules/ROOT/examples/language/"+lang+".csv", header=headers,index=False)
def build_component_adoc(data):
    """Build one adoc file per component"""
    # AsciiDoc page template; __PLACEHOLDER__ tokens are substituted below.
    hop = """= __COMPONENT__
This component contains:
* __NB_PAGES__ pages
* __NB_MESSAGES__ messages to translate
* __NB_WORDS__ words to translate
You can probably.footnote:[All component aren't available for translation for now
because our translation platform is not fully deployed. Please ask the translation
mailing list trans@lists.fedoraproject.org if a missing component bother you.]
translate this component in: https://translate.stg.fedoraproject.org/projects/
[%header,format=csv]
.Statistics per component
|===
include::example$component/__COMPONENT__.csv[]
|===
"""
    # Component sizing comes from the source templates ("pot"), not from
    # any translation.
    data = data[data.category == "pot"]
    for component in data.component.unique():
        data_component = data[data.component == component]
        # A "page" is one distinct source file in the component.
        nb_pages = len(data_component.filename.unique())
        nb_messages = data_component.totalMessage.sum()
        nb_words = data_component.totalSourceWords.sum()
        with open("modules/ROOT/pages/component/"+component+".adoc", "w") as f:
            to_write = hop.replace('__COMPONENT__', component)
            to_write = to_write.replace('__NB_PAGES__', str(nb_pages))
            to_write = to_write.replace('__NB_MESSAGES__', str(nb_messages))
            to_write = to_write.replace('__NB_WORDS__', str(nb_words))
            f.write(to_write)
def build_component_stats(data):
    """Build csv files used in adoc file per component"""
    #
    # Generate progress.csv
    #
    # Only translation files ("po") contribute to per-component progress.
    details = data[data.category == "po"]
    for component in details.component.unique():
        stats = details[details.component == component]
        stats_lang = stats.groupby(["lang"]).sum()
        stats_lang = stats_lang.reset_index()
        stats_lang = stats_lang[["lang", "translatedSourceWords", "totalSourceWords"]]
        stats_lang["wordsProgress"] = stats_lang["translatedSourceWords"] / stats_lang["totalSourceWords"]
        stats_lang["wordsProgress"] = pandas.Series(["{0:.2f}%".format(val * 100) for val in stats_lang["wordsProgress"]], index = stats_lang.index)
        # create stats per filename (aka pages)
        stats_file = stats.groupby(["lang", "filename"]).sum()
        stats_file["pageprogress"] = stats_file["translatedSourceWords"] / stats_file["totalSourceWords"]
        # 1 when a page is at least 80% translated, so the later sum counts pages.
        stats_file["pageOver80"] = (stats_file["pageprogress"] >= 0.8) *1
        # Each (component, filename, lang) group holds exactly one file, so
        # this resolves to 1 per row; summing it yields a page count.
        stats_file["pageCount"] = stats.groupby(["component", "filename", "lang"]).filename.nunique().value_counts().index.tolist()[0]
        stats_file = stats_file.groupby("lang").sum().reset_index()
        stats_file = stats_file[["lang", "pageCount", "pageOver80"]]
        out = pandas.merge(stats_lang, stats_file, on="lang")
        out = out.drop_duplicates()
        # Turn the language code into an AsciiDoc cross-reference.
        out['lang'] = pandas.Series(["xref:language/{code}.adoc[{code}]".format(code = val) for val in out["lang"]], index = out.index)
        # write csv
        headers = ["Language code", "Pages", "Pages >= 80%", "Words progress"]
        out = out[["lang", "pageCount", "pageOver80", "wordsProgress" ]]
        out.to_csv("modules/ROOT/examples/component/"+component+".csv", header=headers,index=False)
def store(dataset, name):
    """Persist *dataset* as a CSV file at path *name*."""
    # Keep the frame's own index out of the output file.
    dataset.to_csv(path_or_buf=name, index=False)
# Run the full report generation only when executed as a script, so the
# module can be imported without side effects.
if __name__ == '__main__':
    main()

View file

@ -1 +0,0 @@
pandas

File diff suppressed because it is too large. (Load diff)