Initial commit
This commit is contained in:
commit
442ac1426b
2
.gitattributes
vendored
Normal file
2
.gitattributes
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
# Auto detect text files and perform LF normalization
|
||||
* text=auto
|
18
.vscode/settings.json
vendored
Normal file
18
.vscode/settings.json
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
{
|
||||
"sqltools.connections": [
|
||||
{
|
||||
"mysqlOptions": {
|
||||
"authProtocol": "default",
|
||||
"enableSsl": "Disabled"
|
||||
},
|
||||
"previewLimit": 50,
|
||||
"server": "localhost",
|
||||
"port": 3306,
|
||||
"driver": "MySQL",
|
||||
"name": "local",
|
||||
"database": "b3log_symphony",
|
||||
"username": "root",
|
||||
"password": "123456"
|
||||
}
|
||||
]
|
||||
}
|
71
getPaper.py
Normal file
71
getPaper.py
Normal file
|
@ -0,0 +1,71 @@
|
|||
import arxiv
|
||||
|
||||
def get_authors(authors, first_author=False):
    """Format an arXiv author list as a display string.

    :param authors: sequence of author objects (anything with a sensible
        ``str()``, e.g. ``arxiv.Result.Author`` or plain strings).
    :param first_author: when True, return only the first author's name;
        otherwise return every author joined by ", ".
    :return: str -- the formatted author name(s).
    """
    if first_author:
        # Bug fix: the original returned the raw author object here, so the
        # two branches had inconsistent return types. Coerce to str so the
        # function always returns a string.
        return str(authors[0])
    return ", ".join(str(author) for author in authors)
|
||||
|
||||
def get_daily_papers(topic, query="slam", max_results=2):
    """Fetch the most recently submitted arXiv papers matching *query*.

    :param topic: str -- label under which the results are grouped.
    :param query: str -- arXiv search query string.
    :param max_results: int -- maximum number of papers to fetch.
    :return: dict mapping *topic* to a dict of
        ``{paper_key: markdown_table_row}``, where ``paper_key`` is the
        arXiv id without its version suffix.
    """
    # Accumulates one markdown table row per paper, keyed by versionless id.
    content = {}

    search_engine = arxiv.Search(
        query=query,
        max_results=max_results,
        sort_by=arxiv.SortCriterion.SubmittedDate,
    )

    # NOTE(review): Search.results() is deprecated in recent arxiv package
    # releases in favour of arxiv.Client().results(search) -- confirm the
    # installed version before upgrading.
    for result in search_engine.results():
        paper_id = result.get_short_id()
        paper_title = result.title
        paper_url = result.entry_id
        paper_first_author = get_authors(result.authors, first_author=True)
        publish_time = result.published.date()

        print("Time = ", publish_time,
              " title = ", paper_title,
              " author = ", paper_first_author)

        # Strip the version suffix, e.g. 2108.09112v1 -> 2108.09112.
        ver_pos = paper_id.find('v')
        paper_key = paper_id if ver_pos == -1 else paper_id[0:ver_pos]

        content[paper_key] = (
            f"|**{publish_time}**|**{paper_title}**|"
            f"{paper_first_author} et.al.|[{paper_id}]({paper_url})|\n"
        )

    return {topic: content}
|
||||
|
||||
|
||||
if __name__ == "__main__":

    # Map of display label -> arXiv search keyword; extend this dict to
    # track additional topics.
    keywords = {"SLAM": "SLAM"}

    # One entry per topic, each holding that topic's paper table rows.
    data_collector = []

    for topic, keyword in keywords.items():
        print("Keyword: " + topic)
        data_collector.append(
            get_daily_papers(topic, query=keyword, max_results=10)
        )
        print("\n")
|
||||
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user