Article checker #182
@@ -0,0 +1,65 @@
name: Check articles are valid

on:
  workflow_call:
    inputs:
      datasource:
        type: string
        required: true
  workflow_dispatch: # allow manual triggering
    inputs:
      datasource:
        description: 'The datasource to process'
        type: choice
        options:
          - all
          - agentmodels
          - agisf
          - aisafety.info
          - alignment_newsletter
          - alignmentforum
          - arbital
          - arxiv
          - blogs
          - distill
          - eaforum
          - indices
          - lesswrong
          - special_docs
          - youtube
  schedule:
    - cron: "0 */4 * * *" # Every 4 hours

jobs:
  build-dataset:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      - name: Setup Python environment
        uses: actions/setup-python@v2
        with:
          python-version: '3.x'

      - name: Install Pandoc
        run: |
          if [ "${{ inputs.datasource }}" = "gdocs" ]; then
            sudo apt-get update
            sudo apt-get -y install pandoc
          fi

      - name: Install dependencies
        run: pip install -r requirements.txt

      - name: Process dataset
        env:
          CODA_TOKEN: ${{ secrets.CODA_TOKEN }}
          AIRTABLE_API_KEY: ${{ secrets.AIRTABLE_API_KEY }}
          YOUTUBE_API_KEY: ${{ secrets.YOUTUBE_API_KEY }}
          ARD_DB_USER: ${{ secrets.ARD_DB_USER }}
          ARD_DB_PASSWORD: ${{ secrets.ARD_DB_PASSWORD }}
          ARD_DB_HOST: ${{ secrets.ARD_DB_HOST }}
          ARD_DB_NAME: alignment_research_dataset
        run: python main.py fetch ${{ inputs.datasource }}
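
Because the workflow also exposes workflow_dispatch, a run can be triggered programmatically as well as from the Actions UI. A minimal sketch using the GitHub REST API; the OWNER/REPO path and the check-articles.yml file name are placeholders, not taken from this diff:

import requests

def trigger_check(datasource: str, token: str) -> None:
    # OWNER/REPO and the workflow file name are hypothetical - substitute the real ones.
    url = ("https://api.github.com/repos/OWNER/REPO"
           "/actions/workflows/check-articles.yml/dispatches")
    resp = requests.post(
        url,
        headers={
            "Accept": "application/vnd.github+json",
            "Authorization": f"Bearer {token}",
        },
        json={"ref": "main", "inputs": {"datasource": datasource}},
    )
    resp.raise_for_status()  # GitHub responds 204 No Content on success

trigger_check("arxiv", token="<personal access token>")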
@@ -0,0 +1,60 @@
import re
from typing import Any, Dict, List

from align_data.settings import ARTICLE_MAIN_KEYS
from align_data.sources.utils import merge_dicts


def normalize_url(url: str | None) -> str | None:
    if not url:
        return url

    # Remove trailing '/'
    url = url.rstrip("/")

    # Remove http and use https consistently
    url = url.replace("http://", "https://")

    # Remove www
    url = url.replace("https://www.", "https://")

    # Remove index.html or index.htm
    url = re.sub(r'/index\.html?$', '', url)

    # Convert youtu.be links to youtube.com
    url = url.replace("https://youtu.be/", "https://youtube.com/watch?v=")

    # Additional rules for mirror domains can be added here

    # agisafetyfundamentals.com -> aisafetyfundamentals.com
    url = url.replace("https://agisafetyfundamentals.com", "https://aisafetyfundamentals.com")

    return url
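
A few illustrative inputs and outputs (URLs invented for demonstration):

assert normalize_url("http://www.example.com/index.html") == "https://example.com"
assert normalize_url("https://youtu.be/abc123") == "https://youtube.com/watch?v=abc123"
assert normalize_url(None) is None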

def normalize_text(text: str | None) -> str | None:
    return (text or '').replace('\n', ' ').replace('\r', '').strip() or None
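
For example, newlines collapse to spaces and whitespace-only input becomes None:

assert normalize_text("  A title\nsplit across lines\r ") == "A title split across lines"
assert normalize_text("   ") is None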

def format_authors(authors: List[str]) -> str:
    # TODO: Don't keep adding the same authors - come up with some way to reuse them
    authors_str = ",".join(authors)
    if len(authors_str) > 1024:
        authors_str = ",".join(authors_str[:1024].split(",")[:-1])
    return authors_str
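
The 1024-character cap presumably matches a database column size (an assumption; the diff doesn't say). Note that whole names are dropped from the end rather than cut mid-name:

names = [f"Author {i}" for i in range(200)]  # long enough to force truncation
joined = format_authors(names)
assert len(joined) <= 1024
assert joined.split(",")[-1] in names  # the final entry is a complete name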

def article_dict(data, **kwargs) -> Dict[str, Any]:
    data = merge_dicts(data, kwargs)

    summaries = data.pop("summaries", [])
    summary = data.pop("summary", None)

    # Parenthesized so existing summaries are kept even when no new summary is given
    data['summaries'] = summaries + ([summary] if summary else [])
    data['authors'] = format_authors(data.pop("authors", []))
    data['title'] = normalize_text(data.get('title'))

    return dict(
        meta={k: v for k, v in data.items() if k not in ARTICLE_MAIN_KEYS and v is not None},
        **{k: v for k, v in data.items() if k in ARTICLE_MAIN_KEYS},
    )
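
An illustrative call; which keys end up top-level versus under meta depends on ARTICLE_MAIN_KEYS, which isn't shown in this diff:

row = article_dict(
    {"title": "Some\npaper", "url": "https://example.com/paper", "views": 42},
    summary="A one-line summary",
    authors=["A. Author"],
)
# Keys listed in ARTICLE_MAIN_KEYS become top-level fields of `row`; anything
# else (e.g. "views", if it isn't a main key) is collected under row["meta"].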
@@ -0,0 +1,90 @@
[Comment] I don't have a strong opinion on it, but would it make more sense to have this file in align_data/db, since it's used to validate db objects rather than any source in particular (afaict)?

[Comment] Sort of. It also makes use of the parsers, as it fetches data from the appropriate source etc., so dunno :/

import logging
from datetime import datetime, timedelta
from typing import Any, List

from tqdm import tqdm
from sqlalchemy.exc import IntegrityError

from align_data.common.formatters import normalize_url, normalize_text, article_dict
from align_data.db.session import make_session
from align_data.db.models import Article
from align_data.sources.articles.parsers import item_metadata
from align_data.sources.articles.html import fetch


logger = logging.getLogger(__name__)

def update_article_field(article: Article, field: str, value: Any):
    if not value:
        return

    if field == 'url' and normalize_url(article.url) == normalize_url(value):
        # This is pretty much the same url, so don't modify it
        return
    if field == 'title' and normalize_text(article.title) == normalize_text(value):
        # If there are slight differences in the titles (e.g. punctuation), assume the
        # database version is more correct
        return
    if field == 'meta':
        article.meta = article.meta or {}
        for k, v in value.items():
            meta_val = article.meta.get(k)
            try:
                if not meta_val or v > meta_val:
                    article.meta[k] = v
            except Exception:
                # Ignore exceptions here - the metadata isn't that important
                logger.info('Error checking metadata value for article %s: %s', article.url, value)
        return

    article_val = getattr(article, field, None)
    # Assume that if the provided value is larger (or later, in the case of dates), then it's
    # better. This might very well not hold, but it seems like a decent heuristic?
    if not article_val:
        setattr(article, field, value)
    elif isinstance(value, datetime) and value > article_val:
        setattr(article, field, value)
    elif isinstance(value, str) and len(normalize_text(value) or '') > len(normalize_text(article_val) or ''):
        setattr(article, field, normalize_text(value))
[Comment] The > heuristic seems fine, since it will only do anything in the case of comparables like dates and numbers (and idk what else), but imo in other cases it would always update it? Like, if the text changed it would always be updated, rather than only updated when the length of the text grows, which seems like what's going on here. Lmk if I'm wrong.

[Comment] Text changes seem to be:
I didn't want to just overwrite the text if it changed, because the changes might not be for the better. So this seemed like a heuristic that might be better than that. Albeit not much.

[Comment] What are you thinking would be situations where the more recent changes are worse? If I'm understanding it right, "contents" will only be successful (no errors, non-empty) when the url corresponds to a parsers-friendly url, i.e. a url that was originally fetched using the parsers, right? In that case, the newly fetched

[Comment] Actually, it's meant to also work with any pdf and epub. Is that the reason why the contents may be of lesser quality? If the url is not the thing we used to get the pdf in the first place, maybe this generic pdf parser will be worse than a specialized one. Seems tricky.

[Comment] Exactly. Plus some websites return an error in HTML, but with a 200 code, or other such madness, so you can't really trust them :/
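
For intuition, a sketch of how the heuristic behaves, assuming the Article model accepts url and title as keyword arguments (values invented):

art = Article(url="https://example.com/post", title="Short title")

# Same URL modulo normalization (scheme, www, trailing slash) -> left untouched.
update_article_field(art, "url", "http://www.example.com/post/")
assert art.url == "https://example.com/post"

# A strictly longer title replaces the stored one; a shorter one is ignored.
update_article_field(art, "title", "Short title, with a longer subtitle")
assert art.title == "Short title, with a longer subtitle"
update_article_field(art, "title", "Short")
assert art.title == "Short title, with a longer subtitle"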

def update_article(article: Article) -> Article:
    """Check whether there are better data for this article and whether its url is pointing somewhere decent."""
    source_url = article.meta.get('source_url') or article.url
    contents = {}
    if source_url:
        contents = item_metadata(source_url)

    if 'error' not in contents:
        for field, value in article_dict(contents).items():
            update_article_field(article, field, value)
    else:
        logger.info('Error getting contents for %s: %s', article, contents.get('error'))

    if 400 <= fetch(article.url).status_code < 500:
        logger.info('Could not get url for %s', article)
        article.status = 'Unreachable url'

    article.date_checked = datetime.utcnow()

    return article

[Comment] similarly as above, maybe

[Comment] This is really minor, but for consistency the first and second functions should have the same name, except one plural, imo (i.e. update_article and update_articles, or check_article and check_articles), since one just calls the other on a set of lots of articles.

def check_articles(sources: List[str], batch_size=100):
"""Check `batch_size` articles with the given `sources` to see if they have better data.""" | ||
logger.info('Checking %s articles for %s', batch_size, ', '.join(sources)) | ||
with make_session() as session: | ||
for article in tqdm( | ||
session.query(Article) | ||
.filter(Article.date_checked < datetime.now() - timedelta(weeks=4)) | ||
.filter(Article.source.in_(sources)) | ||
.limit(batch_size) | ||
.all() | ||
): | ||
update_article(article) | ||
session.add(article) | ||
logger.debug('commiting') | ||
try: | ||
session.commit() | ||
except IntegrityError as e: | ||
logger.error(e) |
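
A minimal usage sketch; the idea is that main.py (called by the workflow above) wires a subcommand to this function, though that wiring isn't part of this diff:

# Re-check stale lesswrong and arxiv articles, 50 at a time.
check_articles(["lesswrong", "arxiv"], batch_size=50)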
[Comment] nice