'''
Local settings for a heroku_ebooks account.
'''
from os import environ
# Configuration for the Twitter API
TWITTER_API_VERSION = 'v2'  # Use "1.1" for older API keys.
ENABLE_TWITTER_SOURCES = True  # Fetch Twitter statuses as a source?
ENABLE_TWITTER_POSTING = True  # Tweet the resulting status?
MY_BEARER_TOKEN = environ.get('TWITTER_BEARER_TOKEN')  # Your Twitter API Bearer Token set in Heroku config
MY_CONSUMER_KEY = environ.get('TWITTER_CONSUMER_KEY')  # Your Twitter API Consumer Key set in Heroku config
MY_CONSUMER_SECRET = environ.get('TWITTER_CONSUMER_SECRET')  # Your Twitter API Consumer Secret set in Heroku config
MY_ACCESS_TOKEN_KEY = environ.get('TWITTER_ACCESS_TOKEN_KEY')  # Your Twitter API Access Token Key set in Heroku config
MY_ACCESS_TOKEN_SECRET = environ.get('TWITTER_ACCESS_SECRET')  # Your Twitter API Access Token Secret set in Heroku config
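# Illustrative only: one way to set the environment variables above, assuming you
# deploy with the Heroku CLI (the values below are placeholders):
#   heroku config:set TWITTER_BEARER_TOKEN=AAAA... TWITTER_CONSUMER_KEY=xxxx
#   heroku config:set TWITTER_CONSUMER_SECRET=xxxx TWITTER_ACCESS_TOKEN_KEY=xxxx
#   heroku config:set TWITTER_ACCESS_SECRET=xxxx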
# Configuration for the Mastodon API
ENABLE_MASTODON_SOURCES = False  # Fetch Mastodon statuses as a source?
ENABLE_MASTODON_POSTING = False  # Toot the resulting status?
MASTODON_API_BASE_URL = ""  # An instance URL, e.g. https://botsin.space
CLIENT_CRED_FILENAME = ''  # The Mastodon client secret file you created for this project
USER_ACCESS_FILENAME = ''  # The Mastodon user credential file you created at installation
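# Illustrative values only (the filenames are whatever you chose when you registered
# the app, e.g. with Mastodon.py):
# MASTODON_API_BASE_URL = "https://botsin.space"
# CLIENT_CRED_FILENAME = 'clientcred.secret'
# USER_ACCESS_FILENAME = 'usercred.secret'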
# Sources (Twitter, Mastodon, a local text file, or a web page)
TWITTER_SOURCE_ACCOUNTS = [""]  # A list of comma-separated, quote-enclosed Twitter handles of the accounts you'll generate tweets from, e.g. ["account1", "account2"]. For a single account, no comma is needed.
TWEETS_TO_GRAB = 500  # API v2 only: how many tweets to grab to train the chain. Note that Twitter API v2 lets you pull a maximum of 500,000 tweets per month.
MASTODON_SOURCE_ACCOUNTS = [""]  # A list, e.g. ["@user@instance.tld"]
SOURCE_EXCLUDE = r'^$'  # Source tweets matching this regex will not be added to the Markov chain, e.g. to filter out inappropriate words.
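# For example (illustrative pattern only), to skip any source tweet containing either
# of two unwanted words:
# SOURCE_EXCLUDE = r'\b(word1|word2)\b'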
STATIC_TEST = False  # Set this to True to test Markov generation from a static file instead of the API.
TEST_SOURCE = ".txt"  # The name of a text file containing a string-ified list, used for testing without unnecessarily hitting the Twitter API. You can use the included testcorpus.txt if needed.
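# For example, to test against the bundled corpus:
# STATIC_TEST = True
# TEST_SOURCE = "testcorpus.txt"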
SCRAPE_URL = False  # Set this to True to scrape web pages instead.
SRC_URL = ['http://www.example.com/one', 'https://www.example.com/two']  # A comma-separated list of URLs to scrape.
WEB_CONTEXT = ['span', 'h2']  # A comma-separated list of the tag or object to search for in each page above.
WEB_ATTRIBUTES = [{'class': 'example-text'}, {}]  # A list of dictionaries containing the attributes to match for each page above.
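# Illustrative single-page setup (hypothetical URL and class name), assuming the three
# lists above are paired up by index:
# SRC_URL = ['https://www.example.com/blog']
# WEB_CONTEXT = ['p']
# WEB_ATTRIBUTES = [{'class': 'post-body'}]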
ODDS = 8  # How often do you want this to post? On average, 1 out of every ODDS runs (here 1 in 8).
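# Illustrative: assuming the script is triggered by an hourly scheduler, ODDS = 8 means
# roughly 1 post per 8 runs, i.e. about 3 posts per day on average.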
ORDER = 2  # How closely should the output hew to making sense? 2 is low and 4 is high.
DEBUG = True  # Set this to False to start tweeting live.
TWEET_ACCOUNT = ""  # The name of the account you're tweeting to.
# Configuration for the Twitter archive parser. TEST_SOURCE will be reused as the corpus location.
TWITTER_ARCHIVE_NAME = "tweets.csv"  # The name of your Twitter archive
IGNORE_RETWEETS = True  # Set to True if you want to remove retweets